diff --git a/.coveragerc b/.coveragerc deleted file mode 100644 index 733bacb4..00000000 --- a/.coveragerc +++ /dev/null @@ -1,11 +0,0 @@ -[run] -branch = True -source = rally - -[report] -ignore_errors = True -precision = 3 -omit = */migrations/versions/ca3626f62937_init_migration.py - -[html] -directory = cover diff --git a/.dockerignore b/.dockerignore deleted file mode 100644 index e6dbcd91..00000000 --- a/.dockerignore +++ /dev/null @@ -1,5 +0,0 @@ -rally-jobs -tests -contrib -test-requirements.txt -tox.ini diff --git a/.gitignore b/.gitignore deleted file mode 100644 index e6cf03ff..00000000 --- a/.gitignore +++ /dev/null @@ -1,51 +0,0 @@ -*.py[cod] - -# C extensions -*.so - -# Packages -*.egg* -dist -build -eggs -.eggs -parts -bin -var -sdist -develop-eggs -.installed.cfg -lib -!devstack/lib -lib64 - -# Installer logs -pip-log.txt - -# Unit test / coverage reports -.coverage -.tox -nosetests.xml -cover -cover-master -.testrepository -*.sqlite -.venv -.cache -.test_results/ - -# Docs -doc/source/_build/ - -# Translations -*.mo - -# Mr Developer -.mr.developer.cfg -.project -.idea -.pydevproject -*.swp - -# Mac Desktop Service Store -*.DS_Store diff --git a/.gitreview b/.gitreview deleted file mode 100644 index 413df514..00000000 --- a/.gitreview +++ /dev/null @@ -1,4 +0,0 @@ -[gerrit] -host=review.openstack.org -port=29418 -project=openstack/rally.git diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst deleted file mode 120000 index c5fee138..00000000 --- a/CONTRIBUTING.rst +++ /dev/null @@ -1 +0,0 @@ -doc/source/contribute.rst \ No newline at end of file diff --git a/Dockerfile b/Dockerfile deleted file mode 100644 index 16d43e8f..00000000 --- a/Dockerfile +++ /dev/null @@ -1,68 +0,0 @@ -FROM ubuntu:16.04 -MAINTAINER Sergey Skripnick - -# install prereqs -RUN apt-get update && apt-get install --yes wget python vim bash-completion gcc lsb-release - -# ubuntu's pip is too old to work with the version of requests we -# require, so get pip with get-pip.py -RUN wget https://bootstrap.pypa.io/get-pip.py && \ - python get-pip.py && \ - rm -f get-pip.py - -# install bindep -RUN pip install bindep - -# create rally user -RUN apt-get install sudo && \ - useradd -u 65500 -m rally && \ - usermod -aG sudo rally && \ - echo "rally ALL=(ALL) NOPASSWD:ALL" > /etc/sudoers.d/00-rally-user && \ - ln -s /opt/rally/doc /home/rally/rally-docs - -# install rally. the COPY command below frequently invalidates -# subsequent cache -COPY . /tmp/rally -WORKDIR /tmp/rally -RUN ./install_rally.sh --system --verbose --yes \ - --db-name /home/rally/.rally.sqlite && \ - pip install -r optional-requirements.txt && \ - mkdir /opt/rally/ && \ - # TODO(andreykurilin): build docs to rst before move, since we have several - # extensions. - mv certification/ samples/ doc/ /opt/rally/ && \ - chown -R rally /opt/rally /etc/rally && \ - rm -rf /tmp/* && \ - apt-get -y remove \ - build-essential \ - libxml2-dev \ - libxslt1-dev \ - python3 \ - && \ - apt-get -y autoremove && \ - apt-get clean - -RUN echo '[ ! -z "$TERM" -a -r /etc/motd ] && cat /etc/motd' \ - >> /etc/bash.bashrc; echo '\ -╔═════════════════════════════════════════════════════════════════════════════╗\n\ -║ Welcome to Rally Docker container! 
║\n\ -║ Rally certification tasks, samples and docs are located at /opt/rally/ ║\n\ -║ Rally at readthedocs - http://rally.readthedocs.org ║\n\ -║ How to contribute - http://rally.readthedocs.org/en/latest/contribute.html ║\n\ -║ If you have any questions, you can reach the Rally team by: ║\n\ -║ * e-mail - openstack-dev@lists.openstack.org with tag [Rally] in subject ║\n\ -║ * irc - "#openstack-rally" channel at freenode.net ║\n\ -╚═════════════════════════════════════════════════════════════════════════════╝\n' > /etc/motd - -VOLUME ["/home/rally"] - -WORKDIR /home/rally/ -USER rally -ENV HOME /home/rally/ -CMD ["bash", "--login"] - -RUN rally-manage db recreate - -# TODO(stpierre): Find a way to use `rally` as the -# entrypoint. Currently this is complicated by the need to run -# rally-manage to create the database. diff --git a/LICENSE b/LICENSE deleted file mode 100644 index 37ec93a1..00000000 --- a/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ -Apache License -Version 2.0, January 2004 -http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - -"License" shall mean the terms and conditions for use, reproduction, and -distribution as defined by Sections 1 through 9 of this document. - -"Licensor" shall mean the copyright owner or entity authorized by the copyright -owner that is granting the License. - -"Legal Entity" shall mean the union of the acting entity and all other entities -that control, are controlled by, or are under common control with that entity. -For the purposes of this definition, "control" means (i) the power, direct or -indirect, to cause the direction or management of such entity, whether by -contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the -outstanding shares, or (iii) beneficial ownership of such entity. - -"You" (or "Your") shall mean an individual or Legal Entity exercising -permissions granted by this License. - -"Source" form shall mean the preferred form for making modifications, including -but not limited to software source code, documentation source, and configuration -files. - -"Object" form shall mean any form resulting from mechanical transformation or -translation of a Source form, including but not limited to compiled object code, -generated documentation, and conversions to other media types. - -"Work" shall mean the work of authorship, whether in Source or Object form, made -available under the License, as indicated by a copyright notice that is included -in or attached to the work (an example is provided in the Appendix below). - -"Derivative Works" shall mean any work, whether in Source or Object form, that -is based on (or derived from) the Work and for which the editorial revisions, -annotations, elaborations, or other modifications represent, as a whole, an -original work of authorship. For the purposes of this License, Derivative Works -shall not include works that remain separable from, or merely link (or bind by -name) to the interfaces of, the Work and Derivative Works thereof. - -"Contribution" shall mean any work of authorship, including the original version -of the Work and any modifications or additions to that Work or Derivative Works -thereof, that is intentionally submitted to Licensor for inclusion in the Work -by the copyright owner or by an individual or Legal Entity authorized to submit -on behalf of the copyright owner. 
For the purposes of this definition, -"submitted" means any form of electronic, verbal, or written communication sent -to the Licensor or its representatives, including but not limited to -communication on electronic mailing lists, source code control systems, and -issue tracking systems that are managed by, or on behalf of, the Licensor for -the purpose of discussing and improving the Work, but excluding communication -that is conspicuously marked or otherwise designated in writing by the copyright -owner as "Not a Contribution." - -"Contributor" shall mean Licensor and any individual or Legal Entity on behalf -of whom a Contribution has been received by Licensor and subsequently -incorporated within the Work. - -2. Grant of Copyright License. - -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable copyright license to reproduce, prepare Derivative Works of, -publicly display, publicly perform, sublicense, and distribute the Work and such -Derivative Works in Source or Object form. - -3. Grant of Patent License. - -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable (except as stated in this section) patent license to make, have -made, use, offer to sell, sell, import, and otherwise transfer the Work, where -such license applies only to those patent claims licensable by such Contributor -that are necessarily infringed by their Contribution(s) alone or by combination -of their Contribution(s) with the Work to which such Contribution(s) was -submitted. If You institute patent litigation against any entity (including a -cross-claim or counterclaim in a lawsuit) alleging that the Work or a -Contribution incorporated within the Work constitutes direct or contributory -patent infringement, then any patent licenses granted to You under this License -for that Work shall terminate as of the date such litigation is filed. - -4. Redistribution. - -You may reproduce and distribute copies of the Work or Derivative Works thereof -in any medium, with or without modifications, and in Source or Object form, -provided that You meet the following conditions: - -You must give any other recipients of the Work or Derivative Works a copy of -this License; and -You must cause any modified files to carry prominent notices stating that You -changed the files; and -You must retain, in the Source form of any Derivative Works that You distribute, -all copyright, patent, trademark, and attribution notices from the Source form -of the Work, excluding those notices that do not pertain to any part of the -Derivative Works; and -If the Work includes a "NOTICE" text file as part of its distribution, then any -Derivative Works that You distribute must include a readable copy of the -attribution notices contained within such NOTICE file, excluding those notices -that do not pertain to any part of the Derivative Works, in at least one of the -following places: within a NOTICE text file distributed as part of the -Derivative Works; within the Source form or documentation, if provided along -with the Derivative Works; or, within a display generated by the Derivative -Works, if and wherever such third-party notices normally appear. The contents of -the NOTICE file are for informational purposes only and do not modify the -License. 
You may add Your own attribution notices within Derivative Works that -You distribute, alongside or as an addendum to the NOTICE text from the Work, -provided that such additional attribution notices cannot be construed as -modifying the License. -You may add Your own copyright statement to Your modifications and may provide -additional or different license terms and conditions for use, reproduction, or -distribution of Your modifications, or for any such Derivative Works as a whole, -provided Your use, reproduction, and distribution of the Work otherwise complies -with the conditions stated in this License. - -5. Submission of Contributions. - -Unless You explicitly state otherwise, any Contribution intentionally submitted -for inclusion in the Work by You to the Licensor shall be under the terms and -conditions of this License, without any additional terms or conditions. -Notwithstanding the above, nothing herein shall supersede or modify the terms of -any separate license agreement you may have executed with Licensor regarding -such Contributions. - -6. Trademarks. - -This License does not grant permission to use the trade names, trademarks, -service marks, or product names of the Licensor, except as required for -reasonable and customary use in describing the origin of the Work and -reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. - -Unless required by applicable law or agreed to in writing, Licensor provides the -Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, -including, without limitation, any warranties or conditions of TITLE, -NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are -solely responsible for determining the appropriateness of using or -redistributing the Work and assume any risks associated with Your exercise of -permissions under this License. - -8. Limitation of Liability. - -In no event and under no legal theory, whether in tort (including negligence), -contract, or otherwise, unless required by applicable law (such as deliberate -and grossly negligent acts) or agreed to in writing, shall any Contributor be -liable to You for damages, including any direct, indirect, special, incidental, -or consequential damages of any character arising as a result of this License or -out of the use or inability to use the Work (including but not limited to -damages for loss of goodwill, work stoppage, computer failure or malfunction, or -any and all other commercial damages or losses), even if such Contributor has -been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. - -While redistributing the Work or Derivative Works thereof, You may choose to -offer, and charge a fee for, acceptance of support, warranty, indemnity, or -other liability obligations and/or rights consistent with this License. However, -in accepting such obligations, You may act only on Your own behalf and on Your -sole responsibility, not on behalf of any other Contributor, and only if You -agree to indemnify, defend, and hold each Contributor harmless for any liability -incurred by, or claims asserted against, such Contributor by reason of your -accepting any such warranty or additional liability. 
- -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work - -To apply the Apache License to your work, attach the following boilerplate -notice, with the fields enclosed by brackets "[]" replaced with your own -identifying information. (Don't include the brackets!) The text should be -enclosed in the appropriate comment syntax for the file format. We also -recommend that a file or class name and description of purpose be included on -the same "printed page" as the copyright notice for easier identification within -third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/README b/README new file mode 100644 index 00000000..8fcd2b2f --- /dev/null +++ b/README @@ -0,0 +1,14 @@ +This project is no longer maintained. + +The contents of this repository are still available in the Git +source code management system. To see the contents of this +repository before it reached its end of life, please check out the +previous commit with "git checkout HEAD^1". + +For ongoing work on maintaining OpenStack packages in the Debian +distribution, please see the Debian OpenStack packaging team at +https://wiki.debian.org/OpenStack/. + +For any further questions, please email +openstack-dev@lists.openstack.org or join #openstack-dev on +Freenode. diff --git a/README.rst b/README.rst deleted file mode 100644 index 1c491fbe..00000000 --- a/README.rst +++ /dev/null @@ -1,102 +0,0 @@ -===== -Rally -===== - -Team and repository tags ======================== - -.. image:: https://governance.openstack.org/badges/rally.svg - :target: https://governance.openstack.org/reference/tags/index.html - -.. image:: https://img.shields.io/pypi/v/rally.svg - :target: https://pypi.python.org/pypi/rally/ - :alt: Latest Version - -.. image:: https://img.shields.io/badge/gitter-join_chat-ff69b4.svg - :target: https://gitter.im/rally-dev/Lobby - :alt: Gitter Chat - -.. image:: https://img.shields.io/badge/tasks-trello_board-blue.svg - :target: https://trello.com/b/DoD8aeZy/rally - :alt: Trello Board - -.. image:: https://img.shields.io/github/license/openstack/rally.svg - :target: https://www.apache.org/licenses/LICENSE-2.0 - :alt: Apache License, Version 2.0 - - -What is Rally ============= - -Rally is a Benchmark-as-a-Service project for OpenStack. - -Rally is intended to provide the community with a benchmarking tool that is capable of performing **specific**, **complicated** and **reproducible** test cases on **real deployment** scenarios. - -If you are here, you are probably familiar with OpenStack and you also know that it's a really huge ecosystem of cooperative services. When something fails, performs slowly or doesn't scale, it's really hard to answer the questions of what happened, why it happened and where. Another reason why you could be here is that you would like to build an OpenStack CI/CD system that will allow you to improve the SLA, performance and stability of OpenStack continuously. - -The OpenStack QA team mostly works on CI/CD that ensures that new patches don't break a specific single-node installation of OpenStack. On the other hand, it's clear that such CI/CD is only an indication and does not cover all cases (e.g. a cloud that works well in a single-node installation will not necessarily keep doing so in a 1k-server installation under high load). Rally aims to fix this and help us answer the question "How does OpenStack work at scale?". To make that possible, we are going to automate and unify all the steps required for benchmarking OpenStack at scale: multi-node OS deployment, verification, benchmarking & profiling. - - -**Rally** workflow can be visualized by the following diagram: - -.. image:: doc/source/images/Rally-Actions.png - :alt: Rally Architecture - - -Who Is Using Rally ================== - -.. image:: doc/source/images/Rally_who_is_using.png - :alt: Who is Using Rally - - -Documentation ============= - -`Rally documentation on ReadTheDocs <https://rally.readthedocs.org/en/latest/>`_ is a perfect place to start learning about Rally. It provides you with **easy** and **illustrative** guidance through this benchmarking tool. For example, check out the `Rally step-by-step tutorial <https://rally.readthedocs.io/en/latest/quick_start/tutorial.html>`_ that explains, in a series of lessons, how to explore the power of Rally in benchmarking your OpenStack clouds. - - -Architecture ------------ - -In terms of software architecture, Rally is built of 4 main components: - -1. **Server Providers** - provide servers (virtual servers), with ssh access, in one L3 network. -2. **Deploy Engines** - deploy an OpenStack cloud on servers provided by **Server Providers**. -3. **Verification** - component that runs tempest (or another specific set of tests) against a deployed cloud, collects results & presents them in a human-readable form. -4. **Benchmark engine** - allows writing parameterized benchmark scenarios & running them against the cloud (a minimal plugin sketch follows the Use Cases list below). - -Use Cases --------- - -There are 3 major high-level Rally use cases: - -.. image:: doc/source/images/Rally-UseCases.png - :alt: Rally Use Cases - - -Typical cases where Rally aims to help are: - -- Automate measuring & profiling focused on how new code changes affect OpenStack performance; -- Using the Rally profiler to detect scaling & performance issues; -- Investigate how different deployments affect OpenStack performance: - - Find the set of suitable OpenStack deployment architectures; - - Create deployment specifications for different loads (amount of controllers, swift nodes, etc.); -- Automate the search for the hardware best suited for a particular OpenStack cloud; -- Automate the production cloud specification generation: - - Determine terminal loads for basic cloud operations: VM start & stop, Block Device create/destroy & various OpenStack API methods; - - Check the performance of basic cloud operations under different loads.
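The benchmark engine mentioned above is plugin-driven. As a rough sketch of what a scenario plugin looks like (assuming the method-style ``rally.task.scenario`` API of this era; the class, method and scenario names below are illustrative, not plugins that ship with Rally):

.. code-block:: python

    # A minimal sketch of a benchmark scenario plugin, assuming the
    # method-style rally.task.scenario API of this era. ExampleGroup and
    # sleep_and_measure are illustrative names, not real Rally plugins.
    import time

    from rally.task import scenario


    class ExampleGroup(scenario.Scenario):
        """Hypothetical scenario group, for illustration only."""

        @scenario.configure(name="ExampleGroup.sleep_and_measure")
        def sleep_and_measure(self, sleep=0.1):
            # The decorated method body is what each iteration measures;
            # the runner section of a task file (e.g. the "constant"
            # runner used throughout certification/openstack) controls
            # how many times and how concurrently it runs.
            time.sleep(sleep)

A task file then references the plugin by its configured name, the same way the certification task files below reference ``Authenticate.keystone``, the ``CinderVolumes.*`` entries and the other built-in scenarios.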
- - -Links ----------------------- - -* Free software: Apache license -* Documentation: https://rally.readthedocs.org/en/latest/ -* Source: https://git.openstack.org/cgit/openstack/rally -* Bugs: https://bugs.launchpad.net/rally -* Step-by-step tutorial: https://rally.readthedocs.io/en/latest/quick_start/tutorial.html -* RoadMap: https://docs.google.com/a/mirantis.com/spreadsheets/d/16DXpfbqvlzMFaqaXAcJsBzzpowb_XpymaK2aFY2gA2g -* Launchpad page: https://launchpad.net/rally -* Gitter chat: https://gitter.im/rally-dev/Lobby -* Trello board: https://trello.com/b/DoD8aeZy/rally diff --git a/babel.cfg b/babel.cfg deleted file mode 100644 index 1d15bb36..00000000 --- a/babel.cfg +++ /dev/null @@ -1 +0,0 @@ -[python: **.py] \ No newline at end of file diff --git a/bindep.txt b/bindep.txt deleted file mode 100644 index c857bb6d..00000000 --- a/bindep.txt +++ /dev/null @@ -1,23 +0,0 @@ -# This is a cross-platform list tracking distribution packages needed by tests; -# see http://docs.openstack.org/infra/bindep/ for additional information. - -build-essential [platform:dpkg] -gcc [platform:rpm] -gmp-devel [platform:rpm] -libffi-dev [platform:dpkg] -libffi-devel [platform:rpm !platform:opensuse] -libffi48-devel [platform:opensuse] -libpq-dev [platform:dpkg] -libssl-dev [platform:dpkg] -libxml2-dev [platform:dpkg] -libxml2-devel [platform:rpm] -libxslt1-dev [platform:dpkg] -libxslt-devel [platform:rpm] -openssl-devel [platform:rpm] -postgresql-devel [platform:rpm !platform:opensuse] -postgresql93-devel [platform:opensuse] -python-dev [platform:dpkg] -python-devel [platform:rpm] -redhat-rpm-config [platform:rpm] -iputils-ping [platform:dpkg] -iputils [platform:rpm] diff --git a/certification/openstack/README.rst b/certification/openstack/README.rst deleted file mode 100644 index 8382eda6..00000000 --- a/certification/openstack/README.rst +++ /dev/null @@ -1,50 +0,0 @@ -============================ -OpenStack Certification Task -============================ - -How To Validate & Run Task -------------------------- - -To validate the task with your own parameters, run: - -.. code-block:: console - - $ rally task validate task.yaml --task-args-file task_arguments.yaml - - -To start the task with your own parameters, run: - -.. code-block:: console - - $ rally task start task.yaml --task-args-file task_arguments.yaml - - -Task Arguments -------------- - -The file task_arguments.yaml contains all task options: - -+------------------------+----------------------------------------------------+ -| Name | Description | -+========================+====================================================+ -| service_list | List of services that should be tested | -+------------------------+----------------------------------------------------+ -| smoke | Dry run without load from 1 user | -+------------------------+----------------------------------------------------+ -| use_existing_users | Use existing users (for a cloud with r/o Keystone, e.g. AD) | -+------------------------+----------------------------------------------------+ -| image_name | Image name that exists in the cloud | -+------------------------+----------------------------------------------------+ -| flavor_name | Flavor name that exists in the cloud | -+------------------------+----------------------------------------------------+ -| glance_image_location | URL of the image used to test Glance upload | -+------------------------+----------------------------------------------------+ -| users_amount | Expected amount of users | -+------------------------+----------------------------------------------------+ -| tenants_amount | Expected amount of tenants | -+------------------------+----------------------------------------------------+ -| controllers_amount | Amount of OpenStack API nodes (controllers) | -+------------------------+----------------------------------------------------+ - -All options have default values; however, users should change them to reflect -the configuration and size of the tested OpenStack cloud. diff --git a/certification/openstack/macro/macro.yaml b/certification/openstack/macro/macro.yaml deleted file mode 100644 index e6542f37..00000000 --- a/certification/openstack/macro/macro.yaml +++ /dev/null @@ -1,95 +0,0 @@ -{%- macro user_context(tenants,users_per_tenant, use_existing_users) -%} -{%- if use_existing_users and caller is not defined -%} {} -{%- else %} - {%- if not use_existing_users %} - users: - tenants: {{ tenants }} - users_per_tenant: {{ users_per_tenant }} - {%- endif %} - {%- if caller is defined %} - {{ caller() }} - {%- endif %} -{%- endif %} -{%- endmacro %} - -{%- macro vm_params(image=none, flavor=none, size=none) %} -{%- if flavor is not none %} - flavor: - name: {{ flavor }} -{%- endif %} -{%- if image is not none %} - image: - name: {{ image }} -{%- endif %} -{%- if size is not none %} - size: {{ size }} -{%- endif %} -{%- endmacro %} - -{%- macro unlimited_volumes() %} - cinder: - gigabytes: -1 - snapshots: -1 - volumes: -1 -{%- endmacro %} - -{%- macro constant_runner(concurrency=1, times=1, is_smoke=True) %} - type: "constant" - {%- if is_smoke %} - concurrency: 1 - times: 1 - {%- else %} - concurrency: {{ concurrency }} - times: {{ times }} - {%- endif %} -{%- endmacro %} - -{%- macro rps_runner(rps=1, times=1, is_smoke=True) %} - type: rps - {%- if is_smoke %} - rps: 1 - times: 1 - {%- else %} - rps: {{ rps }} - times: {{ times }} - {%- endif %} -{%- endmacro %} - -{%- macro no_failures_sla() %} - failure_rate: - max: 0 -{%- endmacro %} - -{%- macro volumes(size=1, volumes_per_tenant=1) %} - volumes: - size: {{ size }} - volumes_per_tenant: {{ volumes_per_tenant }} -{%- endmacro %} - -{%- macro unlimited_nova(keypairs=false) %} - nova: - cores: -1 - floating_ips: -1 - instances: -1 - {%- if keypairs %} - key_pairs: -1 - {%- endif %} - ram: -1 - security_group_rules: -1 - security_groups: -1 -{%- endmacro %} - -{%- macro unlimited_neutron() %} -{% if "neutron" in service_list %} - neutron: - network: -1 - port: -1 - subnet: -1 -{% endif %} -{%- endmacro %} - -{%- macro glance_args(location, container="bare", type="qcow2") %} - container_format: {{ container }} - disk_format: {{ type }} - image_location: {{ location }} -{%- endmacro %} diff --git a/certification/openstack/scenario/authentication.yaml b/certification/openstack/scenario/authentication.yaml deleted file mode 100644 index d31f71e6..00000000 --- a/certification/openstack/scenario/authentication.yaml +++ /dev/null @@ -1,8 +0,0 @@ - Authenticate.keystone: - - - context: -
{{ user_context(tenants_amount, users_amount, use_existing_users) }} - runner: - {{ rps_runner(rps=15*controllers_amount, times=20000*controllers_amount, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} diff --git a/certification/openstack/scenario/cinder.yaml b/certification/openstack/scenario/cinder.yaml deleted file mode 100644 index fd8a953d..00000000 --- a/certification/openstack/scenario/cinder.yaml +++ /dev/null @@ -1,191 +0,0 @@ - CinderVolumes.create_and_attach_volume: - - - args: - {{ vm_params(image_name,flavor_name,1) }} - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - quotas: - {{ unlimited_volumes() }} - {% endcall %} - runner: - {{ constant_runner(concurrency=min(50, 2*controllers_amount),times=min(30, 10*controllers_amount), is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - CinderVolumes.create_and_delete_snapshot: - - - args: - force: false - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - quotas: - {{ unlimited_volumes() }} - {{ volumes() }} - {% endcall %} - runner: - {{ constant_runner(concurrency=min(50, 2*controllers_amount),times=min(200, 67*controllers_amount), is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - CinderVolumes.create_and_delete_volume: - - - args: - size: - max: 1 - min: 1 - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - quotas: - {{ unlimited_volumes() }} - {% endcall %} - runner: - {{ constant_runner(concurrency=min(50, 2*controllers_amount),times=min(100, 33*controllers_amount), is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - - args: - {{ vm_params(image_name,none,1) }} - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - quotas: - {{ unlimited_volumes() }} - {% endcall %} - runner: - {{ constant_runner(concurrency=min(50, 2*controllers_amount),times=min(100, 33*controllers_amount), is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - - args: - size: 1 - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - quotas: - {{ unlimited_volumes() }} - {% endcall %} - runner: - {{ constant_runner(concurrency=min(50, 2*controllers_amount),times=min(100, 33*controllers_amount), is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - CinderVolumes.create_and_extend_volume: - - - args: - new_size: 2 - size: 1 - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - quotas: - {{ unlimited_volumes() }} - {% endcall %} - runner: - {{ constant_runner(concurrency=min(50, 2*controllers_amount),times=min(100, 33*controllers_amount), is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - CinderVolumes.create_and_list_snapshots: - - - args: - detailed: true - force: false - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - quotas: - {{ unlimited_volumes() }} - {{ volumes() }} - {% endcall %} - runner: - {{ constant_runner(concurrency=min(50, 2*controllers_amount),times=min(100, 33*controllers_amount), is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - CinderVolumes.create_and_list_volume: - - - args: - detailed: true - {{ vm_params(image_name,none,1) }} - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - quotas: - {{ unlimited_volumes() }} - {% endcall %} - runner: - {{ constant_runner(concurrency=min(50, 2*controllers_amount),times=min(100, 33*controllers_amount), is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - - args: - detailed: true - 
size: 1 - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - quotas: - {{ unlimited_volumes() }} - {% endcall %} - runner: - {{ constant_runner(concurrency=min(50, 2*controllers_amount),times=min(100, 33*controllers_amount), is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - CinderVolumes.create_and_upload_volume_to_image: - - - args: - container_format: "bare" - disk_format: "raw" - do_delete: true - force: false - size: 1 - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - quotas: - {{ unlimited_volumes() }} - {% endcall %} - runner: - {{ constant_runner(concurrency=min(50, 2*controllers_amount),times=min(40, 13*controllers_amount), is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - CinderVolumes.create_from_volume_and_delete_volume: - - - args: - size: 1 - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - quotas: - {{ unlimited_volumes() }} - {{ volumes() }} - {% endcall %} - runner: - {{ constant_runner(concurrency=min(50, 2*controllers_amount),times=min(200, 67*controllers_amount), is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - CinderVolumes.create_nested_snapshots_and_attach_volume: - - - args: - nested_level: 1 - size: - max: 1 - min: 1 - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - quotas: - {{ unlimited_volumes() }} - servers: - {{ vm_params(image_name,flavor_name,none)|indent(2,true) }} - servers_per_tenant: 1 - {% endcall %} - runner: - {{ constant_runner(concurrency=min(50, 2*controllers_amount),times=min(10, 3*controllers_amount), is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - Quotas.cinder_update_and_delete: - - - args: - max_quota: 1024 - context: - {{ user_context(tenants_amount, users_amount, use_existing_users) }} - runner: - {{ constant_runner(concurrency=min(50, 2*controllers_amount),times=min(200, 67*controllers_amount), is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} diff --git a/certification/openstack/scenario/glance.yaml b/certification/openstack/scenario/glance.yaml deleted file mode 100644 index 39f46ac8..00000000 --- a/certification/openstack/scenario/glance.yaml +++ /dev/null @@ -1,30 +0,0 @@ - GlanceImages.create_and_delete_image: - - - args: - {{ glance_args(location=glance_image_location) }} - context: - {{ user_context(tenants_amount, users_amount, use_existing_users) }} - runner: - {{ constant_runner(concurrency=min(50, 2*controllers_amount),times=min(200, 67*controllers_amount), is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - GlanceImages.create_and_list_image: - - - args: - {{ glance_args(location=glance_image_location) }} - context: - {{ user_context(tenants_amount, users_amount, use_existing_users) }} - runner: - {{ constant_runner(concurrency=min(50, 2*controllers_amount),times=min(200, 67*controllers_amount), is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - GlanceImages.list_images: - - - context: - {{ user_context(tenants_amount, users_amount, use_existing_users) }} - runner: - {{ constant_runner(concurrency=min(50, 2*controllers_amount),times=min(200, 67*controllers_amount), is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} diff --git a/certification/openstack/scenario/keystone.yaml b/certification/openstack/scenario/keystone.yaml deleted file mode 100644 index 3db23b38..00000000 --- a/certification/openstack/scenario/keystone.yaml +++ /dev/null @@ -1,62 +0,0 @@ - KeystoneBasic.add_and_remove_user_role: - - - context: - {{ user_context(tenants_amount, 
users_amount, use_existing_users) }} - runner: - {{ constant_runner(concurrency=min(50, 7*controllers_amount),times=min(200, 67*controllers_amount), is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - KeystoneBasic.create_add_and_list_user_roles: - - - context: - {{ user_context(tenants_amount, users_amount, use_existing_users) }} - runner: - {{ constant_runner(concurrency=min(50, 7*controllers_amount),times=min(200, 67*controllers_amount), is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - KeystoneBasic.create_and_list_tenants: - - - context: - {{ user_context(tenants_amount, users_amount, use_existing_users) }} - runner: - {{ constant_runner(concurrency=min(50, 2*controllers_amount),times=min(200, 10*controllers_amount), is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - KeystoneBasic.create_and_delete_role: - - - context: - {{ user_context(tenants_amount, users_amount, use_existing_users) }} - runner: - {{ constant_runner(concurrency=min(50, 7*controllers_amount),times=min(200, 67*controllers_amount), is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - KeystoneBasic.create_and_delete_service: - - - context: - {{ user_context(tenants_amount, users_amount, use_existing_users) }} - runner: - {{ constant_runner(concurrency=min(50, 7*controllers_amount),times=min(200, 67*controllers_amount), is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - KeystoneBasic.get_entities: - - - context: - {{ user_context(tenants_amount, users_amount, use_existing_users) }} - runner: - {{ constant_runner(concurrency=min(50, 3*controllers_amount),times=min(200, 67*controllers_amount), is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - KeystoneBasic.create_update_and_delete_tenant: - - - context: - {{ user_context(tenants_amount, users_amount, use_existing_users) }} - runner: - {{ constant_runner(concurrency=min(50, 7*controllers_amount),times=min(200, 67*controllers_amount), is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} diff --git a/certification/openstack/scenario/neutron.yaml b/certification/openstack/scenario/neutron.yaml deleted file mode 100644 index 0e6d5f45..00000000 --- a/certification/openstack/scenario/neutron.yaml +++ /dev/null @@ -1,245 +0,0 @@ - NeutronNetworks.create_and_delete_networks: - - - args: - network_create_args: {} - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - quotas: - neutron: - network: -1 - {% endcall %} - runner: - {{ constant_runner(concurrency=2*controllers_amount, times=8*controllers_amount, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - NeutronNetworks.create_and_delete_ports: - - - args: - network_create_args: {} - port_create_args: {} - ports_per_network: 1 - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - quotas: - neutron: - network: -1 - port: -1 - {% endcall %} - runner: - {{ constant_runner(concurrency=2*controllers_amount, times=8*controllers_amount, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - NeutronNetworks.create_and_delete_routers: - - - args: - network_create_args: {} - router_create_args: {} - subnet_cidr_start: "1.1.0.0/30" - subnet_create_args: {} - subnets_per_network: 1 - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - quotas: - neutron: - network: -1 - subnet: -1 - port: -1 - router: -1 - {% endcall %} - runner: - {{ constant_runner(concurrency=2*controllers_amount, times=8*controllers_amount, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - 
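These scenario entries all lean on the Jinja2 macros defined in ``macro/macro.yaml`` above: Rally renders a task file as a Jinja2 template before parsing it as YAML. Here is a minimal, self-contained sketch of that expansion using the ``jinja2`` library directly; the inlined macro mirrors ``constant_runner``, while Rally's own loader additionally resolves the ``macro/macro.yaml`` import and ``--task-args-file`` values, and exact whitespace may differ:

.. code-block:: python

    # Sketch: how a call like {{ constant_runner(...) }} expands before
    # the YAML is parsed. Plain jinja2 is used here for illustration.
    import jinja2

    TEMPLATE = """
    {%- macro constant_runner(concurrency=1, times=1, is_smoke=True) %}
      type: "constant"
      {%- if is_smoke %}
      concurrency: 1
      times: 1
      {%- else %}
      concurrency: {{ concurrency }}
      times: {{ times }}
      {%- endif %}
    {%- endmacro %}
    runner:
      {{ constant_runner(concurrency=2 * controllers_amount,
                         times=8 * controllers_amount,
                         is_smoke=smoke) }}
    """

    # With smoke disabled, the runner scales with the controller count.
    print(jinja2.Template(TEMPLATE).render(controllers_amount=3, smoke=False))
    # Prints (modulo insignificant whitespace):
    #   runner:
    #     type: "constant"
    #     concurrency: 6
    #     times: 24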
NeutronNetworks.create_and_delete_subnets: - - - args: - network_create_args: {} - subnet_cidr_start: "1.1.0.0/30" - subnet_create_args: {} - subnets_per_network: 1 - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - quotas: - neutron: - network: -1 - subnet: -1 - {% endcall %} - runner: - {{ constant_runner(concurrency=2*controllers_amount, times=8*controllers_amount, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - NeutronNetworks.create_and_list_networks: - - - args: - network_create_args: {} - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - quotas: - neutron: - network: -1 - {% endcall %} - runner: - {{ constant_runner(concurrency=2*controllers_amount, times=8*controllers_amount, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - NeutronNetworks.create_and_list_ports: - - - args: - network_create_args: {} - port_create_args: {} - ports_per_network: 1 - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - quotas: - neutron: - network: -1 - port: -1 - {% endcall %} - runner: - {{ constant_runner(concurrency=2*controllers_amount, times=8*controllers_amount, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - NeutronNetworks.create_and_list_routers: - - - args: - network_create_args: {} - router_create_args: {} - subnet_cidr_start: "1.1.0.0/30" - subnet_create_args: {} - subnets_per_network: 1 - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - quotas: - neutron: - network: -1 - subnet: -1 - router: -1 - {% endcall %} - runner: - {{ constant_runner(concurrency=2*controllers_amount, times=8*controllers_amount, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - NeutronNetworks.create_and_list_subnets: - - - args: - network_create_args: {} - subnet_cidr_start: "1.1.0.0/30" - subnet_create_args: {} - subnets_per_network: 1 - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - quotas: - neutron: - network: -1 - subnet: -1 - {% endcall %} - runner: - {{ constant_runner(concurrency=2*controllers_amount, times=8*controllers_amount, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - NeutronNetworks.create_and_update_networks: - - - args: - network_create_args: {} - network_update_args: - admin_state_up: false - name: "_updated" - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - quotas: - neutron: - network: -1 - {% endcall %} - runner: - {{ constant_runner(concurrency=2*controllers_amount, times=8*controllers_amount, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - NeutronNetworks.create_and_update_ports: - - - args: - network_create_args: {} - port_create_args: {} - port_update_args: - admin_state_up: false - device_id: "dummy_id" - device_owner: "dummy_owner" - name: "_port_updated" - ports_per_network: 1 - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - quotas: - neutron: - network: -1 - port: -1 - {% endcall %} - runner: - {{ constant_runner(concurrency=2*controllers_amount, times=8*controllers_amount, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - NeutronNetworks.create_and_update_routers: - - - args: - network_create_args: {} - router_create_args: {} - router_update_args: - admin_state_up: false - name: "_router_updated" - subnet_cidr_start: "1.1.0.0/30" - subnet_create_args: {} - subnets_per_network: 1 - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - 
quotas: - neutron: - network: -1 - subnet: -1 - port: -1 - router: -1 - {% endcall %} - runner: - {{ constant_runner(concurrency=2*controllers_amount, times=8*controllers_amount, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - NeutronNetworks.create_and_update_subnets: - - - args: - network_create_args: {} - subnet_cidr_start: "1.4.0.0/16" - subnet_create_args: {} - subnet_update_args: - enable_dhcp: false - name: "_subnet_updated" - subnets_per_network: 1 - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - quotas: - neutron: - network: -1 - subnet: -1 - {% endcall %} - runner: - {{ constant_runner(concurrency=2*controllers_amount, times=8*controllers_amount, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - Quotas.neutron_update: - - - args: - max_quota: 1024 - context: - {{ user_context(tenants_amount, users_amount, use_existing_users) }} - runner: - {{ constant_runner(concurrency=2*controllers_amount, times=8*controllers_amount, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} \ No newline at end of file diff --git a/certification/openstack/scenario/nova.yaml b/certification/openstack/scenario/nova.yaml deleted file mode 100644 index 8f8a8aaa..00000000 --- a/certification/openstack/scenario/nova.yaml +++ /dev/null @@ -1,195 +0,0 @@ - NovaKeypair.boot_and_delete_server_with_keypair: - - - args: - {{ vm_params(image_name, flavor_name) }} - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - network: - networks_per_tenant: 1 - start_cidr: "100.1.0.0/25" - quotas: - {{ unlimited_neutron() }} - {{ unlimited_nova(keypairs=true) }} - {% endcall %} - runner: - {{ constant_runner(concurrency=min(50, 2*controllers_amount), times=17*controllers_amount, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - NovaKeypair.create_and_delete_keypair: - - - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - quotas: - {{ unlimited_nova(keypairs=true) }} - {% endcall %} - runner: - {{ constant_runner(concurrency=min(50, 2*controllers_amount), times=67*controllers_amount, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - NovaKeypair.create_and_list_keypairs: - - - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - quotas: - {{ unlimited_nova(keypairs=true) }} - {% endcall %} - runner: - {{ constant_runner(concurrency=min(50, 2*controllers_amount), times=67*controllers_amount, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - NovaServers.boot_and_bounce_server: - - - args: - actions: - - - hard_reboot: 1 - - - soft_reboot: 1 - - - stop_start: 1 - - - rescue_unrescue: 1 - {{ vm_params(image_name, flavor_name) }} - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - network: - networks_per_tenant: 1 - start_cidr: "100.1.0.0/25" - quotas: - {{ unlimited_neutron() }} - {{ unlimited_nova() }} - {% endcall %} - runner: - {{ constant_runner(concurrency=min(50, 2*controllers_amount), times=17*controllers_amount, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - NovaServers.boot_and_delete_server: - - - args: - {{ vm_params(image_name, flavor_name) }} - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - network: - networks_per_tenant: 1 - start_cidr: "100.1.0.0/25" - quotas: - {{ unlimited_neutron() }} - {{ unlimited_nova() }} - {% endcall %} - runner: - {{ constant_runner(concurrency=min(50, 2*controllers_amount), times=17*controllers_amount, is_smoke=smoke) }} - sla: - 
{{ no_failures_sla() }} - - NovaServers.boot_and_list_server: - - - args: - detailed: true - {{ vm_params(image_name, flavor_name) }} - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - network: - networks_per_tenant: 1 - start_cidr: "100.1.0.0/25" - quotas: - {{ unlimited_neutron() }} - {{ unlimited_nova() }} - {% endcall %} - runner: - {{ constant_runner(concurrency=min(50, 10*controllers_amount), times=333*controllers_amount, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - NovaServers.boot_and_rebuild_server: - - - args: - {{ vm_params(flavor=flavor_name) }} - from_image: - name: {{ image_name }} - to_image: - name: {{ image_name }} - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - network: - networks_per_tenant: 1 - start_cidr: "100.1.0.0/25" - quotas: - {{ unlimited_neutron() }} - {{ unlimited_nova() }} - {% endcall %} - runner: - {{ constant_runner(concurrency=min(50, 2*controllers_amount), times=17*controllers_amount, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - NovaServers.boot_server_from_volume_and_delete: - - - args: - {{ vm_params(image_name, flavor_name) }} - volume_size: 5 - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - network: - networks_per_tenant: 1 - start_cidr: "100.1.0.0/25" - quotas: - {{ unlimited_volumes() }} - {{ unlimited_neutron() }} - {{ unlimited_nova() }} - {% endcall %} - runner: - {{ constant_runner(concurrency=min(50, 3*controllers_amount), times=17*controllers_amount, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - NovaServers.pause_and_unpause_server: - - - args: - {{ vm_params(image_name, flavor_name) }} - force_delete: false - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - network: - networks_per_tenant: 1 - start_cidr: "100.1.0.0/25" - quotas: - {{ unlimited_neutron() }} - {{ unlimited_nova() }} - {% endcall %} - runner: - {{ constant_runner(concurrency=min(50, 2*controllers_amount), times=17*controllers_amount, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - NovaServers.snapshot_server: - - - args: - {{ vm_params(image_name, flavor_name) }} - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - network: - networks_per_tenant: 1 - start_cidr: "100.1.0.0/25" - quotas: - {{ unlimited_neutron() }} - {{ unlimited_nova() }} - {% endcall %} - runner: - {{ constant_runner(concurrency=min(50, 2*controllers_amount), times=17*controllers_amount, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - Quotas.nova_update_and_delete: - - - args: - max_quota: 1024 - context: - {{ user_context(tenants_amount, users_amount, use_existing_users) }} - runner: - {{ constant_runner(concurrency=min(50, 2*controllers_amount), times=17*controllers_amount, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} diff --git a/certification/openstack/task.yaml b/certification/openstack/task.yaml deleted file mode 100644 index 5739e31c..00000000 --- a/certification/openstack/task.yaml +++ /dev/null @@ -1,42 +0,0 @@ -{%- set glance_image_location = glance_image_location|default("https://download.cirros-cloud.net/0.3.5/cirros-0.3.5-i386-disk.img") %} -{%- set image_name = image_name|default("^(cirros.*-disk|TestVM)$") %} -{%- set flavor_name = flavor_name|default("m1.tiny") %} -{%- set use_existing_users = use_existing_users|default(false) %} -{%- set service_list = service_list|default(["authentication", "cinder", "keystone", "nova", "glance", "neutron"]) %} -{%- 
set smoke = smoke|default(true) %} -{%- set controllers_amount = controllers_amount|default(1) %} -{%- if smoke %} -{%- set users_amount = 1 %} -{%- set tenants_amount = 1 %} -{%- else %} -{%- set users_amount = users_amount|default(1) %} -{%- set tenants_amount = tenants_amount|default(1) %} -{%- endif %} - -{%- from "macro/macro.yaml" import user_context, vm_params, unlimited_volumes, constant_runner, rps_runner, no_failures_sla -%} -{%- from "macro/macro.yaml" import volumes, unlimited_nova, unlimited_neutron, glance_args -%} - ---- {% if "authentication" in service_list %} {%- include "scenario/authentication.yaml"-%} {% endif %} - {% if "cinder" in service_list %} {%- include "scenario/cinder.yaml"-%} {% endif %} - {% if "keystone" in service_list %} {%- include "scenario/keystone.yaml"-%} {% endif %} - {% if "nova" in service_list %} {%- include "scenario/nova.yaml"-%} {% endif %} - {% if "glance" in service_list %} {%- include "scenario/glance.yaml"-%} {% endif %} - {% if "neutron" in service_list %} {%- include "scenario/neutron.yaml"-%} {% endif %} diff --git a/certification/openstack/task_arguments.yaml b/certification/openstack/task_arguments.yaml deleted file mode 100644 index cbc4eca1..00000000 --- a/certification/openstack/task_arguments.yaml +++ /dev/null @@ -1,20 +0,0 @@ ---- - service_list: - - authentication - - nova - - neutron - - keystone - - cinder - - glance - use_existing_users: false - image_name: "^(cirros.*-disk|TestVM)$" - flavor_name: "m1.tiny" - glance_image_location: "" - smoke: true - users_amount: 1 - tenants_amount: 1 - controllers_amount: 3 - compute_amount: 77 - storage_amount: 20 - network_amount: 1 - diff --git a/devstack/README.rst b/devstack/README.rst deleted file mode 100644 index fce24446..00000000 --- a/devstack/README.rst +++ /dev/null @@ -1,30 +0,0 @@ -Rally with DevStack all-in-one installation ------------------------------------------- - -It is also possible to install Rally with DevStack. First, clone the -corresponding repositories: - -.. code-block:: bash - - git clone https://git.openstack.org/openstack-dev/devstack - git clone https://github.com/openstack/rally - -Then, configure DevStack to run Rally. Start by creating your ``local.conf`` file: - -.. code-block:: bash - - cd devstack - cp samples/local.conf local.conf - -Next, edit ``local.conf``: add the following line to the ``[[local|localrc]]`` -section: - -.. code-block:: bash - - enable_plugin rally https://github.com/openstack/rally master - -Finally, run DevStack as usual: - -.. code-block:: bash - - ./stack.sh diff --git a/devstack/features.yaml b/devstack/features.yaml deleted file mode 100644 index 1794d503..00000000 --- a/devstack/features.yaml +++ /dev/null @@ -1,188 +0,0 @@ -config: - default: - master: [default, ceilometer, glance, horizon, nova, placement, swift, cinder, keystone] - "0.9": [default, ceilometer, glance, horizon, nova, placement, swift, cinder, keystone] - # This can be used by functional jobs that only want their dependencies installed - # and don't need to incur the overhead of installing all services in the process.
- no_services: [default] - neutron: - features: [neutron, neutron-adv] - # different backends - postgres: - features: [postgresql] - # feature changes for different test matrixes - grenade: - rm-features: [trove, sahara, neutron-adv, horizon] - tempest: - features: [tempest] - # feature changes for different configs of existing services - nova_api_metadata_split: - features: [nova-md] - cells: - features: [nova-cells] - # feature declarations for incubated or recently integrated projects (so they - # can be tested outside the releases they were supported in) - trove: - features: [trove] - marconi: - features: [marconi] - zaqar: - features: [zaqar] - sahara: - features: [sahara] - ironic: - features: [ironic] - qpid: - features: [qpid] - zeromq: - features: [zeromq] - ceph: - features: [ceph] - heat: - features: [heat] - tlsproxy: - features: [tlsproxy] - cinder_mn_grenade: - features: [cinder-mn-grenade] - cinder_mn_grenade_sub_volschbak: - features: [cinder-mn-grenade-sub-volschbak] - cinder_mn_grenade_sub_bak: - features: [cinder-mn-grenade-sub-bak] - neutron_dvr: - features: [neutron-dvr] - -branches: - # The value of ""default" is the name of the "trunk" branch - default: master - # Normalized branch names only here, e.g. stable/ocata => ocata - allowed: [master, 0.9] - -primary: - default: - base: - services: [mysql, rabbit, dstat, peakmem_tracker] - - ceilometer: - base: - services: [ceilometer-acompute, ceilometer-acentral, ceilometer-collector, ceilometer-api, ceilometer-alarm-notifier, ceilometer-alarm-evaluator, ceilometer-anotification] - - glance: - base: - services: [g-api, g-reg] - - keystone: - base: - services: [key] - - horizon: - base: - services: [horizon] - - nova: - base: - services: [n-api, n-cauth, n-cond, n-cpu, n-net, n-novnc, n-obj, n-sch] - - nova-md: - base: - services: [n-api-meta] - - nova-cells: - base: - services: [n-cell] - rm-compute-ext: [agregates, hosts] - - placement: - base: - services: [placement-api] - - neutron: - base: - services: [q-svc, q-agt, q-dhcp, q-l3, q-meta, q-metering] - rm-services: [n-net] - - neutron-adv: - base: - rm-services: [n-net] - - neutron-dvr: - base: - services: [] - - swift: - base: - services: [s-proxy, s-account, s-container, s-object] - - cinder: - base: - services: [cinder, c-api, c-vol, c-sch, c-bak] - - # This will be used to disable c-vol, c-bak on primary node when running multinode grenade - # job that will test compatibility of new c-api, c-sch (primary) and old c-vol and c-bak (sub). - cinder-mn-grenade: - base: - rm-services: [c-vol, c-bak] - - # This will be used to disable c-vol, c-sch, c-bak on primary node when running multinode grenade - # job that will test compatibility of new c-api (primary) and old c-vol, c-sch and c-bak (sub). - cinder-mn-grenade-sub-volschbak: - base: - rm-services: [c-vol, c-sch, c-bak] - - # This will be used to disable c-bak on primary node when running multinode grenade - # job that will test compatibility of new c-api, c-sch, c-vol (primary) and old c-bak (sub). 
- cinder-mn-grenade-sub-bak: - base: - rm-services: [c-bak] - - heat: - base: - services: [heat, h-api, h-api-cfn, h-api-cw, h-eng] - - trove: - base: - services: [trove, tr-api, tr-tmgr, tr-cond] - - ironic: - base: - services: [ir-api, ir-cond] - rm-services: [cinder, c-api, c-vol, c-sch, c-bak] - - sahara: - base: - services: [sahara] - - marconi: - base: - services: [marconi-server] - - zaqar: - base: - services: [zaqar-server] - - tempest: - base: - services: [tempest] - - # service overrides - postgresql: - base: - services: [postgresql] - rm-services: [mysql] - - zeromq: - base: - services: [zeromq] - rm-services: [rabbit] - - qpid: - base: - services: [qpid] - rm-services: [rabbit] - - ceph: - base: - services: [ceph] - - tlsproxy: - base: - services: [tls-proxy] diff --git a/devstack/lib/rally b/devstack/lib/rally deleted file mode 100644 index e2c03c58..00000000 --- a/devstack/lib/rally +++ /dev/null @@ -1,133 +0,0 @@ -# lib/rally -# Functions to control the configuration and operation of the **Rally** - -# Dependencies: -# -# - ``functions`` file -# - ``DEST``, ``DATA_DIR``, ``STACK_USER`` must be defined - -# ``stack.sh`` calls the entry points in this order: -# -# - install_rally -# - configure_rally -# - init_rally - -# Save trace setting -XTRACE=$(set +o | grep xtrace) -set +o xtrace - - -# Defaults -# -------- - -# Set up default directories -DIR=$(dirname ${BASH_SOURCE[0]}) -RALLY_DIR=$(readlink -m $DIR/../..) -RALLY_CONF_DIR=${RALLY_CONF_DIR:-/etc/rally} -RALLY_CONF_FILE=rally.conf - -# Debug mode -RALLY_DEBUG=${RALLY_DEBUG:-False} - -# Create deployment -RALLY_ADD_DEPLOYMENT=${RALLY_ADD_DEPLOYMENT:-"True"} -RALLY_ADD_DEPLOYMENT=$(trueorfalse True $RALLY_ADD_DEPLOYMENT) - -# Functions -# --------- - -# Creates a configuration file for the current deployment -# Uses the following variables: -# -# - ``ADMIN_PASSWORD``, ``KEYSTONE_SERVICE_PROTOCOL``, -# ``KEYSTONE_SERVICE_HOST``, ``KEYSTONE_SERVICE_PORT``, -# ``IDENTITY_API_VERSION`` - must be defined -# -# _create_deployment_config filename -function _create_deployment_config() { -if [[ "$IDENTITY_API_VERSION" == 2.0 ]] -then - cat >$1 <$1 <``" - " (ref__).\n\n __ #%(ref)s" % {"cmd": cmd, - "ref": cmd.replace(" ", "-")}) - - -def make_arguments_section(category_name, cmd_name, arguments, defaults): - elements = [utils.paragraph("**Command arguments**:")] - for args, kwargs in arguments: - # for future changes... - # :param args: a single command argument which can represented by - # several names(for example, --uuid and --task-id) in cli. - # :type args: tuple - # :param kwargs: description of argument. Have next format: - # {"dest": "action_kwarg_", - # "help": "just a description of argument" - # "metavar": "[optional] metavar of argument. 
Example:" - # "Example: argument '--file'; metavar 'path' ", - # "type": "[optional] class object of argument's type", - # "required": "[optional] boolean value"} - # :type kwargs: dict - dest = kwargs.get("dest").replace("action_kwarg_", "") - description = [] - if cmd_name != "use": - # lets add notes about specific default values and hint about - # "use" command with reference - if dest in ("deployment", "task"): - description.append(compose_note_about_default_uuids( - args[0], dest)) - description.append( - compose_use_cmd_hint_msg("rally %s use" % dest)) - elif dest == "verification": - description.append(compose_note_about_default_uuids( - args[0], dest)) - description.append( - compose_use_cmd_hint_msg("rally verify use")) - - description.append(kwargs.get("help")) - - action = kwargs.get("action") - if not action: - arg_type = kwargs.get("type") - if arg_type: - description.append("**Type**: %s" % arg_type.__name__) - - skip_default = dest in ("deployment", - "task_id", - "verification") - if not skip_default and dest in defaults: - description.append("**Default**: %s" % defaults[dest]) - metavar = kwargs.get("metavar") - - ref = "%s_%s_%s" % (category_name, cmd_name, args[0].replace("-", "")) - - if metavar: - args = ["%s %s" % (arg, metavar) for arg in args] - - elements.extend(utils.make_definition(", ".join(args), - ref, description)) - return elements - - -def get_defaults(func): - """Return a map of argument:default_value for specified function.""" - spec = inspect.getargspec(func) - if spec.defaults: - return dict(zip(spec.args[-len(spec.defaults):], spec.defaults)) - return {} - - -def make_command_section(category_name, name, parser): - # NOTE(andreykurilin): there is only one category in rally-manage, so - # let's just hardcode it. - cmd = "rally-manage" if category_name == "db" else "rally" - section = utils.subcategory("%s %s %s" % (cmd, category_name, name)) - section.extend(utils.parse_text(parser["description"])) - if parser["parser"].arguments: - defaults = get_defaults(parser["parser"].defaults["action_fn"]) - section.extend(make_arguments_section( - category_name, name, parser["parser"].arguments, defaults)) - return section - - -def make_category_section(name, parser): - category_obj = utils.category("Category: %s" % name) - # NOTE(andreykurilin): we are re-using `_add_command_parsers` method from - # `rally.cli.cliutils`, but, since it was designed to print help message, - # generated description for categories contains specification for all - # sub-commands. We don't need information about sub-commands at this point, - # so let's skip "generated description" and take it directly from category - # class. - description = parser.defaults["command_object"].__doc__ - # TODO(andreykurilin): write a decorator which will mark cli-class as - # deprecated without changing its docstring. 
- if description.startswith("[Deprecated"):
- i = description.find("]")
- msg = description[1:i]
- description = description[i + 1:].strip()
- category_obj.append(utils.warning(msg))
- category_obj.extend(utils.parse_text(description))
-
- for command in sorted(parser.subparser.parsers.keys()):
- subparser = parser.subparser.parsers[command]
- category_obj.append(make_command_section(name, command, subparser))
- return category_obj
-
-
-class CLIReferenceDirective(rst.Directive):
- optional_arguments = 1
- option_spec = {"group": str}
-
- def run(self):
- parser = Parser()
- categories = copy.copy(main.categories)
- categories["db"] = manage.DBCommands
- if "group" in self.options:
- categories = {k: v for k, v in categories.items()
- if k == self.options["group"]}
- cliutils._add_command_parsers(categories, parser)
-
- content = []
- for cg in sorted(categories.keys()):
- content.append(make_category_section(
- cg, parser.parsers[cg]["parser"]))
- return content
-
-
-def setup(app):
- app.add_directive("make_cli_reference", CLIReferenceDirective)
diff --git a/doc/ext/include_vars.py b/doc/ext/include_vars.py
deleted file mode 100644
index 08777b37..00000000
--- a/doc/ext/include_vars.py
+++ /dev/null
@@ -1,57 +0,0 @@
-# Copyright 2017: Mirantis Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from docutils import nodes
-import json
-
-from oslo_utils import importutils
-
-
-def include_var(name, rawtext, text, lineno, inliner, options=None,
- content=None):
- """include variable
-
- :param name: The local name of the interpreted role, the role name
- actually used in the document.
- :param rawtext: A string containing the entire interpreted text input,
- including the role and markup. Return it as a problematic
- node linked to a system message if a problem is
- encountered.
- :param text: The interpreted text content.
- :param lineno: The line number where the interpreted text begins.
- :param inliner: The docutils.parsers.rst.states.Inliner object that
- called include_var. It contains several attributes
- useful for error reporting and document tree access.
- :param options: A dictionary of directive options for customization
- (from the 'role' directive), to be interpreted by the
- role function. Used for additional attributes for the
- generated elements and other functionality.
- :param content: A list of strings, the directive content for
- customization (from the 'role' directive). To be
- interpreted by the role function.
- :return:
- """
- obj = importutils.import_class(text)
- if isinstance(obj, (tuple, list)):
- obj = ", ".join(obj)
- elif isinstance(obj, dict):
- # dump the imported object itself, not the builtin dict type
- obj = json.dumps(obj, indent=4)
- else:
- obj = str(obj)
- return [nodes.Text(obj)], []
-
-
-def setup(app):
- app.add_role("include-var", include_var)
diff --git a/doc/ext/plugin_reference.py b/doc/ext/plugin_reference.py
deleted file mode 100644
index 3c4bb16d..00000000
--- a/doc/ext/plugin_reference.py
+++ /dev/null
@@ -1,380 +0,0 @@
-# Copyright 2015: Mirantis Inc.
-# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import copy -from docutils.parsers import rst -import json -import re - -from rally.common.plugin import discover -from rally.common.plugin import plugin -from rally.common import validation -from rally import plugins -import utils - - -JSON_SCHEMA_TYPES_MAP = {"boolean": "bool", - "string": "str", - "number": "float", - "integer": "int", - "array": "list", - "object": "dict"} - - -def process_jsonschema(schema): - """Process jsonschema and make it looks like regular docstring.""" - - if not schema: - # nothing to parse - return - - if "type" in schema: - - # str - if schema["type"] == "string": - doc = schema.get("description", "") - if "pattern" in schema: - doc += ("\n\nShould follow next pattern: %s." % - schema["pattern"]) - return {"doc": doc, "type": "str"} - - # int or float - elif schema["type"] in ("integer", "number"): - doc = schema.get("description", "") - if "minimum" in schema: - doc += "\n\nMin value: %s." % schema["minimum"] - if "maximum" in schema: - doc += "\n\nMax value: %s." % schema["maximum"] - return {"doc": doc, "type": JSON_SCHEMA_TYPES_MAP[schema["type"]]} - - # bool or null - elif schema["type"] in ("boolean", "null"): - return {"doc": schema.get("description", ""), - "type": "bool" if schema["type"] == "boolean" else "null"} - - # list - elif schema["type"] == "array": - info = {"doc": schema.get("description", ""), - "type": "list"} - - if "items" in schema: - if info["doc"]: - info["doc"] += "\n\n" - info["doc"] += ("Elements of the list should follow format(s) " - "described below:\n\n") - - items = schema["items"] - itype = None - if "type" in items: - itype = JSON_SCHEMA_TYPES_MAP.get(items["type"], - items["type"]) - info["doc"] += "- Type: %s. " % itype - if "description" in items: - # add indention - desc = items["description"].split("\n") - info["doc"] += "\n ".join(desc) - - new_schema = copy.copy(items) - new_schema.pop("description", None) - new_schema = json.dumps(new_schema, indent=4) - new_schema = "\n ".join( - new_schema.split("\n")) - - info["doc"] += ("\n Format:\n\n" - " .. code-block:: json\n\n" - " %s\n" % new_schema) - return info - - elif isinstance(schema["type"], list): - # it can be too complicated for parsing... 
do not do it deeply - return {"doc": schema.get("description", ""), - "type": "/".join(schema["type"])} - - # dict - elif schema["type"] == "object": - info = {"doc": schema.get("description", ""), - "type": "dict", - "parameters": []} - required_parameters = schema.get("required", []) - if "properties" in schema: - for name in schema["properties"]: - if isinstance(schema["properties"][name], str): - pinfo = {"name": name, - "type": schema["properties"][name], - "doc": ""} - else: - pinfo = process_jsonschema(schema["properties"][name]) - if name in required_parameters: - pinfo["required"] = True - pinfo["name"] = name - info["parameters"].append(pinfo) - elif "patternProperties" in schema: - info.pop("parameters", None) - info["patternProperties"] = [] - for k, v in schema["patternProperties"].items(): - info["patternProperties"].append(process_jsonschema(v)) - info["patternProperties"][-1]["name"] = k - info["patternProperties"][-1]["type"] = "str" - elif (not (set(schema.keys()) - {"type", "description", "$schema", - "additionalProperties"})): - # it is ok, schema accepts any object. nothing to add more - pass - elif "oneOf" in schema: - # Example: - # SCHEMA = {"type": "object", "$schema": consts.JSON_SCHEMA, - # "oneOf": [{"properties": {"foo": {"type": "string"}} - # "required": ["foo"], - # "additionalProperties": False}, - # {"properties": {"bar": {"type": "string"}} - # "required": ["bar"], - # "additionalProperties": False}, - # - oneOf = copy.deepcopy(schema["oneOf"]) - for item in oneOf: - for k, v in schema.items(): - if k not in ("oneOf", "description"): - item[k] = v - - return {"doc": schema.get("description", ""), - "type": "dict", - "oneOf": [process_jsonschema(item) for item in oneOf]} - else: - raise Exception("Failed to parse jsonschema: %s" % schema) - - if "definitions" in schema: - info["definitions"] = schema["definitions"] - return info - else: - raise Exception("Failed to parse jsonschema: %s" % schema) - - # enum - elif "enum" in schema: - doc = schema.get("description", "") - doc += "\nSet of expected values: '%s'." 
% ("', '".join( - [e or "None" for e in schema["enum"]])) - return {"doc": doc} - - elif "anyOf" in schema: - return {"doc": schema.get("description", ""), - "anyOf": [process_jsonschema(i) for i in schema["anyOf"]]} - - elif "oneOf" in schema: - return {"doc": schema.get("description", ""), - "oneOf": [process_jsonschema(i) for i in schema["oneOf"]]} - - elif "$ref" in schema: - return {"doc": schema.get("description", "n/a"), - "ref": schema["$ref"]} - else: - raise Exception("Failed to parse jsonschema: %s" % schema) - - -CATEGORIES = { - "Common": ["OS Client"], - "Deployment": ["Engine", "Provider Factory"], - "Task Component": ["Chart", "Context", "Exporter", "Hook", - "Resource Type", "SLA", "Scenario", "Scenario Runner", - "Trigger"], - "Verification Component": ["Verifier Context", "Verification Reporter", - "Verifier Manager"] -} - -# NOTE(andreykurilin): several bases do not have docstings at all, so it is -# redundant to display them -IGNORED_BASES = ["Resource Type", "Task Exporter", "OS Client"] - - -class PluginsReferenceDirective(rst.Directive): - optional_arguments = 1 - option_spec = {"base_cls": str} - - def _make_arg_items(self, items, ref_prefix, description=None, - title="Parameters"): - terms = [] - for item in items: - iname = item.get("name", "") or item.pop("type") - if "type" in item: - iname += " (%s)" % item["type"] - terms.append((iname, [item["doc"]])) - return utils.make_definitions(title=title, - ref_prefix=ref_prefix, - terms=terms, - descriptions=description) - - def _make_plugin_section(self, plugin_cls, base_name=None): - section_name = plugin_cls.get_name() - if base_name: - section_name += " [%s]" % base_name - section_obj = utils.section(section_name) - - info = plugin_cls.get_info() - if info["title"]: - section_obj.append(utils.paragraph(info["title"])) - - if info["description"]: - section_obj.extend(utils.parse_text(info["description"])) - - if info["namespace"]: - section_obj.append(utils.paragraph( - "**Namespace**: %s" % info["namespace"])) - - if base_name: - ref_prefix = "%s-%s-" % (base_name, plugin_cls.get_name()) - else: - ref_prefix = "%s-" % plugin_cls.get_name() - - if info["parameters"]: - section_obj.extend(self._make_arg_items(info["parameters"], - ref_prefix)) - - if info["returns"]: - section_obj.extend(utils.parse_text( - "**Returns**:\n%s" % info["returns"])) - - if info["schema"]: - schema = process_jsonschema(info["schema"]) - if "type" in schema: - if "parameters" in schema: - section_obj.extend(self._make_arg_items( - items=schema["parameters"], - ref_prefix=ref_prefix)) - elif "patternProperties" in schema: - section_obj.extend(self._make_arg_items( - items=schema["patternProperties"], - ref_prefix=ref_prefix, - description=["*Dictionary is expected. 
Keys should " - "follow pattern(s) described bellow.*"])) - elif "oneOf" in schema: - section_obj.append(utils.note( - "One of the following groups of " - "parameters should be provided.")) - for i, oneOf in enumerate(schema["oneOf"], 1): - description = None - if oneOf.get("doc", None): - description = [oneOf["doc"]] - section_obj.extend(self._make_arg_items( - items=oneOf["parameters"], - ref_prefix=ref_prefix, - title="Option %s of parameters" % i, - description=description)) - else: - section_obj.extend(self._make_arg_items( - items=[schema], ref_prefix=ref_prefix)) - else: - raise Exception("Failed to display provided schema: %s" % - info["schema"]) - - if issubclass(plugin_cls, validation.ValidatablePluginMixin): - validators = plugin_cls._meta_get("validators", default=[]) - platforms = [kwargs for name, args, kwargs in validators - if name == "required_platform"] - if platforms: - section_obj.append( - utils.paragraph("**Requires platform(s)**:")) - section = "" - for p in platforms: - section += "* %s" % p["platform"] - admin_msg = "credentials for admin user" - user_msg = ("regular users (temporary users can be created" - " via the 'users' context if admin user is " - "specified for the platform)") - if p.get("admin", False) and p.get("users", False): - section += " with %s and %s." % (admin_msg, user_msg) - elif p.get("admin", False): - section += " with %s." % admin_msg - elif p.get("users", False): - section += " with %s." % user_msg - section += "\n" - - section_obj.extend(utils.parse_text(section)) - - filename = info["module"].replace(".", "/") - ref = "https://github.com/openstack/rally/blob/master/%s.py" % filename - section_obj.extend(utils.parse_text("**Module**:\n`%s`__\n\n__ %s" - % (info["module"], ref))) - return section_obj - - def _make_plugin_base_section(self, base_cls, base_name=None): - if base_name: - title = ("%ss" % base_name if base_name[-1] != "y" - else "%sies" % base_name[:-1]) - subcategory_obj = utils.subcategory(title) - else: - subcategory_obj = [] - for p in sorted(base_cls.get_all(), key=lambda o: o.get_name()): - # do not display hidden contexts - if p._meta_get("hidden", False): - continue - subcategory_obj.append(self._make_plugin_section(p, base_name)) - - return subcategory_obj - - @staticmethod - def _parse_class_name(cls): - name = "" - for word in re.split(r"([A-Z][a-z]*)", cls.__name__): - if word: - if len(word) > 1 and name: - name += " " - name += word - return name - - def _get_all_plugins_bases(self): - """Return grouped and sorted all plugins bases.""" - bases = [] - bases_names = [] - for p in discover.itersubclasses(plugin.Plugin): - base_ref = getattr(p, "base_ref", None) - if base_ref == p: - name = self._parse_class_name(p) - if name in bases_names: - raise Exception("Two base classes with same name '%s' are " - "detected." % name) - bases_names.append(name) - category_of_base = "Common" - for cname, cbases in CATEGORIES.items(): - if name in cbases: - category_of_base = cname - - bases.append((category_of_base, name, p)) - return sorted(bases) - - def run(self): - plugins.load() - bases = self._get_all_plugins_bases() - if "base_cls" in self.options: - for _category_name, base_name, base_cls in bases: - if base_name == self.options["base_cls"]: - return self._make_plugin_base_section(base_cls) - raise Exception("Failed to generate plugins reference for '%s'" - " plugin base." 
% self.options["base_cls"]) - - categories = {} - - for category_name, base_name, base_cls in bases: - # FIXME(andreykurilin): do not ignore anything - if base_name in IGNORED_BASES: - continue - if category_name not in categories: - categories[category_name] = utils.category(category_name) - category_of_base = categories[category_name] - category_of_base.append(self._make_plugin_base_section(base_cls, - base_name)) - return [content for _name, content in sorted(categories.items())] - - -def setup(app): - plugins.load() - app.add_directive("generate_plugin_reference", PluginsReferenceDirective) diff --git a/doc/ext/utils.py b/doc/ext/utils.py deleted file mode 100644 index 25f38b3d..00000000 --- a/doc/ext/utils.py +++ /dev/null @@ -1,85 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Docutils is awful library. Let's apply some hacks and aliases to simplify usage -""" - -from docutils import frontend -from docutils import nodes -from docutils.parsers import rst -from docutils import utils -import string - -import six - - -def parse_text(text): - parser = rst.Parser() - settings = frontend.OptionParser( - components=(rst.Parser,)).get_default_values() - document = utils.new_document(text, settings) - parser.parse(text, document) - return document.children - -paragraph = lambda text: parse_text(text)[0] -note = lambda msg: nodes.note("", paragraph(msg)) -hint = lambda msg: nodes.hint("", *parse_text(msg)) -warning = lambda msg: nodes.warning("", paragraph(msg)) -category = lambda title: parse_text("%s\n%s" % (title, "-" * len(title)))[0] -subcategory = lambda title: parse_text("%s\n%s" % (title, "~" * len(title)))[0] -section = lambda title: parse_text("%s\n%s" % (title, "\"" * len(title)))[0] - - -def make_definition(term, ref, descriptions): - """Constructs definition with reference to it.""" - ref = ref.replace("_", "-").replace(" ", "-") - definition = parse_text( - ".. _%(ref)s:\n\n* *%(term)s* [ref__]\n\n__ #%(ref)s" % - {"ref": ref, "term": term}) - for descr in descriptions: - if descr: - if isinstance(descr, (six.text_type, six.binary_type)): - if descr[0] not in string.ascii_uppercase: - descr = descr.capitalize() - descr = paragraph(" %s" % descr) - definition.append(descr) - return definition - - -def make_definitions(title, ref_prefix, terms, descriptions=None): - """Constructs a list of definitions with reference to them.""" - raw_text = ["**%s**:" % title] - if descriptions: - for descr in descriptions: - raw_text.append(descr) - - for term, definitions in terms: - ref = ("%s%s" % (ref_prefix, term)).lower().replace( - ".", "-").replace("_", "-").replace(" ", "-") - raw_text.append(".. 
_%s:" % ref)
- raw_text.append("* *%s* [ref__]" % term)
-
- for d in definitions:
- d = d.strip() if d else None
- if d:
- if d[0] not in string.ascii_uppercase:
- # .capitalize() removes existing caps
- d = d[0].upper() + d[1:]
- d = "\n ".join(d.split("\n"))
- raw_text.append(" %s" % d)
-
- raw_text.append("__ #%s" % ref)
-
- return parse_text("\n\n".join(raw_text) + "\n")
diff --git a/doc/feature_request/README.rst b/doc/feature_request/README.rst
deleted file mode 100644
index 555c430c..00000000
--- a/doc/feature_request/README.rst
+++ /dev/null
@@ -1,11 +0,0 @@
-================
-Feature requests
-================
-
-To request a new feature, you should create a document similar to the other
-feature requests and contribute it to this directory following the
-instruction_.
-
-If you don't have time to contribute your feature request via Gerrit, please
-contact Andrey Kurilin (andr.kurilin@gmail.com)
-
-.. _instruction: http://rally.readthedocs.org/en/latest/contribute.html#how-to-contribute
diff --git a/doc/feature_request/capture_task_logging.rst b/doc/feature_request/capture_task_logging.rst
deleted file mode 100644
index 506a053f..00000000
--- a/doc/feature_request/capture_task_logging.rst
+++ /dev/null
@@ -1,22 +0,0 @@
-==========================
-Capture Logs from services
-==========================
-
-
-Use case
---------
-
-A developer is executing various tasks and would like to capture logs as
-well as test results.
-
-
-Problem description
--------------------
-
-In case of errors it is quite hard to debug what happened.
-
-
-Possible solution
------------------
-
-* Add a special context that can capture the logs from tested services.
diff --git a/doc/feature_request/check_queue_perfdata.rst b/doc/feature_request/check_queue_perfdata.rst
deleted file mode 100644
index 9d4b83d2..00000000
--- a/doc/feature_request/check_queue_perfdata.rst
+++ /dev/null
@@ -1,23 +0,0 @@
-====================
-Check queue perfdata
-====================
-
-Use case
---------
-
-Sometimes OpenStack services use the common messaging system very prodigally.
-For example, the Neutron metering agent sends all database table data on new
-object creation, i.e. https://review.openstack.org/#/c/143672/. This causes
-Neutron degradation and other obvious problems. It would be nice to have a way
-to track message count and message size in the queue during tests/benchmarks.
-
-Problem description
--------------------
-
-Heavy usage of the queue isn't checked.
-
-Possible solution
------------------
-
-* Before running tests/benchmarks, start a process which will connect to queue
- topics and measure message count, size and other data which we need.
diff --git a/doc/feature_request/comparing_results_of_2_tasks.rst b/doc/feature_request/comparing_results_of_2_tasks.rst
deleted file mode 100644
index c419a7c5..00000000
--- a/doc/feature_request/comparing_results_of_2_tasks.rst
+++ /dev/null
@@ -1,24 +0,0 @@
-========================================
-Ability to compare results between tasks
-========================================
-
-
-Use case
---------
-
-During the work on performance it's essential to be able to compare results of
-similar tasks before and after a change in the system.
-
-
-
-Problem description
--------------------
-
-There is no command to compare two or more tasks and get tables and graphs.
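Since this request is essentially about diffing two result sets, here is a minimal sketch of such a comparison. The flat input layout (a JSON list of iterations with a ``duration`` field) is an assumption for illustration, not Rally's exact export format.

.. code-block:: python

    # Hypothetical comparison of two exported task results; the input
    # layout (a JSON list of {"duration": ...} iterations) is assumed.
    import json
    import statistics
    import sys


    def mean_duration(path):
        with open(path) as f:
            iterations = json.load(f)
        return statistics.mean(it["duration"] for it in iterations)


    before, after = mean_duration(sys.argv[1]), mean_duration(sys.argv[2])
    print("before: %.3fs, after: %.3fs, delta: %+.1f%%"
          % (before, after, (after - before) / before * 100))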
-
-
-
-Possible solution
------------------
-
-* Add a command that accepts 2 task UUIDs and prints tables and graphs that
- compare the results
diff --git a/doc/feature_request/distributed_load_generation.rst b/doc/feature_request/distributed_load_generation.rst
deleted file mode 100644
index 6546596c..00000000
--- a/doc/feature_request/distributed_load_generation.rst
+++ /dev/null
@@ -1,19 +0,0 @@
-===========================
-Distributed load generation
-===========================
-
-Use Case
---------
-
-Some OpenStack projects (Marconi, MagnetoDB) require a really huge load,
-like 10-100k requests per second, for benchmarking.
-
-To generate such a huge load, Rally has to create load from different
-servers.
-
-Problem Description
--------------------
-
-* Rally can't generate load from different servers
-* Result processing can't handle big amounts of data
-* There is no support for chunking results
diff --git a/doc/feature_request/explicitly_specify_existing_users_for_scenarios.rst b/doc/feature_request/explicitly_specify_existing_users_for_scenarios.rst
deleted file mode 100644
index a933e70b..00000000
--- a/doc/feature_request/explicitly_specify_existing_users_for_scenarios.rst
+++ /dev/null
@@ -1,28 +0,0 @@
-===============================================
-Explicitly specify existing users for scenarios
-===============================================
-
-
-Use Case
---------
-
-Rally allows reusing existing users for scenario runs, and we should be able
-to use only a specified set of existing users for specific scenarios.
-
-
-Problem Description
--------------------
-
-For the moment, if a `deployment` with existing users is used, Rally chooses a
-user for each scenario run randomly. But there are cases when we may want to
-run one scenario with one user and another with a different, specific user.
-The main reason is that each user has a different set of resources, and those
-resources may be required by scenarios. Without this feature, a Rally user is
-forced to make all existing users similar and to set up all required resources
-for all the scenarios he uses. But that is redundant.
-
-
-Possible solution
------------------
-
-* Make it possible to use the existing_users context explicitly
diff --git a/doc/feature_request/historical_performance_data.rst b/doc/feature_request/historical_performance_data.rst
deleted file mode 100644
index 4ccb8ca3..00000000
--- a/doc/feature_request/historical_performance_data.rst
+++ /dev/null
@@ -1,26 +0,0 @@
-===========================
-Historical performance data
-===========================
-
-
-Use case
---------
-
-OpenStack is developed really rapidly. Hundreds of patches are merged daily,
-and it's really hard to track how performance changes over time.
-It would be nice to have a way to track the performance of major OpenStack
-functionality by periodically running a rally task and building graphs that
-represent how the performance of a specific method changes over time.
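A hypothetical sketch of the kind of trend graph this use case implies; the per-run JSON layout is assumed for illustration only, not a format Rally produces out of the box.

.. code-block:: python

    # Hypothetical trend report over periodic task runs; the per-run
    # layout ({"date": ..., "mean_duration": ...}) is assumed.
    import glob
    import json

    import matplotlib.pyplot as plt

    runs = []
    for path in sorted(glob.glob("results/*.json")):
        with open(path) as f:
            runs.append(json.load(f))

    dates = [r["date"] for r in runs]
    means = [r["mean_duration"] for r in runs]
    plt.plot(dates, means, marker="o")
    plt.ylabel("mean iteration duration, s")
    plt.xticks(rotation=45)
    plt.tight_layout()
    plt.savefig("performance_trend.png")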
-
-
-Problem description
--------------------
-
-There is no way to bind related tasks together
-
-
-Possible solution
------------------
-
-* Add grouping for tasks
-* Add a command that creates historical graphs
diff --git a/doc/feature_request/implemented/LDAP_support.rst b/doc/feature_request/implemented/LDAP_support.rst
deleted file mode 100644
index 8c341b03..00000000
--- a/doc/feature_request/implemented/LDAP_support.rst
+++ /dev/null
@@ -1,30 +0,0 @@
-===============================================
-Support benchmarking clouds that are using LDAP
-===============================================
-
-Use Case
---------
-
-A lot of production clouds are using LDAP with read-only access. It means
-that load can be generated only by users that already exist in the system,
-and there is no admin access.
-
-
-Problem Description
--------------------
-
-Rally is using admin access to create temporary users that will be used to
-produce load.
-
-
-Possible Solution
------------------
-
-* Add some way to pass already existing users
-
-
-Current Solution
-----------------
-
-* Allow the user to specify existing users in the configuration of the *ExistingCloud* deployment plugin
-* When such an *ExistingCloud* deployment is active, and the benchmark task file does not specify the *"users"* context, use the existing users instead of creating temporary ones.
-* Modify the *rally show ...* commands to list resources for each user separately.
diff --git a/doc/feature_request/implemented/add_possibility_to_specify_concurrency_for_tempest.rst b/doc/feature_request/implemented/add_possibility_to_specify_concurrency_for_tempest.rst
deleted file mode 100644
index 1d61e854..00000000
--- a/doc/feature_request/implemented/add_possibility_to_specify_concurrency_for_tempest.rst
+++ /dev/null
@@ -1,24 +0,0 @@
-========================================
-Running Tempest using custom concurrency
-========================================
-
-
-Use case
---------
-
-A user might want to use a specific concurrency for running tests, based on
-his deployment and available resources.
-
-
-Problem description
--------------------
-
-The "rally verify start" command does not allow specifying the concurrency
-for tempest tests, so they always run with a concurrency equal
-to the number of CPU cores.
-
-
-Possible solution
------------------
-
-* Add a ``--concurrency`` option to the "rally verify start" command.
diff --git a/doc/feature_request/implemented/stop_scenario_after_several_errors.rst b/doc/feature_request/implemented/stop_scenario_after_several_errors.rst
deleted file mode 100644
index 79d7e74a..00000000
--- a/doc/feature_request/implemented/stop_scenario_after_several_errors.rst
+++ /dev/null
@@ -1,58 +0,0 @@
-==================================
-Stop scenario after several errors
-==================================
-
-
-Use case
---------
-
-Starting long tests on big environments.
-
-
-Problem description
--------------------
-
-When we start a rally scenario on an environment where keystone dies, we lose
-a lot of time waiting for timeouts.
- - -Example -------- -Times in hard tests -05:25:40 rally-scenarios.cinder -05:25:40 create-and-delete-volume [4074 iterations, 15 threads] OK 8.91 -08:00:02 create-and-delete-snapshot [5238 iterations, 15 threads] OK 17.46 -08:53:20 create-and-list-volume [4074 iterations, 15 threads] OK 3.18 -12:04:14 create-snapshot-and-attach-volume [2619 iterations, 15 threads] FAIL -14:18:44 create-and-attach-volume [2619 iterations, 15 threads] FAIL -14:23:47 rally-scenarios.vm -14:23:47 boot_runcommand_metadata_delete [5 iterations, 5 threads] FAIL -16:30:46 rally-scenarios.nova -16:30:46 boot_and_list_server [5820 iterations, 15 threads] FAIL -19:19:30 resize_server [5820 iterations, 15 threads] FAIL -02:51:13 boot_and_delete_server_with_secgroups [5820 iterations, 60 threads] FAIL - - -Times in light variant -00:38:25 rally-scenarios.cinder -00:38:25 create-and-delete-volume [14 iterations, 1 threads] OK 5.30 -00:40:39 create-and-delete-snapshot [18 iterations, 1 threads] OK 5.65 -00:41:52 create-and-list-volume [14 iterations, 1 threads] OK 2.89 -00:45:18 create-snapshot-and-attach-volume [9 iterations, 1 threads] OK 17.75 -00:48:54 create-and-attach-volume [9 iterations, 1 threads] OK 20.04 -00:52:29 rally-scenarios.vm -00:52:29 boot_runcommand_metadata_delete [5 iterations, 5 threads] OK 128.86 -00:56:42 rally-scenarios.nova -00:56:42 boot_and_list_server [20 iterations, 1 threads] OK 6.98 -01:04:48 resize_server [20 iterations, 1 threads] OK 22.90 - - -In the hard test we have a lot of timeouts from keystone and a lot of time on -test execution - -Possible solution ------------------ - -Improve SLA check functionality to work "online". And add ability to control -execution process and stop load generation in case of sla check failures. - diff --git a/doc/feature_request/installation_script_enhancements.rst b/doc/feature_request/installation_script_enhancements.rst deleted file mode 100644 index 9018a382..00000000 --- a/doc/feature_request/installation_script_enhancements.rst +++ /dev/null @@ -1,26 +0,0 @@ -====================================================================== -Enhancements to installation script: ``--version`` and ``--uninstall`` -====================================================================== - - -Use case --------- - -User might wish to control which rally version is installed or even purge -rally from the machine completely. - - -Problem description -------------------- - - #. Installation script doesn't allow to choose version. - #. No un-install support. - - - -Possible solution ------------------ - - #. Add ``--version`` option to installation script. - #. Add ``--uninstall`` option to installation script or create an - un-installation script diff --git a/doc/feature_request/installing_isolated.rst b/doc/feature_request/installing_isolated.rst deleted file mode 100644 index 154a6c34..00000000 --- a/doc/feature_request/installing_isolated.rst +++ /dev/null @@ -1,26 +0,0 @@ -================================================================================== -Installation script: ``--pypi-mirror``, ``--package-mirror`` and ``--venv-mirror`` -================================================================================== - - -Use case --------- - -Installation is pretty easy when there is an Internet connection available. -And there is surely a number of OpenStack uses when whole environment is -isolated. In this case, we need somehow specify where installation script -should take required libs and packages. - - -Problem description -------------------- - - #. 
The installation script can't work without a direct Internet connection
-
-
-Possible solution #1
---------------------
-
- #. Add ``--pypi-mirror`` option to installation script.
- #. Add ``--package-mirror`` option to installation script.
- #. Add ``--venv-mirror`` option to installation script.
diff --git a/doc/feature_request/launch_specific_benchmark.rst b/doc/feature_request/launch_specific_benchmark.rst
deleted file mode 100644
index d9588f8f..00000000
--- a/doc/feature_request/launch_specific_benchmark.rst
+++ /dev/null
@@ -1,27 +0,0 @@
-============================
-Launch Specific Benchmark(s)
-============================
-
-
-Use case
---------
-
-A developer is working on a feature that is covered by one or more specific
-benchmarks/scenarios. He/she would like to execute a rally task with an
-existing task template file (YAML or JSON), indicating exactly which
-benchmark(s) should be executed.
-
-
-Problem description
--------------------
-
-When executing a task with a template file in Rally, all benchmarks are
-executed, without the ability to specify one benchmark or a set of benchmarks
-the user would like to execute.
-
-
-Possible solution
------------------
-
-* Add an optional flag to the rally task start command to specify one or more
- benchmarks to execute as part of that test run.
diff --git a/doc/feature_request/multi_scenarios_load_gen.rst b/doc/feature_request/multi_scenarios_load_gen.rst
deleted file mode 100644
index 960f2510..00000000
--- a/doc/feature_request/multi_scenarios_load_gen.rst
+++ /dev/null
@@ -1,35 +0,0 @@
-======================================
-Using multi scenarios to generate load
-======================================
-
-
-Use Case
---------
-
-Rally should be able to generate real-life load by simultaneously creating
-load on different components of OpenStack, e.g. simultaneously booting VMs,
-uploading images and listing users.
-
-
-Problem Description
--------------------
-
-At the moment, Rally is able to run only 1 scenario per benchmark.
-Scenarios are quite specific (e.g. boot and delete a VM) and can't
-actually generate real-life load.
-
-Writing a lot of specific benchmark scenarios to produce more real-life
-load would create a mess and a lot of code duplication.
-
-
-Possible solution
------------------
-
-* Extend the Rally task benchmark configuration to support passing
- multiple benchmark scenarios in a single benchmark context
-
-* Extend the Rally task output format to support results of multiple scenarios
- in a single benchmark separately.
-
-* Extend rally task plot2html and rally task detailed to show results
- separately for every scenario.
diff --git a/doc/feature_request/multiple_attach_volume.rst b/doc/feature_request/multiple_attach_volume.rst
deleted file mode 100644
index 700b1fb3..00000000
--- a/doc/feature_request/multiple_attach_volume.rst
+++ /dev/null
@@ -1,20 +0,0 @@
-======================
-Multiple attach volume
-======================
-
-
-Use Case
---------
-Since OpenStack Mitaka supports multiple volume attaching, one volume can be
-attached to several instances or hosts, so Rally should add scenarios covering
-multiple volume attachment.
-
-
-Problem Description
--------------------
-Rally lacks scenarios for multiple volume attachment.
-
-
-Possible solution
------------------
-* Add nova scenarios "multi_attach_volume" and "multi_detach_volume"
diff --git a/doc/feature_request/persistence_benchmark_env.rst b/doc/feature_request/persistence_benchmark_env.rst
deleted file mode 100644
index 87797d4b..00000000
--- a/doc/feature_request/persistence_benchmark_env.rst
+++ /dev/null
@@ -1,30 +0,0 @@
-================================================
-Add support of persistence benchmark environment
-================================================
-
-Use Case
---------
-
-To benchmark many operations like show, list, detailed, you need to already
-have these resources in the cloud. So it would be nice to be able to create
-the benchmark environment once before benchmarking, run some number of
-benchmarks that use it, and at the end just delete all resources created by
-the benchmark environment.
-
-
-Problem Description
--------------------
-
-Fortunately, Rally already has a mechanism for creating a benchmark
-environment, which is used to create load. Unfortunately, it's an atomic
-operation: (create environment, make load, delete environment).
-This should be split into 3 separate steps.
-
-
-Possible solution
------------------
-
-* Add new CLI operations to work with the benchmark environment:
- (show, create, delete, list)
-
-* Allow tasks to start against a benchmark environment (instead of a
- deployment)
diff --git a/doc/feature_request/production_ready_cleanup.rst b/doc/feature_request/production_ready_cleanup.rst
deleted file mode 100644
index 62378f35..00000000
--- a/doc/feature_request/production_ready_cleanup.rst
+++ /dev/null
@@ -1,36 +0,0 @@
-=========================
-Production ready cleanups
-=========================
-
-Use Case
---------
-
-Rally should, in any case, delete all resources that it created during a
-benchmark.
-
-
-Problem Description
--------------------
-
-* (implemented) Deletion rate limit
-
- You can kill a cloud by deleting too many objects simultaneously, so a
- deletion rate limit is required
-
-* (implemented) Retry on failures
-
- There should be a few attempts to delete a resource in case of failures
-
-* (implemented) Log resources that failed to be deleted
-
- We should log warnings about all non-deleted resources. This information
- should include the UUID of the resource, its type and project.
-
-* (implemented) Pluggable
-
- It should be simple to add new cleanups by just adding plugins somewhere.
-
-* Disaster recovery
-
- Rally should use special name patterns to be able to delete resources even
- in the case when something went wrong with the server that is running Rally
- and you have just a new Rally instance (without the old Rally DB) on a new
- server.
diff --git a/doc/release_notes/archive.rst b/doc/release_notes/archive.rst
deleted file mode 100644
index d028cdd8..00000000
--- a/doc/release_notes/archive.rst
+++ /dev/null
@@ -1,11 +0,0 @@
-.. _release_notes/archive:
-
-=================
-All release notes
-=================
-
-.. 
toctree::
- :glob:
- :maxdepth: 1
-
- archive/*
diff --git a/doc/release_notes/archive/v0.0.1.rst b/doc/release_notes/archive/v0.0.1.rst
deleted file mode 100644
index ac8e31f4..00000000
--- a/doc/release_notes/archive/v0.0.1.rst
+++ /dev/null
@@ -1,28 +0,0 @@
-============
-Rally v0.0.1
-============
-
-
-Information
------------
-
-
-+------------------+-----------------+
-| Commits | **1039** |
-+------------------+-----------------+
-| Bug fixes | **0** |
-+------------------+-----------------+
-| Dev cycle | **547 days** |
-+------------------+-----------------+
-| Release date | **26/Jan/2015** |
-+------------------+-----------------+
-
-
-
-Details
--------
-
-Rally is an awesome tool for testing, verifying and benchmarking OpenStack
-clouds.
-
-A lot of people started using Rally in their CI/CD, so the Rally team should
-provide a more stable product with a clear strategy of deprecation and
-upgrades.
diff --git a/doc/release_notes/archive/v0.0.2.rst b/doc/release_notes/archive/v0.0.2.rst
deleted file mode 100644
index b2d7f7cd..00000000
--- a/doc/release_notes/archive/v0.0.2.rst
+++ /dev/null
@@ -1,223 +0,0 @@
-============
-Rally v0.0.2
-============
-
-Information
------------
-
-+------------------+-----------------+
-| Commits | **100** |
-+------------------+-----------------+
-| Bug fixes | **18** |
-+------------------+-----------------+
-| Dev cycle | **45 days** |
-+------------------+-----------------+
-| Release date | **12/Mar/2015** |
-+------------------+-----------------+
-
-
-Details
--------
-
-This release contains new features, new benchmark plugins, bug fixes,
-various code and API improvements.
-
-
-New Features
-~~~~~~~~~~~~
-
-* rally task start **--abort-on-sla-failure**
-
- Stopping load before things go wrong.
- Load generation will be interrupted if SLA criteria stop passing.
-
-* Rally verify command supports multiple Tempest sources now.
-
-* python34 support
-
-* postgres DB backend support
-
-
-API changes
-~~~~~~~~~~~
-
-
-* [new] **rally [deployment | verify | task] use** subcommand
-
- It should be used instead of the root command **rally use**
-
-* [new] Rally as a Lib API
-
- To avoid code duplication between Rally as a CLI tool and Rally as a
- Service, we decided to make Rally as a Lib the common part between these
- 2 modes.
-
- Rally as a Service will be a daemon that just maps HTTP requests to the
- Rally as a Lib API.
-
-* [deprecated] **rally use** CLI command
-
-* [deprecated] Old Rally as a Lib API
-
- The old Rally API was quite mixed up, so we decided to deprecate it
-
-
-Plugins
-~~~~~~~
-
-* **Benchmark Scenario Runners**:
-
- [improved] Improved load generation algorithm in **constant runner**
- (sketched below)
-
- Before, we used processes to generate load; now the runner creates a pool
- of processes (the number of processes is equal to the CPU count) and then
- uses threads inside each process to generate load. So now you can easily
- generate a load of 1k concurrent scenarios.
-
- [improved] Unify code of **constant** and **rps** runners
-
- [interface] Added **abort()** to the runner's plugin interface
-
- New method **abort()** is used to immediately interrupt execution.
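The process-pool-plus-threads approach described above can be sketched as follows; this is an illustrative simplification, not Rally's actual runner code (the real runner also bounds concurrency and collects results):

.. code-block:: python

    # Simplified sketch: one worker process per CPU core, each using
    # threads to run scenario iterations. Not Rally's real runner.
    import multiprocessing
    import threading


    def run_iteration(i):
        pass  # placeholder for a single scenario iteration


    def worker(iterations):
        threads = [threading.Thread(target=run_iteration, args=(i,))
                   for i in iterations]
        for t in threads:
            t.start()
        for t in threads:
            t.join()


    if __name__ == "__main__":
        total = 100
        cpus = multiprocessing.cpu_count()
        procs = [multiprocessing.Process(target=worker,
                                         args=(range(i, total, cpus),))
                 for i in range(cpus)]
        for p in procs:
            p.start()
        for p in procs:
            p.join()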
-
-
-* **Benchmark Scenarios**:
-
- [new] DesignateBasic.create_and_delete_server
-
- [new] DesignateBasic.create_and_list_servers
-
- [new] DesignateBasic.list_servers
-
- [new] MistralWorkbooks.list_workbooks
-
- [new] MistralWorkbooks.create_workbook
-
- [new] Quotas.neutron_update
-
- [new] HeatStacks.create_update_delete_stack
-
- [new] HeatStacks.list_stacks_and_resources
-
- [new] HeatStacks.create_suspend_resume_delete_stack
-
- [new] HeatStacks.create_check_delete_stack
-
- [new] NeutronNetworks.create_and_delete_routers
-
- [new] NovaKeypair.create_and_delete_keypair
-
- [new] NovaKeypair.create_and_list_keypairs
-
- [new] NovaKeypair.boot_and_delete_server_with_keypair
-
- [new] NovaServers.boot_server_from_volume_and_live_migrate
-
- [new] NovaServers.boot_server_attach_created_volume_and_live_migrate
-
- [new] CinderVolumes.create_and_upload_volume_to_image
-
- [fix] CinderVolumes.create_and_attach_volume
-
- Pass optional \*\*kwargs only to the create server command
-
- [fix] GlanceImages.create_image_and_boot_instances
-
- Pass optional \*\*kwargs only to the create server command
-
- [fix] TempestScenario.\* removed stress cleanup.
-
- The major issue is that tempest stress cleanup cleans the whole OpenStack.
- This is very dangerous, so it's better to remove it and leave some
- extra resources behind.
-
- [improved] NovaSecGroup.boot_and_delete_server_with_secgroups
-
- Add optional \*\*kwargs that are passed to the boot server command
-
-
-* **Benchmark Context**:
-
- [new] **stacks**
-
- Generates the passed amount of heat stacks for all tenants.
-
- [new] **custom_image**
-
- Prepares images for benchmarks in VMs.
-
- To support generating workloads in VMs with existing tools like IPerf,
- Blogbench, HPCC and others, we have to have prepared images with the
- tools already installed and configured.
-
- The Rally team decided to generate such images on the fly from a passed
- image, to avoid the requirement of having a big repository with a lot
- of images.
-
- This context is an abstract context that automates the following steps:
-
- 1) run a VM with the passed image (with floating IP and other stuff)
- 2) execute an abstract method that has access to the VM
- 3) snapshot the image
-
- In the future we are going to use this as a base for making contexts
- that prepare images.
-
- [improved] **allow_ssh**
-
- Automatically disable it if security groups are disabled in neutron.
-
- [improved] **keypair**
-
- Key pairs are stored in the "users" space, which means that accessing a
- keypair from a scenario is simpler now:
-
- self.context["user"]["keypair"]["private"]
-
- [fix] **users**
-
- Pass the proper EndpointType for newly created users
-
- [fix] **sahara_edp**
-
- The Job Binaries data should be treated as binary content
-
-
-* **Benchmark SLA**:
-
- [interface] SLA calculations are done in an additive way now
-
- This resolves scale issues, because now we don't need to keep the whole
- array of iterations in memory to process SLA.
-
- This is required to implement the **--abort-on-sla-failure** feature
-
- [all] SLA plugins were rewritten to implement the new interface
-
-
-Bug fixes
-~~~~~~~~~
-
-**18 bugs were fixed, the most critical are**:
-
-
-* Fix **rally task detailed --iterations-data**
-
- It didn't work in case of missing atomic actions. Such a situation can
- occur if a scenario method raises exceptions
-
-* Add a user-friendly message if the task cannot be deleted
-
- When trying to delete a task that is not in "finished" status, users got
- tracebacks instead of a user-friendly message suggesting to run the command
- with the --force key.
-
-
-* The Network context now cleans up networks properly
-
-
-Documentation
-~~~~~~~~~~~~~
-
-* Image sizes are fixed
-
-* New tutorial in "Step by Step" related to **--abort-on-sla-failure**
-
-* Various fixes
diff --git a/doc/release_notes/archive/v0.0.3.rst b/doc/release_notes/archive/v0.0.3.rst
deleted file mode 100644
index 790765c8..00000000
--- a/doc/release_notes/archive/v0.0.3.rst
+++ /dev/null
@@ -1,153 +0,0 @@
-============
-Rally v0.0.3
-============
-
-Information
------------
-
-+------------------+-----------------+
-| Commits | **53** |
-+------------------+-----------------+
-| Bug fixes | **14** |
-+------------------+-----------------+
-| Dev cycle | **33 days** |
-+------------------+-----------------+
-| Release date | **14/Apr/2015** |
-+------------------+-----------------+
-
-
-Details
--------
-
-This release contains new features, new benchmark plugins, bug fixes,
-various code and API improvements.
-
-
-New Features & API changes
-~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-
- * Add the ability to specify versions for clients in benchmark scenarios
-
- You can call self.clients("glance", "2") and get a client for a
- specific version.
-
- * Add API for tempest uninstall
-
- $ rally-manage tempest uninstall
- # fully removes tempest for the active deployment
-
-
- * Add a --uuids-only option to rally task list
-
- $ rally task list --uuids-only # returns a list with only task uuids
-
- * Add endpoint to --fromenv deployment creation
-
- $ rally deployment create --fromenv
- # recognizes the standard OS_ENDPOINT environment variable
-
- * Configure SSL per deployment
-
- Now SSL information is deployment-specific, not Rally-specific, and the
- rally.conf option is deprecated
-
- Like in this sample
- https://github.com/openstack/rally/blob/14d0b5ba0c75ececfdb6a6c121d9cf2810571f77/samples/deployments/existing.json#L11-L12
-
-
-Specs
-~~~~~
-
- * [spec] Proposal for a new task input file format
-
- This spec describes a new task input format that will allow us to generate
- multi-scenario load, which is crucial for HA and more real-life testing:
-
- https://github.com/openstack/rally/blob/master/doc/specs/in-progress/new_rally_input_task_format.rst
-
-
-Plugins
-~~~~~~~
-
-* **Benchmark Scenario Runners**:
-
- * Add a maximum concurrency option to the rps runner
-
- To avoid running too heavy a load, you can set 'concurrency' in the
- configuration; in case the cloud is not able to process all requests,
- the runner won't start more parallel requests than the 'concurrency'
- value (see the sketch below).
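An illustrative runner section for a task file, shown as a Python dict for brevity (task files are usually JSON or YAML); the 'concurrency' cap is the option described above, and the exact option names should be treated as assumptions:

.. code-block:: python

    # Illustrative rps runner configuration; option names are
    # assumptions for illustration, not a definitive reference.
    runner = {
        "type": "rps",
        "rps": 20,          # start 20 new iterations per second
        "times": 1000,      # total number of iterations
        "concurrency": 50,  # never run more than 50 iterations in parallel
    }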
-
-
-* **Benchmark Scenarios**:
-
- [new] CeilometerAlarms.create_alarm_and_get_history
-
- [new] KeystoneBasic.get_entities
-
- [new] EC2Servers.boot_server
-
- [new] KeystoneBasic.create_and_delete_service
-
- [new] MuranoEnvironments.list_environments
-
- [new] MuranoEnvironments.create_and_delete_environment
-
- [new] NovaServers.suspend_and_resume_server
-
- [new] NovaServers.pause_and_unpause_server
-
- [new] NovaServers.boot_and_rebuild_server
-
- [new] KeystoneBasic.create_and_list_services
-
- [new] HeatStacks.list_stacks_and_events
-
- [improved] VMTask.boot_runcommand_delete
-
- restored the ability to use a fixed IP and a floating IP to connect to
- the VM via ssh
-
- [fix] NovaServers.boot_server_attach_created_volume_and_live_migrate
-
- Kwargs in the nova scenario were passed incorrectly
-
-
-
-* **Benchmark SLA**:
-
- * [new] aborted_on_sla
-
- This is an internal SLA criterion that is added if the task was aborted
-
-
- * [new] something_went_wrong
-
- This is an internal SLA criterion that is added if something went wrong:
- a context failed to be created or the runner raised some exceptions
-
-
-Bug fixes
-~~~~~~~~~
-
-**14 bugs were fixed, the most critical are**:
-
- * Set the default task uuid to the running task. Before, it was set only
- after the task fully finished.
-
- * The "rally task results" command showed a disorienting "task not found"
- message for a task that is currently running.
-
- * Rally didn't know how to reconnect to OpenStack in case the token
- expired.
-
-
-Documentation
-~~~~~~~~~~~~~
-
-* New tutorial **task templates**
-
-https://rally.readthedocs.org/en/latest/tutorial/step_5_task_templates.html
-
-* Various fixes
-
diff --git a/doc/release_notes/archive/v0.0.4.rst b/doc/release_notes/archive/v0.0.4.rst
deleted file mode 100644
index 17b56ff9..00000000
--- a/doc/release_notes/archive/v0.0.4.rst
+++ /dev/null
@@ -1,180 +0,0 @@
-============
-Rally v0.0.4
-============
-
-Information
------------
-
-+------------------+-----------------+
-| Commits | **87** |
-+------------------+-----------------+
-| Bug fixes | **21** |
-+------------------+-----------------+
-| Dev cycle | **30 days** |
-+------------------+-----------------+
-| Release date | **14/May/2015** |
-+------------------+-----------------+
-
-
-Details
--------
-
-This release contains new features, new benchmark plugins, bug fixes, various code and API improvements.
-
-
-New Features & API changes
-~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-* Rally now can generate load with users that already exist
-
- Now one can use Rally for benchmarking OpenStack clouds that are using LDAP, AD or any other read-only keystone backend where it is not possible to create any users. To do this, one should set up the "users" section of the deployment configuration of the ExistingCloud type. This feature also makes it safer to run Rally against production clouds: when run from an isolated group of users, Rally won't affect the rest of the cloud's users if something goes wrong.
-
-* New decorator *@osclients.Clients.register* can add new OpenStack clients at runtime
-
- It is now possible to add a new OpenStack client dynamically at runtime. The added client will be available from osclients.Clients at the module level and cached. Example:
-
-.. code-block:: none
-
- >>> from rally import osclients
- >>> @osclients.Clients.register("supernova")
- ... def another_nova_client(self):
- ... from novaclient import client as nova
- ... return nova.Client("2", auth_token=self.keystone().auth_token,
- ... **self._get_auth_info(password_key="key"))
- ...
- 
>>> clients = osclients.Clients.create_from_env()
- >>> clients.supernova().services.list()[:2]
- [<Service: ...>, <Service: ...>]
-
-* Assert methods now available for scenarios and contexts
-
- There is now a new *FunctionalMixin* class that implements basic unittest assert methods. The *base.Context* and *base.Scenario* classes inherit from this mixin, so now it is possible to use *base.assertX()* methods in scenarios and contexts.
-
-* Improved installation script
-
- The installation script has been almost completely rewritten. After this change, it can be run by an unprivileged user, supports different database types, allows specifying a custom python binary, always asks for confirmation before doing potentially dangerous actions, automatically installs needed software if run as root, and also automatically cleans up the virtualenv and/or the downloaded repository if interrupted.
-
-
-Specs & Feature requests
-~~~~~~~~~~~~~~~~~~~~~~~~
-
-* [Spec] Reorder plugins
-
- The spec describes how to split the Rally framework and plugins codebase to make it simpler for newbies to understand how the Rally code is organized and how it works.
-
-* [Feature request] Specify what benchmarks to execute in task
-
- This feature request proposes to add the ability to specify the benchmark(s) to be executed when the user runs the *rally task start* command. A possible solution would be to add a special flag to the *rally task start* command.
-
-
-Plugins
-~~~~~~~
-
-* **Benchmark Scenario Runners**:
-
- * Add limits for maximum Core usage to constant and rps runners
-
- The new 'max_cpu_usage' parameter can be used to avoid possible 100% usage of all available CPU cores by reducing the number of CPU cores available for processes started by the corresponding runner.
-
-
-* **Benchmark Scenarios**:
-
- * [new] KeystoneBasic.create_update_and_delete_tenant
-
- * [new] KeystoneBasic.create_user_update_password
-
- * [new] NovaServers.shelve_and_unshelve_server
-
- * [new] NovaServers.boot_and_associate_floating_ip
-
- * [new] NovaServers.boot_lock_unlock_and_delete
-
- * [new] NovaHypervisors.list_hypervisors
-
- * [new] CeilometerSamples.list_samples
-
- * [new] CeilometerResource.get_resources_on_tenant
-
- * [new] SwiftObjects.create_container_and_object_then_delete_all
-
- * [new] SwiftObjects.create_container_and_object_then_download_object
-
- * [new] SwiftObjects.create_container_and_object_then_list_objects
-
- * [new] MuranoEnvironments.create_and_deploy_environment
-
- * [new] HttpRequests.check_random_request
-
- * [new] HttpRequests.check_request
-
- * [improved] NovaServers live migrate benchmarks
-
- add 'min_sleep' and 'max_sleep' parameters to simulate a pause between VM booting and running live migration
-
- * [improved] NovaServers.boot_and_live_migrate_server
-
- add a usage sample to samples/tasks
-
- * [improved] CinderVolumes benchmarks
-
- support a size range to be passed to the 'size' argument as a dictionary
- *{"min": , "max": }*
-
-
-* **Benchmark Contexts**:
-
- * [new] MuranoPackage
-
- This new context can upload a package to Murano from some specified path.
-
- * [new] CeilometerSampleGenerator
-
- Context that can be used for creating samples and collecting resources for benchmarks into a list.
-
-
-* **Benchmark SLA**:
-
- * [new] outliers
-
- This new SLA checks that the number of outliers (calculated from the mean and standard deviation of the iteration durations) does not exceed some maximum value. The SLA is highly configurable: the parameters used for outliers threshold calculation can be set by the user (see the sketch below).
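The outlier rule described above can be sketched as follows; the parameter names (``max_outliers``, ``sigmas``) are assumptions for illustration, not necessarily the plugin's real option names:

.. code-block:: python

    # Minimal sketch of the outliers check; not Rally's exact
    # implementation. An iteration counts as an outlier when its
    # duration exceeds mean + sigmas * stddev.
    import statistics


    def outliers_ok(durations, max_outliers=3, sigmas=3.0):
        mean = statistics.mean(durations)
        stddev = statistics.pstdev(durations)
        threshold = mean + sigmas * stddev
        outliers = sum(1 for d in durations if d > threshold)
        return outliers <= max_outliers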
-
-
-Bug fixes
-~~~~~~~~~
-
-**21 bugs were fixed, the most critical are**:
-
-* Make it possible to use relative imports for plugins that are outside of the rally package.
-
-* Fix heat stacks cleanup by deleting them only 1 time per tenant (get rid of "stack not found" errors in logs).
-
-* Fix the wrong behavior of 'rally task detailed --iterations-data' (it lacked the iteration info before).
-
-* Fix security groups cleanup: a security group called "default", created automatically by Neutron, did not get deleted for each tenant.
-
-
-Other changes
-~~~~~~~~~~~~~
-
-* Streaming algorithms that scale
-
- This release introduces the common/streaming_algorithms.py module. This module is going to contain implementations of benchmark data processing algorithms that scale: these algorithms do not store exhaustive information about every single benchmark iteration duration processed. For now, the module contains implementations of algorithms for the computation of mean & standard deviation.
-
-* Coverage job to check that new patches come with unit tests
-
- Rally now has a coverage job that checks that every patch submitted for review does not decrease the number of lines covered by unit tests (at least not too much). This job allows marking most patches without unit tests with '-1'.
-
-* Splitting the plugins code (Runners & SLA) into common/openstack plugins
-
- According to the spec "Reorder plugins" (see above), the plugins code for runners and SLA has been moved to the *plugins/common/* directory. Only base classes now remain in the *benchmark/* directory.
-
-
-Documentation
-~~~~~~~~~~~~~
-
-* Various fixes
-
- * Remove obsolete *.rst* files (*deploy_engines.rst* / *server_providers.rst* / ...)
- * Restructure the docs files to make them easier to navigate through
- * Move the chapter on task templates to the 4th step in the tutorial
- * Update the information about meetings (new release meeting & time changes)
diff --git a/doc/release_notes/archive/v0.1.0.rst b/doc/release_notes/archive/v0.1.0.rst
deleted file mode 100644
index 4eeede43..00000000
--- a/doc/release_notes/archive/v0.1.0.rst
+++ /dev/null
@@ -1,514 +0,0 @@
-============
-Rally v0.1.0
-============
-
-Information
------------
-
-+------------------+-----------------------+
-| Commits | **355** |
-+------------------+-----------------------+
-| Bug fixes | **90** |
-+------------------+-----------------------+
-| Dev cycle | **132 days** |
-+------------------+-----------------------+
-| Release date | **25/September/2015** |
-+------------------+-----------------------+
-
-
-Details
--------
-
-This release contains new features, 42 new plugins, 90 bug fixes,
-various code and API improvements.
-
-
-New Features & API changes
-~~~~~~~~~~~~~~~~~~~~~~~~~~
-* **Improved installation script**
-
- * Add parameters:
-
- * ``--develop`` parameter to install rally in editable (develop) mode
-
- * ``--no-color`` to switch off output colorizing -
- useful for automated output parsing and terminals that don't
- support colors.
-
- * Puts rally.conf under virtualenv etc/rally/ so you can have several
- rally installations in virtualenv
-
- * Many fixes related to access to different files, like rally.conf and
- the rally db file in case of sqlite
-
- * Update pip before Rally installation
-
- * Fix reinstallation
-
-
-* **Separated Rally plugins & framework**
-
- Now plugins are here:
- https://github.com/openstack/rally/tree/master/rally/plugins
-
- Plugins are separated as well: common/* is for common plugins that can
- be used no matter what is tested, next to the OpenStack-related plugins
-
-
-* **New Rally Task framework**
-
- * All plugins have the same Plugin base:
- rally.common.plugin.plugin.Plugin. They have the same mechanisms for
- discovering and providing information based on docstrings, and in the
- future they will use the same deprecation/rename mechanism.
-
- * Some files were moved:
-
- * rally/benchmark -> rally/task
-
- *This was done to unify the naming of the rally task command and
- the code that actually implements it.*
-
- * rally/benchmark/sla/base.py -> rally/task/sla.py
-
- * rally/benchmark/context/base.py -> rally/task/context.py
-
- * rally/benchmark/scenarios/base.py -> rally/task/scenario.py
-
- * rally/benchmark/runners/base.py -> rally/task/runner.py
-
- * rally/benchmark/scenarios/utils.py -> rally/task/utils.py
-
- This was done to:
-
- * avoid doing "from rally.benchmark.scenarios import base as
- scenario_base"
-
- * remove one level of nesting
-
- * simplify the framework structure
-
-
- * Some classes and methods were renamed
-
- * Plugin configuration:
-
- * context.context() -> context.configure()
-
- * scenario.scenario() -> scenario.configure()
-
- * Introduced runner.configure()
-
- * Introduced sla.configure()
-
- This resolves 3 problems:
-
- * Unifies configuration of different types of plugins
-
- * Simplifies the plugin interface
-
- * Looks nice with the new module paths:
- >>> from rally.task import scenario
- >>> @scenario.configure()
-
-
- * Atomic Actions were changed:
-
- * New rally.task.atomic module
-
- This allows us to reuse atomic actions in Context plugins in the
- future
-
- * Renames:
-
- rally.benchmark.scenarios.base.AtomicAction
- -> rally.task.atomic.ActionTimer
-
- rally.benchmark.scenarios.base.atomic_action()
- -> rally.task.atomic.action_timer()
-
- * **Context plugins decide how to map their data for scenario**
-
- Now the Context.map_for_scenario method can be overridden to decide
- how to pass the context object to each iteration of the scenario.
-
- * Samples of NEW vs OLD context, sla, scenario and runner plugins:
-
- * Context
-
- .. code-block:: python
-
- # Old
- from rally.benchmark.context import base
-
- @base.context(name="users", order=100)
- class YourContext(base.Context):
-
- def setup(self):
- # ...
-
- def cleanup(self):
- # ...
-
- # New
- from rally.task import context
-
- @context.configure(name="users", order=100)
- class YourContext(context.Context):
-
- def setup(self):
- # ...
-
- def cleanup(self):
- # ...
-
- def map_for_scenario(self):
- # Maps the context object to the scenario context object,
- # like context["users"] -> context["user"] and so on.
- ..
-
-
- * Scenario
-
- .. code-block:: python
-
- # Old Scenario
-
- from rally.benchmark.scenarios import base
- from rally.benchmark import validation
-
- class ScenarioPlugin(base.Scenario):
-
- @base.scenario()
- def some(self):
- self._do_some_action()
-
-
- @base.atomic_action_timer("some_timer")
- def _do_some_action(self):
- # ...
- - # New Scenario - - from rally.task import atomic - from rally.task import scenario - from rally.task import validation - - # OpenStack scenario has different base now: - # rally.plugins.openstack.scenario.OpenStackScenario - class ScenarioPlugin(scenario.Scenario): - - @scenario.configure() - def some(self): - self._do_some_action() - - @atomic.action_timer("some_action") - def _do_some_action(self): - # ... - .. - - * Runner - - .. code-block:: python - - ## Old - - from rally.benchmark.runners import base - - class SomeRunner(base.ScenarioRunner): - - __execution_type__ = "some_runner" - - def _run_scenario(self, cls, method_name, context, args): - # Load generation - - def abort(self): - # Method that aborts load generation - - ## New - - from rally.task import runner - - @runner.configure(name="some_runner") - class SomeRunner(runner.ScenarioRunner): - - def _run_scenario(self, cls, method_name, context, args): - # Load generation - - def abort(self): - # Method that aborts load generation - - .. - - * SLA - - .. code-block:: python - - # Old - - from rally.benchmark import sla - - class FailureRate(sla.SLA): - # ... - - # New - - from rally.task import sla - - @sla.configure(name="failure_rate") - class FailureRate(sla.SLA): - # ... - .. - - -* **Rally Task abort command** - - Finally, you can gracefully shut down a running task by calling: - - .. code:: bash - - rally task abort - .. - -* **Rally CLI changes** - - * [add] ``rally --plugin-paths`` specifies the list of directories with plugins - - * [add] ``rally task report --junit`` - generates a JUnit report. - This allows users to feed reports to tools such as Jenkins. - - * [add] ``rally task abort`` - aborts a running Rally task. - When run with the ``--soft`` key, the ``rally task abort`` command - waits until the currently running subtask is finished; otherwise the - command interrupts the subtask immediately after the current scenario iterations - are finished. - - * [add] ``rally plugin show`` prints detailed information about a plugin - - * [add] ``rally plugin list`` prints a table with Rally plugin names and titles - - * [add] ``rally verify genconfig`` generates tempest.conf without running it. - - * [add] ``rally verify install`` installs Tempest for the specified deployment - - * [add] ``rally verify reinstall`` reinstalls Tempest for the specified deployment - - * [add] ``rally verify uninstall`` uninstalls Tempest from the specified deployment - - * [fix] ``rally verify start --no-use`` - the --no-use flag was always turned on - - * [remove] ``rally use`` - now each command has a ``use`` subcommand - - * [remove] ``rally info`` - - * [remove] ``rally-manage tempest`` - it is now covered by ``rally verify`` - - -* **New Rally task reports** - - * The new code is written in an OOP style, which is the base step toward pluggable reports - - * Reports are now generated with only one iteration over the resulting data, - which resolves scalability issues when working with a large - number of iterations. - - * New Load profiler plot that shows the number of iterations running - in parallel - - * Failed iterations are shown as red areas on the stacked area graph.
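-The following is an illustrative sketch, not code from this release: the
-decorator form of the renamed atomic helpers is shown in the samples above,
-and ``atomic.ActionTimer`` is assumed here to also work as a context manager
-for timing an arbitrary sub-block. The class, method, and action names are
-made up for the example.
-
-.. code-block:: python
-
-    from rally.task import atomic
-    from rally.task import scenario
-
-    class ExampleScenario(scenario.Scenario):
-
-        @scenario.configure()
-        def do_work(self):
-            # Time just this block; it would show up as the
-            # "prepare_data" atomic action in reports.
-            with atomic.ActionTimer(self, "prepare_data"):
-                data = list(range(1000))  # stands in for real work
-            return len(data)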
- -Non backward compatible changes -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -* [remove] ``rally use`` cli command - -* [remove] ``rally info`` cli command - -* [remove] ``--uuid`` parameter from ``rally deployment `` - -* [remove] ``--deploy-id`` parameter from: - ``rally task ``, ``rally verify ``, ``rally show `` - -Specs & Feature requests -~~~~~~~~~~~~~~~~~~~~~~~~ - - [feature request] Explicitly specify existing users for scenarios - - [feature request] Improve install script and add --uninstall and --version - - [feature request] Allow specifying repos & packages in install-rally.sh - - [feature request] Add ability to capture logs from tested services - - [feature request] Check RPC queue perfdata - - [spec] Refactoring Rally cleanup - - [spec] Consistent resource names - -Plugins -~~~~~~~ - -* **Scenarios**: - - [new] CinderVolumes.create_volume_backup - - [new] CinderVolumes.create_and_restore_volume_backup - - [new] KeystoneBasic.add_and_remove_user_role - - [new] KeystoneBasic.create_and_delete_role - - [new] KeystoneBasic.create_add_and_list_user_roles - - [new] FuelEnvironments.list_environments - - [new] CinderVolumes.modify_volume_metadata - - [new] NovaServers.boot_and_delete_multiple_servers - - [new] NeutronLoadbalancerV1.create_and_list_pool - - [new] ManilaShares.list_shares - - [new] CeilometerEvents.create_user_and_get_event - - [new] CeilometerEvents.create_user_and_list_event_types - - [new] CeilometerEvents.create_user_and_list_events - - [new] CeilometerTraits.create_user_and_list_trait_descriptions - - [new] CeilometerTraits.create_user_and_list_traits - - [new] NeutronLoadbalancerV1.create_and_delete_pools - - [new] NeutronLoadbalancerV1.create_and_update_pools - - [new] ManilaShares.create_and_delete_share - - [new] ManilaShares.create_share_network_and_delete - - [new] ManilaShares.create_share_network_and_list - - [new] HeatStacks.create_and_delete_stack - - [new] ManilaShares.list_share_servers - - [new] HeatStacks.create_snapshot_restore_delete_stack - - [new] KeystoneBasic.create_and_delete_ec2credential - - [new] KeystoneBasic.create_and_list_ec2credentials - - [new] HeatStacks.create_stack_and_scale - - [new] ManilaShares.create_security_service_and_delete - - [new] KeystoneBasic.create_user_set_enabled_and_delete - - [new] ManilaShares.attach_security_service_to_share_network - - [new] IronicNodes.create_and_delete_node - - [new] IronicNodes.create_and_list_node - - [new] CinderVolumes.create_and_list_volume_backups - - [new] NovaNetworks.create_and_list_networks - - [new] NovaNetworks.create_and_delete_network - - [new] EC2Servers.list_servers - - [new] VMTasks.boot_runcommand_delete_custom_image - - [new] CinderVolumes.create_and_update_volume - - -* **Contexts**: - - [new] ManilaQuotas - - Add context for setting up Manila quotas: - shares, gigabytes, snapshots, snapshot_gigabytes, share_networks - - [new] ManilaShareNetworks - - Context for share networks that will be used in the case of a - deployment with existing users. Share networks provided via the context - option "share_networks" will be balanced between all share creations - of scenarios. - - [new] Lbaas - - Context to create LBaaS-v1 resources - - [new] ImageCommandCustomizerContext - - Allows image customization using side effects of a command execution. - E.g. one can install an application to the image and use this image - for the 'boot_runcommand_delete' scenario afterwards.
- - [new] EC2ServerGenerator - - Context that creates servers using the EC2 API - - [new] ExistingNetwork - - This context lets you use existing networks that have already been - created instead of creating new networks with Rally. This is useful - when, for instance, you are using Neutron with a dumb router that is - not capable of creating new networks on the fly. - - -* **SLA**: - - [remove] max_failure_rate - use failure_rate instead - - -Bug fixes -~~~~~~~~~ - -**90 bugs were fixed, the most critical are**: - -* Many fixes related to access of rally.conf and DB files - -* Incorrect apt-get "-yes" parameter in install_rally.sh script - -* Rally bash completion doesn't exist in a virtualenv - -* The rally show networks CLI command worked only with Nova networks - -* RPS runner was not properly generating load - -* Check whether dhcp_agent_scheduler is supported or not in network cleanup - -* NetworkContext doesn't work with Nova V2.1 - -* Rally task input file was not able to use jinja2 include directive - -* Rally in docker image was not able to - -* Rally docker image didn't contain samples - -* Do not update the average duration when iteration failed - - -Documentation -~~~~~~~~~~~~~ - -* **Add plugin reference page** - - :ref:`Rally Plugins Reference page ` contains a - full list of all official Rally plugins. - -* **Add maintainers section on project info page** - - :ref:`Rally Maintainers section ` contains information - about core contributors of OpenStack Rally, their responsibilities and - contacts. This will help us make our community more transparent and open - for newbies. - -* **Added a "Who Is Using Rally" section in the docs** - -* **Many small fixes** diff --git a/doc/release_notes/archive/v0.1.1.rst b/doc/release_notes/archive/v0.1.1.rst deleted file mode 100644 index 5d17ba1e..00000000 --- a/doc/release_notes/archive/v0.1.1.rst +++ /dev/null @@ -1,131 +0,0 @@ -============ -Rally v0.1.1 -============ - -Information ----------- - -+------------------+-----------------------+ -| Commits | **32** | -+------------------+-----------------------+ -| Bug fixes | **9** | -+------------------+-----------------------+ -| Dev cycle | **11 days** | -+------------------+-----------------------+ -| Release date | **6/October/2015** | -+------------------+-----------------------+ - - -Details ------- - -This release contains new features, 6 new plugins, 9 bug fixes, -and various code and API improvements. - - -New Features -~~~~~~~~~~~~ - -* **Rally verify generates proper tempest.conf file now** - - Improved the script that generates tempest.conf: now it works out of the box for - most clouds, and most Tempest tests will pass without hacking it. - -* **Import Tempest results to Rally DB** - - ``rally verify import`` command allows you to import already existing Tempest - results and work with them as regular "rally verify start" results: - generate HTML/CSV reports & compare different runs. - - -API Changes -~~~~~~~~~~~ - -**Rally CLI changes** - - * [add] ``rally verify import`` imports raw Tempest results to Rally - - -Specs & Feature requests -~~~~~~~~~~~~~~~~~~~~~~~~ - - There are no new specs or feature requests.
- -Plugins -~~~~~~~ - -* **Scenarios**: - - [new] NeutronNetworks.create_and_list_floating_ips - - [new] NeutronNetworks.create_and_delete_floating_ips - - [new] MuranoPackages.import_and_list_packages - - [new] MuranoPackages.import_and_delete_package - - [new] MuranoPackages.import_and_filter_applications - - [new] MuranoPackages.package_lifecycle - - [improved] NovaKeypair.boot_and_delete_server_with_keypair - - New argument ``server_kwargs``; these kwargs are used to boot the server. - - [fix] NeutronLoadbalancerV1.create_and_delete_vips - - Now it works in case of concurrency > 1 - - -* **Contexts**: - - [improved] network - - Network context accepts two new arguments: - ``subnets_per_network`` and ``network_create_args``. - - [fix] network - - Fix cleanup if nova-network is used. Networks should be dissociated from - the project before deletion - - [fix] custom_image - - The Nova server that is used to create a custom image was not deleted if - the script that prepares the server failed. - - -Bug fixes -~~~~~~~~~ - -**9 bugs were fixed, the most critical are**: - -* Fix install_rally.sh script - - Set 777 access to the /var/lib/rally/database file if the system-wide method of - installation is used. - -* Rally HTML reports Overview table had a few mistakes - - * Success rate was always 100% - - * Percentiles were wrongly calculated - -* Missing Ironic, Murano and Workload (vm) options in default config file - -* ``rally verify start`` failed while getting network_id - -* ``rally verify genconfig`` hangs forever if Horizon is not available - - -Documentation -~~~~~~~~~~~~~ - -* **Fix project maintainers page** - - Update the information about Rally maintainers - -* **Document rally --plugin-paths CLI argument** - -* **Code blocks in documentation look prettier now** - diff --git a/doc/release_notes/archive/v0.1.2.rst b/doc/release_notes/archive/v0.1.2.rst deleted file mode 100644 index 4bd70971..00000000 --- a/doc/release_notes/archive/v0.1.2.rst +++ /dev/null @@ -1,206 +0,0 @@ -============ -Rally v0.1.2 -============ - -Information ----------- - -+------------------+-----------------------+ -| Commits | **208** | -+------------------+-----------------------+ -| Bug fixes | **37** | -+------------------+-----------------------+ -| Dev cycle | **77 days** | -+------------------+-----------------------+ -| Release date | **23/December/2015** | -+------------------+-----------------------+ - - -Details ------- - -This release, as well as all previous ones, includes a lot of internal and -external changes. The most important of them are listed below. - -.. warning:: Release 0.1.2 is the last release with Python 2.6 support. - - -Deprecations -~~~~~~~~~~~~ - -* Class `rally.common.objects.Endpoint` was renamed to `Credentials`. The old - class is kept for backward compatibility. Please stop using the old class - in your plugins. - - .. warning:: The dict key in the user context was changed too, from "endpoint" to "credential" - -* rally.task.utils: wait_is_ready(), wait_for(), wait_for_delete() are deprecated; - you should use wait_for_status() instead. - - -Rally Verify -~~~~~~~~~~~~ - -* Added possibility to run Tempest tests listed in a file (--tests-file argument in ``verify start``) - -* Added possibility to upload Tempest subunit stream logs into the database - -* Improvements in generating the Tempest config file - -* Reworked subunit stream parser - -* Don't install Tempest when running `rally verify [gen/show]config` - -* The Rally team tries to simplify the usage of each of our components. - Rally verification now has some kind of context, like in Tasks.
- Before launching each verification, Rally checks the existence of required - resources (networks, images, flavors, etc.) in the Tempest configuration file and - pre-creates them. Do not worry: these resources will not be forgotten - and left behind, Rally will clean them up after the verification. - - -Rally Task -~~~~~~~~~~ - -* Add --html-static argument to ``rally task report``, which allows - generating HTML reports that don't require Internet access. - -* Rally supports different API versions now via the api_versions context: - -.. code-block:: none - - CinderVolumes.create_and_delete_volume: - - - args: - size: 1 - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - api_versions: - cinder: - version: 2 - service_name: cinderv2 - -* Move rally.osclients.Clients to plugin base - - Rally OSClients are pluggable now, and it is very easy to extend OSClient for - your cloud outside of the Rally tree. - -* Add 'merge' functionality to SLA - - All SLA plugins should implement the merge() method now. - In the future this will be used for distributed load generation, - where SLA results from different runners will be merged together. - -* New optional_action_timer decorator - - Allows writing methods that can act either as an atomic action or as a regular - method. The method changes behavior based on the value of the extra key "atomic_action" - - -Rally Certification -~~~~~~~~~~~~~~~~~~~ - -* Fix Glance certification arguments - -* Add Neutron Quotas only if the Neutron service is available - -Specs & Feature Requests -~~~~~~~~~~~~~~~~~~~~~~~~ - -* Spec consistent-resource-names: - - Resource names are based on the task ID now. This is a huge step toward persistence - and disaster cleanup. - -* Add a spec for distributed load generation: - - https://github.com/openstack/rally/blob/master/doc/specs/in-progress/distributed_runner.rst - -* Improvements for scenario output format - - https://github.com/openstack/rally/blob/master/doc/specs/in-progress/improve_scenario_output_format.rst - -* Task and Verify results export command - - https://github.com/openstack/rally/blob/master/doc/specs/in-progress/task_and_verification_export.rst - - -Plugins -~~~~~~~ - -* **Scenarios**: - - * [new] NovaServers.boot_and_get_console_output - * [new] NovaServers.boot_and_show_server - * [new] NovaServers.boot_server_attach_created_volume_and_resize - * [new] NovaServers.boot_server_from_volume_and_resize - * [new] NeutronSecurityGroup.create_and_delete_security_groups - * [new] NeutronSecurityGroup.create_and_list_security_groups - * [new] NeutronSecurityGroup.create_and_update_security_groups - * [new] NeutronLoadbalancerV1.create_and_delete_healthmonitors - * [new] NeutronLoadbalancerV1.create_and_list_healthmonitors - * [new] NeutronLoadbalancerV1.create_and_update_healthmonitors - * [new] SwiftObjects.list_and_download_objects_in_containers - * [new] SwiftObjects.list_objects_in_containers - * [new] FuelNodes.add_and_remove_node - * [new] CeilometerMeters.list_matched_meters - * [new] CeilometerResource.list_matched_resources - * [new] CeilometerSamples.list_matched_samples - * [new] CeilometerStats.get_stats - * [new] Authenticate.validate_monasca - * [new] DesignateBasic.create_and_delete_zone - * [new] DesignateBasic.create_and_list_zones - * [new] DesignateBasic.list_recordsets - * [new] DesignateBasic.list_zones - * [fix] CinderVolumes.create_nested_snapshots_and_attach_volume - Removed the random nesting level, which produced a different number of atomic - actions and bad reports.
- * Support for the Designate V2 API - * A lot of improvements in Sahara scenarios - -* **Context**: - - * [new] api_versions - - This context allows us to set up clients to communicate with a specific service version. - - * [new] swift_objects - - Context pre-creates Swift objects for future use in scenarios - - * [update] sahara_cluster - - It supports a proxy server, which allows using a single floating IP for - the whole cluster. - - * [fix] cleanup - - Fix cleanup of networks: remove the VIP before the port. - -Bug fixes -~~~~~~~~~ - -**37 bugs were fixed, the most critical are**: - -* Follow symlinks in plugin discovery -* Use sed without the -i option for portability (install_rally.sh) -* Fixed a race in rally.common.broker -* Fixed incorrect iteration number on the "Failures" tab -* Fixed issue with create_isolated_networks = False -* Fixed the docker build command - -Documentation -~~~~~~~~~~~~~ - -Fixed some minor typos and inaccuracies. - -Thanks -~~~~~~ - -We would like to thank Andreas Jaeger for making it possible to keep Python 2.6 support in this release. diff --git a/doc/release_notes/archive/v0.2.0.rst b/doc/release_notes/archive/v0.2.0.rst deleted file mode 100644 index a09f9b7d..00000000 --- a/doc/release_notes/archive/v0.2.0.rst +++ /dev/null @@ -1,165 +0,0 @@ -============ -Rally v0.2.0 -============ - -Information ----------- - -+------------------+-----------------------+ -| Commits | **48** | -+------------------+-----------------------+ -| Bug fixes | **6** | -+------------------+-----------------------+ -| Dev cycle | **19 days** | -+------------------+-----------------------+ -| Release date | **1/11/2015** | -+------------------+-----------------------+ - - -Details ------- - -This release, as well as all previous ones, includes a lot of internal and -external changes. The most important of them are listed below. - -.. warning:: Release 0.2.0 doesn't support Python 2.6 - - -Deprecations -~~~~~~~~~~~~ - -* Option --system-wide-install for ``rally verify start`` was deprecated in - favor of --system-wide - -* `rally show` commands were deprecated for 3 reasons: - - They block us from making Rally a generic testing tool - - They complicate work on Rally as a Service - - You can always use standard OpenStack clients to do the same - -Rally Verify -~~~~~~~~~~~~ - -* Add "xfail" mechanism for Tempest tests. - - This mechanism allows us to list tests that are expected to fail - in a YAML file; these tests will then get "xfail" status instead of "fail". - - Use the new argument "--xfails-file" of the rally verify start command. - - -Rally Task -~~~~~~~~~~ - -* --out argument of `rally task report` is optional now - - If you don't specify --out it will just print the resulting report - -* Better scenario output support - - As you know, each scenario plugin is able to return data as a dict. - This dict contained a set of key-value pairs {<name>: <number>}, where each name - was a line on a graph and each number was one of its points. Each scenario run adds - a single point for each line on that graph. - - This allows adding extra data to Rally and seeing how some values - changed over time. However, in the case when Rally was used to execute some other - tool and collect its data, this was useless. - - To address this, **Scenario.add_output(additive, complete)** was introduced. - - Now it is possible to generate as many graphs as you need by calling this - method multiple times. - There are two types of graphs: additive and complete.
- **Additive** is the same - as the legacy concept of output data, which is generated from the results of all - iterations; **complete** is used when you would like to return a whole chart - from each iteration. - - The HTML report has proper sub-tabs, *Aggregated* and *Per iteration*, - inside the *Scenario Data* tab. - - Here is a simple example of how output can be added in any - scenario plugin: - - .. code-block:: python - - # This represents a single X point in result StackedArea. - # Values from other X points are taken from other iterations. - self.add_output(additive={"title": "How do A and B change", - "description": ("Trend for A and B " - "during the scenario run"), - "chart_plugin": "StackedArea", - "data": [["foo", 42], ["bar", 24]]}) - # This is a complete Pie chart that belongs to this concrete iteration - self.add_output( - complete={"title": "", - "description": ("Complete results for Foo and Bar " - "from this iteration"), - "chart_plugin": "Pie", - "data": [["foo", 42], ["bar", 24]]}) - -Rally Certification -~~~~~~~~~~~~~~~~~~~ - - None. - - -Specs & Feature Requests -~~~~~~~~~~~~~~~~~~~~~~~~ - - [Spec][Implemented] improve_scenario_output_format - - https://github.com/openstack/rally/blob/master/doc/specs/implemented/improve_scenario_output_format.rst - - -Plugins -~~~~~~~ - -* **Scenarios**: - - * [new] DesignateBasic.create_and_update_domain - - * [improved] CinderVolumes.create_and_attach_volume - - .. warning:: Use the "create_vm_params" dict argument instead of ``**kwargs`` - for instance parameters. - - -* **Context**: - - * [improved] images - - .. warning:: The min_ram and min_disk arguments were deprecated in favor of image_args, - which lets the user specify any image creation keyword arguments they want. - - -Bug fixes -~~~~~~~~~ - -**6 bugs were fixed**: - - * #1522935: CinderVolumes.create_and_attach_volume does not accept additional - args for create_volume - - * #1530770: "rally verify" fails with error 'TempestResourcesContext' object - has no attribute 'generate_random_name' - - * #1530075: cirros_img_url in rally.conf doesn't take effect in - verification tempest - - * #1517839: Make CONF.set_override with parameter enforce_type=True by default - - * #1489059: "db type could not be determined" running py34 - - * #1262123: Horizon is unreachable outside VM when we are using DevStack + - OpenStack - - -Documentation -~~~~~~~~~~~~~ - - None. - -Thanks -~~~~~~ - - 2 Everybody! diff --git a/doc/release_notes/archive/v0.3.0.rst b/doc/release_notes/archive/v0.3.0.rst deleted file mode 100644 index bdb7e452..00000000 --- a/doc/release_notes/archive/v0.3.0.rst +++ /dev/null @@ -1,188 +0,0 @@ -============ -Rally v0.3.0 -============ - -Information ----------- - -+------------------+-----------------------+ -| Commits | **69** | -+------------------+-----------------------+ -| Bug fixes | **7** | -+------------------+-----------------------+ -| Dev cycle | **29 days** | -+------------------+-----------------------+ -| Release date | **2/16/2016** | -+------------------+-----------------------+ - - -Details ------- - -This release, as well as all previous ones, includes a lot of internal and -external changes. The most important of them are listed below. - -.. warning:: In this release Rally DB schema migration is introduced. While - upgrading Rally from previous versions it is now required to run - ``rally-manage db upgrade``. Please see the 'Documentation' section for details. - - -CLI changes -~~~~~~~~~~~ - -* .. warning:: [Removed] ``rally info`` in favor of ``rally plugin *``.
- It was deprecated for a long time. - -* [Modified] ``rally deployment check`` now prints services that don't have - names, since such services can be used via the api_versions context. - -* .. warning:: [Modified] ``rally verify [re]install`` - option --no-tempest-venv was deprecated in favor of --system-wide - -* [Added] ``rally-manage db revision`` displays the current revision of - the Rally database schema - -* [Added] ``rally-manage db upgrade`` upgrades a pre-existing Rally - database schema to the latest revision - -* [Added] ``rally-manage db downgrade`` downgrades an existing Rally - database schema to the previous revision - -* [Added] ``rally task export`` exports task results to external - services (only the CLI command was introduced, no real service support - is implemented yet; however, one could write one's own plugins) - -* [Added] ``rally verify export`` exports verification results to - external services (only the CLI command was introduced, no real service support - is implemented yet; however, one could write one's own plugins) - -Rally Deployment -~~~~~~~~~~~~~~~~ - -* .. warning:: ``fuel`` deployment engine is removed since it was outdated and - lacked both usage and support - -Rally Task -~~~~~~~~~~ - -Add custom labels for "Scenario Output" charts - -* The X-axis label can be passed to add_output() via the - "axis_label" key of the chart options dict. - The key is named "axis_label" rather than "x_label" - because a chart can be displayed as a table, so we explicitly - mention "axis" in the option name to make it clear that this - parameter does not apply to tables - -* The Y-axis label can be passed to add_output() via the - "label" key of the chart options dict. - In some cases this parameter is also used for rendering - tables: it becomes the column name if a chart with a - single iteration is transformed into a table - -* As mentioned above, if we have an output chart - with a single iteration, then it is transformed into a table, - because a chart with a single value is useless - -* OutputLinesChart was added; it is displayed by - NVD3's lineChart() - -* Chart "description" is optional now. The description is - not shown if it is not specified explicitly - -* The Dummy.add_output scenario was improved to demonstrate labels - and OutputLinesChart - -* Fix: if Y-values are too long and overlap the chart box, - JavaScript now updates the chart width at runtime to fit - the chart graph plus the Y values into their DOM container - -Rally Certification -~~~~~~~~~~~~~~~~~~~ - - None. - -Specs & Feature Requests -~~~~~~~~~~~~~~~~~~~~~~~~ - -* [Spec][Introduced] Export task and verification results to external services - - https://github.com/openstack/rally/blob/master/doc/specs/in-progress/task_and_verification_export.rst - -* [Spec][Implemented] Consistent resource names - - https://github.com/openstack/rally/blob/master/doc/specs/implemented/consistent_resource_names.rst - -* [Feature request][Implemented] Tempest concurrency - - https://github.com/openstack/rally/blob/master/doc/feature_request/implemented/add_possibility_to_specify_concurrency_for_tempest.rst - -Plugins -~~~~~~~ - -* **Scenarios**: - - - [added] VMTasks.workload_heat - - - [added] NovaFlavors.list_flavors - - - [updated] Flavors for Master and Worker node groups are now - configured separately for SaharaCluster.* scenarios - -* **Context**: - - - ..
- warning:: [deprecated] rally.plugins.openstack.context.cleanup - in favor of rally.plugins.openstack.cleanup - - - [improved] sahara_cluster - - Flavors for Master and Worker node groups are now - configured separately in the ``sahara_cluster`` context - -Miscellaneous -~~~~~~~~~~~~~ - -* Cinder version 2 is used by default - -* Keystone API v3 compatibility improved - - - Auth URLs in both formats, http://foo.rally:5000/v3 - and http://foo.rally:5000, are supported for Keystone API v3 - - - The Tempest configuration file is created properly according - to the Keystone API version used - -* ``install_rally.sh --branch`` now accepts all git tree-ish, - not just branches or tags - -* VM console logs are now printed when Rally fails to connect to a VM - -* Add support for Rally database schema migration (see the 'Documentation' section) - -Bug fixes -~~~~~~~~~ - -**7 bugs were fixed**: - -* #1540563: Rally is incompatible with the Liberty Neutron client - - The root cause is that in the Neutron Liberty client, - the _fx function doesn't take any explicit keyword parameters, - but Rally was passing one (tenant_id). - -* #1543414: The `rally verify start` command fails when running - a verification against Kilo OpenStack - -* #1538341: Error in logic to retrieve image details in image_valid_on_flavor - -Documentation -~~~~~~~~~~~~~ - -* Add documentation for DB migration - - https://github.com/openstack/rally/blob/master/rally/common/db/sqlalchemy/migrations/README.rst - -Thanks -~~~~~~ - - 2 Everybody! diff --git a/doc/release_notes/archive/v0.3.1.rst b/doc/release_notes/archive/v0.3.1.rst deleted file mode 100644 index 9d41b77a..00000000 --- a/doc/release_notes/archive/v0.3.1.rst +++ /dev/null @@ -1,47 +0,0 @@ -============ -Rally v0.3.1 -============ - -Information ----------- - -+------------------+-----------------------+ -| Commits | **9** | -+------------------+-----------------------+ -| Bug fixes | **6** | -+------------------+-----------------------+ -| Dev cycle | **2 days** | -+------------------+-----------------------+ -| Release date | **2/18/2016** | -+------------------+-----------------------+ - - -Details ------- - -This release is more about bug-fixes than features. - -.. warning:: Please update 0.3.0 to the latest release. - - -Features -~~~~~~~~ - -* Pass api_versions info to the glance images context - -* [Verify] Don't create a new flavor when the flavor already exists - -Bug fixes -~~~~~~~~~ - -**6 bugs were fixed, the most critical are**: - -* #1545889: Existing deployment with given endpoint doesn't work anymore - -* #1547092: Insecure doesn't work with Rally 0.3.0 - -* #1547083: Rally Cleanup failed with api_versions context in 0.3.0 release - -* #1544839: Job gate-rally-dsvm-zaqar-zaqar fails since the recent Rally patch - -* #1544522: Non-existing "called_once_with" method of Mock library is used diff --git a/doc/release_notes/archive/v0.3.2.rst b/doc/release_notes/archive/v0.3.2.rst deleted file mode 100644 index f302c6a3..00000000 --- a/doc/release_notes/archive/v0.3.2.rst +++ /dev/null @@ -1,216 +0,0 @@ -============ -Rally v0.3.2 -============ - -Information ----------- - -+------------------+-----------------------+ -| Commits | **55** | -+------------------+-----------------------+ -| Dev cycle | **25 days** | -+------------------+-----------------------+ -| Release date | **3/14/2016** | -+------------------+-----------------------+ - - -Details ------- - -This release, as well as all previous ones, includes a lot of internal and -external changes. The most important of them are listed below.
- - -CLI changes -~~~~~~~~~~~ - -* .. warning:: [Modified] Option '--tempest-config' for the 'rally verify - reinstall' command was deprecated for removal. - -* .. warning:: [Removed] Option `--system-wide-install` was removed from - `rally verify` commands in favor of the `--system-wide` option. - -* .. warning:: [Modified] The Tempest installation step during execution of - the `rally verify start` command was deprecated and will be removed in the - future. Please use `rally verify install` instead. - -* Rework commands.task.TaskCommands.detailed. Now the output of the command - contains the same results as the HTML report. - -Rally Verify -~~~~~~~~~~~~ - -* Re-run failed Tempest tests - - Add the ability to re-run the Tempest tests that failed in the last test - execution. Sometimes Tempest tests fail due to a special temporary condition - in the environment; in such cases it is very useful to be able to re-execute - those tests. - - Running the following command will re-run all the tests that failed during - the last test execution, regardless of what test suite was run. - - .. code-block:: bash - - rally verify start --failing - - -Specs & Feature Requests -~~~~~~~~~~~~~~~~~~~~~~~~ - -* `[Spec][Introduced] Refactoring scenario utils`__ - -__ https://github.com/openstack/rally/blob/master/doc/specs/in-progress/refactor_scenario_utils.rst - -* `[Spec] Deployment unification`__ - -__ https://github.com/openstack/rally/blob/master/doc/specs/in-progress/deployment_type.rst - -Plugins -~~~~~~~ - -* **Scenarios**: - - * [updated] Fix flavor for cloudera manager - - Cloudera manager needs a master-node flavor - - * [added] Expand Nova API benchmark in Rally - - Add support for listing nova hosts, agents, availability-zones - and aggregates. - - * [updated] Make sure VolumeGenerator uses the api version info during cleanup - - * Designate V2 - Add recordset scenarios - - Add create_and_(list|delete)_recordset scenarios. - Also remove the test that checks the allowed methods; this is so - we can have a private method _walk_pages that fetches - pages for us instead of attempting to fetch one giant list at once. - - * unify \*_kwargs name in scenarios - - When running a scenario, `kwargs` is used for the default keyword - arguments. But in some scenarios more than one service is called, and - we use xxx_kwargs in that case. - - However, some xxx_kwargs were not unified for the same usage [0]. Unifying - these avoids misleading end users. Another improvement is to - add xxx_kwargs with empty settings to the scenario config files. - - [0] http://paste.openstack.org/show/489505/ - - * .. warning:: Deprecated arguments 'script' and 'interpreter' were removed - in favor of the 'command' argument. - - VM task scenarios execute a script with an interpreter provided through a - formatted argument called 'command', which expects the remote_path or - local_path of the script and optionally an interpreter with which the - script has to be executed. - -Miscellaneous -~~~~~~~~~~~~~ - -* Avoid using `len(x)` to check if x is empty - - These cases used `len()` to check whether a collection has items. Since - collections have a boolean representation too, check directly for true / - false. Also fix the wrong mock in its unit test. - -* Fix install_rally.sh to get it to work on MacOSX - - On MacOSX, `mktemp` requires being passed a template. This change modifies - the calls to `mktemp` to explicitly pass a template so that the code works - on both MacOSX and Linux.
- -* Use new-style Python classes - - There were some classes in the code that didn't inherit from - anything, which makes them old-style classes. A new-style class is the - recommended way to create a class in modern Python. A new-style class - should always inherit from `object` or another new-style class. - - A hacking rule was added as well. - -* Make Rally cope with unversioned keystone URL - - With the change, the client version that's returned is now determined by - the keystoneclient library itself based on whether you supply a URL with a - version in it or not. - -* Fix rally-mos job to work with mos-8.0 - - Also remove hardcoded values for some other jobs. - -* Add name() to ResourceManager - - This will allow us to perform cleanup based on the name. - -* Add task_id argument to name_matches_object - - This will be used to ensure that we are only deleting resources for a - particular Rally task. - -* Extend api.Task.get_detailed - - Extend api.Task.get_detailed with the ability to return task data as a dict with - extended results. - -Bug fixes -~~~~~~~~~ - -**The most critical fixed bugs are**: - -* #1547624: Wrong configuration for baremetal(ironic) tempest tests - -* #1536800: openrc values are not quoted - - The openrc file created after rally deployment --fromenv did not quote the - values for environment variables that will be exported. - -* #1509027: Heat delete_stack never exits if status is DELETE_FAILED - -* #1540545: Refactored atomic action in authenticate scenario - -* #1469897: Incompatible with Keystone v3 argument in service create scenario - -* #1550262: Different results in ``rally task detailed``, ``rally task report`` - and ``rally task status`` commands. - -* #1553024: A backward-incompatible change in neutronclient (release 4.1.0) broke - Tempest config generation; it was updated to support the latest neutronclient. - -Documentation -~~~~~~~~~~~~~ - -* Add documentation for DB migration - -* Make documentation for output plugins - - * Add descriptive docstrings for plugins based on OutputChart - * Register these plugins in `Rally Plugins Reference`__ - -__ https://docs.openstack.org/rally/latest/plugins/plugin_reference.html - -* Documentation tox fix - - Added information about debugging unit tests with tox. Replaced 3 references - to py26 with py34 to reflect the current Rally tox configuration. - -* Change structure of the Rally plugin and plugin references page - -* Update the scenario development, runner and context sections - -* The design of the `Rally Plugins Reference`__ page was improved - -__ https://docs.openstack.org/rally/latest/plugins/plugin_reference.html - -* A new page was added - `CLI references`__ - -__ https://docs.openstack.org/rally/latest/cli_reference.html - -Thanks -~~~~~~ - -To Everybody! diff --git a/doc/release_notes/archive/v0.3.3.rst b/doc/release_notes/archive/v0.3.3.rst deleted file mode 100644 index 7fe551e8..00000000 --- a/doc/release_notes/archive/v0.3.3.rst +++ /dev/null @@ -1,112 +0,0 @@ -============ -Rally v0.3.3 -============ - -Information ----------- - -+------------------+-----------------------+ -| Commits | **20** | -+------------------+-----------------------+ -| Dev cycle | **10 days** | -+------------------+-----------------------+ -| Release date | **3/24/2016** | -+------------------+-----------------------+ - - -Details ------- - -Half of the patches relate to Cleanup. We have once again proved that even ideal -stuff can be improved.
:) - -Specs & Feature Requests -~~~~~~~~~~~~~~~~~~~~~~~~ - -* `[Spec][Introduced] Improve atomic actions format`__ - -__ https://github.com/openstack/rally/blob/master/doc/specs/in-progress/improve_atomic_actions_format.rst - -Plugins -~~~~~~~ - -* **Cleanups**: - - * Use the proper attribute to get the heat stack name - - * Always assign a name to created images. - - This is necessary for name-based cleanup. If a name is not specified, one - will be generated automatically. - - * Improve filtering of Glance images in case of the V2 API - - * Delete only images created by the images context - - Since the images context allows creating images with arbitrary names, - name-based cleanup won't work for it, so we have to delete the exact list - of images that it created instead. - - * New config option to set cleanup threads - - Allow the user to change the number of cleanup threads via the rally - config. When scaling out to thousands of instances, the cleanup can take - forever with the static 20 threads. - - * Add inexact matching to name_matches_object - - This will support places where we create resources with names that start - with a given name pattern, but include some additional identifier - afterwards. For instance, when bulk creating instances, Nova appends a UUID - to each instance name. - -* **Scenarios**: - - * Add a sample template for testing Heat caching. - - * Introduced the new scenario `Dummy.dummy_random_action`__. It is suitable for - demonstrating the upcoming trends report. - -__ http://rally.readthedocs.org/en/latest/plugin/plugin_reference.html#dummy-dummy-random-action-scenario - -* **Contexts**: - - The `api_versions`__ context was extended to support switching between Keystone V2 - and V3 API versions. Now it is possible to use one Rally deployment to check - both Keystone APIs. - -__ http://rally.readthedocs.org/en/latest/plugin/plugin_reference.html#api-versions-context - -* **Newcomer in the family**: - - All ResourceType classes are pluggable now and it is much easier to use and - extend them. - - .. warning:: Decorator ``rally.task.types.set`` is deprecated now in favor of - ``rally.task.types.convert``. - - -Bug fixes -~~~~~~~~~ - -* #1536172: rally deployment destroy failed with a traceback for failed - deployments. At the moment it is impossible to delete a deployment if for - some reason its deployment engine plugin cannot be found, because an exception will - be thrown. - -Documentation -~~~~~~~~~~~~~ - -* Remove extra link in `All release notes` - - Previously, two links for the latest release were presented. - -* Update release notes for 0.3.2 - - * Fixed indents for warning messages - * Fixed all references - -Thanks -~~~~~~ - - To Everybody! diff --git a/doc/release_notes/archive/v0.4.0.rst b/doc/release_notes/archive/v0.4.0.rst deleted file mode 100644 index ea1618be..00000000 --- a/doc/release_notes/archive/v0.4.0.rst +++ /dev/null @@ -1,286 +0,0 @@ -============ -Rally v0.4.0 -============ - -Information ----------- - -+------------------+-----------------------+ -| Commits | **76** | -+------------------+-----------------------+ -| Bug fixes | **12** | -+------------------+-----------------------+ -| Dev cycle | **28 days** | -+------------------+-----------------------+ -| Release date | **4/18/2016** | -+------------------+-----------------------+ - - -Details ------- - -.. warning:: The Rally DB schema was changed since the previous release. - See `HOWTO `_ - about updating your database.
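-For readers curious what ``rally-manage db upgrade`` boils down to: Rally's
-migrations are Alembic-based, so the upgrade is roughly equivalent to the
-following sketch (the config path here is a placeholder, not Rally's actual
-layout):
-
-.. code-block:: python
-
-    from alembic import command
-    from alembic.config import Config
-
-    # Point Alembic at the migration environment and apply every
-    # migration up to the latest revision ("head").
-    cfg = Config("alembic.ini")
-    command.upgrade(cfg, "head")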
- - -CLI changes -~~~~~~~~~~~ - -* Add status messages for the DB migration process - -* Display task errors in human-friendly form - -* Support OS_PROJECT_NAME as well as OS_TENANT_NAME - -Messages -~~~~~~~~ - -* Removed the deprecation warning for the transmitted "name" attribute while - creating Neutron resources. - - .. warning:: Deprecated code was deleted. - -* Suppress insecure URL warning messages - - Do not spam end users with insecure URL messages, because it is - quite a valid case in the testing process - -Database -~~~~~~~~ - -While preparing for deployment refactoring: - -* the db schema was changed; -* a migration adding the new column `credentials` to the deployment model was added; -* columns `users` and `admin` were dropped. - -Rally Task -~~~~~~~~~~ - -* Remove the deprecated scenario output mechanism via returning a value - - .. warning:: Deprecated code was deleted. - -* Friendlier error message with an empty task file - - This is particularly useful when a Jinja2 template results in an empty - task. The previous error message wasn't very helpful: - - Task config is invalid: `'NoneType' object has no attribute 'get'` - -* Add Heat template validator - -Plugins -~~~~~~~ - -**Scenarios**: - -* Extend VM bind actions with "pause_unpause", "suspend_resume", "lock_unlock", - "shelve_unshelve". - -* Add the exact error message to the `VMTasks.runcommand_heat scenario`__ - -__ http://rally.readthedocs.org/en/0.4.0/plugin/plugin_reference.html#vmtasks-runcommand-heat-scenario - -* Add heat scenarios: output-show, output-list - - The current patch contains 4 scenarios from the heat repo: - - `output-show for old algorithm - `_ - - `output-show for new algorithm - `_ - - `output-list for old algorithm - `_ - - `output-list for new algorithm - `_ - - -**Contexts**: - -* Reduce the default speed of user creation in the users context from 30 to 20. - -**SLAs**: - -* *NEW!!* MaxAverageDurationPerAtomic: maximum average duration of one - iteration's atomic actions in seconds. - - `Plugin Reference `_ - -**Reports**: - -* Improve results calculation in charts.Table - -* Use int instead of float for the Y axis. It is the number of parallel iterations and - can't be a float. - -* Remove accuracy that makes no sense and creates a lot of noise on this graph - -* Include failed iterations as well, otherwise we will calculate load - incorrectly - -* The graph should start from 0 (beginning of the experiment) - -* Add 2 points at the end of the graph so that it ends with 0 iterations - in parallel - -**Task Exporter**: - -In the previous release we introduced a new mechanism to export results to various -external systems and formats. - -In this release, we added the first plugin for it - `file_exporter` - -**Services**: - -Remove the hardcoded timeout from the heat service - -**Utils**: - -Make glance web uploads streamable - -Without this change the entire file gets loaded into memory and can cause -issues (a small sketch of the idea follows just before the bug-fix list below). - -Rally Verify -~~~~~~~~~~~~ - -* Set time precision to 3 digits (instead of 5) after the dot. - -* Don't use the "--parallel" flag when concurrency == 1 - - If concurrency equals 1, it means that we use only one thread to run - Tempest tests and the "--parallel" flag is not needed. - -Plugin for DevStack -~~~~~~~~~~~~~~~~~~~ - -* Support being enabled with a different plugin name - - Allow rally to be installed by devstack through a different plugin - name, e.g.: - - .. code-block:: bash - - enable_plugin test-rally http://github.com/rally/rally.git master - -* Removed uncalled code - - Devstack won't "source plugin.sh source" any more.
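-To illustrate the streaming-upload idea mentioned in the Utils note above:
-instead of reading a whole image file into memory, one can hand the HTTP
-layer a lazy chunk iterator. A minimal sketch (the upload call in the
-comment is illustrative, not Rally's actual code):
-
-.. code-block:: python
-
-    def iter_chunks(path, chunk_size=64 * 1024):
-        """Yield a file's content piece by piece, keeping memory flat."""
-        with open(path, "rb") as f:
-            while True:
-                chunk = f.read(chunk_size)
-                if not chunk:
-                    return
-                yield chunk
-
-    # e.g. the requests library accepts any iterable of bytes as a
-    # streamed (chunked) request body:
-    # requests.put(upload_url, data=iter_chunks("/tmp/image.qcow2"))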
- -Bug fixes -~~~~~~~~~ - -**12 bugs were fixed**: - -* X-Fail mechanism did not work for a TestCase which failed on the setUp step - - If Tempest fails in a test's setUpClass(), there is only one subunit event - for each TestCase. In this case, Rally did not check the partial test against the x-fail - list and marked the test as "fail" instead of "x-fail". - - `Launchpad bug-report #1568133 - `_ - -* Weak isolation of scenario arguments between iterations - - Input arguments for a subtask were shared between all iterations. The Rally team - found one scenario which modified a mutable input variable. - - Affected scenario: NeutronNetworks.create_and_update_ports - -* Incompatible filters between V1 and V2 for Glance images listing - - Glance V1 and V2 have different filters. For example, "owner" is a separate - kwarg in V1, not a generic filter. Also, visibility has different labels in - different APIs. We modified our Glance wrapper to support the Glance V2 format - of filters for both V1 and V2 - -* Wrong way to store validation errors - - Results of failed task validations were saved in an incorrect format. This broke - the `rally task detailed` command and made it user-unfriendly. - - `Launchpad bug-report #1562713 - `_ - -* Hardcoded task status in `rally task results` - - If there are no results for a task, `rally task results` printed a message that - the task has failed status, but that may not be true (tasks in the running state do not - have results yet). - - `Launchpad bug-report #1539096 - `_ - -* Tempest context failed to create network resources - - While merging an improvement for keystoneclient, we used a wrong way to obtain - the tenant id in TempestContext. - - `Launchpad bug-report #1550848 - `_ - -* Tasks based on Tempest failed to parse execution time. - - There is an ability in Rally to launch tasks based on Tempest. Since launching - Tempest is just a subprocess, subunit needs to be parsed to set the correct - atomic actions. - - There was an issue while converting the task execution time. - - `Launchpad bug-report #1566712 - `_ - -* JSONSchema's huge impact on task performance - - Before the runner sent data to the engine, we validated it against a JSON schema. This operation - is very expensive and in some cases can take a lot of time. - - Here are test results with the Dummy.dummy_output scenario, sleep 0.5s - (added manually), 8000 iterations, 400 in parallel: - - * on the master branch before the fix: - Load duration: 117.659588099 - Full duration: 227.451056004 - - * on master before the fix, but with jsonschema validation removed in the scenario: - Load duration: 12.5437350273 - Full duration: 128.942219973 - - * with this patch (pure python validation): - Load duration: 11.5991640091 - Full duration: 22.7199981213 - -* Wrong calculation of running iterations in parallel - - The load profile chart was calculated wrongly. - It showed more iterations running in parallel than were actually running. - -* Rally did not show the "missing argument" error raised by argparse while parsing - cli args - - `Launchpad bug-report #1562916 - `_ - -* Issue while checking required arguments in CLI - - There was a possible issue in case of several required arguments - - `Launchpad bug-report #1555764 - `_ - -* Prepare step of verification did not check visibility of the obtained image - - When we request a list of images to choose one of them for tests, we should - make sure all images are active and they are PUBLIC. If images are not - public, we will have failures of Tempest tests as described in the bug. - - `Launchpad bug-report #1564431 - `_ - -Thanks -~~~~~~ - - 2 Everybody!
diff --git a/doc/release_notes/archive/v0.5.0.rst b/doc/release_notes/archive/v0.5.0.rst deleted file mode 100644 index 846e91d6..00000000 --- a/doc/release_notes/archive/v0.5.0.rst +++ /dev/null @@ -1,410 +0,0 @@ -============ -Rally v0.5.0 -============ - -Information ----------- - -+------------------+-----------------------+ -| Commits | **175** | -+------------------+-----------------------+ -| Bug fixes | **19** | -+------------------+-----------------------+ -| Dev cycle | **93 days** | -+------------------+-----------------------+ -| Release date | **7/20/2016** | -+------------------+-----------------------+ - - -Details ------- - -This release took much more time than we expected, but we have a lot of -reasons for such a delay, and if you look at our change-log, you will understand -them. :) - -Here is a quick introduction: - -* To make our releases as stable as possible, we added upper limits for - each of our requirements; -* A lot of deprecated lines of code were removed, so be careful; -* Statistics trends for given tasks were introduced; -* Support for tempest plugins was added; -* Several new pages in the docs. - -Specs & Feature Requests -~~~~~~~~~~~~~~~~~~~~~~~~ - -* `[Introduced && implemented] Introduce class-based scenario implementation `_ - -* `[Introduced] Rally Task Validation refactoring `_ - -* `[Introduced] Scaling & Refactoring Rally DB `_ - -* `[Introduced] SLA Performance degradation plugin `_ - -Logging -~~~~~~~ - -* disable urllib3 warnings only if the library provides them - -Database -~~~~~~~~ - -[doesn't require migration] -Transform the DB layer to return dicts, not SQLAlchemy models - -Rally Deployment -~~~~~~~~~~~~~~~~ - -* Support single-AZ deployment - - This supports the case where OpenStack is deployed with a single AZ for both - controller(s) and compute(s), and not all hosts in the AZ that contains an - instance are guaranteed to have the nova-compute service. - -* Extend creation from environment with several new vars - - - OS_ENDPOINT_TYPE/OS_INTERFACE - - OS_USER_DOMAIN_NAME - - OS_PROJECT_DOMAIN_NAME - -* Improve the devstack plugin for Keystone V3 - -Rally Task -~~~~~~~~~~ - -*NEW!!* Statistics trends for given tasks. - - -Rally Verify -~~~~~~~~~~~~ - -* Remove the '--tempest-config' arg from the 'reinstall' command - - .. warning:: Using `--tempest-config` became an error in this release. - Use the `rally verify genconfig` cmd for all config-related stuff. - -* Don't install Tempest when running `rally verify start` - - .. warning:: You should use the `rally verify install` cmd to install Tempest now - -* Add the ability to set up the version of Tempest to install - - `CLI argument to setup version `_ - -* Configure the 'aodh' service in the 'service_available' section - -* Check the existence of the Tempest tree in the `rally verify discover` cmd - -* Make Tempest work with an auth url which doesn't include the keystone version - - Tempest needs /v2.0 and /v3 at the end of URLs. Actually, we can't fix - Tempest, so we extended our configuration module with a workaround which allows - specifying auth_url without a version in the Rally deployment config. - -* Use the default list of plugins for sahara - -* Move Tempest-related options of the Rally configuration to a separate section. - -* *NEW!!* Support for tempest plugins. - - `CLI argument to install them `_ - - -Plugins -~~~~~~~ - -In this release we are happy to introduce a new entity - plugin bases - -We have a lot of base plugin entities: Context, Scenario, SLA, etc.
-Sometimes plugins of different bases can have equal names (i.e. the ceilometer -OSClient and the ceilometer Context). This is normal and we should allow such -conflicts. To support such cases we introduced a new entity - the plugin base. -Statements about plugin bases: - - - Each plugin base is a unique entity; - - Names of plugin bases can't conflict with each other; - - Names of two or more plugins in one plugin base can't conflict with each - other (in case of the same namespace); - - Names of two or more plugins in different plugin bases can conflict - -Current list of plugin bases: - - - rally.task.context.Context - - rally.task.scenario.Scenario - - rally.task.types.ResourceType - - rally.task.exporter.TaskExporter - - rally.task.processing.charts.Chart - - rally.task.runner.ScenarioRunner - - rally.task.sla.SLA - - rally.deployment.serverprovider.provider.ProviderFactory - - rally.deployment.engine.Engine - - rally.osclients.OSClient - -**OSClients** - -* *NEW!!* Support for Senlin client - -* *NEW!!* Support for Gnocchi client - -* *NEW!!* Support for Magnum client - -* *NEW!!* Support for Watcher client - -* Transmit endpoint_type to saharaclient - -**Scenarios**: - -* *NEW!!*: - - - `Authenticate.validate_ceilometer `_ - - `CinderVolumes.create_volume_from_snapshot `_ - - `CinderVolumes.create_volume_and_clone `_ - - `NovaFlavors.create_and_list_flavor_access `_ - - `NovaFlavors.create_flavor `_ - - `NovaServers.boot_and_update_server `_ - - `NovaServers.boot_server_from_volume_snapshot `_ - -* [Sahara] Add configs to the MapR plugin - -* Extend CinderVolumes.create_and_upload_volume_to_image with an "image" argument - - `Plugin Reference `_ - -* Deprecate the Dummy.dummy_with_scenario_output scenario in favor of Dummy.dummy_output - - .. warning:: The Dummy.dummy_with_scenario_output scenario will be removed after - several releases - - `Deprecated Plugin Reference `_ - `New Plugin Reference `_ - -* Extend CinderVolumes.create_volume_and_clone with nested_level - - Add the nested_level argument for nested cloning of a volume to a new volume - -* Extend `CinderVolumes.create_nested_snapshots_and_attach_volume - `_ - - Two new arguments were added: create_volume_kwargs and create_snapshot_kwargs - - .. warning:: All arguments related to snapshot creation should be transmitted - only via create_snapshot_kwargs. - -* Introduce a new style of scenarios - class-based. - - `Spec Reference `_ - -* Improve the report for VMTasks.boot_runcommand_delete - -* [Sahara] Added the 5.5.0 version for cdh-plugin and the 1.6.0 version for spark - -* Extend the boot_server_from_volume_and_delete, boot_server_from_volume, - boot_server_from_volume_and_live_migrate, boot_server_from_volume_snapshot - scenarios of the NovaServers class with a "volume_type" parameter. - -**Contexts**: - -* *NEW!!*: - - - `Cinder volume_types `_ - - `Murano environments `_ - - `Heat dataplane `_ - -* Use the Broker Pattern in the Keystone roles context - -* Use immutable types for locking context configuration - - Since context configuration passed to Context.__init__() was a mutable type - (dict or list), sometimes we had unexpected changes done by unpredictable - code (for example, in wrappers). - -* Add possibility to balance usage of users - - Until now, all users for tasks were picked randomly and there was no way - to balance them between tasks. Balancing can be very useful when there is a difference - between the first usage of a tenant/user and all consecutive ones; in such - cases we get different load results.
- - Therefore, the "users" context was extended with a new config option - 'user_choice_method' that defines the approach for picking users. - - Two values are available: - - random - - round_robin - - The default one is compatible with the old approach - "random". - -* Make the sahara_image and custom_image contexts glance v2 compatible - -* Extend the servers context with a "nics" parameter - -* Extend the network context with a "dns_nameservers" parameter - -* Extend the volume context with a "volume_type" parameter - -**Cleanup**: - -* Mark several cleanup resources as tenant_resource - - Nova servers and security groups are tenant-related resources, but the resource - decorator missed that fact, which made cleanup try to delete the same resources - several times. - -* Turn off redundant nova servers cleanup for the NovaFlavors.list_flavors scenario - -* Add neutron cleanup for NeutronSecurityGroup.create_and_delete_security_groups - -**Exporter**: - -Rename task-exporter "file-exporter" to "file". - -.. warning:: "file-exporter" is deprecated and will be removed in further - releases. - -**Types**: - -Remove deprecated types. - -.. warning:: You should use the rally.task.types.convert decorator instead of - rally.task.types.set - -**Validators** - -* Add a required_api_version validator -* Add validators for scenario arguments - -**Utils**: - -Use the glance wrapper where appropriate to support compatibility between V1 and V2 - -Bug fixes -~~~~~~~~~ - -**19 bugs were fixed**: - -* Wrong argument order in the Keystone wrapper in case of V2 and V3 - -* AttributeError while disabling urllib3 warnings on old installations - - `Launchpad bug-report #1573650 - `_ - -* install_rally.sh script failed while obtaining setuptools - -* "-inf" load duration in case of a wrong runner plugin and failed start of - contexts - -* Strange input task in the report - - `Launchpad bug-report #1570328 - `_ - -* Wrong behaviour of boot_server_from_volume scenarios in case of booting a - server from an image. - - The image arg must be None when booting a server from a volume; otherwise - the server is still booted from the image. - - Affected scenarios: - NovaServers.boot_server_from_volume - NovaServers.boot_server_from_volume_and_delete - NovaServers.boot_server_from_volume_and_resize - NovaServers.boot_server_from_volume_and_live_migrate - - `Launchpad bug-report #1578556 - `_ - -* Weak validation of the JSON schema of the RPS runner - - The JSON schema of the RPS runner didn't have a "required" field. It means that - users were able to pass wrong configs and we would get a runtime error while - running the task. - -* Rally doesn't take the cacert setting into account while creating a keystone session - - `Launchpad bug-report #1577360 - `_ - -* Heat scenarios fail when the API uses TLS - - `Launchpad bug-report #1585456 - `_ - -* Wrong example in the comment of the manila_share_networks context - - `Launchpad bug-report #1587164 - `_ - -* There is no way to get the UUID of a verification after it is created by - "rally verify start" or "rally verify import_results" when --no-use is set - - `Launchpad bug-report #1587034 - `_ - -* Exposed ssh timeout and interval in the vm scenario - - `Launchpad bug-report #1587728 - `_ - -* Ceilometer scenario doesn't require the "ceilometer" ctx - - `Launchpad bug-report #1557642 - `_ - -* "servers" context requires setting a network id when multiple possible networks are - found.
  `Launchpad bug-report #1592292
  `_

* Incorrect nested_level data type in
  create_nested_snapshots_and_attach_volume

  `Launchpad bug-report #1594656
  `_

* Rally cleanup of servers raised an exception

  `Launchpad bug-report #1584104
  `_

* Stopping a server is redundant before cold-migrating it

  `Launchpad bug-report #1594730
  `_

* The existing_users context didn't work in case of Keystone v3

* Whether a flavor's disk was validated or not depended on the boot type of
  the instance

  `Launchpad bug-report #1596756
  `_

Documentation
~~~~~~~~~~~~~

* Re-use the openstack theme for building docs outside rtd.

  `Rally Docs at docs.openstack.org
  `_

* Add a page for the Verification component

  `RTD page for Verification component
  `_

* Add a glossary page

  `RTD page for Glossary
  `_

* Adjust the docs reference to the "KeystoneBasic.authenticate" scenario

  `Step 6. Aborting load generation on success criteria failure
  `_

Thanks
~~~~~~

 2 Everybody!

diff --git a/doc/release_notes/archive/v0.6.0.rst b/doc/release_notes/archive/v0.6.0.rst
deleted file mode 100644
index 2649ac69..00000000
--- a/doc/release_notes/archive/v0.6.0.rst
+++ /dev/null
@@ -1,198 +0,0 @@

============
Rally v0.6.0
============

Overview
--------

+------------------+-----------------------+
| Release date     | **9/05/2016**         |
+------------------+-----------------------+

Details
-------

Common
~~~~~~

* Added Python 3.5 support
* Sync requirements with OpenStack global-requirements
* Start using the latest authentication approach - the keystoneauth library
* Start porting all scenario plugins to the class-based style.

Specs & Feature Requests
~~~~~~~~~~~~~~~~~~~~~~~~

* `[Implemented] SLA Performance degradation plugin `_
* `[Proposed] New Tasks Configuration section - hook `_

Database
~~~~~~~~

* disable the db downgrade api
* [require migration] upgrade the deployment config

Docker image
~~~~~~~~~~~~

* Add sudo rights to the rally user

  Rally is a pluggable framework. External plugins can require the
  installation of additional python or system packages, so we decided to add
  sudo rights.

* Move from the ubuntu:14.04 base image to ubuntu:16.04.

  Ubuntu 16.04 is the current LTS release. Let's use it.

* Pre-install vim

  Since there are a lot of users who like to experiment and modify samples
  inside the container, the rally team decided to pre-install vim.

* Configure/pre-install bash-completion

  Rally provides a bash-completion script, but it doesn't work without the
  `bash-completion` package installed, so the package is now included in our
  image.

Rally Deployment
~~~~~~~~~~~~~~~~

* Add strict jsonschema validation for ExistingCloud deployments. Incorrect
  and unexpected properties will no longer be ignored. If you need to store
  some extra parameters, you can use the new "extra" property.

* Fix an issue with endpoint_type.

  Previously, the endpoint type was not transmitted to the keystone client,
  so keystoneclient used the default endpoint type (which can differ between
  API calls). Behaviour after the fix:

  - None endpoint type -> Rally initializes all clients without setting an
    endpoint type, so each client chooses its own default. Most clients use
    "public" by default; keystone uses "admin" or "internal".
  - Non-None endpoint type -> Rally initializes all clients with this
    endpoint type. Be careful: by default, most keystone v2 API calls do not
    work with the public endpoint type.

  A deployment configuration sketch illustrating both changes is shown below.
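For illustration only - a minimal ExistingCloud deployment config that uses an
explicit endpoint_type together with the new "extra" property (all values are
placeholders, not defaults):

.. code-block:: json

   {
       "type": "ExistingCloud",
       "auth_url": "http://example.net:5000/v2.0/",
       "endpoint_type": "internal",
       "admin": {
           "username": "admin",
           "password": "secret",
           "tenant_name": "admin"
       },
       "extra": {"some_var": "some_value"}
   }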
Rally Task
~~~~~~~~~~

* [core] Iteration numbers in logging and reports are now synchronized and
  start from 1.

* [config] users_context.keystone_default_role is a new config option
  (defaults to "member") for setting the default role for new users in case
  of Keystone V3.

* [Reports] Embed the Rally version into HTML reports

  This adds the Rally version via a meta tag to HTML reports.

* [Reports] Expand the menu if there is only one menu group

* [logging] Remove the deprecated rally.common.log module

* [Trends][Reports] Add a success rate chart to the trends report

* [Reports] Hide the menu list if there is no data at all

Rally Verify
~~~~~~~~~~~~

* Updating the Tempest config file

  - Some tests (for boto, horizon, etc.) were removed from Tempest, so there
    is no need to keep the corresponding options in the Tempest config file.

  - Some options in Tempest were moved from one section to another, and we
    made the corresponding changes in Rally to stay up to date with the
    latest Tempest version.

* Adding a '--skip-list' arg to the `rally verify start` cmd

  `CLI argument for --skip-list `_

* *NEW!!*:

  - `Command for plugin listing `_
  - `Command to uninstall plugins `_

* Renamed and deprecated several arguments of the `rally verify start` cmd
  (a console sketch follows below):

  - tests-file -> load-list
  - xfails-file -> xfail-list
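  A hypothetical invocation showing the deprecated and the new spellings
  (file names are placeholders):

  .. code-block:: console

     # deprecated arguments
     $ rally verify start --tests-file my-tests.txt --xfails-file known-failures.yaml
     # new arguments
     $ rally verify start --load-list my-tests.txt --xfail-list known-failures.yaml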
Plugins
~~~~~~~

**Scenarios**:

* Extend Sahara scenarios with an autoconfig param

  Affected plugins:

  - `SaharaClusters.create_and_delete_cluster `_
  - `SaharaClusters.create_scale_delete_cluster `_
  - `SaharaNodeGroupTemplates.create_and_list_node_group_templates `_
  - `SaharaNodeGroupTemplates.create_delete_node_group_templates `_

* *NEW!!*:

  - `MonascaMetrics.list_metrics `_
  - `SenlinClusters.create_and_delete_cluster `_
  - `Watcher.create_audit_template_and_delete `_
  - `Watcher.create_audit_and_delete `_
  - `Watcher.list_audit_templates `_

* Rename the **murano.create_service** atomic action to
  **murano.create_services**

**SLA**:

*NEW!!*: `performance degradation plugin `_

**Contexts**:

* *NEW!!*:

  - `Monasca monasca_metrics `_
  - `Senlin profiles `_
  - `Watcher audit_templates `_

* Extend the `manila_share_networks `_ context with share-network
  autocreation support.

* Extend the `volumes `_ context to allow volume_type to be None, so the
  default value can be used.

Bug fixes
~~~~~~~~~

* [existing users] The quota context did not restore the original settings on
  exit

  `Launchpad bug-report #1595578 `_

* [keystone v3] Setting a test user role for a Rally task failed

  `Launchpad bug-report #1595081 `_

* [existing users] The context could not fetch 'tenant' and 'user' details
  from the cloud deployment

  `Launchpad bug-report #1602157 `_

* UnboundLocalError: local variable 'cmd' referenced before assignment

  `Launchpad bug-report #1587941 `_

* [Reports] Fix trends report generation if there are n/a results

Documentation
~~~~~~~~~~~~~

* Add a page about task reports

  `RTD page for reports `_

Thanks
~~~~~~

 2 Everybody!

diff --git a/doc/release_notes/archive/v0.7.0.rst b/doc/release_notes/archive/v0.7.0.rst
deleted file mode 100644
index e49ac0ba..00000000
--- a/doc/release_notes/archive/v0.7.0.rst
+++ /dev/null
@@ -1,135 +0,0 @@

============
Rally v0.7.0
============

Overview
--------

+------------------+-----------------------+
| Release date     | **10/11/2016**        |
+------------------+-----------------------+

Details
-------

Specs & Feature Requests
~~~~~~~~~~~~~~~~~~~~~~~~

* [Used] Ported all rally scenarios to the class-based style

  `Spec reference `_

* `[Implemented] New Plugins Type - Hook `_

Database
~~~~~~~~

.. warning:: The database schema is changed; you must run
   `rally-manage db upgrade `_
   to be able to use an old Rally installation with the latest release.

* [require migration] fix for the wrong format of "verification_log" of tasks
* [require migration] remove admin_domain_name from OpenStack deployments

Rally Deployment
~~~~~~~~~~~~~~~~

* Remove admin_domain_name from the openstack deployment

  Reason: the admin_domain_name parameter is absent in Keystone credentials.

Rally Task
~~~~~~~~~~

* [Trends][Reports] Use timestamps on the X axis in the trends report

* [Reports] Add a new OutputTextArea chart plugin

  The new chart plugin can show arbitrary textual data on the
  "Scenario Data -> Per iteration" tab.

  This finally allows showing non-numeric data like IP addresses, notes and
  even long comments.

  The plugin `Dummy.dummy_output `_
  is also updated to provide a demonstration.

* [cli] Add version info to the *rally task start* output

* [api] Allow deleting stopped tasks without force=True

  It is reasonable to protect the deletion of running tasks (statuses INIT,
  VERIFYING, RUNNING, ABORTING and so on...), but it is strange to protect
  the deletion of stopped tasks (statuses FAILED and ABORTED). It is also
  annoying in CLI usage.

* Added hooks and triggers.

  A hook is a new entity that can be launched on specific events. A trigger
  is another new entity that processes events and launches hooks. For
  example, a hook can launch a specific destructive action - simply execute a
  CLI command (there is a sys_call hook for this task) - and it can be
  launched by a simple trigger on specific iteration(s) or at a specific time
  (there is an event trigger). A configuration sketch is shown below.
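  For illustration only - a plausible workload fragment wiring a sys_call
  hook to an event trigger; the exact schema is an assumption based on the
  description above, not a verbatim sample from this release:

  .. code-block:: json

     {
         "hooks": [
             {
                 "name": "sys_call",
                 "args": "ls -la",
                 "trigger": {
                     "name": "event",
                     "args": {
                         "unit": "iteration",
                         "at": [5, 10]
                     }
                 }
             }
         ]
     }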
Rally Verify
~~~~~~~~~~~~

Scenario tests in Tempest require an image file. The logic of obtaining this
image is changed:

* If CONF.tempest.img_name_regex is set, Rally tries to find an image
  matching the regex in Glance and download it for the tests.
* If CONF.tempest.img_name_regex is not set (or Rally didn't find an image
  matching CONF.tempest.img_name_regex), Rally downloads the image from the
  link specified in CONF.tempest.img_url.

Plugins
~~~~~~~

**Scenarios**:

* *Removed*: `Dummy.dummy_with_scenario_output `_

  It was deprecated in 0.5.0.

  .. warning:: This plugin is not available anymore in 0.7.0.

* *NEW!!*:

  - `MagnumClusterTemplates.list_cluster_templates `_
  - `MagnumClusters.list_clusters `_
  - `MagnumClusters.create_and_list_clusters `_
  - `NovaAggregates.create_aggregate_add_and_remove_host `_
  - `NovaAggregates.create_and_list_aggregates `_
  - `NovaAggregates.create_and_delete_aggregate `_
  - `NovaAggregates.create_and_update_aggregate `_
  - `NovaFlavors.create_and_get_flavor `_
  - `NovaFlavors.create_flavor_and_set_keys `_
  - `NovaHypervisors.list_and_get_hypervisors `_
  - `NovaServers.boot_server_associate_and_dissociate_floating_ip `_
  - `KeystoneBasic.authenticate_user_and_validate_token `_

**Contexts**:

* *NEW!!*:

  - `Manila manila_security_services `_
  - `Magnum cluster_templates `_
  - `Magnum clusters `_

**OSClients**:

Port all openstack clients to use the keystone session.

Bug fixes
~~~~~~~~~

* [tasks] rally task detailed produced incorrect / inconsistent output

  `Launchpad bug-report #1562713 `_

Thanks
~~~~~~

 2 Everybody!

diff --git a/doc/release_notes/archive/v0.8.0.rst b/doc/release_notes/archive/v0.8.0.rst
deleted file mode 100644
index 0630e4f5..00000000
--- a/doc/release_notes/archive/v0.8.0.rst
+++ /dev/null
@@ -1,232 +0,0 @@

============
Rally v0.8.0
============

Overview
--------

+------------------+-----------------------+
| Release date     | **1/25/2017**         |
+------------------+-----------------------+

Details
-------

Specs & Feature Requests
~~~~~~~~~~~~~~~~~~~~~~~~

* `[Implemented] Refactor Verification Component
  `_

* `[Implemented] Scaling & Refactoring Rally DB
  `_

Installation
~~~~~~~~~~~~

We switched to using the bindep library for checking required system packages.
All our system dependencies were moved to a separate file (like
requirements.txt for python packages), `bindep.txt
`_.

Database
~~~~~~~~

.. warning:: The database schema is changed; you must run
   `rally-manage db upgrade `_
   to be able to use an old Rally installation with the latest release.

* change the structure of the database to be more flexible
* save raw task results in chunks (see the raw_result_chunk_size option of
  the [DEFAULT] rally configuration section; see the sketch after this list)
* add a db revision check to the rally API, so it is now impossible to use
  rally with a wrong db.
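As an illustration, the chunk size could be tuned in the rally configuration
file; the value below is an arbitrary example, not the default:

.. code-block:: ini

   [DEFAULT]
   # maximum number of raw iteration results stored per database chunk
   raw_result_chunk_size = 1000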
Rally API
~~~~~~~~~

A single entry point for the Rally API is added - rally.api.API. The old API
classes (``rally.api.Task``, ``rally.api.Verification``,
``rally.api.Deployment``) are deprecated now.

Rally CLI
~~~~~~~~~

* ``rally task sla_check`` is deprecated now in favor of
  ``rally task sla-check``

* The deprecated category ``rally show`` was removed.

* `rally plugin list` is extended with a plugin base column

Task Component
~~~~~~~~~~~~~~

- [Random names] A scenario for checking the performance of the
  generate_random_name method is added to our CI with a proper SLA. Be sure:
  whatever number of random names you need, it will not affect the
  performance of Rally at all - we checked.

- [atomic actions] A scenario for checking the performance of calculating
  atomic actions is added to our CI with a proper SLA. Be sure: whatever
  number of atomics you have in scenarios, it will not affect the performance
  of Rally at all - we checked.

- [services] A new entity is introduced to help provide a compatibility layer
  between different API versions of one service.

Verification component
~~~~~~~~~~~~~~~~~~~~~~

We completely redesigned the whole Verification component. For more details
see `our new docs for that component
`_

Unfortunately, such a big change could not be done in a backward compatible
way, so the old code is not compatible with the new one. See `HowTo migrate
from Verification component 0.7.0 to 0.8.0
`_

Plugins
~~~~~~~

**Services**:

* Glance:

  Switched from the V1 to the V2 API by default.

* Keystone:

  - Transmit endpoint_type to keystoneclient
  - Full keystone V3 support

**Scenarios**:

* *Updated*:

  - The meaning of the volume_type argument is changed in the
    `CinderVolumes.create_snapshot_and_attach_volume
    `_
    scenario. It should contain an actual volume type instead of a boolean
    value used to choose a random volume type.
  - Extend `GlanceImages.create_image_and_boot_instances
    `_
    with create_image_kwargs and boot_server_kwargs arguments.

* *NEW!!*:

  - `CeilometerAlarms.create_and_get_alarm
    `_
  - `CinderVolumeBackups.create_incremental_volume_backup
    `_
  - `CinderVolumeTypes.create_and_delete_volume_type
    `_
  - `CinderVolumeTypes.create_volume_type_and_encryption_type
    `_
  - `CinderVolumes.create_and_accept_transfer
    `_
  - `CinderVolumes.create_and_get_volume
    `_
  - `CinderVolumes.create_volume_and_update_readonly_flag
    `_
  - `CinderVolumes.list_transfers
    `_
  - `CinderVolumes.list_types
    `_
  - `KeystoneBasic.create_and_get_role
    `_
  - `ManilaShares.create_and_list_share
    `_
  - `ManilaShares.set_and_delete_metadata
    `_
  - `MistralExecutions.create_execution_from_workbook
    `_
  - `MistralExecutions.list_executions
    `_
  - `NeutronLoadbalancerV2.create_and_list_loadbalancers
    `_
  - `NeutronNetworks.create_and_show_network
    `_
  - `NeutronNetworks.list_agents
    `_
  - `NovaAggregates.create_aggregate_add_host_and_boot_server
    `_
  - `NovaAggregates.create_and_get_aggregate_details
    `_
  - `NovaFlavors.create_and_delete_flavor
    `_
  - `NovaFlavors.create_flavor_and_add_tenant_access
    `_
  - `NovaHosts.list_and_get_hosts
    `_
  - `NovaHypervisors.list_and_get_uptime_hypervisors
    `_
  - `NovaHypervisors.list_and_search_hypervisors
    `_
  - `NovaHypervisors.statistics_hypervisors
    `_
  - `NovaSecGroup.boot_server_and_add_secgroups
    `_
  - `NovaServerGroups.create_and_list_server_groups
    `_
  - `Quotas.nova_get
    `_

**Hooks**:

* *NEW!!*:

  - `fault_injection
    `_

**Runners**

* *Updated*:

  - The `RPS runner
    `_
    is extended with the ability to increase the 'rps' value by arithmetic
    progression across a certain duration. 'rps' can now also be a dict
    specifying the progression parameters:

    .. code-block:: json

       "rps": {
           "start": 1,
           "end": 10,
           "step": 1,
           "duration": 2
       }

    This generates the rps values ``start, start + step, start + 2 * step,
    ..., end``, holding each value for 'duration' seconds. If the iteration
    count is not exhausted at the last step of the progression, rps keeps
    being generated with the "end" value. Note that the last generated rps
    value could be smaller. A complete runner section using this form is
    sketched below.
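    A complete, illustrative runner section built around this progression
    (the "times" value is a placeholder):

    .. code-block:: json

       {
           "runner": {
               "type": "rps",
               "times": 100,
               "rps": {
                   "start": 1,
                   "end": 10,
                   "step": 1,
                   "duration": 2
               }
           }
       }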
Fixed bugs
~~~~~~~~~~

* [hooks] incorrect encoding of stdout/stderr streams opened by the sys_call
  hook under py3

* [hooks] sorting the Hook column in the HTML report didn't work

* [tasks][scenarios][neutron] L3 HA: Unable to complete operation on subnet

  `Launchpad bug-report #1562878 `_

* [tasks] the JSON report didn't preserve the order of atomics

* [tasks][cleanup][nova] Failed to remove an aggregate which has hosts in it

* [tasks] the `--abort-on-sla-failure
  `_
  mechanism worked only for the current workload and did not stop the next
  ones.

* [hooks] the hooks section wasn't displayed in the HTML report

Thanks
~~~~~~

 2 Everybody!

diff --git a/doc/release_notes/archive/v0.8.1.rst b/doc/release_notes/archive/v0.8.1.rst
deleted file mode 100644
index 08bc4179..00000000
--- a/doc/release_notes/archive/v0.8.1.rst
+++ /dev/null
@@ -1,40 +0,0 @@

============
Rally v0.8.1
============

Overview
--------

+------------------+-----------------------+
| Release date     | **1/27/2017**         |
+------------------+-----------------------+

Details
-------

Fix for the python requirements list.

Plugins
~~~~~~~

**Scenarios**:

* *Updated*:

  - Use a new network for each subnet in the
    `NeutronNetworks.create_and_list_subnets
    `_
    scenario.

* *NEW!!*:

  - `CinderVolumeTypes.create_and_list_encryption_type
    `_

  - `Quotas.cinder_get
    `_

Thanks
~~~~~~

 2 Everybody!

diff --git a/doc/release_notes/archive/v0.9.0.rst b/doc/release_notes/archive/v0.9.0.rst
deleted file mode 100644
index e88846e4..00000000
--- a/doc/release_notes/archive/v0.9.0.rst
+++ /dev/null
@@ -1,163 +0,0 @@

============
Rally v0.9.0
============

Overview
--------

+------------------+-----------------------+
| Release date     | **3/20/2017**         |
+------------------+-----------------------+

Details
-------

Command Line Interface
~~~~~~~~~~~~~~~~~~~~~~

* `rally plugin list` no longer lists hidden plugins.

Task component
~~~~~~~~~~~~~~

* Added a check for duplicated keys in task files.

* The order of subtasks (scenarios/workloads) is no longer ignored. You can
  generate whatever load you want, or use that feature to warm up the cloud
  (put a small scenario at the start of the task to wake up the cloud before
  the real load).

* Information about workload creation is added to HTML-reports.

* Task statuses are changed to be clearer and cover more cases:

  - ``verifying`` is renamed to ``validating``.
  - ``failed`` is split into 2 statuses - ``validation_failed``, which means
    that the task did not pass the validation step, and ``crashed``, which
    means that something went wrong in the rally engine.

* Our awesome cleanup became even more awesome! The filter mechanism is
  improved to discover resources in projects created only by Rally (it works
  for most resources, except several network-related ones). This makes it
  possible to run Rally with existing users in real tenants without fear of
  removing something important.

Verification component
~~~~~~~~~~~~~~~~~~~~~~

* Fixed an issue with missed tests while listing all supported tests of a
  specified verifier.

* Fixed an issue with displaying the wrong version of a verifier in case of
  cloning from a local directory.

* Extend `rally verify rerun
  `_
  with ``--detailed``, ``--no-use``, ``--tag`` and ``--concurrency``
  arguments.

* Add output examples for the `JSON
  `_ and
  `JUnit-XML
  `_
  reporters.

Plugins
~~~~~~~

**Contexts**

* Extend cinder quotas to support ``backups`` and ``backup_gigabytes``
  (an illustrative sketch follows below).
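  Illustrative only - a quotas context granting unlimited backup quotas
  (by the usual quota convention, -1 means "unlimited"; the surrounding keys
  are placeholders):

  .. code-block:: json

     {
         "context": {
             "quotas": {
                 "cinder": {
                     "backups": -1,
                     "backup_gigabytes": -1
                 }
             }
         }
     }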
**Deployment Engines**:

*Updated* Extend `DevstackEngine
`_
with an ``enable_plugin`` option.

**OpenStack clients**:

* Extend support for auth urls like ``https://example.com:35357/foo/bar/v3``

* Pass the endpoint type to heatclient

**Scenarios**:

* *NEW!!*

  - `CinderVolumeTypes.create_and_delete_encryption_type
    `_

  - `CinderVolumeTypes.create_and_set_volume_type_keys
    `_

  - `KeystoneBasic.create_and_list_roles
    `_

  - `KeystoneBasic.create_and_update_user
    `_

  - `NovaKeypair.create_and_get_keypair
    `_

  - `NovaServers.resize_shutoff_server
    `_

  - `VMTasks.dd_load_test
    `_

* *UPDATED!!*

  - Extend `VMTasks.boot_runcommand_delete
    `_
    to display just the raw text output of the executed command.

* *DELETED*

  The scenario `VMTasks.boot_runcommand_delete_custom_image
  `_
  is removed, since `VMTasks.boot_runcommand_delete
  `_
  covers the case of that particular scenario without adding any complexity.

**Validators**:

* Extend the ``required_contexts`` validator to support ``at least one of
  the`` logic.

* Fix a bunch of JSON schemas which are used for validation of all plugins.

Documentation
~~~~~~~~~~~~~

We totally reworked the `Plugins Reference
`_ page.
Now it looks more like the `Command Line Interface
`_ page, which means
that you can get links to a particular parameter of a particular plugin.

Also, you can find the expected parameters and their types for all contexts,
hooks, SLAs and so on! Most of them still miss descriptions, but we are
working on adding them.

Fixed bugs
~~~~~~~~~~

* [osclients] A custom auth mechanism was used for zaqarclient instead of the
  unified keystone session, which led to auth errors on some environments.

* [plugins] While running the
  `CinderVolumes.create_and_restore_volume_backup
  `_
  scenario we had a race condition when deleting the backup, due to a wrong
  check of the backup status.

* [plugins][verifications] Jenkins expects the "classname" JUnitXML attribute
  instead of "class_name".

Thanks
~~~~~~

 2 Everybody!

diff --git a/doc/release_notes/latest.rst b/doc/release_notes/latest.rst
deleted file mode 120000
index 5e487482..00000000
--- a/doc/release_notes/latest.rst
+++ /dev/null
@@ -1 +0,0 @@
-archive/v0.9.0.rst
\ No newline at end of file
diff --git a/doc/source/Makefile b/doc/source/Makefile
deleted file mode 100644
index a4664cac..00000000
--- a/doc/source/Makefile
+++ /dev/null
@@ -1,177 +0,0 @@
# Makefile for Sphinx documentation
#

# You can set these variables from the command line.
SPHINXOPTS   =
SPHINXBUILD  = sphinx-build
PAPER        =
BUILDDIR     = _build

# User-friendly check for sphinx-build
ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1)
$(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/)
endif

# Internal variables.
PAPEROPT_a4     = -D latex_paper_size=a4
PAPEROPT_letter = -D latex_paper_size=letter
ALLSPHINXOPTS   = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
# the i18n builder cannot share the environment and doctrees with the others
I18NSPHINXOPTS  = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
- -.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext - -help: - @echo "Please use \`make ' where is one of" - @echo " html to make standalone HTML files" - @echo " dirhtml to make HTML files named index.html in directories" - @echo " singlehtml to make a single large HTML file" - @echo " pickle to make pickle files" - @echo " json to make JSON files" - @echo " htmlhelp to make HTML files and a HTML help project" - @echo " qthelp to make HTML files and a qthelp project" - @echo " devhelp to make HTML files and a Devhelp project" - @echo " epub to make an epub" - @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" - @echo " latexpdf to make LaTeX files and run them through pdflatex" - @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx" - @echo " text to make text files" - @echo " man to make manual pages" - @echo " texinfo to make Texinfo files" - @echo " info to make Texinfo files and run them through makeinfo" - @echo " gettext to make PO message catalogs" - @echo " changes to make an overview of all changed/added/deprecated items" - @echo " xml to make Docutils-native XML files" - @echo " pseudoxml to make pseudoxml-XML files for display purposes" - @echo " linkcheck to check all external links for integrity" - @echo " doctest to run all doctests embedded in the documentation (if enabled)" - -clean: - rm -rf $(BUILDDIR)/* - -html: - $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html - @echo - @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." - -dirhtml: - $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml - @echo - @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." - -singlehtml: - $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml - @echo - @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." - -pickle: - $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle - @echo - @echo "Build finished; now you can process the pickle files." - -json: - $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json - @echo - @echo "Build finished; now you can process the JSON files." - -htmlhelp: - $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp - @echo - @echo "Build finished; now you can run HTML Help Workshop with the" \ - ".hhp project file in $(BUILDDIR)/htmlhelp." - -qthelp: - $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp - @echo - @echo "Build finished; now you can run "qcollectiongenerator" with the" \ - ".qhcp project file in $(BUILDDIR)/qthelp, like this:" - @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/rally.qhcp" - @echo "To view the help file:" - @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/rally.qhc" - -devhelp: - $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp - @echo - @echo "Build finished." - @echo "To view the help file:" - @echo "# mkdir -p $$HOME/.local/share/devhelp/rally" - @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/rally" - @echo "# devhelp" - -epub: - $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub - @echo - @echo "Build finished. The epub file is in $(BUILDDIR)/epub." - -latex: - $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex - @echo - @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." - @echo "Run \`make' in that directory to run these through (pdf)latex" \ - "(use \`make latexpdf' here to do that automatically)." 
- -latexpdf: - $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex - @echo "Running LaTeX files through pdflatex..." - $(MAKE) -C $(BUILDDIR)/latex all-pdf - @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." - -latexpdfja: - $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex - @echo "Running LaTeX files through platex and dvipdfmx..." - $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja - @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." - -text: - $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text - @echo - @echo "Build finished. The text files are in $(BUILDDIR)/text." - -man: - $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man - @echo - @echo "Build finished. The manual pages are in $(BUILDDIR)/man." - -texinfo: - $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo - @echo - @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." - @echo "Run \`make' in that directory to run these through makeinfo" \ - "(use \`make info' here to do that automatically)." - -info: - $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo - @echo "Running Texinfo files through makeinfo..." - make -C $(BUILDDIR)/texinfo info - @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." - -gettext: - $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale - @echo - @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." - -changes: - $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes - @echo - @echo "The overview file is in $(BUILDDIR)/changes." - -linkcheck: - $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck - @echo - @echo "Link check complete; look for any errors in the above output " \ - "or in $(BUILDDIR)/linkcheck/output.txt." - -doctest: - $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest - @echo "Testing of doctests in the sources finished, look at the " \ - "results in $(BUILDDIR)/doctest/output.txt." - -xml: - $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml - @echo - @echo "Build finished. The XML files are in $(BUILDDIR)/xml." - -pseudoxml: - $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml - @echo - @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml." diff --git a/doc/source/_templates/openstackrally/_static/img.css b/doc/source/_templates/openstackrally/_static/img.css deleted file mode 100644 index 90af29f3..00000000 --- a/doc/source/_templates/openstackrally/_static/img.css +++ /dev/null @@ -1,3 +0,0 @@ -.body img { - max-width: 100%; -} \ No newline at end of file diff --git a/doc/source/_templates/openstackrally/layout.html b/doc/source/_templates/openstackrally/layout.html deleted file mode 100644 index ef819316..00000000 --- a/doc/source/_templates/openstackrally/layout.html +++ /dev/null @@ -1,47 +0,0 @@ -{% extends "openstack/layout.html" %} -{% set show_source = False %} -{% set css_files = css_files + ["_static/img.css"] %} - -{# sidebarlogo is a top block in sidebar. Let's use it to display home link #} -{%- block sidebarlogo %} -

What is Rally?

-{%- endblock %} - -{# Display global toc instead of local #} -{%- block sidebartoc %} -

Contents

- {{ toctree() }} -{%- endblock %} - -{# Turn off sections "Previous topic" and "Next topic" #} -{%- block sidebarrel %}{% endblock %} - -{% block projectsource %} -

Contacts

-

- IRC
#openstack-rally channel at FreeNode
- E-mail
openstack-dev@lists.openstack.org with "[Rally]" tag in subject -

-

Useful links

- -{% endblock %} - -{# copy-pasted from original theme and extended with Rally links #} -{%- block header_navigation %} -
  • Home
  • -
  • Projects
  • -
  • User Stories
  • -
  • Community
  • -
  • Blog
  • -
  • Wiki
  • -
  • Documentation
  • -{% endblock %} - diff --git a/doc/source/_templates/openstackrally/theme.conf b/doc/source/_templates/openstackrally/theme.conf deleted file mode 100644 index e739753b..00000000 --- a/doc/source/_templates/openstackrally/theme.conf +++ /dev/null @@ -1,2 +0,0 @@ -[theme] -inherit = openstack \ No newline at end of file diff --git a/doc/source/cli_reference.rst b/doc/source/cli_reference.rst deleted file mode 100644 index 23fb7220..00000000 --- a/doc/source/cli_reference.rst +++ /dev/null @@ -1,11 +0,0 @@ -.. _cli-reference: - - -Command Line Interface -====================== - -.. contents:: - :depth: 1 - :local: - -.. make_cli_reference:: diff --git a/doc/source/conf.py b/doc/source/conf.py deleted file mode 100644 index 82935be5..00000000 --- a/doc/source/conf.py +++ /dev/null @@ -1,299 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# Rally documentation build configuration file, created by -# sphinx-quickstart on Fri Jan 10 23:19:18 2014. -# -# This file is execfile() with the current directory set to its containing dir. -# -# Note that not all possible configuration values are present in this -# auto-generated file. -# -# All configuration values have a default; values that are commented out -# serve to show the default. - -import datetime as dt -import os -import subprocess -import sys - -import rally.common.version - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. -# sys.path.extend([ -# os.path.abspath("../.."), -# os.path.abspath("../"), -# os.path.abspath("./") -# ]) - -sys.path.insert(0, os.path.abspath("../../")) -sys.path.insert(0, os.path.abspath("../")) -sys.path.insert(0, os.path.abspath("./")) - - -# -- General configuration ---------------------------------------------------- - -on_rtd = os.environ.get("READTHEDOCS") == "True" - -# If your documentation needs a minimal Sphinx version, state it here. -# needs_sphinx = "1.0" - -# Add any Sphinx extension module names here, as strings. -# They can be extensions coming with Sphinx (named "sphinx.ext.*") or your -# custom ones. -extensions = [ - "sphinx.ext.autodoc", - "sphinx.ext.doctest", - "sphinx.ext.todo", - "sphinx.ext.coverage", - "sphinx.ext.ifconfig", - "sphinx.ext.viewcode", - "ext.cli_reference", - "ext.plugin_reference", - "ext.include_vars" -] -todo_include_todos = True - -# Add any paths that contain templates here, relative to this directory. -templates_path = ["_templates"] - -# The suffix of source filenames. -source_suffix = ".rst" - -# The encoding of source files. -# source_encoding = "utf-8-sig" - -# The master toctree document. -master_doc = "index" - -# General information about the project. 
-project = u"Rally" -copyright = u"%d, OpenStack Foundation" % dt.datetime.now().year - -# The version info for the project you're documenting, acts as replacement for -# |version| and |release|, also used in various other places throughout the -# built documents. -# -# The short X.Y version. - - -version = rally.common.version.version_string() -# The full version, including alpha/beta/rc tags. -release = rally.common.version.version_string() - -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. -# language = None - -# There are two options for replacing |today|: either, you set today to some -# non-false value, then it is used: -# today = "" -# Else, today_fmt is used as the format for a strftime call. -# today_fmt = "%B %d, %Y" - -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. -exclude_patterns = [ - "feature_request/README.rst", - "samples/README.rst", - "**/README.rst" -] - -# The reST default role (used for this markup: `text`) to use for all documents -# default_role = None - -# If true, "()" will be appended to :func: etc. cross-reference text. -add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -add_module_names = True - -# If true, sectionauthor and moduleauthor directives will be shown in the -# output. They are ignored by default. -# show_authors = False - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = "sphinx" - -# A list of ignored prefixes for module index sorting. -# modindex_common_prefix = [] - - -# -- Options for HTML output -------------------------------------------------- - -# The theme to use for HTML and HTML Help pages. See the documentation for -# a list of builtin themes. -if not on_rtd: - html_theme = "openstackrally" -else: - html_theme = "default" -# Theme options are theme-specific and customize the look and feel of a theme -# further. For a list of options available for each theme, see the -# documentation. -# html_theme_options = {} - -# Add any paths that contain custom themes here, relative to this directory. -if not on_rtd: - import oslosphinx - theme_dir = os.path.join(os.path.dirname(oslosphinx.__file__), "theme") - html_theme_path = [theme_dir, "_templates"] -else: - html_theme_path = [] - -# The name for this set of Sphinx documents. If None, it defaults to -# " v documentation". -# html_title = None - -# A shorter title for the navigation bar. Default is the same as html_title. -# html_short_title = None - -# The name of an image file (relative to this directory) to place at the top -# of the sidebar. -# html_logo = None - -# The name of an image file (within the static path) to use as favicon of the -# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 -# pixels large. -# html_favicon = None - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -if not on_rtd: - html_static_path = ["_templates/openstackrally/_static"] -else: - html_static_path = [] - -# If not "", a "Last updated on:" timestamp is inserted at every page bottom, -# using the given strftime format. 
-git_cmd = [ - "git", "log", "--pretty=format:'%ad, commit %h'", "--date=local", "-n1"] -html_last_updated_fmt = subprocess.check_output(git_cmd).decode("utf-8") - -# If true, SmartyPants will be used to convert quotes and dashes to -# typographically correct entities. -html_use_smartypants = False - -# Custom sidebar templates, maps document names to template names. -html_sidebars = {"**": ["searchbox.html", "globaltoc.html"]} - -# Additional templates that should be rendered to pages, maps page names to -# template names. -# html_additional_pages = {} - -# If false, no module index is generated. -# html_domain_indices = True - -# If false, no index is generated. -# html_use_index = True - -# If true, the index is split into individual pages for each letter. -# html_split_index = False - -# If true, links to the reST sources are added to the pages. -# html_show_sourcelink = True - -# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. -# html_show_sphinx = True - -# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. -# html_show_copyright = True - -# If true, an OpenSearch description file will be output, and all pages will -# contain a tag referring to it. The value of this option must be the -# base URL from which the finished HTML is served. -# html_use_opensearch = "" - -# This is the file name suffix for HTML files (e.g. ".xhtml"). -# html_file_suffix = None - -# Output file base name for HTML help builder. -htmlhelp_basename = "%sdoc" % project - - -# -- Options for LaTeX output ------------------------------------------------- - -latex_elements = { - # The paper size ("letterpaper" or "a4paper"). - # "papersize": "letterpaper", - # The font size ("10pt", "11pt" or "12pt"). - # "pointsize": "10pt", - # Additional stuff for the LaTeX preamble. - # "preamble": "", -} - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, author, documentclass [howto/manual]) -latex_documents = [ - ("index", - "%s.tex" % project, - u"%s Documentation" % project, - u"OpenStack Foundation", "manual"), -] - -# The name of an image file (relative to this directory) to place at the top of -# the title page. -# latex_logo = None - -# For "manual" documents, if this is true, then toplevel headings are parts, -# not chapters. -# latex_use_parts = False - -# If true, show page references after internal links. -# latex_show_pagerefs = False - -# If true, show URL addresses after external links. -# latex_show_urls = False - -# Documents to append as an appendix to all manuals. -# latex_appendices = [] - -# If false, no module index is generated. -# latex_domain_indices = True - - -# -- Options for manual page output ------------------------------------------- - -# One entry per manual page. List of tuples -# (source start file, name, description, authors, manual section). -# man_pages = [ -# ("index", "rally", u"Rally Documentation", -# [u"Rally Team"], 1) -# ] - -# If true, show URL addresses after external links. -# man_show_urls = False - - -# -- Options for Texinfo output ----------------------------------------------- - -# Grouping the document tree into Texinfo files. List of tuples -# (source start file, target name, title, author, -# dir menu entry, description, category) -texinfo_documents = [ - ("index", "Rally", u"Rally Documentation", - u"Rally Team", "Rally", - "Testing framework and tool for all kinds of tests", - "Development"), -] - -# Documents to append as an appendix to all manuals. 
-# texinfo_appendices = [] - -# If false, no module index is generated. -# texinfo_domain_indices = True - -# How to display URL addresses: "footnote", "no", or "inline". -# texinfo_show_urls = "footnote" diff --git a/doc/source/contribute.rst b/doc/source/contribute.rst deleted file mode 100644 index 278835dc..00000000 --- a/doc/source/contribute.rst +++ /dev/null @@ -1,261 +0,0 @@ -.. - Copyright 2015 Mirantis Inc. All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -.. _contribute: - -Contribute to Rally -=================== - -Where to begin --------------- - -Please take a look `our Roadmap`_ to get information about our current work -directions. - -In case you have questions or want to share your ideas, be sure to contact us -either at `Rally-dev/Lobby`_ channel on **Gitter** messenger (or, less -preferably, at the ``#openstack-rally`` IRC channel on **irc.freenode.net**). - -If you are going to contribute to Rally, you will probably need to grasp a -better understanding of several main design concepts used throughout our -project (such as **benchmark scenarios**, **contexts** etc.). To do so, please -read :ref:`this article `. - - -How to contribute ------------------ - -1. You need a `Launchpad`_ account and need to be joined to the -`OpenStack team`_. You can also join the `Rally team`_ if you want to. Make -sure Launchpad has your SSH key, Gerrit (the code review system) uses this. - -2. Sign the CLA as outlined in the `account setup`_ section of the developer -guide. - -3. Tell git your details: - -.. code-block:: bash - - git config --global user.name "Firstname Lastname" - git config --global user.email "your_email@youremail.com" - -4. Install git-review. This tool takes a lot of the pain out of remembering -commands to push code up to Gerrit for review and to pull it back down to edit -it. It is installed using: - -.. code-block:: bash - - pip install git-review - -Several Linux distributions (notably Fedora 16 and Ubuntu 12.04) are also -starting to include git-review in their repositories so it can also be -installed using the standard package manager. - -5. Grab the Rally repository: - -.. code-block:: bash - - git clone git@github.com:openstack/rally.git - -6. Checkout a new branch to hack on: - -.. code-block:: bash - - git checkout -b TOPIC-BRANCH - -7. Start coding - -8. Run the test suite locally to make sure nothing broke, e.g. (this will run -py34/py27/pep8 tests): - -.. code-block:: bash - - tox - -**(NOTE: you should have installed tox<=1.6.1)** - -If you extend Rally with new functionality, make sure you have also provided -unit and/or functional tests for it. - -9. Commit your work using: - -.. code-block:: bash - - git commit -a - - -Make sure you have supplied your commit with a neat commit message, containing -a link to the corresponding blueprint / bug, if appropriate. - -10. Push the commit up for code review using: - -.. code-block:: bash - - git review -R - -That is the awesome tool we installed earlier that does a lot of hard work for -you. - -11. 
Watch your email or `review site`_, it will automatically send your code -for a battery of tests on our `Jenkins setup`_ and the core team for the -project will review your code. If there are any changes that should be made -they will let you know. - -12. When all is good the review site will automatically merge your code. - - -(This tutorial is based on: -http://www.linuxjedi.co.uk/2012/03/real-way-to-start-hacking-on-openstack.html) - -Testing -------- - -Please, don't hesitate to write tests ;) - - -Unit tests -^^^^^^^^^^ - -*Files: /tests/unit/** - -The goal of unit tests is to ensure that internal parts of the code work -properly. All internal methods should be fully covered by unit tests with a -reasonable mocks usage. - - -About Rally unit tests: - -- All `unit tests`_ are located inside /tests/unit/* -- Tests are written on top of: *testtools* and *mock* libs -- `Tox`_ is used to run unit tests - - -To run unit tests locally: - -.. code-block:: console - - $ pip install tox - $ tox - -To run py34, py27 or pep8 only: - -.. code-block:: console - - $ tox -e - - #NOTE: is one of py34, py27 or pep8 - -To run a single unit test e.g. test_deployment - -.. code-block:: console - - $ tox -e -- - - #NOTE: is one of py34, py27 or pep8 - # is the unit test case name, e.g tests.unit.test_osclients - -To debug issues on the unit test: - -- Add breakpoints on the test file using ``import pdb;`` ``pdb.set_trace()`` -- Then run tox in debug mode: - -.. code-block:: console - - $ tox -e debug - #NOTE: use python 2.7 - #NOTE: is the unit test case name - - or - -.. code-block:: console - - $ tox -e debug34 - #NOTE: use python 3.4 - #NOTE: is the unit test case name - -To get test coverage: - -.. code-block:: console - - $ tox -e cover - - #NOTE: Results will be in /cover/index.html - -To generate docs: - -.. code-block:: console - - $ tox -e docs - - #NOTE: Documentation will be in doc/source/_build/html/index.html - -Functional tests -^^^^^^^^^^^^^^^^ - -*Files: /tests/functional/** - -The goal of `functional tests`_ is to check that everything works well -together. Functional tests use Rally API only and check responses without -touching internal parts. - -To run functional tests locally: - -.. code-block:: console - - $ source openrc - $ rally deployment create --fromenv --name testing - $ tox -e cli - - #NOTE: openrc file with OpenStack admin credentials - -Output of every Rally execution will be collected under some reports root in -directory structure like: reports_root/ClassName/MethodName_suffix.extension -This functionality implemented in tests.functional.utils.Rally.__call__ method. -Use 'gen_report_path' method of 'Rally' class to get automatically generated -file path and name if you need. You can use it to publish html reports, -generated during tests. Reports root can be passed throw environment variable -'REPORTS_ROOT'. Default is 'rally-cli-output-files'. - -Rally CI scripts -^^^^^^^^^^^^^^^^ - -*Files: /tests/ci/** - -This directory contains scripts and files related to the Rally CI system. - -Rally Style Commandments -^^^^^^^^^^^^^^^^^^^^^^^^ - -*Files: /tests/hacking/* - -This module contains Rally specific hacking rules for checking commandments. - -For more information about Style Commandments, read the -`OpenStack Style Commandments manual`_. - -.. references: - -.. _our Roadmap: https://docs.google.com/a/mirantis.com/spreadsheets/d/16DXpfbqvlzMFaqaXAcJsBzzpowb_XpymaK2aFY2gA2g/edit#gid=0 -.. _Rally-dev/Lobby: https://gitter.im/rally-dev/Lobby -.. 
_Launchpad: https://launchpad.net/ -.. _OpenStack team: https://launchpad.net/openstack -.. _Rally team: https://launchpad.net/rally -.. _account setup: http://docs.openstack.org/infra/manual/developers.html#development-workflow -.. _review site: http://review.openstack.org/ -.. _Jenkins setup: http://jenkins.openstack.org/ -.. _unit tests: http://en.wikipedia.org/wiki/Unit_testing -.. _Tox: https://tox.readthedocs.org/en/latest/ -.. _functional tests: https://en.wikipedia.org/wiki/Functional_testing -.. _OpenStack Style Commandments manual: https://docs.openstack.org/hacking/latest/ diff --git a/doc/source/feature_request b/doc/source/feature_request deleted file mode 120000 index 4b5d7cf8..00000000 --- a/doc/source/feature_request +++ /dev/null @@ -1 +0,0 @@ -../feature_request/ \ No newline at end of file diff --git a/doc/source/feature_requests.rst b/doc/source/feature_requests.rst deleted file mode 100644 index e15555af..00000000 --- a/doc/source/feature_requests.rst +++ /dev/null @@ -1,34 +0,0 @@ -.. - Copyright 2015 Mirantis Inc. All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -.. _feature_requests: - -Request New Features -==================== - -To request a new feature, you should create a document similar to other feature -requests and then contribute it to the **doc/feature_request** directory of the -Rally repository (see the :ref:`How-to-contribute tutorial `). - -If you don't have time to contribute your feature request via Gerrit, please -contact Boris Pavlovic (boris@pavlovic.me) - -Active feature requests: - -.. 
toctree:: - :glob: - :maxdepth: 1 - - feature_request/* diff --git a/doc/source/images/Amqp_rpc_single_reply_queue.png b/doc/source/images/Amqp_rpc_single_reply_queue.png deleted file mode 100644 index f5bd084d..00000000 Binary files a/doc/source/images/Amqp_rpc_single_reply_queue.png and /dev/null differ diff --git a/doc/source/images/Hook-Aggregated-Report.png b/doc/source/images/Hook-Aggregated-Report.png deleted file mode 100644 index f7564364..00000000 Binary files a/doc/source/images/Hook-Aggregated-Report.png and /dev/null differ diff --git a/doc/source/images/Hook-Per-Hook-Report.png b/doc/source/images/Hook-Per-Hook-Report.png deleted file mode 100644 index 7bfc523d..00000000 Binary files a/doc/source/images/Hook-Per-Hook-Report.png and /dev/null differ diff --git a/doc/source/images/Hook-Results.png b/doc/source/images/Hook-Results.png deleted file mode 100644 index cc9306bd..00000000 Binary files a/doc/source/images/Hook-Results.png and /dev/null differ diff --git a/doc/source/images/Rally-Actions.png b/doc/source/images/Rally-Actions.png deleted file mode 100644 index 20c74c41..00000000 Binary files a/doc/source/images/Rally-Actions.png and /dev/null differ diff --git a/doc/source/images/Rally-Plugins.png b/doc/source/images/Rally-Plugins.png deleted file mode 100644 index 40b11cda..00000000 Binary files a/doc/source/images/Rally-Plugins.png and /dev/null differ diff --git a/doc/source/images/Rally-UseCases.png b/doc/source/images/Rally-UseCases.png deleted file mode 100644 index 13a80e9e..00000000 Binary files a/doc/source/images/Rally-UseCases.png and /dev/null differ diff --git a/doc/source/images/Rally_Architecture.png b/doc/source/images/Rally_Architecture.png deleted file mode 100644 index 5e8b748b..00000000 Binary files a/doc/source/images/Rally_Architecture.png and /dev/null differ diff --git a/doc/source/images/Rally_Distributed_Runner.png b/doc/source/images/Rally_Distributed_Runner.png deleted file mode 100644 index 5899a5d1..00000000 Binary files a/doc/source/images/Rally_Distributed_Runner.png and /dev/null differ diff --git a/doc/source/images/Rally_QA.png b/doc/source/images/Rally_QA.png deleted file mode 100644 index 76680de6..00000000 Binary files a/doc/source/images/Rally_QA.png and /dev/null differ diff --git a/doc/source/images/Rally_VM_list.png b/doc/source/images/Rally_VM_list.png deleted file mode 100644 index 7e48da2d..00000000 Binary files a/doc/source/images/Rally_VM_list.png and /dev/null differ diff --git a/doc/source/images/Rally_snapshot_vm.png b/doc/source/images/Rally_snapshot_vm.png deleted file mode 100644 index da04fd83..00000000 Binary files a/doc/source/images/Rally_snapshot_vm.png and /dev/null differ diff --git a/doc/source/images/Rally_who_is_using.png b/doc/source/images/Rally_who_is_using.png deleted file mode 100644 index ee9d1ad5..00000000 Binary files a/doc/source/images/Rally_who_is_using.png and /dev/null differ diff --git a/doc/source/images/Report-Abort-on-SLA-task-1.png b/doc/source/images/Report-Abort-on-SLA-task-1.png deleted file mode 100644 index 0ca4d96f..00000000 Binary files a/doc/source/images/Report-Abort-on-SLA-task-1.png and /dev/null differ diff --git a/doc/source/images/Report-Abort-on-SLA-task-2.png b/doc/source/images/Report-Abort-on-SLA-task-2.png deleted file mode 100644 index 844e958a..00000000 Binary files a/doc/source/images/Report-Abort-on-SLA-task-2.png and /dev/null differ diff --git a/doc/source/images/Report-Collage.png b/doc/source/images/Report-Collage.png deleted file mode 100644 index 
50694a17..00000000 Binary files a/doc/source/images/Report-Collage.png and /dev/null differ diff --git a/doc/source/images/Report-Multiple-Configurations-Overview.png b/doc/source/images/Report-Multiple-Configurations-Overview.png deleted file mode 100644 index 498350de..00000000 Binary files a/doc/source/images/Report-Multiple-Configurations-Overview.png and /dev/null differ diff --git a/doc/source/images/Report-Multiple-Overview.png b/doc/source/images/Report-Multiple-Overview.png deleted file mode 100644 index 82050c07..00000000 Binary files a/doc/source/images/Report-Multiple-Overview.png and /dev/null differ diff --git a/doc/source/images/Report-Overview.png b/doc/source/images/Report-Overview.png deleted file mode 100644 index 4d4c5775..00000000 Binary files a/doc/source/images/Report-Overview.png and /dev/null differ diff --git a/doc/source/images/Report-SLA-Overview.png b/doc/source/images/Report-SLA-Overview.png deleted file mode 100644 index 84d11b9c..00000000 Binary files a/doc/source/images/Report-SLA-Overview.png and /dev/null differ diff --git a/doc/source/images/Report-SLA-Scenario.png b/doc/source/images/Report-SLA-Scenario.png deleted file mode 100644 index 823c470a..00000000 Binary files a/doc/source/images/Report-SLA-Scenario.png and /dev/null differ diff --git a/doc/source/images/Report-Scenario-Atomic.png b/doc/source/images/Report-Scenario-Atomic.png deleted file mode 100644 index 5e7fa0ad..00000000 Binary files a/doc/source/images/Report-Scenario-Atomic.png and /dev/null differ diff --git a/doc/source/images/Report-Scenario-Overview.png b/doc/source/images/Report-Scenario-Overview.png deleted file mode 100644 index 0f9ff8ed..00000000 Binary files a/doc/source/images/Report-Scenario-Overview.png and /dev/null differ diff --git a/doc/source/images/Report-Task-Actions-durations.png b/doc/source/images/Report-Task-Actions-durations.png deleted file mode 100644 index 66cb7054..00000000 Binary files a/doc/source/images/Report-Task-Actions-durations.png and /dev/null differ diff --git a/doc/source/images/Report-Task-Distribution.png b/doc/source/images/Report-Task-Distribution.png deleted file mode 100644 index 49106755..00000000 Binary files a/doc/source/images/Report-Task-Distribution.png and /dev/null differ diff --git a/doc/source/images/Report-Task-Failures.png b/doc/source/images/Report-Task-Failures.png deleted file mode 100644 index 19f857a0..00000000 Binary files a/doc/source/images/Report-Task-Failures.png and /dev/null differ diff --git a/doc/source/images/Report-Task-Input-file.png b/doc/source/images/Report-Task-Input-file.png deleted file mode 100644 index a7b112a0..00000000 Binary files a/doc/source/images/Report-Task-Input-file.png and /dev/null differ diff --git a/doc/source/images/Report-Task-Load-profile.png b/doc/source/images/Report-Task-Load-profile.png deleted file mode 100644 index 0790476d..00000000 Binary files a/doc/source/images/Report-Task-Load-profile.png and /dev/null differ diff --git a/doc/source/images/Report-Task-Overview.png b/doc/source/images/Report-Task-Overview.png deleted file mode 100644 index b411f157..00000000 Binary files a/doc/source/images/Report-Task-Overview.png and /dev/null differ diff --git a/doc/source/images/Report-Task-SLA.png b/doc/source/images/Report-Task-SLA.png deleted file mode 100644 index d8ae9153..00000000 Binary files a/doc/source/images/Report-Task-SLA.png and /dev/null differ diff --git a/doc/source/images/Report-Task-Scenario-Data-Aggregated.png b/doc/source/images/Report-Task-Scenario-Data-Aggregated.png 
deleted file mode 100644 index 1dea113c..00000000 Binary files a/doc/source/images/Report-Task-Scenario-Data-Aggregated.png and /dev/null differ diff --git a/doc/source/images/Report-Task-Scenario-Data-Per-iteration-profiler.png b/doc/source/images/Report-Task-Scenario-Data-Per-iteration-profiler.png deleted file mode 100644 index 4254834b..00000000 Binary files a/doc/source/images/Report-Task-Scenario-Data-Per-iteration-profiler.png and /dev/null differ diff --git a/doc/source/images/Report-Task-Scenario-Data-Per-iteration.png b/doc/source/images/Report-Task-Scenario-Data-Per-iteration.png deleted file mode 100644 index 745e9bf8..00000000 Binary files a/doc/source/images/Report-Task-Scenario-Data-Per-iteration.png and /dev/null differ diff --git a/doc/source/images/Report-Task-Subtask-configuration.png b/doc/source/images/Report-Task-Subtask-configuration.png deleted file mode 100644 index 476b51ea..00000000 Binary files a/doc/source/images/Report-Task-Subtask-configuration.png and /dev/null differ diff --git a/doc/source/images/Report-Task-Total-durations.png b/doc/source/images/Report-Task-Total-durations.png deleted file mode 100644 index 44c33ab4..00000000 Binary files a/doc/source/images/Report-Task-Total-durations.png and /dev/null differ diff --git a/doc/source/images/Report-Trends-Atomic-actions.png b/doc/source/images/Report-Trends-Atomic-actions.png deleted file mode 100644 index cba092b3..00000000 Binary files a/doc/source/images/Report-Trends-Atomic-actions.png and /dev/null differ diff --git a/doc/source/images/Report-Trends-Configuration.png b/doc/source/images/Report-Trends-Configuration.png deleted file mode 100644 index b158cf15..00000000 Binary files a/doc/source/images/Report-Trends-Configuration.png and /dev/null differ diff --git a/doc/source/images/Report-Trends-Overview.png b/doc/source/images/Report-Trends-Overview.png deleted file mode 100644 index a5d80b1c..00000000 Binary files a/doc/source/images/Report-Trends-Overview.png and /dev/null differ diff --git a/doc/source/images/Report-Trends-Total.png b/doc/source/images/Report-Trends-Total.png deleted file mode 100644 index 23a75113..00000000 Binary files a/doc/source/images/Report-Trends-Total.png and /dev/null differ diff --git a/doc/source/images/Report-Trends-single-run.png b/doc/source/images/Report-Trends-single-run.png deleted file mode 100644 index bcb3f33f..00000000 Binary files a/doc/source/images/Report-Trends-single-run.png and /dev/null differ diff --git a/doc/source/images/Report-Verify-filter-by-status.png b/doc/source/images/Report-Verify-filter-by-status.png deleted file mode 100644 index 9088696d..00000000 Binary files a/doc/source/images/Report-Verify-filter-by-status.png and /dev/null differ diff --git a/doc/source/images/Report-Verify-for-4-Verifications.png b/doc/source/images/Report-Verify-for-4-Verifications.png deleted file mode 100644 index 771beb3d..00000000 Binary files a/doc/source/images/Report-Verify-for-4-Verifications.png and /dev/null differ diff --git a/doc/source/images/Report-Verify-toggle-tags.png b/doc/source/images/Report-Verify-toggle-tags.png deleted file mode 100644 index a5694095..00000000 Binary files a/doc/source/images/Report-Verify-toggle-tags.png and /dev/null differ diff --git a/doc/source/images/Report-Verify-tracebacks.png b/doc/source/images/Report-Verify-tracebacks.png deleted file mode 100644 index 66e96191..00000000 Binary files a/doc/source/images/Report-Verify-tracebacks.png and /dev/null differ diff --git a/doc/source/images/Report-Verify-xfail.png 
b/doc/source/images/Report-Verify-xfail.png deleted file mode 100644 index 79028427..00000000 Binary files a/doc/source/images/Report-Verify-xfail.png and /dev/null differ diff --git a/doc/source/index.rst b/doc/source/index.rst deleted file mode 100644 index 32ff7489..00000000 --- a/doc/source/index.rst +++ /dev/null @@ -1,48 +0,0 @@ -.. - Copyright 2015 Mirantis Inc. All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -============== -What is Rally? -============== - -**OpenStack** is, undoubtedly, a really *huge* ecosystem of cooperative -services. **Rally** is a **benchmarking tool** that answers the question: -**"How does OpenStack work at scale?"**. To make this possible, Rally -**automates** and **unifies** multi-node OpenStack deployment, cloud -verification, benchmarking & profiling. Rally does it in a **generic** way, -making it possible to check whether OpenStack is going to work well on, say, a -1k-servers installation under high load. Thus it can be used as a basic tool -for an *OpenStack CI/CD system* that would continuously improve its SLA, -performance and stability. - -.. image:: ./images/Rally-Actions.png - :align: center - - -Contents -======== -.. toctree:: - :maxdepth: 2 - - overview/index - install_and_upgrade/index - quick_start/index - cli_reference - task/index - verification/index - plugins/index - contribute - feature_requests - project_info/index diff --git a/doc/source/install_and_upgrade/db_migrations.rst b/doc/source/install_and_upgrade/db_migrations.rst deleted file mode 120000 index 34cc0e7a..00000000 --- a/doc/source/install_and_upgrade/db_migrations.rst +++ /dev/null @@ -1 +0,0 @@ -../../../rally/common/db/sqlalchemy/migrations/README.rst \ No newline at end of file diff --git a/doc/source/install_and_upgrade/index.rst b/doc/source/install_and_upgrade/index.rst deleted file mode 100644 index 78f68210..00000000 --- a/doc/source/install_and_upgrade/index.rst +++ /dev/null @@ -1,24 +0,0 @@ -.. - Copyright 2015 Mirantis Inc. All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -========================= -Installation and upgrades -========================= - -.. toctree:: - :glob: - - install - db_migrations diff --git a/doc/source/install_and_upgrade/install.rst b/doc/source/install_and_upgrade/install.rst deleted file mode 100644 index f9b5f1d4..00000000 --- a/doc/source/install_and_upgrade/install.rst +++ /dev/null @@ -1,176 +0,0 @@ -.. - Copyright 2015 Mirantis Inc. All Rights Reserved. 
- - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -.. _install: - -Installation process -==================== - -Automated installation ----------------------- - -The easiest way to install Rally is by executing its `installation script`_: - -.. code-block:: bash - - wget -q -O- https://raw.githubusercontent.com/openstack/rally/master/install_rally.sh | bash - # or using curl - curl https://raw.githubusercontent.com/openstack/rally/master/install_rally.sh | bash - -The installation script will also check if all the software required -by Rally is already installed in your system; if it is run as the **root** user -and some dependency is missing, it will ask you whether you want to install -the required packages. - -By default it will install Rally in a virtualenv in ``~/rally`` when -run as a standard user, or system-wide when run as root. You can -install Rally in a **virtualenv** at a custom location by using the ``--target`` option: - -.. code-block:: bash - - ./install_rally.sh --target /foo/bar - -You can also install Rally system-wide by running the script as root -without the ``--target`` option: - -.. code-block:: bash - - sudo ./install_rally.sh - - -Run ``./install_rally.sh`` with the ``--help`` option to see a list of all -available options: - -.. code-block:: console - - $ ./install_rally.sh --help - Usage: install_rally.sh [options] - - This script will install rally either in the system (as root) or in a virtual environment. - - Options: - -h, --help Print this help text - -v, --verbose Verbose mode - -s, --system Instead of creating a virtualenv, install as - system package. - -d, --target DIRECTORY Install Rally virtual environment into DIRECTORY. - (Default: $HOME/rally). - -f, --overwrite Remove target directory if it already exists. - -y, --yes Do not ask for confirmation: assume a 'yes' reply - to every question. - -D, --dbtype TYPE Select the database type. TYPE can be one of - 'sqlite', 'mysql', 'postgres'. - Default: sqlite - --db-user USER Database user to use. Only used when --dbtype - is either 'mysql' or 'postgres'. - --db-password PASSWORD Password of the database user. Only used when - --dbtype is either 'mysql' or 'postgres'. - --db-host HOST Database host. Only used when --dbtype is - either 'mysql' or 'postgres'. - --db-name NAME Name of the database. Only used when --dbtype is - either 'mysql' or 'postgres'. - -p, --python EXE The python interpreter to use. Default: /usr/bin/python. - -**Note:** the script will check if all the software required by Rally -is already installed in your system. If this is not the case, it will -exit, suggesting the command to issue **as root** in order to -install the dependencies. - -You also have to set up the **Rally database** after the installation is -complete: - -.. code-block:: bash - - rally-manage db recreate
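-For example, a non-interactive, system-wide installation backed by MySQL could combine the options listed above as follows (a sketch; the credentials, host and database name are illustrative only): - -.. code-block:: bash - - sudo ./install_rally.sh --system --yes --dbtype mysql --db-user rally --db-password secret --db-host localhost --db-name rally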
-.. include:: ../../../devstack/README.rst - -Rally & Docker --------------- - -First you need to install Docker; Docker supplies `installation -instructions for various OSes`_. - -You can either use the official Rally Docker image, or build your own -from the Rally source. To do that, change directory to the root directory of -the Rally git repository and run: - -.. code-block:: bash - - docker build -t myrally . - -If you build your own Docker image, substitute ``myrally`` for -``rallyforge/rally`` in the commands below. - -The Rally Docker image is configured to store local settings and the -database in the user's home directory. To make these data persistent, -you may want to keep this directory outside of the container. This may -be done via the following steps: - -.. code-block:: bash - - sudo mkdir /var/lib/rally_container - sudo chown 65500 /var/lib/rally_container - docker run -it -v /var/lib/rally_container:/home/rally rallyforge/rally - -.. note:: - - In order for the volume to be accessible by the Rally user - (uid: 65500) inside the container, it must be accessible by UID - 65500 *outside* the container as well, which is why it is created - in ``/var/lib/rally_container``. Creating it in your home directory is only - likely to work if your home directory has excessively open - permissions (e.g., ``0755``), which is not recommended. - -You can find all task samples, docs and certification tasks at ``/opt/rally/``. -You may also want to save the last command as an alias: - -.. code-block:: bash - - echo 'alias dock_rally="docker run -it -v /var/lib/rally_container:/home/rally rallyforge/rally"' >> ~/.bashrc - -After executing ``dock_rally``, or ``docker run ...``, you will have -bash running inside the container with Rally installed. You may do -anything with Rally, but you need to create the database first: - -.. code-block:: console - - user@box:~/rally$ dock_rally - rally@1cc98e0b5941:~$ rally-manage db recreate - rally@1cc98e0b5941:~$ rally deployment list - There are no deployments. To create a new deployment, use: - rally deployment create - rally@1cc98e0b5941:~$ - -In case you have SELinux enabled and Rally fails to create the -database, try executing the following commands to put SELinux into -Permissive Mode on the host machine: - -.. code-block:: bash - - sed -i 's/SELINUX=enforcing/SELINUX=permissive/' /etc/selinux/config - setenforce permissive - -Rally currently has no SELinux policy, which is why it must be run in -Permissive mode for certain configurations. If you can help create an -SELinux policy for Rally, please contribute! - -More about Docker: https://www.docker.com/ - -.. references: - -.. _installation script: https://raw.githubusercontent.com/openstack/rally/master/install_rally.sh -.. _installation instructions for various OSes: https://docs.docker.com/engine/installation/ diff --git a/doc/source/miscellaneous/concepts.rst b/doc/source/miscellaneous/concepts.rst deleted file mode 100644 index 0314674c..00000000 --- a/doc/source/miscellaneous/concepts.rst +++ /dev/null @@ -1,414 +0,0 @@ -.. - Copyright 2014 Mirantis Inc. All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -..
_main_concepts: - -Main concepts of Rally -====================== - -Benchmark Scenarios ------------------- - -Concept -^^^^^^^ - -The concept of **benchmark scenarios** is a central one in Rally. Benchmark -scenarios are what Rally actually uses to **test the performance of an -OpenStack deployment**. They also play the role of main building blocks in the -configurations of benchmark tasks. Each benchmark scenario performs a small -**set of atomic operations**, thus testing some **simple use case**, usually -that of a specific OpenStack project. For example, the **"NovaServers"** -scenario group contains scenarios that use several basic operations available -in **nova**. The **"boot_and_delete_server"** benchmark scenario from that -group makes it possible to benchmark the performance of a sequence of only **two simple -operations**: it first **boots** a server (with customizable parameters) and -then **deletes** it. - - -User's view -^^^^^^^^^^^ - -From the user's point of view, Rally launches different benchmark scenarios -while performing some benchmark task. A **benchmark task** is essentially a set -of benchmark scenarios run against some OpenStack deployment in a specific -(and customizable) manner by the CLI command: - -.. code-block:: bash - - rally task start --task= - -Accordingly, the user may specify the names and parameters of benchmark -scenarios to be run in **benchmark task configuration files**. A typical -configuration file would have the following contents: - -.. code-block:: json - - { - "NovaServers.boot_server": [ - { - "args": { - "flavor_id": 42, - "image_id": "73257560-c59b-4275-a1ec-ab140e5b9979" - }, - "runner": {"times": 3}, - "context": {...} - }, - { - "args": { - "flavor_id": 1, - "image_id": "3ba2b5f6-8d8d-4bbe-9ce5-4be01d912679" - }, - "runner": {"times": 3}, - "context": {...} - } - ], - "CinderVolumes.create_volume": [ - { - "args": { - "size": 42 - }, - "runner": {"times": 3}, - "context": {...} - } - ] - } - - -In this example, the task configuration file specifies two benchmarks to be -run, namely **"NovaServers.boot_server"** and **"CinderVolumes.create_volume"** -(benchmark name = *ScenarioClassName.method_name*). Each benchmark scenario may -be started several times with different parameters. In our example, that's the -case with **"NovaServers.boot_server"**, which is used to test booting servers -from different images & flavors. - -Note that inside each scenario configuration, the benchmark scenario is -actually launched **3 times** (that is specified in the **"runner"** field). -The **"runner"** field can also specify in more detail how exactly the benchmark -scenario should be launched; we elaborate on that in the *"Scenario Runners"* -section below. - - -.. _ScenariosDevelopment: - -Developer's view -^^^^^^^^^^^^^^^^ - -From the developer's perspective, a benchmark scenario is a method marked by a -**@configure** decorator and placed in a class that inherits from the base -`Scenario`_. There may be arbitrarily many benchmark scenarios in a scenario -class; each of them should be referenced (in the task configuration file) -as *ScenarioClassName.method_name*. - -In the toy example below, we define a scenario class *MyScenario* with one -benchmark scenario *MyScenario.scenario*. This benchmark scenario tests the -performance of a sequence of two actions, implemented via private methods in the -same class. Both methods are marked with the **@atomic_action_timer** -decorator.
This allows Rally to handle those actions in a special way and, -after benchmarks complete, show runtime statistics not only for the whole -scenarios, but for separate actions as well. - -.. code-block:: python - - from rally.task import atomic - from rally.task import scenario - - - class MyScenario(scenario.Scenario): - """My class that contains benchmark scenarios.""" - - @atomic.action_timer("action_1") - def _action_1(self, **kwargs): - """Do something with the cloud.""" - - @atomic.action_timer("action_2") - def _action_2(self, **kwargs): - """Do something with the cloud.""" - - @scenario.configure() - def scenario(self, **kwargs): - self._action_1() - self._action_2() - - - -Scenario runners ----------------- - -Concept -^^^^^^^ - -**Scenario Runners** in Rally are entities that control the execution type and -order of benchmark scenarios. They support different running **strategies for -creating load on the cloud**, including simulating *concurrent requests* from -different users, periodic load, gradually growing load and so on. - - -User's view -^^^^^^^^^^^ - -Users can specify which type of load they would like to create on the cloud -through the **"runner"** section in the **task configuration file**: - -.. code-block:: json - - { - "NovaServers.boot_server": [ - { - "args": { - "flavor_id": 42, - "image_id": "73257560-c59b-4275-a1ec-ab140e5b9979" - }, - "runner": { - "type": "constant", - "times": 15, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 3 - }, - "quotas": { - "nova": { - "instances": 20 - } - } - } - } - ] - } - - -The scenario running strategy is specified by its **type** and also by some -type-specific parameters. Available types include: - -* **constant**, for creating a constant load by running the scenario for a - fixed number of **times**, possibly in parallel (that's controlled by the - *"concurrency"* parameter). - -* **constant_for_duration**, which works exactly like **constant**, but runs the - benchmark scenario until a specified number of seconds elapses - (the **"duration"** parameter). -* **rps**, which runs the benchmark scenario at a fixed rate, with the number of - runs per second specified in the **"rps"** field. -* **serial**, which is very useful for testing new scenarios since it just runs the - benchmark scenario for a fixed number of **times** in a single thread. - - -Also, all scenario runners can be provided (again, through the **"runner"** -section in the config file) with an optional *"timeout"* parameter, which -specifies the timeout for each single benchmark scenario run (in seconds).
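-For instance, a *"runner"* section that creates load at a fixed rate with **rps** and sets a per-iteration *"timeout"* might look like this (a sketch; the parameter values are illustrative only): - -.. code-block:: json - - { - "runner": { - "type": "rps", - "times": 100, - "rps": 2, - "timeout": 30 - } - }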
-.. _RunnersDevelopment: - -Developer's view -^^^^^^^^^^^^^^^^ - -It is possible to extend Rally with new Scenario Runner types, if needed. -Basically, each scenario runner should be implemented as a subclass of the -base `ScenarioRunner`_ class and located in the -`rally.plugins.common.runners package`_. The interface each scenario runner -class should support is fairly simple: - -.. code-block:: python - - from rally.task import runner - from rally import consts - - class MyScenarioRunner(runner.ScenarioRunner): - """My scenario runner.""" - - # This string is what the user will have to specify in the task - # configuration file (in "runner": {"type": ...}) - - __execution_type__ = "my_scenario_runner" - - - # CONFIG_SCHEMA is used to automatically validate the input - # config of the scenario runner, passed by the user in the task - # configuration file. - - CONFIG_SCHEMA = { - "type": "object", - "$schema": consts.JSON_SCHEMA, - "properties": { - "type": { - "type": "string" - }, - "some_specific_property": {...} - } - } - - def _run_scenario(self, cls, method_name, ctx, args): - """Run the scenario 'method_name' from scenario class 'cls' - with arguments 'args', given a context 'ctx'. - - This method should return the results dictionary wrapped in - a runner.ScenarioRunnerResult object (not plain JSON) - """ - results = ... - - return runner.ScenarioRunnerResult(results) - - - - -Benchmark contexts ------------------- - -Concept -^^^^^^^ - -The notion of **contexts** in Rally is essentially used to define different -types of **environments** in which benchmark scenarios can be launched. Those -environments are usually specified by such parameters as the number of -**tenants and users** that should be present in an OpenStack project, the -**roles** granted to those users, extended or narrowed **quotas** and so on. - - -User's view -^^^^^^^^^^^ - -From the user's perspective, contexts in Rally are manageable via the **task -configuration files**. In a typical configuration file, each benchmark scenario -to be run is supplied not only with the information about its arguments and how -many times it should be launched, but also with a special **"context"** -section. In this section, the user may configure a number of contexts the -scenarios should be run within. - -In the example below, the **"users" context** specifies that the -*"NovaServers.boot_server"* scenario should be run from **1 tenant** having -**3 users** in it. Bearing in mind that the default quota for the number of -instances is 10 instances per tenant, it is also reasonable to extend it to, -say, **20 instances** in the **"quotas" context**. Otherwise the scenario would -eventually fail, since it tries to boot a server 15 times from a single tenant. - -.. code-block:: json - - { - "NovaServers.boot_server": [ - { - "args": { - "flavor_id": 42, - "image_id": "73257560-c59b-4275-a1ec-ab140e5b9979" - }, - "runner": { - "type": "constant", - "times": 15, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 3 - }, - "quotas": { - "nova": { - "instances": 20 - } - } - } - } - ] - } - - -.. _ContextDevelopment: - -Developer's view -^^^^^^^^^^^^^^^^ - -From the developer's view, contexts management is implemented via **Context -classes**. Each context type that can be specified in the task configuration -file corresponds to a certain subclass of the base `Context`_ class. Every -context class should implement a fairly simple **interface**: - -..
code-block:: python - - from rally.task import context - from rally import consts - - @context.configure(name="your_context", # Corresponds to the context field name in task configuration files - order=100500, # a number specifying the priority with which the context should be set up - hidden=False) # True if the context cannot be configured through the input task file - class YourContext(context.Context): - """Yet another context class.""" - - # The schema of the context configuration format - CONFIG_SCHEMA = { - "type": "object", - "$schema": consts.JSON_SCHEMA, - "additionalProperties": False, - "properties": { - "property_1": , - "property_2": - } - } - - def __init__(self, context): - super(YourContext, self).__init__(context) - # Initialize the necessary stuff - - def setup(self): - # Prepare the environment in the desired way - - def cleanup(self): - # Cleanup the environment properly - -Consequently, the algorithm for initializing the contexts can roughly be seen as -follows: - -.. code-block:: python - - context1 = Context1(ctx) - context2 = Context2(ctx) - context3 = Context3(ctx) - - context1.setup() - context2.setup() - context3.setup() - - - - context3.cleanup() - context2.cleanup() - context1.cleanup() - -- where the order in which the contexts are set up depends on the value of - their *order* attribute. Contexts with a lower *order* have higher priority: - *1xx* contexts are reserved for user-related stuff (e.g. users/tenants - creation, role assignment etc.), *2xx* - for quotas etc. - -The *hidden* attribute defines whether the context should be a *hidden* one. -**Hidden contexts** cannot be configured by end-users through the task -configuration file as shown above, but should be specified by a benchmark -scenario developer through a special *@scenario.configure(context={...})* -decorator. Hidden contexts are typically needed to satisfy scenario-specific -needs that don't require the end-user's attention. For example, the hidden -**"cleanup" context** (:mod:`rally.plugins.openstack.context.cleanup`) is used -to perform a generic cleanup after running a benchmark, so users can't change its -configuration via the task file and break their cloud. - -If you want to dive deeper, also see the context manager -(:mod:`rally.task.context`) class that actually implements the algorithm -described above. - -.. references: - -.. _Scenario: https://github.com/openstack/rally/blob/0.1/rally/task/scenario.py#L94 -.. _ScenarioRunner: https://github.com/openstack/rally/blob/master/rally/task/runner.py -.. _rally.plugins.common.runners package: https://github.com/openstack/rally/tree/master/rally/plugins/common/runners -.. _Context: https://github.com/openstack/rally/blob/master/rally/task/context.py diff --git a/doc/source/overview/glossary.rst b/doc/source/overview/glossary.rst deleted file mode 100644 index 23fc735b..00000000 --- a/doc/source/overview/glossary.rst +++ /dev/null @@ -1,175 +0,0 @@ -:tocdepth: 1 - -.. _glossary: - -======== -Glossary -======== - -.. warning:: Our glossary is not complete yet, but the Rally - team is working on improving it. If you cannot find the definition - you are interested in, feel free to ping us via IRC - (#openstack-rally channel at Freenode) or via e-mail - (openstack-dev@lists.openstack.org with the tag [Rally]). - -.. contents:: - :depth: 1 - :local: - -Common ====== - -Alembic ------- - -A lightweight database migration tool which powers Rally migrations.
Read more -at the `Official Alembic documentation `_ - -DB Migrations ------------- - -Rally supports database schema and data transformations, which are also -known as migrations. This allows you to get your data up-to-date with the -latest Rally version. - -Rally ----- - -A testing tool that automates and unifies multi-node OpenStack deployment -and cloud verification. It can be used as a basic tool -for an OpenStack CI/CD system that would continuously improve its SLA, -performance and stability. - -Rally Config ------------- - -Rally behavior can be customized by editing its configuration file, -*rally.conf*, in `configparser -`_ -format. While being installed, Rally generates a config with default -values from its `sample -`_. -When started, Rally searches for its config in -"~/.rally/rally.conf" and "/etc/rally/rally.conf". - -Rally DB -------- - -Rally uses a relational database as data storage. Several database backends -are supported: SQLite (default), PostgreSQL, and MySQL. -The database connection can be set via the configuration file option -*[database]/connection*. - -Rally Plugin ------------- - -Most parts of Rally -`are pluggable `_. -Scenarios, runners, contexts and even charts for the HTML report are plugins. -It is easy to create your own plugin and use it. Read more at the -`plugin reference `_. - -Deployment ========== - -Deployment ---------- - -A set of information about the target environment (for example: URI and -authentication credentials) which is saved in the database. It is used -to define the target system for testing each time a task is started. -It has a "type" value which changes the task behavior for the selected -target system; for example, the type "openstack" will enable OpenStack -authentication and services. - -Task ==== - -Cleanup ------- - -This is a specific context which removes all resources on the target -system that were created by the current task. If some Rally-related -resources remain, please `file a bug -`_ and attach the task file and the -list of remaining resources. - -Context ------- - -A type of plugin that can run some actions on the target environment -before the workloads start and after the last workload finishes. This -allows, for example, preparing the environment for workloads (e.g., -create resources and change parameters) and restoring the environment -later. Each Context must implement ``setup()`` and ``cleanup()`` -methods. - -Input task ---------- - -A file that describes how to run a Rally Task. It can be in JSON or -YAML format. The *rally task start* command needs this file to run -the task. The input task is pre-processed by the `Jinja2 -`_ templating engine, so it is very easy to -create repeated parts or calculate specific values at runtime. It is -also possible to pass values via CLI arguments, using the -*--task-args* or *--task-args-file* options. - -Runner ------ - -This is a Rally plugin which decides how to run Workloads: for -example, serially in a single process, or concurrently. - -Scenario -------- - -Synonym for `Workload <#workload>`_ - -Service ------- - -An abstraction layer that represents the target environment API. For -example, this can be some OpenStack service. A Service provides API -versioning and action timings, simplifies API calls, and reduces code -duplication. It can be used in any Rally plugin. - -SLA --- - -Service-Level Agreement (Success Criteria). -Allows you to determine whether a subtask or workload is successful -by setting success criteria rules.
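-For illustration, success criteria are set in the *"sla"* section of an input task; a minimal sketch using the *failure_rate* criterion (the same criterion also appears in the hook example later in these docs): - -.. code-block:: json - - { - "sla": { - "failure_rate": {"max": 0} - } - }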
- -Subtask ------- - -A part of a Task. There can be many subtasks in a single Task. - -Task ---- - -An entity which includes all the necessary data for a test run, and -the results of this run. - -Workload -------- - -An important part of a Task: a plugin which is run by the runner, -usually in a separate thread. Workloads are grouped into Subtasks. - -Verify ====== - -Rally can run different subunit-based testing tools against a target -environment, for example `tempest -`_ for OpenStack. - -.. _glossary-verification: - -Verification ------------- - -A result of running some third-party subunit-based testing tool. diff --git a/doc/source/overview/index.rst b/doc/source/overview/index.rst deleted file mode 100644 index b91df287..00000000 --- a/doc/source/overview/index.rst +++ /dev/null @@ -1,25 +0,0 @@ -.. - Copyright 2015 Mirantis Inc. All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -====================== -Rally project overview -====================== - -.. toctree:: - :glob: - - overview - glossary - user_stories diff --git a/doc/source/overview/overview.rst b/doc/source/overview/overview.rst deleted file mode 100644 index f9e4be2b..00000000 --- a/doc/source/overview/overview.rst +++ /dev/null @@ -1,183 +0,0 @@ -.. - Copyright 2015 Mirantis Inc. All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -.. _overview: - -.. contents:: - :depth: 1 - :local: - -Overview ======== - -**Rally** is a **benchmarking tool** that **automates** and **unifies** -multi-node OpenStack deployment, cloud verification, benchmarking & profiling. -It can be used as a basic tool for an *OpenStack CI/CD system* that would -continuously improve its SLA, performance and stability. - -Who Is Using Rally ------------------- - -Here's a small selection of some of the many companies using Rally: - -.. image:: ../images/Rally_who_is_using.png - :align: center - -Use Cases --------- - -Let's take a look at three major high-level use cases of Rally: - -.. image:: ../images/Rally-UseCases.png - :align: center - - -Generally, there are a few typical cases where Rally proves to be of great use: - - 1. Automate measuring & profiling focused on how new code changes affect - OpenStack performance; - - 2. Using the Rally profiler to detect scaling & performance issues; - - 3. Investigate how different deployments affect OpenStack performance: - - * Find the set of suitable OpenStack deployment architectures; - * Create deployment specifications for different loads (amount of - controllers, swift nodes, etc.); - - 4.
Automate the search for hardware best suited for a particular OpenStack - cloud; - - 5. Automate the production cloud specification generation: - - * Determine terminal loads for basic cloud operations: VM start & stop, - Block Device create/destroy & various OpenStack API methods; - * Check the performance of basic cloud operations under different - loads. - - -Real-life examples ------------------- - -To be concrete, let's investigate a couple of real-life examples of Rally in -action. - - -How does amqp_rpc_single_reply_queue affect performance? -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Rally allowed us to reveal quite an interesting fact about **Nova**. We used -the *NovaServers.boot_and_delete* benchmark scenario to see how the -*amqp_rpc_single_reply_queue* option affects VM bootup time (it turns on a kind -of fast RPC). Some time ago it was -`shown `_ -that cloud performance can be boosted by turning it on, so we naturally decided -to check this result with Rally. To run this test, we issued requests for -booting and deleting VMs for a number of concurrent users ranging from 1 to 30 -with and without the investigated option. For each group of users, a total -of 200 requests was issued. The averaged time per request is shown below: - -.. image:: ../images/Amqp_rpc_single_reply_queue.png - :align: center - -**So Rally unexpectedly indicated that the -*amqp_rpc_single_reply_queue* option does affect cloud performance, -but in the opposite way from what was previously thought.** - - -Performance of Nova list command -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Another interesting result comes from the *NovaServers.boot_and_list_server* -scenario, which enabled us to launch the following benchmark with Rally: - - * **Benchmark environment** (which we also call **"Context"**): 1 temporary - OpenStack user. - * **Benchmark scenario**: boot a single VM from this user & list all VMs. - * **Benchmark runner** setting: repeat this procedure 200 times in a - continuous way. - -During the execution of this benchmark scenario, the user has more and more VMs -on each iteration. Rally has shown that in this case, the performance of the -**VM list** command in Nova degrades much faster than one might expect: - -.. image:: ../images/Rally_VM_list.png - :align: center - - -Complex scenarios -^^^^^^^^^^^^^^^^^ - -In fact, the vast majority of Rally scenarios are expressed as a sequence of -**"atomic" actions**. For example, *NovaServers.snapshot* is composed of 6 -atomic actions: - - 1. boot VM - 2. snapshot VM - 3. delete VM - 4. boot VM from snapshot - 5. delete VM - 6. delete snapshot - -Rally measures not only the performance of the benchmark scenario as a whole, -but also that of single atomic actions. As a result, Rally also plots the -atomic actions performance data for each benchmark iteration in quite a -detailed way: - -.. image:: ../images/Rally_snapshot_vm.png - :align: center - - -Architecture ------------- - -OpenStack projects are usually implemented *"as-a-Service"*, and Rally supports -this approach as well. In addition, it implements a *CLI-driven* approach that does not -require a daemon: - - 1. **Rally as-a-Service**: Run rally as a set of daemons that present a Web - UI *(work in progress)*, so one RaaS instance could be used by a whole team. - 2. **Rally as-an-App**: Rally as just a lightweight and portable CLI app - (without any daemons) that makes it simple to use & develop. - -The diagram below shows how this is possible: - -..
image:: ../images/Rally_Architecture.png - :align: center - -The actual **Rally core** consists of 4 main components, listed below in the -order they go into action: - - 1. **Server Providers** - provide a **unified interface** for interaction - with different **virtualization technologies** (*LXC*, *Virsh* etc.) and - **cloud suppliers** (like *Amazon*): it does so via *ssh* access and within - one *L3 network*; - 2. **Deploy Engines** - deploy some OpenStack distribution (like *DevStack* - or *FUEL*) before any benchmarking procedures take place, using servers - retrieved from Server Providers; - 3. **Verification** - runs *Tempest* (or another specific set of tests) - against the deployed cloud to check that it works correctly, collects - results & presents them in human-readable form; - 4. **Benchmark Engine** - allows you to write parameterized benchmark scenarios - & run them against the cloud. - -It should become fairly obvious why the Rally core needs to be split into these parts -if you take a look at the following diagram that visualizes a rough **algorithm -for starting benchmarking OpenStack at scale**. Keep in mind that there might -be lots of different ways to set up virtual servers, as well as to deploy -OpenStack to them. - -.. image:: ../images/Rally_QA.png - :align: center diff --git a/doc/source/overview/stories b/doc/source/overview/stories deleted file mode 120000 index bb3efd11..00000000 --- a/doc/source/overview/stories +++ /dev/null @@ -1 +0,0 @@ -../../user_stories/ \ No newline at end of file diff --git a/doc/source/overview/user_stories.rst b/doc/source/overview/user_stories.rst deleted file mode 100644 index 11907d5f..00000000 --- a/doc/source/overview/user_stories.rst +++ /dev/null @@ -1,31 +0,0 @@ -.. - Copyright 2015 Mirantis Inc. All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -.. _user_stories: - -User stories -============ - -Many users of Rally were able to make interesting discoveries concerning their -OpenStack clouds using our benchmarking tool. The numerous user stories presented -below show how Rally has made it possible to find performance bugs and validate -improvements for different OpenStack installations. - - -.. toctree:: - :glob: - :maxdepth: 1 - - stories/** diff --git a/doc/source/plugins/implementation/context_plugin.rst b/doc/source/plugins/implementation/context_plugin.rst deleted file mode 100644 index 7a89319b..00000000 --- a/doc/source/plugins/implementation/context_plugin.rst +++ /dev/null @@ -1,143 +0,0 @@ -.. - Copyright 2016 Mirantis Inc. All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the - License for the specific language governing permissions and limitations - under the License. - -.. _plugins_context_plugin: - - -Context as a plugin -=================== - -So what do contexts do? These plugins are executed before the -scenario iterations start. For example, a context plugin could create -resources (e.g., download 10 images) that will be used by the -scenarios. All created objects must be put into the *self.context* -dict, through which they will be available in the scenarios. Let's -create a simple context plugin that adds a flavor to the environment -before the benchmark task starts and deletes it after it finishes. - -Creation -^^^^^^^^ - -Inherit a class for your plugin from the base *Context* class. Then, -implement the Context API: the *setup()* method that creates a flavor and the -*cleanup()* method that deletes it. - -.. code-block:: python - - from rally.task import context - from rally.common import logging - from rally import consts - from rally import osclients - - LOG = logging.getLogger(__name__) - - - @context.configure(name="create_flavor", order=1000) - class CreateFlavorContext(context.Context): - """This sample creates a flavor with specified options before task starts - and deletes it after task completion. - - To create your own context plugin, inherit it from - rally.task.context.Context - """ - - CONFIG_SCHEMA = { - "type": "object", - "$schema": consts.JSON_SCHEMA, - "additionalProperties": False, - "properties": { - "flavor_name": { - "type": "string", - }, - "ram": { - "type": "integer", - "minimum": 1 - }, - "vcpus": { - "type": "integer", - "minimum": 1 - }, - "disk": { - "type": "integer", - "minimum": 1 - } - } - } - - def setup(self): - """This method is called before the task starts.""" - try: - # use rally.osclients to get the necessary client instance - nova = osclients.Clients(self.context["admin"]["credential"]).nova() - # and then do what you need with this client - self.context["flavor"] = nova.flavors.create( - # context settings are stored in self.config - name=self.config.get("flavor_name", "rally_test_flavor"), - ram=self.config.get("ram", 1), - vcpus=self.config.get("vcpus", 1), - disk=self.config.get("disk", 1)).to_dict() - LOG.debug("Flavor with id '%s' created" % self.context["flavor"]["id"]) - except Exception as e: - # "%s" % e works in both Python 2 and 3, unlike e.message - msg = "Can't create flavor: %s" % e - if logging.is_debug(): - LOG.exception(msg) - else: - LOG.warning(msg) - - def cleanup(self): - """This method is called after the task finishes.""" - try: - nova = osclients.Clients(self.context["admin"]["credential"]).nova() - nova.flavors.delete(self.context["flavor"]["id"]) - LOG.debug("Flavor '%s' deleted" % self.context["flavor"]["id"]) - except Exception as e: - msg = "Can't delete flavor: %s" % e - if logging.is_debug(): - LOG.exception(msg) - else: - LOG.warning(msg) - - -Usage -^^^^^ - -You can refer to your context plugin in the benchmark task configuration -files in the same way as any other context: - -..
code-block:: json - - { - "Dummy.dummy": [ - { - "args": { - "sleep": 0.01 - }, - "runner": { - "type": "constant", - "times": 5, - "concurrency": 1 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - }, - "create_flavor": { - "ram": 1024 - } - } - } - ] - } diff --git a/doc/source/plugins/implementation/hook_and_trigger_plugins.rst b/doc/source/plugins/implementation/hook_and_trigger_plugins.rst deleted file mode 100644 index 1a1a0a50..00000000 --- a/doc/source/plugins/implementation/hook_and_trigger_plugins.rst +++ /dev/null @@ -1,402 +0,0 @@ -.. - Copyright 2016 Mirantis Inc. All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -.. _hook_and_trigger_plugins: - - -Hooks. Hook trigger plugins -=========================== - - -Why Hooks? ----------- - -All Rally workloads repeat their actions as many times as configured by the -runner. Once a workload is running, there is no way to interrupt the runner in -order to evaluate how a change or a restart event affects the stability of the -cloud under test. For example, we might want to test how a configuration change -or the restart of a cloud component would affect performance and stability. - -Task hooks were added to fill this gap and allow Rally to be used for reliability -and high availability testing. Generally, hooks make it possible to perform -arbitrary actions at a specified iteration or a specified time after the workload -has been started. - -The task HTML report also provides the results of hook execution. They can contain -graphical or textual information with timing and statistics. - - -Hooks & Triggers Overview -------------------------- - - -Architecture -^^^^^^^^^^^^ - -Rally uses runners to specify how many times the workload should be executed. -Hooks do not use runners; instead, they rely on trigger plugins to specify when -and how many times a hook should be called. Therefore hooks are isolated from -workload runners and do not affect them, because each hook is executed in a -separate thread. - - -Sample of usage -^^^^^^^^^^^^^^^ - -Hooks can be added to the task configuration. Let's take a look at a hook -configuration: - -.. code-block:: json - - { - "name": "sys_call", - "args": "/bin/echo 123", - "trigger": { - "name": "event", - "args": { - "unit": "iteration", - "at": [5, 50, 200, 1000] - } - } - } - - -It specifies the hook plugin named "sys_call". The "args" field contains a string -that will be used by the sys_call plugin; for any other hook plugin it -can contain any other Python object that should be passed to the hook. -The "trigger" field specifies which trigger plugin should be used to run this hook; -it contains the similar fields "name" and "args", which represent the trigger -plugin name and the arguments for the trigger plugin. In this example the "event" trigger -is specified and configured to run the hook at the 5th, 50th, 200th and 1000th -iterations.
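-A hook can also be triggered on elapsed time instead of iterations; here is a sketch of the same hook with a time-based event trigger (the time values are illustrative only): - -.. code-block:: json - - { - "name": "sys_call", - "args": "/bin/echo 123", - "trigger": { - "name": "event", - "args": { - "unit": "time", - "at": [10, 60, 120] - } - } - }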
Here is a full task config that contains the previous hook configuration: - -.. code-block:: json - - { - "Dummy.dummy": [ - { - "args": { - "sleep": 0.01 - }, - "runner": { - "type": "constant", - "times": 1500, - "concurrency": 1 - }, - "hooks": [ - { - "name": "sys_call", - "args": "/bin/echo 123", - "trigger": { - "name": "event", - "args": { - "unit": "iteration", - "at": [5, 50, 200, 1000] - } - } - } - ] - } - ] - } - - -.. note:: - In this example, the runner is configured to run the workload 1500 times, so there - is a limit on iterations: a hook will be triggered only if the corresponding - iteration is actually started by the runner. In other words, if the trigger specifies an - iteration outside the scope of the runner's iterations, that trigger will not be - called. - -The task report for this example will contain minimal information about hook -execution: the duration of each hook call and its status (success or failure). - -Let's take a look at a more complicated config that can produce graphical -and textual information. - -.. code-block:: yaml - - --- - Dummy.dummy: - - - args: - sleep: 0.75 - runner: - type: "constant" - times: 20 - concurrency: 2 - hooks: - - name: sys_call - description: Run script - args: sh rally/rally-jobs/extra/hook_example_script.sh - trigger: - name: event - args: - unit: iteration - at: [2, 5, 8, 13, 17] - - name: sys_call - description: Show time - args: date +%Y-%m-%dT%H:%M:%S - trigger: - name: event - args: - unit: time - at: [0, 2, 5, 6, 9] - - name: sys_call - description: Show system name - args: uname -a - trigger: - name: event - args: - unit: iteration - at: [2, 3, 4, 5, 6, 8, 10, 12, 13, 15, 17, 18] - sla: - failure_rate: - max: 0 - - -hook_example_script.sh generates dummy output in JSON format. The graphical -information format is the same as for workloads, and the same types of -charts are supported for hooks. - -Here is a report that shows an aggregated table and a chart with hook results: - -.. image:: ../../images/Hook-Aggregated-Report.png - -Here is a report that shows a lines chart and a pie chart for the first hook on -the second iteration: - -.. image:: ../../images/Hook-Per-Hook-Report.png - -Browse existing Hooks_ and Triggers_. - - -Writing your own Hook plugin ----------------------------- - -Problem description -^^^^^^^^^^^^^^^^^^^ - -A hook plugin should implement a custom action that can be performed once or multiple -times during the workload. Examples of such actions might be the following: - -- A destructive action inside the cloud (`Fault Injection`_) -- Getting information about the current state of the cloud (load/health) -- Upgrading/downgrading a cloud component -- Changing the cloud configuration -- etc. - - -Plugin code -^^^^^^^^^^^ - -The following example shows a simple hook that performs a system call. -It inherits from the base *Hook* class and implements the ``run()`` -method: - -.. code-block:: python - - import shlex - import subprocess - - from rally import consts - from rally.task import hook - - - @hook.configure(name="simple_sys_call") - class SimpleSysCallHook(hook.Hook): - """Performs system call.""" - - CONFIG_SCHEMA = { - "$schema": consts.JSON_SCHEMA, - "type": "string", - } - - def run(self): - proc = subprocess.Popen(shlex.split(self.config), - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT) - proc.wait() - if proc.returncode: - self.set_error( - exception_name="n/a", # no exception class - description="Subprocess returned {}".format(proc.returncode), - details=proc.stdout.read(), - ) - -Any exceptions raised during the execution of the ``run()`` method will be caught by the Hook -base class and saved as a result.
However, a hook should manually call -``Hook.set_error()`` to indicate a logical error when no exception is -raised. - -There is also a method for saving chart data: ``Hook.add_output()``.
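-A minimal sketch of saving chart data from inside a hook's ``run()`` method (the ``complete`` keyword and the chart structure below mirror the workload output format; treat the exact field values as illustrative assumptions): - -.. code-block:: python - - # inside a Hook subclass, e.g. at the end of run() - self.add_output( - complete={"title": "Hook results", - "chart_plugin": "Pie", - "data": [["success", 1], ["failure", 0]]}) - - -Plugin Placement -^^^^^^^^^^^^^^^^ - -There are two folders for hook plugins: - -- `OpenStack Hooks`_ -- `Common Hooks`_ - - -Sample of task that uses Hook -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -.. code-block:: json - - { - "Dummy.dummy": [ - { - "args": { - "sleep": 0.01 - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 1 - }, - "hooks": [ - { - "name": "simple_sys_call", - "args": "/bin/echo 123", - "trigger": { - "name": "event", - "args": { - "unit": "iteration", - "at": [3, 6] - } - } - } - ] - } - ] - } - - -Results of task execution -^^^^^^^^^^^^^^^^^^^^^^^^^ - -The result of the previous task example: - -.. image:: ../../images/Hook-Results.png - - -Writing your own Trigger plugin -------------------------------- - - -Problem description -^^^^^^^^^^^^^^^^^^^ - -A trigger plugin should implement an event processor that decides whether or not to -start the hook. Rally has two basic triggers that should cover most cases: - -- `Event Trigger`_ -- `Periodic Trigger`_ - - -Plugin code -^^^^^^^^^^^ - -This example shows the code of the existing Event trigger: - -.. code-block:: python - - from rally import consts - from rally.task import trigger - - - @trigger.configure(name="event") - class EventTrigger(trigger.Trigger): - """Triggers hook on specified event and list of values.""" - - CONFIG_SCHEMA = { - "type": "object", - "$schema": consts.JSON_SCHEMA, - "oneOf": [ - { - "properties": { - "unit": {"enum": ["time"]}, - "at": { - "type": "array", - "minItems": 1, - "uniqueItems": True, - "items": { - "type": "integer", - "minimum": 0, - } - }, - }, - "required": ["unit", "at"], - "additionalProperties": False, - }, - { - "properties": { - "unit": {"enum": ["iteration"]}, - "at": { - "type": "array", - "minItems": 1, - "uniqueItems": True, - "items": { - "type": "integer", - "minimum": 1, - } - }, - }, - "required": ["unit", "at"], - "additionalProperties": False, - }, - ] - } - - def get_listening_event(self): - return self.config["unit"] - - def on_event(self, event_type, value=None): - if not (event_type == self.get_listening_event() - and value in self.config["at"]): - # do nothing - return - super(EventTrigger, self).on_event(event_type, value) - - -Trigger plugins must override two methods: - -- ``get_listening_event`` - this method should return the currently configured - event name (so far Rally supports only "time" and "iteration"); -- ``on_event`` - this method is called each time the event occurs. - It calls the base method when the hook is to be triggered on the specified event. - - -Plugin Placement -^^^^^^^^^^^^^^^^ - -All trigger plugins should be placed in the `Trigger folder`_. - - -.. references: - -.. _Hooks: ../plugin_reference.html#task-hooks -.. _Triggers: ../plugin_reference.html#task-hook-triggers -.. _Fault Injection: ../plugin_reference.html#fault-injection-hook -.. _Event Trigger: ../plugin_reference.html#event-hook-trigger -.. _Periodic Trigger: ../plugin_reference.html#periodic-hook-trigger -.. _Common Hooks: https://github.com/openstack/rally/tree/master/rally/plugins/common/hook -.. _OpenStack Hooks: https://github.com/openstack/rally/tree/master/rally/plugins/openstack/hook -..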
_Trigger folder: https://github.com/openstack/rally/tree/master/rally/plugins/common/trigger diff --git a/doc/source/plugins/implementation/runner_plugin.rst b/doc/source/plugins/implementation/runner_plugin.rst deleted file mode 100644 index cfefc09f..00000000 --- a/doc/source/plugins/implementation/runner_plugin.rst +++ /dev/null @@ -1,109 +0,0 @@ -.. - Copyright 2016 Mirantis Inc. All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -.. _plugins_runner_plugin: - -Scenario runner as a plugin -=========================== - -Let's create a scenario runner plugin that runs a given benchmark -scenario a random number of times (chosen at random from a given -range). - -Creation -^^^^^^^^ - -Inherit a class for your plugin from the base *ScenarioRunner* class -and implement its API (the *_run_scenario()* method): - -.. code-block:: python - - import random - - from rally.task import runner - from rally import consts - - - @runner.configure(name="random_times") - class RandomTimesScenarioRunner(runner.ScenarioRunner): - """Sample scenario runner plugin. - - Run scenario a random number of times, chosen between min_times and - max_times. - """ - - CONFIG_SCHEMA = { - "type": "object", - "$schema": consts.JSON_SCHEMA, - "properties": { - "type": { - "type": "string" - }, - "min_times": { - "type": "integer", - "minimum": 1 - }, - "max_times": { - "type": "integer", - "minimum": 1 - } - }, - "additionalProperties": True - } - - def _run_scenario(self, cls, method_name, context, args): - # runner settings are stored in self.config - min_times = self.config.get('min_times', 1) - max_times = self.config.get('max_times', 1) - - # randint includes both bounds and, unlike randrange, does not - # raise ValueError when min_times == max_times - for i in range(random.randint(min_times, max_times)): - run_args = (i, cls, method_name, - runner._get_scenario_context(context), args) - result = runner._run_scenario_once(run_args) - # use self._send_result for the result of each iteration - self._send_result(result) - -Usage -^^^^^ - -You can refer to your scenario runner in the benchmark task -configuration files in the same way as any other runner. Don't forget -to put your runner-specific parameters in the configuration as well -(*"min_times"* and *"max_times"* in our example): - -.. code-block:: json - - { - "Dummy.dummy": [ - { - "runner": { - "type": "random_times", - "min_times": 10, - "max_times": 20 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - } - } - } - ] - } - - - - -Different plugin samples are available `here `_. diff --git a/doc/source/plugins/implementation/scenario_plugin.rst b/doc/source/plugins/implementation/scenario_plugin.rst deleted file mode 100644 index f4a7c657..00000000 --- a/doc/source/plugins/implementation/scenario_plugin.rst +++ /dev/null @@ -1,90 +0,0 @@ -.. - Copyright 2016 Mirantis Inc. All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain
You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -.. _plugins_scenario_plugin: - - -Scenario as a plugin -==================== - -Let's create a simple scenario plugin that list flavors. - -Creation -^^^^^^^^ - -Inherit a class for your plugin from the base *OpenStackScenario* class and -implement a scenario method inside it. In our scenario, we'll first -list flavors as an ordinary user, and then repeat the same using admin -clients: - -.. code-block:: python - - from rally import consts - from rally.plugins.openstack import scenario - from rally.task import atomic - from rally.task import validation - - - @validation.add("required_services", services=[consts.Service.NOVA]) - @validation.add("required_platform", platform="openstack", users=True) - @scenario.configure(name="ScenarioPlugin.list_flavors_useless") - class ListFlavors(scenario.OpenStackScenario): - """Sample plugin which lists flavors.""" - - @atomic.action_timer("list_flavors") - def _list_flavors(self): - """Sample of usage clients - list flavors - - You can use self.context, self.admin_clients and self.clients which are - initialized on scenario instance creation""" - self.clients("nova").flavors.list() - - @atomic.action_timer("list_flavors_as_admin") - def _list_flavors_as_admin(self): - """The same with admin clients""" - self.admin_clients("nova").flavors.list() - - def run(self): - """List flavors.""" - self._list_flavors() - self._list_flavors_as_admin() - - -Usage -^^^^^ - -You can refer to your plugin scenario in the benchmark task -configuration files in the same way as any other scenarios: - -.. code-block:: json - - { - "ScenarioPlugin.list_flavors": [ - { - "runner": { - "type": "serial", - "times": 5, - }, - "context": { - "create_flavor": { - "ram": 512, - } - } - } - ] - } - -This configuration file uses the *"create_flavor"* context which we -created in :ref:`plugins_context_plugin`. diff --git a/doc/source/plugins/implementation/sla_plugin.rst b/doc/source/plugins/implementation/sla_plugin.rst deleted file mode 100644 index 4d3a911c..00000000 --- a/doc/source/plugins/implementation/sla_plugin.rst +++ /dev/null @@ -1,99 +0,0 @@ -.. - Copyright 2016 Mirantis Inc. All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -.. _plugins_sla_plugin: - - -SLA as a plugin -=============== - -Let's create an SLA (success criterion) plugin that checks whether the -range of the observed performance measurements does not exceed the -allowed maximum value. - -Creation -^^^^^^^^ - -Inherit a class for your plugin from the base *SLA* class and implement its API -(the *add_iteration(iteration)*, the *details()* method): - -.. 
diff --git a/doc/source/plugins/implementation/sla_plugin.rst b/doc/source/plugins/implementation/sla_plugin.rst
deleted file mode 100644
index 4d3a911c..00000000
--- a/doc/source/plugins/implementation/sla_plugin.rst
+++ /dev/null
@@ -1,99 +0,0 @@
-..
-   Copyright 2016 Mirantis Inc. All Rights Reserved.
-
-   Licensed under the Apache License, Version 2.0 (the "License"); you may
-   not use this file except in compliance with the License. You may obtain
-   a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-   WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-   License for the specific language governing permissions and limitations
-   under the License.
-
-.. _plugins_sla_plugin:
-
-
-SLA as a plugin
-===============
-
-Let's create an SLA (success criterion) plugin that checks that the
-range of the observed performance measurements does not exceed the
-allowed maximum value.
-
-Creation
-^^^^^^^^
-
-Inherit a class for your plugin from the base *SLA* class and implement its
-API (the *add_iteration(iteration)* and *details()* methods):
-
-.. code-block:: python
-
-    from rally.task import sla
-    from rally.common.i18n import _
-
-    @sla.configure(name="max_duration_range")
-    class MaxDurationRange(sla.SLA):
-        """Maximum allowed duration range in seconds."""
-
-        CONFIG_SCHEMA = {
-            "type": "number",
-            "minimum": 0.0,
-        }
-
-        def __init__(self, criterion_value):
-            super(MaxDurationRange, self).__init__(criterion_value)
-            # No iterations seen yet; the first successful iteration
-            # initializes both bounds (initializing them to 0 would make
-            # the minimum stick at 0 for any positive duration)
-            self._min = None
-            self._max = None
-
-        def add_iteration(self, iteration):
-            # Skip failed iterations (those that raised exceptions)
-            if iteration.get("error"):
-                return self.success  # This field is defined in the base class
-
-            # Update _min and _max with the observed duration
-            duration = iteration["duration"]
-            if self._min is None:
-                self._min = self._max = duration
-            else:
-                self._max = max(self._max, duration)
-                self._min = min(self._min, duration)
-
-            # Update successfulness based on the new max and min values
-            self.success = self._max - self._min <= self.criterion_value
-            return self.success
-
-        def details(self):
-            observed = (self._max - self._min) if self._min is not None else 0
-            return (_("%s - Maximum allowed duration range: %.2fs <= %.2fs") %
-                    (self.status(), observed, self.criterion_value))
-
-
-Usage
-^^^^^
-
-You can refer to your SLA in the benchmark task configuration files in
-the same way as any other SLA:
-
-.. code-block:: json
-
-    {
-        "Dummy.dummy": [
-            {
-                "args": {
-                    "sleep": 0.01
-                },
-                "runner": {
-                    "type": "constant",
-                    "times": 5,
-                    "concurrency": 1
-                },
-                "context": {
-                    "users": {
-                        "tenants": 1,
-                        "users_per_tenant": 1
-                    }
-                },
-                "sla": {
-                    "max_duration_range": 2.5
-                }
-            }
-        ]
-    }
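Stripped of the plugin machinery, the criterion itself is simple arithmetic. A self-contained sketch with made-up iteration durations (in seconds) shows exactly what the ``"max_duration_range": 2.5`` setting above evaluates:

.. code-block:: python

    # Hypothetical iteration durations, in seconds
    durations = [0.82, 1.47, 3.11, 1.05]
    criterion_value = 2.5  # "max_duration_range": 2.5

    observed_range = max(durations) - min(durations)
    success = observed_range <= criterion_value
    print("observed range %.2fs <= allowed %.2fs: %s"
          % (observed_range, criterion_value, success))
    # observed range 2.29s <= allowed 2.50s: True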
diff --git a/doc/source/plugins/index.rst b/doc/source/plugins/index.rst
deleted file mode 100644
index dc41d765..00000000
--- a/doc/source/plugins/index.rst
+++ /dev/null
@@ -1,84 +0,0 @@
-..
-   Copyright 2015 Mirantis Inc. All Rights Reserved.
-
-   Licensed under the Apache License, Version 2.0 (the "License"); you may
-   not use this file except in compliance with the License. You may obtain
-   a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-   WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-   License for the specific language governing permissions and limitations
-   under the License.
-
-.. _plugins:
-
-Rally Plugins
-=============
-
-Rally has a plugin-oriented architecture - in other words, the Rally team
-is trying to make every place in the code pluggable. Such an architecture
-leads to a large number of plugins. :ref:`plugin-reference` contains a full
-list of all official Rally plugins with detailed descriptions.
-
-
-.. toctree::
-   :maxdepth: 1
-
-   plugin_reference
-
-How plugins work
-----------------
-
-Rally provides an opportunity to create and use a **custom benchmark
-scenario, runner, SLA, deployment or context** as a **plugin**:
-
-.. image:: ../images/Rally-Plugins.png
-   :align: center
-
-Placement
----------
-
-Plugins can be quickly written and used, with no need to contribute
-them to the actual Rally code. Just place a Python module with your
-plugin class into the ``/opt/rally/plugins`` or ``~/.rally/plugins``
-directory (or its subdirectories), and it will be
-automatically loaded. Additional paths can be specified with the
-``--plugin-paths`` argument, or with the ``RALLY_PLUGIN_PATHS``
-environment variable, both of which accept comma-delimited
-lists. Both ``--plugin-paths`` and ``RALLY_PLUGIN_PATHS`` can list
-either plugin module files, or directories containing plugins. For
-instance, both of these are valid:
-
-.. code-block:: bash
-
-    rally --plugin-paths /rally/plugins ...
-    rally --plugin-paths /rally/plugins/foo.py,/rally/plugins/bar.py ...
-
-You can also use the ``unpack_plugins_samples.sh`` script from
-``samples/plugins``, which will automatically create the
-``~/.rally/plugins`` directory.
-
-How to create a plugin
-----------------------
-
-To create your own plugin, inherit your plugin class from the
-``plugin.Plugin`` class or one of its subclasses, and decorate it with
-``rally.task.scenario.configure``. A complete drop-in example module is
-shown after the table of contents below:
-
-.. code-block:: python
-
-    from rally.common.plugin import plugin
-    from rally.task import scenario
-
-    @scenario.configure(name="my_new_plugin_name")
-    class MyNewPlugin(plugin.Plugin):
-        pass
-
-
-.. toctree::
-   :glob:
-   :maxdepth: 1
-
-   implementation/**
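To make the placement and registration rules above concrete, here is a minimal, self-contained module that could be dropped into ``~/.rally/plugins/``; the file name, scenario name, and behaviour are purely illustrative:

.. code-block:: python

    # ~/.rally/plugins/my_noop_plugin.py  (hypothetical file name)
    from rally.task import scenario


    @scenario.configure(name="MyPlugins.noop")
    class MyNoopScenario(scenario.Scenario):
        """Does nothing; useful only to prove that plugin discovery works."""

        def run(self):
            pass

If discovery works, the new scenario should then show up in the output of ``rally plugin list``.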
diff --git a/doc/source/plugins/plugin_reference.rst b/doc/source/plugins/plugin_reference.rst
deleted file mode 100644
index 5b52ade1..00000000
--- a/doc/source/plugins/plugin_reference.rst
+++ /dev/null
@@ -1,13 +0,0 @@
-:tocdepth: 1
-
-.. _plugin-reference:
-
-
-Plugins Reference
-=================
-
-.. contents::
-  :depth: 2
-  :local:
-
-.. generate_plugin_reference::
diff --git a/doc/source/project_info/index.rst b/doc/source/project_info/index.rst
deleted file mode 100644
index f3ec86e5..00000000
--- a/doc/source/project_info/index.rst
+++ /dev/null
@@ -1,171 +0,0 @@
-..
-   Copyright 2015 Mirantis Inc. All Rights Reserved.
-
-   Licensed under the Apache License, Version 2.0 (the "License"); you may
-   not use this file except in compliance with the License. You may obtain
-   a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-   WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-   License for the specific language governing permissions and limitations
-   under the License.
-
-.. _project_info:
-
-Project Info and Release Notes
-==============================
-
-Maintainers
------------
-
-Project Team Lead (PTL)
-~~~~~~~~~~~~~~~~~~~~~~~
-
-
-+------------------------------+------------------------------------------------+
-| Contact                      | Area of interest                               |
-+------------------------------+------------------------------------------------+
-| | Andrey Kurilin             | * Chief Architect                              |
-| | andreykurilin (irc)        | * Release management                           |
-| | andreykurilin (gitter)     | * Community management                         |
-| | andr.kurilin@gmail.com     | * Core team management                         |
-| | akurilin@mirantis.com      | * Road Map                                     |
-+------------------------------+------------------------------------------------+
-
-| *If you would like to refactor the whole of Rally or have UX/community/other
-  issues, please contact me.*
-
-
-Project Core maintainers
-~~~~~~~~~~~~~~~~~~~~~~~~
-
-+------------------------------+------------------------------------------------+
-| Contact                      | Area of interest                               |
-+------------------------------+------------------------------------------------+
-| | Alexander Maretskiy        | * Rally reports                                |
-| | amaretskiy (irc)           | * Front-end                                    |
-| | amaretskiy@mirantis.com    |                                                |
-+------------------------------+------------------------------------------------+
-| | Anton Studenov             | * Rally Deployment                             |
-| | tohin (irc)                | * Task Hooks                                   |
-| | astudenov@mirantis.com     |                                                |
-+------------------------------+------------------------------------------------+
-| | Boris Pavlovic             | * Founder and ideological leader               |
-| | boris-42 (irc)             | * Architect                                    |
-| | boris@pavlovic.me          | * Rally task & benchmark                       |
-+------------------------------+------------------------------------------------+
-| | Chris St. Pierre           | * Rally task & benchmark                       |
-| | stpierre (irc)             | * Bash guru ;)                                 |
-| | cstpierr@cisco.com         |                                                |
-+------------------------------+------------------------------------------------+
-| | Illia Khudoshyn            | * Rally task & benchmark                       |
-| | ikhudoshyn (irc)           |                                                |
-| | ikhudoshyn@mirantis.com    |                                                |
-+------------------------------+------------------------------------------------+
-| | Kun Huang                  | * Rally task & benchmark                       |
-| | kun_huang (irc)            |                                                |
-| | gareth.huang@huawei.com    |                                                |
-+------------------------------+------------------------------------------------+
-| | Li Yingjun                 | * Rally task & benchmark                       |
-| | liyingjun (irc)            |                                                |
-| | yingjun.li@kylin-cloud.com |                                                |
-+------------------------------+------------------------------------------------+
-| | Roman Vasilets             | * Rally task & benchmark                       |
-| | rvasilets (irc)            |                                                |
-| | pomeo92@gmail.com          |                                                |
-+------------------------------+------------------------------------------------+
-| | Sergey Skripnick           | * Rally CI/CD                                  |
-| | redixin (irc)              | * Rally deploy                                 |
-| | sskripnick@mirantis.com    | * Automation of everything                    |
-+------------------------------+------------------------------------------------+
-| | Yair Fried                 | * Rally-Tempest integration                    |
-| | yfried (irc)               | * Rally task & benchmark                       |
-| | yfried@redhat.com          |                                                |
-+------------------------------+------------------------------------------------+
-| | Yaroslav Lobankov          | * Rally Verification                           |
-| | ylobankov (irc)            |                                                |
-| | ylobankov@mirantis.com     |                                                |
-+------------------------------+------------------------------------------------+
-
-| *All cores from this list are reviewing all changes that are proposed to Rally.
-  To avoid duplication of efforts, please contact them before starting work on
-  your code.*
-
-
-Plugin Core reviewers
-~~~~~~~~~~~~~~~~~~~~~
-
-+------------------------------+------------------------------------------------+
-| Contact                      | Area of interest                               |
-+------------------------------+------------------------------------------------+
-| | Ivan Kolodyazhny           | * Cinder plugins                               |
-| | e0ne (irc)                 |                                                |
-| | e0ne@e0ne.info             |                                                |
-+------------------------------+------------------------------------------------+
-| | Nikita Konovalov           | * Sahara plugins                               |
-| | NikitaKonovalov (irc)      |                                                |
-| | nkonovalov@mirantis.com    |                                                |
-+------------------------------+------------------------------------------------+
-| | Oleg Bondarev              | * Neutron plugins                              |
-| | obondarev (irc)            |                                                |
-| | obondarev@mirantis.com     |                                                |
-+------------------------------+------------------------------------------------+
-| | Sergey Kraynev             | * Heat plugins                                 |
-| | skraynev (irc)             |                                                |
-| | skraynev@mirantis.com      |                                                |
-+------------------------------+------------------------------------------------+
-| | Spyros Trigazis            | * Magnum plugins                               |
-| | strigazi (irc)             |                                                |
-| | strigazi@gmail.com         |                                                |
-+------------------------------+------------------------------------------------+
-
-
-
-| *All cores from this list are responsible for their component plugins.
-  To avoid duplication of efforts, please contact them before starting work
-  on your own plugins.*
-
-
-Useful links
-------------
-- `Source code`_
-- `Rally roadmap`_
-- `Project space`_
-- `Bugs`_
-- `Patches on review`_
-- `Meeting logs`_ (server: **irc.freenode.net**, channel:
-  **#openstack-meeting**)
-- `IRC logs`_ (server: **irc.freenode.net**, channel: **#openstack-rally**)
-- `Gitter chat`_
-- `Trello board`_
-
-
-Where can I discuss and propose changes?
-----------------------------------------
-- Our IRC channel: **#openstack-rally** on **irc.freenode.net**;
-- Weekly Rally team meeting (in IRC): **#openstack-meeting** on
-  **irc.freenode.net**, held on Mondays at 14:00 UTC;
-- OpenStack mailing list: **openstack-dev@lists.openstack.org** (see
-  `subscription and usage instructions`_);
-- `Rally team on Launchpad`_: Answers/Bugs/Blueprints.
-
-.. _release_notes:
-
-.. include:: release_notes.rst
-
-.. references:
-
-.. _Source code: https://github.com/openstack/rally
-.. _Rally roadmap: https://docs.google.com/a/mirantis.com/spreadsheets/d/16DXpfbqvlzMFaqaXAcJsBzzpowb_XpymaK2aFY2gA2g/edit#gid=0
-.. _Project space: http://launchpad.net/rally
-.. _Bugs: https://bugs.launchpad.net/rally
-.. _Patches on review: https://review.openstack.org/#/q/status:open+rally,n,z
-.. _Meeting logs: http://eavesdrop.openstack.org/meetings/rally/2016/
-.. _IRC logs: http://irclog.perlgeek.de/openstack-rally
-.. _Gitter chat: https://gitter.im/rally-dev/Lobby
-.. _Trello board: https://trello.com/b/DoD8aeZy/rally
-.. _subscription and usage instructions: http://lists.openstack.org/cgi-bin/mailman/listinfo/openstack-dev
-.. _Rally team on Launchpad: https://launchpad.net/rally
diff --git a/doc/source/project_info/release_notes b/doc/source/project_info/release_notes
deleted file mode 120000
index 26544e47..00000000
--- a/doc/source/project_info/release_notes
+++ /dev/null
@@ -1 +0,0 @@
-../../release_notes/
\ No newline at end of file
diff --git a/doc/source/project_info/release_notes.rst b/doc/source/project_info/release_notes.rst
deleted file mode 100644
index b26b4a6b..00000000
--- a/doc/source/project_info/release_notes.rst
+++ /dev/null
@@ -1,24 +0,0 @@
-..
-   Copyright 2015 Mirantis Inc.
All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -Release Notes -------------- - -.. toctree:: - :maxdepth: 1 - - release_notes/archive.rst - release_notes/latest.rst - diff --git a/doc/source/quick_start/gates.rst b/doc/source/quick_start/gates.rst deleted file mode 100644 index 4b856726..00000000 --- a/doc/source/quick_start/gates.rst +++ /dev/null @@ -1,192 +0,0 @@ -.. - Copyright 2015 Mirantis Inc. All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -.. _gates: - -Rally OpenStack Gates -===================== - -Gate jobs ---------- - -The **OpenStack CI system** uses the so-called **"Gate jobs"** to control -merges of patches submitted for review on Gerrit. These **Gate jobs** usually -just launch a set of tests -- unit, functional, integration, style -- that -check that the proposed patch does not break the software and can be merged -into the target branch, thus providing additional guarantees for the stability -of the software. - - -Create a custom Rally Gate job ------------------------------- - -You can create a **Rally Gate job** for your project to run Rally benchmarks -against the patchsets proposed to be merged into your project. - -To create a rally-gate job, you should create a **rally-jobs/** directory at -the root of your project. - -As a rule, this directory contains only **{projectname}.yaml**, but more -scenarios and jobs can be added as well. This yaml file is in fact an input -Rally task file specifying benchmark scenarios that should be run in your gate -job. - -To make *{projectname}.yaml* run in gates, you need to add *"rally-jobs"* to -the "jobs" section of *projects.yaml* in *openstack-infra/project-config*. - - -Example: Rally Gate job for Glance ----------------------------------- - -Let's take a look at an example for the `Glance`_ project: - -Edit *jenkins/jobs/projects.yaml:* - -.. parsed-literal:: - - - project: - name: glance - node: 'bare-precise || bare-trusty' - tarball-site: tarballs.openstack.org - doc-publisher-site: docs.openstack.org - - jobs: - - python-jobs - - python-icehouse-bitrot-jobs - - python-juno-bitrot-jobs - - openstack-publish-jobs - - translation-jobs - **- rally-jobs** - - -Also add *gate-rally-dsvm-{projectname}* to *zuul/layout.yaml*: - -.. 
parsed-literal:: - - - name: openstack/glance - template: - - name: merge-check - - name: python26-jobs - - name: python-jobs - - name: openstack-server-publish-jobs - - name: openstack-server-release-jobs - - name: periodic-icehouse - - name: periodic-juno - - name: check-requirements - - name: integrated-gate - - name: translation-jobs - - name: large-ops - - name: experimental-tripleo-jobs - check: - - check-devstack-dsvm-cells - **- gate-rally-dsvm-glance** - gate: - - gate-devstack-dsvm-cells - experimental: - - gate-grenade-dsvm-forward - - -To add one more scenario and job, you need to add *{scenarioname}.yaml* file -here, and *gate-rally-dsvm-{scenarioname}* to *projects.yaml*. - -For example, you can add *myscenario.yaml* to *rally-jobs* directory in your -project and then edit *jenkins/jobs/projects.yaml* in this way: - -.. parsed-literal:: - - - project: - name: glance - github-org: openstack - node: bare-precise - tarball-site: tarballs.openstack.org - doc-publisher-site: docs.openstack.org - - jobs: - - python-jobs - - python-havana-bitrot-jobs - - openstack-publish-jobs - - translation-jobs - - rally-jobs - **- 'gate-rally-dsvm-{name}': - name: myscenario** - -Finally, add *gate-rally-dsvm-myscenario* to *zuul/layout.yaml*: - -.. parsed-literal:: - - - name: openstack/glance - template: - - name: python-jobs - - name: openstack-server-publish-jobs - - name: periodic-havana - - name: check-requirements - - name: integrated-gate - check: - - check-devstack-dsvm-cells - - check-tempest-dsvm-postgres-full - - gate-tempest-dsvm-large-ops - - gate-tempest-dsvm-neutron-large-ops - **- gate-rally-dsvm-myscenario** - -It is also possible to arrange your input task files as templates based on -``Jinja2``. Say, you want to set the image names used throughout the -*myscenario.yaml* task file as a variable parameter. Then, replace concrete -image names in this file with a variable: - -.. code-block:: yaml - - ... - - NovaServers.boot_and_delete_server: - - - args: - image: - name: {{image_name}} - ... - - NovaServers.boot_and_list_server: - - - args: - image: - name: {{image_name}} - ... - -and create a file named *myscenario_args.yaml* that will define the parameter -values: - -.. code-block:: yaml - - --- - - image_name: "^cirros.*-disk$" - -this file will be automatically used by Rally to substitute the variables in -*myscenario.yaml*. - - -Plugins & Extras in Rally Gate jobs ------------------------------------ - -Along with scenario configs in yaml, the **rally-jobs** directory can also -contain two subdirectories: - -- **plugins**: :ref:`Plugins ` needed for your gate job; -- **extra**: auxiliary files like bash scripts or images. - -Both subdirectories will be copied to *~/.rally/* before the job gets started. - -.. references: - -.. _Glance: https://wiki.openstack.org/wiki/Glance diff --git a/doc/source/quick_start/index.rst b/doc/source/quick_start/index.rst deleted file mode 100644 index ef7d8f72..00000000 --- a/doc/source/quick_start/index.rst +++ /dev/null @@ -1,30 +0,0 @@ -.. - Copyright 2015 Mirantis Inc. All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the - License for the specific language governing permissions and limitations - under the License. - -=========== -Quick start -=========== - -This section will guide you through all steps of using Rally - from -installation to its advanced usage in different use cases (including running -Rally in OpenStack CI system gates to control merges of patches submitted for -review on Gerrit code review system). - -.. toctree:: - :glob: - :maxdepth: 2 - - tutorial - gates diff --git a/doc/source/quick_start/tutorial.rst b/doc/source/quick_start/tutorial.rst deleted file mode 100644 index f4151225..00000000 --- a/doc/source/quick_start/tutorial.rst +++ /dev/null @@ -1,41 +0,0 @@ -.. - Copyright 2015 Mirantis Inc. All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -.. _tutorial: - -Rally step-by-step -================== - -In the following tutorial, we will guide you step-by-step through different use -cases that might occur in Rally, starting with the easy ones and moving towards -more complicated cases. - - -.. toctree:: - :glob: - :maxdepth: 1 - - tutorial/step_0_installation - tutorial/step_1_setting_up_env_and_running_benchmark_from_samples - tutorial/step_2_input_task_format - tutorial/step_3_benchmarking_with_existing_users - tutorial/step_4_adding_success_criteria_for_benchmarks - tutorial/step_5_task_templates - tutorial/step_6_aborting_load_generation_on_sla_failure - tutorial/step_7_working_with_multple_openstack_clouds - tutorial/step_8_discovering_more_plugins - tutorial/step_9_deploying_openstack - tutorial/step_10_verifying_cloud_via_tempest_verifier - tutorial/step_11_profiling_openstack_internals diff --git a/doc/source/quick_start/tutorial/step_0_installation.rst b/doc/source/quick_start/tutorial/step_0_installation.rst deleted file mode 100644 index 5215566e..00000000 --- a/doc/source/quick_start/tutorial/step_0_installation.rst +++ /dev/null @@ -1,42 +0,0 @@ -.. - Copyright 2015 Mirantis Inc. All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -.. _tutorial_step_0_installation: - -Step 0. Installation -==================== - -The easiest way to install Rally is by running its `installation script`_: - -.. 
code-block:: bash
-
-    wget -q -O- https://raw.githubusercontent.com/openstack/rally/master/install_rally.sh | bash
-    # or using curl:
-    curl https://raw.githubusercontent.com/openstack/rally/master/install_rally.sh | bash
-
-If you execute the script as a regular user, Rally will create a new
-virtual environment in ``~/rally/``, install itself there, and
-use `sqlite` as the database backend. If you execute the script as root,
-Rally will be installed system-wide. For more installation options,
-please refer to the :ref:`installation ` page.
-
-**Note:** Rally requires Python version 2.7 or 3.4.
-
-Now that you have Rally installed, you are ready to start
-:ref:`benchmarking OpenStack with it <tutorial_step_1_setting_up_env_and_running_benchmark_from_samples>`!
-
-.. references:
-
-.. _installation script: https://raw.githubusercontent.com/openstack/rally/master/install_rally.sh
diff --git a/doc/source/quick_start/tutorial/step_10_verifying_cloud_via_tempest_verifier.rst b/doc/source/quick_start/tutorial/step_10_verifying_cloud_via_tempest_verifier.rst
deleted file mode 100644
index 7f6a32b6..00000000
--- a/doc/source/quick_start/tutorial/step_10_verifying_cloud_via_tempest_verifier.rst
+++ /dev/null
@@ -1,607 +0,0 @@
-..
-   Copyright 2017 Mirantis Inc. All Rights Reserved.
-
-   Licensed under the Apache License, Version 2.0 (the "License"); you may
-   not use this file except in compliance with the License. You may obtain
-   a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-   WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-   License for the specific language governing permissions and limitations
-   under the License.
-
-.. _tutorial_step_10_verifying_cloud_via_tempest_verifier:
-
-Step 10. Verifying cloud via Tempest verifier
-=============================================
-
-.. contents::
-   :local:
-
-As you may know, Rally has a verification component (aka **'rally verify'**).
-Earlier, the purpose of this component was to simplify work with the
-Tempest framework (the OpenStack Integration Test Suite). Rally provided
-quite a simple interface to install and configure Tempest, run tests, and
-build a report with the results. Now, however, the verification component
-simplifies work not only with Tempest but with any test framework or tool.
-All you need to do is create a plugin for your framework or tool, and you
-will be able to use the **'rally verify'** interface for it. At this point,
-Rally supports only one plugin in the verification component out of the
-box - as you might guess, the Tempest plugin. In this guide, we will show
-how to use Tempest and Rally together via the updated **'rally verify'**
-interface. We assume that you already have a
-:ref:`Rally installation <tutorial_step_0_installation>` and have already
-:ref:`registered an OpenStack deployment <tutorial_step_1_setting_up_env_and_running_benchmark_from_samples>`
-in Rally. So, let's get started!
-
-
-Create/delete Tempest verifier
-------------------------------
-
-Execute the following command to create a Tempest verifier:
-
-.. code-block:: console
-
-   $ rally verify create-verifier --type tempest --name tempest-verifier
-   2017-01-18 14:43:20.807 5125 INFO rally.api [-] Creating verifier 'tempest-verifier'.
-   2017-01-18 14:43:21.203 5125 INFO rally.verification.manager [-] Cloning verifier repo from https://git.openstack.org/openstack/tempest.
-   2017-01-18 14:43:32.458 5125 INFO rally.verification.manager [-] Creating virtual environment. It may take a few minutes.
-   2017-01-18 14:43:49.786 5125 INFO rally.api [-] Verifier 'tempest-verifier' (UUID=cde1b03d-d1eb-47f2-a997-3fd21b1d8810) has been successfully created!
-   Using verifier 'tempest-verifier' (UUID=cde1b03d-d1eb-47f2-a997-3fd21b1d8810) as the default verifier for the future operations.
-
-The command clones Tempest from the
-**https://git.openstack.org/openstack/tempest** repository and installs it in
-a Python virtual environment for the current deployment by default. All
-information about the created verifier is stored in a database, which allows
-us to set up different Tempest versions and easily switch between them. How
-to do this is described below. You can list all installed verifiers via the
-**rally verify list-verifiers** command.
-
-The arguments below allow us to override the default behavior.
-
-Use the **--source** argument to specify an alternate git repository location.
-The path to a local Tempest repository and the URL of a remote repository are
-both valid values.
-
-.. code-block:: console
-
-   $ rally verify create-verifier --type tempest --name tempest-verifier --source /home/ubuntu/tempest/
-   2017-01-18 14:53:19.958 5760 INFO rally.api [-] Creating verifier 'tempest-verifier'.
-   2017-01-18 14:53:20.166 5760 INFO rally.verification.manager [-] Cloning verifier repo from /home/ubuntu/tempest/.
-   2017-01-18 14:53:20.299 5760 INFO rally.verification.manager [-] Creating virtual environment. It may take a few minutes.
-   2017-01-18 14:53:32.517 5760 INFO rally.api [-] Verifier 'tempest-verifier' (UUID=3f878030-1edf-455c-ae5e-07836e3d7e35) has been successfully created!
-   Using verifier 'tempest-verifier' (UUID=3f878030-1edf-455c-ae5e-07836e3d7e35) as the default verifier for the future operations.
-
-.. code-block:: console
-
-   $ rally verify create-verifier --type tempest --name tempest-verifier --source https://github.com/openstack/tempest.git
-   2017-01-18 14:54:57.786 5907 INFO rally.api [-] Creating verifier 'tempest-verifier'.
-   2017-01-18 14:54:57.990 5907 INFO rally.verification.manager [-] Cloning verifier repo from https://github.com/openstack/tempest.git.
-   2017-01-18 14:55:05.729 5907 INFO rally.verification.manager [-] Creating virtual environment. It may take a few minutes.
-   2017-01-18 14:55:22.943 5907 INFO rally.api [-] Verifier 'tempest-verifier' (UUID=e84a947c-b9d3-434b-853b-176a597902e5) has been successfully created!
-   Using verifier 'tempest-verifier' (UUID=e84a947c-b9d3-434b-853b-176a597902e5) as the default verifier for the future operations.
-
-Use the **--version** argument to specify a Tempest commit ID or tag.
-
-.. code-block:: console
-
-   $ rally verify create-verifier --type tempest --name tempest-verifier --version 198e5b4b871c3d09c20afb56dca9637a8cf86ac8
-   2017-01-18 14:57:02.274 6068 INFO rally.api [-] Creating verifier 'tempest-verifier'.
-   2017-01-18 14:57:02.461 6068 INFO rally.verification.manager [-] Cloning verifier repo from https://git.openstack.org/openstack/tempest.
-   2017-01-18 14:57:15.356 6068 INFO rally.verification.manager [-] Switching verifier repo to the '198e5b4b871c3d09c20afb56dca9637a8cf86ac8' version.
-   2017-01-18 14:57:15.423 6068 INFO rally.verification.manager [-] Creating virtual environment. It may take a few minutes.
-   2017-01-18 14:57:28.004 6068 INFO rally.api [-] Verifier 'tempest-verifier' (UUID=532d7ad2-902e-4764-aa53-335f67dadc7f) has been successfully created!
-   Using verifier 'tempest-verifier' (UUID=532d7ad2-902e-4764-aa53-335f67dadc7f) as the default verifier for the future operations.
-
-..
code-block:: console - - $ rally verify create-verifier --type tempest --name tempest-verifier --source /home/ubuntu/tempest/ --version 13.0.0 - 2017-01-18 15:01:53.971 6518 INFO rally.api [-] Creating verifier 'tempest-verifier'. - 2017-01-18 15:01:54.180 6518 INFO rally.verification.manager [-] Cloning verifier repo from /home/ubuntu/tempest/. - 2017-01-18 15:01:54.274 6518 INFO rally.verification.manager [-] Switching verifier repo to the '13.0.0' version. - 2017-01-18 15:01:54.336 6518 INFO rally.verification.manager [-] Creating virtual environment. It may take a few minutes. - 2017-01-18 15:02:06.623 6518 INFO rally.api [-] Verifier 'tempest-verifier' (UUID=96ffc4bc-4ac2-4ae9-b3c2-d6b16b871027) has been successfully created! - Using verifier 'tempest-verifier' (UUID=96ffc4bc-4ac2-4ae9-b3c2-d6b16b871027) as the default verifier for the future operations. - -Use the **--system-wide** argument to perform system-wide Tempest installation. -In this case, the virtual environment will not be created and Tempest -requirements will not be installed. Moreover, it is assumed that requirements -are already present in the local environment. This argument is useful when -users don't have an Internet connection to install requirements, but they have -pre-installed ones in the local environment. - -.. code-block:: console - - $ rally verify create-verifier --type tempest --name tempest-verifier --source /home/ubuntu/tempest/ --version 13.0.0 --system-wide - 2017-01-18 15:22:09.198 7224 INFO rally.api [-] Creating verifier 'tempest-verifier'. - 2017-01-18 15:22:09.408 7224 INFO rally.verification.manager [-] Cloning verifier repo from /home/ubuntu/tempest/. - 2017-01-18 15:22:09.494 7224 INFO rally.verification.manager [-] Switching verifier repo to the '13.0.0' version. - 2017-01-18 15:22:10.965 7224 INFO rally.api [-] Verifier 'tempest-verifier' (UUID=14c94c12-633a-4522-bd3d-2508f2b9d681) has been successfully created! - Using verifier 'tempest-verifier' (UUID=14c94c12-633a-4522-bd3d-2508f2b9d681) as the default verifier for the future operations. - -To delete the Tempest verifier for all deployments execute the following -command: - -.. code-block:: console - - $ rally verify delete-verifier --id 14c94c12-633a-4522-bd3d-2508f2b9d681 - 2017-01-18 15:27:03.485 7474 INFO rally.api [-] Deleting verifier 'tempest-verifier' (UUID=14c94c12-633a-4522-bd3d-2508f2b9d681). - 2017-01-18 15:27:03.607 7474 INFO rally.api [-] Verifier has been successfully deleted! - -If you have any verifications, use the **--force** argument to delete the -verifier and all stored verifications. - -.. code-block:: console - - $ rally verify delete-verifier --id ec58af86-5217-4bbd-b9e5-491df6873b82 - Failed to delete verifier 'tempest-verifier' (UUID=ec58af86-5217-4bbd-b9e5-491df6873b82) because there are stored verifier verifications! Please, make sure that they are not important to you. Use 'force' flag if you would like to delete verifications as well. - -.. code-block:: console - - $ rally verify delete-verifier --id ec58af86-5217-4bbd-b9e5-491df6873b82 --force - 2017-01-18 15:49:12.840 8685 INFO rally.api [-] Deleting all verifications created by verifier 'tempest-verifier' (UUID=ec58af86-5217-4bbd-b9e5-491df6873b82). - 2017-01-18 15:49:12.843 8685 INFO rally.api [-] Deleting verification (UUID=c3d1408a-a224-4d31-b38f-4caf8ce06a95). - 2017-01-18 15:49:12.951 8685 INFO rally.api [-] Verification has been successfully deleted! 
-   2017-01-18 15:49:12.961 8685 INFO rally.api [-] Deleting verification (UUID=a437537e-538b-4637-b6ab-ecb8072f0c71).
-   2017-01-18 15:49:13.052 8685 INFO rally.api [-] Verification has been successfully deleted!
-   2017-01-18 15:49:13.061 8685 INFO rally.api [-] Deleting verification (UUID=5cec0579-4b4e-46f3-aeb4-a481a7bc5663).
-   2017-01-18 15:49:13.152 8685 INFO rally.api [-] Verification has been successfully deleted!
-   2017-01-18 15:49:13.152 8685 INFO rally.api [-] Deleting verifier 'tempest-verifier' (UUID=ec58af86-5217-4bbd-b9e5-491df6873b82).
-   2017-01-18 15:49:13.270 8685 INFO rally.api [-] Verifier has been successfully deleted!
-
-Use the **--deployment-id** argument to remove only the deployment-specific
-data, for example, the config file.
-
-.. code-block:: console
-
-   $ rally verify delete-verifier --deployment-id 351fdfa2-99ad-4447-ba31-22e76630df97
-   2017-01-18 15:30:27.793 7659 INFO rally.api [-] Deleting deployment-specific data for verifier 'tempest-verifier' (UUID=ec58af86-5217-4bbd-b9e5-491df6873b82).
-   2017-01-18 15:30:27.797 7659 INFO rally.api [-] Deployment-specific data has been successfully deleted!
-
-When the **--deployment-id** and **--force** arguments are used together,
-only the deployment-specific data and the verifications of the specified
-deployment will be deleted.
-
-.. code-block:: console
-
-   $ rally verify delete-verifier --deployment-id 351fdfa2-99ad-4447-ba31-22e76630df97 --force
-   2017-01-18 15:55:02.657 9004 INFO rally.api [-] Deleting all verifications created by verifier 'tempest-verifier' (UUID=fbbd2bc0-dd92-4e1d-805c-672af7c5ec78) for deployment '351fdfa2-99ad-4447-ba31-22e76630df97'.
-   2017-01-18 15:55:02.661 9004 INFO rally.api [-] Deleting verification (UUID=a3d3d53c-79a6-4151-85ce-f4a7323d2f4c).
-   2017-01-18 15:55:02.767 9004 INFO rally.api [-] Verification has been successfully deleted!
-   2017-01-18 15:55:02.776 9004 INFO rally.api [-] Deleting verification (UUID=eddea799-bbc5-485c-a284-1747a30e3f1e).
-   2017-01-18 15:55:02.869 9004 INFO rally.api [-] Verification has been successfully deleted!
-   2017-01-18 15:55:02.870 9004 INFO rally.api [-] Deleting deployment-specific data for verifier 'tempest-verifier' (UUID=fbbd2bc0-dd92-4e1d-805c-672af7c5ec78).
-   2017-01-18 15:55:02.878 9004 INFO rally.api [-] Deployment-specific data has been successfully deleted!
-
-
-Configure Tempest verifier
---------------------------
-
-Execute the following command to configure the Tempest verifier for the current
-deployment:
-
-.. code-block:: console
-
-   $ rally verify configure-verifier
-   2017-01-18 16:00:24.495 9377 INFO rally.api [-] Configuring verifier 'tempest-verifier' (UUID=59e8bd5b-55e1-4ab8-b506-a5853c7a92e9) for deployment 'tempest' (UUID=4a62f373-9ce7-47a3-8165-6dc7353f754a).
-   2017-01-18 16:00:27.497 9377 INFO rally.api [-] Verifier 'tempest-verifier' (UUID=59e8bd5b-55e1-4ab8-b506-a5853c7a92e9) has been successfully configured for deployment 'tempest' (UUID=4a62f373-9ce7-47a3-8165-6dc7353f754a)!
-
-Use the **--deployment-id** argument to configure the verifier for any
-deployment registered in Rally.
-
-.. code-block:: console
-
-   $ rally verify configure-verifier --deployment-id
-
-If you want to reconfigure the Tempest verifier, just add the **--reconfigure**
-argument to the command.
-
-..
code-block:: console - - $ rally verify configure-verifier --reconfigure - 2017-01-18 16:08:50.932 9786 INFO rally.api [-] Configuring verifier 'tempest-verifier' (UUID=16b73e48-09ad-4a54-92eb-2f2708b72c54) for deployment 'tempest-2' (UUID=351fdfa2-99ad-4447-ba31-22e76630df97). - 2017-01-18 16:08:50.933 9786 INFO rally.api [-] Verifier is already configured! - 2017-01-18 16:08:50.933 9786 INFO rally.api [-] Reconfiguring verifier. - 2017-01-18 16:08:52.806 9786 INFO rally.api [-] Verifier 'tempest-verifier' (UUID=16b73e48-09ad-4a54-92eb-2f2708b72c54) has been successfully configured for deployment 'tempest-2' (UUID=351fdfa2-99ad-4447-ba31-22e76630df97)! - -Moreover, it is possible to extend the default verifier configuration by -providing the **--extend** argument. - -.. code-block:: console - - $ cat extra_options.conf - [some-section-1] - some-option = some-value - - [some-section-2] - some-option = some-value - -.. code-block:: console - - $ rally verify configure-verifier --extend extra_options.conf - 2017-01-18 16:15:12.248 10029 INFO rally.api [-] Configuring verifier 'tempest-verifier' (UUID=16b73e48-09ad-4a54-92eb-2f2708b72c54) for deployment 'tempest-2' (UUID=351fdfa2-99ad-4447-ba31-22e76630df97). - 2017-01-18 16:15:12.249 10029 INFO rally.api [-] Verifier is already configured! - 2017-01-18 16:15:12.249 10029 INFO rally.api [-] Adding extra options to verifier configuration. - 2017-01-18 16:15:12.439 10029 INFO rally.api [-] Verifier 'tempest-verifier' (UUID=16b73e48-09ad-4a54-92eb-2f2708b72c54) has been successfully configured for deployment 'tempest-2' (UUID=351fdfa2-99ad-4447-ba31-22e76630df97)! - -.. code-block:: console - - $ rally verify configure-verifier --extend '{section-1: {option: value}, section-2: {option: value}}' - 2017-01-18 16:18:07.317 10180 INFO rally.api [-] Configuring verifier 'tempest-verifier' (UUID=16b73e48-09ad-4a54-92eb-2f2708b72c54) for deployment 'tempest-2' (UUID=351fdfa2-99ad-4447-ba31-22e76630df97). - 2017-01-18 16:18:07.317 10180 INFO rally.api [-] Verifier is already configured! - 2017-01-18 16:18:07.317 10180 INFO rally.api [-] Adding extra options to verifier configuration. - 2017-01-18 16:18:07.549 10180 INFO rally.api [-] Verifier 'tempest-verifier' (UUID=16b73e48-09ad-4a54-92eb-2f2708b72c54) has been successfully configured for deployment 'tempest-2' (UUID=351fdfa2-99ad-4447-ba31-22e76630df97)! - -In order to see the generated Tempest config file use the **--show** argument. - -.. code-block:: console - - $ rally verify configure-verifier --show - 2017-01-18 16:19:25.412 10227 INFO rally.api [-] Configuring verifier 'tempest-verifier' (UUID=16b73e48-09ad-4a54-92eb-2f2708b72c54) for deployment 'tempest-2' (UUID=351fdfa2-99ad-4447-ba31-22e76630df97). - 2017-01-18 16:19:25.412 10227 INFO rally.api [-] Verifier is already configured! - - [DEFAULT] - debug = True - log_file = tempest.log - use_stderr = False - - [auth] - use_dynamic_credentials = True - admin_username = admin - admin_password = admin - admin_project_name = admin - admin_domain_name = Default - ... - - -Start a verification --------------------- - -In order to start a verification execute the following command: - -.. code-block:: console - - $ rally verify start - 2017-01-18 16:49:35.367 12162 INFO rally.api [-] Starting verification (UUID=0673ca09-bdb6-4814-a33e-17731559ff33) for deployment 'tempest-2' (UUID=351fdfa2-99ad-4447-ba31-22e76630df97) by verifier 'tempest-verifier' (UUID=16b73e48-09ad-4a54-92eb-2f2708b72c54). 
- 2017-01-18 16:49:44.404 12162 INFO tempest-verifier [-] {0} tempest.api.baremetal.admin.test_chassis.TestChassis ... skip: TestChassis skipped as Ironic is not available - 2017-01-18 16:49:44.404 12162 INFO tempest-verifier [-] {0} tempest.api.baremetal.admin.test_drivers.TestDrivers ... skip: TestDrivers skipped as Ironic is not available - 2017-01-18 16:49:44.429 12162 INFO tempest-verifier [-] {3} tempest.api.baremetal.admin.test_ports_negative.TestPortsNegative ... skip: TestPortsNegative skipped as Ironic is not available - 2017-01-18 16:49:44.438 12162 INFO tempest-verifier [-] {2} tempest.api.baremetal.admin.test_nodestates.TestNodeStates ... skip: TestNodeStates skipped as Ironic is not available - 2017-01-18 16:49:44.438 12162 INFO tempest-verifier [-] {2} tempest.api.baremetal.admin.test_ports.TestPorts ... skip: TestPorts skipped as Ironic is not available - 2017-01-18 16:49:44.439 12162 INFO tempest-verifier [-] {1} tempest.api.baremetal.admin.test_api_discovery.TestApiDiscovery ... skip: TestApiDiscovery skipped as Ironic is not available - 2017-01-18 16:49:44.439 12162 INFO tempest-verifier [-] {1} tempest.api.baremetal.admin.test_nodes.TestNodes ... skip: TestNodes skipped as Ironic is not available - 2017-01-18 16:49:47.083 12162 INFO tempest-verifier [-] {0} tempest.api.compute.admin.test_availability_zone_negative.AZAdminNegativeTestJSON.test_get_availability_zone_list_detail_with_non_admin_user ... success [1.013s] - 2017-01-18 16:49:47.098 12162 INFO tempest-verifier [-] {1} tempest.api.compute.admin.test_availability_zone.AZAdminV2TestJSON.test_get_availability_zone_list ... success [1.063s] - 2017-01-18 16:49:47.321 12162 INFO tempest-verifier [-] {1} tempest.api.compute.admin.test_availability_zone.AZAdminV2TestJSON.test_get_availability_zone_list_detail ... success [0.224s] - ... - -By default, the command runs the full suite of Tempest tests for the current -deployment. Also, it is possible to run tests of any created verifier, and for -any registered deployment in Rally, using the **--id** and **--deployment-id** -arguments. - -.. code-block:: console - - $ rally verify start --id --deployment-id - -Also, there is a possibility to run a certain suite of Tempest tests, using -the **--pattern** argument. - -.. code-block:: console - - $ rally verify start --pattern set=compute - 2017-01-18 16:58:40.378 12631 INFO rally.api [-] Starting verification (UUID=a4bd3993-ba3d-425c-ab81-38b2f627e682) for deployment 'tempest-2' (UUID=351fdfa2-99ad-4447-ba31-22e76630df97) by verifier 'tempest-verifier' (UUID=16b73e48-09ad-4a54-92eb-2f2708b72c54). - 2017-01-18 16:58:44.883 12631 INFO tempest-verifier [-] {1} tempest.api.compute.admin.test_auto_allocate_network.AutoAllocateNetworkTest ... skip: The microversion range[2.37 - latest] of this test is out of the configuration range[None - None]. - 2017-01-18 16:58:47.330 12631 INFO tempest-verifier [-] {1} tempest.api.compute.admin.test_availability_zone.AZAdminV2TestJSON.test_get_availability_zone_list ... success [0.680s] - 2017-01-18 16:58:47.416 12631 INFO tempest-verifier [-] {2} tempest.api.compute.admin.test_availability_zone_negative.AZAdminNegativeTestJSON.test_get_availability_zone_list_detail_with_non_admin_user ... success [0.761s] - 2017-01-18 16:58:47.610 12631 INFO tempest-verifier [-] {1} tempest.api.compute.admin.test_availability_zone.AZAdminV2TestJSON.test_get_availability_zone_list_detail ... 
success [0.280s] - 2017-01-18 16:58:47.694 12631 INFO tempest-verifier [-] {3} tempest.api.compute.admin.test_flavors.FlavorsAdminTestJSON.test_create_flavor_using_string_ram ... success [1.015s] - 2017-01-18 16:58:48.514 12631 INFO tempest-verifier [-] {3} tempest.api.compute.admin.test_flavors.FlavorsAdminTestJSON.test_create_flavor_verify_entry_in_list_details ... success [0.820s] - 2017-01-18 16:58:48.675 12631 INFO tempest-verifier [-] {0} tempest.api.compute.admin.test_agents.AgentsAdminTestJSON.test_create_agent ... success [0.777s] - 2017-01-18 16:58:49.090 12631 INFO tempest-verifier [-] {0} tempest.api.compute.admin.test_agents.AgentsAdminTestJSON.test_delete_agent ... success [0.415s] - 2017-01-18 16:58:49.160 12631 INFO tempest-verifier [-] {3} tempest.api.compute.admin.test_flavors.FlavorsAdminTestJSON.test_create_flavor_with_int_id ... success [0.646s] - 2017-01-18 16:58:49.546 12631 INFO tempest-verifier [-] {0} tempest.api.compute.admin.test_agents.AgentsAdminTestJSON.test_list_agents ... success [0.455s] - ... - -Available suites for Tempest 14.0.0 (the latest Tempest release when this -documentation was written) are **full**, **smoke**, **compute**, **identity**, -**image**, **network**, **object_storage**, **orchestration**, **volume**, -**scenario**. The number of available suites depends on Tempest version because -some test sets move from the Tempest tree to the corresponding Tempest plugins. - -Moreover, users can run a certain set of tests, using a regular expression. - -.. code-block:: console - - $ rally verify start --pattern tempest.api.compute.admin.test_flavors.FlavorsAdminTestJSON - 2017-01-18 17:00:36.590 12745 INFO rally.api [-] Starting verification (UUID=1e12510e-7391-48ed-aba2-8fefe1075a87) for deployment 'tempest-2' (UUID=351fdfa2-99ad-4447-ba31-22e76630df97) by verifier 'tempest-verifier' (UUID=16b73e48-09ad-4a54-92eb-2f2708b72c54). - 2017-01-18 17:00:44.241 12745 INFO tempest-verifier [-] {0} tempest.api.compute.admin.test_flavors.FlavorsAdminTestJSON.test_create_flavor_using_string_ram ... success [1.044s] - 2017-01-18 17:00:45.108 12745 INFO tempest-verifier [-] {0} tempest.api.compute.admin.test_flavors.FlavorsAdminTestJSON.test_create_flavor_verify_entry_in_list_details ... success [0.868s] - 2017-01-18 17:00:45.863 12745 INFO tempest-verifier [-] {0} tempest.api.compute.admin.test_flavors.FlavorsAdminTestJSON.test_create_flavor_with_int_id ... success [0.754s] - 2017-01-18 17:00:47.575 12745 INFO tempest-verifier [-] {0} tempest.api.compute.admin.test_flavors.FlavorsAdminTestJSON.test_create_flavor_with_none_id ... success [1.712s] - 2017-01-18 17:00:48.260 12745 INFO tempest-verifier [-] {0} tempest.api.compute.admin.test_flavors.FlavorsAdminTestJSON.test_create_flavor_with_uuid_id ... success [0.684s] - 2017-01-18 17:00:50.951 12745 INFO tempest-verifier [-] {0} tempest.api.compute.admin.test_flavors.FlavorsAdminTestJSON.test_create_list_flavor_without_extra_data ... success [2.689s] - 2017-01-18 17:00:51.631 12745 INFO tempest-verifier [-] {0} tempest.api.compute.admin.test_flavors.FlavorsAdminTestJSON.test_create_server_with_non_public_flavor ... success [0.680s] - 2017-01-18 17:00:54.192 12745 INFO tempest-verifier [-] {0} tempest.api.compute.admin.test_flavors.FlavorsAdminTestJSON.test_is_public_string_variations ... success [2.558s] - 2017-01-18 17:00:55.102 12745 INFO tempest-verifier [-] {0} tempest.api.compute.admin.test_flavors.FlavorsAdminTestJSON.test_list_non_public_flavor ... 
success [0.911s] - 2017-01-18 17:00:55.774 12745 INFO tempest-verifier [-] {0} tempest.api.compute.admin.test_flavors.FlavorsAdminTestJSON.test_list_public_flavor_with_other_user ... success [0.673s] - 2017-01-18 17:00:59.602 12745 INFO rally.api [-] Verification (UUID=1e12510e-7391-48ed-aba2-8fefe1075a87) has been successfully finished for deployment 'tempest-2' (UUID=351fdfa2-99ad-4447-ba31-22e76630df97)! - - ====== - Totals - ====== - Ran: 10 tests in 14.578 sec. - - Success: 10 - - Skipped: 0 - - Expected failures: 0 - - Unexpected success: 0 - - Failures: 0 - - Using verification (UUID=1e12510e-7391-48ed-aba2-8fefe1075a87) as the default verification for the future operations. - -In such a way it is possible to run tests from a certain directory or class, -and even run a single test. - -.. code-block:: console - - $ rally verify start --pattern tempest.api.compute.admin.test_flavors.FlavorsAdminTestJSON.test_create_flavor_using_string_ram - 2017-01-18 17:01:43.993 12819 INFO rally.api [-] Starting verification (UUID=b9a386e1-d1a1-41b3-b369-9607173de63e) for deployment 'tempest-2' (UUID=351fdfa2-99ad-4447-ba31-22e76630df97) by verifier 'tempest-verifier' (UUID=16b73e48-09ad-4a54-92eb-2f2708b72c54). - 2017-01-18 17:01:52.592 12819 INFO tempest-verifier [-] {0} tempest.api.compute.admin.test_flavors.FlavorsAdminTestJSON.test_create_flavor_using_string_ram ... success [1.214s] - 2017-01-18 17:01:57.220 12819 INFO rally.api [-] Verification (UUID=b9a386e1-d1a1-41b3-b369-9607173de63e) has been successfully finished for deployment 'tempest-2' (UUID=351fdfa2-99ad-4447-ba31-22e76630df97)! - - ====== - Totals - ====== - Ran: 1 tests in 4.139 sec. - - Success: 1 - - Skipped: 0 - - Expected failures: 0 - - Unexpected success: 0 - - Failures: 0 - - Using verification (UUID=b9a386e1-d1a1-41b3-b369-9607173de63e) as the default verification for the future operations. - -In order to see errors of failed tests after the verification finished use the -**--detailed** argument. - -.. code-block:: console - - $ rally verify start --pattern tempest.api.compute.admin.test_aggregates.AggregatesAdminTestJSON --detailed - 2017-01-25 19:34:41.113 16123 INFO rally.api [-] Starting verification (UUID=ceb6f26b-5830-42c5-ab09-bfd985ed4cb7) for deployment 'tempest-2' (UUID=38a397d0-ee11-475d-ab08-e17be09d0bcd) by verifier 'tempest-verifier' (UUID=bbf51ada-9dd6-4b25-b1b6-b651e0541dde). - 2017-01-25 19:34:50.188 16123 INFO tempest-verifier [-] {0} tempest.api.compute.admin.test_aggregates.AggregatesAdminTestJSON.test_aggregate_add_host_create_server_with_az ... fail [0.784s] - 2017-01-25 19:34:51.587 16123 INFO tempest-verifier [-] {0} tempest.api.compute.admin.test_aggregates.AggregatesAdminTestJSON.test_aggregate_add_host_get_details ... success [1.401s] - 2017-01-25 19:34:52.947 16123 INFO tempest-verifier [-] {0} tempest.api.compute.admin.test_aggregates.AggregatesAdminTestJSON.test_aggregate_add_host_list ... success [1.359s] - 2017-01-25 19:34:53.863 16123 INFO tempest-verifier [-] {0} tempest.api.compute.admin.test_aggregates.AggregatesAdminTestJSON.test_aggregate_add_remove_host ... success [0.915s] - 2017-01-25 19:34:54.577 16123 INFO tempest-verifier [-] {0} tempest.api.compute.admin.test_aggregates.AggregatesAdminTestJSON.test_aggregate_create_delete ... success [0.714s] - 2017-01-25 19:34:55.221 16123 INFO tempest-verifier [-] {0} tempest.api.compute.admin.test_aggregates.AggregatesAdminTestJSON.test_aggregate_create_delete_with_az ... 
success [0.643s] - 2017-01-25 19:34:55.974 16123 INFO tempest-verifier [-] {0} tempest.api.compute.admin.test_aggregates.AggregatesAdminTestJSON.test_aggregate_create_update_metadata_get_details ... success [0.752s] - 2017-01-25 19:34:56.689 16123 INFO tempest-verifier [-] {0} tempest.api.compute.admin.test_aggregates.AggregatesAdminTestJSON.test_aggregate_create_update_with_az ... success [0.714s] - 2017-01-25 19:34:57.144 16123 INFO tempest-verifier [-] {0} tempest.api.compute.admin.test_aggregates.AggregatesAdminTestJSON.test_aggregate_create_verify_entry_in_list ... success [0.456s] - 2017-01-25 19:35:01.132 16123 INFO rally.api [-] Verification (UUID=ceb6f26b-5830-42c5-ab09-bfd985ed4cb7) has been successfully finished for deployment 'tempest-2' (UUID=38a397d0-ee11-475d-ab08-e17be09d0bcd)! - - ============================= - Failed 1 test - output below: - ============================= - - tempest.api.compute.admin.test_aggregates.AggregatesAdminTestJSON.test_aggregate_add_host_create_server_with_az - --------------------------------------------------------------------------------------------------------------- - Traceback (most recent call last): - File "tempest/api/compute/admin/test_aggregates.py", line 226, in test_aggregate_add_host_create_server_with_az - self.client.add_host(aggregate['id'], host=self.host) - File "tempest/lib/services/compute/aggregates_client.py", line 95, in add_host - post_body) - File "tempest/lib/common/rest_client.py", line 275, in post - return self.request('POST', url, extra_headers, headers, body, chunked) - File "tempest/lib/services/compute/base_compute_client.py", line 48, in request - method, url, extra_headers, headers, body, chunked) - File "tempest/lib/common/rest_client.py", line 663, in request - self._error_checker(resp, resp_body) - File "tempest/lib/common/rest_client.py", line 775, in _error_checker - raise exceptions.Conflict(resp_body, resp=resp) - tempest.lib.exceptions.Conflict: An object with that identifier already exists - Details: {u'message': u"Cannot add host to aggregate 2658. Reason: One or more hosts already in availability zone(s) [u'tempest-test_az-34611847'].", u'code': 409} - - ====== - Totals - ====== - - Ran: 9 tests in 12.391 sec. - - Success: 8 - - Skipped: 0 - - Expected failures: 0 - - Unexpected success: 0 - - Failures: 1 - - Using verification (UUID=ceb6f26b-5830-42c5-ab09-bfd985ed4cb7) as the default verification for the future operations. - -Also, there is a possibility to run Tempest tests from a file. Users can -specify a list of tests in the file and run them, using the **--load-list** -argument. - -.. 
code-block:: console - - $ cat load-list.txt - tempest.api.identity.admin.v2.test_endpoints.EndPointsTestJSON.test_create_list_delete_endpoint[id-9974530a-aa28-4362-8403-f06db02b26c1] - tempest.api.identity.admin.v2.test_endpoints.EndPointsTestJSON.test_list_endpoints[id-11f590eb-59d8-4067-8b2b-980c7f387f51] - tempest.api.identity.admin.v2.test_roles.RolesTestJSON.test_assign_user_role[id-0146f675-ffbd-4208-b3a4-60eb628dbc5e] - tempest.api.identity.admin.v2.test_roles.RolesTestJSON.test_get_role_by_id[id-db6870bd-a6ed-43be-a9b1-2f10a5c9994f] - tempest.api.identity.admin.v2.test_roles.RolesTestJSON.test_list_roles[id-75d9593f-50b7-4fcf-bd64-e3fb4a278e23] - tempest.api.identity.admin.v2.test_roles.RolesTestJSON.test_list_user_roles[id-262e1e3e-ed71-4edd-a0e5-d64e83d66d05] - tempest.api.identity.admin.v2.test_roles.RolesTestJSON.test_remove_user_role[id-f0b9292c-d3ba-4082-aa6c-440489beef69] - tempest.api.identity.admin.v2.test_roles.RolesTestJSON.test_role_create_delete[id-c62d909d-6c21-48c0-ae40-0a0760e6db5e] - -.. code-block:: console - - $ rally verify start --load-list load-list.txt - 2017-01-18 17:04:13.900 12964 INFO rally.api [-] Starting verification (UUID=af766b2f-cada-44db-a0c2-336ab0c17c27) for deployment 'tempest-2' (UUID=351fdfa2-99ad-4447-ba31-22e76630df97) by verifier 'tempest-verifier' (UUID=16b73e48-09ad-4a54-92eb-2f2708b72c54). - 2017-01-18 17:04:21.813 12964 INFO tempest-verifier [-] {1} tempest.api.identity.admin.v2.test_endpoints.EndPointsTestJSON.test_create_list_delete_endpoint ... success [1.237s] - 2017-01-18 17:04:22.115 12964 INFO tempest-verifier [-] {1} tempest.api.identity.admin.v2.test_endpoints.EndPointsTestJSON.test_list_endpoints ... success [0.301s] - 2017-01-18 17:04:24.507 12964 INFO tempest-verifier [-] {0} tempest.api.identity.admin.v2.test_roles.RolesTestJSON.test_assign_user_role ... success [3.663s] - 2017-01-18 17:04:25.164 12964 INFO tempest-verifier [-] {0} tempest.api.identity.admin.v2.test_roles.RolesTestJSON.test_get_role_by_id ... success [0.657s] - 2017-01-18 17:04:25.435 12964 INFO tempest-verifier [-] {2} tempest.api.identity.admin.v2.test_roles.RolesTestJSON.test_list_roles ... success [0.271s] - 2017-01-18 17:04:27.905 12964 INFO tempest-verifier [-] {2} tempest.api.identity.admin.v2.test_roles.RolesTestJSON.test_list_user_roles ... success [2.468s] - 2017-01-18 17:04:30.645 12964 INFO tempest-verifier [-] {0} tempest.api.identity.admin.v2.test_roles.RolesTestJSON.test_remove_user_role ... success [2.740s] - 2017-01-18 17:04:31.886 12964 INFO tempest-verifier [-] {3} tempest.api.identity.admin.v2.test_roles.RolesTestJSON.test_role_create_delete ... success [1.239s] - 2017-01-18 17:04:38.122 12964 INFO rally.api [-] Verification (UUID=af766b2f-cada-44db-a0c2-336ab0c17c27) has been successfully finished for deployment 'tempest-2' (UUID=351fdfa2-99ad-4447-ba31-22e76630df97)! - - ====== - Totals - ====== - Ran: 8 tests in 14.748 sec. - - Success: 8 - - Skipped: 0 - - Expected failures: 0 - - Unexpected success: 0 - - Failures: 0 - - Using verification (UUID=af766b2f-cada-44db-a0c2-336ab0c17c27) as the default verification for the future operations. - -Moreover, it is possible to skip a certain list of Tempest tests, using the -**--skip-list** argument. - -.. 
code-block:: console - - $ cat skip-list.yaml - tempest.api.compute.admin.test_flavors.FlavorsAdminTestJSON.test_create_flavor_using_string_ram[id-3b541a2e-2ac2-4b42-8b8d-ba6e22fcd4da]: - tempest.api.compute.admin.test_flavors.FlavorsAdminTestJSON.test_create_flavor_verify_entry_in_list_details[id-8261d7b0-be58-43ec-a2e5-300573c3f6c5]: Reason 1 - tempest.api.compute.admin.test_flavors.FlavorsAdminTestJSON.test_create_flavor_with_int_id[id-8b4330e1-12c4-4554-9390-e6639971f086]: - tempest.api.compute.admin.test_flavors.FlavorsAdminTestJSON.test_create_flavor_with_none_id[id-f83fe669-6758-448a-a85e-32d351f36fe0]: Reason 2 - tempest.api.compute.admin.test_flavors.FlavorsAdminTestJSON.test_create_flavor_with_uuid_id[id-94c9bb4e-2c2a-4f3c-bb1f-5f0daf918e6d]: - -.. code-block:: console - - $ rally verify start --pattern tempest.api.compute.admin.test_flavors.FlavorsAdminTestJSON --skip-list skip-list.yaml - 2017-01-18 17:13:44.475 13424 INFO rally.api [-] Starting verification (UUID=ec94b397-b546-4f12-82ba-bb17f052c3d0) for deployment 'tempest-2' (UUID=351fdfa2-99ad-4447-ba31-22e76630df97) by verifier 'tempest-verifier' (UUID=16b73e48-09ad-4a54-92eb-2f2708b72c54). - 2017-01-18 17:13:49.298 13424 INFO tempest-verifier [-] {-} tempest.api.compute.admin.test_flavors.FlavorsAdminTestJSON.test_create_flavor_with_int_id ... skip - 2017-01-18 17:13:49.298 13424 INFO tempest-verifier [-] {-} tempest.api.compute.admin.test_flavors.FlavorsAdminTestJSON.test_create_flavor_with_none_id ... skip: Reason 2 - 2017-01-18 17:13:49.298 13424 INFO tempest-verifier [-] {-} tempest.api.compute.admin.test_flavors.FlavorsAdminTestJSON.test_create_flavor_using_string_ram ... skip - 2017-01-18 17:13:49.298 13424 INFO tempest-verifier [-] {-} tempest.api.compute.admin.test_flavors.FlavorsAdminTestJSON.test_create_flavor_with_uuid_id ... skip - 2017-01-18 17:13:49.299 13424 INFO tempest-verifier [-] {-} tempest.api.compute.admin.test_flavors.FlavorsAdminTestJSON.test_create_flavor_verify_entry_in_list_details ... skip: Reason 1 - 2017-01-18 17:13:54.035 13424 INFO tempest-verifier [-] {0} tempest.api.compute.admin.test_flavors.FlavorsAdminTestJSON.test_create_list_flavor_without_extra_data ... success [1.889s] - 2017-01-18 17:13:54.765 13424 INFO tempest-verifier [-] {0} tempest.api.compute.admin.test_flavors.FlavorsAdminTestJSON.test_create_server_with_non_public_flavor ... success [0.732s] - 2017-01-18 17:13:57.478 13424 INFO tempest-verifier [-] {0} tempest.api.compute.admin.test_flavors.FlavorsAdminTestJSON.test_is_public_string_variations ... success [2.709s] - 2017-01-18 17:13:58.438 13424 INFO tempest-verifier [-] {0} tempest.api.compute.admin.test_flavors.FlavorsAdminTestJSON.test_list_non_public_flavor ... success [0.962s] - 2017-01-18 17:13:59.180 13424 INFO tempest-verifier [-] {0} tempest.api.compute.admin.test_flavors.FlavorsAdminTestJSON.test_list_public_flavor_with_other_user ... success [0.742s] - 2017-01-18 17:14:03.969 13424 INFO rally.api [-] Verification (UUID=ec94b397-b546-4f12-82ba-bb17f052c3d0) has been successfully finished for deployment 'tempest-2' (UUID=351fdfa2-99ad-4447-ba31-22e76630df97)! - - ====== - Totals - ====== - Ran: 10 tests in 9.882 sec. - - Success: 5 - - Skipped: 5 - - Expected failures: 0 - - Unexpected success: 0 - - Failures: 0 - - Using verification (UUID=ec94b397-b546-4f12-82ba-bb17f052c3d0) as the default verification for the future operations. - -Also, it is possible to specify the path to a file with a list of Tempest tests -that are expected to fail. 
In this case, the specified tests will have the -**xfail** status instead of **fail**. - -.. code-block:: console - - $ cat xfail-list.yaml - tempest.api.compute.admin.test_aggregates.AggregatesAdminTestJSON.test_aggregate_add_host_create_server_with_az[id-96be03c7-570d-409c-90f8-e4db3c646996]: Some reason why the test fails - -.. code-block:: console - - $ rally verify start --pattern tempest.api.compute.admin.test_aggregates.AggregatesAdminTestJSON --xfail-list xfail-list.yaml - 2017-01-18 17:20:04.064 13720 INFO rally.api [-] Starting verification (UUID=c416b724-0276-4c24-ab60-3ba7078c0a80) for deployment 'tempest-2' (UUID=351fdfa2-99ad-4447-ba31-22e76630df97) by verifier 'tempest-verifier' (UUID=16b73e48-09ad-4a54-92eb-2f2708b72c54). - 2017-01-18 17:20:17.359 13720 INFO tempest-verifier [-] {0} tempest.api.compute.admin.test_aggregates.AggregatesAdminTestJSON.test_aggregate_add_host_create_server_with_az ... xfail [6.328s]: Some reason why the test fails - 2017-01-18 17:20:18.337 13720 INFO tempest-verifier [-] {0} tempest.api.compute.admin.test_aggregates.AggregatesAdminTestJSON.test_aggregate_add_host_get_details ... success [0.978s] - 2017-01-18 17:20:19.379 13720 INFO tempest-verifier [-] {0} tempest.api.compute.admin.test_aggregates.AggregatesAdminTestJSON.test_aggregate_add_host_list ... success [1.042s] - 2017-01-18 17:20:20.213 13720 INFO tempest-verifier [-] {0} tempest.api.compute.admin.test_aggregates.AggregatesAdminTestJSON.test_aggregate_add_remove_host ... success [0.833s] - 2017-01-18 17:20:20.956 13720 INFO tempest-verifier [-] {0} tempest.api.compute.admin.test_aggregates.AggregatesAdminTestJSON.test_aggregate_create_delete ... success [0.743s] - 2017-01-18 17:20:21.772 13720 INFO tempest-verifier [-] {0} tempest.api.compute.admin.test_aggregates.AggregatesAdminTestJSON.test_aggregate_create_delete_with_az ... success [0.815s] - 2017-01-18 17:20:22.737 13720 INFO tempest-verifier [-] {0} tempest.api.compute.admin.test_aggregates.AggregatesAdminTestJSON.test_aggregate_create_update_metadata_get_details ... success [0.964s] - 2017-01-18 17:20:25.061 13720 INFO tempest-verifier [-] {0} tempest.api.compute.admin.test_aggregates.AggregatesAdminTestJSON.test_aggregate_create_update_with_az ... success [2.323s] - 2017-01-18 17:20:25.595 13720 INFO tempest-verifier [-] {0} tempest.api.compute.admin.test_aggregates.AggregatesAdminTestJSON.test_aggregate_create_verify_entry_in_list ... success [0.533s] - 2017-01-18 17:20:30.142 13720 INFO rally.api [-] Verification (UUID=c416b724-0276-4c24-ab60-3ba7078c0a80) has been successfully finished for deployment 'tempest-2' (UUID=351fdfa2-99ad-4447-ba31-22e76630df97)! - - ====== - Totals - ====== - Ran: 9 tests in 17.118 sec. - - Success: 8 - - Skipped: 0 - - Expected failures: 1 - - Unexpected success: 0 - - Failures: 0 - - Using verification (UUID=c416b724-0276-4c24-ab60-3ba7078c0a80) as the default verification for the future operations. - -Sometimes users may want to use the specific concurrency for running tests -based on their deployments and available resources. In this case, they can use -the **--concurrency** argument to specify how many processes to use to run -Tempest tests. The default value (0) auto-detects CPU count. - -.. 
code-block:: console - - $ rally verify start --load-list load-list.txt --concurrency 1 - 2017-01-18 17:05:38.658 13054 INFO rally.api [-] Starting verification (UUID=cbf5e604-6bc9-47cd-9c8c-5e4c9e9545a0) for deployment 'tempest-2' (UUID=351fdfa2-99ad-4447-ba31-22e76630df97) by verifier 'tempest-verifier' (UUID=16b73e48-09ad-4a54-92eb-2f2708b72c54). - 2017-01-18 17:05:45.474 13054 INFO tempest-verifier [-] {0} tempest.api.identity.admin.v2.test_endpoints.EndPointsTestJSON.test_create_list_delete_endpoint ... success [0.917s] - 2017-01-18 17:05:45.653 13054 INFO tempest-verifier [-] {0} tempest.api.identity.admin.v2.test_endpoints.EndPointsTestJSON.test_list_endpoints ... success [0.179s] - 2017-01-18 17:05:55.497 13054 INFO tempest-verifier [-] {0} tempest.api.identity.admin.v2.test_roles.RolesTestJSON.test_assign_user_role ... success [2.673s] - 2017-01-18 17:05:56.237 13054 INFO tempest-verifier [-] {0} tempest.api.identity.admin.v2.test_roles.RolesTestJSON.test_get_role_by_id ... success [0.740s] - 2017-01-18 17:05:56.642 13054 INFO tempest-verifier [-] {0} tempest.api.identity.admin.v2.test_roles.RolesTestJSON.test_list_roles ... success [0.403s] - 2017-01-18 17:06:00.011 13054 INFO tempest-verifier [-] {0} tempest.api.identity.admin.v2.test_roles.RolesTestJSON.test_list_user_roles ... success [3.371s] - 2017-01-18 17:06:02.987 13054 INFO tempest-verifier [-] {0} tempest.api.identity.admin.v2.test_roles.RolesTestJSON.test_remove_user_role ... success [2.973s] - 2017-01-18 17:06:04.927 13054 INFO tempest-verifier [-] {0} tempest.api.identity.admin.v2.test_roles.RolesTestJSON.test_role_create_delete ... success [1.939s] - 2017-01-18 17:06:11.166 13054 INFO rally.api [-] Verification (UUID=cbf5e604-6bc9-47cd-9c8c-5e4c9e9545a0) has been successfully finished for deployment 'tempest-2' (UUID=351fdfa2-99ad-4447-ba31-22e76630df97)! - - ====== - Totals - ====== - Ran: 8 tests in 23.043 sec. - - Success: 8 - - Skipped: 0 - - Expected failures: 0 - - Unexpected success: 0 - - Failures: 0 - - Using verification (UUID=cbf5e604-6bc9-47cd-9c8c-5e4c9e9545a0) as the default verification for the future operations. - -Also, there is a possibility to rerun tests from any verification. In order -to rerun tests from some verification execute the following command: - -.. code-block:: console - - $ rally verify rerun --uuid cbf5e604-6bc9-47cd-9c8c-5e4c9e9545a0 - 2017-01-18 17:29:35.692 14127 INFO rally.api [-] Re-running tests from verification (UUID=cbf5e604-6bc9-47cd-9c8c-5e4c9e9545a0) for deployment 'tempest-2' (UUID=351fdfa2-99ad-4447-ba31-22e76630df97). - 2017-01-18 17:29:35.792 14127 INFO rally.api [-] Starting verification (UUID=51aa3275-f028-4f2d-9d63-0db679fdf266) for deployment 'tempest-2' (UUID=351fdfa2-99ad-4447-ba31-22e76630df97) by verifier 'tempest-verifier' (UUID=16b73e48-09ad-4a54-92eb-2f2708b72c54). - 2017-01-18 17:29:43.980 14127 INFO tempest-verifier [-] {1} tempest.api.identity.admin.v2.test_endpoints.EndPointsTestJSON.test_create_list_delete_endpoint ... success [2.172s] - 2017-01-18 17:29:44.156 14127 INFO tempest-verifier [-] {1} tempest.api.identity.admin.v2.test_endpoints.EndPointsTestJSON.test_list_endpoints ... success [0.177s] - 2017-01-18 17:29:45.333 14127 INFO tempest-verifier [-] {0} tempest.api.identity.admin.v2.test_roles.RolesTestJSON.test_assign_user_role ... success [3.302s] - 2017-01-18 17:29:45.952 14127 INFO tempest-verifier [-] {0} tempest.api.identity.admin.v2.test_roles.RolesTestJSON.test_get_role_by_id ... 
success [0.619s] - 2017-01-18 17:29:46.219 14127 INFO tempest-verifier [-] {0} tempest.api.identity.admin.v2.test_roles.RolesTestJSON.test_list_roles ... success [0.266s] - 2017-01-18 17:29:48.964 14127 INFO tempest-verifier [-] {0} tempest.api.identity.admin.v2.test_roles.RolesTestJSON.test_list_user_roles ... success [2.744s] - 2017-01-18 17:29:52.543 14127 INFO tempest-verifier [-] {0} tempest.api.identity.admin.v2.test_roles.RolesTestJSON.test_remove_user_role ... success [3.578s] - 2017-01-18 17:29:53.843 14127 INFO tempest-verifier [-] {0} tempest.api.identity.admin.v2.test_roles.RolesTestJSON.test_role_create_delete ... success [1.300s] - 2017-01-18 17:30:01.258 14127 INFO rally.api [-] Verification (UUID=51aa3275-f028-4f2d-9d63-0db679fdf266) has been successfully finished for deployment 'tempest-2' (UUID=351fdfa2-99ad-4447-ba31-22e76630df97)! - - ====== - Totals - ====== - Ran: 8 tests in 14.926 sec. - - Success: 8 - - Skipped: 0 - - Expected failures: 0 - - Unexpected success: 0 - - Failures: 0 - - Verification UUID: 51aa3275-f028-4f2d-9d63-0db679fdf266. - -In order to rerun only failed tests, add the **--failed** argument to the -command. - -.. code-block:: console - - $ rally verify rerun --uuid <UUID of the verification> --failed - -A separate page describes building verification reports: -:ref:`verification-reports`. diff --git a/doc/source/quick_start/tutorial/step_11_profiling_openstack_internals.rst b/doc/source/quick_start/tutorial/step_11_profiling_openstack_internals.rst deleted file mode 100644 index eef80067..00000000 --- a/doc/source/quick_start/tutorial/step_11_profiling_openstack_internals.rst +++ /dev/null @@ -1,84 +0,0 @@ -.. - Copyright 2017 Inria All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -.. _tutorial_step_11_profiling_openstack_internals: - -Step 11. Profiling OpenStack Internals -====================================== - -.. contents:: - :local: - -Rally leverages `OSProfiler <https://github.com/openstack/osprofiler>`_ to -generate traces of OpenStack internal calls happening during the run of a -scenario. The integration of OSProfiler in Rally can help you dig into -concurrency problems of OpenStack, which is a huge ecosystem of cooperative -services. - -Workflow -------- - -Enabling the profiler is based on a shared secret between the clients (here -Rally) and the various OpenStack services: the HMAC key. In the following, we -assume that your OpenStack services have been configured to enable OSProfiler -and that the secret HMAC key is ``SECRET_KEY``. This key is stored alongside -the credentials of your deployment. Once Rally is instructed about the HMAC -key, a new trace can be initialized for each iteration of the workload. Rally -will then store a profiler trace id in its reports. This id can finally be -used to query OSProfiler in order to get the full trace of the iteration. - -Registering the HMAC key ------------------------ - -You can store your HMAC key in the environment variable -``OSPROFILER_HMAC_KEY``. This variable will be loaded if you create your -deployment with the ``--fromenv`` option.
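- -For example, a minimal sketch of this flow, assuming the ``SECRET_KEY`` value -from above and an already sourced OpenRC file (the deployment name -``existing-with-profiler`` is just an illustration): - -.. code-block:: console - - $ export OSPROFILER_HMAC_KEY=SECRET_KEY - $ rally deployment create --fromenv --name=existing-with-profiler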
- -Alternatively, if you create your deployment using the ``--file`` option, you -can add the HMAC key with the following: - -.. code-block:: json - - { - "type": "ExistingCloud", - "creds": { - "openstack": { - [...] - "profiler_hmac_key": "SECRET_KEY" - } - } - } - - -Getting the full trace ---------------------- - -A trace id is stored on a per-iteration basis and can be found in the JSON -report as well as in the HTML report: - -.. image:: ../../images/Report-Task-Scenario-Data-Per-iteration-profiler.png - - -OSProfiler can be asked to generate the full trace using this trace id: - -.. code-block:: shell - - osprofiler trace show --html --out trace.html 941338f6-3d39-4f80-9dba-395d9dbd16bb - - -Disabling the profiler ---------------------- - -Setting ``enable_profiler = False`` under the ``benchmark`` group in the -configuration file will disable the profiler. diff --git a/doc/source/quick_start/tutorial/step_1_setting_up_env_and_running_benchmark_from_samples.rst b/doc/source/quick_start/tutorial/step_1_setting_up_env_and_running_benchmark_from_samples.rst deleted file mode 100644 index 4ebff6af..00000000 --- a/doc/source/quick_start/tutorial/step_1_setting_up_env_and_running_benchmark_from_samples.rst +++ /dev/null @@ -1,296 +0,0 @@ -.. - Copyright 2015 Mirantis Inc. All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -.. _tutorial_step_1_setting_up_env_and_running_benchmark_from_samples: - -Step 1. Setting up the environment and running a benchmark from samples -======================================================================== - -.. contents:: - :local: - -In this demo, we will show how to perform some basic operations in Rally, such -as registering an OpenStack cloud, benchmarking it and generating benchmark -reports. - -We assume that you have gone through :ref:`tutorial_step_0_installation` and -have an already existing OpenStack deployment with Keystone available at -**<auth URL>**. - - -Registering an OpenStack deployment in Rally -------------------------------------------- - -First, you have to provide Rally with an OpenStack deployment it is going to -benchmark. This should be done either through `OpenRC files`_ or through -deployment `configuration files`_. In case you already have an *OpenRC*, it is -extremely simple to register a deployment with the *deployment create* command: - -.. code-block:: console - - $ . openrc admin admin - $ rally deployment create --fromenv --name=existing - +--------------------------------------+----------------------------+------------+------------------+--------+ - | uuid | created_at | name | status | active | - +--------------------------------------+----------------------------+------------+------------------+--------+ - | 28f90d74-d940-4874-a8ee-04fda59576da | 2015-01-18 00:11:38.059983 | existing | deploy->finished | | - +--------------------------------------+----------------------------+------------+------------------+--------+ - Using deployment: 28f90d74-d940-4874-a8ee-04fda59576da - ...
- -Alternatively, you can put the information about your cloud credentials into a -JSON configuration file (let's call it `existing.json`_). The *deployment -create* command has a slightly different syntax in this case: - -.. code-block:: console - - $ rally deployment create --file=existing.json --name=existing - +--------------------------------------+----------------------------+------------+------------------+--------+ - | uuid | created_at | name | status | active | - +--------------------------------------+----------------------------+------------+------------------+--------+ - | 28f90d74-d940-4874-a8ee-04fda59576da | 2015-01-18 00:11:38.059983 | existing | deploy->finished | | - +--------------------------------------+----------------------------+------------+------------------+--------+ - Using deployment : - ... - - -Note the last line in the output. It says that the just created deployment is -now used by Rally; that means that all the benchmarking operations from now on -are going to be performed on this deployment. Later we will show how to switch -between different deployments. - -Finally, the *deployment check* command enables you to verify that your current -deployment is healthy and ready to be benchmarked: - -.. code-block:: console - - $ rally deployment check - keystone endpoints are valid and following services are available: - +----------+----------------+-----------+ - | services | type | status | - +----------+----------------+-----------+ - | cinder | volume | Available | - | cinderv2 | volumev2 | Available | - | ec2 | ec2 | Available | - | glance | image | Available | - | heat | orchestration | Available | - | heat-cfn | cloudformation | Available | - | keystone | identity | Available | - | nova | compute | Available | - | novav21 | computev21 | Available | - | s3 | s3 | Available | - +----------+----------------+-----------+ - - -Benchmarking ------------- - -Now that we have a working and registered deployment, we can start benchmarking -it. The sequence of benchmarks to be launched by Rally should be specified in a -*benchmark task configuration file* (either in *JSON* or in *YAML* format). -Let's try one of the sample benchmark tasks available in -`samples/tasks/scenarios`_, say, the one that boots and deletes multiple -servers (*samples/tasks/scenarios/nova/boot-and-delete.json*): - - -.. code-block:: json - - { - "NovaServers.boot_and_delete_server": [ - { - "args": { - "flavor": { - "name": "m1.tiny" - }, - "image": { - "name": "^cirros.*-disk$" - }, - "force_delete": false - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 2 - } - } - } - ] - } - - -To start a benchmark task, run the ``task start`` command (you can also add the -*-v* option to print more logging information): - -.. code-block:: console - - $ rally task start samples/tasks/scenarios/nova/boot-and-delete.json - -------------------------------------------------------------------------------- - Preparing input task - -------------------------------------------------------------------------------- - - Input task is: - - - -------------------------------------------------------------------------------- - Task 6fd9a19f-5cf8-4f76-ab72-2e34bb1d4996: started - -------------------------------------------------------------------------------- - - Benchmarking... This can take a while... 
- - To track task status use: - - rally task status - or - rally task detailed - - -------------------------------------------------------------------------------- - Task 6fd9a19f-5cf8-4f76-ab72-2e34bb1d4996: finished - -------------------------------------------------------------------------------- - - test scenario NovaServers.boot_and_delete_server - args position 0 - args values: - {u'args': {u'flavor': {u'name': u'm1.tiny'}, - u'force_delete': False, - u'image': {u'name': u'^cirros.*-disk$'}}, - u'context': {u'users': {u'project_domain': u'default', - u'resource_management_workers': 30, - u'tenants': 3, - u'user_domain': u'default', - u'users_per_tenant': 2}}, - u'runner': {u'concurrency': 2, u'times': 10, u'type': u'constant'}} - +--------------------+-----------+-----------+-----------+---------------+---------------+---------+-------+ - | action | min (sec) | avg (sec) | max (sec) | 90 percentile | 95 percentile | success | count | - +--------------------+-----------+-----------+-----------+---------------+---------------+---------+-------+ - | nova.boot_server | 7.99 | 9.047 | 11.862 | 9.747 | 10.805 | 100.0% | 10 | - | nova.delete_server | 4.427 | 4.574 | 4.772 | 4.677 | 4.725 | 100.0% | 10 | - | total | 12.556 | 13.621 | 16.37 | 14.252 | 15.311 | 100.0% | 10 | - +--------------------+-----------+-----------+-----------+---------------+---------------+---------+-------+ - Load duration: 70.1310448647 - Full duration: 87.545541048 - - - HINTS: - * To plot HTML graphics with this data, run: - rally task report 6fd9a19f-5cf8-4f76-ab72-2e34bb1d4996 --out output.html - - * To get raw JSON output of task results, run: - rally task results 6fd9a19f-5cf8-4f76-ab72-2e34bb1d4996 - - Using task: 6fd9a19f-5cf8-4f76-ab72-2e34bb1d4996 - - -Note that the Rally input task above uses *regular expressions* to specify the -image and flavor name to be used for server creation, since concrete names -might differ from installation to installation. If this benchmark task fails, -then the reason for that might be a non-existing image/flavor specified in the -task. To check what images/flavors are available in the deployment you are -currently benchmarking, you might use the following commands: - -.. 
code-block:: console - - $ source ~/.rally/openrc - $ openstack image list - +--------------------------------------+---------------------------------+--------+ - | ID | Name | Status | - +--------------------------------------+---------------------------------+--------+ - | 30dc3b46-4a4b-4fcc-932c-91fa87753902 | cirros-0.3.4-x86_64-uec | active | - | d687fc2a-75bd-4194-90c7-1619af255b04 | cirros-0.3.4-x86_64-uec-kernel | active | - | c764d543-027d-47a3-b46e-0c1c8a68635d | cirros-0.3.4-x86_64-uec-ramdisk | active | - +--------------------------------------+---------------------------------+--------+ - - $ openstack flavor list - +----+-----------+-------+------+-----------+-------+-----------+ - | ID | Name | RAM | Disk | Ephemeral | VCPUs | Is Public | - +----+-----------+-------+------+-----------+-------+-----------+ - | 1 | m1.tiny | 512 | 1 | 0 | 1 | True | - | 2 | m1.small | 2048 | 20 | 0 | 1 | True | - | 3 | m1.medium | 4096 | 40 | 0 | 2 | True | - | 4 | m1.large | 8192 | 80 | 0 | 4 | True | - | 42 | m1.nano | 64 | 0 | 0 | 1 | True | - | 5 | m1.xlarge | 16384 | 160 | 0 | 8 | True | - | 84 | m1.micro | 128 | 0 | 0 | 1 | True | - +----+-----------+-------+------+-----------+-------+-----------+ - - -Report generation ------------------ - -One of the most beautiful things in Rally is its task report generation -mechanism. It enables you to create illustrative and comprehensive HTML reports -based on the benchmarking data. To create and open at once such a report for -the last task you have launched, call: - -.. code-block:: bash - - rally task report --out=report1.html --open - -This will produce an HTML page with the overview of all the scenarios that -you've included into the last benchmark task completed in Rally (in our case, -this is just one scenario, and we will cover the topic of multiple scenarios in -one task in -:ref:`the next step of our tutorial `): - -.. image:: ../../images/Report-Overview.png - :align: center - -This aggregating table shows the duration of the load produced by the -corresponding scenario (*"Load duration"*), the overall benchmark scenario -execution time, including the duration of environment preparation with contexts -(*"Full duration"*), the number of iterations of each scenario -(*"Iterations"*), the type of the load used while running the scenario -(*"Runner"*), the number of failed iterations (*"Errors"*) and finally whether -the scenario has passed certain Success Criteria (*"SLA"*) that were set up by -the user in the input configuration file (we will cover these criteria in -:ref:`one of the next steps `). - -By navigating in the left panel, you can switch to the detailed view of the -benchmark results for the only scenario we included into our task, namely -**NovaServers.boot_and_delete_server**: - -.. image:: ../../images/Report-Scenario-Overview.png - :align: center - -This page, along with the description of the success criteria used to check the -outcome of this scenario, shows more detailed information and statistics about -the duration of its iterations. Now, the *"Total durations"* table splits the -duration of our scenario into the so-called **"atomic actions"**: in our case, -the **"boot_and_delete_server"** scenario consists of two actions - -**"boot_server"** and **"delete_server"**. You can also see how the scenario -duration changed throughout its iterations in the *"Charts for the total -duration"* section. Similar charts, but with atomic actions detailed are on the -*"Details"* tab of this page: - -.. 
image:: ../../images/Report-Scenario-Atomic.png - :align: center - -Note that all the charts on the report pages are very dynamic: you can change -their contents by clicking the switches above the graph and see more -information about its single points by hovering the cursor over these points. - -Take some time to play around with these graphs -and then move on to :ref:`the next step of our tutorial -<tutorial_step_2_input_task_format>`. - -.. references: - -.. _OpenRC files: http://docs.openstack.org/user-guide/content/cli_openrc.html -.. _configuration files: https://github.com/openstack/rally/tree/master/samples/deployments -.. _existing.json: https://github.com/openstack/rally/blob/master/samples/deployments/existing.json -.. _samples/tasks/scenarios: https://github.com/openstack/rally/tree/master/samples/tasks/scenarios diff --git a/doc/source/quick_start/tutorial/step_2_input_task_format.rst b/doc/source/quick_start/tutorial/step_2_input_task_format.rst deleted file mode 100644 index e70ffc77..00000000 --- a/doc/source/quick_start/tutorial/step_2_input_task_format.rst +++ /dev/null @@ -1,231 +0,0 @@ -.. - Copyright 2015 Mirantis Inc. All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -.. _tutorial_step_2_input_task_format: - -Step 2. Rally input task format -=============================== - -.. contents:: - :local: - -Basic input task syntax ----------------------- - -Rally comes with a really great collection of -:ref:`plugins ` and in most -real-world cases you will use multiple plugins to test your OpenStack cloud. -Rally makes it very easy to run **different test cases defined in a single -task**. To do so, use the following syntax: - -.. code-block:: json - - { - "<ScenarioName1>": [<benchmark_config>, <benchmark_config2>, ...] - "<ScenarioName2>": [<benchmark_config>, ...] - } - -where each **<benchmark_config>**, as before, is a dictionary: - -.. code-block:: json - - { - "args": { <scenario-specific arguments> }, - "runner": { <type of the runner and its specific parameters> }, - "context": { <contexts needed for this scenario> }, - "sla": { <success criteria for this scenario> } - } - -Multiple benchmarks in a single task ------------------------------------ - -As an example, let's edit our configuration file from -:ref:`step 1 <tutorial_step_1_setting_up_env_and_running_benchmark_from_samples>` -so that it prescribes Rally to launch not only the -**NovaServers.boot_and_delete_server** scenario, but also the -**KeystoneBasic.create_delete_user** scenario. All we have to do is to append -the configuration of the second scenario as yet another top-level key of our -JSON file: - -*multiple-scenarios.json* - -.. code-block:: json - - { - "NovaServers.boot_and_delete_server": [ - { - "args": { - "flavor": { - "name": "m1.tiny" - }, - "image": { - "name": "^cirros.*-disk$" - }, - "force_delete": false - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 2 - } - } - } - ], - "KeystoneBasic.create_delete_user": [ - { - "args": {}, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 3 - } - } - ] - }
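- -Since input tasks may be written in YAML as well, the same two-scenario task -could equivalently be expressed as follows (a sketch with exactly the same -parameters as the JSON file above): - -.. code-block:: yaml - - --- - NovaServers.boot_and_delete_server: - - - args: - flavor: - name: "m1.tiny" - image: - name: "^cirros.*-disk$" - force_delete: false - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 3 - users_per_tenant: 2 - - KeystoneBasic.create_delete_user: - - - args: {} - runner: - type: "constant" - times: 10 - concurrency: 3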
- -Now you can start this benchmark task as usual: - -.. code-block:: console - - $ rally task start multiple-scenarios.json - ... - +--------------------+-----------+-----------+-----------+---------------+---------------+---------+-------+ - | action | min (sec) | avg (sec) | max (sec) | 90 percentile | 95 percentile | success | count | - +--------------------+-----------+-----------+-----------+---------------+---------------+---------+-------+ - | nova.boot_server | 8.06 | 11.354 | 18.594 | 18.54 | 18.567 | 100.0% | 10 | - | nova.delete_server | 4.364 | 5.054 | 6.837 | 6.805 | 6.821 | 100.0% | 10 | - | total | 12.572 | 16.408 | 25.396 | 25.374 | 25.385 | 100.0% | 10 | - +--------------------+-----------+-----------+-----------+---------------+---------------+---------+-------+ - Load duration: 84.1959171295 - Full duration: 102.033041 - -------------------------------------------------------------------------------- - - ... - - +----------------------+-----------+-----------+-----------+---------------+---------------+---------+-------+ - | action | min (sec) | avg (sec) | max (sec) | 90 percentile | 95 percentile | success | count | - +----------------------+-----------+-----------+-----------+---------------+---------------+---------+-------+ - | keystone.create_user | 0.676 | 0.875 | 1.03 | 1.02 | 1.025 | 100.0% | 10 | - | keystone.delete_user | 0.407 | 0.647 | 0.84 | 0.739 | 0.79 | 100.0% | 10 | - | total | 1.082 | 1.522 | 1.757 | 1.724 | 1.741 | 100.0% | 10 | - +----------------------+-----------+-----------+-----------+---------------+---------------+---------+-------+ - Load duration: 5.72119688988 - Full duration: 10.0808410645 - - ... - -Note that the HTML reports you can generate by typing **rally task report ---out=report_name.html** after your benchmark task has completed will get -richer as your benchmark task configuration file includes more benchmark -scenarios. Let's take a look at the report overview page for a task that covers -all the scenarios available in Rally: - -.. code-block:: bash - - rally task report --out=report_multiple_scenarios.html --open - -.. image:: ../../images/Report-Multiple-Overview.png - :align: center - - -Multiple configurations of the same scenario -------------------------------------------- - -Yet another thing you can do in Rally is to launch **the same benchmark -scenario multiple times with different configurations**. That's why our -configuration file stores a list for the key -*"NovaServers.boot_and_delete_server"*: you can just append a different -configuration of this benchmark scenario to this list. Let's say -you want to run the **boot_and_delete_server** scenario twice: first using the -*"m1.tiny"* flavor and then using the *"m1.small"* flavor: - -*multiple-configurations.json* - -.. code-block:: json - - { - "NovaServers.boot_and_delete_server": [ - { - "args": { - "flavor": { - "name": "m1.tiny" - }, - "image": { - "name": "^cirros.*-disk$" - }, - "force_delete": false - }, - "runner": {...}, - "context": {...} - }, - { - "args": { - "flavor": { - "name": "m1.small" - }, - "image": { - "name": "^cirros.*-disk$" - }, - "force_delete": false - }, - "runner": {...}, - "context": {...} - } - ] - } - -That's it! You will again get the results for each configuration separately: - -.. code-block:: console - - $ rally task start --task=multiple-configurations.json - ...
- +--------------------+-----------+-----------+-----------+---------------+---------------+---------+-------+ - | action | min (sec) | avg (sec) | max (sec) | 90 percentile | 95 percentile | success | count | - +--------------------+-----------+-----------+-----------+---------------+---------------+---------+-------+ - | nova.boot_server | 7.896 | 9.433 | 13.14 | 11.329 | 12.234 | 100.0% | 10 | - | nova.delete_server | 4.435 | 4.898 | 6.975 | 5.144 | 6.059 | 100.0% | 10 | - | total | 12.404 | 14.331 | 17.979 | 16.72 | 17.349 | 100.0% | 10 | - +--------------------+-----------+-----------+-----------+---------------+---------------+---------+-------+ - Load duration: 73.2339417934 - Full duration: 91.1692159176 - -------------------------------------------------------------------------------- - - ... - - +--------------------+-----------+-----------+-----------+---------------+---------------+---------+-------+ - | action | min (sec) | avg (sec) | max (sec) | 90 percentile | 95 percentile | success | count | - +--------------------+-----------+-----------+-----------+---------------+---------------+---------+-------+ - | nova.boot_server | 8.207 | 8.91 | 9.823 | 9.692 | 9.758 | 100.0% | 10 | - | nova.delete_server | 4.405 | 4.767 | 6.477 | 4.904 | 5.691 | 100.0% | 10 | - | total | 12.735 | 13.677 | 16.301 | 14.596 | 15.449 | 100.0% | 10 | - +--------------------+-----------+-----------+-----------+---------------+---------------+---------+-------+ - Load duration: 71.029528141 - Full duration: 88.0259010792 - ... - -The HTML report will also look similar to what we have seen before: - -.. code-block:: bash - - rally task report --out=report_multiple_configurations.html --open - -.. image:: ../../images/Report-Multiple-Configurations-Overview.png - :align: center diff --git a/doc/source/quick_start/tutorial/step_3_benchmarking_with_existing_users.rst b/doc/source/quick_start/tutorial/step_3_benchmarking_with_existing_users.rst deleted file mode 100644 index e909ea0f..00000000 --- a/doc/source/quick_start/tutorial/step_3_benchmarking_with_existing_users.rst +/dev/null @@ -1,142 +0,0 @@ -.. - Copyright 2015 Mirantis Inc. All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -.. _tutorial_step_3_benchmarking_with_existing_users: - -Step 3. Benchmarking OpenStack with existing users -================================================== - -.. contents:: - :local: - -Motivation ---------- - -There are two very important reasons from the production world why it is -preferable to use some already existing users to benchmark your OpenStack -cloud: - -1. *Read-only Keystone Backends:* creating temporary users for benchmark -scenarios in Rally is just impossible in case of r/o Keystone backends like -*LDAP* and *AD*. - -2. *Safety:* Rally can be run from an isolated group of users, and if something -goes wrong, this won’t affect the rest of the cloud users.
- - -Registering existing users in Rally ----------------------------------- - -The information about existing users in your OpenStack cloud should be passed -to Rally at the -:ref:`deployment initialization step -<tutorial_step_1_setting_up_env_and_running_benchmark_from_samples>`. -You have to use the **ExistingCloud** deployment plugin that just provides -Rally with credentials of an already existing cloud. The difference from the -deployment configuration we've seen previously is that you should set up the -*"users"* section with the credentials of already existing users. Let's call -this deployment configuration file *existing_users.json*: - -.. code-block:: json - - { - "type": "ExistingCloud", - "auth_url": "http://example.net:5000/v2.0/", - "region_name": "RegionOne", - "endpoint_type": "public", - "admin": { - "username": "admin", - "password": "pa55word", - "tenant_name": "demo" - }, - "users": [ - { - "username": "b1", - "password": "1234", - "tenant_name": "testing" - }, - { - "username": "b2", - "password": "1234", - "tenant_name": "testing" - } - ] - } - -This deployment configuration requires some basic information about the -OpenStack cloud like the region name, auth URL, admin user credentials, and any -number of users already existing in the system. Rally will use their -credentials to generate load against this deployment as soon as we register -it as usual: - -.. code-block:: console - - $ rally deployment create --file existing_users.json --name our_cloud - +--------------------------------------+----------------------------+-----------+------------------+--------+ - | uuid | created_at | name | status | active | - +--------------------------------------+----------------------------+-----------+------------------+--------+ - | 1849a9bf-4b18-4fd5-89f0-ddcc56eae4c9 | 2015-03-28 02:43:27.759702 | our_cloud | deploy->finished | | - +--------------------------------------+----------------------------+-----------+------------------+--------+ - Using deployment: 1849a9bf-4b18-4fd5-89f0-ddcc56eae4c9 - ~/.rally/openrc was updated - -With this new deployment being active, Rally will use the already existing -users instead of creating the temporary ones when launching benchmark tasks -that do not specify the *"users"* context. - - -Running benchmark scenarios with existing users ----------------------------------------------- - -After you have registered a deployment with existing users, don't forget to -remove the *"users"* context from your benchmark task configuration if you want -to use existing users, like in the following configuration file -(*boot-and-delete.json*): - - -.. code-block:: json - - { - "NovaServers.boot_and_delete_server": [ - { - "args": { - "flavor": { - "name": "m1.tiny" - }, - "image": { - "name": "^cirros.*-disk$" - }, - "force_delete": false - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": {} - } - ] - } - -When you start this task, it will use the existing users *"b1"* and *"b2"* -instead of creating the temporary ones: - -.. code-block:: bash - - rally task start samples/tasks/scenarios/nova/boot-and-delete.json - -It goes without saying that support of benchmarking with predefined users -simplifies the usage of Rally for generating loads against production clouds.
- -(based on: http://boris-42.me/rally-can-generate-load-with-passed-users-now/) diff --git a/doc/source/quick_start/tutorial/step_4_adding_success_criteria_for_benchmarks.rst b/doc/source/quick_start/tutorial/step_4_adding_success_criteria_for_benchmarks.rst deleted file mode 100644 index 689a985d..00000000 --- a/doc/source/quick_start/tutorial/step_4_adding_success_criteria_for_benchmarks.rst +++ /dev/null @@ -1,164 +0,0 @@ -.. - Copyright 2015 Mirantis Inc. All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -.. _tutorial_step_4_adding_success_criteria_for_benchmarks: - -Step 4. Adding success criteria (SLA) for benchmarks -==================================================== - -.. contents:: - :local: - -SLA - Service-Level Agreement (Success Criteria) ------------------------------------------------ - -Rally allows you to set success criteria (also called *SLA - Service-Level -Agreement*) for every benchmark. Rally will automatically check them for you. - -To configure the SLA, add the *"sla"* section to the configuration of the -corresponding benchmark (the check name is a key associated with its target -value). You can combine different success criteria: - -.. code-block:: json - - { - "NovaServers.boot_and_delete_server": [ - { - "args": { - ... - }, - "runner": { - ... - }, - "context": { - ... - }, - "sla": { - "max_seconds_per_iteration": 10, - "failure_rate": { - "max": 25 - } - } - } - ] - } - -Such a configuration will mark the **NovaServers.boot_and_delete_server** -benchmark scenario as not successful if either some iteration took more than 10 -seconds or more than 25% of iterations failed.
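- -Other checks can be combined in the same way. For instance, a YAML sketch of -an *"sla"* section that additionally bounds the average iteration duration -with the **max_avg_duration** criterion (the same criterion is used in -:ref:`step 6 <tutorial_step_6_aborting_load_generation_on_sla_failure>` of -this tutorial): - -.. code-block:: yaml - - sla: - max_seconds_per_iteration: 10 - max_avg_duration: 5 - failure_rate: - max: 25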
- - -Checking SLA ------------ -Let us show you how Rally SLAs work using a simple example based on **Dummy -benchmark scenarios**. These scenarios actually do not perform any -OpenStack-related stuff but are very useful for testing the behavior of Rally. -Let us put in a new task, *test-sla.json*, 2 scenarios -- one that does nothing -and another that just throws an exception: - -.. code-block:: json - - { - "Dummy.dummy": [ - { - "args": {}, - "runner": { - "type": "constant", - "times": 5, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": {"max": 0.0} - } - } - ], - "Dummy.dummy_exception": [ - { - "args": {}, - "runner": { - "type": "constant", - "times": 5, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": {"max": 0.0} - } - } - ] - } - -Note that both scenarios in these tasks have the **maximum failure rate of 0%** -as their **success criterion**. We expect that the first scenario will pass -this criterion while the second will fail it. Let's start the task: - -.. code-block:: bash - - rally task start test-sla.json - -After the task completes, run *rally task sla_check* to check the results -against the success criteria you defined in the task: - -.. code-block:: console - - $ rally task sla_check - +-----------------------+-----+--------------+--------+-------------------------------------------------------------------------------------------------------+ - | benchmark | pos | criterion | status | detail | - +-----------------------+-----+--------------+--------+-------------------------------------------------------------------------------------------------------+ - | Dummy.dummy | 0 | failure_rate | PASS | Maximum failure rate percent 0.0% failures, minimum failure rate percent 0% failures, actually 0.0% | - | Dummy.dummy_exception | 0 | failure_rate | FAIL | Maximum failure rate percent 0.0% failures, minimum failure rate percent 0% failures, actually 100.0% | - +-----------------------+-----+--------------+--------+-------------------------------------------------------------------------------------------------------+ - -Exactly as expected. - - -SLA in task report ------------------ - -SLA checks are nicely visualized in task reports. Generate one: - -.. code-block:: bash - - rally task report --out=report_sla.html --open - -Benchmark scenarios that have passed SLA have a green check on the overview -page: - -.. image:: ../../images/Report-SLA-Overview.png - :align: center - -Somewhat more detailed information about SLA is displayed on the scenario -pages: - -.. image:: ../../images/Report-SLA-Scenario.png - :align: center - -Success criteria present a very useful concept that enables you not only to -analyze the outcome of your benchmark tasks, but also to control their -execution. In -:ref:`one of the next sections <tutorial_step_6_aborting_load_generation_on_sla_failure>` -of our tutorial, we will show how to use SLA to abort the load generation -before your OpenStack goes wrong. diff --git a/doc/source/quick_start/tutorial/step_5_task_templates.rst b/doc/source/quick_start/tutorial/step_5_task_templates.rst deleted file mode 100644 index 36cc4878..00000000 --- a/doc/source/quick_start/tutorial/step_5_task_templates.rst +++ /dev/null @@ -1,378 +0,0 @@ -.. - Copyright 2015 Mirantis Inc. All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -.. _tutorial_step_5_task_templates: - -Step 5. Rally task templates -============================ - -.. contents:: - :local: - -Basic template syntax --------------------- - -A nice feature of the input task format used in Rally is that it supports the -**template syntax** based on `Jinja2`_. This turns out to be extremely useful -when, say, you have a fixed structure of your task but you want to parameterize -this task in some way. For example, imagine your input task file (*task.yaml*) -runs a set of Nova scenarios: - -.. 
code-block:: yaml - - --- - NovaServers.boot_and_delete_server: - - - args: - flavor: - name: "m1.tiny" - image: - name: "^cirros.*-disk$" - runner: - type: "constant" - times: 2 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - - NovaServers.resize_server: - - - args: - flavor: - name: "m1.tiny" - image: - name: "^cirros.*-disk$" - to_flavor: - name: "m1.small" - runner: - type: "constant" - times: 3 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - -In both scenarios above, the *"^cirros.*-disk$"* image is passed to the -scenario as an argument (so that these scenarios use an appropriate image while -booting servers). Let’s say you want to run the same set of scenarios with the -same runner/context/sla, but you want to try another image while booting server -to compare the performance. The most elegant solution is then to turn the image -name into a template variable: - -.. code-block:: yaml - - --- - NovaServers.boot_and_delete_server: - - - args: - flavor: - name: "m1.tiny" - image: - name: {{image_name}} - runner: - type: "constant" - times: 2 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - - NovaServers.resize_server: - - - args: - flavor: - name: "m1.tiny" - image: - name: {{image_name}} - to_flavor: - name: "m1.small" - runner: - type: "constant" - times: 3 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - -and then pass the argument value for **{{image_name}}** when starting a task -with this configuration file. Rally provides you with different ways to do -that: - - -1. Pass the argument values directly in the command-line interface (with either -a JSON or YAML dictionary): - -.. code-block:: bash - - rally task start task.yaml --task-args '{"image_name": "^cirros.*-disk$"}' - rally task start task.yaml --task-args 'image_name: "^cirros.*-disk$"' - -2. Refer to a file that specifies the argument values (JSON/YAML): - -.. code-block:: bash - - rally task start task.yaml --task-args-file args.json - rally task start task.yaml --task-args-file args.yaml - -where the files containing argument values should look as follows: - -*args.json*: - -.. code-block:: json - - { - "image_name": "^cirros.*-disk$" - } - -*args.yaml*: - -.. code-block:: yaml - - --- - image_name: "^cirros.*-disk$" - -Passed in either way, these parameter values will be substituted by Rally when -starting a task: - -.. code-block:: console - - $ rally task start task.yaml --task-args "image_name: "^cirros.*-disk$"" - -------------------------------------------------------------------------------- - Preparing input task - -------------------------------------------------------------------------------- - - Input task is: - --- - - NovaServers.boot_and_delete_server: - - - args: - flavor: - name: "m1.tiny" - image: - name: ^cirros.*-disk$ - runner: - type: "constant" - times: 2 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - - NovaServers.resize_server: - - - args: - flavor: - name: "m1.tiny" - image: - name: ^cirros.*-disk$ - to_flavor: - name: "m1.small" - runner: - type: "constant" - times: 3 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - - -------------------------------------------------------------------------------- - Task cbf7eb97-0f1d-42d3-a1f1-3cc6f45ce23f: started - -------------------------------------------------------------------------------- - - Benchmarking... This can take a while... 
- - -Using the default values ------------------------- - -Note that the ``Jinja2`` template syntax allows you to set the default values -for your parameters. With default values set, your task file will work even if -you don't parameterize it explicitly while starting a task. The default values -should be set using the *{% set ... %}* clause (*task.yaml*): - -.. code-block:: yaml - - {% set image_name = image_name or "^cirros.*-disk$" %} - --- - - NovaServers.boot_and_delete_server: - - - args: - flavor: - name: "m1.tiny" - image: - name: {{image_name}} - runner: - type: "constant" - times: 2 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - - ... - -If you don't pass the value for *{{image_name}}* while starting a task, the -default one will be used: - -.. code-block:: console - - $ rally task start task.yaml - -------------------------------------------------------------------------------- - Preparing input task - -------------------------------------------------------------------------------- - - Input task is: - --- - - NovaServers.boot_and_delete_server: - - - args: - flavor: - name: "m1.tiny" - image: - name: ^cirros.*-disk$ - runner: - type: "constant" - times: 2 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - - ... - - -Advanced templates ------------------- - -Rally makes it possible to use all the power of ``Jinja2`` template syntax, -including the mechanism of **built-in functions**. This enables you to -construct elegant task files capable of generating complex load on your cloud. - -As an example, let us make up a task file that will create new users with -increasing concurrency. The input task file (*task.yaml*) below uses the -``Jinja2`` **for-endfor** construct to accomplish that: - - -.. code-block:: yaml - - --- - KeystoneBasic.create_user: - {% for i in range(2, 11, 2) %} - - - args: {} - runner: - type: "constant" - times: 10 - concurrency: {{i}} - sla: - failure_rate: - max: 0 - {% endfor %} - - -In this case, you don’t need to pass any arguments via -*--task-args/--task-args-file*, but as soon as you start this task, Rally will -automatically unfold the for-loop for you: - -.. code-block:: console - - $ rally task start task.yaml - -------------------------------------------------------------------------------- - Preparing input task - -------------------------------------------------------------------------------- - - Input task is: - --- - - KeystoneBasic.create_user: - - - - args: {} - runner: - type: "constant" - times: 10 - concurrency: 2 - sla: - failure_rate: - max: 0 - - - - args: {} - runner: - type: "constant" - times: 10 - concurrency: 4 - sla: - failure_rate: - max: 0 - - - - args: {} - runner: - type: "constant" - times: 10 - concurrency: 6 - sla: - failure_rate: - max: 0 - - - - args: {} - runner: - type: "constant" - times: 10 - concurrency: 8 - sla: - failure_rate: - max: 0 - - - - args: {} - runner: - type: "constant" - times: 10 - concurrency: 10 - sla: - failure_rate: - max: 0 - - - -------------------------------------------------------------------------------- - Task ea7e97e3-dd98-4a81-868a-5bb5b42b8610: started - -------------------------------------------------------------------------------- - - Benchmarking... This can take a while... - -As you can see, the Rally task template syntax is a simple but powerful -mechanism that not only enables you to write elegant task configurations, but -also makes them more readable for other people. 
When used appropriately, it can -really improve the understanding of your benchmarking procedures in Rally when -shared with others. - -.. references: - -.. _Jinja2: https://pypi.python.org/pypi/Jinja2 diff --git a/doc/source/quick_start/tutorial/step_6_aborting_load_generation_on_sla_failure.rst b/doc/source/quick_start/tutorial/step_6_aborting_load_generation_on_sla_failure.rst deleted file mode 100644 index 4fd2ff93..00000000 --- a/doc/source/quick_start/tutorial/step_6_aborting_load_generation_on_sla_failure.rst +++ /dev/null @@ -1,149 +0,0 @@ -.. - Copyright 2015 Mirantis Inc. All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -.. _tutorial_step_6_aborting_load_generation_on_sla_failure: - -Step 6. Aborting load generation on success criteria failure -============================================================ - -Benchmarking pre-production and production OpenStack clouds is not a trivial -task. On the one hand, it is important to reach the OpenStack cloud's limits; -on the other hand, the cloud shouldn't be damaged. Rally aims to make this -task as simple as possible. Since the very beginning, Rally has been able to -generate enough load for any OpenStack cloud. Generating too big a load was the -major issue for production clouds, because Rally didn't know how to stop the -load until it was too late. - -With the **"stop on SLA failure"** feature, however, things are much better. - -This feature can be easily tested in real life by running one of the most -important and simplest benchmark scenarios, called *"Authenticate.keystone"*. -This scenario just tries to authenticate from users that were pre-created by -Rally. The Rally input task looks as follows (*auth.yaml*): - -.. code-block:: yaml - - --- - Authenticate.keystone: - - - runner: - type: "rps" - times: 6000 - rps: 50 - context: - users: - tenants: 5 - users_per_tenant: 10 - sla: - max_avg_duration: 5 - -In human-readable form this input task means: *Create 5 tenants with 10 users -in each, after that try to authenticate to Keystone 6000 times performing 50 -authentications per second (running a new authentication request every 20ms). -Each time we are performing authentication from one of the Rally pre-created -users. This task passes only if the max average duration of authentication is -less than 5 seconds.* - -**Note that this test is quite dangerous because it can DDoS Keystone**. We are -running more and more simultaneous authentication requests and things may go -wrong if something is not set properly (like on my DevStack deployment in a -small VM on my laptop). - -Let’s run the Rally task with **an argument that prescribes Rally to stop load -on SLA failure**: - -.. code-block:: console - - $ rally task start --abort-on-sla-failure auth.yaml - - ....
- +--------+-----------+-----------+-----------+---------------+---------------+---------+-------+ - | action | min (sec) | avg (sec) | max (sec) | 90 percentile | 95 percentile | success | count | - +--------+-----------+-----------+-----------+---------------+---------------+---------+-------+ - | total | 0.108 | 8.58 | 65.97 | 19.782 | 26.125 | 100.0% | 2495 | - +--------+-----------+-----------+-----------+---------------+---------------+---------+-------+ - -In the resulting table there are 2 interesting things: - -1. The average duration was 8.58 sec, which is more than 5 seconds -2. Rally performed only 2495 (instead of 6000) authentication requests - -To better understand what has happened, let’s generate an HTML report: - -.. code-block:: bash - - rally task report --out auth_report.html - -.. image:: ../../images/Report-Abort-on-SLA-task-1.png - :align: center - -On the chart with durations we can observe that the duration of an -authentication request reaches 65 seconds at the end of the load generation. -**Rally stopped the load at the very last moment, just before bad things -happened. The reason it ran so many authentication attempts is that the success -criteria were not strict enough.** We had to run a lot of iterations to make the -average duration exceed 5 seconds. Let’s choose better success criteria for -this task and run it one more time. - -.. code-block:: yaml - - --- - Authenticate.keystone: - - - runner: - type: "rps" - times: 6000 - rps: 50 - context: - users: - tenants: 5 - users_per_tenant: 10 - sla: - max_avg_duration: 5 - max_seconds_per_iteration: 10 - failure_rate: - max: 0 - -Now our task is going to be successful if the following three conditions hold: - -1. maximum average duration of authentication should be less than 5 seconds -2. maximum duration of any authentication should be less than 10 seconds -3. no failed authentication should appear - -Let’s run it! - -.. code-block:: console - - $ rally task start --abort-on-sla-failure auth.yaml - - ... - +--------+-----------+-----------+-----------+---------------+---------------+---------+-------+ - | action | min (sec) | avg (sec) | max (sec) | 90 percentile | 95 percentile | success | count | - +--------+-----------+-----------+-----------+---------------+---------------+---------+-------+ - | total | 0.082 | 5.411 | 22.081 | 10.848 | 14.595 | 100.0% | 1410 | - +--------+-----------+-----------+-----------+---------------+---------------+---------+-------+ - -.. image:: ../../images/Report-Abort-on-SLA-task-2.png - :align: center - -This time the load stopped after 1410 iterations versus 2495, which is much -better. The interesting thing in this chart is that the first occurrence of a -"> 10 second" authentication happened on iteration 950. A reasonable question: -"Why did Rally run about 500 more authentication requests then?" This follows -from the math: during the execution of a **bad** authentication (10 seconds), -Rally issued about 50 requests/sec * 10 sec = 500 new requests; as a result, we -ran about 1400 iterations instead of 950. - -(based on: http://boris-42.me/rally-tricks-stop-load-before-your-openstack-goes-wrong/) diff --git a/doc/source/quick_start/tutorial/step_7_working_with_multple_openstack_clouds.rst b/doc/source/quick_start/tutorial/step_7_working_with_multple_openstack_clouds.rst deleted file mode 100644 index ea67d9b1..00000000 --- a/doc/source/quick_start/tutorial/step_7_working_with_multple_openstack_clouds.rst +++ /dev/null @@ -1,144 +0,0 @@ -.. - Copyright 2015 Mirantis Inc. All Rights Reserved.
- - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -.. _tutorial_step_7_working_with_multple_openstack_clouds: - -Step 7. Working with multiple OpenStack clouds -============================================== - -Rally is an awesome tool that allows you to work with multiple clouds and can -itself deploy them. We already know how to work with -:ref:`a single cloud `. -Let us now register 2 clouds in Rally: the one that we have access to and the -other that we know is registered with wrong credentials. - -.. code-block:: console - - $ . openrc admin admin # openrc with correct credentials - $ rally deployment create --fromenv --name=cloud-1 - +--------------------------------------+----------------------------+------------+------------------+--------+ - | uuid | created_at | name | status | active | - +--------------------------------------+----------------------------+------------+------------------+--------+ - | 4251b491-73b2-422a-aecb-695a94165b5e | 2015-01-18 00:11:14.757203 | cloud-1 | deploy->finished | | - +--------------------------------------+----------------------------+------------+------------------+--------+ - Using deployment: 4251b491-73b2-422a-aecb-695a94165b5e - ~/.rally/openrc was updated - ... - - $ . bad_openrc admin admin # openrc with wrong credentials - $ rally deployment create --fromenv --name=cloud-2 - +--------------------------------------+----------------------------+------------+------------------+--------+ - | uuid | created_at | name | status | active | - +--------------------------------------+----------------------------+------------+------------------+--------+ - | 658b9bae-1f9c-4036-9400-9e71e88864fc | 2015-01-18 00:38:26.127171 | cloud-2 | deploy->finished | | - +--------------------------------------+----------------------------+------------+------------------+--------+ - Using deployment: 658b9bae-1f9c-4036-9400-9e71e88864fc - ~/.rally/openrc was updated - ... - -Let us now list the deployments we have created: - -.. code-block:: console - - $ rally deployment list - +--------------------------------------+----------------------------+------------+------------------+--------+ - | uuid | created_at | name | status | active | - +--------------------------------------+----------------------------+------------+------------------+--------+ - | 4251b491-73b2-422a-aecb-695a94165b5e | 2015-01-05 00:11:14.757203 | cloud-1 | deploy->finished | | - | 658b9bae-1f9c-4036-9400-9e71e88864fc | 2015-01-05 00:40:58.451435 | cloud-2 | deploy->finished | * | - +--------------------------------------+----------------------------+------------+------------------+--------+ - -Note that the second is marked as **"active"** because this is the deployment -we have created most recently. This means that it will be automatically (unless -its UUID or name is passed explicitly via the *--deployment* parameter) used by -the commands that need a deployment, like *rally task start ...* or *rally -deployment check*: - -.. 
-
-.. code-block:: console
-
-    $ rally deployment check
-    Authentication Issues: wrong keystone credentials specified in your endpoint properties. (HTTP 401).
-
-    $ rally deployment check --deployment=cloud-1
-    keystone endpoints are valid and following services are available:
-    +----------+----------------+-----------+
-    | services | type           | status    |
-    +----------+----------------+-----------+
-    | cinder   | volume         | Available |
-    | cinderv2 | volumev2       | Available |
-    | ec2      | ec2            | Available |
-    | glance   | image          | Available |
-    | heat     | orchestration  | Available |
-    | heat-cfn | cloudformation | Available |
-    | keystone | identity       | Available |
-    | nova     | compute        | Available |
-    | novav21  | computev21     | Available |
-    | s3       | s3             | Available |
-    +----------+----------------+-----------+
-
-You can also switch the active deployment using the **rally deployment use**
-command:
-
-.. code-block:: console
-
-    $ rally deployment use cloud-1
-    Using deployment: 4251b491-73b2-422a-aecb-695a94165b5e
-    ~/.rally/openrc was updated
-    ...
-
-    $ rally deployment check
-    keystone endpoints are valid and following services are available:
-    +----------+----------------+-----------+
-    | services | type           | status    |
-    +----------+----------------+-----------+
-    | cinder   | volume         | Available |
-    | cinderv2 | volumev2       | Available |
-    | ec2      | ec2            | Available |
-    | glance   | image          | Available |
-    | heat     | orchestration  | Available |
-    | heat-cfn | cloudformation | Available |
-    | keystone | identity       | Available |
-    | nova     | compute        | Available |
-    | novav21  | computev21     | Available |
-    | s3       | s3             | Available |
-    +----------+----------------+-----------+
-
-Note the first two lines of the CLI output for the *rally deployment use*
-command. They tell you the UUID of the new active deployment and also say
-that the *~/.rally/openrc* file was updated -- this is the place where the
-"active" UUID is actually stored by Rally.
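-
-When you juggle several deployments, the switch-and-check cycle above is easy
-to script. A minimal sketch that relies only on the commands shown in this
-step (and on the assumption that *rally deployment check* exits with a
-non-zero code on failure):
-
-.. code-block:: python
-
-    import subprocess
-
-    # Check every registered deployment by name, as in the examples above.
-    for name in ("cloud-1", "cloud-2"):
-        result = subprocess.run(["rally", "deployment", "check",
-                                 "--deployment", name])
-        # cloud-2 was registered with wrong credentials, so it should fail.
-        print("%s: %s" % (name, "OK" if result.returncode == 0 else "FAILED"))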
-
-One last detail about managing different deployments in Rally is that the
-*rally task list* command outputs only those tasks that were run against the
-currently active deployment, and you have to provide the *--all-deployments*
-parameter to list all the tasks:
-
-.. code-block:: console
-
-    $ rally task list
-    +--------------------------------------+-----------------+----------------------------+----------------+----------+--------+-----+
-    | uuid                                 | deployment_name | created_at                 | duration       | status   | failed | tag |
-    +--------------------------------------+-----------------+----------------------------+----------------+----------+--------+-----+
-    | c21a6ecb-57b2-43d6-bbbb-d7a827f1b420 | cloud-1         | 2015-01-05 01:00:42.099596 | 0:00:13.419226 | finished | False  |     |
-    | f6dad6ab-1a6d-450d-8981-f77062c6ef4f | cloud-1         | 2015-01-05 01:05:57.653253 | 0:00:14.160493 | finished | False  |     |
-    +--------------------------------------+-----------------+----------------------------+----------------+----------+--------+-----+
-    $ rally task list --all-deployments
-    +--------------------------------------+-----------------+----------------------------+----------------+----------+--------+-----+
-    | uuid                                 | deployment_name | created_at                 | duration       | status   | failed | tag |
-    +--------------------------------------+-----------------+----------------------------+----------------+----------+--------+-----+
-    | c21a6ecb-57b2-43d6-bbbb-d7a827f1b420 | cloud-1         | 2015-01-05 01:00:42.099596 | 0:00:13.419226 | finished | False  |     |
-    | f6dad6ab-1a6d-450d-8981-f77062c6ef4f | cloud-1         | 2015-01-05 01:05:57.653253 | 0:00:14.160493 | finished | False  |     |
-    | 6fd9a19f-5cf8-4f76-ab72-2e34bb1d4996 | cloud-2         | 2015-01-05 01:14:51.428958 | 0:00:15.042265 | finished | False  |     |
-    +--------------------------------------+-----------------+----------------------------+----------------+----------+--------+-----+
diff --git a/doc/source/quick_start/tutorial/step_8_discovering_more_plugins.rst b/doc/source/quick_start/tutorial/step_8_discovering_more_plugins.rst
deleted file mode 100644
index aa7e4040..00000000
--- a/doc/source/quick_start/tutorial/step_8_discovering_more_plugins.rst
+++ /dev/null
@@ -1,115 +0,0 @@
-..
-   Copyright 2015 Mirantis Inc. All Rights Reserved.
-
-   Licensed under the Apache License, Version 2.0 (the "License"); you may
-   not use this file except in compliance with the License. You may obtain
-   a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-   WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-   License for the specific language governing permissions and limitations
-   under the License.
-
-.. _tutorial_step_8_discovering_more_plugins:
-
-Step 8. Discovering more plugins in Rally
-=========================================
-
-.. contents::
-  :local:
-
-Plugins in the Rally repository
--------------------------------
-
-Rally currently comes with a great collection of plugins that use the APIs of
-different OpenStack projects like **Keystone**, **Nova**, **Cinder**,
-**Glance** and so on. The good news is that you can combine multiple plugins
-in one task to test your cloud in a comprehensive way (see the sketch later
-in this step).
-
-First, let's see what plugins are available in Rally. One way to discover
-these plugins is just to inspect their `source code`_; another is to use the
-built-in ``rally plugin`` command.
-
-CLI: rally plugin show
-----------------------
-
-The ``rally plugin`` CLI command is a much more convenient way to learn about
-the different plugins in Rally. It allows you to list plugins and show
-detailed information about them:
-
-.. code-block:: console
-
-    $ rally plugin show create_meter_and_get_stats
-
-    NAME
-        CeilometerStats.create_meter_and_get_stats
-    NAMESPACE
-        default
-    MODULE
-        rally.plugins.openstack.scenarios.ceilometer.stats
-    DESCRIPTION
-        Meter is first created and then statistics is fetched for the same
-        using GET /v2/meters/(meter_name)/statistics.
-    PARAMETERS
-        +--------+------------------------------------------------+
-        | name   | description                                    |
-        +--------+------------------------------------------------+
-        | kwargs | contains optional arguments to create a meter  |
-        |        |                                                |
-        +--------+------------------------------------------------+
-
-If multiple plugins match, the command lists all matching elements:
-
-.. code-block:: console
-
-    $ rally plugin show NovaKeypair
-
-    Multiple plugins found:
-    +-------------------------------------------------+-----------+--------------------------------------------------------+
-    | name                                            | namespace | title                                                  |
-    +-------------------------------------------------+-----------+--------------------------------------------------------+
-    | NovaKeypair.boot_and_delete_server_with_keypair | default   | Boot and delete server with keypair.                   |
-    | NovaKeypair.create_and_delete_keypair           | default   | Create a keypair with random name and delete keypair.  |
-    | NovaKeypair.create_and_list_keypairs            | default   | Create a keypair with random name and list keypairs.   |
-    +-------------------------------------------------+-----------+--------------------------------------------------------+
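-
-As promised at the start of this step, plugins from different projects can be
-combined in one task. A minimal sketch of such a task file, written as a
-Python dict and dumped to JSON (the scenario names come from the tables in
-this step; the runner settings are illustrative, and a real run may need
-contexts or scenario arguments):
-
-.. code-block:: python
-
-    import json
-
-    # One task exercising two plugins from different OpenStack projects.
-    task = {
-        "Authenticate.keystone": [
-            {"runner": {"type": "serial", "times": 10}}
-        ],
-        "NovaKeypair.create_and_list_keypairs": [
-            {"runner": {"type": "serial", "times": 10}}
-        ],
-    }
-
-    with open("combined_task.json", "w") as f:
-        json.dump(task, f, indent=4)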
-
-CLI: rally plugin list
-----------------------
-
-This command can be used to list plugins, optionally filtered by name:
-
-.. code-block:: console
-
-    $ rally plugin list --name Keystone
-
-    +--------------------------------------------------+-----------+------------------------------------------------------------------+
-    | name                                             | namespace | title                                                            |
-    +--------------------------------------------------+-----------+------------------------------------------------------------------+
-    | Authenticate.keystone                            | default   | Check Keystone Client.                                           |
-    | KeystoneBasic.add_and_remove_user_role           | default   | Create a user role add to a user and disassociate.              |
-    | KeystoneBasic.create_add_and_list_user_roles     | default   | Create user role, add it and list user roles for given user.    |
-    | KeystoneBasic.create_and_delete_ec2credential    | default   | Create and delete keystone ec2-credential.                      |
-    | KeystoneBasic.create_and_delete_role             | default   | Create a user role and delete it.                               |
-    | KeystoneBasic.create_and_delete_service          | default   | Create and delete service.                                      |
-    | KeystoneBasic.create_and_list_ec2credentials     | default   | Create and List all keystone ec2-credentials.                   |
-    | KeystoneBasic.create_and_list_services           | default   | Create and list services.                                       |
-    | KeystoneBasic.create_and_list_tenants            | default   | Create a keystone tenant with random name and list all tenants. |
-    | KeystoneBasic.create_and_list_users              | default   | Create a keystone user with random name and list all users.     |
-    | KeystoneBasic.create_delete_user                 | default   | Create a keystone user with random name and then delete it.     |
-    | KeystoneBasic.create_tenant                      | default   | Create a keystone tenant with random name.                      |
-    | KeystoneBasic.create_tenant_with_users           | default   | Create a keystone tenant and several users belonging to it.     |
-    | KeystoneBasic.create_update_and_delete_tenant    | default   | Create, update and delete tenant.                               |
-    | KeystoneBasic.create_user                        | default   | Create a keystone user with random name.                        |
-    | KeystoneBasic.create_user_set_enabled_and_delete | default   | Create a keystone user, enable or disable it, and delete it.    |
-    | KeystoneBasic.create_user_update_password        | default   | Create user and update password for that user.                  |
-    | KeystoneBasic.get_entities                       | default   | Get instance of a tenant, user, role and service by id's.       |
-    +--------------------------------------------------+-----------+------------------------------------------------------------------+
-
-.. references:
-
-.. _source code: https://github.com/openstack/rally/tree/master/rally/plugins/
diff --git a/doc/source/task/index.rst b/doc/source/task/index.rst
deleted file mode 100644
index 60c6f45d..00000000
--- a/doc/source/task/index.rst
+++ /dev/null
@@ -1,380 +0,0 @@
-.. _task-component:
-
-==============
-Task Component
-==============
-
-This section describes the Rally Task Component (including the feature
-introduced in Rally v0.5.0 that allows analyzing statistics trends for given
-tasks).
-
-.. contents::
-  :depth: 2
-  :local:
-
-HTML Reports
-============
-
-HTML reports provide comprehensive analysis.
-Data is structured and displayed interactively, with charts and tables.
-
-Task Report
------------
-
-Get complete information about task workload results, in a pretty and
-convenient format!
-
-.. image:: ../images/Report-Collage.png
-
-Generate report for single task, using task UUID
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Once a task has finished, generate a report with the command:
-
-.. code-block:: shell
-
-    $ rally task report --out
-
-Example:
-
-.. code-block:: shell
-
-    $ rally task report 6f63d9ec-eecd-4696-8e9c-2ba065c68535 --out report.html
-
-Generate report for single task, using JSON file
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-A report can be generated from a task results JSON file.
-Such a file can be produced with the *rally task results* command:
-
-.. code-block:: shell
-
-    $ rally task results 6f63d9ec-eecd-4696-8e9c-2ba065c68535 > results.json
-    $ rally task report results.json --out report.html
-
-Generate report for many tasks
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-A report can be generated from many tasks. All workloads from the specified
-task results will be composed into a single report.
-To generate the report, use the *--tasks* argument with a list of task UUIDs
-and/or task results JSON files.
-
-Example:
-
-.. code-block:: shell
-
-    $ rally task report --tasks 6f63d9ec-eecd-4696-8e9c-2ba065c68535 20ae7e95-7395-4be4-aec2-b89220adee60 a5737eba-a204-43d6-a262-d5ea4b0065da results.json another_results.json --out report.html
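-
-The file exported with *rally task results* is plain JSON, so it is easy to
-post-process outside of Rally. A minimal sketch, assuming the classic results
-layout (a list of scenario entries, each with a "key" dict and a "result"
-list holding one dict per iteration with a "duration" field):
-
-.. code-block:: python
-
-    import json
-
-    # Produced earlier via: rally task results <uuid> > results.json
-    with open("results.json") as f:
-        results = json.load(f)
-
-    for entry in results:
-        durations = [it["duration"] for it in entry["result"]]
-        print("%s: %d iterations, avg %.3f sec"
-              % (entry["key"]["name"], len(durations),
-                 sum(durations) / len(durations)))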
-
-Task Overview
-~~~~~~~~~~~~~
-
-This is a table with a brief summary of all workload results.
-All columns are sortable and clickable.
-
-.. image:: ../images/Report-Task-Overview.png
-
-Load duration
-+++++++++++++
-
-Time from the start of the first iteration to the end of the last one.
-In other words, this is the time taken by the execution of all workload
-iterations.
-
-Full duration
-+++++++++++++
-
-This time includes the iterations time (`Load duration <#load-duration>`_)
-plus the time taken by other actions related to the task, mostly Contexts
-execution time.
-
-Iterations
-++++++++++
-
-How many times the workload has run. This comes from the value of
-*runner.times* in the task input file.
-
-Failures
-++++++++
-
-Number of failed iterations. A failure means that an Exception was raised.
-
-Success (SLA)
-+++++++++++++
-
-This is the boolean result of the workload SLA. See
-`Service-level agreement explanation <#id2>`_ below.
-
-Input file
-~~~~~~~~~~
-
-This shows JSON which can be used to run a task with exactly the same
-workload list and configuration. It is not an exact copy (nor a
-concatenation) of the input files actually used (in the *rally task start*
-command); however, it is exactly what is needed to run the workloads given
-in the report.
-
-.. image:: ../images/Report-Task-Input-file.png
-
-Tab «Overview»
-~~~~~~~~~~~~~~
-
-Service-level agreement
-+++++++++++++++++++++++
-
-`SLA`_ results appear in the task report only if an *"sla"* section is
-defined in the task input file.
-
-For example, having this in the task input file:
-
-  .. code-block:: json
-
-    "sla": {
-        "performance_degradation": {
-            "max_degradation": 50
-        },
-        "max_seconds_per_iteration": 1.0,
-        "failure_rate": {
-            "max": 0
-        },
-        "outliers": {
-            "max": 1,
-            "min_iterations": 10,
-            "sigmas": 10
-        },
-        "max_avg_duration": 0.5
-    }
-
-will result in an SLA section similar to the following:
-
-.. image:: ../images/Report-Task-SLA.png
-
-
-What if workload has no "sla" configuration in input file?
-++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
-
-If the *"sla"* section is missing from the input file, then the
-*Service-level agreement* block is not displayed and its result is assumed
-to always pass (no matter how many failures occur).
-
-Total durations
-+++++++++++++++
-
-There is a durations analysis, represented by a statistics table and a
-durations StackedArea chart.
-
-.. image:: ../images/Report-Task-Total-durations.png
-
-Table with statistics data
-^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-**Action**
-  Name of the workload metric that has some duration saved.
-  This is either an atomic action name or *Total*, which points to the
-  workload `load duration <#load-duration>`_.
-
-**Min (sec)**
-  `Minimal`_ duration value
-
-**Median (sec)**
-  `Median`_ duration value
-
-**90%ile (sec)**
-  `Percentile`_ for 90% of durations
-
-**95%ile (sec)**
-  `Percentile`_ for 95% of durations
-
-**Max (sec)**
-  `Maximal`_ duration value
-
-**Avg (sec)**
-  `Average`_ duration value
-
-**Success**
-  Percentage of successful runs, i.e. how many of this action's runs (the
-  number of runs is given in the *Count* column) were successful.
-
-**Count**
-  Number of atomic actions actually run. This can differ from the
-  `iterations count <#iterations>`_ because some atomic actions do not start
-  if an exception is raised earlier in the workload runtime (for example, in
-  a previous atomic action).
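-
-These statistics are plain order statistics over the per-iteration durations.
-A minimal sketch of how such a table row could be computed (nearest-rank
-percentiles for illustration only; Rally's actual interpolation may differ):
-
-.. code-block:: python
-
-    import math
-
-    def percentile(sorted_values, p):
-        # Nearest-rank method: smallest value at or above the p-% rank.
-        k = max(0, int(math.ceil(p / 100.0 * len(sorted_values))) - 1)
-        return sorted_values[k]
-
-    durations = sorted([0.9, 1.1, 1.3, 2.0, 7.4])  # made-up sample data
-    print({"min": durations[0],
-           "median": percentile(durations, 50),
-           "90%ile": percentile(durations, 90),
-           "95%ile": percentile(durations, 95),
-           "max": durations[-1],
-           "avg": sum(durations) / len(durations)})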
-
-StackedArea with durations per iteration
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-This chart shows `load_duration <#load-duration>`_ and `idle_duration <#id5>`_
-values per iteration. If there is only one iteration, the chart is useless,
-so it is hidden.
-
-Idle duration
-^^^^^^^^^^^^^
-
-Sometimes a workload does nothing for a while (waiting for something or just
-generating a dummy load). This is achieved by calling *time.sleep()*, and the
-time spent this way is called *idle duration*.
-
-Load Profile
-++++++++++++
-
-The `Load profile`_ chart shows the number of iterations running in parallel
-at each moment of the workload:
-
-.. image:: ../images/Report-Task-Load-profile.png
-
-Distribution
-++++++++++++
-
-The pie chart shows the percentage of successful and failed
-`iterations <#iterations>`_.
-
-The histogram shows the durations distribution, with the binning method
-selected in a dropdown list from the following `methods`_: **Square Root
-Choice**, **Sturges Formula**, **Rise Rule**.
-
-.. image:: ../images/Report-Task-Distribution.png
-
-Tab «Details»
-~~~~~~~~~~~~~
-
-Atomic Action Durations
-+++++++++++++++++++++++
-
-There is a StackedArea chart that shows atomic action durations per
-iteration. If there is only one iteration, the chart is useless, so it is
-hidden.
-
-.. image:: ../images/Report-Task-Actions-durations.png
-
-Distribution
-++++++++++++
-
-`Distribution <#distribution>`_ for atomic action durations.
-
-Tab «Scenario Data»
-~~~~~~~~~~~~~~~~~~~
-
-This tab only appears if the workload provides custom output via the
-*Scenario.add_output()* method.
-
-Aggregated
-++++++++++
-
-This shows charts with data aggregated from all iterations.
-Each X axis point represents an iteration, and the values provided by each
-iteration are aggregated into charts or tables.
-
-.. image:: ../images/Report-Task-Scenario-Data-Aggregated.png
-
-Per iteration
-+++++++++++++
-
-Each iteration can create its own complete charts and tables.
-
-.. image:: ../images/Report-Task-Scenario-Data-Per-iteration.png
-
-Tab «Failures»
-~~~~~~~~~~~~~~
-
-Complete information about exceptions raised during the workload run:
-
-**Iteration**
-  Number of the iteration where the exception occurred
-
-**Exception type**
-  Type of the raised Exception subclass
-
-**Exception message**
-  Message delivered by the exception
-
-Clicking on a row expands it with the exception traceback.
-
-.. image:: ../images/Report-Task-Failures.png
-
-Tab «Input Task»
-~~~~~~~~~~~~~~~~
-
-This shows the JSON input file which can be used to run the current workload.
-
-.. image:: ../images/Report-Task-Subtask-configuration.png
-
-Trends Report
--------------
-
-If the same workload is run several times, some results of these runs can be
-compared. The compared metrics are success rate (percentage of successful
-iterations) and statistics for durations.
-
-How to generate trends report
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Use the *rally task trends* command with task UUIDs and/or task results JSON
-files and the name of the desired output file.
-
-Example:
-
-.. code-block:: shell
-
-    $ rally task trends --tasks 6f63d9ec-eecd-4696-8e9c-2ba065c68535 a5737eba-a204-43d6-a262-d5ea4b0065da --out trends.html
-
-What is the order of workload runs?
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-The workload run number is shown on the chart's X axis; the order of runs is
-exactly as it comes from the tasks data at the moment of report generation.
-
-Trends overview
-~~~~~~~~~~~~~~~
-
-.. image:: ../images/Report-Trends-Overview.png
-
-If workload has been actually run only once
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Obviously, it is not possible to have a trend for a single value.
-There should be at least two workload runs to make a results comparison
-possible, so in this case only a help message is displayed.
-
-.. image:: ../images/Report-Trends-single-run.png
-
-Tab «Total»
-~~~~~~~~~~~
-
-Total durations
-+++++++++++++++
-
-Shows workload `load_duration <#load-duration>`_ statistics trends.
-
-Total success rate
-++++++++++++++++++
-
-Shows trends for the percentage of successful iterations.
-
-.. image:: ../images/Report-Trends-Total.png
-
-Tab «Atomic actions»
-~~~~~~~~~~~~~~~~~~~~
-
-Statistics trends for atomic action durations.
-The charts are the same as for total durations.
-
-.. image:: ../images/Report-Trends-Atomic-actions.png
-
-Tab «Configuration»
-~~~~~~~~~~~~~~~~~~~
-
-Here is the configuration JSON for the current workload.
-
-.. image:: ../images/Report-Trends-Configuration.png
-
-CLI References
-==============
-
-For more information regarding the Rally Task Component CLI, please proceed
-to the `CLI reference <../cli/cli_reference.html#category-task>`_.
-
-.. references:
-
-.. _SLA: https://en.wikipedia.org/wiki/Service-level_agreement
-.. _Minimal: https://en.wikipedia.org/wiki/Maxima_and_minima
-.. _Median: https://en.wikipedia.org/wiki/Median
-.. _Percentile: https://en.wikipedia.org/wiki/Percentile
-.. _Maximal: https://en.wikipedia.org/wiki/Maxima_and_minima
-.. _Average: https://en.wikipedia.org/wiki/Average
-.. _Load profile: https://en.wikipedia.org/wiki/Load_profile
-.. _methods: https://en.wikipedia.org/wiki/Histogram
diff --git a/doc/source/verification/cli_reference.rst b/doc/source/verification/cli_reference.rst
deleted file mode 100644
index 535609f7..00000000
--- a/doc/source/verification/cli_reference.rst
+++ /dev/null
@@ -1,27 +0,0 @@
-..
-   Licensed under the Apache License, Version 2.0 (the "License"); you may
-   not use this file except in compliance with the License. You may obtain
-   a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-   WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-   License for the specific language governing permissions and limitations
-   under the License.
-
-.. _rally-verify-cli-reference:
-
-======================
-Command Line Interface
-======================
-
-Cut down from the global :ref:`cli-reference`
-
-.. contents::
-  :depth: 2
-  :local:
-
-.. make_cli_reference::
-   :group: verify
diff --git a/doc/source/verification/howto/add_new_reporter.rst b/doc/source/verification/howto/add_new_reporter.rst
deleted file mode 100644
index 7611c2a5..00000000
--- a/doc/source/verification/howto/add_new_reporter.rst
+++ /dev/null
@@ -1,104 +0,0 @@
-..
-   Licensed under the Apache License, Version 2.0 (the "License"); you may
-   not use this file except in compliance with the License. You may obtain
-   a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-   WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-   License for the specific language governing permissions and limitations
-   under the License.
-
-.. _howto-add-new-reporting-mechanism:
-
-=================================
-HowTo add new reporting mechanism
-=================================
-
-The reporting mechanism for verifications is pluggable. Custom plugins can be
-used for custom output formats or for exporting results to external systems.
-
-We strongly recommend reading the :ref:`plugins` page to understand how Rally
-plugins work.
-
-.. contents::
-  :depth: 2
-  :local:
-
-Spec
-----
-
-All reporters should inherit
-``rally.verification.reporter.VerificationReporter`` and implement all
-abstract methods. Here you can find its interface:
-
-  .. autoclass:: rally.verification.reporter.VerificationReporter
-     :members:
-
-Example of custom JSON Reporter
--------------------------------
-
-Basically, you need to implement only two methods: "validate" and "generate".
-
-Method "validate" should check that the destination of the report is valid.
-Method "generate" should build a report or export results somewhere; what
-exactly it does is up to you, but the return format is strict: see the
-`Spec <#spec>`_ section for what it can return.
-
-.. code-block:: python
-
-    import json
-
-    from rally.verification import reporter
-
-
-    @reporter.configure("summary-in-json")
-    class SummaryInJsonReporter(reporter.VerificationReporter):
-        """Store summary of verification(s) in JSON format"""
-
-        # ISO 8601
-        TIME_FORMAT = "%Y-%m-%dT%H:%M:%S%z"
-
-        @classmethod
-        def validate(cls, output_destination):
-            # We do not have any restrictions on the destination, so there
-            # is nothing to check.
-            pass
-
-        def generate(self):
-            report = {}
-
-            for v in self.verifications:
-                report[v.uuid] = {
-                    "started_at": v.created_at.strftime(self.TIME_FORMAT),
-                    "finished_at": v.updated_at.strftime(self.TIME_FORMAT),
-                    "status": v.status,
-                    "run_args": v.run_args,
-                    "tests_count": v.tests_count,
-                    "tests_duration": v.tests_duration,
-                    "skipped": v.skipped,
-                    "success": v.success,
-                    "expected_failures": v.expected_failures,
-                    "unexpected_success": v.unexpected_success,
-                    "failures": v.failures,
-                    # v.tests includes all information about launched tests,
-                    # but to keep this fake reporter simple, let's save just
-                    # the names.
-                    "launched_tests": [test["name"]
-                                       for test in v.tests.values()]
-                }
-
-            raw_report = json.dumps(report, indent=4)
-
-            if self.output_destination:
-                # If output_destination is set, the report will be saved to
-                # disk and there is nothing to print to stdout, so the
-                # "print" key is not used.
-                return {"files": {self.output_destination: raw_report},
-                        "open": self.output_destination}
-            else:
-                # This string will be printed at the CLI layer.
-                return {"print": raw_report}
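-
-For quick reference, the dictionaries returned by "generate" above follow the
-two shapes below (a sketch based on this example; see the `Spec <#spec>`_
-section for the authoritative contract):
-
-.. code-block:: python
-
-    # Shape 1: report files to store, plus one destination worth opening.
-    report_with_files = {
-        "files": {"/path/report.json": "<file content>"},
-        "open": "/path/report.json",
-    }
-
-    # Shape 2: nothing to save; the string is printed at the CLI layer.
-    report_for_stdout = {"print": "<report content>"}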
-
diff --git a/doc/source/verification/howto/add_support_for_new_tool.rst b/doc/source/verification/howto/add_support_for_new_tool.rst
deleted file mode 100644
index ddd26f95..00000000
--- a/doc/source/verification/howto/add_support_for_new_tool.rst
+++ /dev/null
@@ -1,116 +0,0 @@
-..
-   Licensed under the Apache License, Version 2.0 (the "License"); you may
-   not use this file except in compliance with the License. You may obtain
-   a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-   WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-   License for the specific language governing permissions and limitations
-   under the License.
-
-.. _howto-add-support-for-new-tool:
-
-==============================
-HowTo add support for new tool
-==============================
-
-First of all, you should start by reading the :ref:`plugins` page. After you
-have learned the basics of the Rally plugin mechanism, let's move on to the
-Verifier interface itself.
-
-.. contents::
-  :depth: 2
-  :local:
-
-Spec
-----
-
-All verifier plugins should inherit
-``rally.verification.manager.VerifierManager`` and implement all abstract
-methods. Here you can find its interface:
-
-  .. autoclass:: rally.verification.manager.VerifierManager
-     :members:
-     :exclude-members: base_ref, check_system_wide, checkout, install_venv,
-                       parse_results, validate
-
-
-Example of Fake Verifier Manager
---------------------------------
-
-FakeTool is a tool which requires neither configuration nor installation.
-
-  .. code-block:: python
-
-    import random
-    import re
-
-    from rally.verification import manager
-
-
-    # The Verification component expects the verifier's "run" method to
-    # return an object. Class Result is a simple wrapper for the two
-    # expected properties.
-    class Result(object):
-        def __init__(self, totals, tests):
-            self.totals = totals
-            self.tests = tests
-
-
-    @manager.configure("fake-tool", default_repo="https://example.com")
-    class FakeTool(manager.VerifierManager):
-        """Fake Tool \o/"""
-
-        TESTS = ["fake_tool.tests.bar.FatalityTestCase.test_one",
-                 "fake_tool.tests.bar.FatalityTestCase.test_two",
-                 "fake_tool.tests.bar.FatalityTestCase.test_three",
-                 "fake_tool.tests.bar.FatalityTestCase.test_four",
-                 "fake_tool.tests.foo.MegaTestCase.test_one",
-                 "fake_tool.tests.foo.MegaTestCase.test_two",
-                 "fake_tool.tests.foo.MegaTestCase.test_three",
-                 "fake_tool.tests.foo.MegaTestCase.test_four"]
-
-        # This fake verifier doesn't launch anything, it just returns random
-        # results, so let's override the parent methods to avoid redundantly
-        # cloning the repo, checking packages and so on.
-
-        def install(self):
-            pass
-
-        def uninstall(self, full=False):
-            pass
-
-        # Each tool that supports configuration has its own mechanism for
-        # it, so writing a unified method is impossible. That is why
-        # `VerifierManager` implements the case when the tool doesn't need
-        # (doesn't support) configuration at all. Such behaviour is ideal
-        # for FakeTool, since we do not need to change anything :)
-
-        # Let's implement the `run` method to return random data.
-        def run(self, context):
-            totals = {"tests_count": len(self.TESTS),
-                      "tests_duration": 0,
-                      "failures": 0,
-                      "skipped": 0,
-                      "success": 0,
-                      "unexpected_success": 0,
-                      "expected_failures": 0}
-            tests = {}
-            for name in self.TESTS:
-                duration = random.randint(0, 10000)/100.
-                totals["tests_duration"] += duration
-                test = {"name": name,
-                        "status": random.choice(["success", "fail"]),
-                        "duration": "%s" % duration}
-                if test["status"] == "fail":
-                    test["traceback"] = "Ooooppps"
-                    totals["failures"] += 1
-                else:
-                    totals["success"] += 1
-                tests[name] = test
-            return Result(totals, tests=tests)
-
-        def list_tests(self, pattern=""):
-            return [name for name in self.TESTS if re.match(pattern, name)]
diff --git a/doc/source/verification/howto/index.rst b/doc/source/verification/howto/index.rst
deleted file mode 100644
index e2b68a4b..00000000
--- a/doc/source/verification/howto/index.rst
+++ /dev/null
@@ -1,12 +0,0 @@
-
-=====
-HowTo
-=====
-
-.. toctree::
-   :maxdepth: 1
-   :glob:
-
-   ./add*
-   migrate_from_old_design
-
diff --git a/doc/source/verification/howto/migrate_from_old_design.rst b/doc/source/verification/howto/migrate_from_old_design.rst
deleted file mode 100644
index a74eeb7b..00000000
--- a/doc/source/verification/howto/migrate_from_old_design.rst
+++ /dev/null
@@ -1,501 +0,0 @@
-..
-   Licensed under the Apache License, Version 2.0 (the "License"); you may
-   not use this file except in compliance with the License. You may obtain
-   a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-   WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-   License for the specific language governing permissions and limitations
-   under the License.
-
-========================================================
-HowTo migrate from Verification component 0.7.0 to 0.8.0
-========================================================
-
-.. note:: This document describes the migration process from Rally 0.7.0 to
-    Rally 0.8.0. You can apply these instructions when migrating to later
-    versions as well, but check all references and release notes before
-    doing so.
-
-
-The Verification Component was introduced a long time ago, even before the
-first Rally release. It started as a small helper but became a big, powerful
-tool. Since it was not designed for all the features that were implemented
-later, it contained a lot of workarounds and hacks.
-
-The new Verification Component, which we are happy to introduce, should fix
-all architecture issues and improve the user experience. Unfortunately,
-fixing all those obsolete architecture decisions could not be done in a
-backward-compatible way, or it would have produced even more workarounds.
-That is why we decided to redesign the whole component in a clean way: remove
-the old code and write new code from scratch.
-
-Migration to the new Verification Component should be simple and should not
-take much time. You can find a description of the changes below.
-
-.. contents::
-  :depth: 2
-  :local:
-
-Reports
--------
-
-We completely reworked verification reports and merged the comparison into
-the main report. Now you can build one report for any number of
-verifications.
-
-For more details, see :ref:`verification-reports`.
-
-Verification statuses
----------------------
-
-+------------+------------+---------------------------------------------------+
-| Old Status | New Status | Description                                       |
-+============+============+===================================================+
-| init       | init       | Initial state. It appears instantly after calling |
-|            |            | the ``rally verify start`` command, before the    |
-|            |            | actual run of the verifier's tool.                |
-+------------+------------+---------------------------------------------------+
-| running    |            | It was used right after checking the status of    |
-|            |            | the verifier. It is redundant in the new design.  |
-+------------+------------+---------------------------------------------------+
-| verifying  | running    | Identifies the process of tool execution.         |
-+------------+------------+---------------------------------------------------+
-| finished   | finished   | Previously, the "finished" state identified a     |
-|            |            | just-finished verification that had some test     |
-|            |            | results. Now it means that the verification was   |
-|            |            | executed and has no failures, unexpected          |
-|            |            | successes or errors of any kind.                  |
-|            +------------+---------------------------------------------------+
-|            | failed     | The old purpose was to identify "errors", i.e.    |
-|            |            | situations when results are empty. The right use  |
-|            |            | now is to identify a finished verification with   |
-|            |            | tests in "failed" and "uxsuccess" (unexpected     |
-|            |            | success) statuses.                                |
-+------------+------------+---------------------------------------------------+
-| failed     | crashed    | Something went wrong while launching the          |
-|            |            | verification.                                     |
-+------------+------------+---------------------------------------------------+
-
-You can find the latest information about verification statuses at
-:ref:`verification_statuses`.
-
-Command Line Interface
-----------------------
-
-You can find the latest information about the Verification Component CLI at
-:ref:`rally-verify-cli-reference`.
-
-Installing verifier
-"""""""""""""""""""
-
-Command for Rally 0.7.0 - `rally verify install`_
-
-.. code-block:: console
-
-    $ rally verify install --deployment --source --version \
-        --system-wide
-
-Command since Rally 0.8.0:
-
-.. code-block:: console
-
-    $ rally verify create-verifier --type "tempest" --source \
-        --version --system-wide --name
-
-Here you can find several important improvements:
-
-1) The Rally team introduced a new entity - :ref:`verifiers`. A verifier
-   stores all information about the installed tool (i.e., source, version,
-   system-wide) in a database. You do not need to pass the same arguments to
-   all ``rally verify`` commands, as you previously did with the
-   ``--system-wide`` flag.
-
-2) You can use a particular verifier for multiple deployments. The
-   ``--deployment`` flag moved to the ``rally verify start`` command. Also,
-   you can run it simultaneously (checking different sets, different clouds,
-   etc. in parallel).
-
-3) The Verification Component can use more than just Tempest for verifying a
-   system. Check :ref:`known-verifier-types` for the full list of supported
-   tools.
-
-4) You can have an unlimited number of verifiers.
-
-Re-install verifier aka update
-""""""""""""""""""""""""""""""
-
-Command for Rally 0.7.0 - `rally verify reinstall`_
-
-.. code-block:: console
-
-    $ rally verify reinstall --deployment --source --version \
-        --system-wide
-
-Command since Rally 0.8.0:
-
-.. code-block:: console
-
-    $ rally verify update-verifier --id --source --version \
-        --system-wide --no-system-wide --update-venv
-
-Changes:
-
-1) ``rally verify update-verifier`` doesn't require a deployment id.
-
-2) You can switch between using a system-wide installation and a virtual
-   environment.
-
-3) You can update just the virtual environment without cloning the verifier
-   code again.
-
-Uninstall
-"""""""""
-
-Command for Rally 0.7.0 - `rally verify uninstall`_
-
-.. code-block:: console
-
-    $ rally verify uninstall --deployment
-
-Command since Rally 0.8.0:
-
-.. code-block:: console
-
-    $ rally verify delete-verifier --id --deployment-id --force
-
-Changes:
-
-1) As mentioned before, a verifier is not tied to any particular deployment,
-   so the deployment argument is optional now. If the --deployment-id
-   argument is specified, only deployment-specific data will be removed
-   (i.e., configurations).
-
-2) The new --force flag removes all verification results for that verifier.
-
-Installation extensions
-"""""""""""""""""""""""
-
-Command for Rally 0.7.0 - `rally verify installplugin`_
-
-.. code-block:: console
-
-    $ rally verify installplugin --deployment --source \
-        --version --system-wide
-
-Command since Rally 0.8.0:
-
-.. code-block:: console
-
-    $ rally verify add-verifier-ext --id --source --version \
-        --extra-settings
-
-Changes:
-
-1) The --system-wide flag is removed. Rally checks the verifier information
-   to decide where to install the extension - system-wide or in the virtual
-   environment.
-
-2) A new --extra-settings flag. In the case of Tempest it is redundant, but
-   for other verifiers it allows passing extra installation settings for the
-   verifier extension.
-
-Uninstall extensions
-""""""""""""""""""""
-
-Command for Rally 0.7.0 - `rally verify uninstallplugin`_
-
-.. code-block:: console
-
-    $ rally verify uninstallplugin --deployment --repo-name \
-        --system-wide
-
-Command since Rally 0.8.0:
-
-.. code-block:: console
-
-    $ rally verify delete-verifier-ext --id --name
-
-Changes:
-
-1) It is one more place where you do not need to pass the --system-wide flag
-   anymore.
- -2) --deployment flag is gone. - -3) --repo-name is renamed to just --name. - -List extensions -""""""""""""""" - -Command for Rally 0.7.0 - `rally verify listplugins -`_ - -.. code-block:: console - - $ rally verify listplugins --deployment --system-wide - -Command since Rally 0.8.0: - -.. code-block:: console - - $ rally verify list-verifier-exts --id - -Changes: - -1) No need to specify --system-wide flag. - -2) --deployment flag is gone. - -Discover available tests -"""""""""""""""""""""""" - -Command for Rally 0.7.0 - `rally verify discover -`_ - -.. code-block:: console - - $ rally verify discover --deployment --system-wide --pattern - -Command since Rally 0.8.0: - -.. code-block:: console - - $ rally verify list-verifier-tests --id --pattern - -Changes: - -1) No need to specify --system-wide flag. - -2) --deployment flag is gone. - -Configuring -""""""""""" - -Commands for Rally 0.7.0: - -* The command for generating configs `rally verify genconfig - `_ - - .. code-block:: console - - $ rally verify genconfig --deployment --tempest-config \ - --add-options --override - -Command since Rally 0.8.0: - -.. code-block:: console - - $ rally verify configure-verifier --id --deployment-id \ - --extend --override --reconfigure --show - -Changes: - -1) The argument ``--override`` replaces old ``--tempest-config`` name. First - of all, argument name "override" is a unified word without alignment to any - tool. Also, it describes in the best way the meaning of the action: use - client specified configuration file. - -2) The argument ``--extend`` replaces old ``--add-options``. It accepts a path - to config in INI format or JSON/YAML string. In future, it will be extended - with the ability to specify a path to JSON/YAML file. - -3) The argument ``--reconfigure`` replaces old ``--override``. It means that - existing file will be ignored and new one will be used/created. - -Show config -""""""""""" - -Command for Rally 0.7.0 - `rally verify showconfig -`_ - -.. code-block:: console - - $ rally verify showconfig --deployment - -Command since Rally 0.8.0: - -.. code-block:: console - - $ rally verify configure-verifier --id --deployment-id --show - -Changes: - - We do not have a separate command for that task. - ``rally verify configure-verifier --show`` shows an existing configuration - (if it exists) if ``--reconfigure`` argument is not specified. - -Running verification -"""""""""""""""""""" - -Command for Rally 0.7.0 - `rally verify start -`_ - -.. code-block:: console - - $ rally verify start --deployment --set --regex \ - --load-list --tests-file --skip-list \ - --tempest-config --xfail-list --system-wide \ - --concurrency --failing --no-use - -Command since Rally 0.8.0: - -.. code-block:: console - - $ rally verify start --id --deployment-id --pattern \ - --load-list --skip-list --xfail-list \ - --concurrency --no-use --detailed - -Changes: - -1) You need to pass verifier id - -2) Arguments ``--set`` and ``--regex`` are merged in the new model to single - ``--pattern`` argument. Name of tests set should be specified like - ``--pattern set=``. It was done to provide a way for each - verifier to support custom arguments. - -3) The argument ``--tests-file`` was deprecated in Rally 0.6.0 and - we are ready to remove it. -4) Arguments ``--skip-list`` and ``--xfail-list`` accept path to file in - JSON/YAML format. Content should be a dictionary, where keys are tests - names (full name with id and tags) and values are reasons. -5) The argument ``--tempest-config`` is gone. 
-
-Show verification result
-""""""""""""""""""""""""
-
-Commands for Rally 0.7.0:
-
-* The command for showing results of a verification - `rally verify show`_
-
-  .. code-block:: console
-
-      $ rally verify show --uuid --sort-by --detailed
-
-* A separate command which calls ``rally verify show`` with a hardcoded
-  ``--detailed`` flag - `rally verify detailed`_
-
-  .. code-block:: console
-
-      $ rally verify detailed --uuid --sort-by
-
-
-Command since Rally 0.8.0:
-
-.. code-block:: console
-
-    $ rally verify show --uuid --sort-by --detailed
-
-Changes:
-
-1) The redundant ``rally verify detailed`` command is removed.
-
-2) Sorting tests via the ``--sort-by`` argument is extended to
-   name/duration/status.
-
-Listing all verifications
-"""""""""""""""""""""""""
-
-Command for Rally 0.7.0 - `rally verify list`_
-
-.. code-block:: console
-
-    $ rally verify list
-
-Command since Rally 0.8.0:
-
-.. code-block:: console
-
-    $ rally verify list --id --deployment-id --status
-
-Changes:
-
-  You can now filter verifications by verifier, by deployment and by result
-  status.
-
-Importing results
-"""""""""""""""""
-
-Command for Rally 0.7.0 - `rally verify import`_
-
-.. code-block:: console
-
-    $ rally verify import --deployment --set --file --no-use
-
-Command since Rally 0.8.0:
-
-.. code-block:: console
-
-    $ rally verify import --id --deployment-id --file \
-        --run-args --no-use
-
-Changes:
-
-1) You need to specify the verifier to import results for.
-
-2) The argument ``--set`` is merged into the unified ``--run-args``.
-
-Building reports
-""""""""""""""""
-
-Commands for Rally 0.7.0:
-
-* The command for building HTML/JSON reports of a verification -
-  `rally verify results`_
-
-  .. code-block:: console
-
-      $ rally verify results --uuid --html --json --output-file
-
-* The command for comparing two verifications - `rally verify compare`_
-
-  .. code-block:: console
-
-      $ rally verify compare --uuid-1 --uuid-2 --csv --html \
-          --json --output-file --threshold
-
-Command since Rally 0.8.0:
-
-.. code-block:: console
-
-    $ rally verify report --uuid --type --to --open
-
-Changes:
-
-1) Building reports became pluggable. You can extend the reporter types.
-   See :ref:`verification-reports` for more details.
-
-2) The argument ``--type`` expects the type of report (HTML/JSON). There are
-   no longer separate arguments for each report type.
-
-   .. hint:: You can list all supported types by executing the
-       ``rally plugin list --plugin-base VerificationReporter`` command.
-
-3) Reports are no longer limited to local destinations, so the argument
-   ``--to`` replaces ``--output-file``. In the case of HTML/JSON reports, it
-   can contain a path to a local file, as before, or a URL to some external
-   system with credentials, like ``https://username:password@example.com:777``.
-
-4) The comparison is embedded into the main reports and is not limited to two
-   verification results. There is no reason for a separate command for that
-   task.
-
-The End
-"""""""
-
-Have nice verifications!
diff --git a/doc/source/verification/index.rst b/doc/source/verification/index.rst
deleted file mode 100644
index 3b2254e4..00000000
--- a/doc/source/verification/index.rst
+++ /dev/null
@@ -1,34 +0,0 @@
-..
-   Licensed under the Apache License, Version 2.0 (the "License"); you may
-   not use this file except in compliance with the License. You may obtain
-   a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-   WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-   License for the specific language governing permissions and limitations
-   under the License.
-
-======================
-Verification Component
-======================
-
-Functional testing is the first step to ensuring that your product works as
-expected and that its API covers all use cases. The Rally Verification
-Component is all about this. It is not designed to generate a really big load
-(for that job we have the :ref:`task-component`), but it is enough to check
-that your environment works, using different tools (we call them
-:ref:`glossary-verification`).
-
-.. toctree::
-   :maxdepth: 2
-   :glob:
-
-   verifiers
-   reports
-   cli_reference
-   howto/index
-
-.. include:: ./overview.rst
diff --git a/doc/source/verification/overview.rst b/doc/source/verification/overview.rst
deleted file mode 100644
index 3c42b71f..00000000
--- a/doc/source/verification/overview.rst
+++ /dev/null
@@ -1,57 +0,0 @@
-..
-   Licensed under the Apache License, Version 2.0 (the "License"); you may
-   not use this file except in compliance with the License. You may obtain
-   a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-   WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-   License for the specific language governing permissions and limitations
-   under the License.
-
-Historical background
----------------------
-
-Tempest, OpenStack's official test suite, is a powerful tool for running a set
-of functional tests against an OpenStack cluster. Tempest automatically runs
-against every patch in every project of OpenStack, which lets us avoid merging
-changes that break functionality.
-
-Unfortunately, it offers limited options for how it can be used and how its
-results can be processed. That is why we started the Verification Component
-initiative a long time ago (see `a blog post `_
-for more details, but be careful, as the whole user interface has changed
-completely since that time).
-
-What is Verification Component and why do you need it?
--------------------------------------------------------
-
-The primary goal of the Rally product is to provide a simple way to do complex
-things. As for functional testing, the Verification Component includes
-interfaces for:
-
-* **Managing things**. Create an isolated virtual environment and install a
-  verification tool there? Yes, we can do it! Clone a tool from Git
-  repositories? Sure! Store several versions of one tool (you know, sometimes
-  they are incompatible, with different required packages and so on)? Of
-  course! In general, the Verification Component allows you to install,
-  upgrade, reinstall and configure your tool. You should not have to care
-  about a zillion options anymore: Rally will discover them via the cloud UX
-  and create the configuration file for you automatically.
-* **Launching verifiers**. Launchers of specific tools don't always contain
-  all required features; the Rally team tries to fix this omission.
-  The Verification Component supports some of them, like expected failures,
-  a list of tests to skip, a list of tests to launch, re-running a previous
-  verification or just the failed tests from it, and so on. By the way, all
-  verification run arguments are stored in the database.
-* **Processing results**. The Rally database stores all `verifications `_
-  and you can obtain unified (across different verifiers) results at any
-  time. You can find there a verification run summary, the run arguments
-  which were used, error messages, etc. A comparison mechanism for several
-  verifications is available too. Verification reports can be generated in
-  several formats: HTML, JSON, JUnit-XML (see :ref:`verification-reports`
-  for more details). Also, the reports mechanism is extensible, and you can
-  write your own plugin for whatever system you want.
diff --git a/doc/source/verification/reports.rst b/doc/source/verification/reports.rst
deleted file mode 100644
index 290cb23d..00000000
--- a/doc/source/verification/reports.rst
+++ /dev/null
@@ -1,117 +0,0 @@
-..
-   Licensed under the Apache License, Version 2.0 (the "License"); you may
-   not use this file except in compliance with the License. You may obtain
-   a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-   WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-   License for the specific language governing permissions and limitations
-   under the License.
-
-.. _verification-reports:
-
-====================
-Verification reports
-====================
-
-Rally stores all verification results in its database so that you can access
-and process them at any time. No matter what verifier you use, results will
-be stored in a unified way and reports will be unified too.
-
-We support several types of reports out of the
-box: :include-var:`rally.cli.commands.verify.DEFAULT_REPORT_TYPES`; but our
-reporting system is pluggable, so you can write your own plugin to build
-specific reports or to export results to a specific system (see
-:ref:`howto-add-new-reporting-mechanism` for more details).
-
-.. contents::
-  :depth: 2
-  :local:
-
-HTML reports
-------------
-
-The HTML report is the most convenient type of report. It includes as much
-useful information about verifications as possible.
-
-Here is an example of an HTML report for 3 verifications.
-It was generated by the following command:
-
-.. code-block:: console
-
-    $ rally verify report --uuid --type html \
-        --to ./report.html
-
-
-.. image:: ../images/Report-Verify-for-4-Verifications.png
-   :align: center
-
-The report consists of two tables.
-
-The first one is a summary table. It includes basic information about the
-verifications: UUIDs, numbers of tests, when they were launched, statuses,
-etc. Also, you can find detailed information grouped by test status at the
-right-hand part of the table.
-
-If the size (height) of the summary table seems too large and makes it hard
-to see more test results, you can push the "Toggle Header" button.
-
-The second table contains the actual verification results. They are grouped
-by test names. The result of a test for a particular verification is painted
-in one of the following colours:
-
-* *Red* - The test has "failed" status.
-* *Orange* - An "unexpected success". Most parsers count it just like a
-  failure.
-* *Green* - Everything is ok. The test succeeded.
-* *Yellow* - An "expected failure".
-* *Light Blue* - The test was skipped. This is neither good nor bad.
-
-Comparison of several verifications is the default embedded behaviour of
-reports. The difference between verifications is displayed in brackets after
-the actual test duration. The sign **+** means that the current result is
-bigger than the baseline by the number following the sign. The sign **-** is
-the opposite of **+**. Please note that all diffs are comparisons with the
-first verification in a row.
-
-Filtering results
-"""""""""""""""""
-
-You can filter tests by setting or removing a mark in the check box of the
-particular status column of the summary table.
-
-.. image:: ../images/Report-Verify-filter-by-status.png
-   :align: center
-
-Tests Tags
-""""""""""
-
-Some test tools support test tagging. Tags can be used for setting unique
-IDs, groups, etc. Usually, such tags are included in the test name, which is
-inconvenient, so Rally stores tags separately. By default they are hidden,
-but if you push the "Toggle tags" button, they will be displayed under the
-test names.
-
-.. image:: ../images/Report-Verify-toggle-tags.png
-   :align: center
-
-Tracebacks & Reasons
-""""""""""""""""""""
-
-Tests with "failed" and "expected failure" statuses have failure tracebacks.
-Tests with "skipped", "expected failure" and "unexpected success" statuses
-have a "reason" for the event. By default, both tracebacks and reasons are
-hidden, but you can show them by clicking on the appropriate test.
-
-.. image:: ../images/Report-Verify-tracebacks.png
-   :align: center
-
-.. image:: ../images/Report-Verify-xfail.png
-   :align: center
-
-Plugins Reference for all out-of-the-box reporters
---------------------------------------------------
-
-.. generate_plugin_reference::
-   :base_cls: Verification Reporter
diff --git a/doc/source/verification/verifiers.rst b/doc/source/verification/verifiers.rst
deleted file mode 100644
index 392c72c3..00000000
--- a/doc/source/verification/verifiers.rst
+++ /dev/null
@@ -1,100 +0,0 @@
-..
-   Licensed under the Apache License, Version 2.0 (the "License"); you may
-   not use this file except in compliance with the License. You may obtain
-   a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-   WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-   License for the specific language governing permissions and limitations
-   under the License.
-
-.. _verifiers:
-
-=========
-Verifiers
-=========
-
-.. contents::
-  :depth: 1
-  :local:
-
-What is it?
------------
-
-A Verifier Plugin is a compatibility layer between Rally and the specific
-tool (such as Tempest) which runs tests. It implements features like
-installation, configuration, upgrades, running, etc. in terms of that tool.
-In other words, it is a driver.
-It is a pluggable entity, which means that you can easily add support for
-whatever tool you want (see the :ref:`howto-add-support-for-new-tool` page
-for more information). Even more, you can deliver such a plugin separately
-from Rally itself, but we strongly recommend pushing a change to Rally
-upstream (see the :ref:`contribute` guide), so the Rally core team will be
-able to review it and help to improve it.
-
-A Verifier is an instance of a Verifier Plugin, i.e. an installed tool.
-For example, "Tempest" is a set of functional tests; it is a Verifier Plugin
-(we have a plugin for it).
-Tempest 12.0 installed from
-https://github.com/openstack/tempest into a virtual environment is a
-verifier.
-
-A verifier is not tied to any particular deployment as it was in the past;
-you can use one verifier for testing an unlimited number of deployments
-(each deployment will have separate configuration files for the tool).
-
-Verifier & Verifier Plugin are the main entities which the Verification
-Component operates with. Another one is the verification results.
-
-Verifier statuses
------------------
-
-All verifiers can be in the following statuses:
-
-* *init* - Initial state. It appears when you call the ``rally verify
-  create-verifier`` command and the installation step has not started yet.
-* *installing* - Installation of the verifier is not a quick task. It
-  involves cloning the tool, checking packages or installing a virtual
-  environment with all required packages. This state indicates that this
-  step is in progress.
-* *installed* - It should be one of your favourite states. It means that
-  everything is ok and you can start verifying your cloud.
-* *updating* - This state identifies the process of updating the verifier
-  (version, source, packages, etc.).
-* *extending* - The process of extending a verifier with its plugins.
-* *failed* - Something went wrong during installation.
-
-.. _verification_statuses:
-
-Verification statuses
----------------------
-
-* *init* - Initial state. It appears instantly after calling the
-  ``rally verify start`` command, before the actual run of the verifier's
-  tool.
-* *running* - Identifies the process of executing the tool.
-* *finished* - Verification finished without errors or failures.
-* *failed* - Verification finished, but there are some failed tests.
-* *crashed* - An unexpected error happened while running the verification.
-
-
-.. _known-verifier-types:
-
-Known verifier types
---------------------
-
-Out of the box
-""""""""""""""
-
-You can execute the ``rally verify list-plugins`` command locally to check
-the available verifiers in your environment.
-
-Cut down from the global :ref:`plugin-reference` page:
-
-.. generate_plugin_reference::
-   :base_cls: Verifier Manager
-
-Third-party
-"""""""""""
-
-Nothing here yet.
-
diff --git a/doc/specs/README.rst b/doc/specs/README.rst
deleted file mode 100644
index 850c145c..00000000
--- a/doc/specs/README.rst
+++ /dev/null
@@ -1,15 +0,0 @@
-Rally Specs
-===========
-
-Specs are detailed descriptions of proposed changes in the project. Usually
-they answer what should change, why and how it should change, and who is
-going to work on the change.
-
-This directory contains 2 subdirectories:
-
-- in-progress - These specs are approved, but they are not implemented yet
-- implemented - Implemented specs archive
-
-If you are looking for a full Rally road map overview, go
-`here `_.
-
diff --git a/doc/specs/implemented/README.rst b/doc/specs/implemented/README.rst
deleted file mode 100644
index 17609f54..00000000
--- a/doc/specs/implemented/README.rst
+++ /dev/null
@@ -1,10 +0,0 @@
-Rally Specs
-===========
-
-Specs are detailed descriptions of proposed changes in the project. Usually
-they answer what should change, why and how it should change, and who is
-going to work on the change.
-
-This directory contains files with implemented specs; 1 file is 1 spec.
-
-If you are looking for a full Rally road map overview, go `here `_.
diff --git a/doc/specs/implemented/class-based-scenarios.rst b/doc/specs/implemented/class-based-scenarios.rst
deleted file mode 100644
index f38871b8..00000000
--- a/doc/specs/implemented/class-based-scenarios.rst
+++ /dev/null
@@ -1,113 +0,0 @@
-..
-  This work is licensed under a Creative Commons Attribution 3.0 Unported
-  License.
-
-  http://creativecommons.org/licenses/by/3.0/legalcode
-
-..
-  This template should be in ReSTructured text. The filename in the git
-  repository should match the launchpad URL, for example a URL of
-  https://blueprints.launchpad.net/heat/+spec/awesome-thing should be named
-  awesome-thing.rst . Please do not delete any of the sections in this
-  template. If you have nothing to say for a whole section, just write: None
-  For help with syntax, see http://sphinx-doc.org/rest.html
-  To test out your formatting, see http://www.tele3.cz/jbar/rest/rest.html
-
-
-===================================
-Class-based Scenario Implementation
-===================================
-
-Introduce scenario implementation as classes, not methods.
-
-Problem description
-===================
-
-The current scenario implementation transforms a method into a class at
-runtime, which overcomplicates the code.
-
-A method-based extension mechanism is not a common practice in
-frameworks, so this is a bit confusing.
-
-Most Rally plugins like Context, SLA, Runner, OutputChart (except
-Scenario) are implemented as classes, not methods.
-
-Proposed change
-===============
-
-Add the ability to implement scenarios as classes, keeping full backward
-compatibility with existing code.
-
-This means that a class represents a single scenario, which is actually
-implemented in the method *Scenario.run()*.
-
-So an input task can contain scenario names that do not have a method
-part separated by a dot from the class part.
-
-For example, here we have two scenarios; the first one is in the old
-manner and the other is class-based:
-
-.. code-block:: json
-
-    {
-        "Dummy.dummy": [
-            {
-                "runner": {
-                    "type": "serial",
-                    "times": 20
-                }
-            }
-        ],
-        "another_dummy_scenario": [
-            {
-                "runner": {
-                    "type": "serial",
-                    "times": 20
-                }
-            }
-        ]
-    }
-
-The class AnotherDummyScenario should have a method run():
-
-.. code-block:: python
-
-    from rally.task import scenario
-
-    @scenario.configure(name="another_dummy_scenario")
-    class AnotherDummyScenario(scenario.Scenario):
-
-        def run(self):
-            """Scenario implementation."""
-
-The modules *rally.task.engine* and *rally.task.processing* should be
-modified to make them work with class-based scenarios.
-
-Alternatives
-------------
-
-None
-
-
-Implementation
-==============
-
-Assignee(s)
------------
-
-Primary assignee:
-  Alexander Maretskiy
-
-
-Work Items
-----------
-
- - Update task.engine and task.processing for class-based scenarios
- - Transform all Dummy scenarios into class-based implementations as the
-   first stage of using class-based scenarios.
-
-Dependencies
-============
-
-None
diff --git a/doc/specs/implemented/consistent_resource_names.rst b/doc/specs/implemented/consistent_resource_names.rst
deleted file mode 100644
index 097a1c6f..00000000
--- a/doc/specs/implemented/consistent_resource_names.rst
+++ /dev/null
@@ -1,125 +0,0 @@
-..
-  This work is licensed under a Creative Commons Attribution 3.0 Unported
-  License.
-
-  http://creativecommons.org/licenses/by/3.0/legalcode
-
-=========================
-Consistent Resource Names
-=========================
-
-To facilitate better cleanup of ephemeral resources created by Rally,
-random resource names need to be used consistently across all scenarios
-and all plugins. Additionally, to support Rally's use against systems
-other than OpenStack, plugins need greater control over both the format
-and the list of characters used in generating resource names.
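-
-As a rough illustration of the idea (the helper below is a hypothetical
-sketch, not the proposed implementation), generating a name from a
-``mktemp(1)``-like format with a plugin-controlled character set might
-look like:
-
-.. code-block:: python
-
-    import random
-    import string
-
-    def generate_random_name(fmt="rally_XXXXXXXX_XXXXXXXX",
-                             chars=string.ascii_lowercase + string.digits):
-        """Replace each 'X' in fmt with a random character from chars."""
-        return "".join(random.choice(chars) if c == "X" else c
-                       for c in fmt)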
-
-Problem description
-===================
-
-Currently we use a few different cleanup mechanisms, some of which
-(Keystone) use resource names, while most others use tenant membership.
-As a result, if Rally is interrupted before cleanup completes, it may not
-be possible to know which resources were created by Rally (and thus
-should be cleaned up after the fact).
-
-Random names are generated from a fairly limited set of digits and ASCII
-letters. This should be configurable by each plugin, along with all other
-parts of the random name, in order to support benchmarking systems other
-than OpenStack, which may have different naming restrictions.
-
-Finally, each Rally task should include some consistent element in its
-resource names, distinct from other Rally tasks, to support multiple
-independent Rally runs and cleanup.
-
-Proposed change
-===============
-
-Random names will consist of three components:
-
-* A random element derived from the task ID that is the same for all
-  random names in the task;
-* A random element that should be different for all names in the task;
-  and
-* Any amount of formatting as determined by the plugin.
-
-The format of the random name will be given by a class variable,
-``RESOURCE_NAME_FORMAT``, on each scenario and context plugin. This
-variable is a ``mktemp(1)``-like string that describes the format; the
-default for scenario plugins will be::
-
-    RESOURCE_NAME_FORMAT = "s_rally_XXXXXXXX_XXXXXXXX"
-
-And for context plugins::
-
-    RESOURCE_NAME_FORMAT = "c_rally_XXXXXXXX_XXXXXXXX"
-
-The format must have two separate sets of at least three consecutive
-'X's. (That is, they must match:
-``^.*(?,  # full validator function name,
-    #   #  validator plugin name (in the future)
-    # "input": ,  # smallest part of
-    # "message": ,  # message with description
-    # "success": ,  # did validator pass
-    # "duration":  # duration of validation process
-    # },
-    # .....
-    # ]
-    validation_result : TEXT
-
-    # Duration of validation; can be used to tune the validation process.
-    validation_duration : FLOAT
-
-    # Duration of the benchmarking part of the task
-    task_duration : FLOAT
-
-    # Whether all workloads in the task passed SLA
-    pass_sla : BOOL
-
-    # Current status of the task
-    status : ENUM(init, validating, validation_failed,
-                  aborting, soft_aborting, aborted,
-                  crashed, validated, running, finished)
-
-
-Task.status diagram of states
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. code-block::
-
-    INIT -> VALIDATING -> VALIDATION_FAILED
-                       -> ABORTING -> ABORTED
-                       -> SOFT_ABORTING -> ABORTED
-                       -> CRASHED
-                       -> VALIDATED -> RUNNING -> FINISHED
-                                               -> ABORTING -> ABORTED
-                                               -> SOFT_ABORTING -> ABORTED
-                                               -> CRASHED
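-
-The same diagram can be read as a transition table; the sketch below (a
-hypothetical helper, not part of the proposed schema) shows how a status
-change could be validated against it:
-
-.. code-block:: python
-
-    # Allowed Task.status transitions, transcribed from the diagram above.
-    TRANSITIONS = {
-        "init": {"validating"},
-        "validating": {"validation_failed", "aborting", "soft_aborting",
-                       "crashed", "validated"},
-        "validated": {"running"},
-        "running": {"finished", "aborting", "soft_aborting", "crashed"},
-        "aborting": {"aborted"},
-        "soft_aborting": {"aborted"},
-    }
-
-    def can_transition(old, new):
-        """Return True if a task may move from status `old` to `new`."""
-        return new in TRANSITIONS.get(old, set())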
-
-Subtask table
-~~~~~~~~~~~~~
-
-.. code-block::
-
-    id : INT, PK
-    uuid : UUID
-    task_uuid : UUID
-    title : String
-    description : TEXT
-
-    # Position of the Subtask in the Input Task
-    position : INT
-
-    # Context and SLA could be defined both Subtask-wide and per workload
-    context : JSON
-    sla : JSON
-
-    run_in_parallel : BOOL
-    duration : FLOAT
-
-    # Whether all workloads in the subtask passed SLA
-    pass_sla : BOOL
-
-    # Current status of the subtask
-    status : ENUM(running, finished, crashed)
-
-
-Workload table
-~~~~~~~~~~~~~~
-
-.. code-block::
-
-    id : INT, PK
-    uuid : UUID
-    subtask_id : INT
-    task_uuid : UUID
-
-    # Unlike Task's and Subtask's titles, which are arbitrary, a
-    # Workload's name defines the scenario being executed
-    name : String
-
-    # Scenario plugin docstring
-    description : TEXT
-
-    # Position of the Workload in the Input Task
-    position : INT
-
-    runner : JSON
-    runner_type : String
-
-    # Context and SLA could be defined both Subtask-wide and per workload
-    context : JSON
-    sla : JSON
-
-    args : JSON
-
-    # The SLA structure that contains all detailed info looks like:
-    # [
-    #   {
-    #       "name": ,
-    #       "duration": ,
-    #       "success": ,
-    #       "message": ,
-    #   }
-    # ]
-    sla_results : TEXT
-
-    # Context data structure (order makes sense)
-    # [
-    #   {
-    #       "name": string
-    #       "setup_duration": FLOAT,
-    #       "cleanup_duration": FLOAT,
-    #       "exception": LIST  # exception info
-    #       "setup_extra": DICT  # any custom data
-    #       "cleanup_extra": DICT  # any custom data
-    #
-    #   }
-    # ]
-    context_execution : TEXT
-
-    starttime : TIMESTAMP
-
-    load_duration : FLOAT
-    full_duration : FLOAT
-
-    # Shortest and longest iteration durations
-    min_duration : FLOAT
-    max_duration : FLOAT
-
-    total_iteration_count : INT
-    failed_iteration_count : INT
-
-    # Statistics data structure (order makes sense)
-    # {
-    #     "": {
-    #         "min_duration": FLOAT,
-    #         "max_duration": FLOAT,
-    #         "median_duration": FLOAT,
-    #         "avg_duration": FLOAT,
-    #         "percentile90_duration": FLOAT,
-    #         "percentile95_duration": FLOAT,
-    #         "success_count": INT,
-    #         "total_count": INT
-    #     },
-    #     ...
-    # }
-    statistics : JSON  # Aggregated information about actions
-
-    # Aggregated SLA result, as above
-    pass_sla : BOOL
-
-    # Profile information collected during the run of the scenario.
-    # This is internal data and its format can change over time.
-    # _profiling_data : Text
-
-
-WorkloadData
-~~~~~~~~~~~~
-
-.. code-block::
-
-    id : INT, PK
-    uuid : UUID
-    workload_id : INT
-    task_uuid : UUID
-
-    # Chunk order is used to be able to sort output data
-    chunk_order : INT
-
-    # Number of iterations; can be useful for some algorithms
-    iteration_count : INT
-
-    # Number of failed iterations
-    failed_iteration_count : INT
-
-    # Full size of results in bytes
-    chunk_size : INT
-
-    # Size of zipped results in bytes
-    zipped_chunk_size : INT
-
-    started_at : TIMESTAMP
-    finished_at : TIMESTAMP
-
-    # Chunk_data structure
-    # [
-    #   {
-    #       "duration": FLOAT,
-    #       "idle_duration": FLOAT,
-    #       "timestamp": FLOAT,
-    #       "errors": LIST,
-    #       "output": {
-    #           "complete": LIST,
-    #           "additive": LIST,
-    #       },
-    #       "actions": LIST
-    #   },
-    #   ...
-    # ]
-    chunk_data : BLOB  # compressed LIST of JSONs
-
-
-Tag table
-~~~~~~~~~
-
-.. code-block::
-
-    id : INT, PK
-    uuid : UUID of task or subtask
-    type : ENUM(task, subtask)
-    tag : TEXT
-
-    -- (uuid, type, tag) is unique and indexed
-
-
-Open questions
-~~~~~~~~~~~~~~
-
-None.
-
-
-Alternatives
-------------
-
-None.
-
-
-Implementation
-==============
-
-Assignee(s)
------------
-
-- boris-42 (?)
-- ikhudoshyn
-
-Milestones
-----------
-
-Target Milestone for completion: N/A
-
-Work Items
-----------
-
-TBD
-
-Dependencies
-============
-
-- There should be a smooth transition of the code to work with the new
-  data structures
diff --git a/doc/specs/implemented/hook_plugins.rst b/doc/specs/implemented/hook_plugins.rst
deleted file mode 100644
index f2c6b4fa..00000000
--- a/doc/specs/implemented/hook_plugins.rst
+++ /dev/null
@@ -1,262 +0,0 @@
-..
-  This work is licensed under a Creative Commons Attribution 3.0 Unported
-  License.
-
-  http://creativecommons.org/licenses/by/3.0/legalcode
-
-=======================
-New Plugins Type - Hook
-=======================
-
-Problem description
-===================
-
-Rally lacks a plugin type that would run some code on a specified
-iteration. A new plugin type is required for reliability testing of
-OpenStack. This type of plugin would give the ability to activate factors
-on some iteration and provide timestamps and some info about the executed
-actions to the Rally report.
-
-Proposed change
-===============
-
-Add a new section to the task config:
-
-The schema of the hook section allows specifying the iteration number and
-a list of hook plugins that should be executed on this iteration.
-
-.. code:: json
-
-    {
-        "KeystoneBasic.create_delete_user": [
-            {
-                "args": {},
-                "runner": {
-                    "type": "constant",
-                    "times": 100,
-                    "concurrency": 10
-                },
-                "hook": [  # new section
-                    {
-                        "name": "example_hook",
-                        "args": {
-                            "cmd": "bash enable_factor_1"
-                        },
-                        "trigger": {
-                            "name": "event",
-                            "args": {
-                                "unit": "time",
-                                "at": [1, 50, 100]  # seconds since start
-                            }
-                        }
-                    },
-                    {
-                        "name": "example_hook",
-                        "args": {
-                            "cmd": "bash enable_factor_2"
-                        },
-                        "trigger": {
-                            "name": "event",
-                            "args": {
-                                "unit": "iteration",
-                                "at": [35, 40, 45]  # iteration numbers
-                            }
-                        }
-                    },
-                    {
-                        "name": "example_hook",
-                        "args": {
-                            "cmd": "bash enable_factor_3"
-                        },
-                        "trigger": {
-                            "name": "periodic",
-                            "args": {
-                                "unit": "iteration",
-                                "step": 20,  # execute hook every 20 iterations
-                                "start": 0,
-                                "end": 1000
-                            }
-                        }
-                    },
-                    {
-                        "name": "example_hook",
-                        "args": {
-                            "cmd": "bash enable_factor_4"
-                        },
-                        "trigger": {
-                            "name": "periodic",
-                            "args": {
-                                "unit": "time",
-                                "step": 15,  # execute hook every 15 seconds
-                                "start": 100,
-                                "end": 200
-                            }
-                        }
-                    }
-                ]
-            }
-        ]
-    }
-
-
-Add a new base class for such plugins, that should:
-  - contain common logic for schema validation
-  - save timestamps when the "run" method started/finished
-  - provide an abstract method 'run' which should be implemented in
-    plugins; this method should be called after the specified iteration
-    has been executed
-
-Add new classes for trigger plugins, that should:
-  - contain a validation schema for their configuration
-  - contain "get_listening_event" and "on_event" methods
-
-Trigger plugin classes should:
-  - implement "get_listening_event" methods that return the events to
-    listen for
-  - implement "on_event" methods that check the event type and value and
-    launch the hook if needed
-
-
-Add a HookExecuter class to run hook plugins, that should:
-  - control when to run a hook specified in the config
-  - receive the result of hook execution from the hook plugin
-  - return a full result of hook execution in the following format:
-
-.. code:: json
-
-    [{
-        # this is the config of the specific hook; it should not be empty!
-        "config": {...},
-        "results":[
-            {
-                # value is the iteration number
-                "triggered_by": {"event_type": "iteration", "value": 20},
-                "started_at": 1470331269.134323,
-                "finished_at": 1470331319.761103,
-                "status": "success",
-                # same output format as in scenarios; this key can be
-                # missing if no output was added
-                "output": {}
-            }
-        ],
-        "summary": {"success": 1}
-    }]
-
-Modify ResultConsumer, which should:
-  - control the HookExecuter and provide info about iterations
-  - add the full result to TaskResult
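-
-For illustration, the trigger side of the "event" configuration shown
-above might boil down to something like the following sketch (base-class
-plumbing and registration are intentionally simplified and hypothetical):
-
-.. code-block:: python
-
-    class EventTrigger(object):
-        """Fires a hook when the listened event reaches configured values."""
-
-        def __init__(self, config, hook):
-            self.unit = config["unit"]   # "time" or "iteration"
-            self.at = set(config["at"])  # values to fire on
-            self.hook = hook
-
-        def get_listening_event(self):
-            # Tell the executor which event stream to observe.
-            return self.unit
-
-        def on_event(self, event_type, value):
-            # Launch the hook only for a matching event type and value.
-            if event_type == self.unit and value in self.at:
-                self.hook.run()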
-
-Example code of the base class:
-
-.. code:: python
-
-    import abc
-    import os
-
-    import six
-
-    from rally.common.plugin import plugin
-
-
-    @plugin.base()
-    @six.add_metaclass(abc.ABCMeta)
-    class Hook(plugin.Plugin):
-
-        @classmethod
-        def validate(cls, config):
-            # schema validation
-            pass
-
-        def __init__(self, config):
-            self.config = config
-
-        @abc.abstractmethod
-        def run(self):
-            pass
-
-
-The example_hook class:
-
-.. code:: python
-
-    @hook.configure(name="example_hook")
-    class ExampleHook(hook.Hook):
-
-        CONFIG_SCHEMA = {
-            "type": "object",
-            "$schema": consts.JSON_SCHEMA,
-            "properties": {
-                "cmd": {
-                    "type": "string"
-                },
-            },
-            "required": [
-                "cmd",
-            ],
-            "additionalProperties": False,
-        }
-
-        def __init__(self, config):
-            super(ExampleHook, self).__init__(config)
-            self.cmd = self.config["cmd"]
-
-        def run(self):
-            # do some action
-            rc = os.system(self.cmd)
-
-
-Example of a hook result that goes to TaskResult (list of dicts):
-
-.. code:: python
-
-    [{
-        # this is the config of the specific hook; it should not be empty!
-        "config": {...},
-        "results":[
-            {
-                "triggered_by": {"event_type": "iteration", "value": 20},
-                "started_at": 1470331269.134323,
-                "finished_at": 1470331319.761103,
-                "status": "success",
-                # same output format as in scenarios; this key can be
-                # missing if no output was added
-                "output": {}
-            },
-            {
-                # value is time in seconds
-                "triggered_by": {"event_type": "time", "value": 150.0},
-                "started_at": 1470331270.352342,
-                "finished_at": 1470331333.623303,
-                "status": "failed",
-                "error": {
-                    "etype": "Exception",  # type of exception
-                    "msg": "exception message",
-                    # additional information to help (for example, traceback)
-                    "details": ""
-                }
-            }
-        ],
-        "summary": {"success": 1, "failed": 1}
-    }]
-
-
-Alternatives
-------------
-
-Use the sla section for such plugins, but this looks weird
-
-
-Implementation
-==============
-
-Assignee(s)
------------
-
-Primary assignee:
-
-- astudenov
-- ylobankov
-- amaretskiy
-
-
-Work Items
-----------
-
-- Implement the new section in the task config
-- Add an example hook plugin that runs a specified command as a subprocess
-- Add trigger plugins for iterations
-- Add trigger plugins for time
-- Add hook results into the HTML report
-
-Dependencies
-============
-
-None
diff --git a/doc/specs/implemented/improve_scenario_output_format.rst b/doc/specs/implemented/improve_scenario_output_format.rst
deleted file mode 100644
index 751f7b7a..00000000
--- a/doc/specs/implemented/improve_scenario_output_format.rst
+++ /dev/null
@@ -1,324 +0,0 @@
-..
-  This work is licensed under a Creative Commons Attribution 3.0 Unported
-  License.
-
-  http://creativecommons.org/licenses/by/3.0/legalcode
-
-..
-  This template should be in ReSTructured text. The filename in the git
-  repository should match the launchpad URL, for example a URL of
-  https://blueprints.launchpad.net/heat/+spec/awesome-thing should be named
-  awesome-thing.rst . Please do not delete any of the sections in this
-  template. If you have nothing to say for a whole section, just write: None
-  For help with syntax, see http://sphinx-doc.org/rest.html
-  To test out your formatting, see http://www.tele3.cz/jbar/rest/rest.html
-
-=======================================
-Improvements for scenario output format
-=======================================
-
-The current implementation of how a scenario saves output data is limited
-and does not meet our needs - it neither allows having more than one data
-set nor saving custom data structures per iteration. There is simply a
-dict with int values.
-
-This specification proposes how this can be significantly improved.
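-
-To make the limitation concrete: before this change, the only custom data
-a scenario could emit was a single flat dict of numbers, stored under
-"scenario_output" in the iteration results (the values below are made up
-for illustration):
-
-.. code-block:: python
-
-    # Old-style output: one dict, int values only, no titles and no
-    # choice of chart type.
-    scenario_output = {
-        "data": {"instances_booted": 10, "errors_hit": 0},
-        "errors": "",
-    }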
-
-Problem description
-===================
-
-First, let's clarify the types of desired output.
-
-Output divides into two main types: additive and complete.
-
-*Additive output* requires processing and representation for the whole
-scenario. For example, each iteration has a duration; this additive data
-can be taken from each iteration and analyzed for how it changes during
-the scenario execution.
-
-*Complete output* data is created entirely by an iteration and does not
-require extra processing. It is related to this specific iteration only.
-
-Currently a scenario can just return a single dict with int values - this
-is additive data only, and it is stored in iteration results according to
-this schema:
-
-.. code-block::
-
-    "result": {
-        ...
-        "scenario_output": {
-            "type": "object",
-            "properties": {
-                "data": {
-                    "type": "object"
-                },
-                "errors": {
-                    "type": "string"
-                },
-            },
-            "required": ["data", "errors"]
-        }
-    }
-
-Here are the main issues:
-
- * single data set - this does not allow splitting data (if required)
-   among different sources. For example, a scenario runs two (or more)
-   third-party tools or scripts but has to put all data into a single
-   dict
-
- * output is additive only - so its representation makes sense only after
-   putting data from all iterations together. A scenario iteration cannot
-   save its own data list that can be processed independently from other
-   iterations.
-
- * there is no specific data for HTML report generation like chart title
-   and chart type, so the report uses hardcoded values.
-
-As a result, the HTML report can represent output only as a single chart
-of a single type:
-
-.. code-block::
-
-    .--------.
-    | Output |
-    -----'        '-----------
-     Scenario output
-    --------------------
-    |                    |
-    | SINGLE StackedArea |
-    |                    |
-    --------------------
-
-Proposed change
-===============
-
-A scenario should have the ability to save an arbitrary number of both
-additive and complete output data sets. This data should include titles
-and instructions for how it should be processed and displayed in the HTML
-report.
-
-Here is the proposed iteration results structure for output data:
-
-.. code-block::
-
-
-    "result": {
-        ...
-        "output": {
-            "additive": [
-                # Each iteration duplicates "title", "description", "chart"
-                # and items keys, however this seems to be less evil than
-                # keeping aggregated metadata on the upper level of the
-                # task results schema.
-                # "chart" is required by the HTML report and should be a
-                # name of an existent Chart subclass that is responsible
-                # for processing and displaying the data
-                {"title": "How some durations change during the scenario",
-                 "description": "Some details explained here",
-                 "chart": "OutputStackedAreaChart",
-                 "items": [[, ], ...]  # Additive data
-                },
-                ...  # More data if required
-            ],
-            "complete": [
-                # Complete data from this specific iteration.
-                # "widget" is required by the HTML report and should be a
-                # name of the chart widget (see details below) that is
-                # responsible for displaying data. We do not need to
-                # specify "chart" here because this data does not require
-                # processing - it is already processed and represents a
-                # result of Chart.render()
-                {"title": "Interesting data from specific iteration",
-                 "description": "Some details explained here",
-                 "widget": "StackedArea",
-                 "data": [
                     [
                         ,
                         [[, ], ...]
                     ],
-                     ...
-                 ]
-                },
-                ...  # More data if required
-            ]
-        }
-    }
-
-**NOTES**:
-
- * for backward compatibility, data from the deprecated "scenario_output"
-   should be transformed into "output/data/additive[0]" on the fly (for
-   example if we load task results from a file)
-
- * as you can see, there is no container *output/errors* - that is
-   because the value of *errors* is not used at all and is not required
-   (there is another container for errors in iteration results)
-
-How scenario saves output data
-------------------------------
-
-Scenario should be extended with a method *add_output()*:
-
-.. code-block::
-
-    class Scenario(...):
-
-        def __init__(self, context=None):
-            ...
-            self._output = {"additive": [], "complete": []}
-
-        ...
-
-        def add_output(self, additive=None, complete=None):
-            """Save custom output data from the scenario iteration.
-
-            :param additive: dict with additive output
-            :param complete: dict with complete output
-            :raises RallyException: When additive or complete has wrong format
-            """
-            for key, value in (("additive", additive), ("complete", complete)):
-                if value:
-                    try:
-                        jsonschema.validate(
-                            value, task.OUTPUT_SCHEMA["properties"][key]["items"])
-                        self._output[key].append(value)
-                    except jsonschema.ValidationError:
-                        raise exceptions.RallyException(
-                            "%s output has wrong format" % key.capitalize())
-
-
-Here is an example of how a scenario can save different output:
-
-.. code-block::
-
-    class SomePlugin(Scenario):
-
-        def specific_scenario(self):
-            ...
-
-            self.add_output(additive={"title": "Foo data",
-                                      "description": "Some words about Foo",
-                                      "chart": "OutputStackedAreaChart",
-                                      "items": [["foo 1", 12], ["foo 2", 34]]})
-            self.add_output(additive={"title": "Bar data",
-                                      "description": "Some words about Bar",
-                                      "chart": "OutputAvgChart",
-                                      "items": [["bar 1", 56], ["bar 2", 78]]})
-            self.add_output(complete={"title": "Complete data",
-                                      "description": "Some details here",
-                                      "widget": "StackedArea",
-                                      "data": [["foo key", [ ... ]], ... ]})
-            self.add_output(complete={"title": "Another data",
-                                      "description": "Some details here",
-                                      "widget": "Pie",
-                                      "data": [["bar key", [ ... ]], ... ]})
-            self.add_output(complete={"title": "Yet another data",
-                                      "description": "Some details here",
-                                      "widget": "Table",
-                                      "data": [["spam key", [ ... ]], ... ]})
-
-Displaying scenario output in HTML report
------------------------------------------
-
-The following changes are planned for the HTML report and chart classes:
-
- * rename the tab *Output* to *Scenario Data*
- * implement subtabs under *Scenario Data*: *Aggregated* and
-   *Per iteration*
- * the *Aggregated* subtab shows charts with additive data
- * the *Per iteration* subtab shows charts with complete data, for each
-   iteration
- * both subtabs (as well as the parent tab) are shown only if there is
-   something to display
- * add a base class OutputChart and generic chart classes for processing
-   output data: OutputStackedAreaChart, OutputAvgChart, OutputStatsTable
- * add optional *title* and *description* arguments to
-   OutputChart.__init__() so the title and description can be set
-   explicitly - this is important for custom charts
- * add a *WIDGET* property to each OutputChart subclass to bind it to a
-   specific chart widget (StackedArea, Pie, Table). For example, AvgChart
-   will be bound to "Pie". This will allow defining both how to process
-   and how to display some data simply by a single class name
- * update the return value format of OutputChart.render() with title and
-   widget:
-   {"title": , "description": , "widget": , "data": [...]}
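-
-Under these assumptions, a generic output chart might look roughly like
-the following self-contained sketch (the base-class internals - how
-*items*, *title* and *description* arrive - are simplified here):
-
-.. code-block:: python
-
-    class OutputChart(object):
-        """Minimal stand-in for the proposed base class."""
-
-        WIDGET = None
-
-        def __init__(self, items, title=None, description=None):
-            self._items = items  # [[key, value], ...] from iterations
-            self.title = title
-            self.description = description
-
-
-    class OutputAvgChart(OutputChart):
-        """Averages additive values per key; displayed by a "Pie" widget."""
-
-        WIDGET = "Pie"
-
-        def render(self):
-            averages = {}
-            for key, value in self._items:
-                averages.setdefault(key, []).append(value)
-            # Return value format proposed above.
-            return {"title": self.title,
-                    "description": self.description,
-                    "widget": self.WIDGET,
-                    "data": [[k, sum(v) / len(v)]
-                             for k, v in averages.items()]}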
-
-UI sketch for the active "Aggregated" subtab:
-
-.. code-block::
-
-    .---------------.
-    | Scenario Data |
-    ----'           '-------------------
-     Aggregated    Per iteration
-    -------------
-
-
-    ----------------------------
-    |                          |
-    | Any available chart widget |
-    |                          |
-    ----------------------------
-
-
-
-    ----------------------------
-    |                          |
-    | Any available chart widget |
-    |                          |
-    ----------------------------
-
-    [... more charts]
-
-UI sketch for the active "Per iteration" subtab, let it be iteration 5
-selected by a dropdown:
-
-.. code-block::
-
-    .---------------.
-    | Scenario Data |
-    ----'           '-------------------
-     Aggregated    Per iteration
-                   ----------
-
-    [iteration 5]
-
-
-
-    ----------------------------
-    |                          |
-    | Any available chart widget |
-    |                          |
-    ----------------------------
-
-
-
-    ----------------------------
-    |                          |
-    | Any available chart widget |
-    |                          |
-    ----------------------------
-
-    [... more charts]
-
-Alternatives
-------------
-
-None
-
-Implementation
-==============
-
-Assignee(s)
------------
-
-Primary assignee:
-  * amaretskiy
-
-Work Items
-----------
-
- * Update the task results schema with the *output* container
- * Extend Scenario with the method *add_output()*
- * Bind Chart subclasses to specific chart widgets
- * Add generic Chart subclasses for output data
- * Changes in the HTML report related to the *Output* tab
- * Add a scenario with example output data
-
-Dependencies
-============
-
-None
diff --git a/doc/specs/implemented/sla_pd_plugin.rst b/doc/specs/implemented/sla_pd_plugin.rst
deleted file mode 100644
index 5ee64b38..00000000
--- a/doc/specs/implemented/sla_pd_plugin.rst
+++ /dev/null
@@ -1,68 +0,0 @@
-..
-  This work is licensed under a Creative Commons Attribution 3.0 Unported
-  License.
-
-  http://creativecommons.org/licenses/by/3.0/legalcode
-
-==================================
-SLA Performance degradation plugin
-==================================
-
-Problem description
-===================
-
-During density and reliability testing of OpenStack with Rally we
-observed test cases during the execution of which the performance of the
-OpenStack cluster degraded dramatically.
-
-Proposed change
-===============
-
-Develop a new Rally SLA plugin: *performance_degradation*
-
-This SLA plugin should find the minimum and maximum durations of
-iterations completed without errors during Rally task execution.
-Assuming that the minimum duration is 100%, it should calculate
-performance degradation against the maximum duration.
-
-SLA plugin results:
- - failure if performance degradation is more than the value set in the
-   plugin's max_degradation parameter;
- - success if the degradation is less;
- - the performance degradation value as a percentage.
-
-How to enable this plugin:
-
-.. code:: json
-
-    "sla": {
-        "performance_degradation": {
-            "max_degradation": 50
-        }
-    }
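-
-The underlying arithmetic is straightforward. Here is a sketch of the
-check (assuming ``durations`` holds the durations of iterations completed
-without errors):
-
-.. code-block:: python
-
-    def degradation_percent(durations):
-        """Degradation of the longest iteration relative to the shortest."""
-        low, high = min(durations), max(durations)
-        return (high - low) / low * 100.0
-
-    # The SLA passes only while degradation stays within the limit.
-    max_degradation = 50
-    durations = [2.0, 2.4, 3.6]
-    success = degradation_percent(durations) <= max_degradation  # 80% -> False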
-
-Alternatives
-------------
-
-None
-
-Implementation
-==============
-
-Assignee(s)
------------
-
-Primary assignee:
-
-anevenchannyy
-
-Work Items
-----------
-
- - Implement the plugin
- - Add a non-voting job with this plugin to the most important OpenStack
-   services
-
-Dependencies
-============
-
-None
diff --git a/doc/specs/implemented/split_plugins.rst b/doc/specs/implemented/split_plugins.rst
deleted file mode 100644
index 0c32c0bb..00000000
--- a/doc/specs/implemented/split_plugins.rst
+++ /dev/null
@@ -1,81 +0,0 @@
-..
-  This work is licensed under a Creative Commons Attribution 3.0 Unported
-  License.
-
-  http://creativecommons.org/licenses/by/3.0/legalcode
-
-
-====================
- Re-organize Plugins
-====================
-
-Move all plugins under rally/plugins to simplify the Rally code base.
-
-
-Problem description
-===================
-
-The Rally code is coupled with the Rally engine and infra as well as
-OpenStack-specific code. This makes contribution harder, as newcomers
-need to understand the Rally code as well as many different plugins. It
-also makes reviewing much harder.
-
-Proposed change
-===============
-
-Moving all plugins under a single directory, with "OpenStack" as its
-sub-directory, would make everything simpler.
-
-Alternatives
-------------
-
-None comes to mind.
-
-Implementation
-==============
-
-
-.. code-block:: shell
-
-    rally/
-      |
-      +-- plugins/
-            |
-            +-- common/
-            |     |
-            |     +-- runners/
-            |     +-- sla/
-            |     +-- contexts/
-            |     +-- scenarios/
-            |
-            +-- openstack/
-                  |
-                  +-- runners/
-                  +-- sla/
-                  +-- contexts/
-                  +-- scenarios/
-
-
-NOTE: looking at the current code base we can see that:
-
-#. All ``runners`` and ``sla`` will go under ``common``.
-#. All ``contexts`` will go under ``openstack``.
-#. Most ``scenarios`` (except for ``dummy``) will go under ``openstack``.
-
-Assignee(s)
------------
-
- - yfried
-
- - boris-42
-
-Work Items
-----------
-
-- Move all OpenStack-related plugins and code under ``plugins/openstack/``
-  and all other plugin code under ``plugins/common/``.
-
-
-Dependencies
-============
-
-- Plugin unification
diff --git a/doc/specs/implemented/verification_refactoring.rst b/doc/specs/implemented/verification_refactoring.rst
deleted file mode 100644
index 9e72a04c..00000000
--- a/doc/specs/implemented/verification_refactoring.rst
+++ /dev/null
@@ -1,954 +0,0 @@
-..
-  This work is licensed under a Creative Commons Attribution 3.0 Unported
-  License.
-
-  http://creativecommons.org/licenses/by/3.0/legalcode
-
-..
-  This template should be in ReSTructured text. The filename in the git
-  repository should match the launchpad URL, for example a URL of
-  https://blueprints.launchpad.net/rally/+spec/awesome-thing should be named
-  awesome-thing.rst . Please do not delete any of the sections in this
-  template. If you have nothing to say for a whole section, just write: None
-  For help with syntax, see http://sphinx-doc.org/rest.html
-  To test out your formatting, see http://www.tele3.cz/jbar/rest/rest.html
-
-===============================
-Refactor Verification Component
-===============================
-
-Rally Verification was introduced a long time ago as an easy way to
-launch Tempest. It allows managing (installing, uninstalling, configuring,
-etc.), launching Tempest and processing the results (storing, comparing,
-displaying in different formats).
-
-There is a lot of code related to Verification which can be used not only
-for Tempest. Since `rally verify` was implemented to launch subunit-based
-applications (Tempest is such a tool), our code is ready to launch any
-subunit-based framework we want by changing only one variable - the path
-to tests.
-
-Problem description
-===================
-
-Rally is a good framework for any kind of testing (performance,
-functional, etc.), so it is pretty sad that we have a lot of hardcode and
-binding to a specific application.
-
-* non-pluggable architecture
-
-  Most Rally components (for example Task or Deployment) are pluggable.
-  You can easily extend the Rally framework with such components. But we
-  cannot say the same about Verification.
-
-* subunit-trace
-
-  The ``subunit-trace`` library is used to display the live progress and
-  summary in a user-friendly way for each launch of Verification.
-  There are several issues with this library:
-
-  1. It is a Tempest requirement.
-
-     It is the second time the Rally Verification component has used a
-     dependency from Tempest. ``tools/colorizer.py`` from the Tempest
-     repo was used before ``subunit-trace``. That script was removed from
-     Tempest, which broke the whole Verification component.
-     Also, ``rally verify install`` supports a ``--source`` option for
-     installing Tempest from non-default repos, which can miss the
-     ``subunit-trace`` requirement.
-
-  2. Bad calculation (for example, a skip of a whole TestCase means 1
-     skipped test)
-
-* Code duplication
-
-  To simplify the usage of Tempest, it is required to check the existence
-  of images, roles, networks and other resources. While implementing
-  these checks, we re-implemented ... the "Context" class which is used
-  in Tasks. It was called TempestResourcesContext.
-
-* Inner storage based on deployment
-
-  In the case of several deployments and one type of verifier (one repo),
-  Rally creates several directories in ``~/.rally/tempest``
-  (``for-tempest-<uuid>``, where <uuid> is a UUID of the deployment).
-  Each of these directories will include the same files; the only
-  difference is in the config files, which can be stored wherever we
-  want. Also, we have one more directory with the same data - the cache
-  directory (``~/.rally/tempest/base``).
-
-* The word "Tempest" is hardcoded in logging, help messages, etc.
-
-Proposed change
-===============
-
-Most subunit-based frameworks can be launched in the same way, but they
-can accept different arguments, different setup steps and so on.
-
-.. note:: In the further text, we will apply the label "old" for code
-  which was implemented before this spec and "new" for the proposed
-  change. Also, all references to old code will be linked to the
-  `0.3.3`__ release, which is the latest release at the time of writing
-  this spec.
-
-__ http://rally.readthedocs.org/en/0.3.3/release_notes/archive/v0.3.3.html
-
-Declare base Verification entities
-----------------------------------
-
-Let's talk about all the entities which represent Verification.
-
-Old model
-~~~~~~~~~
-
-The old implementation uses only one entity - the results of a single
-verification launch.
-
-**DB Layer**
-
-* `Verification`__
-
-  It represents a summary of a single verification launch's results.
-  Also, it is linked to the full results (see the next entity,
-  VerificationResult).
-
-__ https://github.com/openstack/rally/blob/0.3.3/rally/common/db/sqlalchemy/models.py#L186
-
-* `VerificationResult`__
-
-  The full results of a single launch. Since support for migrations was
-  added recently, not all places are cleaned up yet, so
-  ``VerificationResults`` can store results in two formats (the old and
-  the current format). It would be nice to fix this and support only one
-  format.
-
-__ https://github.com/openstack/rally/blob/0.3.3/rally/common/db/sqlalchemy/models.py#L217
-
-**Object layer**
-
-It is bad practice to provide direct access to db stuff, and we don't do
-that. The ``rally.common.objects`` layer was designed to hide all
-db-related stuff.
-
-* `Verification`__
-
-  Just represents results.
-
-__ https://github.com/openstack/rally/blob/0.3.3/rally/common/objects/verification.py#L28
-
-New model
-~~~~~~~~~
-
-We want to support different verifiers and want to identify them, so
-let's declare three entities:
-
-* **Verifier type**. The name of the entity speaks for itself.
-  Each type should be represented by its own plugin which implements the
-  interface for the verification tool. For example, Tempest and Gabbi
-  should be such types.
-
-* **Verifier**. An instance of a ``verifier type``. It can be described
-  with the following options:
-
-  * *source* - path to the git repository of the tool.
-
-  * *system-wide* - whether or not to use the local env instead of a
-    virtual environment when installing the verifier.
-
-  * *version* - branch, tag or hash of the commit to install the verifier
-    from. By default it is the "master" branch.
-
-* **Verification Results**. The result of a single launch.
-
-
-**DB Layer**
-
-* **Verifier**. We should add one more table to store different
-  verifiers. A new migration should be added which checks for existing
-  verification launches, creates a "default" verifier (type="Tempest",
-  source="n/a") and maps all those launches to it.
-
-  .. code-block::
-
-    class Verifier(BASE, RallyBase):
-        """Represent a unique verifier."""
-
-        __tablename__ = "verifiers"
-        __table_args__ = (
-            sa.Index("verification_uuid", "uuid", unique=True),
-        )
-
-        id = sa.Column(sa.Integer, primary_key=True, autoincrement=True)
-        uuid = sa.Column(sa.String(36), default=UUID, nullable=False)
-
-        deployment_uuid = sa.Column(
-            sa.String(36),
-            sa.ForeignKey(Deployment.uuid),
-            nullable=False,
-        )
-
-        name = sa.Column(sa.String(255), unique=True)
-        description = sa.Column(sa.String(1000))
-
-        status = sa.Column(sa.Enum(*list(consts.VerifierStatus),
-                                   name="enum_verifier_status"),
-                           default=consts.VerifierStatus.INIT,
-                           nullable=False)
-        started_at = sa.Column(sa.DateTime)
-        updated_at = sa.Column(sa.DateTime)
-
-        type = sa.Column(sa.String(255), nullable=False)
-        settings = sa.Column(
-            sa_types.MutableJSONEncodedDict,
-            default={"system-wide": False,
-                     "source": "n/a"},
-            nullable=False,
-        )
-
-* `Verification`__
-
-  It should be extended with a link to Verifier.
-
-* `VerificationResult`__
-
-  We can leave it as it is.
-
-
-Move storage from deployment-dependent logic to verifier
----------------------------------------------------------
-
-Old structure of the ``~/.rally/tempest`` dir:
-
-.. code-block:: yaml
-
-    base:
-      tempest_base-:
-        # Cached Tempest repository
-        tempest:
-          api
-          api_schema
-          cmd
-          ...
-        ...
-        requirements.txt
-        setup.cfg
-        setup.py
-        ...
-    for-deployment-:
-      # copy-paste of tempest_base- + files and directories listed below
-      .venv  # Directory for the virtual environment: exists if the user
-             # didn't specify the ``--system-wide`` argument while
-             # installing Tempest (``rally verify install`` command).
-      tempest.conf  # Only this file is unique for each deployment. It
-                    # stores the Tempest configuration.
-      subunit.stream  # Temporary result-file produced by
-                      # ``rally verify start``.
-
-As you can see, there are a lot of copy-pasted repositories and little
-unique data.
-
-The new structure (should be located in ``~/.rally/verifiers``):
-
-.. code-block:: yaml
-
-    verifier-<uuid>:
-      # Storage for a unique verifier. <uuid> is a uuid of the verifier.
-      repo:
-        # Verifier code repository. It is the same for all deployments.
-        # Also, one virtual environment can be used across all
-        # deployments too.
-        ...
-      for-deployment-<uuid>:
-        # Folder to store data unique to the deployment. <uuid> is a
-        # deployment uuid here. Currently we have only a configuration
-        # file to store, but let's reserve a place to store more data.
-        settings.conf
-        ...
-
-Each registered verifier is a unique entity for Rally and can be used by
-all deployments.
-If there is deployment-specific data (for example, a configuration file)
-required for a verifier, it should be stored separately from the
-verifier.
-
-Command line interface
-----------------------
-
-`rally verify` commands are not as hardcoded as other parts of the
-Verification component, but at the same time they are not flexible.
-
-Old commands:
-
-.. code-block:: none
-
-    compare      Compare two verification results.
-    detailed     Display results table of a verification with detailed errors.
-    discover     Show a list of discovered tests.
-    genconfig    Generate Tempest configuration file.
-    import       Import Tempest tests results into the Rally database.
-    install      Install Tempest.
-    list         List verification runs.
-    reinstall    Uninstall Tempest and install again.
-    results      Display results of a verification.
-    show         Display results table of a verification.
-    showconfig   Show configuration file of Tempest.
-    start        Start verification (run Tempest tests).
-    uninstall    Remove the deployment's local Tempest installation.
-    use          Set active verification.
-
-There is another problem with the old CLI. Management is split across all
-commands and you can do the same things via different commands. Moreover,
-you can install Tempest in a virtual environment via ``rally verify
-install`` and then use the ``--system-wide`` option in ``rally verify
-start``.
-
-Let's provide a stricter CLI. Something like:
-
-.. code-block:: none
-
-    list-types
-
-    create-verifier
-    delete-verifier
-    list-verifiers
-    update-verifier
-    extend-verifier
-    use-verifier
-
-    configure
-    discover
-    start
-
-    compare
-    export
-    import
-    list
-    show
-    use
-
-list-types
-~~~~~~~~~~
-
-Verifier types should be implemented on top of the base Rally plugin
-mechanism. This removes the need to create types manually: Rally will
-automatically load them, and the user will need only an interface to
-list them.
-
-create-verifier
-~~~~~~~~~~~~~~~
-
-Just creates a new verifier based on a type.
-
-Example:
-
-.. code-block:: bash
-
-    $ rally verify create-verifier tempest-mitaka --type tempest --source "https://git.openstack.org/openstack/tempest" --version "10.0.0" --system-wide
-
-This command should process the following steps:
-
-1. Clone the Tempest repository from
-   "https://git.openstack.org/openstack/tempest";
-2. Call ``git checkout 10.0.0``;
-3. Check that all requirements from requirements.txt are satisfied;
-4. Set the new verifier as the default one.
-
-Also, it would be nice to store verifier statuses like "Init",
-"Ready-to-use", "Failed", "Updating".
-
-delete-verifier
-~~~~~~~~~~~~~~~
-
-Deletes the verifier's virtual environment (if it was created), its
-repository and deployment-specific files (configuration files).
-
-Also, it will remove the verification results produced by this verifier.
-
-list-verifiers
-~~~~~~~~~~~~~~
-
-List all available verifiers.
-
-update-verifier
-~~~~~~~~~~~~~~~
-
-This command gives the ability to update the git repository (``git pull``
-or ``git checkout``) or to start/stop using a virtual environment.
-
-Also, the configuration file can be updated via this interface.
-
-extend-verifier
-~~~~~~~~~~~~~~~
-
-A verifier can have an interface to extend itself. For example, Tempest
-supports plugins. For verifiers which do not support any extension
-mechanism, let's print a user-friendly message.
-
-use-verifier
-~~~~~~~~~~~~
-
-Choose the default verifier.
-
-configure
-~~~~~~~~~
-
-An interface to configure a verifier for a specific deployment.
-
-Usage examples:
-
-.. code-block:: bash
-
-    # At this step we assume that the configuration file was not created
-    # yet. Create the configuration file and show it.
-    $ rally verify configure
-
-    # Configuration file already exists, so just show it.
-    $ rally verify configure
-
-    # Recreate the configuration file and show it.
-    $ rally verify configure --renew
-
-    # Recreate the configuration file using predefined configuration
-    # options and show it.
-    # via json:
-    $ rally verify configure --renew \
-    >   --options '{"section_name": {"some_key": "some_var"}}'
-
-    # via a config file, which can be in json/yaml or ini format:
-    $ rally verify configure --renew --options ~/some_file.conf
-
-    # Replace the configuration file by another file and show it.
-    $ rally verify configure --replace ./some_config.conf
-
-Also, we can provide a ``--silent`` option to disable the ``show``
-action.
-
-discover
-~~~~~~~~
-
-Discover and list tests.
-
-start
-~~~~~
-
-Start verification. Basically, there is no big difference between
-launching different verifiers.
-
-Current arguments: ``--set``, ``--regex``, ``--tests-file``,
-``xfails-file``, ``--failing``.
-
-The argument ``--set`` is specific to Tempest. Each verifier can have
-specific search arguments. Let's introduce a new argument,
-``--filter-by``. In this case, the set_name for Tempest can be specified
-like ``--filter-by set=smoke``.
-
-compare
-~~~~~~~
-
-Compare two verification results.
-
-export
-~~~~~~
-
-Part of the `Export task and verifications into external services`__ spec
-
-__ https://github.com/openstack/rally/blob/0.3.2/doc/specs/in-progress/task_and_verification_export.rst
-
-import
-~~~~~~
-
-Import external results into the Rally database.
-
-list
-~~~~
-
-List all verification results.
-
-show
-~~~~
-
-Show verification results in different formats.
-
-Refactor base classes
----------------------
-
-The old implementation includes several classes:
-
-* The main class **Tempest**. This class combines management and launch
-  logic.
-
-  .. code-block:: python
-
-    # Description of the public interface (all implementation details
-    # are skipped)
-    class Tempest(object):
-
-        base_repo_dir = os.path.join(os.path.expanduser("~"),
-                                     ".rally/tempest/base")
-
-        def __init__(self, deployment, verification=None,
-                     tempest_config=None, source=None, system_wide=False):
-            pass
-
-        @property
-        def venv_wrapper(self):
-            """This property returns the command for activating the
-            virtual environment. It is hardcoded to a tool from the
-            Tempest repository:
-
-            https://github.com/openstack/tempest/blob/10.0.0/tools/with_venv.sh
-
-            We should remove this hardcode in the new implementation."""
-
-        @property
-        def env(self):
-            """Returns a copy of environment variables with the addition
-            of paths to tests"""
-
-        def path(self, *inner_path):
-            """Constructs a path for inner files of
-            ~/.rally/tempest/for-deployment-
-            """
-
-        @property
-        def base_repo(self):
-            """The structure of the ~/.rally/tempest dir was changed
-            several times. This method handles the difference."""
-
-        def is_configured(self):
-            pass
-
-        def generate_config_file(self, override=False):
-            """Generate a Tempest configuration file for the current
-            deployment.

            :param override: Whether or not to override the existing
                             Tempest config file
-            """
-
-        def is_installed(self):
-            pass
-
-        def install(self):
-            """Creates a local Tempest repo and virtualenv for the
-            deployment."""
-
-        def uninstall(self):
-            """Removes the local Tempest repo and virtualenv for the
-            deployment."""
-
-        def run(self, testr_args="", log_file=None, tempest_conf=None):
-            """Run Tempest."""
-
-        def discover_tests(self, pattern=""):
-            """Get a list of discovered tests.
-
-            :param pattern: Test name pattern which can be used to match
-            """
-
-        def parse_results(self, log_file=None, expected_failures=None):
-            """Parse a subunit raw log file."""
-
-        def verify(self, set_name, regex, tests_file, expected_failures,
-                   concur, failing):
-            """Launch verification and save results in the database."""
-
-        def import_results(self, set_name, log_file):
-            """Import an outer subunit file into the Rally database"""
-
-        def install_plugins(self, *args, **kwargs):
-            """Install a Tempest plugin."""
-
-* The class ``TempestConfig`` was designed to obtain all required
-  settings from the OpenStack public API and generate a configuration
-  file. It has a decent interface (just the ``init`` and ``generate``
-  public methods), but the implementation could be better (the init
-  method should not start obtaining data).
-
-* The class ``TempestResourcesContext`` looks like the contexts we have
-  for the Task component.
-
-``TempestConfig`` and ``TempestResourcesContext`` are helper classes and
-in the new implementation they will be optional.
-
-The new implementation should look like:
-
-* ``VerifierManager``. It is the main class which represents a type of
-  Verifier and provides an interface for all management stuff (i.e.
-  install, update, delete). Also, it should be an entry point for the
-  configuration and extend mechanisms, which are optional.
-
-* ``VerifierLauncher``. It takes care of deployment-specific tasks -
-  preparation, launching verification and so on.
-
-* ``VerifierContext``. An inheritor of the rally.task.context.Context
-  class with a hardcoded "hidden=True" value, since it should be an
-  inner helper class.
-
-* ``VerifierSettings``. Obtains required data from public APIs and
-  constructs deployment-specific configuration files for Verifiers.
-
-The proposed implementation is described below in the `Implementation`_
-section.
-
-Remove dependencies on external libraries and scripts
------------------------------------------------------
-
-Currently our verification code has two redundant dependencies:
-
-* subunit-trace
-* /tools/with_venv.sh
-
-subunit-trace
-~~~~~~~~~~~~~
-
-It should not be a hard task to remove this dependency. With small
-modifications, ``rally.common.io.subunit.SubunitV2StreamResult`` can
-print live progress. Also, we can print summary info based on the parsed
-results.
-
-with_venv.sh script
-~~~~~~~~~~~~~~~~~~~
-
-It is a Tempest in-tree script. Its logic is simple - just activate the
-virtual environment and execute the transmitted cmd in it. We can rewrite
-this script in Python and put it into the Verification component.
-
-Alternatives
-------------
-
-Stop development of Rally Verification.
-
-Implementation
-==============
-
-Implementation details
-----------------------
-
-Below you can find an example of the implementation. It contains some
-implementation details and notes for future development.
-
-.. note:: The proposed implementation is not ideal and not finished. It
-  should be reviewed without nits.
-
-rally.common.objects.Verifier
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Basically, it will have the same design as
-`rally.common.objects.Verification`__. There is no reason to keep the
-old class; the ``Verifier`` interface should be enough.
-
-__ https://github.com/openstack/rally/blob/0.3.3/rally/common/objects/verification.py#L28
-
-VerifierManager
-~~~~~~~~~~~~~~~
-
-.. code-block:: python
-
-    import os
-    import shutil
-    import subprocess
-
-    from rally.common.plugin import plugin
-
-
-    class VerifierManager(plugin.Plugin):
-
-        def __init__(self, verifier):
-            """Init manager
-
-            :param verifier: `rally.common.objects.Verifier` instance
-            """
-            self.verifier = verifier
-
-        @property
-        def home_dir(self):
-            """Home directory of the verifier"""
-            return "~/.rally/verifier-%s" % self.verifier.id
-
-        @property
-        def repo_path(self):
-            """Path to the local repository"""
-            return os.path.join(self.home_dir, "repo")
-
-        def mkdirs(self):
-            """Create all directories"""
-            if not os.path.exists(self.home_dir):
-                os.mkdir(self.home_dir)
-            deployment_path = os.path.join(
-                self.home_dir, "for-deployment-%s" % self.deployment.id)
-            if not os.path.exists(deployment_path):
-                os.mkdir(deployment_path)
-
-        def _clone(self):
-            """Clone and checkout the git repo"""
-            self.mkdirs()
-            source = self.verifier.source or self._meta_get("default_repo")
-            subprocess.check_call(["git", "clone", source, self.repo_path])
-
-            version = self.verifier.version or self._meta_get(
-                "default_version")
-            if version:
-                subprocess.check_call(["git", "checkout", version],
-                                      cwd=self.repo_path)
-
-        def _install_virtual_env(self):
-            """Install a virtual environment and all requirements in it."""
-            if os.path.exists(os.path.join(self.repo_path, ".venv")):
-                # NOTE(andreykurilin): It is necessary to remove the old
-                # env while processing the update action
-                shutil.rmtree(os.path.join(self.repo_path, ".venv"))
-
-            # TODO(andreykurilin): make the next steps silent and print
-            # output only on failure or debug
-            subprocess.check_output(["virtualenv", ".venv"],
-                                    cwd=self.repo_path)
-            # TODO: install the verifier and its requirements here.
-
-        def install(self):
-            if os.path.exists(self.home_dir):
-                # raise a proper exception
-                raise Exception()
-            self._clone()
-            if self.verifier.system_wide:
-                # There are several ways to check requirements. It can be
-                # done at least via two libraries: `pip`, `pkgutils`. The
-                # code below is based on `pip`, but it can be changed for
-                # a better solution while implementing.
-                import pip
-
-                requirements = set(pip.req.parse_requirements(
-                    "%s/requirements.txt" % self.repo_path,
-                    session=False))
-                installed_packages = set(pip.get_installed_distributions())
-                missed_packages = requirements - installed_packages
-                if missed_packages:
-                    # raise a proper exception
-                    raise Exception()
-            else:
-                self._install_virtual_env()
-
-        def delete(self):
-            """Remove all"""
-            shutil.rmtree(self.home_dir)
-
-        def update(self, update_repo=False, version=None, update_venv=False):
-            """Update repository, version, virtual environment."""
-            pass
-
-        def extend(self, *args, **kwargs):
-            """Install verifier extensions.
-
-            .. note:: It is an optional interface, so it raises
-               UnsupportedError by default. If a specific verifier needs
-               this interface, it should just implement it.
-            """
-            raise UnsupportedAction("%s verifier does not support "
-                                    "extensions." % self.get_name())
-
-For example, the implementation of a verifier for Tempest will need to
-implement only one method, ``extend``:
-
-.. code-block:: python
-
-    @configure("tempest_manager",
-               default_repo="https://github.com/openstack/tempest",
-               default_version="master",
-               launcher="tempest_launcher")
-    class TempestManager(VerifierManager):
-
-        def extend(self, *args, **kwargs):
-            """Install tempest-plugin."""
-            pass
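-
-For illustration only, the body of ``extend`` might boil down to
-installing a Tempest plugin package into the verifier's environment (a
-hypothetical sketch, not part of the spec):
-
-.. code-block:: python
-
-    def extend(self, source):
-        """Install a Tempest plugin from a git source."""
-        pip_bin = "pip" if self.verifier.system_wide else os.path.join(
-            self.repo_path, ".venv", "bin", "pip")
-        subprocess.check_call([pip_bin, "install", "-e", source])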
-
-VerifierLauncher
-~~~~~~~~~~~~~~~~
-
-.. code-block:: python
-
-    import os
-    import subprocess
-
-    from rally.common.io import subunit_v2
-    from rally.common.plugin import plugin
-
-
-    class EmptyContext(object):
-        """Just an empty default context."""
-
-        def __init__(self, verifier, deployment):
-            pass
-
-        def __enter__(self):
-            return
-
-        def __exit__(self, exc_type, exc_value, exc_traceback):
-            # do nothing
-            return
-
-
-    class VerifierLauncher(plugin.Plugin):
-        def __init__(self, deployment, verifier):
-            """Init launcher
-
-            :param deployment: `rally.common.objects.Deployment` instance
-            :param verifier: `rally.common.objects.Verifier` instance
-            """
-            self.deployment = deployment
-            self.verifier = verifier
-
-        @property
-        def environ(self):
-            """Customize environment variables."""
-            return os.environ.copy()
-
-        @property
-        def _with_venv(self):
-            """Returns arguments for activating the virtual environment
-            if needed"""
-            if self.verifier.system_wide:
-                return []
-            # FIXME(andreykurilin): Currently, we use the
-            # "tools/with_venv.sh" script from the Tempest repository. We
-            # should remove this dependency.
-            return ["activate-venv"]
-
-        @property
-        def context(self):
-            ctx = self._meta_get("context")
-            if ctx:
-                ctx = VerifierContext.get(ctx)
-            return ctx or EmptyContext
-
-        def configure(self, override=False):
-            # by default, a verifier doesn't support this method
-            raise NotImplementedError
-
-        def configure_if_necessary(self):
-            """Check existence of the config file and create it if
-            necessary."""
-            pass
-
-        def transform_kwargs(self, **kwargs):
-            """Transform kwargs into the list of testr arguments."""
-            args = ["--subunit", "--parallel"]
-            if kwargs.get("concurrency"):
-                args.append("--concurrency")
-                args.append(str(kwargs["concurrency"]))
-            if kwargs.get("re_run_failed"):
-                args.append("--failing")
-            if kwargs.get("file_with_tests"):
-                args.append("--load-list")
-                args.append(os.path.abspath(kwargs["file_with_tests"]))
-            if kwargs.get("regexp"):
-                args.append(kwargs["regexp"])
-            return args
-
-        def run(self, regexp=None, concurrency=None, re_run_failed=False,
-                file_with_tests=None):
-            self.configure_if_necessary()
-
-            cmd = self._with_venv + ["testr", "run"]
-            cmd.extend(self.transform_kwargs(
-                regexp=regexp, concurrency=concurrency,
-                re_run_failed=re_run_failed,
-                file_with_tests=file_with_tests))
-
-            with self.context(self.deployment, self.verifier):
-                verification = subprocess.Popen(
-                    cmd, env=self.environ,
-                    cwd=self.verifier.manager.home_dir,
-                    stdout=subprocess.PIPE,
-                    stderr=subprocess.STDOUT)
-                results = subunit_v2.parse(verification.stdout, live=True)
-                verification.wait()
-                return results
-
-An example of VerifierLauncher for Tempest:
-
-.. code-block:: python
-
-    @configure("tempest_verifier")
-    class TempestLauncher(VerifierLauncher):
-
-        @property
-        def configfile(self):
-            return os.path.join(self.verifier.manager.home_dir,
-                                "for-deployment-%s" % self.deployment.id,
-                                "tempest.conf")
-
-        @property
-        def environ(self):
-            """Customize environment variables."""
-            env = super(TempestLauncher, self).environ
-
-            env["TEMPEST_CONFIG_DIR"] = os.path.dirname(self.configfile)
-            env["TEMPEST_CONFIG"] = os.path.basename(self.configfile)
-            env["OS_TEST_PATH"] = os.path.join(
-                self.verifier.manager.home_dir,
-                "tempest", "test_discover")
-            return env
-
-        def configure(self, override=False):
-            if os.path.exists(self.configfile):
-                if override:
-                    os.remove(self.configfile)
-                else:
-                    raise AlreadyConfiguredException()
-            # Configure Tempest.
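-            # NOTE: a hypothetical sketch of this step, not part of the
-            # spec - the concrete option set is out of scope here. The
-            # general idea is to render deployment credentials into an
-            # ini-style file, e.g.:
-            #
-            #   conf = six.moves.configparser.ConfigParser()
-            #   conf.add_section("auth")
-            #   conf.set("auth", "admin_username",
-            #            self.deployment.credentials["admin"]["username"])
-            #   with open(self.configfile, "w") as f:
-            #       conf.write(f)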
-
-        def configure_if_necessary(self):
-            try:
-                self.configure()
-            except AlreadyConfiguredException:
-                # nothing to do. everything is ok
-                pass
-
-        def run(self, set_name, **kwargs):
-            if set_name == "full":
-                pass
-            elif set_name in consts.TempestTestsSets:
-                kwargs["regexp"] = set_name
-            elif set_name in consts.TempestTestsAPI:
-                kwargs["regexp"] = "tempest.api.%s" % set_name
-
-            super(TempestLauncher, self).run(**kwargs)
-
-VerifierContext
-~~~~~~~~~~~~~~~
-
-.. code-block:: python
-
-    from rally import osclients
-    from rally.task import context
-
-
-    class VerifierContext(context.Context):
-
-        def __init__(self, **ctx):
-            super(VerifierContext, self).__init__(ctx)
-            # There are no terms "task" and "scenario" in Verification
-            del self.task
-            del self.map_for_scenario
-            self.clients = osclients.Clients(
-                self.context["deployment"].credentials)
-
-        @classmethod
-        def _meta_get(cls, key, default=None):
-            # It should always be hidden
-            if key == "hidden":
-                return True
-            return super(VerifierContext, cls)._meta_get(key, default)
-
-
-Example of a context for Tempest:
-
-.. code-block:: python
-
-    @configure("tempest_verifier_ctx")
-    class TempestContext(VerifierContext):
-
-        def __init__(self, **kwargs):
-            super(TempestContext, self).__init__(**kwargs)
-
-        def setup(self):
-            # create required resources and save them to self.context
-            pass
-
-        def cleanup(self):
-            # remove created resources
-            pass
-
-
-Assignee(s)
------------
-
-Primary assignee:
-  Andrey Kurilin
-
-Work Items
-----------
-
-1) CLI- and API-related changes.
-
-   Let's provide the new interface as soon as possible, even if some APIs
-   will not be implemented yet. The sooner we deprecate the old
-   interface, the sooner we will be able to remove it and provide a clean
-   new one.
-
-2) Provide base classes for Verifiers
-
-3) Rewrite the Tempest verifier based on the new classes.
-
-
-Dependencies
-============
-
-None
diff --git a/doc/specs/in-progress/README.rst b/doc/specs/in-progress/README.rst
deleted file mode 100644
index 20a4fa1f..00000000
--- a/doc/specs/in-progress/README.rst
+++ /dev/null
@@ -1,12 +0,0 @@
-Rally Specs
-===========
-
-Specs are detailed descriptions of proposed changes in the project.
-Usually they answer what to change in the project, why and how to change
-it, and who is going to work on the change.
-
-
-This directory contains files with accepted but not yet implemented
-specs, 1 file is 1 spec.
-
-If you are looking for a full Rally road map overview, go `here `_.
diff --git a/doc/specs/in-progress/cleanup_refactoring.rst b/doc/specs/in-progress/cleanup_refactoring.rst
deleted file mode 100644
index 4368cfd4..00000000
--- a/doc/specs/in-progress/cleanup_refactoring.rst
+++ /dev/null
@@ -1,198 +0,0 @@
-..
-  This work is licensed under a Creative Commons Attribution 3.0 Unported
-  License.
-
-  http://creativecommons.org/licenses/by/3.0/legalcode
-
-=========================
-Refactoring Rally Cleanup
-=========================
-
-The current generic mechanism is nice, but it doesn't work well enough in
-real life: in the cases of existing users, persistent contexts and
-disaster cleanups it falls short.
-This proposal should be useful for covering the following use cases.
-
-
-Problem description
-===================
-
-There are 5 use cases that require cleanup refactoring:
-
-#. Benchmarking with existing tenants.
-
-   Keep existing resources instead of deleting all resources in the
-   tenants.
-
-#. Persistence benchmark context.
-
-   Create the benchmark environment once before benchmarking.
   After that, run some number of benchmarks that use it, and at the
   end just delete all created resources via the context cleanups.

#. Disaster cleanup.

   Delete all resources created by Rally in case something goes wrong with
   the server that is running Rally.

#. Isolated tasks.

   It is quite important to add the ability to run a few instances of Rally
   against a cloud simultaneously (so that one cleanup won't affect the
   others).

#. Testing that cleanup works.

   How to ensure that Rally cleaned up all resources.


Proposed change
===============

Use consistent resource names as described in
https://review.openstack.org/201545

* Resources created by Rally are deleted after a task finishes by
  `UserCleanup.cleanup()`.

* Resources created by contexts are deleted when the environment is
  no longer necessary, by the context class `cleanup()`.

Specifically, there are three cases we need to be able to handle:

* Cleanup of all resources created by a single subtask run;
* Cleanup of all resources created by contexts; and
* Cleanup of all resources, possibly (or probably) out-of-band.

In each case, this can be handled by matching resource names with a
subset of plugins. For instance, to clean up scenario resources, we
will do something like:

.. code-block:: python

    scenarios = [cls for cls in discover.itersubclasses(scenario.Scenario)
                 if issubclass(cls, utils.RandomNameGeneratorMixin)]
    for resource in resource_manager.list():
        manager = resource_manager_cls(raw_resource=resource, ...)
        if utils.name_matches_object(manager.name(), scenarios,
                                     task_id=task_id):
            manager.delete()

This is pseudocode that hides much of the complexity of our current
cleanup process, but it demonstrates the basic idea:

#. Generate a list of subclasses to delete resources for. In this case
   we use ``rally.task.scenario.Scenario``, but for context cleanup it
   would be ``rally.task.context.Context``, and for global cleanup it
   would be ``rally.common.plugin.plugin.Plugin``. In all three cases
   we would only delete resources for plugins that have
   ``rally.common.utils.RandomNameGeneratorMixin`` as a superclass;
   this lets us easily perform global cleanup without needing to worry
   about which plugin subclasses might implement
   ``RandomNameGeneratorMixin``.
#. For each resource manager, list resources.
#. If the resource name matches the list of possible patterns gleaned
   from the set of classes, delete it.

A fair bit of functionality will need to be added to support this:

* ``rally.plugins.openstack.cleanup.manager.cleanup()`` will
  need to accept a keyword argument specifying the type of
  cleanup. This should be a superclass that will be used to discover
  the subclasses to delete resources for. It will be passed to
  ``rally.plugins.openstack.cleanup.manager.SeekAndDestroy``,
  which will also need to accept the argument and generate the list of
  classes.
* ``rally.plugins.openstack.cleanup.base``,
  ``rally.plugins.openstack.cleanup.manager`` and
  ``rally.plugins.openstack.cleanup.resources`` need to be
  moved out of the context space, since they will be used not only by
  the cleanup context to do scenario cleanup, but also to do
  out-of-band cleanup of all resources.
* A new function, ``name()``, will need to be added to
  ``rally.plugins.openstack.cleanup.base.ResourceManager``
  so that we can determine the name of a resource in order to match it;
  see the sketch below.
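
  A minimal sketch of such a ``name()`` method, assuming a hypothetical
  Nova server resource manager whose ``raw_resource`` (the raw
  client-side object) exposes a ``name`` attribute; the exact attribute
  differs per service:

  .. code-block:: python

      class NovaServer(base.ResourceManager):

          def name(self):
              # novaclient's server objects carry the resource name directly
              return self.raw_resource.name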

* A ``task_id`` keyword argument will be added to
  ``name_matches_object`` and ``name_matches_pattern`` in order to
  ensure that we only match names from the currently-running
  task. This will need to be passed along starting with
  ``rally.plugins.openstack.cleanup.manager.cleanup()``, and
  added as a keyword argument to every intermediate function.

Additionally, a new top-level command will be added::

    rally cleanup [--deployment <deployment>] [--task <task>]

This will invoke cleanup of all resources, either for a specific task,
or for any Rally-created resource at all, regardless of task ID. This
will not be ``rally task cleanup`` because it can be run with or
without a task.

Alternatives
------------

* Use the OpenStack project resources cleaner (ospurge). This enables us to
  purge the tenants regardless of resource naming, so we only need to keep
  track of Rally tenants (naming could be a solution here) and resources in
  the admin tenant. In this case, we need to think about the case where
  Rally needs to clean up some resources from an existing tenant while
  leaving the rest available.

* Use/enhance the Tempest cleanup command (tempest/cmd/cleanup.py). Compare
  functionality, or fix the version in Tempest. Maybe tempest_lib would be a
  better place for this, and for the cleanup code in general. In this case,
  we need to think about the case where a Rally scenario creates a tenant
  and then deletes it, but some resources are left around. We also need to
  think about the case of benchmarking with existing tenants.


Implementation
==============

Assignee(s)
-----------

Primary assignee:
  wtakase aka Wataru Takase

Other contributors:
  rvasilets aka Roman Vasilets
  stpierre aka Chris St. Pierre


Work Items
----------

#. Consider how to deal with resources that are not named by
   generate_random_name(). For example, Neutron ports that are
   created as a side effect of other resources (routers, networks,
   servers) don't have resource names. In this case, ports always have
   an "owner", so cleanup should check the port's owner's name. And what
   about floating IPs? (Needed by use cases 1, 2, 3, 4, 5)
#. Modify ``name_matches_{object,pattern}`` to accept a task ID.
#. Add ``name()`` functions to all ``ResourceManager`` subclasses.
#. Move
   ``rally.plugins.openstack.context.cleanup.{base,manager,resources}``
   to ``rally.plugins.openstack.cleanup``.
#. Modify ``rally.plugins.openstack.cleanup.manager.cleanup()`` to
   accept a task ID and a superclass, pass them along to
   ``SeekAndDestroy``, and generally Do The Right Thing with them.
#. Create the ``rally cleanup`` command.
#. Support a negative filter that deletes unmatched resources. (Needed
   by use cases 3, 5)


Dependencies
============

* Consistent resource names: https://review.openstack.org/201545

* Add name pattern filter for resource cleanup:
  https://review.openstack.org/#/c/139643/

* Finish support of benchmarking with existing users:
  https://review.openstack.org/#/c/168524/

* Add support of persistence benchmark environment:
  https://github.com/openstack/rally/blob/master/doc/feature_request/persistence_benchmark_env.rst

* Production ready cleanups:
  https://github.com/openstack/rally/blob/master/doc/feature_request/production_ready_cleanup.rst

diff --git a/doc/specs/in-progress/deployment_type.rst b/doc/specs/in-progress/deployment_type.rst
deleted file mode 100644
index bbf0f4b5..00000000
--- a/doc/specs/in-progress/deployment_type.rst
+++ /dev/null
@@ -1,216 +0,0 @@
..
  This work is licensed under a Creative Commons Attribution 3.0 Unported
  License.

  http://creativecommons.org/licenses/by/3.0/legalcode

============================
Rally Deployment Unification
============================

Make Rally able to examine any software through its API,
unbinding it from OpenStack.


Problem description
===================

Rally can only examine systems that use Keystone as an authentication
service, which limits the sphere where Rally is applicable.

At the moment, to run Rally Task or Rally Verify you must specify an
OpenStack deployment, which contains the credentials for it. These
credentials are used in Rally Task & Verify for different setups and
validations.

Rally is not able to store more than one credential per deployment, so
it is impossible to support multi-scenario runs that involve different
systems.


Proposed change
===============

* Modify the 'Deployment' database model to be able to store the credentials
  of many different systems, adding the type of each system.

Currently we have the Deployment model with admin and users columns,
which are credentials for Keystone (tightly coupled with OpenStack).

The current model is:

.. code-block:: python

    class Deployment(BASE, RallyBase):
        ...
        admin = sa.Column(types.PickleType, nullable=True)
        users = sa.Column(types.PickleType, default=[], nullable=False)
        ...

and the column values in the DB look like this:

``admin = {admin_creds} or None``

``users = [{user_creds1}, {user_creds2}, ...] or []``

We need to decouple deployment from OpenStack and
make credentials more flexible. We describe them in one column named
``credentials``, where we can store a special structure containing the
credentials for many different systems, including the type of credentials
for each.

.. code-block:: python

    class Deployment(BASE, RallyBase):
        ...
        credentials = sa.Column(types.PickleType, default=[], nullable=False)
        ...

So, for the current OpenStack credentials, we will have the following data
in the credentials column in the DB after migration:

.. code-block:: python

    credentials = [
        [
            "openstack",
            {admin: {admin_creds} or None,
             users: [{user_creds1}, {user_creds2}, ...] or []}
        ],
    ]

and for a multi-credentials deployment:

.. code-block:: python

    credentials = [
        [
            "openstack",
            {admin: {admin_creds} or None,
             users: [{user_creds1}, {user_creds2}, ...] or []}
        ],
        [
            "zabbix",
            {"url": "example.com", "login": "admin", "password": "admin"}
        ]
    ]

The future summarized schema in the DB:
``credentials = [[<type>, <credentials>], ...]``

To implement this point we need to write a DB migration and tests for it,
and to write adapters for the credentials get/create/update methods,
mostly to support backward compatibility in the ``rally.api`` module
methods.

* Get rid of the ``rally.common.objects.credential.Credential`` class
  and fix its usages, mostly in ``rally.osclients``, if needed.

Refactor all usages of passing ``rally.common.objects.credential.Credential``
to ``rally.osclients.OSClient``: make it possible to take a dict as the
credentials for the ``rally.osclients.OSClient`` class, and initialise the
``rally.plugins.openstack.credentials.OpenStackCredentials`` class
in the ``OSClient`` ``__init__`` method.

The base class for credentials will be inherited from plugin.Plugin
and must implement a validation method;
it will be placed in ``rally.plugins.common.credentials``:

.. code-block:: python

    import abc

    import jsonschema
    import six

    from rally.common.plugin import plugin


    @six.add_metaclass(abc.ABCMeta)
    @plugin.configure(name="base_credentials", schema="{...}")
    class Credentials(plugin.Plugin):
        def __init__(self, credentials):
            self.validate(credentials)
            super(Credentials, self).__setattr__("credentials", credentials)

        def __getattr__(self, item):
            if item in self.__dict__:
                return self.__dict__[item]
            return self.credentials[item]

        def __setattr__(self, key, value):
            self.credentials[key] = value

        def to_dict(self):
            return self.credentials.copy()

        def validate(self, obj):
            jsonschema.validate(obj, self._meta_get("schema"))

and we need to add a child class for OpenStack credentials;
it will be placed in ``rally.plugins.openstack.credentials``:

.. code-block:: python

    openstack_credentials_schema = {
        "type": "object",

        "properties": {
            "auth_url": {"type": "string"},
            "username": {"type": "string"},
            "password": {"type": "string"},
        },
        "required": ["auth_url", "username", "password"]
    }

    @plugin.configure(name="openstack_credentials",
                      schema=openstack_credentials_schema)
    class OpenStackCredentials(Credentials):
        pass

Replace the usage of ``rally.common.objects.credential.Credential`` with
``rally.plugins.openstack.credentials.OpenStackCredentials``
in ``rally.osclients``.

* Update the CLI to show the deployment type in the output of
  'rally deployment list'.

Make it possible to show the deployments list in the multi-credentials case
as:

.. code-block:: shell

    > rally deployment list  # (in case of many deployments)

    uuid   | name   | created_at | type      | credential
    -------+--------+------------+-----------+---------------------------------
    <uuid> | <name> | 21-02-2016 | openstack | {"admin": {...}, "users": [...]}
           |        |            | zabbix    | {"login": "login", "psw": "..."}


Alternatives
------------

None


Implementation
==============


Assignee(s)
-----------

Primary assignee:
  rpromyshlennikov aka Rodion Promyshlennikov (rpromyshlennikov@mirantis.com)


Work Items
----------

- Change the Deployment DB model class
- Write migrations
- Make adapters for the credentials get/create/update methods to temporarily
  support the changed data format
- Remove all usages of passing ``rally.common.objects.credential.Credential``
  to ``rally.osclients.OSClient``
- Create a new plugin-based class for credentials
- Write a subclass of rally.plugins.common.credentials.Credentials
  for OpenStack credentials with proper validation
- Migrate to the new credentials class
- Remove the ``rally.common.objects.credential.Credential`` class
- Improve the CLI client to make it possible to show multi-credentials
  deployments
- Future refactoring: remove the adapters after
  "Multi Scenario support" is implemented

Dependencies
============

None

diff --git a/doc/specs/in-progress/distributed_runner.rst b/doc/specs/in-progress/distributed_runner.rst
deleted file mode 100644
index 3a14521e..00000000
--- a/doc/specs/in-progress/distributed_runner.rst
+++ /dev/null
@@ -1,153 +0,0 @@
..
  This work is licensed under a Creative Commons Attribution 3.0 Unported
  License.

  http://creativecommons.org/licenses/by/3.0/legalcode

..
  This template should be in ReSTructured text. The filename in the git
  repository should match the launchpad URL, for example a URL of
  https://blueprints.launchpad.net/heat/+spec/awesome-thing should be named
  awesome-thing.rst . Please do not delete any of the sections in this
  template.
  If you have nothing to say for a whole section, just write: None
  For help with syntax, see http://sphinx-doc.org/rest.html
  To test out your formatting, see http://www.tele3.cz/jbar/rest/rest.html


============================
Implement Distributed Runner
============================

We need a Distributed Runner in Rally that will run tasks on many nodes
simultaneously.

Problem description
===================

Currently there are several runners in Rally, but they can all run only on
the same host that Rally itself runs on. This limits the test load that
Rally can generate. In some cases the required load cannot be generated
from one host.

In the current implementation, the Runner object runs the actual subtask
and generates test results, while the TaskEngine, via ResultConsumer,
retrieves these results, checks them against the specified SLA and stores
them in the DB.

There are several aspects that should be kept in mind when reasoning about
distributed load generation:

- Even one active runner is able to produce such significant amounts of
  result data that the TaskEngine can barely process it in time. We assume
  that a single TaskEngine instance definitely will not be able to process
  several streams of raw test result data from several simultaneous runners.

- We need test results to be checked against the SLA as soon as possible so
  that we can stop load generation on an SLA violation immediately (or close
  to it) and protect the environment being tested. On the other hand, we
  need the results from all runners to be analysed, i.e. checking the SLA on
  a single runner is not enough.

- Since we expect long task durations, we want to provide the user with at
  least partial information about task execution as soon as possible.


Proposed change
===============

It is proposed to introduce two new components, a RunnerAgent and a new
plugin of the runner type, the DistributedRunner, and to refactor the
existing components, TaskEngine, Runner and SLA, so that the overall
interaction looks as follows.

.. image:: ../../source/images/Rally_Distributed_Runner.png
   :align: center


1. TaskEngine

   - creates the subtask context
   - creates an instance of Runner
   - runs Runner.run() with the context object and info about the scenario
   - in a separate thread, consumes iteration result chunks & SLA data from
     the Runner
   - deletes the context

2. RunnerAgent

   - is executed on agent nodes
   - runs a Runner for the received task iterations with the given context
     and args
   - collects iteration result chunks, stores them on the local filesystem,
     and sends them on request to the DistributedRunner
   - aggregates SLA data and periodically sends it to the DistributedRunner
   - stops the Runner on receipt of the corresponding message

3. DistributedRunner

   - is a regular plugin of the Runner type
   - communicates with remote RunnerAgents via a message queue (ZeroMQ)
   - provides context, args and SLA to the RunnerAgents
   - distributes task iterations to the RunnerAgents
   - aggregates SLA data from the RunnerAgents
   - merges chunks of task result data

It is supposed to use separate communication channels for task results
and SLA data.

- SLA data is sent periodically (e.g. once per second) for iterations
  that are already finished.
- Task results are collected into chunks and stored locally by the
  RunnerAgent and are only sent on request.
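
The spec does not fix a wire format, but the agent side of the
results-on-request exchange might look roughly like the following minimal
sketch, assuming pyzmq and an invented "get_chunk" request shape:

.. code-block:: python

    import json

    import zmq


    def serve_chunks(endpoint, chunks):
        """Serve locally cached result chunks to the DistributedRunner.

        :param endpoint: ZeroMQ address of the DistributedRunner,
                         e.g. "tcp://runner-host:5555" (hypothetical)
        :param chunks: list of locally cached result chunks
        """
        socket = zmq.Context().socket(zmq.REP)
        socket.connect(endpoint)
        while True:
            request = json.loads(socket.recv().decode())
            if request.get("op") == "get_chunk" and chunks:
                # send the oldest cached chunk and drop it locally
                socket.send(json.dumps(chunks.pop(0)).encode())
            else:
                socket.send(b"{}")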


Alternatives
------------

No way


Implementation
==============

Assignee(s)
-----------

Primary assignee:
  Illia Khudoshyn


Work Items
----------

- Refactor the current SLA mechanism to support aggregated SLA data

- Refactor the current Runner base class

  - collect iteration results into chunks, ordered by timestamp
  - perform local SLA checks
  - aggregate SLA data

- Refactor the TaskEngine to reflect the changes in the Runner

  - operate on chunks of ordered test results rather than a stream of raw
    result items
  - apply SLA checks to aggregated SLA data
  - analyze SLA data and consume test results in separate threads

- Develop infrastructure that will allow multi-node Rally configuration
  and runs

- Implement the RunnerAgent

  - run the Runner
  - cache prepared chunks of iteration results
  - communicate via ZMQ with the DistributedRunner (send task results
    and SLA data on separate channels)
  - terminate the Runner on a 'stop' command from the TaskEngine

- Implement the DistributedRunner, which will

  - feed tasks to the RunnerAgents
  - receive chunks of result data from the RunnerAgents, merge them and
    provide the merged data to the TaskEngine
  - receive aggregated SLA data from the RunnerAgents, merge it
    and provide the data to the TaskEngine
  - translate the 'stop' command from the TaskEngine to the RunnerAgents

Dependencies
============

- DB model refactoring (boris-42)
- Report generation refactoring (amaretsky)

diff --git a/doc/specs/in-progress/improve_atomic_actions_format.rst b/doc/specs/in-progress/improve_atomic_actions_format.rst
deleted file mode 100644
index 0993f686..00000000
--- a/doc/specs/in-progress/improve_atomic_actions_format.rst
+++ /dev/null
@@ -1,157 +0,0 @@
..
  This work is licensed under a Creative Commons Attribution 3.0 Unported
  License.

  http://creativecommons.org/licenses/by/3.0/legalcode

..
  This template should be in ReSTructured text. The filename in the git
  repository should match the launchpad URL, for example a URL of
  https://blueprints.launchpad.net/heat/+spec/awesome-thing should be named
  awesome-thing.rst . Please do not delete any of the sections in this
  template. If you have nothing to say for a whole section, just write: None
  For help with syntax, see http://sphinx-doc.org/rest.html
  To test out your formatting, see http://www.tele3.cz/jbar/rest/rest.html


=============================================
New Atomic actions format in workload results
=============================================

Currently the atomic actions data in workload results is insufficient,
and therefore some new features cannot be implemented.

Problem description
===================

The main problem is that the current format does not support nested
atomic actions.

Also, the atomic actions data does not include timestamps for each action's
start and end time. Having this data will allow us to inspect atomic
action runtimes better and generate detailed reports.

Since the word "atomic" means something that cannot be split into parts,
and we are introducing nested atomic actions, we should use a different
term instead of "atomic actions".

Proposed change
===============

The term "atomic actions" should be renamed to just "actions".

Change the actions results schema from type "object" to "array"
and extend it with timestamps and nested actions.

Nested actions will be represented by a "children" key and have
unlimited nesting.

With timestamps, there is no need to save durations anymore,
so we get rid of this value.
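
For illustration, a nested action in the proposed format could look like
the following (the action names and timestamps here are invented):

.. code-block:: python

    [{"name": "nova.boot_server",
      "started_at": 1455281370.288397,
      "finished_at": 1455281375.672342,
      "children": [
          # a sub-action that ran within boot_server
          {"name": "keystone.authenticate",
           "started_at": 1455281370.301123,
           "finished_at": 1455281370.512345,
           "children": []}]}]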

Since this change is not backward compatible, we need to create
a database migration script. The migration will use the iteration start
timestamp as the start timestamp for the first action, and then calculate
further timestamps based on the order of the actions and their durations.

Benefits of the new format
--------------------------

Nested actions will make action measurements more detailed and flexible,
since we will have data on which sub-actions were run during a specific
action's runtime, without complicated changes to the code.

Start and end timestamps will provide us with accurate information
about action runtime within the whole iteration and the ability to create
`Gantt charts `_.

Schema modification
-------------------

The schema location *rally.common.objects.task.TASK_RESULT_SCHEMA
["properties"]["result"]["properties"]["atomic_actions"]*

should be moved to *rally.common.objects.task.TASK_RESULT_SCHEMA
["properties"]["result"]["properties"]["actions"]*

and changed:

AS IS:

.. code-block:: python

    {
        "type": "object"
    }

Here the keys are action names, and the values are their durations.
The actions data is actually represented by collections.OrderedDict,
so the real order is preserved.

Example:

.. code-block:: python

    OrderedDict([("keystone.create_tenant", 0.1234),
                 ("keystone.create_users", 1.234)])

TO BE:

.. code-block:: python

    {
        "type": "array",
        "items": {
            "type": "object",
            "properties": {
                "name": {"type": "string"},  # name of action
                "started_at": {"type": "number"},  # float UNIX timestamp
                "finished_at": {"type": "number"},  # float UNIX timestamp
                "children": {"$ref": "#/"},
            },
            "required": ["name", "started_at", "finished_at", "children"],
            "additionalProperties": False
        },
        "minItems": 0
    }

An example of how this data can be represented:

.. code-block:: python

    [{"name": "keystone.create_tenant",
      "started_at": 1455281370.288397,
      "finished_at": 1455281372.672342,
      "children": []},
     {"name": "keystone.create_users",
      "started_at": 1455281372.931324,
      "finished_at": 1455281373.375184,
      "children": []}]

Alternatives
------------

None


Implementation
==============

Assignee(s)
-----------

Primary assignee:
  Alexander Maretskiy


Work Items
----------

- Rename atomic actions into actions
- Improve the actions results format
- Create a DB migration that transforms results to the new format

Dependencies
============

None

diff --git a/doc/specs/in-progress/new_rally_input_task_format.rst b/doc/specs/in-progress/new_rally_input_task_format.rst
deleted file mode 100644
index 9c482ac5..00000000
--- a/doc/specs/in-progress/new_rally_input_task_format.rst
+++ /dev/null
@@ -1,349 +0,0 @@
..
  This work is licensed under a Creative Commons Attribution 3.0 Unported
  License.

  http://creativecommons.org/licenses/by/3.0/legalcode

..
  This template should be in ReSTructured text. The filename in the git
  repository should match the launchpad URL, for example a URL of
  https://blueprints.launchpad.net/heat/+spec/awesome-thing should be named
  awesome-thing.rst . Please do not delete any of the sections in this
  template.
  If you have nothing to say for a whole section, just write: None
  For help with syntax, see http://sphinx-doc.org/rest.html
  To test out your formatting, see http://www.tele3.cz/jbar/rest/rest.html


====================================
Make the new Rally input task format
====================================

The current Rally format is not flexible enough to cover all the use cases
that are required. Let's change it!


Problem description
===================

Why do we need such a fundamental change?

- Support for multi-scenario load generation.
  This is very important because it will allow using Rally for more
  realistic load generation, such as putting load on different components,
  and for HA testing (where, for example, one scenario tries to authenticate
  while another is disabling a controller).

- The ability to add required meta information (like titles and
  descriptions) that is needed to generate clear reports.

- Fixing UX issues. The previous format is very hard to understand, and
  end users have trouble understanding how exactly it works.


Proposed change
===============

Make a new format that addresses all these issues.


Old format JSON schema:

.. code-block:: python

    {
        "type": "object",
        "$schema": "http://json-schema.org/draft-04/schema",
        "patternProperties": {
            ".*": {
                "type": "array",
                "items": {
                    "type": "object",
                    "properties": {
                        "args": {
                            "type": "object"
                        },
                        "runner": {
                            "type": "object",
                            "properties": {
                                "type": {"type": "string"}
                            },
                            "required": ["type"]
                        },
                        "context": {
                            "type": "object"
                        },
                        "sla": {
                            "type": "object",
                        },
                    },
                    "additionalProperties": False
                }
            }
        }
    }


Old format sample:

.. code-block:: yaml

    ---
      <ScenarioName>:
        -
          args: <dict_with_scenario_args>
          runner: <dict_with_runner_type_and_args>
          context:
            <context_name>: <dict_with_context_args>
            ...
          sla:
            <sla_name>: <sla_args>
        -
          -//-
        -
          -//-
      <AnotherScenarioName>:
        -//-

Every element of the list corresponding to <ScenarioName> is a separate
task that generates an environment according to its context and generates
load using the specified runner, which runs the scenario multiple times
with its args.


New format JSON schema:

.. code-block:: python

    {
        "type": "object",
        "$schema": "http://json-schema.org/draft-04/schema",
        "properties": {
            "version": {"type": "number"},
            "title": {"type": "string"},
            "description": {"type": "string"},
            "tags": {
                "type": "array",
                "items": {"type": "string"}
            },

            "subtasks": {
                "type": "array",
                "items": {
                    "type": "object",
                    "properties": {
                        "title": {"type": "string"},
                        "description": {"type": "string"},
                        "tags": {
                            "type": "array",
                            "items": {"type": "string"}
                        },

                        "run_in_parallel": {"type": "boolean"},
                        "workloads": {
                            "type": "array",
                            "items": {
                                "type": "object",
                                "properties": {
                                    "scenario": {"type": "object"},
                                    "runner": {"type": "object"},
                                    "slas": {"type": "object"},
                                    "contexts": {"type": "object"}
                                },
                                "required": ["scenario", "runner"]
                            }
                        },
                        "context": {"type": "object"}
                    },
                    "required": ["title", "workloads"]
                }
            }
        },
        "required": ["title", "subtasks"]
    }


New format sample:

.. code-block:: yaml

    ---

    # Having a dictionary on the top level allows us to add any new keys in
    # the future, keeping the schema of the format more or less the same for
    # end users.

    # Version of the format
    version: 1

    # Allows setting the title of the report, which lets end users
    # understand what they can find in the task report.
    title: "New Input Task format"

    # The description allows us to put all the information required to
    # explain to end users what kind of results they can find in reports.
    description: "This task allows you to certify that your cloud works"

    # Explicit usage of the "rally task start --tag" --tag attribute
    tags: ["periodic", "nova", "cinder", "ha"]

    subtasks:
      # Note: every subtask is executed serially (one by one).
      #
      # Using a list to describe which benchmarks (tasks) to run is a much
      # better idea than using a dictionary. It resolves at least 3 big
      # issues:
      #
      # 1) Bad user experience
      #    1.1) Users do not realize that Rally can run N benchmarks
      #    1.2) Keys of the dictionary were scenario names (reasonable
      #         question: why?!)
      #    1.3) Users tried to put the same k-v N times (to run one
      #         benchmark N times)
      # 2) No way to specify the order of scenario execution, especially in
      #    the case where we need to do a chain like:
      #    ScenarioA -> ScenarioB -> ScenarioA
      # 3) No way to support multi-scenario load, because we used the
      #    scenario name as the identifier of a single task

      -
        # The title field is required because in the case of multi-scenario
        # load we can't use the scenario name for its value.
        title: "First task to execute"
        description: "We will stress Nova"  # optional

        # Tags are going to be used in various Rally task reports for
        # filtering and grouping.
        tags: ["nova", "my_favorite_task", "do it"]

        # The way to execute scenarios (one by one or all in parallel)
        run_in_parallel: False

        # Single-scenario load can be generated by specifying only one
        # element in the "workloads" section.
        workloads:
          -
            scenario:
              NovaServers.boot_and_delete:
                image:
                  name: "^cirros$"
                flavors:
                  name: "m1.small"
            runner:
              constant:
                times: 100
                concurrency: 10
            # Benchmark success criteria based on the results
            slas:
              # Every key is an SLA plugin name; values are the plugin's
              # config. The task is marked as passed only if all criteria
              # pass.
              failure_rate:
                max: 0

            # Specification of the context that creates the env for
            # benchmark scenarios. E.g. it creates users and tenants, sets
            # quotas, uploads images...
            contexts:
              # Each key is the name of a context plugin.

              # This context creates temporary users and tenants
              users:
                # These k-v pairs will be passed as arguments to this
                # `users` plugin
                tenants: 2
                users_per_tenant: 10

              # This context sets quotas for the tenants created by the
              # `users` context
              quotas:
                nova:
                  cpu: -1

      -
        title: "Second task to execute"
        description: "Multi Scenario load generation with common context"

        run_in_parallel: True

        # If we put 2 or more scenarios into the `workloads` section, we
        # will run all of them simultaneously, which allows us to generate a
        # more realistic load
        workloads:
          -
            scenario:
              CinderVolumes.create_and_delete:
                size: 10
            runner:
              constant:
                times: 100
                concurrency: 10
            slas:
              failure_rate:
                max: 0
          -
            scenario:
              KeystoneBasic.create_and_delete_users:
                name_length: 20
            runner:
              rps:
                rps: 1
                times: 1000
            slas:
              max_seconds_per_iteration: 10
          -
            scenario:
              PhysicalNode.restart:
                ip: "..."
                user: "..."
                password: "..."
            runner:
              rps:
                rps: 10
                times: 10
            slas:
              max_seconds_per_iteration: 100
            # This scenario is called in its own independent and isolated
            # context
            contexts: {}

        # The global context that is used if a scenario doesn't specify its
        # own
        contexts:
          users:
            tenants: 2
            users_per_tenant: 10


Alternatives
------------

No way


Implementation
==============

Assignee(s)
-----------

Primary assignee:
  boris-42 aka Boris Pavlovic


Work Items
----------

- Implement an OLD -> NEW format converter

- Switch the benchmark engine to use the new format.
  This should affect only the benchmark engine

- Implement a new DB schema format that will allow storing multi-scenario
  output data

- Add support for multi-scenario results processing in rally task
  detailed|sla_check|report

- Add timestamps to tasks, scenarios and atomics

- Add support for using multiple runner instances in a single task with a
  common context

- Add support for a scenario's own context

- Add the ability to use the new format in rally task start

- Deprecate the OLD format


Dependencies
============

None

diff --git a/doc/specs/in-progress/osprofiler.rst b/doc/specs/in-progress/osprofiler.rst
deleted file mode 100644
index 88abead3..00000000
--- a/doc/specs/in-progress/osprofiler.rst
+++ /dev/null
@@ -1,162 +0,0 @@
..
  This work is licensed under a Creative Commons Attribution 3.0 Unported
  License.

  http://creativecommons.org/licenses/by/3.0/legalcode

..
  This template should be in ReSTructured text. The filename in the git
  repository should match the launchpad URL, for example a URL of
  https://blueprints.launchpad.net/rally/+spec/awesome-thing should be named
  awesome-thing.rst . Please do not delete any of the sections in this
  template. If you have nothing to say for a whole section, just write: None
  For help with syntax, see http://sphinx-doc.org/rest.html
  To test out your formatting, see http://www.tele3.cz/jbar/rest/rest.html

=================================
OSProfiler integration into Rally
=================================

OSProfiler is a distributed trace toolkit library. It provides Pythonic
helpers for trace generation that avoid repetitive code for tracing WSGI,
RPC, DB, and other important places...

Integrating OSProfiler into Rally can help dig into concurrency problems of
OpenStack, which is a huge ecosystem of cooperating services.

Problem description
===================

The Rally framework provides a powerful interface to generate real, big
load on a deployment. Such load can kill the cloud, specifically OpenStack.
There is no way to identify the reasons and bottlenecks without parsing
timestamps and logs. To fix that issue, embedding profiling into each
workload iteration can help display a wide picture of where we were at the
particular moment when something went wrong.

Proposed change
===============

Two facts about OSProfiler are the starting point for the proposed changes:

* an HMAC key is used as a secret identifier while profiling
* profiling is initialized in a thread-safe way; profiling one iteration
  should not influence profiling another one

Storing the secret key
----------------------

The HMAC key is not something that will change from one task to another.
It is specific to the deployment, like the authentication URL or other
credentials. That is why the Rally deployment config is the best place to
store such information.

Since OSProfiler is an OpenStack-specific tool, we need to extend the
OpenStack credentials model in Rally to support the new argument. It should
be done in two places: validation (by modifying the jsonschema [0]_) and
the place where the credentials are stored (a specific class [1]_ [2]_).

Initializing profiling
----------------------

As mentioned before, we need to initialize OSProfiler per iteration.
OSProfiler is made thread-safe [3]_, so we should not have problems from
that side.

Initialization of OSProfiler is quite simple.

  .. code-block:: python

     from osprofiler import profiler

     profiler.init(HMAC_KEY)


As for the place where to initialize OSProfiler in Rally, the constructor
of a scenario is a good choice. First of all, we have a separate class for
OpenStack scenarios [4]_, which means that the integration with OSProfiler
there will not affect the other platforms. Another reason for using the
constructor is that we initialize a new instance of the scenario class for
each iteration.

Storing profiling results
-------------------------

OSProfiler sends a message to the collector at every trace point. We should
not care about the supported OSProfiler backends and should use only
OSProfiler as the entry point.

The full trace can be obtained via the trace-id after profiling is
initialized.

  .. code-block:: python

     from osprofiler import profiler

     trace_id = profiler.get().get_base_id()

As the first step of integrating OSProfiler into Rally, let's store that
trace-id as simple text. It will allow showing the trace-id in the Rally
HTML and JSON reports.

  .. code-block:: python

     self.add_output(complete={"title": "OSProfiler Trace-ID",
                               "chart_plugin": "TextArea",
                               "data": [trace_id]})

We can execute these lines in the same place where we initialize
OSProfiler.

In the future, we should develop a separate chart that will embed the
OSProfiler HTML report as a separate tab in the Rally report.

Enabling profiling
------------------

Enabling/disabling profiling should be done via the Rally configuration
file:

* It is the common place for storing different kinds of options.
* There is a planned feature that will allow re-setting config options via
  the deployment config or the task file.

The default value of that option should be True. If the HMAC key is missing
from the credentials, no attempt to initialize OSProfiler should be made.

Alternatives
------------

Here [5]_ you can find the answer for this section.


Implementation
==============

Assignee(s)
-----------

Primary assignee:
  Andrey Kurilin


Work Items
----------

* Extend OpenStack credentials
* Add a new configuration option to Rally
* Extend the OpenStack scenario base class to initialize OSProfiler and
  store the trace id


Dependencies
============

None


References
==========

.. [0] https://github.com/openstack/rally/blob/a5691d7850b5abd7ea707730f0d48d75116d88d3/rally/plugins/openstack/credential.py#L154
.. [1] https://github.com/openstack/rally/blob/a5691d7850b5abd7ea707730f0d48d75116d88d3/rally/plugins/openstack/credential.py#L26
.. [2] https://github.com/openstack/rally/blob/a5691d7850b5abd7ea707730f0d48d75116d88d3/rally/plugins/openstack/credential.py#L161
.. [3] https://github.com/openstack/osprofiler/blob/1.8.0/osprofiler/profiler.py#L29-L30
.. [4] https://github.com/openstack/rally/blob/a5691d7850b5abd7ea707730f0d48d75116d88d3/rally/plugins/openstack/scenario.py#L28-L55
.. [5] https://docs.openstack.org/developer/osprofiler/background.html#why-not-cprofile-and-etc

diff --git a/doc/specs/in-progress/pluggable-types.rst b/doc/specs/in-progress/pluggable-types.rst
deleted file mode 100644
index a61dcf02..00000000
--- a/doc/specs/in-progress/pluggable-types.rst
+++ /dev/null
@@ -1,286 +0,0 @@
..
  This work is licensed under a Creative Commons Attribution 3.0 Unported
  License.

  http://creativecommons.org/licenses/by/3.0/legalcode

=============================
Make Resource Types Pluggable
=============================

Rally's current type resolution subsystem (``rally.task.types``) isn't
easily pluggable, is tied to OpenStack, and cannot handle resources
that must be resolved by the context of each iteration rather than the
context of the subtask. This spec aims to solve all three problems.

Problem description
===================

The Rally ``types.set()`` decorator is used to convert resource names
or regular expressions to resource objects. For instance, in a task
file a user can specify:

.. code-block:: yaml

    image:
      regex: cirros-.*-disk
    flavor:
      name: m1.tiny

Rally will convert those into the matching image ID and flavor ID. As
it currently exists, this process has several shortcomings and bugs:

* Although it is technically pluggable, the resource type classes do
  not call ``rally.common.plugin.configure`` and the code is not
  patterned as a plugin, with code in ``rally.plugins``. Technically,
  a user could implement a subclass of
  ``rally.task.types.ResourceType`` in a plugin and use it, but this
  is not obvious from the code or documentation, and it would not be
  registered as a plugin. Moreover, OpenStack-specific resources are
  in the ``rally.task.types`` module instead of the OpenStack plugin.
* It is tied to OpenStack. ``rally.task.types.preprocess()`` loads an
  OpenStack Clients object and passes it to the resource type objects.
* In some cases, resources must be loaded by the client context
  created for each iteration, not by the admin context. For instance,
  when Glance images are created by the ``images`` context they are
  created as private images in each tenant; trying to load the image
  with the admin context fails. We need to be able to support this use
  case without taking on a significant or universal performance
  penalty.

Proposed change
===============

This change is very involved and is broken into a number of distinct
sections.

Create ``types.convert()``
--------------------------

First, we will add a new function, ``types.convert()``, to replace
``types.set()``. ``types.convert()`` will accept arguments differently
than ``types.set()``. For instance, this:

.. code-block:: python

    @types.set(image=types.ImageResourceType,
               flavor=types.FlavorResourceType)

...will change to:

.. code-block:: python

    @types.convert(image={"type": "glance_image"},
                   flavor={"type": "nova_flavor"})

This has a number of advantages:

* Resource type classes can be renamed or removed, or their interface
  changed, without breaking the public API.
* Users will not have to import types in the code. Currently this is
  only a single module, but this spec proposes to change that.
* Plugins are loaded automatically, rather than users having to import
  them explicitly.
* We can use the existing plugin deprecation mechanisms.
* By passing a dict to ``types.convert()`` instead of a class, we
  could in theory pass arguments to the types. Nothing in this spec
  requires that functionality, but it is provided for the future.
* ``set`` shadows a Python builtin, so by renaming the function we
  eliminate a bit of code that is in violation of the OpenStack Style
  Guidelines.
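
For illustration, a minimal sketch of what ``types.convert()`` could look
like, assuming the decorated scenario exposes Rally's plugin meta API and
that the per-argument configs are simply recorded for later resolution (the
meta key name is an assumption):

.. code-block:: python

    def convert(**kwargs):
        """Mark scenario arguments for type conversion.

        kwargs maps an argument name to a config dict such as
        {"type": "glance_image"}; the actual resolution happens later,
        at preprocessing time.
        """
        def wrapper(func):
            # record the conversion config in the plugin meta
            func._meta_setdefault("preprocessors", {})
            func._meta_get("preprocessors").update(kwargs)
            return func
        return wrapper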

Convert ``ResourceType`` to plugin
----------------------------------

Next, the code will be rearranged to make it obviously pluggable,
and a ``types.configure()`` call will be added to register the
``ResourceType`` subclasses as plugins. OpenStack resources will be
moved into the OpenStack plugin space, and documentation will be added
to make it clear that ``ResourceType`` can be subclassed by other
plugins. The old resource type classes will be left in place, but
deprecated. ``types.set()`` will also be deprecated at this point.

Switch scenarios to ``types.convert()`` and new type plugins
------------------------------------------------------------

After the resource type plugins are created, all existing scenarios will
be changed over to ``types.convert()``. This will allow us to make the
changes below that affect the type conversion API without having to
make further changes to the scenarios.

Change type preprocessing signature
-----------------------------------

The arguments with which each preprocessor is called will be
changed. Instead of:

.. code-block:: python

    def transform(cls, clients, resource_config):

...it will be:

.. code-block:: python

    def preprocess(self, resource_config, context=None, clients=None):

Within the types subsystem proper, only ``context`` will be passed;
``clients`` will remain for compatibility with the validation
subsystem, which does not have a context object yet and remains tied
to OpenStack.

If ``clients`` is not passed to ``transform()``, the responsibility
for creating OpenStack clients (or doing anything else with the
subtask context) will lie with the ``ResourceType`` subclass
itself. This entails a small performance penalty, but it's necessary
to divorce the types subsystem from OpenStack. If ``clients`` is
passed, then a deprecation warning will be logged. When the validation
subsystem is made independent from OpenStack, the ``clients`` keyword
argument should be removed.

This also makes it so that ``ResourceType.transform()`` is no longer a
class method, which will allow the resource classes to retain
persistent information about a single decorated scenario
function. ``transform`` will also be renamed to ``preprocess``, which
will be more consistent with ``rally.task.types.preprocess`` and will
make it easier to add a second resource type resolution hook,
described below.

Add ``ResourceType.map_for_scenario()``
---------------------------------------

A new hook will be added to the runners. In addition to
``ResourceType.preprocess()``, which is run after contexts but before
the scenarios start, ``ResourceType.map_for_scenario(self,
scenario_context, resource_config)`` will run before each iteration of the
scenario. Together with the change to make ``types.set()`` accept
objects instead of classes, this will solve the issue of resources
that must be resolved per-iteration.

For instance, to resolve images, ``ImageResourceType.preprocess()``
would resolve images for each set of credentials created for the
subtask, as well as for the admin credentials, and cache them;
``ImageResourceType.map_for_scenario()`` would be passed the mapped
scenario context and the resource configuration, and would choose the
correct image ID to pass to the scenario. Note that image listing and
resolution is not done by ``map_for_scenario()``; we should strive to
keep the performance profile of ``map_for_scenario()`` as small as
possible.
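
A rough sketch of how a runner could drive this hook once per iteration,
assuming the resolved ``ResourceType`` instances (whose ``preprocess()``
already ran once for the subtask) are available as a mapping (the names
here are illustrative):

.. code-block:: python

    def rewrite_args_for_iteration(preprocessors, scenario_context, kwargs):
        """Rewrite scenario kwargs once per iteration.

        :param preprocessors: mapping of argument name -> ResourceType
                              instance
        :param scenario_context: the context mapped to this iteration
        :param kwargs: the scenario arguments for this iteration
        """
        for arg, resource_type in preprocessors.items():
            if arg in kwargs:
                # map_for_scenario() is cheap: it only picks a cached value
                kwargs[arg] = resource_type.map_for_scenario(
                    scenario_context, kwargs[arg])
        return kwargs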

In order to simplify the type resolution workflow, only
``map_for_scenario()`` will be able to rewrite arguments, but the
default implementation in ``rally.task.types.ResourceType`` will
rewrite them with the value cached in ``preprocess()``. For instance:

.. code-block:: python

    class ResourceType(plugin.Plugin):

        @abc.abstractmethod
        def preprocess(self, resource_config, context=None, clients=None):
            pass

        @abc.abstractmethod
        def map_for_scenario(self, scenario_context, resource_config):
            pass


    class FlavorResourceType(ResourceType):
        def preprocess(self, resource_config, context=None, clients=None):
            self._flavor_id = resource_config.get("id")
            if not self._flavor_id:
                novaclient = clients.nova()
                self._flavor_id = _id_from_name(
                    resource_config=resource_config,
                    resources=novaclient.flavors.list(),
                    typename="flavor")

        def map_for_scenario(self, scenario_context, resource_config):
            return self._flavor_id


    class ImageResourceType(ResourceType):
        def preprocess(self, resource_config, context=None, clients=None):
            self._image_id = resource_config.get("id")
            if not self._image_id:
                self._images = {}
                all_images = clients.glance().images.list()
                for image in all_images:
                    self._images.setdefault(image["owner"], []).append(image)

        def map_for_scenario(self, scenario_context, resource_config):
            if self._image_id:
                return self._image_id
            else:
                return _id_from_name(
                    resource_config=resource_config,
                    resources=self._images[scenario_context["user"]],
                    typename="image")

This demonstrates two different workflows.

Flavors, which exist globally for all users and tenants, can be easily
resolved once, at preprocessing time, and ``map_for_scenario()`` needs
only to substitute the single, canonical flavor ID on each
iteration. This does lead to some redundancy -- flavor arguments will
be rewritten on each iteration, for instance -- but as it's only a
matter of changing a few values in the argument dict, the performance
penalty will be minimal.

Images are more complicated, because images can exist on a per-user
basis and remain invisible to other users. In order to properly
resolve image IDs, we must first find all images in ``preprocess()``,
and then select the correct image for each iteration (and for the user
that maps to each iteration) in ``map_for_scenario()``.

Remove deprecated code
----------------------

Finally, in some future release we will remove the old, deprecated
resource type classes and ``types.set()``.

Alternatives
------------

Type resolution could be done in a single step (as opposed to the two-step
``preprocess()``/``map_for_scenario()``) if we passed the results in
the context object instead of rewriting scenario arguments. This is
less straightforward, though; the scenario author would then need to
know where to look in the context to find the resource object, even
though for any given iteration there is exactly one resource object
that is appropriate.

Implementation
==============

Assignee(s)
-----------

Primary assignee:
  stpierre aka Chris St. Pierre

Work Items
----------

* Create ``types.convert()``.
* Rearrange the code into plugins and add plugin
  documentation. Deprecate ``types.set()`` and the old resource type
  classes.
* Convert existing scenarios to use ``types.convert()``.
* Convert ``ResourceType.transform()`` to
  ``ResourceType.preprocess()`` and create a new abstract intermediate
  subclass, ``OpenStackResourceType``, to which to offload OpenStack
  client creation.
* Add the ``ResourceType.map_for_scenario()`` hook.
* Rewrite any resource types that need to take advantage of the new
  ``map_for_scenario()`` hook. This will likely be limited to
  ``ImageResourceType`` and ``EC2ImageResourceType``. If there are
  obvious patterns that can be abstracted out, then add a new abstract
  intermediate subclass.
* In the indeterminate future, remove the deprecated resource type
  classes and ``types.set()``.

Dependencies
============

None.

diff --git a/doc/specs/in-progress/pluggable_validators.rst b/doc/specs/in-progress/pluggable_validators.rst
deleted file mode 100644
index 139d1909..00000000
--- a/doc/specs/in-progress/pluggable_validators.rst
+++ /dev/null
@@ -1,206 +0,0 @@
..
  This work is licensed under a Creative Commons Attribution 3.0 Unported
  License.

  http://creativecommons.org/licenses/by/3.0/legalcode

..
  This template should be in ReSTructured text. The filename in the git
  repository should match the launchpad URL, for example a URL of
  https://blueprints.launchpad.net/heat/+spec/awesome-thing should be named
  awesome-thing.rst . Please do not delete any of the sections in this
  template. If you have nothing to say for a whole section, just write: None
  For help with syntax, see http://sphinx-doc.org/rest.html
  To test out your formatting, see http://www.tele3.cz/jbar/rest/rest.html


=================================
Rally Task Validation Refactoring
=================================

Problem description
===================

* The current validator system is pluggable, but it doesn't use our plugin
  mechanism, which creates problems (e.g. validators are imported directly
  and used in code instead of being referenced by name, which doesn't allow
  renaming or moving them without breaking backward compatibility).

* The current validation mechanism leads to a lot of OpenStack-related code
  in the Rally task engine.

* It's hard to use the same validators for different types of plugins; the
  current approach is used only for scenarios.

Proposed change
===============

Create a unified validation mechanism that can be used in the same way for
all types of future deployments and all types of plugins. We will then be
able to remove `OpenStack related code `_ from the task engine and create a
bunch of common validators (e.g. jsonschema) that can be used by any
plugin.
As a bonus, the refactoring allows us to switch to the common plugin
mechanism.

Alternatives
------------

No way


Implementation
==============

Here is an example of the base class for all pluggable validators.

.. code-block:: python

    import abc

    import six

    from rally.common.plugin import plugin
    from rally.task import validation


    def configure(name, namespace="default"):
        return plugin.configure(name=name, namespace=namespace)


    @six.add_metaclass(abc.ABCMeta)
    @configure(name="base_validator")
    class Validator(plugin.Plugin):

        def validate(self, cache, deployment, cfg, plugin_cfg):
            """Method that validates something.

            :param cache: a cross-validator cache where different
                          validators can store information about the
                          environment, like initialized OpenStack clients,
                          images, etc., and share it between validators.
                          E.g. if your custom validators need to perform
                          200 OpenStack checks and each validator plugin
                          needs to initialize a client, Rally will take an
                          extra 2 minutes for the validation step. Likewise,
                          it is not efficient to fetch all images each time
                          if we have several image-related validators.
            :param deployment: Deployment object, the deployment that would
                               be used for validation
            :param cfg: dict, configuration of the subtask
            :param plugin_cfg: dict with the exact configuration of the
                               plugin
            """
            pass


    def add(name, **kwargs):
        """Add a validator instance to the validated plugin's meta.

        Get the validator class by name, initialize an instance, and add
        the validator instance to the validators list stored in the
        plugin's meta under the 'validators_v2' key. This is used to
        iterate over and execute all validators during the execution of a
        subtask.

        :param kwargs: dict, arguments used to initialize the validator
                       class instance
        :param name: str, name of the validator plugin
        """
        validator = Validator.get(name)(**kwargs)

        def wrapper(p):
            p._meta_setdefault("validators_v2", [])
            p._meta_get("validators_v2").append(validator)
            return p

        return wrapper


    def validate(plugin, deployment, cfg, plugin_cfg):
        """Execute the validate() method of all stored validators.

        Iterate over all validators stored in the meta of the plugin,
        execute the proper validate() method of each, and add the
        validation results to a list.

        :param plugin: the plugin class instance that has validators and
                       should be validated
        :param deployment: Deployment object, the deployment that would be
                           used for validation
        :param cfg: dict, configuration of the subtask
        :param plugin_cfg: dict with the exact configuration of the plugin
        """
        results = []
        cache = {}

        for v in plugin._meta_get("validators_v2"):
            try:
                v.validate(cache, deployment, cfg, plugin_cfg)
            except Exception as e:
                results.append(validation.ValidationResult(is_valid=False,
                                                           msg=str(e)))
        return results


The new design allows us to use the same validators and the same validation
mechanism for different types of plugins (contexts, SLAs, runners,
scenarios), which was not possible before. For example, we could implement
jsonschema validation as a plugin.

.. code-block:: python

    import jsonschema

    @configure(name="jsonschema")
    class JsonSchemaValidator(Validator):

        def __init__(self, schema=None):
            super(JsonSchemaValidator, self).__init__()
            self.schema = schema or {}

        def validate(self, cache, deployment, cfg, plugin_cfg):
            jsonschema.validate(plugin_cfg, self.schema)


    @validator.add("jsonschema", schema={...})
    class SomeContext(base.Context):
        pass


    class SomeScenario(base.Scenario):

        @validator.add("jsonschema", schema={...})
        def some_function(self):
            pass


Assignee(s)
-----------

Primary assignee:

- boris-42
- rvasilets

Work Items
----------

- Create a validation module with the base plugin and a method of adding
  validators

- Add support for the new validation mechanism to the task engine

- Port all old validators to the new mechanism

- Deprecate the old validation mechanism

- Remove the deprecated mechanism in a later release


Dependencies
============

None

diff --git a/doc/specs/in-progress/raas.rst b/doc/specs/in-progress/raas.rst
deleted file mode 100644
index e14fb7ef..00000000
--- a/doc/specs/in-progress/raas.rst
+++ /dev/null
@@ -1,360 +0,0 @@
..
  This work is licensed under a Creative Commons Attribution 3.0 Unported
  License.

  http://creativecommons.org/licenses/by/3.0/legalcode

..
  This template should be in ReSTructured text. The filename in the git
  repository should match the launchpad URL, for example a URL of
  https://blueprints.launchpad.net/heat/+spec/awesome-thing should be named
  awesome-thing.rst . Please do not delete any of the sections in this
  template. If you have nothing to say for a whole section, just write: None
  For help with syntax, see http://sphinx-doc.org/rest.html
  To test out your formatting, see http://www.tele3.cz/jbar/rest/rest.html

==================
Rally-as-a-Service
==================

Problem description
===================

Having a Rally web service that gives access to Rally functionality via
HTTP is a highly desired feature.

Proposed change
===============

Enhance the Rally API
---------------------

Using Rally as a library (a Python client) seems to be a convenient way to
automate its usage in different applications. The full power of Rally,
however, can currently be accessed only through its command-line interface.
The current Rally API is not powerful enough to be used for
Rally-as-a-Service.

Move all features from the CLI to the API
"""""""""""""""""""""""""""""""""""""""""

The Rally API should provide the same features that are available in the
CLI.

To achieve that, all direct DB calls and Rally objects should be removed
from the CLI layer. The CLI implementation should be restricted to pure API
method calls, and the API should cover everything needed by the CLI
(processing results, making reports, etc.).

Make the API return serializable objects
""""""""""""""""""""""""""""""""""""""""

The Rally API should always return something that can be easily serialized
and sent over HTTP. This is a required change, since we do not want to
duplicate the code used by the CLI and the code that will be used by
Rally-as-a-Service. Both of these entities should wrap the same thing: the
Rally API.

Move from a classmethod model to an instancemethod model in the API
""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""

API methods should not be standalone functions (classmethods).
The instancemethod model establishes the right way of communicating
between different API methods and provides access to API preferences.

Also, it would be nice to create a base class for a single API group.

  .. code-block:: python

     class APIGroup(object):
         def __init__(self, api):
             """Initialize API group.

             :param api: an instance of rally.api.API object
             """
             self.api = api

     class _Task(APIGroup):
         def start(self, deployment, config, task=None,
                   abort_on_sla_failure=False):
             deployment = self.api.deployment._get(deployment)

             ...


Wrap each API method
""""""""""""""""""""

Since usage of the API via HTTP should be similar to direct usage, we need
to wrap each API method with a specific decorator that decides whether to
send an HTTP request or make a direct call to the API.
-
-Wrap each API method
-""""""""""""""""""""
-
-Since usage of the API via HTTP should be similar to the direct usage, we
-need to wrap each API method with a specific decorator which will decide
-whether to send an HTTP request or to make a direct call to the API.
-
-    .. code-block:: python
-
-        from rally import exceptions
-
-        def api_wrapper(path, method):
-            def decorator(func):
-                def inner(self, *args, **kwargs):
-                    if args:
-                        raise TypeError("It is restricted to use positional"
-                                        " arguments for API calls.")
-
-                    if self.api.endpoint_url:
-                        # it's a call to the remote Rally instance
-                        return self._request(path, method, **kwargs)
-                    else:
-                        try:
-                            return func(self, **kwargs)
-                        except Exception as e:
-                            # NOTE(andreykurilin): we need to use the same
-                            #     error handling as is done in the
-                            #     dispatcher, so one error will have the same
-                            #     representation in both cases - via direct
-                            #     use and via HTTP
-                            raise exceptions.make_exception(e)
-
-                inner.path = path
-                inner.method = method
-
-                return inner
-            return decorator
-
-
-The specific ``_request`` method for handling all communication details,
-serialization and errors should be implemented in the common APIGroup class.
-
-    .. code-block:: python
-
-        import collections
-
-        import requests
-
-        from rally import exceptions
-
-        class APIGroup(object):
-
-            def _request(self, path, method, **kwargs):
-                response = requests.request(method, path, json=kwargs)
-                if response.status_code != 200:
-                    raise exceptions.find_exception(response)
-
-                # use OrderedDict by default for all cases
-                return response.json(
-                    object_pairs_hook=collections.OrderedDict)["result"]
-
-
-Rally-as-a-Service implementation
----------------------------------
-
-The code base of Rally-as-a-Service should be located in the ``rally.aas``
-module.
-
-The application should discover all API methods and check their properties to
-identify methods that should be available via HTTP.
-
-    .. code-block:: python
-
-        from rally import api
-
-        def discover_routes(rapi):
-            """Collect HTTP routes from an API instance.
-
-            :param rapi: an instance of rally.api.API
-            """
-            routes = []
-            for group, obj in vars(rapi).items():
-                if not isinstance(obj, APIGroup):
-                    continue
-
-                for name, method in vars(type(obj)).items():
-                    if name.startswith("_"):
-                        # do not touch private methods
-                        continue
-                    if hasattr(method, "path") and hasattr(method, "method"):
-                        routes.append({"path": "%s/%s" % (group, method.path),
-                                       "method": method.method,
-                                       "handler": method})
-            return routes
-
-
-Since we have custom data, errors, etc., we need a custom preparation method
-too.
-
-    .. code-block:: python
-
-        import json
-
-        def dispatch(func, kwargs):
-            """Call a handler and wrap its result or error into a response.
-
-            :param func: method to call
-            """
-            response = {}
-            status_code = 200
-            try:
-                response["result"] = func(**kwargs)
-            except Exception as e:
-                status_code = getattr(e, "http_code", 500)
-                response["error"] = {"name": type(e).__name__,
-                                     "msg": str(e),
-                                     "args": getattr(e, "args", [])}
-            return json.dumps(response, sort_keys=False), status_code
-
-
-Most of the routing and dispatching will be done via our own methods and
-decorators, so our requirements for a web framework are simple - we do not
-need much from it.
-
-Let's start with the Flask web framework. It is quite simple, lightweight and
-compatible with WSGI. In the future, it should not be too difficult to switch
-away from it.
-
-Since there are a lot of blocking calls in Rally, only read-only methods (the
-"GET" method type) should be allowed in the first implementation of
-Rally-as-a-Service.
-
-    .. code-block:: python
-
-        import flask
-
-
-        class Application(object):
-
-            API_PATH_TEMPLATE = "/api/v%(version)s/%(path)s"
-
-            def __init__(self, rapi):
-                self.rapi = rapi
-                self.app = flask.Flask("OpenStack Rally")
-                self.app.add_url_rule("/<path:path>", "api", view_func=self,
-                                      methods=["GET"])
-                self._routes = dict(
-                    (self.API_PATH_TEMPLATE % {
-                        "version": rapi.get_api_version(),
-                        "path": r["path"]},
-                     r["handler"])
-                    for r in discover_routes(rapi))
-
-            def __call__(self, path):
-                path = "/" + path
-                if path not in self._routes:
-                    # unknown path, so answer with 404
-                    flask.abort(404)
-                return dispatch(self._routes[path],
-                                dict(flask.request.args))
-
-            def start(self, ip, port):
-                self.app.run(ip, port)
-
-
-Routing convention
-""""""""""""""""""
-
-The routes for each API method should match the following format:
-
-    ``/api/v<version>/<group>/<method>``
-
-where
-
-* ``<version>`` is a version of the API. We do not provide versioning of the
-  API yet, so let's put "1" for now.
-* ``<group>`` can be task, deployment, verification, etc.
-* ``<method>`` should represent the name of the method to call.
-
-Example of a possible path: ``/api/v1/task/validate``
-
-Exception refactoring
----------------------
-
-To make the existing exception classes from the ``rally.exceptions`` module
-usable in case of RaaS, they should:
-
-* store their initialization arguments, so it will be possible to re-create
-  the object;
-* contain an error code as a property.
-
-Serialization/De-serialization of exceptions
-""""""""""""""""""""""""""""""""""""""""""""
-
-Exceptions should be serializable like any other return data. The
-serialization mechanism is described with the ``dispatch`` method above.
-
-De-serialization should look like:
-
-    .. code-block:: python
-
-        exception_map = dict((e.error_code, e)
-                             for e in RallyException.subclasses())
-
-        def find_exception(response):
-            """Discover a proper exception class based on response object."""
-            exc_class = exception_map.get(response.status_code,
-                                          RallyException)
-            error_data = response.json()["error"]
-            if error_data["args"]:
-                return exc_class(error_data["args"])
-            return exc_class(error_data["msg"])
-
-
-As mentioned previously, exception objects should be the same in case of
-direct and HTTP communication. To make this possible, a specific check
-function should be implemented:
-
-    .. code-block:: python
-
-        def make_exception(exc):
-            """Check the class of an exception and convert it to a
-            rally-like one if needed."""
-            if isinstance(exc, RallyException):
-                return exc
-            return RallyException(str(exc))
-
-
-Command Line Interface
-----------------------
-
-The CLI should be extended with a specific global argument
-``--endpoint-url`` for using the remote mode.
-
-Rally-as-a-Service itself should be started via a new command:
-
-    .. code-block:: console
-
-        $ rally-manage service start
-
-Rally Web Portal
-----------------
-
-A Web Portal for Rally can be a good addition. Its implementation can be done
-on top of Rally-as-a-Service, which should handle all the HTTP stuff.
-
-Since the read-only mode of RaaS will be enabled from the first stages, the
-Web Portal can start by providing tables with the results of Tasks and
-Verifications. These tables should be able to filter results by different
-fields (tags, time, deployment, etc.) and build regular or trend reports for
-the selected results.
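-
-To illustrate the end-to-end flow that both the CLI remote mode and the Web
-Portal would rely on, this is roughly what a remote call could look like
-once the pieces above land (a minimal sketch: the host and port are
-hypothetical, while the path format and the "result"/"error" envelope follow
-the conventions described in this spec):
-
-    .. code-block:: python
-
-        import requests
-
-        # only read-only ("GET") methods are exposed at first
-        resp = requests.get("http://127.0.0.1:8000/api/v1/task/list")
-
-        if resp.status_code != 200:
-            # the dispatcher puts error details under the "error" key
-            raise RuntimeError(resp.json()["error"]["msg"])
-
-        # successful payloads are wrapped under the "result" key
-        tasks = resp.json()["result"]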
-
-
-Alternatives
-------------
-
-n/a
-
-Implementation
-==============
-
-Assignee(s)
------------
-
-Primary assignee(s):
-
-    Andrey Kurilin
-    Hai Shi
-
-
-Work Items
-----------
-
-* Make return data of the Verify/Verification API serializable
-* Make return data of the Task API serializable
-* Make return data of the Deployment API serializable
-* Implement the base class for API groups and port the Deployment, Task,
-  Verify and Verification APIs to it
-* Refactor exceptions
-* Implement the `api_wrapper` decorator and wrap all methods of each API
-  group
-* Implement the base logic for as-a-Service
-* Extend the CLI
-* Add simple pages for the Web Portal
-
-Dependencies
-============
-
-n/a
diff --git a/doc/specs/in-progress/refactor_scenario_utils.rst b/doc/specs/in-progress/refactor_scenario_utils.rst
deleted file mode 100644
index 882d5ef9..00000000
--- a/doc/specs/in-progress/refactor_scenario_utils.rst
+++ /dev/null
@@ -1,357 +0,0 @@
-..
-    This work is licensed under a Creative Commons Attribution 3.0 Unported
-    License.
-
-    http://creativecommons.org/licenses/by/3.0/legalcode
-
-
-=========================================================
- Refactor scenarios' utils into central os-services tree
-=========================================================
-
-It's hard to reuse code from the different scenario utils in areas like
-contexts.
-
-
-Problem description
-===================
-
-* Code that wraps OpenStack services in the different scenario utils is
-  difficult to reuse in context plugins (or, sometimes, in other scenario
-  plugins), which causes code duplication.
-
-* Wrappers don't fully integrate with the current structure (example: network
-  operations need to alternate between calls to utils and calls to network
-  wrappers).
-
-* It is impossible to do versioning of the current utils, which makes them
-  hard to reuse as a base for out-of-tree plugins.
-
-* It is not possible to have common functionality (e.g. network) separated
-  from implementation-specific features (nova-network and neutron).
-
-
-Proposed change
-===============
-
-Group all service-related utils under a single tree accessible from all
-areas of the project.
-Also, the inheritance structure in scenarios is problematic. This would be a
-great opportunity to move to composition.
-
-Alternatives
-------------
-
-None comes to mind.
-
-Implementation
-==============
-
-Current source tree
--------------------
-
-.. code-block::
-
-    rally/
-    |
-    +-- plugins/
-        +-- openstack/
-        |   +-- scenarios/
-        |   |   |
-        |   |   +-- nova/
-        |   |   |   |
-        |   |   |   +-- servers.py
-        |   |   |   |
-        |   |   |   +-- utils.py
-        |   |   |
-        |   |   +-- ...
-        |   +-- wrappers/
-        |       |
-        |       +-- keystone.py
-        |       |
-        |       +-- network.py
-
-keystone scenarios use plugins/openstack/scenarios/keystone/utils.py
-
-.. code-block:: python
-
-    @atomic.action_timer("keystone.create_tenant")
-    def _tenant_create(self, name_length=10, **kwargs):
-        """Creates keystone tenant with random name.
-
-        :param name_length: length of generated (random) part of name
-        :param kwargs: Other optional parameters
-        :returns: keystone tenant instance
-        """
-        name = self._generate_random_name(length=name_length)
-        return self.admin_clients("keystone").tenants.create(name, **kwargs)
-
-.. code-block:: python
-
-    class KeystoneBasic(kutils.KeystoneScenario):
-        """Basic benchmark scenarios for Keystone."""
-
-        @validation.number("name_length", minval=10)
-        @validation.required_openstack(admin=True)
-        @scenario.configure(context={"admin_cleanup": ["keystone"]})
-        def create_tenant(self, name_length=10, **kwargs):
-            """Create a keystone tenant with random name.
-
-            :param name_length: length of the random part of tenant name
-            :param kwargs: Other optional parameters
-            """
-            self._tenant_create(name_length=name_length, **kwargs)
-
-while keystone contexts use
-plugins/openstack/wrappers/keystone.py
-
-.. code-block:: python
-
-    @six.add_metaclass(abc.ABCMeta)
-    class KeystoneWrapper(object):
-        def __init__(self, client):
-            self.client = client
-
-        def __getattr__(self, attr_name):
-            return getattr(self.client, attr_name)
-
-        @abc.abstractmethod
-        def create_project(self, project_name, domain_name="Default"):
-            """Creates new project/tenant and return project object.
-
-            :param project_name: Name of project to be created.
-            :param domain_name: Name or id of domain where to create project;
-                                for implementations that don't support domains
-                                this argument must be None or 'Default'.
-            """
-
-        @abc.abstractmethod
-        def delete_project(self, project_id):
-            """Deletes project."""
-
-
-    class KeystoneV2Wrapper(KeystoneWrapper):
-        def create_project(self, project_name, domain_name="Default"):
-            self._check_domain(domain_name)
-            tenant = self.client.tenants.create(project_name)
-            return KeystoneV2Wrapper._wrap_v2_tenant(tenant)
-
-        def delete_project(self, project_id):
-            self.client.tenants.delete(project_id)
-
-    class KeystoneV3Wrapper(KeystoneWrapper):
-        def create_project(self, project_name, domain_name="Default"):
-            domain_id = self._get_domain_id(domain_name)
-            project = self.client.projects.create(
-                name=project_name, domain=domain_id)
-            return KeystoneV3Wrapper._wrap_v3_project(project)
-
-        def delete_project(self, project_id):
-            self.client.projects.delete(project_id)
-
-Users context:
-
-.. code-block:: python
-
-    @context.configure(name="users", order=100)
-    class UserGenerator(UserContextMixin, context.Context):
-        """Context class for generating temporary users/tenants
-        for benchmarks.
-        """
-
-        def _create_tenants(self):
-            cache["client"] = keystone.wrap(clients.keystone())
-            tenant = cache["client"].create_project(
-                self.PATTERN_TENANT % {"task_id": task_id, "iter": i}, domain)
-
-Suggested change
-----------------
-
-.. code-block::
-
-    plugins/
-    |
-    +-- openstack/
-        |
-        |
-        +-- scenarios/
-        |   |
-        |   |
-        |   +-- neutron/
-        |   +-- authenticate/
-        |
-        +-- services/
-            |   # Here we will store base code for openstack services,
-            |   # like wait_for and wait_for_delete.
-            +-- base.py
-            |
-            +-- compute/
-            |   |
-            |   +-- compute.py
-            |
-            +-- identity/
-            |   |   # The common service for things we want to do regardless
-            |   |   # of which API version/service is used; parts that can
-            |   |   # be done with either implementation live here.
-            |   +-- identity.py
-            |   |   # APIs for working with a specific API version/service,
-            |   |   # like keystone_v2/keystone_v3 or nova_network/neutron.
-            |   |   # These are used by identity.py for the implementation.
-            |   +-- keystone_v2.py
-            |   |
-            |   +-- keystone_v3.py
-            |
-            +-- network/
-            |   |   # The common service for things we want to do regardless
-            |   |   # of which API/service is used; parts that can be done
-            |   |   # with either implementation live here.
-            |   +-- network.py
-            |   |   # APIs for working with a specific API version/service,
-            |   |   # like nova_network/neutron. These are used by
-            |   |   # network.py for the implementation.
-            |   +-- nova_network.py
-            |   |
-            |   +-- neutron.py
-            |
-            +-- ...
-
-
-The base class that allows us to use atomic actions in services lives in
-rally/plugins/openstack/services/base.py:
-
-
-.. code-block:: python
-
-    class Service(object):
-        def __init__(self, clients, atomic_inst=None):
-            self.clients = clients
-            if atomic_inst:
-                if not isinstance(atomic_inst, ActionTimerMixin):
-                    raise TypeError()
-
-                # NOTE(boris-42): This allows us to use atomic action
-                #                 decorators, but they will add values
-                #                 to the scenario or context instance.
-                self._atomic_actions = atomic_inst._atomic_actions
-            else:
-                # NOTE(boris-42): If one is using this outside of scenarios
-                #                 and contexts, the Service instance will
-                #                 store the atomic actions data itself.
-                self._atomic_actions = collections.OrderedDict()
-
-
-Implementation of IdentityService in services/identity/identity.py:
-
-
-.. code-block:: python
-
-    class IdentityService(Service):
-        """Contains only common methods for Keystone V2 and V3."""
-
-        def __init__(self, clients, atomic_inst=None, version=None):
-            super(IdentityService, self).__init__(clients,
-                                                  atomic_inst=atomic_inst)
-
-            if version:
-                if version == "2":
-                    self.impl = KeystoneV2Service()
-                else:
-                    self.impl = KeystoneV3Service()
-            else:
-                self.impl = auto_discover_version()
-
-        def project_create(self, name, **kwargs):
-            result = self.impl.project_create(name)
-            # handle the difference between implementations
-            return magic(result)
-
-        # ...
-
-
-Inside services/identity/keystone_v2.py:
-
-.. code-block:: python
-
-    class KeystoneV2Service(KeystoneService):
-
-        # NOTE(boris-42): we can use specific atomic action names
-        #                 for a specific implementation of the service
-        @atomic.action_timer("keystone_v2.tenant_create")
-        def project_create(self, project_name):
-            """Implementation."""
-
-
-Inside services/identity/keystone_v3.py:
-
-.. code-block:: python
-
-    class KeystoneV3Service(KeystoneService):
-
-        @atomic.action_timer("keystone_v3.project_create")
-        def project_create(self, project_name):
-            """Implementation."""
-
-        def domain_create(self, *args, **kwargs):
-            """Method specific to Keystone V3."""
-
-
-Both context.keystone and scenario.keystone can now use services/identity.py.
-
-Usage is the same in contexts and scenarios, so it's enough to show the
-scenario case.
-
-.. code-block:: python
-
-    from rally.plugins.openstack.services.identity import identity
-    from rally.plugins.openstack.services.identity import keystone_v3
-
-    class KeystoneBasic(scenario.OpenStackScenario):  # no more utils.py
-        """Basic benchmark scenarios for Keystone."""
-
-        @validation.number("name_length", minval=10)
-        @validation.required_openstack(admin=True)
-        @scenario.configure(context={"admin_cleanup": ["keystone"]})
-        def create_tenant(self, name_length=10, **kwargs):
-            """Create a keystone tenant with random name.
-
-            :param name_length: length of the random part of tenant name
-            :param kwargs: Other optional parameters
-            """
-
-            name = self._generate_random_name(length=name_length)
-            # NOTE(boris-42): The code below works with both keystone V2
-            #                 and V3. It will also add an atomic action
-            #                 whose name will be
-            #                 "keystone_v3.project_create" or
-            #                 "keystone_v2.tenant_create" depending on the
-            #                 used version.
-            identity.IdentityService(self.clients, self).project_create(
-                name, **kwargs)
-
-            # NOTE(boris-42): If you need an operation specific to
-            #                 keystone v3:
-            keystone_v3.KeystoneV3Service(self.clients, self).domain_create()
-
-            # NOTE(boris-42): One of the nice things is that we can move
-            #                 initialization of services to the __init__
-            #                 method of the scenario.
-
-Assignee(s)
------------
-
-  - boris-42
-
-Work Items
-----------
-
-#. Create a base.Service class
-#. Create services for each project
-#. Use services instead of utils in all scenarios and contexts
-#. Deprecate the utils
-#. Remove the utils
-
-
-Dependencies
-============
-
-none
diff --git a/doc/specs/in-progress/task_and_verification_export.rst b/doc/specs/in-progress/task_and_verification_export.rst
deleted file mode 100644
index 242a36d4..00000000
--- a/doc/specs/in-progress/task_and_verification_export.rst
+++ /dev/null
@@ -1,109 +0,0 @@
-..
-    This work is licensed under a Creative Commons Attribution 3.0 Unported
-    License.
-
-    http://creativecommons.org/licenses/by/3.0/legalcode
-
-..
-    This template should be in ReSTructured text. The filename in the git
-    repository should match the launchpad URL, for example a URL of
-    https://blueprints.launchpad.net/heat/+spec/awesome-thing should be named
-    awesome-thing.rst . Please do not delete any of the sections in this
-    template. If you have nothing to say for a whole section, just write: None
-    For help with syntax, see http://sphinx-doc.org/rest.html
-    To test out your formatting, see http://www.tele3.cz/jbar/rest/rest.html
-
-
-======================================================
-Export tasks and verifications into external services
-======================================================
-
-Currently Rally stores all information about executed tasks and verifications
-in its database, and it is also able to provide this data in JSON format or
-in the form of HTML reports. There is a request for Rally to export this data
-into external services (like test management systems or Google Docs)
-via its API.
-
-Problem description
-===================
-
-There are many test management systems on the market, including a lot of
-proprietary ones, available as SaaS and/or on-premises - TestRail, TestLink,
-TestLodge, etc. - whose objective is to manage, organize and track all
-testing efforts.
-
-Most of the systems provide an API for importing test data. The systems also
-have a data model somewhat similar to Rally's. It usually includes (among
-others) models for a project, test suite, test case, test plan and test
-execution results.
-
-It is suggested to provide Rally users the ability to export information
-about testing their environments into such test management systems in order
-to integrate benchmarking via Rally into the rest of their testing
-activities.
-
-Since different test management systems have similar yet different APIs for
-this purpose, it is reasonable to implement the export functionality via
-plugins.
-
-Proposed change
-===============
-
-1. Implement a base class Exporter for an export plugin at
-   *rally/task/exporter.py*.
-
-.. code-block:: python
-
-    class Exporter(plugin.Plugin):
-        def export(self, task, connection_string):
-            ...
-
-2. Implement a CLI command of the form
-
-.. code-block:: shell
-
-    rally task export
-
-3. Implement a base class VerifyExporter for an export plugin at
-   *rally/verify/exporter.py*.
-
-.. code-block:: python
-
-    class VerifyExporter(plugin.Plugin):
-        def export(self, verification, connection_string):
-            ...
-
-4. Implement a CLI command of the form
-
-.. code-block:: shell
-
-    rally verify export
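-
-For illustration, a minimal exporter plugin built on this base class could
-look as follows. This is only a sketch: the ``configure`` registration
-decorator, the plugin name and the ``file://`` destination scheme are
-assumptions for the example, not part of this spec.
-
-.. code-block:: python
-
-    import json
-
-    from rally.task import exporter
-
-    @exporter.configure(name="json-file")  # hypothetical registration
-    class JSONFileExporter(exporter.Exporter):
-
-        def export(self, task, connection_string):
-            # the connection string carries the destination, e.g.
-            # "file:///tmp/task.json" (an assumed scheme), and the task
-            # is assumed to already be a serializable dict
-            path = connection_string[len("file://"):]
-            with open(path, "w") as f:
-                json.dump(task, f)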
-
-Alternatives
-------------
-
-No way
-
-
-Implementation
-==============
-
-Assignee(s)
------------
-
-Primary assignee:
-
-rvasilets
-
-Work Items
-----------
-
-- Implement the plugin base class
-
-- Implement the CLI command
-
-- Implement a plugin for TestRail
-
-Dependencies
-============
-
-None
diff --git a/doc/specs/template.rst b/doc/specs/template.rst
deleted file mode 100644
index 02e17c08..00000000
--- a/doc/specs/template.rst
+++ /dev/null
@@ -1,88 +0,0 @@
-..
-    This work is licensed under a Creative Commons Attribution 3.0 Unported
-    License.
-
-    http://creativecommons.org/licenses/by/3.0/legalcode
-
-..
-    This template should be in ReSTructured text. The filename in the git
-    repository should match the launchpad URL, for example a URL of
-    https://blueprints.launchpad.net/rally/+spec/awesome-thing should be named
-    awesome-thing.rst . Please do not delete any of the sections in this
-    template. If you have nothing to say for a whole section, just write: None
-    For help with syntax, see http://sphinx-doc.org/rest.html
-    To test out your formatting, see http://www.tele3.cz/jbar/rest/rest.html
-
-======================
-The title of your Spec
-======================
-
-Rally Road map:
-
-https://docs.google.com/a/mirantis.com/spreadsheets/d/16DXpfbqvlzMFaqaXAcJsBzzpowb_XpymaK2aFY2gA2g/edit#gid=0
-
-
-Introduction paragraph -- why are we doing anything?
-
-Problem description
-===================
-
-A detailed description of the problem.
-
-Proposed change
-===============
-
-Here is where you cover the change you propose to make in detail. How do you
-propose to solve this problem?
-
-If this is one part of a larger effort, make it clear where this piece ends.
-In other words, what's the scope of this effort?
-
-Include where in the rally tree hierarchy this will reside.
-
-Alternatives
-------------
-
-This is an optional section; where it does apply, we'd just like a
-demonstration that some thought has been put into why the proposed approach
-is the best one.
-
-Implementation
-==============
-
-Assignee(s)
------------
-
-Who is leading the writing of the code? Or is this a blueprint where you're
-throwing it out there to see who picks it up?
-
-If more than one person is working on the implementation, please designate
-the primary author and contact.
-
-Primary assignee:
-
-
-Can optionally list additional ids if they intend on doing substantial
-implementation work on this blueprint.
-
-Work Items
-----------
-
-Work items or tasks -- break the feature up into the things that need to be
-done to implement it. Those parts might end up being done by different
-people, but we're mostly trying to understand the timeline for
-implementation.
-
-
-Dependencies
-============
-
-- Include specific references to specs and/or blueprints in rally, or in
-  other projects, that this one either depends on or is related to.
-
-- Does this feature require any new library dependencies or code otherwise
-  not included in OpenStack? Or does it depend on a specific version of a
-  library?
-
-
-References
-==========
-
-Links to some external resources.
diff --git a/doc/user_stories/keystone/authenticate.rst b/doc/user_stories/keystone/authenticate.rst
deleted file mode 100644
index ef9d0997..00000000
--- a/doc/user_stories/keystone/authenticate.rst
+++ /dev/null
@@ -1,115 +0,0 @@
-====================================================================================
-4x performance increase in Keystone inside Apache using the token creation benchmark
-====================================================================================
-
-*(Contributed by Neependra Khare, Red Hat)*
-
-Below we describe how we were able to get and verify 4x better performance of
-Keystone inside Apache. To do that, we ran a Keystone token creation benchmark
-with Rally under different loads (this benchmark scenario essentially just
-authenticates users with Keystone to get tokens).
-
-Goal
-----
-- Get data about the performance of token creation under different loads.
-- Ensure that Keystone with increased public_workers/admin_workers values,
-  as well as Keystone under Apache, works better than the default setup.
-
-Summary
--------
-- As the concurrency increases, the time to authenticate a user goes up.
-- Keystone is a CPU-bound process, and by default only one thread of the
-  *keystone-all* process gets started. We can increase the parallelism by:
-
-  1. increasing the *public_workers/admin_workers* values in the
-     *keystone.conf* file
-  2. running Keystone inside Apache
-
-- We configured Keystone with 4 *public_workers* and ran Keystone inside
-  Apache. In both cases we got up to 4x better performance compared to the
-  default Keystone configuration.
-
-Setup
------
-Server : Dell PowerEdge R610
-
-CPU make and model : Intel(R) Xeon(R) CPU X5650 @ 2.67GHz
-
-CPU count: 24
-
-RAM : 48 GB
-
-Devstack - Commit#d65f7a2858fb047b20470e8fa62ddaede2787a85
-
-Keystone - Commit#455d50e8ae360c2a7598a61d87d9d341e5d9d3ed
-
-Keystone API - 2
-
-To increase public_workers - Uncomment the line with *public_workers* in
-*keystone.conf*, set *public_workers* to 4 and restart the Keystone service.
-
-To run Keystone inside Apache - Add *APACHE_ENABLED_SERVICES=key* to the
-*localrc* file while setting up the OpenStack environment with Devstack.
-
-
-Results
--------
-
-1. Concurrency = 4
-
-.. code-block:: json
-
-    {'context': {'users': {'concurrent': 30,
-                           'tenants': 12,
-                           'users_per_tenant': 512}},
-     'runner': {'concurrency': 4, 'times': 10000, 'type': 'constant'}}
-
-
-+--------+-----------+-----------+-----------+---------------+---------------+---------+-------+-----------------------+--------------+
-| action | min (sec) | avg (sec) | max (sec) | 90 percentile | 95 percentile | success | count |apache enabled keystone|public_workers|
-+--------+-----------+-----------+-----------+---------------+---------------+---------+-------+-----------------------+--------------+
-| total  | 0.537     | 0.998     | 4.553     | 1.233         | 1.391         | 100.0%  | 10000 | N                     | 1            |
-+--------+-----------+-----------+-----------+---------------+---------------+---------+-------+-----------------------+--------------+
-| total  | 0.189     | 0.296     | 5.099     | 0.417         | 0.474         | 100.0%  | 10000 | N                     | 4            |
-+--------+-----------+-----------+-----------+---------------+---------------+---------+-------+-----------------------+--------------+
-| total  | 0.208     | 0.299     | 3.228     | 0.437         | 0.485         | 100.0%  | 10000 | Y                     | NA           |
-+--------+-----------+-----------+-----------+---------------+---------------+---------+-------+-----------------------+--------------+
-
-
-2. Concurrency = 16
-
-.. code-block:: json
-
-    {'context': {'users': {'concurrent': 30,
-                           'tenants': 12,
-                           'users_per_tenant': 512}},
-     'runner': {'concurrency': 16, 'times': 10000, 'type': 'constant'}}
-
-+--------+-----------+-----------+-----------+---------------+---------------+---------+-------+-----------------------+--------------+
-| action | min (sec) | avg (sec) | max (sec) | 90 percentile | 95 percentile | success | count |apache enabled keystone|public_workers|
-+--------+-----------+-----------+-----------+---------------+---------------+---------+-------+-----------------------+--------------+
-| total  | 1.036     | 3.905     | 11.254    | 5.258         | 5.700         | 100.0%  | 10000 | N                     | 1            |
-+--------+-----------+-----------+-----------+---------------+---------------+---------+-------+-----------------------+--------------+
-| total  | 0.187     | 1.012     | 5.894     | 1.61          | 1.856         | 100.0%  | 10000 | N                     | 4            |
-+--------+-----------+-----------+-----------+---------------+---------------+---------+-------+-----------------------+--------------+
-| total  | 0.515     | 0.970     | 2.076     | 1.113         | 1.192         | 100.0%  | 10000 | Y                     | NA           |
-+--------+-----------+-----------+-----------+---------------+---------------+---------+-------+-----------------------+--------------+
-
-
-3. Concurrency = 32
-
-.. code-block:: json
-
-    {'context': {'users': {'concurrent': 30,
-                           'tenants': 12,
-                           'users_per_tenant': 512}},
-     'runner': {'concurrency': 32, 'times': 10000, 'type': 'constant'}}
-
-+--------+-----------+-----------+-----------+---------------+---------------+---------+-------+-----------------------+--------------+
-| action | min (sec) | avg (sec) | max (sec) | 90 percentile | 95 percentile | success | count |apache enabled keystone|public_workers|
-+--------+-----------+-----------+-----------+---------------+---------------+---------+-------+-----------------------+--------------+
-| total  | 1.493     | 7.752     | 16.007    | 10.428        | 11.183        | 100.0%  | 10000 | N                     | 1            |
-+--------+-----------+-----------+-----------+---------------+---------------+---------+-------+-----------------------+--------------+
-| total  | 0.198     | 1.967     | 8.54      | 3.223         | 3.701         | 100.0%  | 10000 | N                     | 4            |
-+--------+-----------+-----------+-----------+---------------+---------------+---------+-------+-----------------------+--------------+
-| total  | 1.115     | 1.986     | 6.224     | 2.133         | 2.244         | 100.0%  | 10000 | Y                     | NA           |
-+--------+-----------+-----------+-----------+---------------+---------------+---------+-------+-----------------------+--------------+
diff --git a/doc/user_stories/nova/boot_server.rst b/doc/user_stories/nova/boot_server.rst
deleted file mode 100644
index 8557c7bc..00000000
--- a/doc/user_stories/nova/boot_server.rst
+++ /dev/null
@@ -1,196 +0,0 @@
-==========================================================================================
-Finding a Keystone bug while benchmarking 20 node HA cloud performance at creating 400 VMs
-==========================================================================================
-
-*(Contributed by Alexander Maretskiy, Mirantis)*
-
-Below we describe how we found a `bug in Keystone`_ and achieved a 2x average
-performance increase in booting Nova servers after fixing that bug. Our
-initial goal was to benchmark the booting of a significant number of servers
-on a cluster (running on a custom build of `Mirantis OpenStack`_ v5.1) and to
-ensure that this operation has reasonable performance and completes with no
-errors.
-
-Goal
-----
-
-- Get data on how a cluster behaves when a huge number of servers is started
-- Get data on how well the Neutron component performs in this case
-
-Summary
--------
-
-- Creating 400 servers with configured networking
-- Servers are created simultaneously - 5 servers at a time
-
-Hardware
---------
-
-We had a real hardware lab with 20 nodes:
-
-+--------+-------------------------------------------------------+
-| Vendor | SUPERMICRO SUPERSERVER                                |
-+--------+-------------------------------------------------------+
-| CPU    | 12 cores, Intel(R) Xeon(R) CPU E5-2620 v2 @ 2.10GHz   |
-+--------+-------------------------------------------------------+
-| RAM    | 32GB (4 x Samsung DDRIII 8GB)                         |
-+--------+-------------------------------------------------------+
-| HDD    | 1TB                                                   |
-+--------+-------------------------------------------------------+
-
-Cluster
--------
-
-This cluster was created via the Fuel Dashboard interface.
-
-+----------------------+--------------------------------------------+
-| Deployment           | Custom build of `Mirantis OpenStack`_ v5.1 |
-+----------------------+--------------------------------------------+
-| OpenStack release    | Icehouse                                   |
-+----------------------+--------------------------------------------+
-| Operating System     | Ubuntu 12.04.4                             |
-+----------------------+--------------------------------------------+
-| Mode                 | High availability                          |
-+----------------------+--------------------------------------------+
-| Hypervisor           | KVM                                        |
-+----------------------+--------------------------------------------+
-| Networking           | Neutron with GRE segmentation              |
-+----------------------+--------------------------------------------+
-| Controller nodes     | 3                                          |
-+----------------------+--------------------------------------------+
-| Compute nodes        | 17                                         |
-+----------------------+--------------------------------------------+
-
-Rally
------
-
-**Version**
-
-For this benchmark, we used a custom Rally build with the following patch:
-
-https://review.openstack.org/#/c/96300/
-
-**Deployment**
-
-Rally was deployed for the cluster using the `ExistingCloud`_ type of
-deployment.
-
-**Server flavor**
-
-.. code-block:: console
-
-    $ nova flavor-show ram64
-    +----------------------------+--------------------------------------+
-    | Property                   | Value                                |
-    +----------------------------+--------------------------------------+
-    | OS-FLV-DISABLED:disabled   | False                                |
-    | OS-FLV-EXT-DATA:ephemeral  | 0                                    |
-    | disk                       | 0                                    |
-    | extra_specs                | {}                                   |
-    | id                         | 2e46aba0-9e7f-4572-8b0a-b12cfe7e06a1 |
-    | name                       | ram64                                |
-    | os-flavor-access:is_public | True                                 |
-    | ram                        | 64                                   |
-    | rxtx_factor                | 1.0                                  |
-    | swap                       |                                      |
-    | vcpus                      | 1                                    |
-    +----------------------------+--------------------------------------+
-
-**Server image**
-
-.. code-block:: console
-
-    $ nova image-show TestVM
-    +----------------------------+-------------------------------------------------+
-    | Property                   | Value                                           |
-    +----------------------------+-------------------------------------------------+
-    | OS-EXT-IMG-SIZE:size       | 13167616                                        |
-    | created                    | 2014-08-21T11:18:49Z                            |
-    | id                         | 7a0d90cb-4372-40ef-b711-8f63b0ea9678            |
-    | metadata murano_image_info | {"title": "Murano Demo", "type": "cirros.demo"} |
-    | minDisk                    | 0                                               |
-    | minRam                     | 64                                              |
-    | name                       | TestVM                                          |
-    | progress                   | 100                                             |
-    | status                     | ACTIVE                                          |
-    | updated                    | 2014-08-21T11:18:50Z                            |
-    +----------------------------+-------------------------------------------------+
-
-
-**Task configuration file (in JSON format):**
-
-.. code-block:: json
-
-    {
-        "NovaServers.boot_server": [
-            {
-                "args": {
-                    "flavor": {
-                        "name": "ram64"
-                    },
-                    "image": {
-                        "name": "TestVM"
-                    }
-                },
-                "runner": {
-                    "type": "constant",
-                    "concurrency": 5,
-                    "times": 400
-                },
-                "context": {
-                    "neutron_network": {
-                        "network_ip_version": 4
-                    },
-                    "users": {
-                        "concurrent": 30,
-                        "users_per_tenant": 5,
-                        "tenants": 5
-                    },
-                    "quotas": {
-                        "neutron": {
-                            "subnet": -1,
-                            "port": -1,
-                            "network": -1,
-                            "router": -1
-                        }
-                    }
-                }
-            }
-        ]
-    }
-
-The only difference between the first and the second run is that
-runner.times for the first run was set to 500.
-
-Results
--------
-
-**First run - a bug was found:**
-
-Starting from the 142nd server, we got an error from novaclient:
-**Error: Unauthorized (HTTP 401).**
-
-That is how a `bug in Keystone`_ was found.
-
-+------------------+-----------+-----------+-----------+---------------+---------------+---------+-------+
-| action           | min (sec) | avg (sec) | max (sec) | 90 percentile | 95 percentile | success | count |
-+------------------+-----------+-----------+-----------+---------------+---------------+---------+-------+
-| nova.boot_server | 6.507     | 17.402    | 100.303   | 39.222        | 50.134        | 26.8%   | 500   |
-| total            | 6.507     | 17.402    | 100.303   | 39.222        | 50.134        | 26.8%   | 500   |
-+------------------+-----------+-----------+-----------+---------------+---------------+---------+-------+
-
-**Second run, with the bugfix:**
-
-After a patch was applied (using RPC instead of the neutron client in the
-metadata agent), we got **100% success and a 2x improvement in average
-performance**:
-
-+------------------+-----------+-----------+-----------+---------------+---------------+---------+-------+
-| action           | min (sec) | avg (sec) | max (sec) | 90 percentile | 95 percentile | success | count |
-+------------------+-----------+-----------+-----------+---------------+---------------+---------+-------+
-| nova.boot_server | 5.031     | 8.008     | 14.093    | 9.616         | 9.716         | 100.0%  | 400   |
-| total            | 5.031     | 8.008     | 14.093    | 9.616         | 9.716         | 100.0%  | 400   |
-+------------------+-----------+-----------+-----------+---------------+---------------+---------+-------+
-
-.. references:
-
-.. _bug in Keystone: https://bugs.launchpad.net/keystone/+bug/1360446
-.. _Mirantis OpenStack: https://software.mirantis.com/
-.. _ExistingCloud: https://github.com/openstack/rally/blob/master/samples/deployments/existing.json
diff --git a/etc/rally.bash_completion b/etc/rally.bash_completion
deleted file mode 100644
index 82ddd332..00000000
--- a/etc/rally.bash_completion
+++ /dev/null
@@ -1,94 +0,0 @@
-#!/bin/bash
-
-# Standalone _filedir() alternative.
-# This avoids depending on the bash-completion package's routines.
-function _rally_filedir()
-{
-    test "${1}" \
-        && COMPREPLY=( \
-            $(compgen -f -- "${cur}" | grep -E "${1}") \
-            $(compgen -o plusdirs -- "${cur}") ) \
-        || COMPREPLY=( \
-            $(compgen -o plusdirs -f -- "${cur}") \
-            $(compgen -d -- "${cur}") )
-}
-
-_rally()
-{
-    declare -A SUBCOMMANDS
-    declare -A OPTS
-
-    OPTS["deployment_check"]="--deployment"
-    OPTS["deployment_config"]="--deployment"
-    OPTS["deployment_create"]="--name --fromenv --filename --no-use"
-    OPTS["deployment_destroy"]="--deployment"
-    OPTS["deployment_list"]=""
-    OPTS["deployment_recreate"]="--filename --deployment"
-    OPTS["deployment_show"]="--deployment"
-    OPTS["deployment_use"]="--deployment"
-    OPTS["plugin_list"]="--name --namespace --plugin-base"
-    OPTS["plugin_show"]="--name --namespace"
-    OPTS["task_abort"]="--uuid --soft"
-    OPTS["task_delete"]="--force --uuid"
-    OPTS["task_detailed"]="--uuid --iterations-data"
-    OPTS["task_export"]="--uuid --type --to"
-    OPTS["task_import"]="--file --deployment --tag"
-    OPTS["task_list"]="--deployment --all-deployments --status --tag --uuids-only"
-    OPTS["task_report"]="--out --open --html --html-static --uuid"
-    OPTS["task_results"]="--uuid"
-    OPTS["task_sla-check"]="--uuid --json"
-    OPTS["task_sla_check"]="--uuid --json"
-    OPTS["task_start"]="--deployment --task --task-args --task-args-file --tag --no-use --abort-on-sla-failure"
-    OPTS["task_status"]="--uuid"
-    OPTS["task_trends"]="--out --open --tasks"
-    OPTS["task_use"]="--uuid"
-    OPTS["task_validate"]="--deployment --task --task-args --task-args-file"
-    OPTS["verify_add-verifier-ext"]="--id --source --version --extra-settings"
-    OPTS["verify_configure-verifier"]="--id --deployment-id --reconfigure --extend --override --show"
-    OPTS["verify_create-verifier"]="--name --type --namespace --source --version --system-wide --extra-settings --no-use"
-    OPTS["verify_delete"]="--uuid"
-    OPTS["verify_delete-verifier"]="--id --deployment-id --force"
-    OPTS["verify_delete-verifier-ext"]="--id --name"
-    OPTS["verify_import"]="--id --deployment-id --file --run-args --no-use"
-    OPTS["verify_list"]="--id --deployment-id --tag --status"
-    OPTS["verify_list-plugins"]="--namespace"
-    OPTS["verify_list-verifier-exts"]="--id"
-    OPTS["verify_list-verifier-tests"]="--id --pattern"
-    OPTS["verify_list-verifiers"]="--status"
-    OPTS["verify_report"]="--uuid --type --to --open"
-    OPTS["verify_rerun"]="--uuid --deployment-id --failed --tag --concurrency --detailed --no-use"
-    OPTS["verify_show"]="--uuid --sort-by --detailed"
-    OPTS["verify_show-verifier"]="--id"
-    OPTS["verify_start"]="--id --deployment-id --tag --pattern --concurrency --load-list --skip-list --xfail-list --detailed --no-use"
-    OPTS["verify_update-verifier"]="--id --update-venv --version --system-wide --no-system-wide"
-    OPTS["verify_use"]="--uuid"
-    OPTS["verify_use-verifier"]="--id"
-
-    for OPT in ${!OPTS[*]} ; do
-        CMD=${OPT%%_*}
-        CMDSUB=${OPT#*_}
-        SUBCOMMANDS[${CMD}]+="${CMDSUB} "
-    done
-
-    COMMANDS="${!SUBCOMMANDS[*]}"
-    COMPREPLY=()
-
-    local cur="${COMP_WORDS[COMP_CWORD]}"
-    local prev="${COMP_WORDS[COMP_CWORD-1]}"
-
-    if [[ $cur =~ ^(\.|\~|\/) ]] || [[ $prev =~ ^--out(|put-file)$ ]] ; then
-        _rally_filedir
-    elif [[ $prev =~ ^--(task|filename)$ ]] ; then
-        _rally_filedir "\.json|\.yaml|\.yml"
-    elif [ $COMP_CWORD == "1" ] ; then
-        COMPREPLY=($(compgen -W "$COMMANDS" -- ${cur}))
-    elif [ $COMP_CWORD == "2" ] ; then
-        COMPREPLY=($(compgen -W "${SUBCOMMANDS[${prev}]}" -- ${cur}))
-    else
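-        # For deeper invocations, flag completion is looked up by joining
-        # the first two words, e.g. "task" + "start" -> OPTS["task_start"].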
COMMAND="${COMP_WORDS[1]}_${COMP_WORDS[2]}" - COMPREPLY=($(compgen -W "${OPTS[$COMMAND]}" -- ${cur})) - fi - return 0 -} - -complete -o filenames -F _rally rally diff --git a/etc/rally/rally-config-generator.conf b/etc/rally/rally-config-generator.conf deleted file mode 100644 index 16a6bf04..00000000 --- a/etc/rally/rally-config-generator.conf +++ /dev/null @@ -1,5 +0,0 @@ -[DEFAULT] -output_file = etc/rally/rally.conf.sample -namespace = rally -namespace = oslo.db -namespace = oslo.log diff --git a/etc/rally/rally.conf.sample b/etc/rally/rally.conf.sample deleted file mode 100644 index 332d823f..00000000 --- a/etc/rally/rally.conf.sample +++ /dev/null @@ -1,781 +0,0 @@ -[DEFAULT] - -# -# From oslo.log -# - -# If set to true, the logging level will be set to DEBUG instead of -# the default INFO level. (boolean value) -# Note: This option can be changed without restarting. -#debug = false - -# The name of a logging configuration file. This file is appended to -# any existing logging configuration files. For details about logging -# configuration files, see the Python logging module documentation. -# Note that when logging configuration files are used then all logging -# configuration is set in the configuration file and other logging -# configuration options are ignored (for example, -# logging_context_format_string). (string value) -# Note: This option can be changed without restarting. -# Deprecated group/name - [DEFAULT]/log_config -#log_config_append = - -# Defines the format string for %%(asctime)s in log records. Default: -# %(default)s . This option is ignored if log_config_append is set. -# (string value) -#log_date_format = %Y-%m-%d %H:%M:%S - -# (Optional) Name of log file to send logging output to. If no default -# is set, logging will go to stderr as defined by use_stderr. This -# option is ignored if log_config_append is set. (string value) -# Deprecated group/name - [DEFAULT]/logfile -#log_file = - -# (Optional) The base directory used for relative log_file paths. -# This option is ignored if log_config_append is set. (string value) -# Deprecated group/name - [DEFAULT]/logdir -#log_dir = - -# Uses logging handler designed to watch file system. When log file is -# moved or removed this handler will open a new log file with -# specified path instantaneously. It makes sense only if log_file -# option is specified and Linux platform is used. This option is -# ignored if log_config_append is set. (boolean value) -#watch_log_file = false - -# Use syslog for logging. Existing syslog format is DEPRECATED and -# will be changed later to honor RFC5424. This option is ignored if -# log_config_append is set. (boolean value) -#use_syslog = false - -# Syslog facility to receive log lines. This option is ignored if -# log_config_append is set. (string value) -#syslog_log_facility = LOG_USER - -# Log output to standard error. This option is ignored if -# log_config_append is set. (boolean value) -#use_stderr = false - -# Format string to use for log messages with context. (string value) -#logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s - -# Format string to use for log messages when context is undefined. -# (string value) -#logging_default_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s - -# Additional data to append to log message when logging level for the -# message is DEBUG. 
(string value) -#logging_debug_format_suffix = %(funcName)s %(pathname)s:%(lineno)d - -# Prefix each line of exception output with this format. (string -# value) -#logging_exception_prefix = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s - -# Defines the format string for %(user_identity)s that is used in -# logging_context_format_string. (string value) -#logging_user_identity_format = %(user)s %(tenant)s %(domain)s %(user_domain)s %(project_domain)s - -# List of package logging levels in logger=LEVEL pairs. This option is -# ignored if log_config_append is set. (list value) -#default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,dogpile.core.dogpile=INFO - -# Enables or disables publication of error events. (boolean value) -#publish_errors = false - -# The format for an instance that is passed with the log message. -# (string value) -#instance_format = "[instance: %(uuid)s] " - -# The format for an instance UUID that is passed with the log message. -# (string value) -#instance_uuid_format = "[instance: %(uuid)s] " - -# Interval, number of seconds, of log rate limiting. (integer value) -#rate_limit_interval = 0 - -# Maximum number of logged messages per rate_limit_interval. (integer -# value) -#rate_limit_burst = 0 - -# Log level name used by rate limiting: CRITICAL, ERROR, INFO, -# WARNING, DEBUG or empty string. Logs with level greater or equal to -# rate_limit_except_level are not filtered. An empty string means that -# all levels are filtered. (string value) -#rate_limit_except_level = CRITICAL - -# Enables or disables fatal status of deprecations. (boolean value) -#fatal_deprecations = false - -# -# From rally -# - -# Print debugging output only for Rally. Off-site components stay -# quiet. (boolean value) -#rally_debug = false - -# HTTP timeout for any of OpenStack service in seconds (floating point -# value) -#openstack_client_http_timeout = 180.0 - -# Size of raw result chunk in iterations (integer value) -# Minimum value: 1 -#raw_result_chunk_size = 1000 - - -[benchmark] - -# -# From rally -# - -# Time to sleep after creating a resource before polling for it status -# (floating point value) -#cinder_volume_create_prepoll_delay = 2.0 - -# Time to wait for cinder volume to be created. (floating point value) -#cinder_volume_create_timeout = 600.0 - -# Interval between checks when waiting for volume creation. (floating -# point value) -#cinder_volume_create_poll_interval = 2.0 - -# Time to wait for cinder volume to be deleted. (floating point value) -#cinder_volume_delete_timeout = 600.0 - -# Interval between checks when waiting for volume deletion. (floating -# point value) -#cinder_volume_delete_poll_interval = 2.0 - -# Time to wait for cinder backup to be restored. (floating point -# value) -#cinder_backup_restore_timeout = 600.0 - -# Interval between checks when waiting for backup restoring. 
(floating -# point value) -#cinder_backup_restore_poll_interval = 2.0 - -# Time to sleep after boot before polling for status (floating point -# value) -#ec2_server_boot_prepoll_delay = 1.0 - -# Server boot timeout (floating point value) -#ec2_server_boot_timeout = 300.0 - -# Server boot poll interval (floating point value) -#ec2_server_boot_poll_interval = 1.0 - -# Time(in sec) to sleep after creating a resource before polling for -# it status. (floating point value) -#heat_stack_create_prepoll_delay = 2.0 - -# Time(in sec) to wait for heat stack to be created. (floating point -# value) -#heat_stack_create_timeout = 3600.0 - -# Time interval(in sec) between checks when waiting for stack -# creation. (floating point value) -#heat_stack_create_poll_interval = 1.0 - -# Time(in sec) to wait for heat stack to be deleted. (floating point -# value) -#heat_stack_delete_timeout = 3600.0 - -# Time interval(in sec) between checks when waiting for stack -# deletion. (floating point value) -#heat_stack_delete_poll_interval = 1.0 - -# Time(in sec) to wait for stack to be checked. (floating point value) -#heat_stack_check_timeout = 3600.0 - -# Time interval(in sec) between checks when waiting for stack -# checking. (floating point value) -#heat_stack_check_poll_interval = 1.0 - -# Time(in sec) to sleep after updating a resource before polling for -# it status. (floating point value) -#heat_stack_update_prepoll_delay = 2.0 - -# Time(in sec) to wait for stack to be updated. (floating point value) -#heat_stack_update_timeout = 3600.0 - -# Time interval(in sec) between checks when waiting for stack update. -# (floating point value) -#heat_stack_update_poll_interval = 1.0 - -# Time(in sec) to wait for stack to be suspended. (floating point -# value) -#heat_stack_suspend_timeout = 3600.0 - -# Time interval(in sec) between checks when waiting for stack suspend. -# (floating point value) -#heat_stack_suspend_poll_interval = 1.0 - -# Time(in sec) to wait for stack to be resumed. (floating point value) -#heat_stack_resume_timeout = 3600.0 - -# Time interval(in sec) between checks when waiting for stack resume. -# (floating point value) -#heat_stack_resume_poll_interval = 1.0 - -# Time(in sec) to wait for stack snapshot to be created. (floating -# point value) -#heat_stack_snapshot_timeout = 3600.0 - -# Time interval(in sec) between checks when waiting for stack snapshot -# to be created. (floating point value) -#heat_stack_snapshot_poll_interval = 1.0 - -# Time(in sec) to wait for stack to be restored from snapshot. -# (floating point value) -#heat_stack_restore_timeout = 3600.0 - -# Time interval(in sec) between checks when waiting for stack to be -# restored. (floating point value) -#heat_stack_restore_poll_interval = 1.0 - -# Time (in sec) to wait for stack to scale up or down. (floating point -# value) -#heat_stack_scale_timeout = 3600.0 - -# Time interval (in sec) between checks when waiting for a stack to -# scale up or down. (floating point value) -#heat_stack_scale_poll_interval = 1.0 - -# Interval(in sec) between checks when waiting for node creation. -# (floating point value) -#ironic_node_create_poll_interval = 1.0 - -# Ironic node create timeout (floating point value) -#ironic_node_create_timeout = 300 - -# Ironic node poll interval (floating point value) -#ironic_node_poll_interval = 1.0 - -# Ironic node create timeout (floating point value) -#ironic_node_delete_timeout = 300 - -# Time(in sec) to sleep after creating a resource before polling for -# the status. 
(floating point value) -#magnum_cluster_create_prepoll_delay = 5.0 - -# Time(in sec) to wait for magnum cluster to be created. (floating -# point value) -#magnum_cluster_create_timeout = 1200.0 - -# Time interval(in sec) between checks when waiting for cluster -# creation. (floating point value) -#magnum_cluster_create_poll_interval = 1.0 - -# Delay between creating Manila share and polling for its status. -# (floating point value) -#manila_share_create_prepoll_delay = 2.0 - -# Timeout for Manila share creation. (floating point value) -#manila_share_create_timeout = 300.0 - -# Interval between checks when waiting for Manila share creation. -# (floating point value) -#manila_share_create_poll_interval = 3.0 - -# Timeout for Manila share deletion. (floating point value) -#manila_share_delete_timeout = 180.0 - -# Interval between checks when waiting for Manila share deletion. -# (floating point value) -#manila_share_delete_poll_interval = 2.0 - -# mistral execution timeout (integer value) -#mistral_execution_timeout = 200 - -# Delay between creating Monasca metrics and polling for its elements. -# (floating point value) -#monasca_metric_create_prepoll_delay = 15.0 - -# A timeout in seconds for an environment deploy (integer value) -# Deprecated group/name - [benchmark]/deploy_environment_timeout -#murano_deploy_environment_timeout = 1200 - -# Deploy environment check interval in seconds (integer value) -# Deprecated group/name - [benchmark]/deploy_environment_check_interval -#murano_deploy_environment_check_interval = 5 - -# Time to sleep after start before polling for status (floating point -# value) -#nova_server_start_prepoll_delay = 0.0 - -# Server start timeout (floating point value) -#nova_server_start_timeout = 300.0 - -# Server start poll interval (floating point value) -#nova_server_start_poll_interval = 1.0 - -# Time to sleep after stop before polling for status (floating point -# value) -#nova_server_stop_prepoll_delay = 0.0 - -# Server stop timeout (floating point value) -#nova_server_stop_timeout = 300.0 - -# Server stop poll interval (floating point value) -#nova_server_stop_poll_interval = 2.0 - -# Time to sleep after boot before polling for status (floating point -# value) -#nova_server_boot_prepoll_delay = 1.0 - -# Server boot timeout (floating point value) -#nova_server_boot_timeout = 300.0 - -# Server boot poll interval (floating point value) -#nova_server_boot_poll_interval = 2.0 - -# Time to sleep after delete before polling for status (floating point -# value) -#nova_server_delete_prepoll_delay = 2.0 - -# Server delete timeout (floating point value) -#nova_server_delete_timeout = 300.0 - -# Server delete poll interval (floating point value) -#nova_server_delete_poll_interval = 2.0 - -# Time to sleep after reboot before polling for status (floating point -# value) -#nova_server_reboot_prepoll_delay = 2.0 - -# Server reboot timeout (floating point value) -#nova_server_reboot_timeout = 300.0 - -# Server reboot poll interval (floating point value) -#nova_server_reboot_poll_interval = 2.0 - -# Time to sleep after rebuild before polling for status (floating -# point value) -#nova_server_rebuild_prepoll_delay = 1.0 - -# Server rebuild timeout (floating point value) -#nova_server_rebuild_timeout = 300.0 - -# Server rebuild poll interval (floating point value) -#nova_server_rebuild_poll_interval = 1.0 - -# Time to sleep after rescue before polling for status (floating point -# value) -#nova_server_rescue_prepoll_delay = 2.0 - -# Server rescue timeout (floating point value) 
-#nova_server_rescue_timeout = 300.0 - -# Server rescue poll interval (floating point value) -#nova_server_rescue_poll_interval = 2.0 - -# Time to sleep after unrescue before polling for status (floating -# point value) -#nova_server_unrescue_prepoll_delay = 2.0 - -# Server unrescue timeout (floating point value) -#nova_server_unrescue_timeout = 300.0 - -# Server unrescue poll interval (floating point value) -#nova_server_unrescue_poll_interval = 2.0 - -# Time to sleep after suspend before polling for status (floating -# point value) -#nova_server_suspend_prepoll_delay = 2.0 - -# Server suspend timeout (floating point value) -#nova_server_suspend_timeout = 300.0 - -# Server suspend poll interval (floating point value) -#nova_server_suspend_poll_interval = 2.0 - -# Time to sleep after resume before polling for status (floating point -# value) -#nova_server_resume_prepoll_delay = 2.0 - -# Server resume timeout (floating point value) -#nova_server_resume_timeout = 300.0 - -# Server resume poll interval (floating point value) -#nova_server_resume_poll_interval = 2.0 - -# Time to sleep after pause before polling for status (floating point -# value) -#nova_server_pause_prepoll_delay = 2.0 - -# Server pause timeout (floating point value) -#nova_server_pause_timeout = 300.0 - -# Server pause poll interval (floating point value) -#nova_server_pause_poll_interval = 2.0 - -# Time to sleep after unpause before polling for status (floating -# point value) -#nova_server_unpause_prepoll_delay = 2.0 - -# Server unpause timeout (floating point value) -#nova_server_unpause_timeout = 300.0 - -# Server unpause poll interval (floating point value) -#nova_server_unpause_poll_interval = 2.0 - -# Time to sleep after shelve before polling for status (floating point -# value) -#nova_server_shelve_prepoll_delay = 2.0 - -# Server shelve timeout (floating point value) -#nova_server_shelve_timeout = 300.0 - -# Server shelve poll interval (floating point value) -#nova_server_shelve_poll_interval = 2.0 - -# Time to sleep after unshelve before polling for status (floating -# point value) -#nova_server_unshelve_prepoll_delay = 2.0 - -# Server unshelve timeout (floating point value) -#nova_server_unshelve_timeout = 300.0 - -# Server unshelve poll interval (floating point value) -#nova_server_unshelve_poll_interval = 2.0 - -# Time to sleep after image_create before polling for status (floating -# point value) -#nova_server_image_create_prepoll_delay = 0.0 - -# Server image_create timeout (floating point value) -#nova_server_image_create_timeout = 300.0 - -# Server image_create poll interval (floating point value) -#nova_server_image_create_poll_interval = 2.0 - -# Time to sleep after image_delete before polling for status (floating -# point value) -#nova_server_image_delete_prepoll_delay = 0.0 - -# Server image_delete timeout (floating point value) -#nova_server_image_delete_timeout = 300.0 - -# Server image_delete poll interval (floating point value) -#nova_server_image_delete_poll_interval = 2.0 - -# Time to sleep after resize before polling for status (floating point -# value) -#nova_server_resize_prepoll_delay = 2.0 - -# Server resize timeout (floating point value) -#nova_server_resize_timeout = 400.0 - -# Server resize poll interval (floating point value) -#nova_server_resize_poll_interval = 5.0 - -# Time to sleep after resize_confirm before polling for status -# (floating point value) -#nova_server_resize_confirm_prepoll_delay = 0.0 - -# Server resize_confirm timeout (floating point value) 
-#nova_server_resize_confirm_timeout = 200.0 - -# Server resize_confirm poll interval (floating point value) -#nova_server_resize_confirm_poll_interval = 2.0 - -# Time to sleep after resize_revert before polling for status -# (floating point value) -#nova_server_resize_revert_prepoll_delay = 0.0 - -# Server resize_revert timeout (floating point value) -#nova_server_resize_revert_timeout = 200.0 - -# Server resize_revert poll interval (floating point value) -#nova_server_resize_revert_poll_interval = 2.0 - -# Time to sleep after live_migrate before polling for status (floating -# point value) -#nova_server_live_migrate_prepoll_delay = 1.0 - -# Server live_migrate timeout (floating point value) -#nova_server_live_migrate_timeout = 400.0 - -# Server live_migrate poll interval (floating point value) -#nova_server_live_migrate_poll_interval = 2.0 - -# Time to sleep after migrate before polling for status (floating -# point value) -#nova_server_migrate_prepoll_delay = 1.0 - -# Server migrate timeout (floating point value) -#nova_server_migrate_timeout = 400.0 - -# Server migrate poll interval (floating point value) -#nova_server_migrate_poll_interval = 2.0 - -# Nova volume detach timeout (floating point value) -#nova_detach_volume_timeout = 200.0 - -# Nova volume detach poll interval (floating point value) -#nova_detach_volume_poll_interval = 2.0 - -# A timeout in seconds for a cluster create operation (integer value) -# Deprecated group/name - [benchmark]/cluster_create_timeout -#sahara_cluster_create_timeout = 1800 - -# A timeout in seconds for a cluster delete operation (integer value) -# Deprecated group/name - [benchmark]/cluster_delete_timeout -#sahara_cluster_delete_timeout = 900 - -# Cluster status polling interval in seconds (integer value) -# Deprecated group/name - [benchmark]/cluster_check_interval -#sahara_cluster_check_interval = 5 - -# A timeout in seconds for a Job Execution to complete (integer value) -# Deprecated group/name - [benchmark]/job_execution_timeout -#sahara_job_execution_timeout = 600 - -# Job Execution status polling interval in seconds (integer value) -# Deprecated group/name - [benchmark]/job_check_interval -#sahara_job_check_interval = 5 - -# Amount of workers one proxy should serve to. (integer value) -#sahara_workers_per_proxy = 20 - -# Interval between checks when waiting for a VM to become pingable -# (floating point value) -#vm_ping_poll_interval = 1.0 - -# Time to wait for a VM to become pingable (floating point value) -#vm_ping_timeout = 120.0 - -# Time to wait for glance image to be deleted. (floating point value) -#glance_image_delete_timeout = 120.0 - -# Interval between checks when waiting for image deletion. (floating -# point value) -#glance_image_delete_poll_interval = 1.0 - -# Time to sleep after creating a resource before polling for it status -# (floating point value) -#glance_image_create_prepoll_delay = 2.0 - -# Time to wait for glance image to be created. (floating point value) -#glance_image_create_timeout = 120.0 - -# Interval between checks when waiting for image creation. (floating -# point value) -#glance_image_create_poll_interval = 1.0 - -# Watcher audit launch interval (floating point value) -#watcher_audit_launch_poll_interval = 2.0 - -# Watcher audit launch timeout (integer value) -#watcher_audit_launch_timeout = 300 - -# Time in seconds to wait for senlin action to finish. 
(floating point -# value) -#senlin_action_timeout = 3600 - -# Neutron create loadbalancer timeout (floating point value) -#neutron_create_loadbalancer_timeout = 500.0 - -# Neutron create loadbalancer poll interval (floating point value) -#neutron_create_loadbalancer_poll_interval = 2.0 - -# Enable or disable osprofiler to trace the scenarios -#enable_profiler = True - -[cleanup] - -# -# From rally -# - -# A timeout in seconds for deleting resources (integer value) -#resource_deletion_timeout = 600 - -# Number of cleanup threads to run (integer value) -#cleanup_threads = 20 - - -[database] - -# -# From oslo.db -# - -# If True, SQLite uses synchronous mode. (boolean value) -#sqlite_synchronous = true - -# The back end to use for the database. (string value) -# Deprecated group/name - [DEFAULT]/db_backend -#backend = sqlalchemy - -# The SQLAlchemy connection string to use to connect to the database. -# (string value) -# Deprecated group/name - [DEFAULT]/sql_connection -# Deprecated group/name - [DATABASE]/sql_connection -# Deprecated group/name - [sql]/connection -#connection = - -# The SQLAlchemy connection string to use to connect to the slave -# database. (string value) -#slave_connection = - -# The SQL mode to be used for MySQL sessions. This option, including -# the default, overrides any server-set SQL mode. To use whatever SQL -# mode is set by the server configuration, set this to no value. -# Example: mysql_sql_mode= (string value) -#mysql_sql_mode = TRADITIONAL - -# Timeout before idle SQL connections are reaped. (integer value) -# Deprecated group/name - [DEFAULT]/sql_idle_timeout -# Deprecated group/name - [DATABASE]/sql_idle_timeout -# Deprecated group/name - [sql]/idle_timeout -#idle_timeout = 3600 - -# Minimum number of SQL connections to keep open in a pool. (integer -# value) -# Deprecated group/name - [DEFAULT]/sql_min_pool_size -# Deprecated group/name - [DATABASE]/sql_min_pool_size -#min_pool_size = 1 - -# Maximum number of SQL connections to keep open in a pool. Setting a -# value of 0 indicates no limit. (integer value) -# Deprecated group/name - [DEFAULT]/sql_max_pool_size -# Deprecated group/name - [DATABASE]/sql_max_pool_size -#max_pool_size = 5 - -# Maximum number of database connection retries during startup. Set to -# -1 to specify an infinite retry count. (integer value) -# Deprecated group/name - [DEFAULT]/sql_max_retries -# Deprecated group/name - [DATABASE]/sql_max_retries -#max_retries = 10 - -# Interval between retries of opening a SQL connection. (integer -# value) -# Deprecated group/name - [DEFAULT]/sql_retry_interval -# Deprecated group/name - [DATABASE]/reconnect_interval -#retry_interval = 10 - -# If set, use this value for max_overflow with SQLAlchemy. (integer -# value) -# Deprecated group/name - [DEFAULT]/sql_max_overflow -# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow -#max_overflow = 50 - -# Verbosity of SQL debugging information: 0=None, 100=Everything. -# (integer value) -# Minimum value: 0 -# Maximum value: 100 -# Deprecated group/name - [DEFAULT]/sql_connection_debug -#connection_debug = 0 - -# Add Python stack traces to SQL as comment strings. (boolean value) -# Deprecated group/name - [DEFAULT]/sql_connection_trace -#connection_trace = false - -# If set, use this value for pool_timeout with SQLAlchemy. (integer -# value) -# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout -#pool_timeout = - -# Enable the experimental use of database reconnect on connection -# lost. 
(boolean value)
-#use_db_reconnect = false
-
-# Seconds between retries of a database transaction. (integer value)
-#db_retry_interval = 1
-
-# If True, increases the interval between retries of a database
-# operation up to db_max_retry_interval. (boolean value)
-#db_inc_retry_interval = true
-
-# If db_inc_retry_interval is set, the maximum seconds between retries
-# of a database operation. (integer value)
-#db_max_retry_interval = 10
-
-# Maximum retries in case of connection error or deadlock error before
-# error is raised. Set to -1 to specify an infinite retry count.
-# (integer value)
-#db_max_retries = 20
-
-
-[roles_context]
-
-#
-# From rally
-#
-
-# How many concurrent threads to use for serving roles context
-# (integer value)
-#resource_management_workers = 30
-
-
-[tempest]
-
-#
-# From rally
-#
-
-# Image URL (string value)
-#img_url = http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img
-
-# Image disk format to use when creating the image (string value)
-#img_disk_format = qcow2
-
-# Image container format to use when creating the image (string value)
-#img_container_format = bare
-
-# Regular expression for the name of a public image, used to discover
-# it in the cloud and use it for the tests. Note that when Rally is
-# searching for the image, case-insensitive matching is performed.
-# Specify nothing ('img_name_regex =') if you want to disable
-# discovery. In this case Rally will create the needed resources by
-# itself if the values for the corresponding config options are not
-# specified in the Tempest config file (string value)
-#img_name_regex = ^.*(cirros|testvm).*$
-
-# Role required for users to be able to create Swift containers
-# (string value)
-#swift_operator_role = Member
-
-# User role that has reseller admin permissions (string value)
-#swift_reseller_admin_role = ResellerAdmin
-
-# Role required for users to be able to manage Heat stacks (string
-# value)
-#heat_stack_owner_role = heat_stack_owner
-
-# Role for Heat template-defined users (string value)
-#heat_stack_user_role = heat_stack_user
-
-# Primary flavor RAM size used by most of the test cases (integer
-# value)
-#flavor_ref_ram = 64
-
-# Alternate reference flavor RAM size used by tests that need two
-# flavors, like those that resize an instance (integer value)
-#flavor_ref_alt_ram = 128
-
-# Flavor RAM size used for orchestration test cases (integer value)
-#heat_instance_type_ram = 64
-
-
-[users_context]
-
-#
-# From rally
-#
-
-# The number of concurrent threads to use for serving users context.
-# (integer value)
-#resource_management_workers = 20
-
-# ID of domain in which projects will be created. (string value)
-#project_domain = default
-
-# ID of domain in which users will be created. (string value)
-#user_domain = default
-
-# The default Keystone role to assign to users. (string value)
-#keystone_default_role = member
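The sample above ships with every option commented out; the one setting installs
typically override is the database connection, which install_rally.sh (deleted
next) rewrites via sed. A minimal hand-written override might look like the
sketch below. The mysql+pymysql URL shape mirrors the script's DBCONNSTRING
logic, while the user, password, host and database name are placeholders:

    [database]
    # placeholder credentials; any SQLAlchemy URL accepted by oslo.db works
    connection = mysql+pymysql://rally:secret@dbhost/rally
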
diff --git a/install_rally.sh b/install_rally.sh
deleted file mode 100755
index 0a6cffe4..00000000
--- a/install_rally.sh
+++ /dev/null
@@ -1,826 +0,0 @@
-#!/usr/bin/env bash
-#
-# This script installs Rally.
-# Specifically, it is able to install and configure
-# Rally either globally (system-wide), or isolated in
-# a virtual environment using the virtualenv tool.
-#
-# NOTE: The script assumes that you have the following
-# programs already installed:
-# -> Python 2.6, Python 2.7 or Python 3.4
-
-set -e
-
-PROG=$(basename "${0}")
-
-running_as_root() {
-    test "$(/usr/bin/id -u)" -eq 0
-}
-
-VERBOSE=""
-ASKCONFIRMATION=1
-RECREATEDEST="ask"
-USEVIRTUALENV="yes"
-DEVELOPMENT_MODE="false"
-
-# ansi colors for formatting heredoc
-ESC=$(printf "\e")
-GREEN="$ESC[0;32m"
-NO_COLOR="$ESC[0;0m"
-RED="$ESC[0;31m"
-
-PYTHON2=$(which python || true)
-PYTHON3=$(which python3 || true)
-PYTHON=${PYTHON2:-$PYTHON3}
-BASE_PIP_URL=${BASE_PIP_URL:-"https://pypi.python.org/simple"}
-VIRTUALENV_VERSION="15.1.0"
-VIRTUALENV_URL="https://raw.github.com/pypa/virtualenv/$VIRTUALENV_VERSION/virtualenv.py"
-
-RALLY_GIT_URL="https://git.openstack.org/openstack/rally"
-RALLY_GIT_BRANCH="master"
-RALLY_CONFIGURATION_DIR=/etc/rally
-RALLY_DATABASE_DIR=/var/lib/rally/database
-DBTYPE=sqlite
-DBNAME=rally.sqlite
-
-# Variable used by script_interrupted to know what to cleanup
-CURRENT_ACTION="none"
-
-## Exit status codes (mostly following <sysexits.h>)
-# successful exit
-EX_OK=0
-
-# wrong command-line invocation
-EX_USAGE=64
-
-# missing dependencies (e.g., no C compiler)
-EX_UNAVAILABLE=69
-
-# wrong python version
-EX_SOFTWARE=70
-
-# cannot create directory or file
-EX_CANTCREAT=73
-
-# user aborted operations
-EX_TEMPFAIL=75
-
-# misused as: unexpected error in some script we call
-EX_PROTOCOL=76
-
-# abort RC [MSG]
-#
-# Print error message MSG and abort shell execution with exit code RC.
-# If MSG is not given, read it from STDIN.
-#
-abort () {
-    local rc="$1"
-    shift
-    (echo -en "$RED$PROG: ERROR: $NO_COLOR";
-     if [ $# -gt 0 ]; then echo "$@"; else cat; fi) 1>&2
-    exit "$rc"
-}
-
-# die RC HEADER <<...
-#
-# Print an error message with the given header, then abort shell
-# execution with exit code RC. Additional text for the error message
-# *must* be passed on STDIN.
-#
-die () {
-    local rc="$1"
-    header="$2"
-    shift 2
-    cat 1>&2 <<__EOF__
-$RED==========================================================
-$PROG: ERROR: $header
-==========================================================
-$NO_COLOR
-__EOF__
-    if [ $# -gt 0 ]; then
-        # print remaining arguments one per line
-        for line in "$@"; do
-            echo "$line" 1>&2;
-        done
-    else
-        # additional message text provided on STDIN
-        cat 1>&2;
-    fi
-    cat 1>&2 <<__EOF__
-
-If the above does not help you resolve the issue, please contact the
-Rally team by sending an email to the OpenStack mailing list
-openstack-dev@lists.openstack.org. Include the full output of this
-script to help us identify the problem.
-$RED
-Aborting installation!$NO_COLOR
-__EOF__
-    exit "$rc"
-}
-
-script_interrupted () {
-    echo "Interrupted by the user. Cleaning up..."
-    [ -n "${VIRTUAL_ENV}" -a "${VIRTUAL_ENV}" == "$VENVDIR" ] && deactivate
-
-    case $CURRENT_ACTION in
-        creating-venv|venv-created)
-            if [ -d "$VENVDIR" ]
-            then
-                if ask_yn "Do you want to delete the virtual environment in '$VENVDIR'?"
-                then
-                    rm -rf "$VENVDIR"
-                fi
-            fi
-            ;;
-        downloading-src|src-downloaded)
-            # This is only relevant when installing with --system,
-            # otherwise the git repository is cloned into the
-            # virtualenv directory
-            if [ -d "$SOURCEDIR" ]
-            then
-                if ask_yn "Do you want to delete the downloaded source in '$SOURCEDIR'?"
-                then
-                    rm -rf "$SOURCEDIR"
-                fi
-            fi
-            ;;
-    esac
-
-    abort $EX_TEMPFAIL "Script interrupted by the user"
-}
-
-trap script_interrupted SIGINT
-
-print_usage () {
-    cat <<__EOF__
-Usage: $PROG [options]
-
-This script will install Rally on your system.
-
-Options:
-$GREEN -h, --help            $NO_COLOR Print this help text
-$GREEN -v, --verbose         $NO_COLOR Verbose mode
-$GREEN -s, --system          $NO_COLOR Install system-wide.
-$GREEN -d, --target DIRECTORY$NO_COLOR Install Rally virtual environment into DIRECTORY.
-                             (Default: $HOME/rally if not root).
-$GREEN --url                 $NO_COLOR Git repository public URL to download Rally from.
-                             This is useful when you only have the installation
-                             script and want to install Rally from a custom repository.
-                             (Default: ${RALLY_GIT_URL}).
-                             (Ignored when you are already in a git repository).
-$GREEN --branch              $NO_COLOR Git branch name, tag (Rally release), commit hash, ref, or other
-                             tree-ish to install. (Default: master)
-                             Ignored when you are already in a git repository.
-$GREEN -f, --overwrite       $NO_COLOR Deprecated. Use -r instead.
-$GREEN -r, --recreate        $NO_COLOR Remove the target directory if it already exists.
-                             If neither '-r' nor '-R' is set, the default behaviour is to ask.
-$GREEN -R, --no-recreate     $NO_COLOR Do not remove the target directory if it already exists.
-                             If neither '-r' nor '-R' is set, the default behaviour is to ask.
-$GREEN -y, --yes             $NO_COLOR Do not ask for confirmation: assume a 'yes' reply
-                             to every question.
-$GREEN -D, --dbtype TYPE     $NO_COLOR Select the database type. TYPE can be one of
-                             'sqlite', 'mysql', 'postgresql'.
-                             Default: sqlite
-$GREEN --db-user USER        $NO_COLOR Database user to use. Only used when --dbtype
-                             is either 'mysql' or 'postgresql'.
-$GREEN --db-password PASSWORD$NO_COLOR Password of the database user. Only used when
-                             --dbtype is either 'mysql' or 'postgresql'.
-$GREEN --db-host HOST        $NO_COLOR Database host. Only used when --dbtype is
-                             either 'mysql' or 'postgresql'
-$GREEN --db-name NAME        $NO_COLOR Name of the database. Only used when --dbtype is
-                             either 'mysql' or 'postgresql'
-$GREEN -p, --python EXE      $NO_COLOR The python interpreter to use. Default: $PYTHON
-$GREEN --develop             $NO_COLOR Install Rally with an editable source code tree.
-                             (Default: false)
-$GREEN --no-color            $NO_COLOR Disable output coloring.
-
-__EOF__
-}
-
-# ask_yn PROMPT
-#
-# Ask a yes/no question preceded by PROMPT.
-# Set the env. variable REPLY to 'yes' or 'no'
-# and return 0 or 1 depending on the user's
-# answer.
-#
-ask_yn () {
-    if [ $ASKCONFIRMATION -eq 0 ]; then
-        # assume 'yes'
-        REPLY='yes'
-        return 0
-    fi
-    while true; do
-        read -p "$1 [yN] " REPLY
-        case "$REPLY" in
-            [Yy]*) REPLY='yes'; return 0 ;;
-            [Nn]*|'') REPLY='no'; return 1 ;;
-            *) echo "Please type 'y' (yes) or 'n' (no)." ;;
-        esac
-    done
-}
-
-have_command () {
-    type "$1" >/dev/null 2>/dev/null
-}
-
-require_command () {
-    if ! have_command "$1"; then
-        abort 1 "Could not find required command '$1' in system PATH. Aborting."
-    fi
-}
-
-require_python () {
-    require_command "$PYTHON"
-    # the one-liner exits non-zero (skipping the die) when the
-    # interpreter version is 2.6 or newer
-    if "$PYTHON" -c 'import sys; sys.exit(sys.version_info[:2] >= (2, 6))'
-    then
-        die $EX_UNAVAILABLE "Wrong version of python is installed" <<__EOF__
-
-Rally requires Python version 2.6+. Unfortunately, we do not support
-your version of python: $("$PYTHON" -V 2>&1 | sed 's/python//gi').
-
-If a version of Python suitable for using Rally is present in some
-non-standard location, you can specify it from the command line by
-running this script again with option '--python' followed by the path of
-the correct 'python' binary.
-__EOF__
-    fi
-}
-
-which_missing_packages () {
-    if [ ! -f bindep.txt ]; then
-        abort $EX_PROTOCOL \
-            "bindep.txt not found. Unable to find missing packages."
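# (Aside, not part of the deleted file: bindep resolves bindep.txt against
# the running distro, and `bindep -b` prints the missing packages one per
# line; which_missing_packages() flattens that into a single
# space-separated string for install_required_sw() further down.)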
- fi - require_command "bindep" - require_command "lsb_release" - echo "$(bindep -b | tr '\n' ' ')" -} - -which_missing_commands () { - # These commands are required to run install_rally.sh - local missing="" - if ! have_command "wget"; then - missing="wget" - fi - if ! have_command "git"; then - missing="$missing git" - fi - if ! have_command "pip"; then - missing="$missing python-pip" - fi - echo "$missing" -} - - -# Download command -download() { - wget -nv $VERBOSE --no-check-certificate -O "$@"; -} - -get_pkg_manager () { - if have_command apt-get; then - # Debian/Ubuntu - if [ "$ASKCONFIRMATION" -eq 0 ]; then - pkg_manager="apt-get install --yes --force-yes" - else - pkg_manager="apt-get install" - fi - elif have_command dnf; then - # dnf based RHEL/CentOS/Fedora - if [ "$ASKCONFIRMATION" -eq 0 ]; then - pkg_manager="dnf install -y" - else - pkg_manager="dnf install" - fi - elif have_command yum; then - # yum based RHEL/CentOS/Fedora - if [ "$ASKCONFIRMATION" -eq 0 ]; then - pkg_manager="yum install -y" - else - pkg_manager="yum install" - fi - elif have_command zypper; then - # SuSE - if [ "$ASKCONFIRMATION" -eq 0 ]; then - pkg_manager="zypper -n --no-gpg-checks --non-interactive install --auto-agree-with-licenses" - else - pkg_manager="zypper install" - fi - else - # MacOSX maybe? - echo "Cannot determine what package manager this system has, so I cannot check if requisite software is installed. I'm proceeding anyway, but you may run into errors later." - fi - echo $pkg_manager -} - -install_required_sw () { - # instead of guessing which distribution this is, we check for the - # package manager name as it basically identifies the distro - local missing pkg_manager - missing=$1 - pkg_manager=$(get_pkg_manager) - - if [ -n "$missing" ]; then - cat <<__EOF__ -The following software packages need to be installed -in order for Rally to work:$GREEN $missing -$NO_COLOR -__EOF__ - - # If we are root - if running_as_root; then - cat <<__EOF__ -In order to install the required software you would need to run as -'root' the following command: -$GREEN - $pkg_manager $missing -$NO_COLOR -__EOF__ - # ask if we have to install it - if ask_yn "Do you want me to install these packages for you?"; then - # install - if [[ "$missing" == *python-pip* ]]; then - missing=${missing//python-pip/} - if ! $pkg_manager python-pip; then - if ask_yn "Error installing python-pip. Install from external source?"; then - local pdir=$(mktemp /tmp/tmp.XXXXXXXXXX -d) - local getpip="$pdir/get-pip.py" - download "$getpip" https://bootstrap.pypa.io/get-pip.py - if ! "$PYTHON" "$getpip"; then - abort $EX_PROTOCOL "Error while installing python-pip from external source." - fi - else - abort $EX_TEMPFAIL \ - "Please install python-pip manually." - fi - fi - fi - if [ -n "$missing" ] && ! $pkg_manager $missing; then - abort $EX_UNAVAILABLE "Error while installing $missing" - fi - # installation successful - else # don't want to install the packages - die $EX_UNAVAILABLE "missing software prerequisites" <<__EOF__ -Please, install the required software before installing Rally - -__EOF__ - fi - else # Not running as root - cat <<__EOF__ -There is a small chance that the required software -is actually installed though we failed to detect it, -so you may choose to proceed with Rally installation -anyway. Be warned however, that continuing is very -likely to fail! - -__EOF__ - if ask_yn "Proceed with installation anyway?" - then - echo "Proceeding with installation at your request... keep fingers crossed!" 
- else - die $EX_UNAVAILABLE "missing software prerequisites" <<__EOF__ -Please ask your system administrator to install the missing packages, -or, if you have root access, you can do that by running the following -command from the 'root' account: -$GREEN - $pkg_manager $missing -$NO_COLOR -__EOF__ - fi - fi - fi - -} - -install_db_connector () { - case $DBTYPE in - mysql) - pip install pymysql - ;; - postgresql) - pip install psycopg2 - ;; - esac -} - -install_virtualenv () { - DESTDIR=$1 - - if [ -n "$VIRTUAL_ENV" ]; then - die $EX_SOFTWARE "Virtualenv already active" <<__EOF__ -A virtual environment seems to be already active. This will cause -this script to FAIL. - -Run 'deactivate', then run this script again. -__EOF__ - fi - - # Use the latest virtualenv that can use `.tar.gz` files - VIRTUALENV_DST="$DESTDIR/virtualenv-$VIRTUALENV_VERSION.py" - mkdir -p "$DESTDIR" - download "$VIRTUALENV_DST" "$VIRTUALENV_URL" - "$PYTHON" "$VIRTUALENV_DST" $VERBOSE --no-setuptools --no-pip --no-wheel \ - -p "$PYTHON" "$DESTDIR" - - . "$DESTDIR/bin/activate" - - download - https://bootstrap.pypa.io/get-pip.py | python -\ - || die $EX_PROTOCOL \ - "Error while running get-pip.py" <<__EOF__ - -The required Python package pip could not be installed -in virtualenv. - -__EOF__ - - pip install setuptools wheel || die $EX_PROTOCOL \ - "Error while running 'pip install setuptools wheel'" <<__EOF__ - -The required Python package setuptools, wheel could not be installed -in virtualenv. - -__EOF__ -} - -setup_rally_configuration () { - SRCDIR=$1 - ETCDIR=$RALLY_CONFIGURATION_DIR - DBDIR=$RALLY_DATABASE_DIR - - [ -d "$ETCDIR" ] || mkdir -p "$ETCDIR" - cp "$SRCDIR"/etc/rally/rally.conf.sample "$ETCDIR"/rally.conf - - [ -d "$DBDIR" ] || mkdir -p "$DBDIR" - local CONF_TMPFILE=$(mktemp /tmp/tmp.XXXXXXXXXX) - sed "s|#connection *=.*|connection = \"$DBCONNSTRING\"|" "$ETCDIR"/rally.conf > "$CONF_TMPFILE" - cat "$CONF_TMPFILE" > "$ETCDIR"/rally.conf - rm "$CONF_TMPFILE" - rally-manage db recreate -} - -rally_venv () { - echo "Installing Rally virtualenv in directory '$VENVDIR' ..." - CURRENT_ACTION="creating-venv" - if ! install_virtualenv "$VENVDIR"; then - die $EX_PROTOCOL "Unable to create a new virtualenv in '$VENVDIR': 'virtualenv.py' script exited with code $rc." <<__EOF__ -The script was unable to create a valid virtual environment. -__EOF__ - fi - CURRENT_ACTION="venv-created" - rc=0 -} - -### Main program ### -short_opts='d:vsyfrRhD:p:' -long_opts='target:,verbose,overwrite,recreate,no-recreate,system,yes,dbtype:,python:,db-user:,db-password:,db-host:,db-name:,help,url:,branch:,develop,no-color' - -set +e -if [ "x$(getopt -T)" = 'x' ]; then - # GNU getopt - args=$(getopt --name "$PROG" --shell sh -l "$long_opts" -o "$short_opts" -- "$@") - if [ $? -ne 0 ]; then - abort 1 "Type '$PROG --help' to get usage information." - fi - # use 'eval' to remove getopt quoting - eval set -- "$args" -else - # old-style getopt, use compatibility syntax - args=$(getopt "$short_opts" "$@") - if [ $? -ne 0 ]; then - abort 1 "Type '$PROG -h' to get usage information." 
- fi - eval set -- "$args" -fi -set -e - -# Command line parsing -while true -do - case "$1" in - -d|--target) - shift - VENVDIR=$(readlink -m "$1") - ;; - -h|--help) - print_usage - exit $EX_OK - ;; - -v|--verbose) - VERBOSE="-v" - ;; - -s|--system) - USEVIRTUALENV="no" - ;; - -f|--overwrite) - RECREATEDEST=yes - ;; - -r|--recreate) - RECREATEDEST=yes - ;; - -R|--no-recreate) - RECREATEDEST=no - ;; - -y|--yes) - ASKCONFIRMATION=0 - ;; - --url) - shift - RALLY_GIT_URL=$1 - ;; - --branch) - shift - RALLY_GIT_BRANCH=$1 - ;; - -D|--dbtype) - shift - DBTYPE=$1 - case $DBTYPE in - sqlite|mysql|postgresql);; - *) - print_usage | die $EX_USAGE \ - "An invalid option has been detected." - ;; - esac - ;; - --db-user) - shift - DBUSER=$1 - ;; - --db-password) - shift - DBPASSWORD=$1 - ;; - --db-host) - shift - DBHOST=$1 - ;; - --db-name) - shift - DBNAME=$1 - ;; - -p|--python) - shift - PYTHON=$1 - ;; - --develop) - DEVELOPMENT_MODE=true - ;; - --no-color) - RED="" - GREEN="" - NO_COLOR="" - ;; - --) - shift - break - ;; - *) - print_usage | die $EX_USAGE "An invalid option has been detected." - esac - shift -done - -### Post-processing ### - -if [ "$USEVIRTUALENV" == "no" ] && [ -n "$VENVDIR" ]; then - die $EX_USAGE "Ambiguous arguments" <<__EOF__ -Option -d/--target can not be used with --system. -__EOF__ -fi - -if running_as_root; then - if [ -z "$VENVDIR" ]; then - USEVIRTUALENV='no' - fi -else - if [ "$USEVIRTUALENV" == 'no' ]; then - die $EX_USAGE "Insufficient privileges" <<__EOF__ -$REDRoot permissions required in order to install system-wide. -As non-root user you may only install in virtualenv.$NO_COLOR -__EOF__ - fi - if [ -z "$VENVDIR" ]; then - VENVDIR="$HOME"/rally - fi -fi - -# Fix RALLY_DATABASE_DIR if virtualenv is used -if [ "$USEVIRTUALENV" = 'yes' ] -then - RALLY_CONFIGURATION_DIR=$VENVDIR/etc/rally - RALLY_DATABASE_DIR="$VENVDIR"/database -fi - -if [ "$DBTYPE" = 'sqlite' ]; then - if [ "${DBNAME:0:1}" = '/' ]; then - DBFILE="$DBNAME" - else - DBFILE="${RALLY_DATABASE_DIR}/${DBNAME}" - fi - DBCONNSTRING="sqlite:///${DBFILE}" -else - if [ -z "$DBUSER" -o -z "$DBPASSWORD" -o -z "$DBHOST" -o -z "$DBNAME" ] - then - die $EX_USAGE "Missing mandatory options" <<__EOF__ -When specifying a database type different than 'sqlite', you also have -to specify the database name, host, and username and password of a -valid user with write access to the database. - -Please, re-run the script with valid values for the options: -$GREEN - --db-host - --db-name - --db-user - --db-password$NO_COLOR -__EOF__ - fi - DBAUTH="$DBUSER:$DBPASSWORD@$DBHOST" - if [ "$DBTYPE" = 'mysql' ]; then - DBCONNSTRING="$DBTYPE+pymysql://$DBAUTH/$DBNAME" - elif [ "$DBTYPE" = 'postgresql' ]; then - DBCONNSTRING="$DBTYPE+psycopg2://$DBAUTH/$DBNAME" - else - DBCONNSTRING="$DBTYPE://$DBAUTH/$DBNAME" - fi -fi - -# check and install prerequisites -install_required_sw "$(which_missing_commands)" -require_python - - -# Install virtualenv, if required -if [ "$USEVIRTUALENV" = 'yes' ]; then - if [ -d "$VENVDIR" ] - then - if [ $RECREATEDEST = 'ask' ]; then - echo "Destination directory '$VENVDIR' already exists." - echo "I can wipe it out in order to make a new installation," - echo "but this means any files in that directory, and the ones" - echo "underneath it will be deleted." - echo - - if ! ask_yn "Do you want to wipe the installation directory '$VENVDIR'?" - then - echo "*Not* overwriting destination directory '$VENVDIR'." 
- RECREATEDEST=no - else - RECREATEDEST=yes - - fi - fi - - if [ $RECREATEDEST = 'yes' ]; - then - echo "Removing directory $VENVDIR as requested." - rm $VERBOSE -rf "$VENVDIR" - rally_venv - elif [ $RECREATEDEST = 'no' ]; - then - echo "Using existing virtualenv at $VENVDIR..." - . "$VENVDIR"/bin/activate - else - abort 66 "Internal error: unexpected value '$RECREATEDEST' for RECREATEDEST." - fi - else - rally_venv - fi -fi - -# Install rally -ORIG_WD=$(pwd) - -BASEDIR=$(dirname "$(readlink -e "$0")") - -# If we are inside the git repo, don't download it again. -if [ -d "$BASEDIR"/.git ] -then - SOURCEDIR=$BASEDIR - ( - cd "$BASEDIR" - if find . -name '*.py[co]' -exec rm -f {} +; then - echo "Wiped python compiled files." - else - echo "Warning! Unable to wipe python compiled files" - fi - ) -else - if [ "$USEVIRTUALENV" = 'yes' ] - then - SOURCEDIR="$VENVDIR"/src - else - SOURCEDIR="$ORIG_WD"/rally.git - fi - - if ! [ -d "$SOURCEDIR"/.git ] - then - echo "Downloading Rally from git repository $RALLY_GIT_URL ..." - CURRENT_ACTION="downloading-src" - git clone "$RALLY_GIT_URL" "$SOURCEDIR" - ( - cd "$SOURCEDIR" - git checkout "$RALLY_GIT_BRANCH" - ) - if ! [ -d "$SOURCEDIR"/.git ]; then - abort $EX_CANTCREAT "Unable to download git repository" - fi - CURRENT_ACTION="src-downloaded" - fi -fi - -install_db_connector - -# Install rally -cd "$SOURCEDIR" -# Get latest available pip and reset shell cache -pip install -i "$BASE_PIP_URL" -U 'pip!=8' -hash -r - -# Install dependencies -pip install -i "$BASE_PIP_URL" pbr 'tox<=1.6.1' bindep - -# Install binary dependencies -install_required_sw "$(which_missing_packages)" - -# Uninstall possible previous version -pip uninstall -y rally || true -# Install rally -if [ "$DEVELOPMENT_MODE" = "true" ]; then - pip install -i "$BASE_PIP_URL" -e . -else - pip install -i "$BASE_PIP_URL" . -fi - -cd "$ORIG_WD" - -# Post-installation -if [ "$USEVIRTUALENV" = 'yes' ] -then - # Fix bash_completion - cat >> "$VENVDIR"/bin/activate <<__EOF__ - -. "$VENVDIR/etc/bash_completion.d/rally.bash_completion" -__EOF__ - - setup_rally_configuration "$SOURCEDIR" - - if ! [ "$DEVELOPMENT_MODE" = "true" ]; then - SAMPLESDIR=$VENVDIR/samples - mkdir -p "$SAMPLESDIR" - cp -r "$SOURCEDIR"/samples/* "$SAMPLESDIR"/ - fi - mkdir -p "$VENVDIR"/etc/bash_completion.d - install "$SOURCEDIR"/etc/rally.bash_completion \ - "$VENVDIR"/etc/bash_completion.d/ - - cat <<__EOF__ -$GREEN============================== -Installation of Rally is done! -============================== -$NO_COLOR -In order to work with Rally you have to enable the virtual environment -with the command: - - . $VENVDIR/bin/activate - -You need to run the above command on every new shell you open before -using Rally, but just once per session. - -Information about your Rally installation: - - * Method:$GREEN virtualenv$NO_COLOR - * Virtual Environment at:$GREEN $VENVDIR$NO_COLOR - * Database at:$GREEN $RALLY_DATABASE_DIR$NO_COLOR - * Configuration file at:$GREEN $RALLY_CONFIGURATION_DIR$NO_COLOR - * Samples at:$GREEN $SAMPLESDIR$NO_COLOR - -__EOF__ -else - setup_rally_configuration "$SOURCEDIR" - - if ! [ "$DEVELOPMENT_MODE" = "true" ]; then - SAMPLESDIR=/usr/share/rally/samples - mkdir -p "$SAMPLESDIR" - cp -r "$SOURCEDIR"/samples/* "$SAMPLESDIR"/ - fi - ln -s /usr/local/etc/bash_completion.d/rally.bash_completion \ - /etc/bash_completion.d/ 2> /dev/null || true - if [ -f "${DBFILE}" ]; then - chmod 777 "$DBFILE" - fi - - cat <<__EOF__ -$GREEN============================== -Installation of Rally is done! 
-==============================
-$NO_COLOR
-Rally is now installed on your system. Information about your Rally
-installation:
-
-  * Method:$GREEN system$NO_COLOR
-  * Database at:$GREEN $RALLY_DATABASE_DIR$NO_COLOR
-  * Configuration file at:$GREEN $RALLY_CONFIGURATION_DIR$NO_COLOR
-  * Samples at:$GREEN $SAMPLESDIR$NO_COLOR
-__EOF__
-fi
diff --git a/optional-requirements.txt b/optional-requirements.txt
deleted file mode 100644
index e169a4a7..00000000
--- a/optional-requirements.txt
+++ /dev/null
@@ -1,9 +0,0 @@
-# The purpose of optional-requirements is simple: it lists requirements
-# which cannot be placed in the main requirements.txt .
-#
-# Reasons:
-#  - the package doesn't support the same Python versions as the Rally
-#    package supports (see classifiers in setup.cfg)
-#  - the package doesn't have releases
-# If these rules do not apply to your package, feel free to propose it
-# as a main requirement of Rally (the requirements.txt file).
diff --git a/rally-jobs/README.rst b/rally-jobs/README.rst
deleted file mode 100644
index 230aeef0..00000000
--- a/rally-jobs/README.rst
+++ /dev/null
@@ -1,40 +0,0 @@
-Rally job related files
-=======================
-
-This directory contains rally tasks and plugins that are run by OpenStack CI
-(a sketch of launching such a task by hand follows the list below).
-
-Structure
----------
-
-* plugins - directory where you can add rally plugins. Almost everything in
-  Rally is a plugin: benchmark contexts, benchmark scenarios, SLA checks,
-  generic cleanup resources, ....
-
-* extra - all files from this directory will be copied to the gates, so you
-  are able to use absolute paths in rally tasks.
-  Files will be located in ~/.rally/extra/*
-
-* rally.yaml is a task that is run in gates against OpenStack (nova network)
-
-* rally-neutron.yaml is a task that is run in gates against OpenStack with
-  the Neutron service
-
-* rally-designate.yaml is a task that is run in gates against OpenStack with
-  the Designate service. It is an experimental job; to trigger it, post a
-  review comment with the text "check experimental".
-
-* rally-zaqar.yaml is a task that is run in gates against OpenStack with
-  the Zaqar service. It is an experimental job; to trigger it, post a
-  review comment with the text "check experimental".
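Since the files under rally-jobs/ are ordinary Rally task files, any of them
could also be run by hand against a registered deployment. A rough sketch,
assuming the 2017-era CLI (the report filename is illustrative):

    # run one of the gate task files against the active deployment
    rally task start --task rally-jobs/cinder.yaml
    # render an HTML report of the finished run
    rally task report --out cinder-report.html
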
- - -Useful links ------------- - -* More about Rally: https://rally.readthedocs.org/en/latest/ - -* How to add rally-gates: http://rally.readthedocs.io/en/latest/quick_start/gates.html - -* About plugins: https://rally.readthedocs.io/en/latest/plugins/index.html - -* Plugin samples: https://github.com/openstack/rally/tree/master/samples/plugins diff --git a/rally-jobs/certifcation_task_args.yaml b/rally-jobs/certifcation_task_args.yaml deleted file mode 100644 index d77d743e..00000000 --- a/rally-jobs/certifcation_task_args.yaml +++ /dev/null @@ -1,20 +0,0 @@ ---- - service_list: - - authentication - - nova - - neutron - - keystone - - cinder - - glance - use_existing_users: false - image_name: "^(cirros.*-disk|TestVM)$" - flavor_name: "m1.tiny" - glance_image_location: "http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img" - smoke: true - users_amount: 1 - tenants_amount: 1 - controllers_amount: 1 - compute_amount: 1 - storage_amount: 1 - network_amount: 1 - diff --git a/rally-jobs/cinder.yaml b/rally-jobs/cinder.yaml deleted file mode 100755 index b034b38f..00000000 --- a/rally-jobs/cinder.yaml +++ /dev/null @@ -1,1207 +0,0 @@ -{% set image_name = "^(cirros.*-disk|TestVM)$" %} -{% set flavor_name = "m1.tiny" %} ---- - CinderVolumes.create_volume: - - - args: - size: 1 - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - - - args: - size: - min: 1 - max: 2 - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - - - args: - size: 1 - image: - name: {{image_name}} - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 - - Authenticate.validate_cinder: - - - args: - repetitions: 2 - runner: - type: "constant" - times: 10 - concurrency: 5 - context: - users: - tenants: 2 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 - - Quotas.cinder_update_and_delete: - - - args: - max_quota: 1024 - runner: - type: "constant" - times: 4 - concurrency: 1 - context: - users: - tenants: 2 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 - - Quotas.cinder_update: - - - args: - max_quota: 1024 - runner: - type: "constant" - times: 4 - concurrency: 1 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - - CinderVolumes.create_and_delete_volume: - - - args: - size: 1 - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - - - args: - size: 1 - image: - name: {{image_name}} - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - - - args: - size: 1 - image: - name: "image-context-test" - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 2 - roles: - - admin - images: - image_url: "~/.rally/extra/fake-image.img" - disk_format: "raw" - container_format: "bare" - images_per_tenant: 1 - image_name: "image-context-test" - visibility: "public" - sla: - failure_rate: - max: 0 - - CinderVolumes.create_and_update_volume: - - - args: - update_volume_kwargs: - description: "desc_updated" - size: 1 - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - - - args: - 
update_volume_kwargs: - description: "desc_updated" - size: 1 - image: - name: {{image_name}} - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - - CinderVolumes.create_volume_and_update_readonly_flag: - - - args: - size: 1 - read_only: true - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - - - args: - size: 1 - read_only: false - image: - name: {{image_name}} - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - - CinderVolumes.create_and_list_volume: - - - args: - size: 1 - detailed: True - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 - - - args: - size: - min: 1 - max: 3 - detailed: True - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 - - - args: - size: 1 - detailed: True - image: - name: {{image_name}} - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 - - CinderVolumes.create_and_get_volume: - - - args: - size: 1 - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - - - args: - size: - min: 1 - max: 2 - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - - - args: - size: 1 - image: - name: {{image_name}} - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - - CinderVolumes.list_volumes: - - - args: - detailed: True - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 1 - volumes: - size: 1 - volumes_per_tenant: 2 - sla: - failure_rate: - max: 0 - - CinderVolumes.list_types: - {% for s in ("true", "false") %} - - - args: - is_public: {{s}} - runner: - type: "constant" - times: 10 - concurrency: 5 - context: - users: - tenants: 2 - users_per_tenant: 3 - sla: - failure_rate: - max: 0 - {% endfor %} - - CinderVolumes.create_and_accept_transfer: - - - args: - size: 1 - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - - - args: - size: 1 - image: - name: {{image_name}} - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - - CinderVolumes.create_and_extend_volume: - - - args: - size: 1 - new_size: 2 - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 - - - args: - size: - min: 1 - max: 2 - new_size: - min: 3 - max: 4 - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 - - CinderVolumes.create_from_volume_and_delete_volume: - - - args: - size: 1 - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 1 - volumes: - size: 1 - sla: - failure_rate: - max: 0 - - - args: - size: - min: 1 - max: 
2 - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 1 - volumes: - size: 1 - sla: - failure_rate: - max: 0 - - CinderVolumes.create_and_attach_volume: - - - args: - size: 1 - image: - name: {{image_name}} - flavor: - name: {{flavor_name}} - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - - CinderVolumes.create_snapshot_and_attach_volume: - - - args: - volume_type: "lvmdriver-1" - size: - min: 1 - max: 1 - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - servers: - image: - name: {{image_name}} - flavor: - name: {{flavor_name}} - servers_per_tenant: 1 - sla: - failure_rate: - max: 0 - - - - args: - volume_type: "test" - size: - min: 1 - max: 1 - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - servers: - image: - name: {{image_name}} - flavor: - name: {{flavor_name}} - servers_per_tenant: 1 - volume_types: - - "test" - sla: - failure_rate: - max: 0 - - CinderVolumes.create_and_delete_snapshot: - - - args: - force: false - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - volumes: - size: 1 - sla: - failure_rate: - max: 0 - - CinderVolumes.create_and_list_snapshots: - - - args: - force: False - detailed: True - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 1 - volumes: - size: 1 - sla: - failure_rate: - max: 0 - - CinderVolumes.create_and_upload_volume_to_image: - #- - # args: - # size: 1 - # runner: - # type: "constant" - # times: 1 - # concurrency: 1 - # context: - # users: - # tenants: 1 - # users_per_tenant: 1 - # sla: - # failure_rate: - # max: 0 - - - args: - size: 1 - image: - name: {{image_name}} - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - api_versions: - glance: - version: 2 - sla: - failure_rate: - max: 0 - - - args: - size: 1 - volume_type: test - image: - name: {{image_name}} - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - volume_types: - - test - sla: - failure_rate: - max: 0 - - CinderVolumes.create_volume_backup: - - - args: - size: 1 - do_delete: True - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 1 - roles: - - "Member" - sla: - failure_rate: - max: 0 - - - args: - size: 1 - do_delete: False - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 1 - roles: - - "Member" - sla: - failure_rate: - max: 0 - - CinderVolumeBackups.create_incremental_volume_backup: - - - args: - size: 1 - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - roles: - - "admin" - sla: - failure_rate: - max: 40 - - CinderVolumes.create_and_restore_volume_backup: - - - args: - size: 1 - do_delete: True - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 1 - roles: - - "Member" - sla: - failure_rate: - max: 0 - - - args: - size: 1 - do_delete: False - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 1 - roles: - - "Member" - sla: - failure_rate: - max: 0 - - 
CinderVolumes.create_and_list_volume_backups: - - - args: - size: 1 - detailed: True - do_delete: True - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 1 - roles: - - "Member" - sla: - failure_rate: - max: 0 - - - args: - size: 1 - detailed: True - do_delete: False - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 1 - roles: - - "Member" - sla: - failure_rate: - max: 0 - - CinderVolumes.create_nested_snapshots_and_attach_volume: - - - args: - size: - min: 1 - max: 1 - nested_level: 2 - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 1 - servers: - image: - name: {{image_name}} - flavor: - name: {{flavor_name}} - servers_per_tenant: 2 - sla: - failure_rate: - max: 0 - - CinderVolumes.create_volume_and_clone: - - - args: - size: 1 - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - - - args: - size: - min: 1 - max: 1 - nested_level: 2 - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - - CinderVolumeTypes.create_and_update_volume_type: - - - args: - description: "test" - update_description: "test update" - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - - CinderVolumes.create_volume_from_snapshot: - - - args: - do_delete: true - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - volumes: - size: 1 - sla: - failure_rate: - max: 0 - - CinderVolumeTypes.create_and_get_volume_type: - - - args: - description: "rally tests creating types" - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - - CinderVolumeTypes.create_and_delete_volume_type: - - - args: - description: "rally tests creating types" - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - - CinderVolumeTypes.create_and_delete_encryption_type: - - - args: - provider: "LuksEncryptor" - cipher: "aes-xts-plain64" - key_size: 512 - control_location: "front-end" - runner: - type: "constant" - times: 4 - concurrency: 1 - context: - users: - tenants: 2 - users_per_tenant: 2 - volume_types: [ - "test_type1", - "test_type2", - "test_type3", - "test_type4" - ] - sla: - failure_rate: - max: 0 - - - args: - create_specs: - provider: "LuksEncryptor" - cipher: "aes-xts-plain64" - key_size: 512 - control_location: "front-end" - runner: - type: "constant" - times: 4 - concurrency: 1 - context: - users: - tenants: 2 - users_per_tenant: 2 - volume_types: [ - "test_type1", - "test_type2", - "test_type3", - "test_type4" - ] - sla: - failure_rate: - max: 0 - - CinderVolumeTypes.create_and_list_volume_types: - - - args: - description: "rally tests creating types" - runner: - type: "constant" - times: 5 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - - CinderVolumeTypes.create_volume_type_and_encryption_type: - - - args: - description: "rally tests creating types" - provider: "LuksEncryptor" - cipher: "aes-xts-plain64" - key_size: 512 - control_location: "front-end" - runner: - type: "constant" - times: 4 - 
concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - - - args: - create_specs: - provider: "LuksEncryptor" - cipher: "aes-xts-plain64" - key_size: 512 - control_location: "front-end" - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - - CinderVolumeTypes.create_and_list_encryption_type: - - - args: - provider: "LuksEncryptor" - cipher: "aes-xts-plain64" - key_size: 512 - control_location: "front-end" - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - volume_types: [ - "test_type1", - "test_type2", - "test_type3", - "test_type4" - ] - sla: - failure_rate: - max: 0 - - - args: - create_specs: - provider: "LuksEncryptor" - cipher: "aes-xts-plain64" - key_size: 512 - control_location: "front-end" - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - volume_types: [ - "test_type1", - "test_type2", - "test_type3", - "test_type4" - ] - sla: - failure_rate: - max: 0 - - CinderVolumeTypes.create_and_set_volume_type_keys: - - - args: - description: "rally tests creating types" - volume_type_key: - volume_backend_name: "LVM_iSCSI" - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - - CinderVolumes.list_transfers: - - - args: - detailed: true - runner: - type: "constant" - times: 3 - concurrency: 2 - context: - users: - tenants: 3 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - - Quotas.cinder_get: - - - runner: - type: "constant" - times: 6 - concurrency: 3 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - - CinderQos.create_and_list_qos: - - - args: - consumer: "both" - write_iops_sec: "10" - read_iops_sec: "1000" - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - - CinderQos.create_and_get_qos: - - - args: - consumer: "both" - write_iops_sec: "10" - read_iops_sec: "1000" - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - - CinderVolumeTypes.create_get_and_delete_encryption_type: - - - args: - provider: "LuksEncryptor" - cipher: "aes-xts-plain64" - key_size: 512 - control_location: "front-end" - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - volume_types: [ - "test_type1", - "test_type2", - "test_type3", - "test_type4" - ] - sla: - failure_rate: - max: 0 - - CinderVolumeTypes.create_and_update_encryption_type: - - - args: - create_provider: "LuksEncryptor" - create_cipher: "aes-xts-plain64" - create_key_size: 512 - create_control_location: "front-end" - update_provider: "CryptsetupEncryptor" - update_cipher: "aes-xts-plain" - update_key_size: 256 - update_control_location: "back-end" - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - volume_types: [ - "test_type1", - "test_type2", - "test_type3", - "test_type4" - ] - sla: - failure_rate: - max: 0 - - CinderQos.create_and_set_qos: - - - args: - consumer: "back-end" - write_iops_sec: "10" - read_iops_sec: "1000" - set_consumer: "both" - set_write_iops_sec: "11" - set_read_iops_sec: "1001" - runner: - type: "constant" - times: 5 - 
concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - - CinderVolumeTypes.create_volume_type_add_and_list_type_access: - - - args: - description: "rally tests creating types" - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/rally-jobs/extra/README.rst b/rally-jobs/extra/README.rst deleted file mode 100644 index bd150f84..00000000 --- a/rally-jobs/extra/README.rst +++ /dev/null @@ -1,11 +0,0 @@ -Extra files -=========== - -All files from this directory will be copy pasted to gates, so you are able to -use absolute path in rally tasks. Files will be in ~/.rally/extra/* - -murano/ directory ------------------ - -Here we have Murano applications that is used to prepare Murano context and -to deploy environment. \ No newline at end of file diff --git a/rally-jobs/extra/autoscaling_group.yaml.template b/rally-jobs/extra/autoscaling_group.yaml.template deleted file mode 100644 index 6c9892b4..00000000 --- a/rally-jobs/extra/autoscaling_group.yaml.template +++ /dev/null @@ -1,46 +0,0 @@ -heat_template_version: 2013-05-23 - -parameters: - flavor: - type: string - default: m1.tiny - constraints: - - custom_constraint: nova.flavor - image: - type: string - default: cirros-0.3.5-x86_64-disk - constraints: - - custom_constraint: glance.image - scaling_adjustment: - type: number - default: 1 - max_size: - type: number - default: 5 - constraints: - - range: {min: 1} - - -resources: - asg: - type: OS::Heat::AutoScalingGroup - properties: - resource: - type: OS::Nova::Server - properties: - image: { get_param: image } - flavor: { get_param: flavor } - min_size: 1 - desired_capacity: 3 - max_size: { get_param: max_size } - - scaling_policy: - type: OS::Heat::ScalingPolicy - properties: - adjustment_type: change_in_capacity - auto_scaling_group_id: {get_resource: asg} - scaling_adjustment: { get_param: scaling_adjustment } - -outputs: - scaling_url: - value: {get_attr: [scaling_policy, alarm_url]} diff --git a/rally-jobs/extra/autoscaling_policy.yaml.template b/rally-jobs/extra/autoscaling_policy.yaml.template deleted file mode 100644 index a22487e3..00000000 --- a/rally-jobs/extra/autoscaling_policy.yaml.template +++ /dev/null @@ -1,17 +0,0 @@ -heat_template_version: 2013-05-23 - -resources: - test_group: - type: OS::Heat::AutoScalingGroup - properties: - desired_capacity: 0 - max_size: 0 - min_size: 0 - resource: - type: OS::Heat::RandomString - test_policy: - type: OS::Heat::ScalingPolicy - properties: - adjustment_type: change_in_capacity - auto_scaling_group_id: { get_resource: test_group } - scaling_adjustment: 1 \ No newline at end of file diff --git a/rally-jobs/extra/default.yaml.template b/rally-jobs/extra/default.yaml.template deleted file mode 100644 index eb4f2f2d..00000000 --- a/rally-jobs/extra/default.yaml.template +++ /dev/null @@ -1 +0,0 @@ -heat_template_version: 2014-10-16 \ No newline at end of file diff --git a/rally-jobs/extra/fake-image.img b/rally-jobs/extra/fake-image.img deleted file mode 100644 index e69de29b..00000000 diff --git a/rally-jobs/extra/hook_example_script.sh b/rally-jobs/extra/hook_example_script.sh deleted file mode 100644 index c084c0f8..00000000 --- a/rally-jobs/extra/hook_example_script.sh +++ /dev/null @@ -1,56 +0,0 @@ -#!/bin/sh - -rand_int() { - od -An -tu -N1 /dev/urandom | tr -d ' ' -} - -cat << EOF -{ - "additive": [ - { - "title": "Statistics table from Hook", - "chart_plugin": "StatsTable", - "data": 
[ - ["Alice", $(rand_int)], - ["Bob", $(rand_int)], - ["Carol", $(rand_int)]] - }, - { - "title": "StackedArea chart from Hook", - "description": "This is generated by ${0}", - "chart_plugin": "StackedArea", - "data": [ - ["Alpha", $(rand_int)], - ["Beta", $(rand_int)], - ["Gamma", $(rand_int)]] - } - ], - "complete": [ - { - "title": "Lines chart from Hook", - "description": "Random data generated by ${0}", - "chart_plugin": "Lines", - "axis_label": "X-axis label", - "label": "Y-axis label", - "data": [ - ["Foo", [[1, $(rand_int)], [2, $(rand_int)], [3, $(rand_int)], [4, $(rand_int)], [5, $(rand_int)]]], - ["Bar", [[1, $(rand_int)], [2, $(rand_int)], [3, $(rand_int)], [4, $(rand_int)], [5, $(rand_int)]]], - ["Spam", [[1, $(rand_int)], [2, $(rand_int)], [3, $(rand_int)], [4, $(rand_int)], [5, $(rand_int)]]], - ["Quiz", [[1, $(rand_int)], [2, $(rand_int)], [3, $(rand_int)], [4, $(rand_int)], [5, $(rand_int)]]] - ] - }, - { - "title": "Pie chart from Hook", - "description": "Yet another data generated by ${0}", - "chart_plugin": "Pie", - "data": [ - ["Cat", $(rand_int)], - ["Tiger", $(rand_int)], - ["Jaguar", $(rand_int)], - ["Panther", $(rand_int)], - ["Lynx", $(rand_int)] - ] - } - ] -} -EOF diff --git a/rally-jobs/extra/install_benchmark.sh b/rally-jobs/extra/install_benchmark.sh deleted file mode 100644 index 51c998be..00000000 --- a/rally-jobs/extra/install_benchmark.sh +++ /dev/null @@ -1,24 +0,0 @@ -#!/bin/sh - -set -e - -main() { - cat > ~/dd_test.sh <<'EOF' -#!/bin/sh -time_seconds(){ (time -p $1 ) 2>&1 |awk '/real/{print $2}'; } -file=/tmp/test.img -c=1000 #1GB -write_seq_1gb=$(time_seconds "dd if=/dev/zero of=$file bs=1M count=$c") -read_seq_1gb=$(time_seconds "dd if=$file of=/dev/null bs=1M count=$c") -[ -f $file ] && rm $file - -echo "{ - \"write_seq_1gb\": $write_seq_1gb, - \"read_seq_1gb\": $read_seq_1gb - }" -EOF - - chmod a+x ~/dd_test.sh -} - -main diff --git a/rally-jobs/extra/instance_test.sh b/rally-jobs/extra/instance_test.sh deleted file mode 100644 index e15bd045..00000000 --- a/rally-jobs/extra/instance_test.sh +++ /dev/null @@ -1,107 +0,0 @@ -#!/bin/sh -# Load server and output JSON results ready to be processed -# by Rally scenario - -for ex in awk top grep free tr df dc dd gzip -do - if ! 
type ${ex} >/dev/null - then - echo "Executable is required by script but not available on a server: ${ex}" >&2 - return 1 - fi -done - -get_used_cpu_percent() { - echo 100 $(top -b -n 1 | grep -i CPU | head -n 1 | awk '{print $8}' | tr -d %) - p | dc -} - -get_used_ram_percent() { - local total=$(free | grep Mem: | awk '{print $2}') - local used=$(free | grep -- -/+\ buffers | awk '{print $3}') - echo ${used} 100 \* ${total} / p | dc -} - -get_used_disk_percent() { - df -P / | grep -v Filesystem | awk '{print $5}' | tr -d % -} - -get_seconds() { - (time -p ${1}) 2>&1 | awk '/real/{print $2}' -} - -complete_load() { - local script_file=${LOAD_SCRIPT_FILE:-/tmp/load.sh} - local stop_file=${LOAD_STOP_FILE:-/tmp/load.stop} - local processes_num=${LOAD_PROCESSES_COUNT:-20} - local size=${LOAD_SIZE_MB:-5} - - cat << EOF > ${script_file} -until test -e ${stop_file} -do dd if=/dev/urandom bs=1M count=${size} 2>/dev/null | gzip >/dev/null ; done -EOF - - local sep - local cpu - local ram - local dis - rm -f ${stop_file} - for i in $(seq ${processes_num}) - do - i=$((i-1)) - sh ${script_file} & - cpu="${cpu}${sep}[${i}, $(get_used_cpu_percent)]" - ram="${ram}${sep}[${i}, $(get_used_ram_percent)]" - dis="${dis}${sep}[${i}, $(get_used_disk_percent)]" - sep=", " - done - > ${stop_file} - cat << EOF - { - "title": "Generate load by spawning processes", - "description": "Each process runs gzip for ${size}M urandom data in a loop", - "chart_plugin": "Lines", - "axis_label": "Number of processes", - "label": "Usage, %", - "data": [ - ["CPU", [${cpu}]], - ["Memory", [${ram}]], - ["Disk", [${dis}]]] - } -EOF -} - -additive_dd() { - local c=${1:-50} # Megabytes - local file=/tmp/dd_test.img - local write=$(get_seconds "dd if=/dev/urandom of=${file} bs=1M count=${c}") - local read=$(get_seconds "dd if=${file} of=/dev/null bs=1M count=${c}") - local gzip=$(get_seconds "gzip ${file}") - rm ${file}.gz - cat << EOF - { - "title": "Write, read and gzip file", - "description": "Using file '${file}', size ${c}Mb.", - "chart_plugin": "StackedArea", - "data": [ - ["write_${c}M", ${write}], - ["read_${c}M", ${read}], - ["gzip_${c}M", ${gzip}]] - }, - { - "title": "Statistics for write/read/gzip", - "chart_plugin": "StatsTable", - "data": [ - ["write_${c}M", ${write}], - ["read_${c}M", ${read}], - ["gzip_${c}M", ${gzip}]] - } - -EOF -} - -cat << EOF -{ - "additive": [$(additive_dd)], - "complete": [$(complete_load)] -} -EOF diff --git a/rally-jobs/extra/mistral_input.json b/rally-jobs/extra/mistral_input.json deleted file mode 100644 index 2d3edf39..00000000 --- a/rally-jobs/extra/mistral_input.json +++ /dev/null @@ -1 +0,0 @@ -{"input1": "value1", "some_json_input": {"a": "b"}} \ No newline at end of file diff --git a/rally-jobs/extra/mistral_params.json b/rally-jobs/extra/mistral_params.json deleted file mode 100644 index e75c3f82..00000000 --- a/rally-jobs/extra/mistral_params.json +++ /dev/null @@ -1 +0,0 @@ -{"env": {"env_param": "env_param_value"}} \ No newline at end of file diff --git a/rally-jobs/extra/mistral_wb.yaml b/rally-jobs/extra/mistral_wb.yaml deleted file mode 100644 index 98ccdceb..00000000 --- a/rally-jobs/extra/mistral_wb.yaml +++ /dev/null @@ -1,16 +0,0 @@ ---- -version: "2.0" - -name: wb - -workflows: - wf1: - type: direct - input: - - input1: input1 - - some_json_input: {} - tasks: - hello: - action: std.echo output="Hello" - publish: - result: $ diff --git a/rally-jobs/extra/murano/applications/HelloReporter/io.murano.apps.HelloReporter.zip 
b/rally-jobs/extra/murano/applications/HelloReporter/io.murano.apps.HelloReporter.zip
deleted file mode 100644
index 690b1285..00000000
Binary files a/rally-jobs/extra/murano/applications/HelloReporter/io.murano.apps.HelloReporter.zip and /dev/null differ
diff --git a/rally-jobs/extra/murano/applications/HelloReporter/io.murano.apps.HelloReporter/Classes/HelloReporter.yaml b/rally-jobs/extra/murano/applications/HelloReporter/io.murano.apps.HelloReporter/Classes/HelloReporter.yaml
deleted file mode 100644
index 2eca9d0d..00000000
--- a/rally-jobs/extra/murano/applications/HelloReporter/io.murano.apps.HelloReporter/Classes/HelloReporter.yaml
+++ /dev/null
@@ -1,25 +0,0 @@
-Namespaces:
-  =: io.murano.apps
-  std: io.murano
-  sys: io.murano.system
-
-
-Name: HelloReporter
-
-Extends: std:Application
-
-Properties:
-  name:
-    Contract: $.string().notNull()
-
-Workflow:
-  initialize:
-    Body:
-      - $.environment: $.find(std:Environment).require()
-
-  deploy:
-    Body:
-      - If: not $.getAttr(deployed, false)
-        Then:
-          - $.environment.reporter.report($this, 'Starting deployment! Hello!')
-          - $.setAttr(deployed, True)
diff --git a/rally-jobs/extra/murano/applications/HelloReporter/io.murano.apps.HelloReporter/UI/ui.yaml b/rally-jobs/extra/murano/applications/HelloReporter/io.murano.apps.HelloReporter/UI/ui.yaml
deleted file mode 100644
index 2d572f5f..00000000
--- a/rally-jobs/extra/murano/applications/HelloReporter/io.murano.apps.HelloReporter/UI/ui.yaml
+++ /dev/null
@@ -1,23 +0,0 @@
-Version: 2
-
-Application:
-  ?:
-    type: io.murano.apps.HelloReporter
-  name: $.appConfiguration.name
-
-Forms:
-  - appConfiguration:
-      fields:
-        - name: name
-          type: string
-          label: Application Name
-          description: >-
-            Enter a desired name for the application. Just A-Z, a-z, 0-9,
-            dash and underscore are allowed
-        - name: unitNamingPattern
-          type: string
-          required: false
-          hidden: true
-          widgetMedia:
-            js: ['muranodashboard/js/support_placeholder.js']
-            css: {all: ['muranodashboard/css/support_placeholder.css']}
diff --git a/rally-jobs/extra/murano/applications/HelloReporter/io.murano.apps.HelloReporter/manifest.yaml b/rally-jobs/extra/murano/applications/HelloReporter/io.murano.apps.HelloReporter/manifest.yaml
deleted file mode 100644
index 58075461..00000000
--- a/rally-jobs/extra/murano/applications/HelloReporter/io.murano.apps.HelloReporter/manifest.yaml
+++ /dev/null
@@ -1,10 +0,0 @@
-Format: 1.0
-Type: Application
-FullName: io.murano.apps.HelloReporter
-Name: HelloReporter
-Description: |
-  HelloReporter test app.
-Author: 'Mirantis, Inc'
-Tags: []
-Classes:
-  io.murano.apps.HelloReporter: HelloReporter.yaml
diff --git a/rally-jobs/extra/murano/applications/README.rst b/rally-jobs/extra/murano/applications/README.rst
deleted file mode 100644
index ed710758..00000000
--- a/rally-jobs/extra/murano/applications/README.rst
+++ /dev/null
@@ -1,17 +0,0 @@
-Murano applications
-===================
-
-Files for Murano benchmarking
-
-Structure
----------
-
-* <application name>/ directories. Each directory stores a simple Murano
-  package used for environment deployment in the Murano context; it can also
-  hold any other files the application needs (the HelloReporter package
-  above is sketched after this list).
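For orientation, the HelloReporter files deleted above follow the usual Murano
package layout, reconstructed here from the paths in this diff (the .zip is
the same tree, archived):

    io.murano.apps.HelloReporter/
    ├── manifest.yaml            # package metadata, class-to-file mapping
    ├── Classes/
    │   └── HelloReporter.yaml   # MuranoPL class extending io.murano Application
    └── UI/
        └── ui.yaml              # dashboard form definition
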
- - -Useful links ------------- - -* `More about Murano package `_ diff --git a/rally-jobs/extra/random_strings.yaml.template b/rally-jobs/extra/random_strings.yaml.template deleted file mode 100644 index 2dd676c1..00000000 --- a/rally-jobs/extra/random_strings.yaml.template +++ /dev/null @@ -1,13 +0,0 @@ -heat_template_version: 2014-10-16 - -description: Test template for rally create-update-delete scenario - -resources: - test_string_one: - type: OS::Heat::RandomString - properties: - length: 20 - test_string_two: - type: OS::Heat::RandomString - properties: - length: 20 \ No newline at end of file diff --git a/rally-jobs/extra/resource_group.yaml.template b/rally-jobs/extra/resource_group.yaml.template deleted file mode 100644 index b3f505fa..00000000 --- a/rally-jobs/extra/resource_group.yaml.template +++ /dev/null @@ -1,13 +0,0 @@ -heat_template_version: 2014-10-16 - -description: Test template for rally create-update-delete scenario - -resources: - test_group: - type: OS::Heat::ResourceGroup - properties: - count: 2 - resource_def: - type: OS::Heat::RandomString - properties: - length: 20 \ No newline at end of file diff --git a/rally-jobs/extra/resource_group_server_with_volume.yaml.template b/rally-jobs/extra/resource_group_server_with_volume.yaml.template deleted file mode 100644 index fbc8842a..00000000 --- a/rally-jobs/extra/resource_group_server_with_volume.yaml.template +++ /dev/null @@ -1,44 +0,0 @@ -heat_template_version: 2014-10-16 - -description: > - Test template that creates a resource group with servers and volumes. - The template allows to create a lot of nested stacks with standard - configuration: nova instance, cinder volume attached to that instance - -parameters: - - num_instances: - type: number - description: number of instances that should be created in resource group - constraints: - - range: {min: 1} - instance_image: - type: string - default: cirros-0.3.5-x86_64-disk - instance_volume_size: - type: number - description: Size of volume to attach to instance - default: 1 - constraints: - - range: {min: 1, max: 1024} - instance_flavor: - type: string - description: Type of the instance to be created. - default: m1.tiny - instance_availability_zone: - type: string - description: The Availability Zone to launch the instance. - default: nova - -resources: - group_of_volumes: - type: OS::Heat::ResourceGroup - properties: - count: {get_param: num_instances} - resource_def: - type: /home/jenkins/.rally/extra/server_with_volume.yaml.template - properties: - image: {get_param: instance_image} - volume_size: {get_param: instance_volume_size} - flavor: {get_param: instance_flavor} - availability_zone: {get_param: instance_availability_zone} diff --git a/rally-jobs/extra/resource_group_with_constraint.yaml.template b/rally-jobs/extra/resource_group_with_constraint.yaml.template deleted file mode 100644 index 6eca4bb4..00000000 --- a/rally-jobs/extra/resource_group_with_constraint.yaml.template +++ /dev/null @@ -1,21 +0,0 @@ -heat_template_version: 2013-05-23 - -description: Template for testing caching. 
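# OS::Heat::TestResource is Heat's fake resource type for tests; its
# constraint_prop_secs property runs a deliberately slow custom constraint
# that takes the given number of seconds to validate. With the defaults
# below (count: 40, delay: 0.3) uncached validation costs roughly
# 40 * 0.3 = 12 seconds per stack operation, while a cached check is
# near-free; presumably that difference is the "caching" being tested.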
- -parameters: - count: - type: number - default: 40 - delay: - type: number - default: 0.3 - -resources: - rg: - type: OS::Heat::ResourceGroup - properties: - count: {get_param: count} - resource_def: - type: OS::Heat::TestResource - properties: - constraint_prop_secs: {get_param: delay} diff --git a/rally-jobs/extra/resource_group_with_outputs.yaml.template b/rally-jobs/extra/resource_group_with_outputs.yaml.template deleted file mode 100644 index f47d03cc..00000000 --- a/rally-jobs/extra/resource_group_with_outputs.yaml.template +++ /dev/null @@ -1,37 +0,0 @@ -heat_template_version: 2013-05-23 -parameters: - attr_wait_secs: - type: number - default: 0.5 - -resources: - rg: - type: OS::Heat::ResourceGroup - properties: - count: 10 - resource_def: - type: OS::Heat::TestResource - properties: - attr_wait_secs: {get_param: attr_wait_secs} - -outputs: - val1: - value: {get_attr: [rg, resource.0.output]} - val2: - value: {get_attr: [rg, resource.1.output]} - val3: - value: {get_attr: [rg, resource.2.output]} - val4: - value: {get_attr: [rg, resource.3.output]} - val5: - value: {get_attr: [rg, resource.4.output]} - val6: - value: {get_attr: [rg, resource.5.output]} - val7: - value: {get_attr: [rg, resource.6.output]} - val8: - value: {get_attr: [rg, resource.7.output]} - val9: - value: {get_attr: [rg, resource.8.output]} - val10: - value: {get_attr: [rg, resource.9.output]} \ No newline at end of file diff --git a/rally-jobs/extra/server_with_ports.yaml.template b/rally-jobs/extra/server_with_ports.yaml.template deleted file mode 100644 index 0e344fc0..00000000 --- a/rally-jobs/extra/server_with_ports.yaml.template +++ /dev/null @@ -1,64 +0,0 @@ -heat_template_version: 2013-05-23 - -parameters: - # set all correct defaults for parameters before launch test - public_net: - type: string - default: public - image: - type: string - default: cirros-0.3.5-x86_64-disk - flavor: - type: string - default: m1.tiny - cidr: - type: string - default: 11.11.11.0/24 - -resources: - server: - type: OS::Nova::Server - properties: - image: {get_param: image} - flavor: {get_param: flavor} - networks: - - port: { get_resource: server_port } - - router: - type: OS::Neutron::Router - properties: - external_gateway_info: - network: {get_param: public_net} - - router_interface: - type: OS::Neutron::RouterInterface - properties: - router_id: { get_resource: router } - subnet_id: { get_resource: private_subnet } - - private_net: - type: OS::Neutron::Net - - private_subnet: - type: OS::Neutron::Subnet - properties: - network: { get_resource: private_net } - cidr: {get_param: cidr} - - port_security_group: - type: OS::Neutron::SecurityGroup - properties: - name: default_port_security_group - description: > - Default security group assigned to port. The neutron default group is not - used because neutron creates several groups with the same name=default and - nova cannot chooses which one should it use. 
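  # The port below therefore attaches the group by resource reference
  # (get_resource) rather than by name: with several groups all named
  # "default" visible to a tenant, a name-based lookup is ambiguous, while
  # a reference to this resource is not.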
-
-  server_port:
-    type: OS::Neutron::Port
-    properties:
-      network: {get_resource: private_net}
-      fixed_ips:
-        - subnet: { get_resource: private_subnet }
-      security_groups:
-        - { get_resource: port_security_group }
diff --git a/rally-jobs/extra/server_with_volume.yaml.template b/rally-jobs/extra/server_with_volume.yaml.template
deleted file mode 100644
index 6e65cec7..00000000
--- a/rally-jobs/extra/server_with_volume.yaml.template
+++ /dev/null
@@ -1,39 +0,0 @@
-heat_template_version: 2013-05-23
-
-parameters:
-  # set all correct defaults for parameters before launch test
-  image:
-    type: string
-    default: cirros-0.3.5-x86_64-disk
-  flavor:
-    type: string
-    default: m1.tiny
-  availability_zone:
-    type: string
-    description: The Availability Zone to launch the instance.
-    default: nova
-  volume_size:
-    type: number
-    description: Size of the volume to be created.
-    default: 1
-    constraints:
-      - range: { min: 1, max: 1024 }
-        description: must be between 1 and 1024 Gb.
-
-resources:
-  server:
-    type: OS::Nova::Server
-    properties:
-      image: {get_param: image}
-      flavor: {get_param: flavor}
-  cinder_volume:
-    type: OS::Cinder::Volume
-    properties:
-      size: { get_param: volume_size }
-      availability_zone: { get_param: availability_zone }
-  volume_attachment:
-    type: OS::Cinder::VolumeAttachment
-    properties:
-      volume_id: { get_resource: cinder_volume }
-      instance_uuid: { get_resource: server}
-      mountpoint: /dev/vdc
diff --git a/rally-jobs/extra/updated_autoscaling_policy_inplace.yaml.template b/rally-jobs/extra/updated_autoscaling_policy_inplace.yaml.template
deleted file mode 100644
index cf34879c..00000000
--- a/rally-jobs/extra/updated_autoscaling_policy_inplace.yaml.template
+++ /dev/null
@@ -1,23 +0,0 @@
-heat_template_version: 2013-05-23
-
-description: >
-  Test template for create-update-delete-stack scenario in rally.
-  The template updates resource parameters without resource re-creation
-  (replacement) in the stack defined by autoscaling_policy.yaml.template,
-  which makes it possible to measure a "pure" resource update operation.
-
-resources:
-  test_group:
-    type: OS::Heat::AutoScalingGroup
-    properties:
-      desired_capacity: 0
-      max_size: 0
-      min_size: 0
-      resource:
-        type: OS::Heat::RandomString
-  test_policy:
-    type: OS::Heat::ScalingPolicy
-    properties:
-      adjustment_type: change_in_capacity
-      auto_scaling_group_id: { get_resource: test_group }
-      scaling_adjustment: -1
diff --git a/rally-jobs/extra/updated_random_strings_add.yaml.template b/rally-jobs/extra/updated_random_strings_add.yaml.template
deleted file mode 100644
index e06d42e0..00000000
--- a/rally-jobs/extra/updated_random_strings_add.yaml.template
+++ /dev/null
@@ -1,19 +0,0 @@
-heat_template_version: 2014-10-16
-
-description: >
-  Test template for create-update-delete-stack scenario in rally.
-  The template updates the stack defined by random_strings.yaml.template with an additional resource.
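# Together with its delete and replace siblings below, this template
# exercises the three basic update paths against random_strings.yaml.template
# (two RandomStrings of length 20): this one adds test_string_three (a pure
# resource CREATE), the delete variant keeps only test_string_one (a pure
# DELETE), and the replace variant bumps a length to 40, which RandomString
# cannot change in place, forcing an update-replace.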
-
-resources:
-  test_string_one:
-    type: OS::Heat::RandomString
-    properties:
-      length: 20
-  test_string_two:
-    type: OS::Heat::RandomString
-    properties:
-      length: 20
-  test_string_three:
-    type: OS::Heat::RandomString
-    properties:
-      length: 20
\ No newline at end of file
diff --git a/rally-jobs/extra/updated_random_strings_delete.yaml.template b/rally-jobs/extra/updated_random_strings_delete.yaml.template
deleted file mode 100644
index d02593e3..00000000
--- a/rally-jobs/extra/updated_random_strings_delete.yaml.template
+++ /dev/null
@@ -1,11 +0,0 @@
-heat_template_version: 2014-10-16
-
-description: >
-  Test template for create-update-delete-stack scenario in rally.
-  The template deletes one resource from the stack defined by random_strings.yaml.template.
-
-resources:
-  test_string_one:
-    type: OS::Heat::RandomString
-    properties:
-      length: 20
\ No newline at end of file
diff --git a/rally-jobs/extra/updated_random_strings_replace.yaml.template b/rally-jobs/extra/updated_random_strings_replace.yaml.template
deleted file mode 100644
index 46d8bff4..00000000
--- a/rally-jobs/extra/updated_random_strings_replace.yaml.template
+++ /dev/null
@@ -1,19 +0,0 @@
-heat_template_version: 2014-10-16
-
-description: >
-  Test template for create-update-delete-stack scenario in rally.
-  The template deletes one resource from the stack defined by
-  random_strings.yaml.template and re-creates it with the updated parameters
-  (so-called update-replace). That happens because some parameters cannot be
-  changed without resource re-creation. The template makes it possible to
-  measure the performance of the update-replace operation.
-
-resources:
-  test_string_one:
-    type: OS::Heat::RandomString
-    properties:
-      length: 20
-  test_string_two:
-    type: OS::Heat::RandomString
-    properties:
-      length: 40
\ No newline at end of file
diff --git a/rally-jobs/extra/updated_resource_group_increase.yaml.template b/rally-jobs/extra/updated_resource_group_increase.yaml.template
deleted file mode 100644
index 891074eb..00000000
--- a/rally-jobs/extra/updated_resource_group_increase.yaml.template
+++ /dev/null
@@ -1,16 +0,0 @@
-heat_template_version: 2014-10-16
-
-description: >
-  Test template for create-update-delete-stack scenario in rally.
-  The template updates one resource from the stack defined by resource_group.yaml.template
-  and adds child resources to it.
-
-resources:
-  test_group:
-    type: OS::Heat::ResourceGroup
-    properties:
-      count: 3
-      resource_def:
-        type: OS::Heat::RandomString
-        properties:
-          length: 20
\ No newline at end of file
diff --git a/rally-jobs/extra/updated_resource_group_reduce.yaml.template b/rally-jobs/extra/updated_resource_group_reduce.yaml.template
deleted file mode 100644
index b4d1d173..00000000
--- a/rally-jobs/extra/updated_resource_group_reduce.yaml.template
+++ /dev/null
@@ -1,16 +0,0 @@
-heat_template_version: 2014-10-16
-
-description: >
-  Test template for create-update-delete-stack scenario in rally.
-  The template updates one resource from the stack defined by resource_group.yaml.template
-  and removes child resources from it.
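# For scale: resource_group.yaml.template starts the group at count: 2, the
# increase variant above raises it to 3, and this one drops it to 1, so each
# update makes Heat create or delete exactly one nested RandomString member.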
- -resources: - test_group: - type: OS::Heat::ResourceGroup - properties: - count: 1 - resource_def: - type: OS::Heat::RandomString - properties: - length: 20 \ No newline at end of file diff --git a/rally-jobs/extra/workload/wordpress_heat_template.yaml b/rally-jobs/extra/workload/wordpress_heat_template.yaml deleted file mode 100644 index 9cdb3e38..00000000 --- a/rally-jobs/extra/workload/wordpress_heat_template.yaml +++ /dev/null @@ -1,219 +0,0 @@ -heat_template_version: 2014-10-16 - -description: > - Heat WordPress template to support F23, using only Heat OpenStack-native - resource types, and without the requirement for heat-cfntools in the image. - WordPress is web software you can use to create a beautiful website or blog. - This template installs a single-instance WordPress deployment using a local - MySQL database to store the data. - -parameters: - - wp_instances_count: - type: number - default: 1 - - timeout: - type: number - description: Timeout for WaitCondition, seconds - default: 1000 - - router_id: - type: string - description: ID of the router - default: b9135c24-d998-4e2f-b0aa-2b0a40c21ae5 - - network_id: - type: string - description: ID of the network to allocate floating IP from - default: 4eabc459-0096-4479-b105-67ec0cff18cb - - key_name: - type: string - description : Name of a KeyPair to enable SSH access to the instance - default: nova-kp - - wp_instance_type: - type: string - description: Instance type for WordPress server - default: m1.small - - wp_image: - type: string - description: > - Name or ID of the image to use for the WordPress server. - Recommended value is fedora-23.x86_64; - http://cloud.fedoraproject.org/fedora-23.x86_64.qcow2. - default: fedora-23.x86_64 - - image: - type: string - description: > - Name or ID of the image to use for the gate-node. - default: fedora-23.x86_64 - - instance_type: - type: string - description: Instance type for gate-node. 
- default: m1.small - - - db_name: - type: string - description: WordPress database name - default: wordpress - constraints: - - length: { min: 1, max: 64 } - description: db_name must be between 1 and 64 characters - - allowed_pattern: '[a-zA-Z][a-zA-Z0-9]*' - description: > - db_name must begin with a letter and contain only alphanumeric - characters - db_username: - type: string - description: The WordPress database admin account username - default: admin - hidden: true - constraints: - - length: { min: 1, max: 16 } - description: db_username must be between 1 and 16 characters - - allowed_pattern: '[a-zA-Z][a-zA-Z0-9]*' - description: > - db_username must begin with a letter and contain only alphanumeric - characters - db_password: - type: string - description: The WordPress database admin account password - default: admin - hidden: true - constraints: - - length: { min: 1, max: 41 } - description: db_password must be between 1 and 41 characters - - allowed_pattern: '[a-zA-Z0-9]*' - description: db_password must contain only alphanumeric characters - db_root_password: - type: string - description: Root password for MySQL - default: admin - hidden: true - constraints: - - length: { min: 1, max: 41 } - description: db_root_password must be between 1 and 41 characters - - allowed_pattern: '[a-zA-Z0-9]*' - description: db_root_password must contain only alphanumeric characters - -resources: - wordpress_instances: - type: OS::Heat::ResourceGroup - properties: - count: {get_param: wp_instances_count} - resource_def: - type: wp-instances.yaml - properties: - name: wp_%index% - image: { get_param: wp_image } - flavor: { get_param: wp_instance_type } - key_name: { get_param: key_name } - db_root_password: { get_param: db_root_password } - db_name: { get_param: db_name } - db_username: { get_param: db_username } - db_password: { get_param: db_password } - wc_notify: { get_attr: ['wait_handle', 'curl_cli'] } - subnet: {get_resource: subnet} - network: {get_resource: network} - security_group: {get_resource: security_group} - - gate_instance: - type: OS::Nova::Server - properties: - image: { get_param: image } - flavor: { get_param: instance_type } - key_name: { get_param: key_name } - networks: - - port: {get_resource: port_gate} - user_data_format: RAW - user_data: | - #cloud-config - packages: - - python - - siege - - httpd-tools - - security_group: - type: OS::Neutron::SecurityGroup - properties: - rules: - - port_range_max: null - port_range_min: null - protocol: icmp - remote_ip_prefix: 0.0.0.0/0 - - port_range_max: 80 - port_range_min: 80 - protocol: tcp - remote_ip_prefix: 0.0.0.0/0 - - port_range_max: 443 - port_range_min: 443 - protocol: tcp - remote_ip_prefix: 0.0.0.0/0 - - port_range_max: 22 - port_range_min: 22 - protocol: tcp - remote_ip_prefix: 0.0.0.0/0 - - network: - type: OS::Neutron::Net - properties: - name: wordpress-network - - subnet: - type: OS::Neutron::Subnet - properties: - cidr: 10.0.0.1/24 - dns_nameservers: [8.8.8.8] - ip_version: 4 - network: {get_resource: network} - - port_gate: - type: OS::Neutron::Port - properties: - fixed_ips: - - subnet: {get_resource: subnet} - network: {get_resource: network} - replacement_policy: AUTO - security_groups: - - {get_resource: security_group} - - floating_ip: - type: OS::Neutron::FloatingIP - properties: - port_id: {get_resource: port_gate} - floating_network: {get_param: network_id} - - router_interface: - type: OS::Neutron::RouterInterface - properties: - router_id: {get_param: router_id} - subnet: {get_resource: subnet} - - 
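  # The wait handle below closes the loop with wp-instances.yaml: each
  # WordPress server gets the handle's curl_cli as its wc_notify parameter
  # and calls it with {"status": "SUCCESS"} at the end of its user_data, so
  # the stack only reaches CREATE_COMPLETE after wp_instances_count such
  # signals arrive (or the timeout parameter expires first).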
wait_condition: - type: OS::Heat::WaitCondition - properties: - handle: {get_resource: wait_handle} - count: {get_param: wp_instances_count} - timeout: {get_param: timeout} - - wait_handle: - type: OS::Heat::WaitConditionHandle - -outputs: - curl_cli: - value: { get_attr: ['wait_handle', 'curl_cli'] } - - wp_nodes: - value: { get_attr: ['wordpress_instances', 'attributes', 'ip'] } - - gate_node: - value: { get_attr: ['floating_ip', 'floating_ip_address'] } - - net_name: - value: { get_attr: ['network', 'name'] } diff --git a/rally-jobs/extra/workload/wp-instances.yaml b/rally-jobs/extra/workload/wp-instances.yaml deleted file mode 100644 index 9a04d42a..00000000 --- a/rally-jobs/extra/workload/wp-instances.yaml +++ /dev/null @@ -1,82 +0,0 @@ -heat_template_version: 2014-10-16 - -parameters: - name: { type: string } - wc_notify: { type: string } - subnet: { type: string } - network: { type: string } - security_group: { type: string } - key_name: { type: string } - flavor: { type: string } - image: { type: string } - db_name: { type: string } - db_username: { type: string } - db_password: { type: string } - db_root_password: { type: string } - -resources: - wordpress_instance: - type: OS::Nova::Server - properties: - name: { get_param: name } - image: { get_param: image } - flavor: { get_param: flavor } - key_name: { get_param: key_name } - networks: - - port: {get_resource: port} - user_data_format: RAW - user_data: - str_replace: - template: | - #!/bin/bash -v - sudo yum -y install mariadb mariadb-server httpd wordpress curl - sudo touch /var/log/mariadb/mariadb.log - sudo chown mysql.mysql /var/log/mariadb/mariadb.log - sudo systemctl start mariadb.service - # Setup MySQL root password and create a user - sudo mysqladmin -u root password db_rootpassword - cat << EOF | mysql -u root --password=db_rootpassword - CREATE DATABASE db_name; - GRANT ALL PRIVILEGES ON db_name.* TO "db_user"@"localhost" - IDENTIFIED BY "db_password"; - FLUSH PRIVILEGES; - EXIT - EOF - sudo sed -i "/Deny from All/d" /etc/httpd/conf.d/wordpress.conf - sudo sed -i "s/Require local/Require all granted/" /etc/httpd/conf.d/wordpress.conf - sudo sed -i s/database_name_here/db_name/ /etc/wordpress/wp-config.php - sudo sed -i s/username_here/db_user/ /etc/wordpress/wp-config.php - sudo sed -i s/password_here/db_password/ /etc/wordpress/wp-config.php - sudo systemctl start httpd.service - IP=$(ip r get 8.8.8.8 | grep src | awk '{print $7}') - curl --data 'user_name=admin&password=123&password2=123&admin_email=asd@asd.com' http://$IP/wordpress/wp-admin/install.php?step=2 - mkfifo /tmp/data - (for i in $(seq 1000); do - echo -n "1,$i,$i,page," - head -c 100000 /dev/urandom | base64 -w 0 - echo - done - ) > /tmp/data & - mysql -u root --password=db_rootpassword wordpress -e 'LOAD DATA LOCAL INFILE "/tmp/data" INTO TABLE wp_posts FIELDS TERMINATED BY "," (post_author,post_title,post_name,post_type,post_content);' - sudo sh -c 'echo "172.16.0.6 mos80-ssl.fuel.local" >> /etc/hosts' - wc_notify --insecure --data-binary '{"status": "SUCCESS"}' - params: - db_rootpassword: { get_param: db_root_password } - db_name: { get_param: db_name } - db_user: { get_param: db_username } - db_password: { get_param: db_password } - wc_notify: { get_param: wc_notify } - - port: - type: OS::Neutron::Port - properties: - fixed_ips: - - subnet: {get_param: subnet} - network: {get_param: network} - replacement_policy: AUTO - security_groups: - - {get_param: security_group} - -outputs: - ip: - value: { get_attr: ['wordpress_instance', 'networks'] } 
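The task files that follow (heat.yaml, nova.yaml, and the rally-*.yaml files)
all share one shape: a scenario name maps to a list of benchmark
configurations, each combining args (inputs to the scenario, e.g.
template_path, which points at ~/.rally/extra/, where the job setup evidently
copies the extra/ files above), a runner (how many iterations and how they
are driven), a context (users, networks, quotas prepared beforehand), and an
optional sla (pass/fail criteria). A minimal sketch, with placeholder
scenario and template names, assuming the standard Rally task schema:

  SomeGroup.some_scenario:
    -
      args:
        template_path: "~/.rally/extra/example.yaml.template"
      runner:
        type: "constant"
        times: 4           # total iterations
        concurrency: 2     # run two at a time
      context:
        users:
          tenants: 2
          users_per_tenant: 2
      sla:
        failure_rate:
          max: 0           # any failed iteration fails the run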
diff --git a/rally-jobs/heat.yaml b/rally-jobs/heat.yaml deleted file mode 100644 index 268143b5..00000000 --- a/rally-jobs/heat.yaml +++ /dev/null @@ -1,367 +0,0 @@ ---- - HeatStacks.create_and_list_stack: - - - args: - template_path: "~/.rally/extra/default.yaml.template" - runner: - type: "constant" - times: 6 - concurrency: 3 - context: - users: - tenants: 1 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 - - HeatStacks.create_and_delete_stack: - - - args: - template_path: "~/.rally/extra/default.yaml.template" - runner: - type: "constant" - times: 6 - concurrency: 3 - context: - users: - tenants: 2 - users_per_tenant: 3 - sla: - failure_rate: - max: 0 - - - - args: - template_path: "~/.rally/extra/server_with_volume.yaml.template" - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - - - - args: - template_path: "~/.rally/extra/resource_group_server_with_volume.yaml.template" - parameters: - num_instances: 2 - files: ["~/.rally/extra/server_with_volume.yaml.template"] - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 - - - - args: - template_path: "~/.rally/extra/resource_group_with_constraint.yaml.template" - runner: - type: "constant" - times: 6 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - - HeatStacks.create_check_delete_stack: - - - args: - template_path: "~/.rally/extra/random_strings.yaml.template" - runner: - type: "constant" - times: 6 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - - HeatStacks.create_update_delete_stack: - - - args: - template_path: "~/.rally/extra/random_strings.yaml.template" - updated_template_path: "~/.rally/extra/updated_random_strings_add.yaml.template" - runner: - type: "constant" - times: 6 - concurrency: 3 - context: - users: - tenants: 2 - users_per_tenant: 3 - sla: - failure_rate: - max: 0 - - - - args: - template_path: "~/.rally/extra/random_strings.yaml.template" - updated_template_path: "~/.rally/extra/updated_random_strings_delete.yaml.template" - runner: - type: "constant" - times: 6 - concurrency: 3 - context: - users: - tenants: 2 - users_per_tenant: 3 - sla: - failure_rate: - max: 0 - - - - args: - template_path: "~/.rally/extra/random_strings.yaml.template" - updated_template_path: "~/.rally/extra/updated_random_strings_replace.yaml.template" - runner: - type: "constant" - times: 6 - concurrency: 3 - context: - users: - tenants: 2 - users_per_tenant: 3 - sla: - failure_rate: - max: 0 - - - - args: - template_path: "~/.rally/extra/autoscaling_policy.yaml.template" - updated_template_path: "~/.rally/extra/updated_autoscaling_policy_inplace.yaml.template" - runner: - type: "constant" - times: 6 - concurrency: 3 - context: - users: - tenants: 2 - users_per_tenant: 3 - sla: - failure_rate: - max: 0 - - - - args: - template_path: "~/.rally/extra/resource_group.yaml.template" - updated_template_path: "~/.rally/extra/updated_resource_group_increase.yaml.template" - runner: - type: "constant" - times: 6 - concurrency: 3 - context: - users: - tenants: 2 - users_per_tenant: 3 - sla: - failure_rate: - max: 0 - - - - args: - template_path: "~/.rally/extra/resource_group.yaml.template" - updated_template_path: "~/.rally/extra/updated_resource_group_reduce.yaml.template" - runner: - type: "constant" - times: 6 - concurrency: 3 - context: - users: - 
tenants: 2 - users_per_tenant: 3 - sla: - failure_rate: - max: 0 - - # HeatStacks.list_stacks_and_resources: - # - - # runner: - # type: "constant" - # times: 6 - # concurrency: 3 - # context: - # users: - # tenants: 2 - # users_per_tenant: 3 - # stacks: - # stacks_per_tenant: 2 - # resources_per_stack: 10 - # sla: - # failure_rate: - # max: 0 - - HeatStacks.create_suspend_resume_delete_stack: - - - args: - template_path: "~/.rally/extra/random_strings.yaml.template" - runner: - type: "constant" - times: 6 - concurrency: 3 - context: - users: - tenants: 2 - users_per_tenant: 3 - sla: - failure_rate: - max: 0 - - #HeatStacks.list_stacks_and_events: - # - - # runner: - # type: "constant" - # times: 6 - # concurrency: 3 - # context: - # users: - # tenants: 2 - # users_per_tenant: 3 - # stacks: - # stacks_per_tenant: 2 - # resources_per_stack: 10 - # sla: - # failure_rate: - # max: 0 - - HeatStacks.create_snapshot_restore_delete_stack: - - - args: - template_path: "~/.rally/extra/random_strings.yaml.template" - runner: - type: "constant" - times: 6 - concurrency: 3 - context: - users: - tenants: 2 - users_per_tenant: 3 - sla: - failure_rate: - max: 0 - - HeatStacks.create_stack_and_scale: - - - args: - template_path: "~/.rally/extra/autoscaling_group.yaml.template" - output_key: "scaling_url" - delta: 1 - parameters: - scaling_adjustment: 1 - runner: - type: "constant" - times: 2 - concurrency: 1 - context: - users: - tenants: 2 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 - - - args: - template_path: "~/.rally/extra/autoscaling_group.yaml.template" - output_key: "scaling_url" - delta: -1 - parameters: - scaling_adjustment: -1 - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 - - HeatStacks.create_stack_and_list_output: - - - args: - template_path: "~/.rally/extra/resource_group_with_outputs.yaml.template" - runner: - type: "constant" - times: 5 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - - HeatStacks.create_stack_and_list_output_via_API: - - - args: - template_path: "~/.rally/extra/resource_group_with_outputs.yaml.template" - runner: - type: "constant" - times: 5 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - - HeatStacks.create_stack_and_show_output: - - - args: - template_path: "~/.rally/extra/resource_group_with_outputs.yaml.template" - output_key: "val1" - runner: - type: "constant" - times: 5 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - - HeatStacks.create_stack_and_show_output_via_API: - - - args: - template_path: "~/.rally/extra/resource_group_with_outputs.yaml.template" - output_key: "val1" - runner: - type: "constant" - times: 5 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - - Authenticate.validate_heat: - - - args: - repetitions: 2 - runner: - type: "constant" - times: 10 - concurrency: 5 - context: - users: - tenants: 3 - users_per_tenant: 5 - sla: - failure_rate: - max: 0 diff --git a/rally-jobs/nova.yaml b/rally-jobs/nova.yaml deleted file mode 100755 index 78bbbfe6..00000000 --- a/rally-jobs/nova.yaml +++ /dev/null @@ -1,1354 +0,0 @@ -{%- set cirros_image_url = "http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img" %} -{% set image_name = "^(cirros.*-disk|TestVM)$" %} -{% set flavor_name = "m1.tiny" %} -{% set volume_type = "" %} ---- - Authenticate.validate_nova: - - - args: - repetitions: 2 - runner: - type: "constant" - times: 10 - concurrency: 5 - 
context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - - Quotas.nova_update_and_delete: - - - args: - max_quota: 1024 - runner: - type: "constant" - times: 4 - concurrency: 1 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - - Quotas.nova_get: - - - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - - Quotas.nova_update: - - - args: - max_quota: 1024 - runner: - type: "constant" - times: 4 - concurrency: 1 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - - NovaServers.boot_and_delete_server: - - - args: - flavor: - name: {{flavor_name}} - image: - name: {{image_name}} - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - - - - args: - flavor: - name: {{flavor_name}} - image: - name: {{image_name}} - auto_assign_nic: true - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - network: - start_cidr: "10.2.0.0/24" - networks_per_tenant: 2 - - - - args: - flavor: - name: {{flavor_name}} - image: - name: {{image_name}} - force_delete: true - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - - NovaServers.boot_and_list_server: - - - args: - flavor: - name: {{flavor_name}} - image: - name: {{image_name}} - detailed: True - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 - - - args: - flavor: - name: {{flavor_name}} - image: - name: {{image_name}} - detailed: True - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - user_choice_method: "round_robin" - sla: - failure_rate: - max: 0 - - - args: - flavor: - name: {{flavor_name}} - image: - name: {{image_name}} - files: - /home/bootfile: "rallytest" - detailed: True - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 - - NovaServerGroups.create_and_get_server_group: - {% for s in (["affinity"], ["anti-affinity"]) %} - - - args: - kwargs: - policies: {{s}} - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - {% endfor %} - - NovaServerGroups.create_and_list_server_groups: - {% for s in (["affinity"], ["anti-affinity"]) %} - - - args: - kwargs: - policies: {{s}} - all_projects: false - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - {% endfor %} - - NovaServers.suspend_and_resume_server: - - - args: - flavor: - name: {{flavor_name}} - image: - name: {{image_name}} - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 - - NovaServers.pause_and_unpause_server: - - - args: - flavor: - name: {{flavor_name}} - image: - name: {{image_name}} - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 - - NovaServers.shelve_and_unshelve_server: - - - args: - flavor: - name: {{flavor_name}} - image: - name: {{image_name}} - 
runner: - type: "constant" - times: 2 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 - - NovaServers.boot_and_rebuild_server: - - - args: - flavor: - name: {{flavor_name}} - from_image: - name: {{image_name}} - to_image: - name: {{image_name}} - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 - - NovaServers.boot_and_associate_floating_ip: - - - args: - flavor: - name: {{flavor_name}} - image: - name: {{image_name}} - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 1 - network: {} - sla: - failure_rate: - max: 0 - - NovaServers.boot_server_associate_and_dissociate_floating_ip: - - - args: - flavor: - name: {{flavor_name}} - image: - name: {{image_name}} - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - network: {} - sla: - failure_rate: - max: 0 - - NovaServers.list_servers: - - - args: - detailed: True - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 1 - servers: - flavor: - name: {{flavor_name}} - image: - name: {{image_name}} - servers_per_tenant: 2 - auto_assign_nic: True - network: - networks_per_tenant: 1 - sla: - failure_rate: - max: 0 - - NovaServers.boot_and_show_server: - - - args: - image: - name: {{image_name}} - flavor: - name: {{flavor_name}} - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 - - NovaServers.boot_and_get_console_output: - - - args: - image: - name: {{image_name}} - flavor: - name: {{flavor_name}} - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 - - NovaServers.boot_and_get_console_url: - {% for s in ("novnc", "xvpvnc") %} - - - args: - flavor: - name: "{{flavor_name}}" - image: - name: "{{image_name}}" - console_type: {{s}} - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - {% endfor %} - - NovaServers.resize_server: - - - args: - flavor: - name: {{flavor_name}} - image: - name: {{image_name}} - to_flavor: - name: "m1.small" - confirm: true - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 - - - args: - flavor: - name: {{flavor_name}} - image: - name: {{image_name}} - to_flavor: - name: "m1.small" - confirm: false - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 - - NovaServers.resize_shutoff_server: - {% for s in ("true", "false") %} - - - args: - flavor: - name: {{flavor_name}} - image: - name: {{image_name}} - to_flavor: - name: "m1.small" - confirm: {{s}} - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - {% endfor %} - - NovaServers.boot_server_attach_created_volume_and_resize: - - - args: - flavor: - name: {{flavor_name}} - image: - name: {{image_name}} - to_flavor: - name: "m1.small" - volume_size: 1 - confirm: true - force_delete: false - do_delete: true - boot_server_kwargs: {} - create_volume_kwargs: {} - runner: - type: "constant" - times: 2 - concurrency: 2 - 
context: - users: - tenants: 2 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 - - NovaServers.boot_server_from_volume_and_resize: - - - args: - flavor: - name: {{flavor_name}} - image: - name: {{image_name}} - to_flavor: - name: "m1.small" - volume_size: 1 - confirm: true - force_delete: false - do_delete: true - boot_server_kwargs: {} - create_volume_kwargs: {} - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 - - NovaServers.boot_and_bounce_server: - - - args: - flavor: - name: {{flavor_name}} - image: - name: {{image_name}} - actions: - - - hard_reboot: 1 - - - stop_start: 1 - - - rescue_unrescue: 1 - - - pause_unpause: 1 - - - suspend_resume: 1 - - - lock_unlock: 1 - - - shelve_unshelve: 1 - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 - - NovaServers.boot_lock_unlock_and_delete: - - - args: - flavor: - name: {{flavor_name}} - image: - name: {{image_name}} - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 - - NovaServers.boot_server_from_volume_and_delete: - - - args: - flavor: - name: {{flavor_name}} - image: - name: {{image_name}} - volume_size: 1 - volume_type: {{volume_type}} - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - - NovaServers.boot_server_from_volume: - - - args: - flavor: - name: {{flavor_name}} - image: - name: {{image_name}} - volume_size: 1 - volume_type: {{volume_type}} - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - - - NovaServers.snapshot_server: - - - args: - flavor: - name: {{flavor_name}} - image: - name: {{image_name}} - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - - - args: - flavor: - name: {{flavor_name}} - image: - name: {{image_name}} - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - api_versions: - glance: - version: 2 - sla: - failure_rate: - max: 0 - - NovaServers.boot_server: - - - args: - flavor: - name: "^ram64$" - image: - name: {{image_name}} - auto_assign_nic: false - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - flavors: - - - name: "ram64" - ram: 64 - sla: - failure_rate: - max: 0 - - - args: - flavor: - name: {{flavor_name}} - image: - name: {{image_name}} - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - - - args: - flavor: - name: {{flavor_name}} - image: - name: "rally-named-image-from-context" - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 2 - roles: - - admin - images: - image_url: "{{ cirros_image_url }}" - disk_format: "qcow2" - container_format: "bare" - images_per_tenant: 1 - image_name: "rally-named-image-from-context" - visibility: "public" - sla: - failure_rate: - max: 0 - - NovaHypervisors.list_hypervisors: - - - args: - detailed: True - runner: - type: "constant" - times: 4 - concurrency: 2 - sla: - failure_rate: - max: 0 - - NovaHypervisors.statistics_hypervisors: - - - 
args: {} - runner: - type: "constant" - times: 5 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - - NovaHypervisors.list_and_get_hypervisors: - - - args: - detailed: True - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - - NovaHypervisors.list_and_get_uptime_hypervisors: - - - args: - detailed: True - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - - NovaHypervisors.list_and_search_hypervisors: - - - args: - detailed: True - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - - NovaImages.list_images: - - - args: - detailed: True - runner: - type: "constant" - times: 4 - concurrency: 4 - context: - users: - tenants: 2 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 - - NovaKeypair.create_and_delete_keypair: - - - runner: - type: "constant" - times: 4 - concurrency: 4 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - - - args: - key_type: "x509" - runner: - type: "constant" - times: 4 - concurrency: 4 - context: - users: - tenants: 2 - users_per_tenant: 2 - api_versions: - nova: - version: 2.2 - sla: - failure_rate: - max: 0 - - NovaKeypair.create_and_list_keypairs: - - - runner: - type: "constant" - times: 4 - concurrency: 4 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - - NovaKeypair.create_and_get_keypair: - - - args: {} - runner: - type: "constant" - times: 4 - concurrency: 4 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - - NovaKeypair.boot_and_delete_server_with_keypair: - - - args: - flavor: - name: {{flavor_name}} - image: - name: {{image_name}} - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - network: - start_cidr: "10.2.0.0/24" - sla: - failure_rate: - max: 0 - - - args: - flavor: - name: {{flavor_name}} - image: - name: {{image_name}} - key_type: "x509" - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - network: - start_cidr: "10.2.0.0/24" - api_versions: - nova: - version: 2.2 - sla: - failure_rate: - max: 0 - - NovaServers.boot_and_delete_multiple_servers: - - - args: - flavor: - name: {{flavor_name}} - image: - name: {{image_name}} - count: 5 - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - - NovaFlavors.list_flavors: - - - args: - detailed: True - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - - NovaAgents.list_agents: - - - runner: - type: "constant" - concurrency: 2 - times: 4 - sla: - failure_rate: - max: 0 - - NovaAggregates.list_aggregates: - - - runner: - type: "constant" - concurrency: 2 - times : 4 - sla: - failure_rate: - max: 0 - - NovaAggregates.create_aggregate_add_and_remove_host: - - - args: - availability_zone: "nova" - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - - NovaAvailabilityZones.list_availability_zones: - - - args: - detailed: true - runner: - type: "constant" - concurrency: 2 - times: 4 - sla: - failure_rate: 
- max: 0 - - NovaHosts.list_hosts: - - - runner: - type: "constant" - concurrency: 2 - times: 4 - sla: - failure_rate: - max: 0 - - NovaHosts.list_and_get_hosts: - - - runner: - type: "constant" - concurrency: 2 - times: 5 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - - NovaServices.list_services: - - - runner: - type: "constant" - times: 4 - concurrency: 2 - sla: - failure_rate: - max: 0 - - NovaFlavors.create_flavor: - - - args: - ram: 500 - vcpus: 1 - disk: 1 - runner: - type: "constant" - times: 4 - concurrency: 2 - sla: - failure_rate: - max: 0 - - NovaFlavors.create_flavor_and_set_keys: - - - args: - ram: 500 - vcpus : 1 - disk: 1 - extra_specs: - quota:disk_read_bytes_sec: 10240 - runner: - type: "constant" - concurrency: 2 - times: 4 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - - NovaFlavors.create_and_get_flavor: - - - args: - ram: 500 - vcpus: 1 - disk: 1 - runner: - type: "constant" - times: 4 - concurrency: 2 - sla: - failure_rate: - max: 0 - - NovaFlavors.create_and_delete_flavor: - - - runner: - type: "constant" - concurrency: 2 - times: 10 - args: - ram: 500 - vcpus : 1 - disk: 1 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - - NovaFlavors.create_and_list_flavor_access: - - - args: - ram: 500 - vcpus: 1 - disk: 1 - runner: - type: "constant" - times: 4 - concurrency: 2 - sla: - failure_rate: - max: 0 - - NovaFlavors.create_flavor_and_add_tenant_access: - - - args: - ram: 500 - vcpus : 1 - disk: 1 - runner: - type: "constant" - concurrency: 2 - times: 4 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - - NovaServers.boot_and_update_server: - - - args: - flavor: - name: {{flavor_name}} - image: - name: {{image_name}} - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - - NovaServers.boot_server_from_volume_snapshot: - - - args: - flavor: - name: {{flavor_name}} - image: - name: {{image_name}} - volume_size: 1 - volume_type: {{volume_type}} - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - - NovaAggregates.create_and_list_aggregates: - - - args: - availability_zone: "nova" - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - - NovaAggregates.create_and_delete_aggregate: - - - args: - availability_zone: "nova" - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - - NovaAggregates.create_and_update_aggregate: - - - args: - availability_zone: "nova" - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - - NovaAggregates.create_and_get_aggregate_details: - - - args: - availability_zone: "nova" - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - - NovaServers.boot_server_and_attach_interface: - - - args: - flavor: - name: {{flavor_name}} - image: - name: {{image_name}} - network_create_args: {} - subnet_create_args: {} - subnet_cidr_start: "1.1.0.0/30" - boot_server_args: {} - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - network: {} - users: - 
          tenants: 2
-          users_per_tenant: 2
-        quotas:
-          neutron:
-            network: -1
-            subnet: -1
-      sla:
-        failure_rate:
-          max: 0
-
-  NovaAggregates.create_aggregate_add_host_and_boot_server:
-    -
-      args:
-        image:
-          name: {{image_name}}
-        metadata:
-          test_metadata: "true"
-        availability_zone: "nova"
-        ram: 512
-        vcpus: 1
-        disk: 1
-        boot_server_kwargs: {}
-      runner:
-        type: "constant"
-        times: 4
-        concurrency: 2
-      context:
-        users:
-          tenants: 2
-          users_per_tenant: 2
-      sla:
-        failure_rate:
-          max: 0
-
-  NovaServers.boot_server_and_list_interfaces:
-    -
-      args:
-        flavor:
-          name: "{{flavor_name}}"
-        image:
-          name: {{image_name}}
-      runner:
-        type: "constant"
-        times: 3
-        concurrency: 3
-      context:
-        users:
-          tenants: 2
-          users_per_tenant: 2
-        network:
-          start_cidr: "100.1.0.0/26"
-      sla:
-        failure_rate:
-          max: 0
-
-  NovaServers.boot_server_attach_volume_and_list_attachments:
-    -
-      args:
-        flavor:
-          name: {{flavor_name}}
-        image:
-          name: {{image_name}}
-        volume_size: 1
-        volume_num: 2
-        boot_server_kwargs: {}
-        create_volume_kwargs: {}
-      runner:
-        type: "constant"
-        times: 2
-        concurrency: 2
-      context:
-        users:
-          tenants: 1
-          users_per_tenant: 1
-      sla:
-        failure_rate:
-          max: 0
diff --git a/rally-jobs/plugins/README.rst b/rally-jobs/plugins/README.rst
deleted file mode 100644
index 33bec0d2..00000000
--- a/rally-jobs/plugins/README.rst
+++ /dev/null
@@ -1,9 +0,0 @@
-Rally plugins
-=============
-
-All *.py modules from this directory will be auto-loaded by Rally and all
-plugins will be discoverable. No extra configuration is needed, and there is
-no difference between writing them here and in the Rally code base.
-
-Note that it is better to push all interesting and useful benchmarks to the
-Rally code base, since this simplifies administration for operators.
diff --git a/rally-jobs/plugins/__init__.py b/rally-jobs/plugins/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/rally-jobs/plugins/fake_plugin.py b/rally-jobs/plugins/fake_plugin.py
deleted file mode 100644
index f2abef8f..00000000
--- a/rally-jobs/plugins/fake_plugin.py
+++ /dev/null
@@ -1,46 +0,0 @@
-# Copyright 2014: Mirantis Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-import random
-import time
-
-from rally.task import atomic
-from rally.task import scenario
-
-# This is used to test relative import
-from test_relative_import import zzz
-
-
-@scenario.configure(name="FakePlugin.testplugin")
-class FakePlugin(scenario.Scenario):
-    """Fake plugin with a scenario."""
-
-    @atomic.action_timer("test1")
-    def _test1(self, factor):
-        time.sleep(random.random() * 0.1)
-
-    @atomic.action_timer("test2")
-    def _test2(self, factor):
-        time.sleep(random.random() * factor)
-
-    def run(self, factor=1):
-        """Fake scenario.
- - :param factor: influences the argument value for a time.sleep() call - """ - zzz.some_very_important_function() - self._test1(factor) - self._test2(factor) diff --git a/rally-jobs/plugins/rally_profile.py b/rally-jobs/plugins/rally_profile.py deleted file mode 100644 index d35250e2..00000000 --- a/rally-jobs/plugins/rally_profile.py +++ /dev/null @@ -1,50 +0,0 @@ -# Copyright 2016: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -from rally.common import utils -from rally.task import atomic -from rally.task import scenario - - -@scenario.configure(name="RallyProfile.generate_names_in_atomic") -class GenerateNamesInAtomic(scenario.Scenario, utils.RandomNameGeneratorMixin): - - def run(self, number_of_names): - """Generate random names in atomic. - - :param number_of_names: int number of names to create - """ - with atomic.ActionTimer(self, "generate_%s_names" % number_of_names): - for i in range(number_of_names): - self.generate_random_name() - - -@scenario.configure(name="RallyProfile.calculate_atomic") -class CalculateAtomic(scenario.Scenario, utils.RandomNameGeneratorMixin): - - def run(self, number_of_atomics): - """Calculate atomic actions. - - :param number_of_atomics: int number of atomics to run - """ - tmp_name = "tmp_actions" - atomic_inst = atomic.ActionTimerMixin() - - calc_atomic_name = "calculate_%s_atomics" % number_of_atomics - with atomic.ActionTimer(self, calc_atomic_name): - for _ in range(number_of_atomics): - with atomic.ActionTimer(atomic_inst, tmp_name): - pass diff --git a/rally-jobs/plugins/test_relative_import/__init__.py b/rally-jobs/plugins/test_relative_import/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/rally-jobs/plugins/test_relative_import/zzz.py b/rally-jobs/plugins/test_relative_import/zzz.py deleted file mode 100644 index cbd847b1..00000000 --- a/rally-jobs/plugins/test_relative_import/zzz.py +++ /dev/null @@ -1,17 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
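# fake_plugin.py above imports this package with
# "from test_relative_import import zzz", so loading the rally-jobs/plugins
# directory checks that Rally's plugin discovery puts it on the import path
# and that sibling packages resolve; some_very_important_function() below
# just returns a sentinel value (42) for the scenario to call.
#
# Once discovered, the plugin is referenced from a task file like any
# built-in scenario; a minimal sketch (args follow run()'s signature above):
#
#   FakePlugin.testplugin:
#     -
#       args:
#         factor: 1
#       runner:
#         type: "constant"
#         times: 2
#         concurrency: 2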
- -# This module is used just for test that relative imports work well - - -def some_very_important_function(): - return 42 diff --git a/rally-jobs/rally-designate.yaml b/rally-jobs/rally-designate.yaml deleted file mode 100644 index f05fb897..00000000 --- a/rally-jobs/rally-designate.yaml +++ /dev/null @@ -1,206 +0,0 @@ ---- - DesignateBasic.create_and_delete_domain: - - - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - - DesignateBasic.create_and_update_domain: - - - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - - DesignateBasic.create_and_delete_records: - - - args: - records_per_domain: 5 - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - - DesignateBasic.create_and_list_domains: - - - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - - DesignateBasic.create_and_list_records: - - - args: - records_per_domain: 5 - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - - DesignateBasic.list_domains: - - - runner: - type: "constant" - times: 3 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - - DesignateBasic.create_and_list_servers: - - - runner: - type: "constant" - times: 4 - concurrency: 1 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - - DesignateBasic.create_and_delete_server: - - - runner: - type: "constant" - times: 4 - concurrency: 1 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - - DesignateBasic.list_servers: - - - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - - DesignateBasic.create_and_list_zones: - - - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - - DesignateBasic.create_and_delete_zone: - - - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - - DesignateBasic.create_and_list_recordsets: - - - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - zones: - zones_per_tenant: 1 - sla: - failure_rate: - max: 0 - - DesignateBasic.create_and_delete_recordsets: - - - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - zones: - zones_per_tenant: 1 - sla: - failure_rate: - max: 0 - - DesignateBasic.list_zones: - - - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - zones: - zones_per_tenant: 10 - sla: - failure_rate: - max: 0 diff --git a/rally-jobs/rally-ironic.yaml b/rally-jobs/rally-ironic.yaml deleted file mode 100644 index bd6811db..00000000 --- a/rally-jobs/rally-ironic.yaml +++ /dev/null @@ -1,20 +0,0 @@ ---- - {% for s in ("create_and_list_node", "create_and_delete_node") %} - IronicNodes.{{s}}: - - - args: - driver: "fake" - properties: - capabilities: "boot_option:local" - runner: - type: "constant" - times: 100 - 
concurrency: 20 - context: - users: - tenants: 5 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 - {% endfor %} diff --git a/rally-jobs/rally-keystone-api-v2.yaml b/rally-jobs/rally-keystone-api-v2.yaml deleted file mode 100644 index 9eb3859f..00000000 --- a/rally-jobs/rally-keystone-api-v2.yaml +++ /dev/null @@ -1,711 +0,0 @@ -{%- set cirros_image_url = "http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img" %} ---- - KeystoneBasic.create_user: - - - args: {} - runner: - type: "constant" - times: 10 - concurrency: 10 - sla: - failure_rate: - max: 0 - - KeystoneBasic.create_delete_user: - - - args: {} - runner: - type: "constant" - times: 10 - concurrency: 10 - sla: - failure_rate: - max: 0 - - KeystoneBasic.create_user_set_enabled_and_delete: - - - args: - enabled: true - runner: - type: "constant" - times: 10 - concurrency: 10 - sla: - failure_rate: - max: 0 - - - args: - enabled: false - runner: - type: "constant" - times: 10 - concurrency: 10 - sla: - failure_rate: - max: 0 - - KeystoneBasic.create_and_list_tenants: - - - args: {} - runner: - type: "constant" - times: 10 - concurrency: 10 - sla: - failure_rate: - max: 0 - - KeystoneBasic.get_entities: - - - runner: - type: "constant" - times: 20 - concurrency: 10 - sla: - failure_rate: - max: 0 - - - args: - service_name: null - runner: - type: "constant" - times: 20 - concurrency: 10 - sla: - failure_rate: - max: 0 - - - args: - service_name: "nova" - runner: - type: "constant" - times: 20 - concurrency: 10 - sla: - failure_rate: - max: 0 - - KeystoneBasic.add_and_remove_user_role: - - - runner: - type: "constant" - times: 10 - concurrency: 5 - context: - users: - tenants: 1 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 - - KeystoneBasic.create_and_delete_role: - - - runner: - type: "constant" - times: 10 - concurrency: 5 - sla: - failure_rate: - max: 0 - - KeystoneBasic.create_and_get_role: - - - args: {} - runner: - type: "constant" - times: 10 - concurrency: 5 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - - KeystoneBasic.create_add_and_list_user_roles: - - - runner: - type: "constant" - times: 10 - concurrency: 5 - context: - users: - tenants: 1 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 - - KeystoneBasic.create_and_list_users: - - - args: {} - runner: - type: "constant" - times: 10 - concurrency: 10 - sla: - failure_rate: - max: 0 - - KeystoneBasic.create_tenant: - - - args: {} - runner: - type: "constant" - times: 10 - concurrency: 10 - sla: - failure_rate: - max: 0 - - KeystoneBasic.create_tenant_with_users: - - - args: - users_per_tenant: 10 - runner: - type: "constant" - times: 10 - concurrency: 10 - context: - users: - tenants: 3 - sla: - failure_rate: - max: 0 - - KeystoneBasic.create_user_update_password: - - - args: {} - runner: - type: "constant" - times: 10 - concurrency: 5 - sla: - failure_rate: - max: 0 - - KeystoneBasic.create_and_update_user: - - - args: - create_user_kwargs: {} - update_user_kwargs: - email: "newemail@rally.me" - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 3 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - - KeystoneBasic.create_update_and_delete_tenant: - - - args: {} - runner: - type: "constant" - times: 10 - concurrency: 5 - sla: - failure_rate: - max: 0 - - KeystoneBasic.create_and_delete_service: - - - runner: - type: "constant" - times: 10 - concurrency: 5 - sla: - failure_rate: - max: 0 - - KeystoneBasic.create_and_list_services: - - - runner: - type: "constant" 
- times: 10 - concurrency: 5 - sla: - failure_rate: - max: 0 - - KeystoneBasic.create_and_list_ec2credentials: - - - runner: - type: "constant" - times: 10 - concurrency: 5 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - - KeystoneBasic.create_and_delete_ec2credential: - - - runner: - type: "constant" - times: 10 - concurrency: 5 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - - Dummy.openstack: - - - args: - sleep: 0.01 - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 8 - users_per_tenant: 4 - sla: - failure_rate: - max: 0 - - - args: - sleep: 0.6 - runner: - type: "constant" - concurrency: 2 - times: 4 - timeout: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 - - - args: - sleep: 0.6 - runner: - type: "rps" - rps: 2 - times: 5 - timeout: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 - - - args: - sleep: 0.01 - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - quotas: - nova: - instances: 200 - cores: 200 - ram: -1 - floating_ips: 200 - fixed_ips: 200 - metadata_items: -1 - injected_files: -1 - injected_file_content_bytes: -1 - injected_file_path_bytes: -1 - key_pairs: 500 - security_groups: 400 - security_group_rules: 600 - cinder: - gigabytes: -1 - snapshots: -1 - volumes: -1 - sla: - failure_rate: - max: 0 - - Authenticate.keystone: - - - runner: - type: "constant" - times: 40 - concurrency: 20 - context: - users: - tenants: 2 - users_per_tenant: 10 - sla: - failure_rate: - max: 0 - - Authenticate.validate_glance: - - - args: - repetitions: 2 - runner: - type: "constant" - times: 10 - concurrency: 5 - context: - users: - tenants: 3 - users_per_tenant: 5 - sla: - failure_rate: - max: 0 - - HttpRequests.check_request: - - - args: - url: "http://www.example.com" - method: "GET" - status_code: 200 - runner: - type: "constant" - times: 2 - concurrency: 2 - sla: - failure_rate: - max: 0 - - HttpRequests.check_random_request: - - - args: - requests: - - - url: "http://www.example.com" - method: "GET" - - - url: "http://www.openstack.org" - method: "GET" - status_code: 200 - runner: - type: "constant" - times: 2 - concurrency: 2 - sla: - failure_rate: - max: 0 - - GlanceImages.list_images: - - - runner: - type: "constant" - times: 5 - concurrency: 5 - context: - users: - tenants: 1 - users_per_tenant: 2 - images: - image_url: "{{ cirros_image_url }}" - disk_format: "qcow2" - container_format: "bare" - images_per_tenant: 1 - sla: - failure_rate: - max: 0 - - - - runner: - type: "constant" - times: 5 - concurrency: 5 - context: - users: - tenants: 1 - users_per_tenant: 2 - images: - image_url: "~/.rally/extra/fake-image.img" - disk_format: "qcow2" - container_format: "bare" - images_per_tenant: 1 - sla: - failure_rate: - max: 0 - - GlanceImages.create_and_get_image: - - - args: - image_location: "{{ cirros_image_url }}" - container_format: "bare" - disk_format: "qcow2" - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - api_versions: - glance: - version: 2 - sla: - failure_rate: - max: 0 - - GlanceImages.create_and_delete_image: - - - args: - image_location: "{{ cirros_image_url }}" - container_format: "bare" - disk_format: "qcow2" - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 2 - users_per_tenant: 3 - sla: - failure_rate: - max: 0 - - - - args: - image_location: "{{ 
cirros_image_url }}" - container_format: "bare" - disk_format: "qcow2" - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 2 - users_per_tenant: 3 - api_versions: - glance: - version: 2 - sla: - failure_rate: - max: 0 - -# - -# args: -# image_location: "{{ cirros_image_url }}" -# container_format: "bare" -# disk_format: "qcow2" -# runner: -# type: "constant" -# times: 1 -# concurrency: 1 -# context: -# users: -# tenants: 1 -# users_per_tenant: 1 -# api_versions: -# glance: -# version: 1 -# roles: -# - admin -# sla: -# failure_rate: -# max: 0 -# - GlanceImages.create_and_list_image: -# - -# args: -# image_location: "~/.rally/extra/fake-image.img" -# container_format: "bare" -# disk_format: "qcow2" -# runner: -# type: "constant" -# times: 1 -# concurrency: 1 -# context: -# users: -# tenants: 1 -# users_per_tenant: 1 -# api_versions: -# glance: -# version: 1 -# sla: -# failure_rate: -# max: 0 -# - - - args: - image_location: "~/.rally/extra/fake-image.img" - container_format: "bare" - disk_format: "qcow2" - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - api_versions: - glance: - version: 2 - sla: - failure_rate: - max: 0 - - GlanceImages.create_image_and_boot_instances: - - - args: - image_location: "{{ cirros_image_url }}" - container_format: "bare" - disk_format: "qcow2" - flavor: - name: "m1.tiny" - number_instances: 2 - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 3 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 - - GlanceImages.create_and_update_image: - - - args: - image_location: "{{ cirros_image_url }}" - container_format: "bare" - disk_format: "qcow2" - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - - SwiftObjects.create_container_and_object_then_list_objects: - - - args: - objects_per_container: 2 - object_size: 5120 - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 1 - roles: - - "admin" - sla: - failure_rate: - max: 0 - - SwiftObjects.create_container_and_object_then_delete_all: - - - args: - objects_per_container: 5 - object_size: 102400 - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 1 - roles: - - "admin" - sla: - failure_rate: - max: 0 - - SwiftObjects.create_container_and_object_then_download_object: - - - args: - objects_per_container: 5 - object_size: 1024 - runner: - type: "constant" - times: 6 - concurrency: 3 - context: - users: - tenants: 1 - users_per_tenant: 1 - roles: - - "admin" - sla: - failure_rate: - max: 0 - - SwiftObjects.list_and_download_objects_in_containers: - - - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 1 - roles: - - "admin" - swift_objects: - containers_per_tenant: 1 - objects_per_container: 5 - object_size: 10240 - sla: - failure_rate: - max: 0 - - SwiftObjects.list_objects_in_containers: - - - runner: - type: "constant" - times: 6 - concurrency: 3 - context: - users: - tenants: 1 - users_per_tenant: 1 - roles: - - "admin" - swift_objects: - containers_per_tenant: 1 - objects_per_container: 10 - object_size: 1024 - sla: - failure_rate: - max: 0 - - KeystoneBasic.create_and_list_roles: - - - runner: - type: "constant" - times: 10 - concurrency: 5 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - 
max: 0 diff --git a/rally-jobs/rally-magnum.yaml b/rally-jobs/rally-magnum.yaml deleted file mode 100644 index 73efc57f..00000000 --- a/rally-jobs/rally-magnum.yaml +++ /dev/null @@ -1,76 +0,0 @@ ---- - MagnumClusterTemplates.list_cluster_templates: - - - runner: - type: "constant" - times: 40 - concurrency: 20 - context: - users: - tenants: 1 - users_per_tenant: 1 - cluster_templates: - image_id: "fedora-atomic-latest" - flavor_id: "m1.small" - master_flavor_id: "m1.small" - external_network_id: "public" - dns_nameserver: "8.8.8.8" - docker_volume_size: 5 - coe: "kubernetes" - network_driver: "flannel" - docker_storage_driver: "devicemapper" - master_lb_enabled: False - sla: - failure_rate: - max: 0 - - - - runner: - type: "constant" - times: 40 - concurrency: 20 - context: - users: - tenants: 1 - users_per_tenant: 1 - cluster_templates: - image_id: "fedora-atomic-latest" - flavor_id: "m1.small" - master_flavor_id: "m1.small" - external_network_id: "public" - dns_nameserver: "8.8.8.8" - docker_volume_size: 5 - coe: "swarm" - network_driver: "docker" - docker_storage_driver: "devicemapper" - master_lb_enabled: False - sla: - failure_rate: - max: 0 - - MagnumClusters.create_and_list_clusters: - - - args: - node_count: 1 - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - cluster_templates: - image_id: "fedora-atomic-latest" - flavor_id: "m1.small" - master_flavor_id: "m1.small" - external_network_id: "public" - dns_nameserver: "8.8.8.8" - docker_volume_size: 5 - coe: "swarm" - network_driver: "docker" - docker_storage_driver: "devicemapper" - master_lb_enabled: False - sla: - failure_rate: - max: 0 diff --git a/rally-jobs/rally-manila-no-ss.yaml b/rally-jobs/rally-manila-no-ss.yaml deleted file mode 100644 index 689e5298..00000000 --- a/rally-jobs/rally-manila-no-ss.yaml +++ /dev/null @@ -1,137 +0,0 @@ ---- - Dummy.openstack: - - - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - quotas: - manila: - shares: -1 - gigabytes: -1 - snapshots: -1 - snapshot_gigabytes: -1 - share_networks: -1 - - ManilaShares.list_shares: - - - args: - detailed: True - runner: - type: "constant" - times: 10 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 - - {% for s in ("create_and_delete_share", "create_and_list_share") %} - ManilaShares.{{s}}: - - - args: - share_proto: "nfs" - size: 1 - share_type: "dhss_false" - min_sleep: 1 - max_sleep: 2 - runner: - type: "constant" - times: 4 - concurrency: 4 - context: - quotas: - manila: - shares: -1 - gigabytes: -1 - users: - tenants: 2 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 - {% endfor %} - - ManilaShares.create_and_extend_share: - - - args: - share_proto: "nfs" - size: 1 - share_type: "dhss_false" - new_size: 2 - runner: - type: "constant" - times: 4 - concurrency: 4 - context: - quotas: - manila: - shares: -1 - gigabytes: -1 - users: - tenants: 2 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 - - ManilaShares.create_and_shrink_share: - - - args: - share_proto: "nfs" - size: 2 - share_type: "dhss_false" - new_size: 1 - runner: - type: "constant" - times: 4 - concurrency: 4 - context: - quotas: - manila: - shares: -1 - gigabytes: -1 - users: - tenants: 2 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 - - ManilaShares.set_and_delete_metadata: - - - args: - sets: 1 - set_size: 3 - delete_size: 3 - key_min_length: 1 - key_max_length: 256 - value_min_length: 1 - 
value_max_length: 1024 - runner: - type: "constant" - times: 10 - concurrency: 10 - context: - quotas: - manila: - shares: -1 - gigabytes: -1 - users: - tenants: 1 - users_per_tenant: 1 - manila_shares: - shares_per_tenant: 1 - share_proto: "NFS" - size: 1 - share_type: "dhss_false" - sla: - failure_rate: - max: 0 diff --git a/rally-jobs/rally-manila.yaml b/rally-jobs/rally-manila.yaml deleted file mode 100644 index 500acfee..00000000 --- a/rally-jobs/rally-manila.yaml +++ /dev/null @@ -1,252 +0,0 @@ ---- - Dummy.openstack: - - - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - quotas: - manila: - shares: -1 - gigabytes: -1 - snapshots: -1 - snapshot_gigabytes: -1 - share_networks: -1 - - ManilaShares.list_shares: - - - args: - detailed: True - runner: - type: "constant" - times: 12 - concurrency: 1 - context: - users: - tenants: 3 - users_per_tenant: 4 - user_choice_method: "round_robin" - sla: - failure_rate: - max: 0 - - ManilaShares.create_and_extend_share: - - - args: - share_proto: "nfs" - size: 1 - new_size: 2 - share_type: "dhss_true" - runner: - type: "constant" - times: 4 - concurrency: 4 - context: - quotas: - manila: - shares: -1 - gigabytes: -1 - share_networks: -1 - users: - tenants: 2 - users_per_tenant: 1 - user_choice_method: "round_robin" - manila_share_networks: - use_share_networks: True - sla: - failure_rate: - max: 0 - - ManilaShares.create_and_shrink_share: - - - args: - share_proto: "nfs" - size: 2 - new_size: 1 - share_type: "dhss_true" - runner: - type: "constant" - times: 4 - concurrency: 4 - context: - quotas: - manila: - shares: -1 - gigabytes: -1 - share_networks: -1 - users: - tenants: 2 - users_per_tenant: 1 - user_choice_method: "round_robin" - manila_share_networks: - use_share_networks: True - sla: - failure_rate: - max: 0 - - {% for s in ("create_and_delete_share", "create_and_list_share") %} - ManilaShares.{{s}}: - - - args: - share_proto: "nfs" - size: 1 - share_type: "dhss_true" - min_sleep: 1 - max_sleep: 2 - runner: - type: "constant" - times: 4 - concurrency: 4 - context: - quotas: - manila: - shares: -1 - gigabytes: -1 - share_networks: -1 - users: - tenants: 2 - users_per_tenant: 1 - user_choice_method: "round_robin" - manila_share_networks: - use_share_networks: True - sla: - failure_rate: - max: 0 - {% endfor %} - - ManilaShares.create_share_network_and_delete: - - - args: - name: "rally" - runner: - type: "constant" - times: 10 - concurrency: 10 - context: - quotas: - manila: - share_networks: -1 - users: - tenants: 2 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 - - ManilaShares.create_share_network_and_list: - - - args: - name: "rally" - detailed: True - search_opts: - name: "rally" - runner: - type: "constant" - times: 10 - concurrency: 10 - context: - quotas: - manila: - share_networks: -1 - users: - tenants: 2 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 - - ManilaShares.list_share_servers: - - - args: - search_opts: {} - runner: - type: "constant" - times: 10 - concurrency: 10 - sla: - failure_rate: - max: 0 - - ManilaShares.create_security_service_and_delete: - {% for s in ("ldap", "kerberos", "active_directory") %} - - - args: - security_service_type: {{s}} - dns_ip: "fake_dns_ip" - server: "fake-server" - domain: "fake_domain" - user: "fake_user" - password: "fake_password" - name: "fake_name" - description: "fake_description" - runner: - type: "constant" - times: 10 - concurrency: 10 - context: - users: - tenants: 1 - users_per_tenant: 1 - sla: - 
failure_rate: - max: 0 - {% endfor %} - - ManilaShares.attach_security_service_to_share_network: - {% for s in ("ldap", "kerberos", "active_directory") %} - - - args: - security_service_type: {{s}} - runner: - type: "constant" - times: 10 - concurrency: 10 - context: - users: - tenants: 1 - users_per_tenant: 1 - quotas: - manila: - share_networks: -1 - sla: - failure_rate: - max: 0 - {% endfor %} - - ManilaShares.set_and_delete_metadata: - - - args: - sets: 1 - set_size: 3 - delete_size: 3 - key_min_length: 1 - key_max_length: 256 - value_min_length: 1 - value_max_length: 1024 - runner: - type: "constant" - times: 10 - concurrency: 10 - context: - quotas: - manila: - shares: -1 - gigabytes: -1 - share_networks: -1 - users: - tenants: 1 - users_per_tenant: 1 - manila_share_networks: - use_share_networks: True - manila_shares: - shares_per_tenant: 1 - share_proto: "NFS" - size: 1 - share_type: "dhss_true" - sla: - failure_rate: - max: 0 diff --git a/rally-jobs/rally-mistral.yaml b/rally-jobs/rally-mistral.yaml deleted file mode 100644 index 5ecb0ffc..00000000 --- a/rally-jobs/rally-mistral.yaml +++ /dev/null @@ -1,80 +0,0 @@ ---- - MistralWorkbooks.list_workbooks: - - - runner: - type: "constant" - times: 50 - concurrency: 10 - context: - users: - tenants: 1 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 - - MistralWorkbooks.create_workbook: - - - args: - definition: "~/.rally/extra/mistral_wb.yaml" - runner: - type: "constant" - times: 50 - concurrency: 10 - context: - users: - tenants: 1 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 - - - - args: - definition: "~/.rally/extra/mistral_wb.yaml" - do_delete: true - runner: - type: "constant" - times: 50 - concurrency: 10 - context: - users: - tenants: 1 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 - - MistralExecutions.list_executions: - - - runner: - type: "constant" - times: 50 - concurrency: 10 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - - MistralExecutions.create_execution_from_workbook: - - - args: - definition: "~/.rally/extra/mistral_wb.yaml" - workflow_name: "wf1" - params: "~/.rally/extra/mistral_params.json" - wf_input: "~/.rally/extra/mistral_input.json" - do_delete: true - runner: - type: "constant" - times: 50 - concurrency: 10 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 \ No newline at end of file diff --git a/rally-jobs/rally-monasca.yaml b/rally-jobs/rally-monasca.yaml deleted file mode 100644 index 8de3400a..00000000 --- a/rally-jobs/rally-monasca.yaml +++ /dev/null @@ -1,39 +0,0 @@ ---- - MonascaMetrics.list_metrics: - - - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - roles: - - "monasca-user" - monasca_metrics: - "dimensions": - "region": "RegionOne" - "service": "identity" - "hostname": "fake_host" - "url": "http://fake_host:5000/v2.0" - "metrics_per_tenant": 10 - sla: - failure_rate: - max: 0 - - - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - roles: - - "monasca-user" - monasca_metrics: - "metrics_per_tenant": 10 - sla: - failure_rate: - max: 0 diff --git a/rally-jobs/rally-mos.yaml b/rally-jobs/rally-mos.yaml deleted file mode 100644 index b74c3f92..00000000 --- a/rally-jobs/rally-mos.yaml +++ /dev/null @@ -1,837 +0,0 @@ ---- -{%- set keystone_version = keystone_version|default("v2.0") %} -{% if keystone_version == "v2.0" %} - - 
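Nearly every workload in these files uses the "constant" runner, where times is the total number of iterations and concurrency is how many iterations run in parallel at any moment. A rough sketch of that contract with a thread pool — a simplification for illustration, not Rally's implementation:

    import time
    from concurrent.futures import ThreadPoolExecutor

    def constant_runner(scenario, times, concurrency):
        # Run `times` iterations total, at most `concurrency` at once,
        # recording each iteration's duration and error (if any).
        results = []

        def one_iteration(i):
            start = time.time()
            error = None
            try:
                scenario(i)
            except Exception as exc:
                error = str(exc)
            results.append({"duration": time.time() - start, "error": error})

        with ThreadPoolExecutor(max_workers=concurrency) as pool:
            for i in range(times):
                pool.submit(one_iteration, i)
        return results

    # e.g. "times: 10, concurrency: 5", as in the Keystone workloads above
    results = constant_runner(lambda i: time.sleep(0.01), times=10, concurrency=5)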
SaharaNodeGroupTemplates.create_and_list_node_group_templates: - - - args: - hadoop_version: "{{sahara_hadoop_version}}" - flavor: - name: "m1.small" - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - api_versions: - sahara: - service_type: {{sahara_service_type}} - sla: - failure_rate: - max: 0 - - SaharaNodeGroupTemplates.create_delete_node_group_templates: - - - args: - hadoop_version: "{{sahara_hadoop_version}}" - flavor: - name: "m1.small" - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - api_versions: - sahara: - service_type: {{sahara_service_type}} - sla: - failure_rate: - max: 0 - - KeystoneBasic.create_and_list_tenants: - - - args: {} - runner: - type: "constant" - times: 1 - concurrency: 1 - sla: - failure_rate: - max: 0 - - KeystoneBasic.create_tenant: - - - args: {} - runner: - type: "constant" - times: 1 - concurrency: 1 - sla: - failure_rate: - max: 0 - - KeystoneBasic.create_tenant_with_users: - - - args: - users_per_tenant: 10 - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - sla: - failure_rate: - max: 0 - -{% endif %} - - KeystoneBasic.create_user: - - - args: {} - runner: - type: "constant" - times: 1 - concurrency: 1 - sla: - failure_rate: - max: 0 - - KeystoneBasic.create_delete_user: - - - args: {} - runner: - type: "constant" - times: 1 - concurrency: 1 - sla: - failure_rate: - max: 0 - - KeystoneBasic.create_and_list_users: - - - args: {} - runner: - type: "constant" - times: 1 - concurrency: 1 - sla: - failure_rate: - max: 0 - - HeatStacks.create_and_list_stack: - - - args: - template_path: "~/.rally/extra/default.yaml.template" - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 - - HeatStacks.create_and_delete_stack: - - - args: - template_path: "~/.rally/extra/default.yaml.template" - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 - - Authenticate.keystone: - - - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 - - Authenticate.validate_cinder: - - - args: - repetitions: 2 - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 - - Authenticate.validate_glance: - - - args: - repetitions: 2 - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 - - Authenticate.validate_heat: - - - args: - repetitions: 2 - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 - - Authenticate.validate_nova: - - - args: - repetitions: 2 - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 - - Quotas.cinder_update_and_delete: - - - args: - max_quota: 1024 - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 - - Quotas.cinder_update: - - - args: - max_quota: 1024 - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - sla: - failure_rate: - max: 
0 - - Quotas.nova_update_and_delete: - - - args: - max_quota: 1024 - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 - - Quotas.nova_update: - - - args: - max_quota: 1024 - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 - - VMTasks.boot_runcommand_delete: - - - args: - flavor: - name: "m1.tiny" - image: - name: "TestVM|cirros.*uec" - floating_network: "{{external_net}}" - use_floating_ip: true - command: - script_file: "~/.rally/extra/instance_test.sh" - interpreter: "/bin/sh" - username: "cirros" - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - network: {} - sla: - failure_rate: - max: 0 - - - NovaServers.boot_and_delete_server: - - - args: - flavor: - name: "m1.tiny" - image: - name: "TestVM|cirros.*uec" - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 - - - - args: - auto_assign_nic: true - flavor: - name: "m1.tiny" - image: - name: "TestVM|cirros.*uec" - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - network: - start_cidr: "10.2.0.0/24" - networks_per_tenant: 2 - sla: - failure_rate: - max: 0 - - - NovaServers.boot_and_list_server: - - - args: - flavor: - name: "m1.tiny" - image: - name: "TestVM|cirros.*uec" - detailed: True - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 - - NovaServers.list_servers: - - - args: - detailed: True - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - servers: - flavor: - name: "m1.tiny" - image: - name: "TestVM|cirros.*uec" - servers_per_tenant: 1 - sla: - failure_rate: - max: 0 - - NovaServers.boot_and_bounce_server: - - - args: - flavor: - name: "m1.tiny" - image: - name: "TestVM|cirros.*uec" - actions: - - - hard_reboot: 1 - - - stop_start: 1 - - - rescue_unrescue: 1 - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 - - NovaServers.boot_server: - - - args: - flavor: - name: "^ram64$" - image: - name: "TestVM|cirros.*uec" - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - flavors: - - - name: "ram64" - ram: 64 - sla: - failure_rate: - max: 0 - - - args: - flavor: - name: "m1.tiny" - image: - name: "TestVM|cirros.*uec" - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 - - NeutronNetworks.create_and_list_networks: - - - args: - network_create_args: - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - quotas: - neutron: - network: -1 - sla: - failure_rate: - max: 0 - - NeutronNetworks.create_and_list_subnets: - - - args: - network_create_args: - subnet_create_args: - subnet_cidr_start: "1.1.0.0/30" - subnets_per_network: 2 - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - network: {} - users: - tenants: 1 - users_per_tenant: 1 - quotas: - neutron: - network: -1 - subnet: -1 - sla: - failure_rate: - max: 0 - - NeutronNetworks.create_and_list_routers: - - - args: - 
network_create_args: - subnet_create_args: - subnet_cidr_start: "1.1.0.0/30" - subnets_per_network: 2 - router_create_args: - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - network: {} - users: - tenants: 1 - users_per_tenant: 1 - quotas: - neutron: - network: -1 - subnet: -1 - router: -1 - sla: - failure_rate: - max: 0 - - NeutronNetworks.create_and_list_ports: - - - args: - network_create_args: - port_create_args: - ports_per_network: 4 - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - network: {} - users: - tenants: 1 - users_per_tenant: 1 - quotas: - neutron: - network: -1 - subnet: -1 - router: -1 - port: -1 - sla: - failure_rate: - max: 0 - - NeutronNetworks.create_and_update_networks: - - - args: - network_create_args: {} - network_update_args: - admin_state_up: False - name: "_updated" - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - quotas: - neutron: - network: -1 - sla: - failure_rate: - max: 0 - - NeutronNetworks.create_and_update_subnets: - - - args: - network_create_args: {} - subnet_create_args: {} - subnet_cidr_start: "1.4.0.0/16" - subnets_per_network: 2 - subnet_update_args: - enable_dhcp: False - name: "_subnet_updated" - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - network: {} - users: - tenants: 5 - users_per_tenant: 5 - quotas: - neutron: - network: -1 - subnet: -1 - sla: - failure_rate: - max: 0 - - NeutronNetworks.create_and_update_routers: - - - args: - network_create_args: {} - subnet_create_args: {} - subnet_cidr_start: "1.1.0.0/30" - subnets_per_network: 2 - router_create_args: {} - router_update_args: - admin_state_up: False - name: "_router_updated" - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - network: {} - users: - tenants: 1 - users_per_tenant: 1 - quotas: - neutron: - network: -1 - subnet: -1 - router: -1 - sla: - failure_rate: - max: 0 - - NeutronNetworks.create_and_update_ports: - - - args: - network_create_args: {} - port_create_args: {} - ports_per_network: 5 - port_update_args: - admin_state_up: False - device_id: "dummy_id" - device_owner: "dummy_owner" - name: "_port_updated" - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - network: {} - users: - tenants: 1 - users_per_tenant: 1 - quotas: - neutron: - network: -1 - port: -1 - sla: - failure_rate: - max: 0 - - NeutronNetworks.create_and_delete_networks: - - - args: - network_create_args: {} - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - quotas: - neutron: - network: -1 - subnet: -1 - sla: - failure_rate: - max: 0 - - NeutronNetworks.create_and_delete_subnets: - - - args: - network_create_args: {} - subnet_create_args: {} - subnet_cidr_start: "1.1.0.0/30" - subnets_per_network: 2 - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - network: {} - users: - tenants: 1 - users_per_tenant: 1 - quotas: - neutron: - network: -1 - subnet: -1 - sla: - failure_rate: - max: 0 - - NeutronNetworks.create_and_delete_ports: - - - args: - network_create_args: {} - port_create_args: {} - ports_per_network: 10 - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - network: {} - users: - tenants: 1 - users_per_tenant: 1 - quotas: - neutron: - network: -1 - port: -1 - sla: - failure_rate: - max: 0 - - CinderVolumes.create_and_upload_volume_to_image: - - - args: - size: 1 - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - 
tenants: 1 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 - - CinderVolumes.create_volume_backup: - - - args: - size: 1 - do_delete: True - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 - - CinderVolumes.create_and_restore_volume_backup: - - - args: - size: 1 - do_delete: True - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 - - CinderVolumes.create_and_list_volume_backups: - - - args: - size: 1 - detailed: True - do_delete: True - runner: - type: "constant" - times: 2 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 - - VMTasks.runcommand_heat: - - - args: - workload: - resource: ["rally.plugins.workload", "siege.py"] - username: "fedora" - template: /home/rally/.rally/extra/workload/wordpress_heat_template.yaml - files: - wp-instances.yaml: /home/rally/.rally/extra/workload/wp-instances.yaml - parameters: - wp_instances_count: 2 - wp_instance_type: gig - instance_type: gig - wp_image: fedora - image: fedora - network_id: {{external_net_id}} - context: - users: - tenants: 1 - users_per_tenant: 1 - flavors: - - name: gig - ram: 1024 - disk: 4 - vcpus: 1 - runner: - concurrency: 1 - timeout: 3000 - times: 1 - type: constant - sla: - failure_rate: - max: 100 diff --git a/rally-jobs/rally-murano.yaml b/rally-jobs/rally-murano.yaml deleted file mode 100644 index d346b790..00000000 --- a/rally-jobs/rally-murano.yaml +++ /dev/null @@ -1,146 +0,0 @@ ---- - MuranoEnvironments.list_environments: - - - runner: - type: "constant" - times: 30 - concurrency: 4 - context: - users: - tenants: 2 - users_per_tenant: 2 - murano_environments: - environments_per_tenant: 2 - sla: - failure_rate: - max: 0 - - MuranoEnvironments.create_and_delete_environment: - - - runner: - type: "constant" - times: 20 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - - MuranoEnvironments.create_and_deploy_environment: - - - args: - packages_per_env: 2 - runner: - type: "constant" - times: 8 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - murano_packages: - app_package: "~/.rally/extra/murano/applications/HelloReporter/io.murano.apps.HelloReporter.zip" - roles: - - "admin" - sla: - failure_rate: - max: 0 - - - args: - packages_per_env: 2 - runner: - type: "constant" - times: 8 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - murano_packages: - app_package: "~/.rally/extra/murano/applications/HelloReporter/io.murano.apps.HelloReporter/" - roles: - - "admin" - - MuranoPackages.import_and_list_packages: - - - args: - package: "~/.rally/extra/murano/applications/HelloReporter/io.murano.apps.HelloReporter/" - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - - - args: - package: "~/.rally/extra/murano/applications/HelloReporter/io.murano.apps.HelloReporter.zip" - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 - - MuranoPackages.import_and_delete_package: - - - args: - package: "~/.rally/extra/murano/applications/HelloReporter/io.murano.apps.HelloReporter/" - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - 
max: 0 - - MuranoPackages.import_and_filter_applications: - - - args: - package: "~/.rally/extra/murano/applications/HelloReporter/io.murano.apps.HelloReporter/" - filter_query: {"category" : "Web"} - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - - MuranoPackages.package_lifecycle: - - - args: - package: "~/.rally/extra/murano/applications/HelloReporter/io.murano.apps.HelloReporter/" - body: {"categories": ["Web"]} - operation: "add" - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/rally-jobs/rally-neutron-existing-users.yaml b/rally-jobs/rally-neutron-existing-users.yaml deleted file mode 100644 index 4782f2e6..00000000 --- a/rally-jobs/rally-neutron-existing-users.yaml +++ /dev/null @@ -1,131 +0,0 @@ -{% set flavor_name = "m1.tiny" %} -{% set image_name = "^cirros.*-disk$" %} -{% set cirros_image_url = "http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img" %} -{% set smoke = 0 %} - ---- - version: 2 - title: rally-neutron-existing-users.yaml - description: > - The task contains various scenarios that do not require admin user - subtasks: - - - title: Test main Cinder actions - workloads: - - - name: CinderVolumes.create_volume - args: - size: 1 - runner: - type: "constant" - times: 2 - concurrency: 2 - sla: - failure_rate: - max: 0 - - - name: CinderVolumes.create_volume - args: - size: 1 - image: - name: {{image_name}} - runner: - type: "constant" - times: 1 - concurrency: 1 - sla: - failure_rate: - max: 0 - - - name: CinderVolumes.create_snapshot_and_attach_volume - args: - volume_type: "lvmdriver-1" - size: - min: 1 - max: 1 - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - servers: - image: - name: {{image_name}} - flavor: - name: {{flavor_name}} - servers_per_tenant: 1 - sla: - failure_rate: - max: 0 - - - title: Test main Nova actions - workloads: - - - name: NovaServers.boot_and_list_server - args: - flavor: - name: {{flavor_name}} - image: - name: {{image_name}} - detailed: True - runner: - type: "constant" - times: 2 - concurrency: 2 - sla: - failure_rate: - max: 0 - - - title: Test main Glance actions - workloads: - - - name: GlanceImages.create_and_delete_image - args: - image_location: "{{ cirros_image_url }}" - container_format: "bare" - disk_format: "qcow2" - runner: - type: "constant" - times: 1 - concurrency: 1 - sla: - failure_rate: - max: 100 - - - title: Test main Neutron actions - workloads: - - - name: NeutronNetworks.create_and_list_networks - args: - network_create_args: - runner: - type: "constant" - times: 2 - concurrency: 2 - sla: - failure_rate: - max: 0 - - - name: NeutronNetworks.create_and_list_subnets - args: - subnet_cidr_start: "1.1.0.0/30" - subnets_per_network: 2 - runner: - type: "constant" - times: 2 - concurrency: 2 - sla: - failure_rate: - max: 0 - - - name: NeutronNetworks.create_and_list_floating_ips - args: - floating_network: "public" - floating_ip_args: {} - runner: - type: "constant" - times: 2 - concurrency: 2 - sla: - failure_rate: - max: 0 diff --git a/rally-jobs/rally-neutron-extensions.yaml b/rally-jobs/rally-neutron-extensions.yaml deleted file mode 100644 index 3ed5e242..00000000 --- a/rally-jobs/rally-neutron-extensions.yaml +++ /dev/null @@ -1,119 +0,0 @@ ---- - NeutronLoadbalancerV2.create_and_list_loadbalancers: - - - args: - lb_create_args: {} - runner: - type: "constant" - times: 2 - 
concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 1 - network: {} - sla: - failure_rate: - max: 0 - - NeutronBGPVPN.create_and_list_bgpvpns: - - - runner: - type: "constant" - times: 8 - concurrency: 4 - context: - users: - tenants: 2 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 - - NeutronBGPVPN.create_and_update_bgpvpns: - - - runner: - type: "constant" - times: 8 - concurrency: 4 - context: - users: - tenants: 2 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 - - NeutronBGPVPN.create_and_delete_bgpvpns: - - - runner: - type: "constant" - times: 8 - concurrency: 4 - context: - users: - tenants: 2 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 - - NeutronBGPVPN.create_bgpvpn_assoc_disassoc_networks: - - - runner: - type: "constant" - times: 8 - concurrency: 4 - context: - users: - tenants: 2 - users_per_tenant: 1 - network: {} - sla: - failure_rate: - max: 0 - - NeutronBGPVPN.create_bgpvpn_assoc_disassoc_routers: - - - runner: - type: "constant" - times: 8 - concurrency: 4 - context: - users: - tenants: 2 - users_per_tenant: 1 - router: {} - sla: - failure_rate: - max: 0 - - NeutronBGPVPN.create_and_list_networks_associations: - - - runner: - type: "constant" - times: 8 - concurrency: 4 - context: - users: - tenants: 2 - users_per_tenant: 1 - network: {} - sla: - failure_rate: - max: 0 - - NeutronBGPVPN.create_and_list_routers_associations: - - - runner: - type: "constant" - times: 8 - concurrency: 4 - context: - users: - tenants: 2 - users_per_tenant: 1 - router: {} - sla: - failure_rate: - max: 0 diff --git a/rally-jobs/rally-neutron.yaml b/rally-jobs/rally-neutron.yaml deleted file mode 100644 index 7f5187f8..00000000 --- a/rally-jobs/rally-neutron.yaml +++ /dev/null @@ -1,773 +0,0 @@ -{% set image_name = "^cirros.*-disk$" %} -{% set flavor_name = "m1.tiny" %} -{% set smoke = 0 %} - ---- - NeutronNetworks.create_and_list_networks: - - - args: - network_create_args: {} - runner: - type: "constant" - times: {{smoke or 8}} - concurrency: {{smoke or 4}} - context: - users: - tenants: {{smoke or 2}} - users_per_tenant: {{smoke or 1}} - quotas: - neutron: - network: -1 - sla: - failure_rate: - max: 20 - - - args: - network_create_args: - provider:network_type: "vxlan" - runner: - type: "constant" - times: {{smoke or 8}} - concurrency: {{smoke or 4}} - context: - users: - tenants: {{smoke or 2}} - users_per_tenant: {{smoke or 1}} - quotas: - neutron: - network: -1 - roles: - - "admin" - sla: - failure_rate: - max: 20 - - NeutronNetworks.set_and_clear_router_gateway: - - - args: - network_create_args: - router:external: True - router_create_args: {} - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - network: {} - users: - tenants: 2 - users_per_tenant: 2 - quotas: - neutron: - network: -1 - router: -1 - roles: - - "admin" - sla: - failure_rate: - max: 0 - - NeutronNetworks.create_and_show_network: - - - args: - network_create_args: {} - runner: - type: "constant" - times: {{smoke or 8}} - concurrency: {{smoke or 4}} - context: - users: - tenants: {{smoke or 2}} - users_per_tenant: {{smoke or 1}} - quotas: - neutron: - network: -1 - sla: - failure_rate: - max: 0 - - NeutronNetworks.create_and_list_subnets: - - - args: - network_create_args: - subnet_create_args: - subnet_cidr_start: "1.1.0.0/30" - subnets_per_network: 2 - runner: - type: "constant" - times: {{smoke or 8 }} - concurrency: {{smoke or 4}} - context: - network: {} - users: - tenants: {{smoke or 2}} - users_per_tenant: {{smoke or 1}} - quotas: - neutron: - network: -1 - 
subnet: -1 - sla: - failure_rate: - max: 20 - - NeutronNetworks.create_and_show_subnets: - - - args: - network_create_args: - subnet_create_args: - subnet_cidr_start: "1.1.0.0/30" - subnets_per_network: 2 - runner: - type: "constant" - times: {{smoke or 8 }} - concurrency: {{smoke or 4}} - context: - network: {} - users: - tenants: {{smoke or 2}} - users_per_tenant: {{smoke or 1}} - quotas: - neutron: - network: -1 - subnet: -1 - sla: - failure_rate: - max: 20 - - NeutronSecurityGroup.create_and_list_security_groups: - - - args: - security_group_create_args: {} - runner: - type: "constant" - times: {{smoke or 8 }} - concurrency: {{smoke or 4}} - context: - users: - tenants: {{smoke or 2}} - users_per_tenant: {{smoke or 1}} - quotas: - neutron: - security_group: -1 - sla: - failure_rate: - max: 20 - - NeutronSecurityGroup.create_and_show_security_group: - - - args: - security_group_create_args: {} - runner: - type: "constant" - times: {{smoke or 8 }} - concurrency: {{smoke or 4}} - context: - users: - tenants: {{smoke or 2}} - users_per_tenant: {{smoke or 1}} - quotas: - neutron: - security_group: -1 - sla: - failure_rate: - max: 20 - - NeutronSecurityGroup.create_and_delete_security_groups: - - - args: - security_group_create_args: {} - runner: - type: "constant" - times: {{smoke or 8 }} - concurrency: {{smoke or 4}} - context: - users: - tenants: {{smoke or 2}} - users_per_tenant: {{smoke or 1}} - quotas: - neutron: - security_group: -1 - sla: - failure_rate: - max: 20 - - NeutronSecurityGroup.create_and_update_security_groups: - - - args: - security_group_create_args: {} - security_group_update_args: {} - runner: - type: "constant" - times: {{smoke or 8 }} - concurrency: {{smoke or 4}} - context: - users: - tenants: {{smoke or 2}} - users_per_tenant: {{smoke or 1}} - quotas: - neutron: - security_group: -1 - sla: - failure_rate: - max: 20 - - NeutronSecurityGroup.create_and_list_security_group_rules: - - - args: - security_group_args: {} - security_group_rule_args: {} - runner: - type: "constant" - times: {{smoke or 8 }} - concurrency: {{smoke or 4}} - context: - users: - tenants: {{smoke or 2}} - users_per_tenant: {{smoke or 1}} - quotas: - neutron: - security_group: -1 - sla: - failure_rate: - max: 20 - - NeutronSecurityGroup.create_and_show_security_group_rule: - - - args: - security_group_args: {} - security_group_rule_args: {} - runner: - type: "constant" - times: 8 - concurrency: 4 - context: - users: - tenants: 2 - users_per_tenant: 1 - quotas: - neutron: - security_group: -1 - sla: - failure_rate: - max: 0 - - NeutronNetworks.create_and_list_floating_ips: - - - args: - floating_network: "public" - floating_ip_args: {} - runner: - type: "constant" - times: {{smoke or 8}} - concurrency: {{smoke or 4}} - context: - users: - tenants: {{smoke or 2}} - users_per_tenant: {{smoke or 1}} - quotas: - neutron: - floatingip: -1 - sla: - failure_rate: - max: 0 - - NeutronNetworks.create_and_list_routers: - - - args: - network_create_args: - subnet_create_args: - subnet_cidr_start: "1.1.0.0/30" - subnets_per_network: 2 - router_create_args: - runner: - type: "constant" - times: {{smoke or 8}} - concurrency: {{smoke or 4}} - context: - network: {} - users: - tenants: {{smoke or 2}} - users_per_tenant: {{smoke or 1}} - quotas: - neutron: - network: -1 - subnet: -1 - router: -1 - sla: - failure_rate: - max: 20 - - NeutronNetworks.create_and_show_routers: - - - args: - subnet_cidr_start: "1.1.0.0/30" - subnets_per_network: 2 - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - 
network: {} - users: - tenants: 2 - users_per_tenant: 2 - quotas: - neutron: - network: -1 - subnet: -1 - router: -1 - - NeutronNetworks.create_and_list_ports: - - - args: - network_create_args: - port_create_args: - ports_per_network: 4 - runner: - type: "constant" - times: {{smoke or 8}} - concurrency: {{smoke or 4}} - context: - network: {} - users: - tenants: {{smoke or 2}} - users_per_tenant: {{smoke or 1}} - quotas: - neutron: - network: -1 - subnet: -1 - router: -1 - port: -1 - sla: - failure_rate: - max: 20 - - NeutronNetworks.list_agents: - - - args: - agent_args: {} - runner: - type: "constant" - times: {{smoke or 4}} - concurrency: {{smoke or 2}} - context: - users: - tenants: {{smoke or 2}} - users_per_tenant: {{smoke or 1}} - sla: - failure_rate: - max: 0 - - NeutronNetworks.create_and_show_ports: - - - args: - network_create_args: {} - port_create_args: {} - ports_per_network: 2 - runner: - type: "constant" - times: {{smoke or 4}} - concurrency: {{smoke or 2}} - context: - network: {} - users: - tenants: {{smoke or 2}} - users_per_tenant: {{smoke or 1}} - quotas: - neutron: - network: -1 - port: -1 - sla: - failure_rate: - max: 0 - - NeutronNetworks.create_and_update_networks: - - - args: - network_create_args: {} - network_update_args: - admin_state_up: False - name: "_updated" - runner: - type: "constant" - times: {{smoke or 8}} - concurrency: {{smoke or 4}} - context: - users: - tenants: {{smoke or 2}} - users_per_tenant: {{smoke or 1}} - quotas: - neutron: - network: -1 - sla: - failure_rate: - max: 20 - - NeutronNetworks.create_and_update_subnets: - - - args: - network_create_args: {} - subnet_create_args: {} - subnet_cidr_start: "1.4.0.0/16" - subnets_per_network: 2 - subnet_update_args: - enable_dhcp: False - name: "_subnet_updated" - runner: - type: "constant" - times: {{smoke or 8}} - concurrency: {{smoke or 4}} - context: - network: {} - users: - tenants: {{smoke or 2}} - users_per_tenant: {{smoke or 1}} - quotas: - neutron: - network: -1 - subnet: -1 - sla: - failure_rate: - max: 20 - - NeutronNetworks.create_and_update_routers: - - - args: - network_create_args: {} - subnet_create_args: {} - subnet_cidr_start: "1.1.0.0/30" - subnets_per_network: 2 - router_create_args: {} - router_update_args: - admin_state_up: False - name: "_router_updated" - runner: - type: "constant" - times: {{smoke or 4}} - concurrency: {{smoke or 4}} - context: - network: {} - users: - tenants: {{smoke or 2}} - users_per_tenant: {{smoke or 1}} - quotas: - neutron: - network: -1 - subnet: -1 - router: -1 - sla: - failure_rate: - max: 20 - - NeutronNetworks.create_and_delete_networks: - - - args: - network_create_args: {} - runner: - type: "constant" - times: {{smoke or 20}} - concurrency: {{smoke or 10}} - context: - users: - tenants: {{smoke or 3}} - users_per_tenant: {{smoke or 2}} - quotas: - neutron: - network: -1 - subnet: -1 - sla: - failure_rate: - max: 20 - - NeutronNetworks.create_and_delete_subnets: - - - args: - network_create_args: {} - subnet_create_args: {} - subnet_cidr_start: "1.1.0.0/30" - subnets_per_network: 2 - runner: - type: "constant" - times: {{smoke or 8}} - concurrency: {{smoke or 4}} - context: - network: {} - users: - tenants: {{smoke or 3}} - users_per_tenant: {{smoke or 2}} - quotas: - neutron: - network: -1 - subnet: -1 - sla: - failure_rate: - max: 20 - - NeutronNetworks.create_and_delete_floating_ips: - - - args: - floating_network: "public" - floating_ip_args: {} - runner: - type: "constant" - times: {{smoke or 8}} - concurrency: {{smoke or 4}} - context: - 
users: - tenants: {{smoke or 2}} - users_per_tenant: {{smoke or 1}} - quotas: - neutron: - floatingip: -1 - sla: - failure_rate: - max: 0 - - NeutronNetworks.create_and_delete_routers: - - - args: - network_create_args: {} - subnet_create_args: {} - subnet_cidr_start: "1.1.0.0/30" - subnets_per_network: 2 - router_create_args: {} - runner: - type: "constant" - times: {{smoke or 4}} - concurrency: {{smoke or 4}} - context: - network: {} - users: - tenants: {{smoke or 2}} - users_per_tenant: {{smoke or 1}} - quotas: - neutron: - network: -1 - subnet: -1 - router: -1 - sla: - failure_rate: - max: 20 - - NeutronNetworks.create_and_delete_ports: - - - args: - network_create_args: {} - port_create_args: {} - ports_per_network: 5 - runner: - type: "constant" - times: {{smoke or 4}} - concurrency: {{smoke or 4}} - context: - network: {} - users: - tenants: {{smoke or 2}} - users_per_tenant: {{smoke or 1}} - quotas: - neutron: - network: -1 - port: -1 - sla: - failure_rate: - max: 20 - - NeutronNetworks.create_and_update_ports: - - - args: - network_create_args: {} - port_create_args: {} - ports_per_network: 2 - port_update_args: - admin_state_up: False - device_id: "dummy_id" - device_owner: "dummy_owner" - name: "_port_updated" - runner: - type: "constant" - times: {{smoke or 10}} - concurrency: {{smoke or 5}} - context: - network: {} - users: - tenants: {{smoke or 2}} - users_per_tenant: {{smoke or 1}} - quotas: - neutron: - network: -1 - port: -1 - sla: - failure_rate: - max: 20 - - Quotas.neutron_update: - - - args: - max_quota: 1024 - runner: - type: "constant" - times: {{smoke or 10}} - concurrency: {{smoke or 2}} - context: - users: - tenants: {{smoke or 2}} - users_per_tenant: {{smoke or 1}} - sla: - failure_rate: - max: 0 - - NovaServers.boot_and_delete_server: - - - args: - auto_assign_nic: True - flavor: - name: "m1.tiny" - image: - name: {{image_name}} - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - network: - start_cidr: "10.2.0.0/24" - networks_per_tenant: 2 - dns_nameservers: - - "8.8.8.8" - - "8.8.4.4" - sla: - failure_rate: - max: 0 - - VMTasks.boot_runcommand_delete: - - - args: - flavor: - name: "m1.tiny" - image: - name: {{image_name}} - command: - script_file: "~/.rally/extra/instance_test.sh" - interpreter: "/bin/sh" - username: "cirros" - runner: - type: "constant" - times: {{smoke or 2}} - concurrency: {{smoke or 2}} - context: - users: - tenants: {{smoke or 2}} - users_per_tenant: {{smoke or 2}} - network: {} - sla: - failure_rate: - max: 0 - - - args: - flavor: - name: "m1.tiny" - image: - name: {{image_name}} - command: - script_file: "~/.rally/extra/instance_test.sh" - interpreter: "/bin/sh" - username: "cirros" - volume_args: - size: 2 - runner: - type: "constant" - times: {{smoke or 2}} - concurrency: {{smoke or 2}} - context: - users: - tenants: {{smoke or 2}} - users_per_tenant: {{smoke or 1}} - network: {} - sla: - failure_rate: - max: 0 - - - args: - flavor: - name: {{flavor_name}} - image: - name: {{image_name}} - floating_network: "public" - command: - script_inline: | - time_seconds(){ (time -p $1 ) 2>&1 |awk '/real/{print $2}'; } - file=/tmp/test.img - c=100 #100M - write_seq=$(time_seconds "dd if=/dev/zero of=$file bs=1M count=$c") - read_seq=$(time_seconds "dd if=$file of=/dev/null bs=1M count=$c") - [ -f $file ] && rm $file - - echo "{ - \"write_seq\": $write_seq, - \"read_seq\": $read_seq - }" - interpreter: "/bin/sh" - username: "cirros" - runner: - type: "constant" - times: 2 - 
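The sla: failure_rate: blocks used throughout these jobs (max: 0 for most workloads, max: 20 for several Neutron ones) bound the percentage of failed iterations a workload may have and still pass. A small sketch of how such a criterion can be evaluated, assuming the per-iteration result dicts from the runner sketch earlier:

    def failure_rate_ok(results, max_percent=0, min_percent=0):
        # failure_rate is the share of iterations that raised an error,
        # expressed as a percentage of all iterations.
        failures = sum(1 for r in results if r["error"] is not None)
        rate = 100.0 * failures / len(results)
        return min_percent <= rate <= max_percent

    # "failure_rate: {max: 20}" tolerates up to 1 failure in 5 iterations:
    assert failure_rate_ok([{"error": None}] * 4 + [{"error": "timeout"}],
                           max_percent=20)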
concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 1 - network: {} - sla: - failure_rate: - max: 0 - - - args: - command: - remote_path: "./dd_test.sh" - flavor: - name: "m1.tiny" - username: "cirros" - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - image_command_customizer: - command: - local_path: "~/.rally/extra/install_benchmark.sh" - remote_path: "./install_benchmark.sh" - flavor: - name: "m1.tiny" - image: - name: {{image_name}} - username: "cirros" - users: - tenants: 1 - users_per_tenant: 1 - network: - dns_nameservers: [] - - VMTasks.dd_load_test: - - - args: - flavor: - name: "m1.tiny" - image: - name: {{image_name}} - floating_network: "public" - force_delete: false - command: - interpreter: "/bin/sh" - username: "cirros" - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 1 - network: {} diff --git a/rally-jobs/rally-senlin.yaml b/rally-jobs/rally-senlin.yaml deleted file mode 100644 index 17c14989..00000000 --- a/rally-jobs/rally-senlin.yaml +++ /dev/null @@ -1,27 +0,0 @@ ---- - SenlinClusters.create_and_delete_cluster: - - - args: - desired_capacity: 3 - min_size: 0 - max_size: 5 - runner: - type: "constant" - times: 3 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - profiles: - type: os.nova.server - version: "1.0" - properties: - name: cirros_server - flavor: 1 - image: "cirros-0.3.5-x86_64-disk" - networks: - - network: private - sla: - failure_rate: - max: 0 diff --git a/rally-jobs/rally-watcher.yaml b/rally-jobs/rally-watcher.yaml deleted file mode 100644 index 3ccdf3a8..00000000 --- a/rally-jobs/rally-watcher.yaml +++ /dev/null @@ -1,63 +0,0 @@ ---- - Watcher.create_audit_and_delete: - - - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - audit_templates: - audit_templates_per_admin: 5 - fill_strategy: "round_robin" - params: - - goal: - name: "dummy" - strategy: - name: "dummy" - sla: - failure_rate: - max: 0 - - Watcher.create_audit_template_and_delete: - - - args: - goal: - name: "dummy" - strategy: - name: "dummy" - runner: - type: "constant" - times: 10 - concurrency: 2 - sla: - failure_rate: - max: 0 - - Watcher.list_audit_templates: - - - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - audit_templates: - audit_templates_per_admin: 5 - fill_strategy: "random" - params: - - goal: - name: "workload_balancing" - strategy: - name: "workload_stabilization" - - goal: - name: "dummy" - strategy: - name: "dummy" - sla: - failure_rate: - max: 0 \ No newline at end of file diff --git a/rally-jobs/rally-zaqar.yaml b/rally-jobs/rally-zaqar.yaml deleted file mode 100644 index a63e889f..00000000 --- a/rally-jobs/rally-zaqar.yaml +++ /dev/null @@ -1,24 +0,0 @@ ---- - ZaqarBasic.create_queue: - - - args: {} - runner: - type: "constant" - times: 100 - concurrency: 10 - sla: - failure_rate: - max: 0 - - ZaqarBasic.producer_consumer: - - - args: - min_msg_count: 50 - max_msg_count: 200 - runner: - type: "constant" - times: 100 - concurrency: 10 - sla: - failure_rate: - max: 0 diff --git a/rally-jobs/rally.yaml b/rally-jobs/rally.yaml deleted file mode 100644 index e012f6bb..00000000 --- a/rally-jobs/rally.yaml +++ /dev/null @@ -1,718 +0,0 @@ -{%- set cirros_image_url = "http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img" %} ---- - KeystoneBasic.create_user: - - - args: {} - runner: - type: "constant" - times: 10 - 
concurrency: 10 - sla: - failure_rate: - max: 0 - - KeystoneBasic.create_delete_user: - - - args: {} - runner: - type: "constant" - times: 10 - concurrency: 10 - sla: - failure_rate: - max: 0 - - KeystoneBasic.create_user_set_enabled_and_delete: - - - args: - enabled: true - runner: - type: "constant" - times: 10 - concurrency: 10 - sla: - failure_rate: - max: 0 - - - args: - enabled: false - runner: - type: "constant" - times: 10 - concurrency: 10 - sla: - failure_rate: - max: 0 - - KeystoneBasic.create_and_list_tenants: - - - args: {} - runner: - type: "constant" - times: 10 - concurrency: 10 - sla: - failure_rate: - max: 0 - - KeystoneBasic.get_entities: - - - runner: - type: "constant" - times: 20 - concurrency: 10 - sla: - failure_rate: - max: 0 - - - args: - service_name: null - runner: - type: "constant" - times: 20 - concurrency: 10 - sla: - failure_rate: - max: 0 - - - args: - service_name: "nova" - runner: - type: "constant" - times: 20 - concurrency: 10 - sla: - failure_rate: - max: 0 - - KeystoneBasic.add_and_remove_user_role: - - - runner: - type: "constant" - times: 10 - concurrency: 5 - context: - users: - tenants: 1 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 - - KeystoneBasic.create_and_delete_role: - - - runner: - type: "constant" - times: 10 - concurrency: 5 - sla: - failure_rate: - max: 0 - - KeystoneBasic.create_and_get_role: - - - args: {} - runner: - type: "constant" - times: 10 - concurrency: 5 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - - KeystoneBasic.create_add_and_list_user_roles: - - - runner: - type: "constant" - times: 10 - concurrency: 5 - context: - users: - tenants: 1 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 - - KeystoneBasic.create_and_list_roles: - - - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 3 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - - KeystoneBasic.create_and_list_users: - - - args: {} - runner: - type: "constant" - times: 10 - concurrency: 10 - sla: - failure_rate: - max: 0 - - KeystoneBasic.create_tenant: - - - args: {} - runner: - type: "constant" - times: 10 - concurrency: 10 - sla: - failure_rate: - max: 0 - - KeystoneBasic.create_tenant_with_users: - - - args: - users_per_tenant: 10 - runner: - type: "constant" - times: 10 - concurrency: 10 - context: - users: - tenants: 3 - sla: - failure_rate: - max: 0 - - KeystoneBasic.create_user_update_password: - - - args: {} - runner: - type: "constant" - times: 10 - concurrency: 5 - sla: - failure_rate: - max: 0 - - KeystoneBasic.create_and_update_user: - - - args: - create_user_kwargs: {} - update_user_kwargs: - enabled: False - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - - KeystoneBasic.create_update_and_delete_tenant: - - - args: {} - runner: - type: "constant" - times: 10 - concurrency: 5 - sla: - failure_rate: - max: 0 - - KeystoneBasic.create_and_delete_service: - - - runner: - type: "constant" - times: 10 - concurrency: 5 - sla: - failure_rate: - max: 0 - - KeystoneBasic.create_and_list_services: - - - runner: - type: "constant" - times: 10 - concurrency: 5 - sla: - failure_rate: - max: 0 - - KeystoneBasic.create_and_list_ec2credentials: - - - runner: - type: "constant" - times: 10 - concurrency: 5 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - - KeystoneBasic.create_and_delete_ec2credential: - - - runner: - type: "constant" - 
times: 10 - concurrency: 5 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - - Dummy.openstack: - - - args: - sleep: 0.01 - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 8 - users_per_tenant: 4 - sla: - failure_rate: - max: 0 - - - args: - sleep: 0.6 - runner: - type: "constant" - concurrency: 2 - times: 4 - timeout: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 - - - args: - sleep: 0.6 - runner: - type: "rps" - rps: 2 - times: 5 - timeout: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 - - - description: "Check 'quotas' context." - args: - sleep: 0.01 - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - quotas: - nova: - instances: 200 - cores: 200 - ram: -1 - floating_ips: 200 - fixed_ips: 200 - metadata_items: -1 - injected_files: -1 - injected_file_content_bytes: -1 - injected_file_path_bytes: -1 - key_pairs: 500 - security_groups: 400 - security_group_rules: 600 - cinder: - gigabytes: -1 - snapshots: -1 - volumes: -1 - sla: - failure_rate: - max: 0 - - Authenticate.keystone: - - - runner: - type: "constant" - times: 40 - concurrency: 20 - context: - users: - tenants: 2 - users_per_tenant: 10 - sla: - failure_rate: - max: 0 - - Authenticate.validate_glance: - - - args: - repetitions: 2 - runner: - type: "constant" - times: 10 - concurrency: 5 - context: - users: - tenants: 3 - users_per_tenant: 5 - sla: - failure_rate: - max: 0 - - HttpRequests.check_request: - - - args: - url: "http://www.example.com" - method: "GET" - status_code: 200 - runner: - type: "constant" - times: 2 - concurrency: 2 - sla: - failure_rate: - max: 0 - - HttpRequests.check_random_request: - - - args: - requests: - - - url: "http://www.example.com" - method: "GET" - - - url: "http://localhost" - method: "GET" - status_code: 200 - runner: - type: "constant" - times: 2 - concurrency: 2 - sla: - failure_rate: - max: 0 - - GlanceImages.list_images: - - - runner: - type: "constant" - times: 5 - concurrency: 5 - context: - users: - tenants: 1 - users_per_tenant: 2 - images: - image_url: "{{ cirros_image_url }}" - disk_format: "qcow2" - container_format: "bare" - images_per_tenant: 1 - sla: - failure_rate: - max: 100 - - - - runner: - type: "constant" - times: 5 - concurrency: 5 - context: - users: - tenants: 1 - users_per_tenant: 2 - images: - image_url: "~/.rally/extra/fake-image.img" - disk_format: "qcow2" - container_format: "bare" - images_per_tenant: 1 - sla: - failure_rate: - max: 100 - - GlanceImages.create_and_get_image: - - - args: - image_location: "{{ cirros_image_url }}" - container_format: "bare" - disk_format: "qcow2" - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - api_versions: - glance: - version: 2 - sla: - failure_rate: - max: 100 - - GlanceImages.create_and_delete_image: -# - -# args: -# image_location: "{{ cirros_image_url }}" -# container_format: "bare" -# disk_format: "qcow2" -# runner: -# type: "constant" -# times: 1 -# concurrency: 1 -# context: -# users: -# tenants: 2 -# users_per_tenant: 3 -# api_versions: -# glance: -# version: 1 -# sla: -# failure_rate: -# max: 0 -# - - - - args: - image_location: "{{ cirros_image_url }}" - container_format: "bare" - disk_format: "qcow2" - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 2 - users_per_tenant: 3 - api_versions: - glance: - version: 2 - sla: - failure_rate: - max: 
100 - -# -# - -# args: -# image_location: "{{ cirros_image_url }}" -# container_format: "bare" -# disk_format: "qcow2" -# runner: -# type: "constant" -# times: 1 -# concurrency: 1 -# context: -# users: -# tenants: 1 -# users_per_tenant: 1 -# api_versions: -# glance: -# version: 1 -# roles: -# - admin -# sla: -# failure_rate: -# max: 0 - - GlanceImages.create_and_list_image: -# - -# args: -# image_location: "~/.rally/extra/fake-image.img" -# container_format: "bare" -# disk_format: "qcow2" -# runner: -# type: "constant" -# times: 1 -# concurrency: 1 -# context: -# users: -# tenants: 1 -# users_per_tenant: 1 -# api_versions: -# glance: -# version: 1 -# sla: -# failure_rate: -# max: 0 -# - - - - args: - image_location: "~/.rally/extra/fake-image.img" - container_format: "bare" - disk_format: "qcow2" - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - api_versions: - glance: - version: 2 - sla: - failure_rate: - max: 100 - - GlanceImages.create_image_and_boot_instances: - - - args: - image_location: "{{ cirros_image_url }}" - container_format: "bare" - disk_format: "qcow2" - flavor: - name: "m1.tiny" - number_instances: 2 - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 3 - users_per_tenant: 1 - sla: - failure_rate: - max: 100 - - GlanceImages.create_and_update_image: - - - args: - image_location: "{{ cirros_image_url }}" - container_format: "bare" - disk_format: "qcow2" - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 100 - - SwiftObjects.create_container_and_object_then_list_objects: - - - args: - objects_per_container: 2 - object_size: 5120 - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 1 - roles: - - "admin" - sla: - failure_rate: - max: 0 - - SwiftObjects.create_container_and_object_then_delete_all: - - - args: - objects_per_container: 5 - object_size: 102400 - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 1 - roles: - - "admin" - sla: - failure_rate: - max: 0 - - SwiftObjects.create_container_and_object_then_download_object: - - - args: - objects_per_container: 5 - object_size: 1024 - runner: - type: "constant" - times: 6 - concurrency: 3 - context: - users: - tenants: 1 - users_per_tenant: 1 - roles: - - "admin" - sla: - failure_rate: - max: 0 - - SwiftObjects.list_and_download_objects_in_containers: - - - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 1 - roles: - - "admin" - swift_objects: - containers_per_tenant: 1 - objects_per_container: 5 - object_size: 10240 - sla: - failure_rate: - max: 0 - - SwiftObjects.list_objects_in_containers: - - - runner: - type: "constant" - times: 6 - concurrency: 3 - context: - users: - tenants: 1 - users_per_tenant: 1 - roles: - - "admin" - swift_objects: - containers_per_tenant: 1 - objects_per_container: 10 - object_size: 1024 - sla: - failure_rate: - max: 0 diff --git a/rally-jobs/rally_args.yaml b/rally-jobs/rally_args.yaml deleted file mode 100644 index 51391b31..00000000 --- a/rally-jobs/rally_args.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- - - image_name: "^cirros.*-disk$" diff --git a/rally-jobs/sahara-clusters.yaml b/rally-jobs/sahara-clusters.yaml deleted file mode 100644 index f69245d6..00000000 --- a/rally-jobs/sahara-clusters.yaml +++ /dev/null @@ -1,35 +0,0 @@ -{%- 
set sahara_service_type = "data-processing" %} ---- - - SaharaClusters.create_and_delete_cluster: - - - args: - master_flavor: - name: "m1.small" - worker_flavor: - name: "m1.small" - workers_count: 1 - plugin_name: "vanilla" - hadoop_version: "{{sahara_hadoop_version}}" - auto_security_group: True - runner: - type: "constant" - times: 1 - concurrency: 1 - timeout: 3000 - context: - users: - tenants: 1 - users_per_tenant: 1 - sahara_image: - image_url: "http://rally-ci.tk/stuff/sahara-liberty-vanilla-2.7.1-ubuntu-14.04.qcow2" - username: "ubuntu" - plugin_name: "vanilla" - hadoop_version: "{{sahara_hadoop_version}}" - api_versions: - sahara: - service_type: {{sahara_service_type}} - network: {} - sla: - failure_rate: - max: 0 diff --git a/rally-jobs/self-rally.yaml b/rally-jobs/self-rally.yaml deleted file mode 100644 index ca7cbb02..00000000 --- a/rally-jobs/self-rally.yaml +++ /dev/null @@ -1,487 +0,0 @@ ---- - version: 2 - title: Task for rally-tox-self job - description: > - This task contains various scenarios for testing rally features - subtasks: - - - title: Test SLA plugins - workloads: - - - name: Dummy.dummy - description: "Check SLA" - args: - sleep: 0.25 - runner: - type: "constant" - times: 20 - concurrency: 5 - sla: - failure_rate: - max: 0 - max_seconds_per_iteration: 1.0 - max_avg_duration: 0.5 - outliers: - max: 1 - min_iterations: 10 - sigmas: 10 - performance_degradation: - max_degradation: 50 - - - name: Dummy.failure - description: Check failure_rate SLA plugin - args: - sleep: 0.2 - from_iteration: 5 - to_iteration: 15 - each: 2 - runner: - type: "constant" - times: 20 - concurrency: 5 - sla: - failure_rate: - min: 25 - max: 25 - - - name: Dummy.dummy_timed_atomic_actions - description: Check max_avg_duration_per_atomic SLA plugin - args: - number_of_actions: 5 - sleep_factor: 1 - runner: - type: "constant" - times: 3 - concurrency: 3 - sla: - max_avg_duration_per_atomic: - action_0: 1.0 - action_1: 2.0 - action_2: 3.0 - action_3: 4.0 - action_4: 5.0 - - - - title: Test constant runner - workloads: - - - name: Dummy.dummy - description: "Check 'constant' runner." - args: - sleep: 0.25 - runner: - type: "constant" - times: 8 - concurrency: 4 - max_cpu_count: 2 - sla: - failure_rate: - max: 0 - - - name: Dummy.dummy - args: - sleep: 0 - runner: - type: "constant" - times: 4500 - concurrency: 20 - sla: - failure_rate: - max: 0 - - - name: Dummy.dummy - description: > - Check the ability of constant runner to terminate scenario by timeout. - args: - sleep: 30 - runner: - type: "constant" - times: 2 - concurrency: 2 - timeout: 1 - sla: - failure_rate: - min: 100 - - - - title: Test constant_for_duration runner - workloads: - - - name: Dummy.dummy - description: "Check 'constant_for_duration' runner." - args: - sleep: 0.1 - runner: - type: "constant_for_duration" - duration: 5 - concurrency: 5 - sla: - failure_rate: - max: 0 - - - - title: Test rps runner - workloads: - - - name: Dummy.dummy - description: "Check 'rps' runner." - args: - sleep: 0.001 - runner: - type: "rps" - times: 2000 - rps: 200 - sla: - failure_rate: - max: 0 - - - name: Dummy.dummy - description: > - Check 'rps' runner with float value of requests per second. - args: - sleep: 0.1 - runner: - type: "rps" - times: 5 - rps: 0.5 - sla: - failure_rate: - max: 0 - - - name: Dummy.dummy - description: > - Check 'rps' runner with float value of requests per second. 
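The 'rps' workloads in this job exercise fractional requests-per-second rates. As a rough model (an illustration, not Rally's actual runner code), an rps runner spaces iteration launches 1/rps seconds apart, so a fractional rate simply means a longer interval:

```python
# Rough model of "rps" runner scheduling (illustrative, not Rally's code):
# iteration i starts at offset i / rps seconds.
def rps_schedule(times, rps):
    return [i / rps for i in range(times)]

# The workload above (times: 5, rps: 0.5) would launch iterations at
# [0.0, 2.0, 4.0, 6.0, 8.0] seconds, i.e. one every two seconds.
print(rps_schedule(times=5, rps=0.5))
```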
- args: - sleep: 0.1 - runner: - type: "rps" - times: 5 - rps: 0.2 - sla: - failure_rate: - max: 0 - - - name: Dummy.dummy - description: > - Check 'max_concurrency' and 'max_cpu_count' properties of 'rps' runner. - args: - sleep: 0.001 - runner: - type: "rps" - times: 200 - rps: 20 - max_concurrency: 10 - max_cpu_count: 3 - sla: - failure_rate: - max: 0 - - - name: Dummy.dummy - description: "Check 'rps' with start, end, step arguments" - args: - sleep: 0.25 - runner: - type: "rps" - times: 55 - rps: - start: 1 - end: 10 - step: 1 - max_concurrency: 10 - max_cpu_count: 3 - sla: - failure_rate: - max: 0 - - - name: Dummy.dummy - description: "Check 'rps' with start, end, step arguments" - args: - sleep: 0.5 - runner: - type: "rps" - times: 55 - rps: - start: 1 - end: 10 - step: 1 - duration: 2 - max_concurrency: 10 - max_cpu_count: 3 - sla: - failure_rate: - max: 0 - - - name: Dummy.dummy - description: > - Check the ability of rps runner to terminate scenario by timeout. - args: - sleep: 30 - runner: - type: "rps" - times: 1 - rps: 1 - timeout: 1 - sla: - failure_rate: - min: 100 - - - - title: Test serial runner - workloads: - - - name: Dummy.dummy - description: "Check 'serial' runner." - args: - sleep: 0.1 - runner: - type: "serial" - times: 20 - sla: - failure_rate: - max: 0 - - - - title: Test Hook and Trigger plugins - workloads: - - - name: Dummy.dummy - description: "Check sys_call hook." - args: - sleep: 0.75 - runner: - type: "constant" - times: 20 - concurrency: 2 - hooks: - - name: sys_call - description: Run script - args: sh ~/.rally/extra/hook_example_script.sh - trigger: - name: event - args: - unit: iteration - at: [2, 5, 8, 13, 17] - - name: sys_call - description: Show time - args: date +%Y-%m-%dT%H:%M:%S - trigger: - name: event - args: - unit: time - at: [0, 2, 5, 6, 9] - - name: sys_call - description: Show system name - args: uname -a - trigger: - name: event - args: - unit: iteration - at: [2, 3, 4, 5, 6, 8, 10, 12, 13, 15, 17, 18] - sla: - failure_rate: - max: 0 - - - name: Dummy.dummy - description: "Check periodic trigger with iteration unit." - args: - sleep: 0.25 - runner: - type: "constant" - times: 10 - concurrency: 2 - hooks: - - name: sys_call - description: test hook - args: /bin/true - trigger: - name: periodic - args: - unit: iteration - step: 2 - start: 4 - end: 8 - sla: - failure_rate: - max: 0 - - - name: Dummy.dummy - description: "Check event trigger args." - args: - sleep: 1 - runner: - type: "constant" - times: 10 - concurrency: 1 - hooks: - - name: sys_call - description: Get system name - args: uname -a - trigger: - name: event - args: - unit: time - at: [0, 2, 4, 6, 8, 10] - sla: - failure_rate: - max: 0 - - - name: Dummy.dummy - description: "Check periodic trigger with time unit." 
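The periodic trigger used by these hook workloads fires at evenly spaced points between its start and end arguments. A small sketch of the fire-point arithmetic (illustrative; the real semantics live in Rally's trigger plugins, and inclusive start/end is an assumption that matches the examples here):

```python
# Illustrative fire-point arithmetic for the "periodic" trigger
# (assumes start and end are inclusive, as the examples above suggest):
def periodic_fire_points(start, end, step):
    return list(range(start, end + 1, step))

# unit: iteration, start: 4, end: 8, step: 2  ->  iterations [4, 6, 8]
# unit: time,      start: 0, end: 6, step: 2  ->  seconds    [0, 2, 4, 6]
print(periodic_fire_points(4, 8, 2), periodic_fire_points(0, 6, 2))
```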
- args: - sleep: 1 - runner: - type: "constant" - times: 10 - concurrency: 1 - hooks: - - name: sys_call - description: test hook - args: /bin/true - trigger: - name: periodic - args: - unit: time - step: 2 - start: 0 - end: 6 - - - - title: Test Dummy scenarios - workloads: - - - name: Dummy.dummy_exception - args: - size_of_message: 5 - runner: - type: "constant" - times: 20 - concurrency: 5 - - - name: Dummy.dummy_exception_probability - args: - exception_probability: 0.05 - runner: - type: "constant" - times: 2042 - concurrency: 1 - - - name: Dummy.dummy_exception_probability - args: - exception_probability: 0.5 - runner: - type: "constant" - times: 100 - concurrency: 1 - sla: - failure_rate: - min: 20 - max: 80 - - - name: Dummy.dummy_output - runner: - type: "constant" - times: 20 - concurrency: 10 - sla: - failure_rate: - max: 0 - - - name: Dummy.dummy_random_fail_in_atomic - args: - exception_probability: 0.5 - runner: - type: "constant" - times: 50 - concurrency: 10 - - - name: Dummy.dummy_random_action - runner: - type: "constant" - times: 10 - concurrency: 5 - - - - title: Test function based scenario - workloads: - - - name: FakePlugin.testplugin - runner: - type: "constant" - times: 4 - concurrency: 4 - sla: - failure_rate: - max: 0 - - - - title: Profile generate_random_name method - workloads: - - - name: RallyProfile.generate_names_in_atomic - args: - number_of_names: 100 - runner: - type: "constant" - times: 1000 - concurrency: 10 - sla: - max_avg_duration_per_atomic: - generate_100_names: 0.015 - failure_rate: - max: 0 - - - name: RallyProfile.generate_names_in_atomic - args: - number_of_names: 1000 - runner: - type: "constant" - times: 500 - concurrency: 10 - sla: - max_avg_duration_per_atomic: - generate_1000_names: 0.1 - failure_rate: - max: 0 - - - name: RallyProfile.generate_names_in_atomic - args: - number_of_names: 10000 - runner: - type: "constant" - times: 200 - concurrency: 10 - sla: - max_avg_duration_per_atomic: - generate_10000_names: 1 - failure_rate: - max: 0 - - - - title: Profile atomic actions - workloads: - - - name: RallyProfile.calculate_atomic - args: - number_of_atomics: 100 - runner: - type: "constant" - times: 300 - concurrency: 10 - sla: - max_avg_duration_per_atomic: - calculate_100_atomics: 0.04 - failure_rate: - max: 0 - - - name: RallyProfile.calculate_atomic - args: - number_of_atomics: 500 - runner: - type: "constant" - times: 100 - concurrency: 10 - sla: - max_avg_duration_per_atomic: - calculate_500_atomics: 0.5 - failure_rate: - max: 0 diff --git a/rally-jobs/telemetry-neutron.yaml b/rally-jobs/telemetry-neutron.yaml deleted file mode 100644 index 2d00497c..00000000 --- a/rally-jobs/telemetry-neutron.yaml +++ /dev/null @@ -1,429 +0,0 @@ -{% set image_name = "^cirros.*-disk$" %} -{% set flavor_name = "m1.nano" %} -{% set smoke = 0 %} ---- - - CeilometerEvents.create_user_and_get_event: - - - runner: - type: "constant" - times: 10 - concurrency: 10 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - - CeilometerEvents.create_user_and_list_event_types: - - - runner: - type: "constant" - times: 10 - concurrency: 10 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - - CeilometerEvents.create_user_and_list_events: - - - runner: - type: "constant" - times: 10 - concurrency: 10 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - - CeilometerTraits.create_user_and_list_trait_descriptions: - - - runner: - type: "constant" - times: 10 - 
concurrency: 10 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - - CeilometerTraits.create_user_and_list_traits: - - - runner: - type: "constant" - times: 10 - concurrency: 10 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - - CeilometerMeters.list_meters: - - - runner: - type: constant - times: 10 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 1 - ceilometer: - counter_name: "benchmark_meter" - counter_type: "gauge" - counter_unit: "%" - counter_volume: 100 - resources_per_tenant: 1 - samples_per_resource: 1 - timestamp_interval: 1 - sla: - failure_rate: - max: 0 - - CeilometerResource.list_resources: - - - runner: - type: constant - times: 10 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 1 - ceilometer: - counter_name: "benchmark_meter" - counter_type: "gauge" - counter_unit: "%" - counter_volume: 100 - resources_per_tenant: 1 - samples_per_resource: 1 - timestamp_interval: 1 - sla: - failure_rate: - max: 0 - - CeilometerSamples.list_samples: - - - runner: - type: "constant" - times: 10 - concurrency: 10 - context: - users: - tenants: 1 - users_per_tenant: 1 - ceilometer: - counter_name: "cpu_util" - counter_type: "gauge" - counter_unit: "instance" - counter_volume: 1.0 - resources_per_tenant: 3 - samples_per_resource: 10 - timestamp_interval: 60 - metadata_list: - - status: "active" - name: "fake_resource" - deleted: "False" - created_at: "2015-09-04T12:34:19.000000" - - status: "not_active" - name: "fake_resource_1" - deleted: "False" - created_at: "2015-09-10T06:55:12.000000" - batch_size: 5 - sla: - failure_rate: - max: 0 - - CeilometerResource.get_tenant_resources: - - - runner: - type: "constant" - times: 10 - concurrency: 5 - context: - users: - tenants: 2 - users_per_tenant: 2 - ceilometer: - counter_name: "cpu_util" - counter_type: "gauge" - counter_volume: 1.0 - counter_unit: "instance" - resources_per_tenant: 3 - sla: - failure_rate: - max: 0 - - CeilometerAlarms.create_alarm: - - - args: - meter_name: "ram_util" - threshold: 10.0 - type: "threshold" - statistic: "avg" - alarm_actions: ["http://localhost:8776/alarm"] - ok_actions: ["http://localhost:8776/ok"] - insufficient_data_actions: ["http://localhost:8776/notok"] - runner: - type: "constant" - times: 10 - concurrency: 10 - context: - users: - tenants: 1 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 - - CeilometerAlarms.create_and_delete_alarm: - - - args: - meter_name: "ram_util" - threshold: 10.0 - type: "threshold" - statistic: "avg" - alarm_actions: ["http://localhost:8776/alarm"] - ok_actions: ["http://localhost:8776/ok"] - insufficient_data_actions: ["http://localhost:8776/notok"] - runner: - type: "constant" - times: 10 - concurrency: 10 - context: - users: - tenants: 1 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 - - CeilometerAlarms.create_and_list_alarm: - - - args: - meter_name: "ram_util" - threshold: 10.0 - type: "threshold" - statistic: "avg" - alarm_actions: ["http://localhost:8776/alarm"] - ok_actions: ["http://localhost:8776/ok"] - insufficient_data_actions: ["http://localhost:8776/notok"] - runner: - type: "constant" - times: 10 - concurrency: 10 - context: - users: - tenants: 1 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 - - CeilometerAlarms.create_and_get_alarm: - - - args: - meter_name: "ram_util" - threshold: 10.0 - type: "threshold" - statistic: "avg" - alarm_actions: ["http://localhost:8776/alarm"] - ok_actions: ["http://localhost:8776/ok"] - 
insufficient_data_actions: ["http://localhost:8776/notok"] - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - - CeilometerAlarms.create_and_update_alarm: - - - args: - meter_name: "ram_util" - threshold: 10.0 - type: "threshold" - statistic: "avg" - alarm_actions: ["http://localhost:8776/alarm"] - ok_actions: ["http://localhost:8776/ok"] - insufficient_data_actions: ["http://localhost:8776/notok"] - runner: - type: "constant" - times: 10 - concurrency: 10 - context: - users: - tenants: 1 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 - - CeilometerAlarms.create_alarm_and_get_history: - - - args: - meter_name: "ram_util" - threshold: 10.0 - type: "threshold" - state: "ok" - statistic: "avg" - alarm_actions: ["http://localhost:8776/alarm"] - ok_actions: ["http://localhost:8776/ok"] - insufficient_data_actions: ["http://localhost:8776/notok"] - runner: - type: "constant" - times: 10 - concurrency: 5 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - - CeilometerAlarms.list_alarms: - - - runner: - type: "constant" - times: 10 - concurrency: 10 - context: - users: - tenants: 1 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 - - CeilometerQueries.create_and_query_alarms: - - - args: - filter: {"and": [{"!=": {"state": "dummy_state"}},{"=": {"type": "threshold"}}]} - orderby: !!null - limit: 10 - meter_name: "ram_util" - threshold: 10.0 - type: "threshold" - statistic: "avg" - alarm_actions: ["http://localhost:8776/alarm"] - ok_actions: ["http://localhost:8776/ok"] - insufficient_data_actions: ["http://localhost:8776/notok"] - runner: - type: "constant" - times: 20 - concurrency: 10 - context: - users: - tenants: 1 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 - - CeilometerQueries.create_and_query_alarm_history: - - - args: - orderby: !!null - limit: !!null - meter_name: "ram_util" - threshold: 10.0 - type: "threshold" - statistic: "avg" - alarm_actions: ["http://localhost:8776/alarm"] - ok_actions: ["http://localhost:8776/ok"] - insufficient_data_actions: ["http://localhost:8776/notok"] - runner: - type: "constant" - times: 20 - concurrency: 10 - context: - users: - tenants: 1 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 - - CeilometerStats.get_stats: - - - runner: - type: constant - times: 10 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - ceilometer: - counter_name: "benchmark_meter" - counter_type: "gauge" - counter_unit: "%" - counter_volume: 100 - resources_per_tenant: 100 - samples_per_resource: 100 - timestamp_interval: 10 - metadata_list: - - - status: "active" - name: "rally benchmark on" - deleted: "false" - - - status: "terminated" - name: "rally benchmark off" - deleted: "true" - args: - meter_name: "benchmark_meter" - filter_by_user_id: true - filter_by_project_id: true - filter_by_resource_id: true - metadata_query: - status: "terminated" - period: 300 - groupby: "resource_id" - sla: - failure_rate: - max: 0 - - CeilometerQueries.create_and_query_samples: - - - args: - filter: {"=": {"counter_unit": "instance"}} - orderby: !!null - limit: 10 - counter_name: "cpu_util" - counter_type: "gauge" - counter_unit: "instance" - counter_volume: "1.0" - resource_id: "resource_id" - runner: - type: "constant" - times: 20 - concurrency: 10 - context: - users: - tenants: 1 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 diff --git a/rally/__init__.py b/rally/__init__.py deleted file mode 100644 index 
e69de29b..00000000 diff --git a/rally/aas/__init__.py b/rally/aas/__init__.py deleted file mode 100644 index 82a80586..00000000 --- a/rally/aas/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -# FIXME(andreykurilin): implement Rally-as-a-Service -pass diff --git a/rally/api.py b/rally/api.py deleted file mode 100644 index fcb23254..00000000 --- a/rally/api.py +++ /dev/null @@ -1,1379 +0,0 @@ -# Copyright 2013: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import collections -import os -import re -import sys -import time -import traceback - -import jinja2 -import jinja2.meta -import jsonschema -from oslo_config import cfg -import requests -from requests.packages import urllib3 - -from rally.common import opts -from rally.common.i18n import _, _LI, _LE -from rally.common import logging -from rally.common import objects -from rally.common.plugin import discover -from rally.common import utils -from rally.common import version as rally_version -from rally import consts -from rally.deployment import engine as deploy_engine -from rally import exceptions -from rally.task import engine -from rally.task import exporter as texporter -from rally.verification import context as vcontext -from rally.verification import manager as vmanager -from rally.verification import reporter as vreporter - - -CONF = cfg.CONF -LOG = logging.getLogger(__name__) -API_REQUEST_PREFIX = "/api" - - -class APIGroup(object): - def __init__(self, api): - """Initialize API group. - - :param api: an instance of rally.api.API object - """ - self.api = api - - -def api_wrapper(path, method): - def decorator(func): - def inner(self, *args, **kwargs): - if args: - raise TypeError("It is restricted to use positional" - " argument for API calls") - - if self.api.endpoint_url: - # it's a call to the remote Rally instance - return self.api._request(self.api.endpoint_url + path, - method, **kwargs) - else: - try: - return func(self, *args, **kwargs) - except Exception as e: - raise exceptions.make_exception(e) - - inner.path = path - inner.method = method - - return inner - return decorator - - -class _Deployment(APIGroup): - - def _create(self, config, name): - """Create a deployment. 
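The api_wrapper decorator above enforces keyword-only calls, forwards the call to a remote Rally endpoint when one is configured, and attaches the route metadata to the method. A minimal sketch with an invented group and route (it assumes the APIGroup and API_REQUEST_PREFIX definitions from this module):

```python
# Hypothetical API group built with api_wrapper from this module:
class _Example(APIGroup):
    @api_wrapper(path=API_REQUEST_PREFIX + "/example/ping", method="GET")
    def ping(self, message="pong"):
        return message

# With a local API instance (endpoint_url is None) the call runs in-process;
# with a remote endpoint it is dispatched to <endpoint>/api/example/ping.
# _Example(api).ping(message="hi")  ->  "hi"
# _Example(api).ping("hi")          ->  TypeError: positional args rejected
print(_Example.ping.path, _Example.ping.method)
```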
- - :param config: a dict with deployment configuration - :param name: a str represents a name of the deployment - :returns: Deployment object - """ - - try: - deployment = objects.Deployment(name=name, config=config) - except exceptions.DeploymentNameExists as e: - if logging.is_debug(): - LOG.exception(e) - raise - - deployer = deploy_engine.Engine.get_engine( - deployment["config"]["type"], deployment) - try: - deployer.validate() - except jsonschema.ValidationError: - LOG.error(_LE("Deployment %s: Schema validation error.") % - deployment["uuid"]) - deployment.update_status(consts.DeployStatus.DEPLOY_FAILED) - raise - - with deployer: - credentials = deployer.make_deploy() - deployment.update_credentials(credentials) - return deployment - - @api_wrapper(path=API_REQUEST_PREFIX + "/deployment/create", - method="POST") - def create(self, config, name): - return self._create(config, name).to_dict() - - @api_wrapper(path=API_REQUEST_PREFIX + "/deployment/destroy", - method="DELETE") - def destroy(self, deployment): - """Destroy the deployment. - - :param deployment: UUID or name of the deployment - """ - # TODO(akscram): We have to be sure that there are no running - # tasks for this deployment. - # TODO(akscram): Check that the deployment have got a status that - # is equal to "*->finished" or "deploy->inconsistent". - deployment = objects.Deployment.get(deployment) - try: - deployer = deploy_engine.Engine.get_engine( - deployment["config"]["type"], deployment) - with deployer: - deployer.make_cleanup() - except exceptions.PluginNotFound: - LOG.info(_("Deployment %s will be deleted despite exception") - % deployment["uuid"]) - - for verifier in self.api.verifier.list(): - self.api.verifier.delete(verifier_id=verifier["name"], - deployment_id=deployment["name"], - force=True) - - deployment.delete() - - @api_wrapper(path=API_REQUEST_PREFIX + "/deployment/recreate", - method="POST") - def recreate(self, deployment, config=None): - """Performs a cleanup and then makes a deployment again. - - :param deployment: UUID or name of the deployment - :param config: an optional dict with deployment config to update before - redeploy - """ - deployment = objects.Deployment.get(deployment) - deployer = deploy_engine.Engine.get_engine( - deployment["config"]["type"], deployment) - - if config: - if deployment["config"]["type"] != config["type"]: - raise exceptions.RallyException( - "Can't change deployment type.") - try: - deployer.validate(config) - except jsonschema.ValidationError: - LOG.error(_LE("Config schema validation error.")) - raise - - with deployer: - deployer.make_cleanup() - - if config: - deployment.update_config(config) - - credentials = deployer.make_deploy() - deployment.update_credentials(credentials) - - def _get(self, deployment): - """Get the deployment. - - :param deployment: UUID or name of the deployment - :returns: Deployment instance - """ - return objects.Deployment.get(deployment) - - @api_wrapper(path=API_REQUEST_PREFIX + "/deployment/get", - method="GET") - def get(self, deployment): - return self._get(deployment).to_dict() - - @api_wrapper(path=API_REQUEST_PREFIX + "/deployment/service_list", - method="GET") - def service_list(self, deployment): - """Get the services list. 
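Taken together, the deployment group supports a simple create/get/destroy lifecycle. A hedged usage sketch follows; the config payload and names are illustrative, since real configs depend on the chosen deployment engine:

```python
# Hedged usage sketch of the deployment API group (values illustrative):
from rally import api

rapi = api.API()  # reads rally.conf and checks the DB revision
dep = rapi.deployment.create(
    config={"type": "ExistingCloud"},   # minimal illustrative config
    name="demo-deployment")
print(dep["uuid"], dep["status"])
print(rapi.deployment.get(deployment="demo-deployment")["name"])
rapi.deployment.destroy(deployment="demo-deployment")
```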
- - :param deployment: Deployment object - :returns: Service list - """ - # TODO(astudenov): make this method platform independent - admin = deployment.get_credentials_for("openstack")["admin"] - return admin.list_services() - - @api_wrapper(path=API_REQUEST_PREFIX + "/deployment/list", - method="GET") - def list(self, status=None, parent_uuid=None, name=None): - """Get the deployments list. - - :returns: Deployment list - """ - return [deployment.to_dict() for deployment in - objects.Deployment.list(status, parent_uuid, name)] - - @api_wrapper(path=API_REQUEST_PREFIX + "/deployment/check", - method="GET") - def check(self, deployment): - """Check keystone authentication and list all available services. - - :param deployment: UUID of deployment - :returns: Service list - """ - result = {} - all_credentials = self._get(deployment).get_all_credentials() - for platform in all_credentials: - result[platform] = [] - for credential in all_credentials[platform]: - no_error = True - result[platform].append({"services": []}) - active_user = None - if credential["admin"]: - active_user = credential["admin"] - try: - credential["admin"].verify_connection() - except Exception as e: - no_error = False - result[platform][-1]["admin_error"] = { - "etype": e.__class__.__name__, - "msg": str(e), - "trace": traceback.format_exc()} - for user in credential["users"]: - try: - user.verify_connection() - except Exception as e: - no_error = False - result[platform][-1]["user_error"] = { - "etype": e.__class__.__name__, - "msg": str(e), - "trace": traceback.format_exc()} - break - - if no_error: - active_user = active_user or credential["users"][0] - services = active_user.list_services() - result[platform][-1]["services"] = services - - return result - - -class _Task(APIGroup): - - TASK_RESULT_SCHEMA = objects.task.TASK_RESULT_SCHEMA - - @api_wrapper(path=API_REQUEST_PREFIX + "/task/list", - method="GET") - def list(self, **filters): - return [task.to_dict() for task in objects.Task.list(**filters)] - - @api_wrapper(path=API_REQUEST_PREFIX + "/task/get", method="GET") - def get(self, task_id, detailed=False): - """Get task data - - :param task_id: Task UUID - :param detailed: whether return detailed information(including - subtasks and workloads) or not. - """ - return objects.Task.get(task_id, detailed=detailed).to_dict() - - # TODO(andreykurilin): move it to some kind of utils - @api_wrapper(path=API_REQUEST_PREFIX + "/task/render_template", - method="GET") - def render_template(self, task_template, template_dir="./", **kwargs): - """Render jinja2 task template to Rally input task. - - :param task_template: String that contains template - :param template_dir: The path of directory contain template files - :param kwargs: Dict with template arguments - :returns: rendered template str - """ - - def is_really_missing(mis, task_template): - # NOTE(boris-42): Removing variables that have default values from - # missing. Construction that won't be properly - # checked is {% set x = x or 1} - if re.search(mis.join(["{%\s*set\s+", "\s*=\s*", "[^\w]+"]), - task_template): - return False - # NOTE(jlk): Also check for a default filter which can show up as - # a missing variable - if re.search(mis + "\s*\|\s*default\(", task_template): - return False - return True - - # NOTE(boris-42): We have to import builtins to get the full list of - # builtin functions (e.g. range()). 
Unfortunately, - # __builtins__ doesn't return them (when it is not - # main module) - from six.moves import builtins - - env = jinja2.Environment( - loader=jinja2.FileSystemLoader(template_dir, encoding="utf8")) - env.globals.update(self.create_template_functions()) - ast = env.parse(task_template) - # NOTE(Julia Varigina): - # Bug in jinja2.meta.find_undeclared_variables - # - # The method shows inconsistent behavior: - # it does not return undeclared variables that appear - # in included templates only (via {%- include "some_template.yaml"-%}) - # and in the same time is declared in jinja2.Environment.globals. - # - # This is different for undeclared variables that appear directly - # in task_template. The method jinja2.meta.find_undeclared_variables - # returns an undeclared variable that is used in task_template - # and is set in jinja2.Environment.globals. - # - # Despite this bug, jinja resolves values - # declared in jinja2.Environment.globals for both types of undeclared - # variables and successfully renders templates in both cases. - required_kwargs = jinja2.meta.find_undeclared_variables(ast) - missing = (set(required_kwargs) - set(kwargs) - set(dir(builtins)) - - set(env.globals)) - real_missing = [mis for mis in missing - if is_really_missing(mis, task_template)] - if real_missing: - multi_msg = _("Please specify next template task arguments: %s") - single_msg = _("Please specify template task argument: %s") - - raise TypeError((len(real_missing) > 1 and multi_msg or single_msg) - % ", ".join(real_missing)) - - render_template = env.from_string(task_template).render(**kwargs) - return render_template - - @api_wrapper(path=API_REQUEST_PREFIX + "/task/create_template_functions", - method="POST") - def create_template_functions(self): - - def template_min(int1, int2): - return min(int1, int2) - - def template_max(int1, int2): - return max(int1, int2) - - def template_round(float1): - return int(round(float1)) - - def template_ceil(float1): - import math - return int(math.ceil(float1)) - - return {"min": template_min, "max": template_max, - "ceil": template_ceil, "round": template_round} - - @api_wrapper(path=API_REQUEST_PREFIX + "/task/create", - method="POST") - def create(self, deployment, tags=None): - """Create a task without starting it. - - Task is a list of benchmarks that will be called one by one, results of - execution will be stored in DB. - - :param deployment: UUID or name of the deployment - :param tags: a list of tags for this task - :returns: Task object - """ - deployment = objects.Deployment.get(deployment) - if deployment["status"] != consts.DeployStatus.DEPLOY_FINISHED: - raise exceptions.DeploymentNotFinishedStatus( - name=deployment["name"], - uuid=deployment["uuid"], - status=deployment["status"]) - - return objects.Task(deployment_uuid=deployment["uuid"], - tags=tags).to_dict() - - @api_wrapper(path=API_REQUEST_PREFIX + "/task/validate", - method="GET") - def validate(self, deployment, config, task_instance=None, task=None): - """Validate a task config against specified deployment. - - :param deployment: UUID or name of the deployment (will be ignored in - case of transmitting task_instance or task arguments) - :param config: a dict with a task configuration - :param task_instance: DEPRECATED. Use "task" argument to transmit task - uuid instead - """ - if task_instance is not None: - LOG.warning("Transmitting task object in `task validate` is " - "deprecated since Rally 0.10. 
To use pre-created " - "task, transmit task UUID instead via `task` " - "argument.") - task = objects.Task.get(task_instance["uuid"]) - deployment = task["deployment_uuid"] - elif task: - task = objects.Task.get(task) - deployment = task["deployment_uuid"] - else: - task = objects.Task(deployment_uuid=deployment, temporary=True) - deployment = objects.Deployment.get(deployment) - - benchmark_engine = engine.TaskEngine(config, task, deployment) - benchmark_engine.validate() - - @api_wrapper(path=API_REQUEST_PREFIX + "/task/start", method="POST") - def start(self, deployment, config, task=None, abort_on_sla_failure=False): - """Validate and start a task. - - Task is a list of benchmarks that will be called one by one, results of - execution will be stored in DB. - - :param deployment: UUID or name of the deployment (will be ignored in - case of transmitting existing task) - :param config: a dict with a task configuration - :param task: Task UUID to use pre-created task. If None, new task will - be created - :param abort_on_sla_failure: If set to True, the task execution will - stop when any SLA check for it fails - """ - if task and isinstance(task, objects.Task): - LOG.warning("Transmitting task object in `task start` is " - "deprecated since Rally 0.10. To use pre-created " - "task, transmit task UUID instead.") - if task.is_temporary: - raise ValueError(_( - "Unable to run a temporary task. Please check your code.")) - task = objects.Task.get(task["uuid"]) - elif task is not None: - task = objects.Task.get(task) - - if task is not None: - deployment = task["deployment_uuid"] - - deployment = objects.Deployment.get(deployment) - if deployment["status"] != consts.DeployStatus.DEPLOY_FINISHED: - raise exceptions.DeploymentNotFinishedStatus( - name=deployment["name"], - uuid=deployment["uuid"], - status=deployment["status"]) - - if task is None: - task = objects.Task(deployment_uuid=deployment["uuid"]) - - benchmark_engine = engine.TaskEngine( - config, task, deployment, - abort_on_sla_failure=abort_on_sla_failure) - - benchmark_engine.validate() - - LOG.info("Task %s config is valid." % task["uuid"]) - LOG.info("Benchmark Task %s on Deployment %s" % (task["uuid"], - deployment["uuid"])) - - benchmark_engine = engine.TaskEngine( - config, task, deployment, - abort_on_sla_failure=abort_on_sla_failure) - - try: - benchmark_engine.run() - except Exception: - deployment.update_status(consts.DeployStatus.DEPLOY_INCONSISTENT) - raise - - return task["uuid"], task.get_status(task["uuid"]) - - @api_wrapper(path=API_REQUEST_PREFIX + "/task/abort", method="PUT") - def abort(self, task_uuid, soft=False, async=True): - """Abort running task. - - :param task_uuid: The UUID of the task - :type task_uuid: str - :param soft: If set to True, task should be aborted after execution of - current scenario, otherwise as soon as possible before - all the scenario iterations finish [Default: False] - :type soft: bool - :param async: don't wait until task became in 'running' state - [Default: False] - :type async: bool - """ - if not async: - current_status = objects.Task.get_status(task_uuid) - if current_status in objects.Task.NOT_IMPLEMENTED_STAGES_FOR_ABORT: - LOG.info(_LI("Task status is '%s'. 
Should wait until it became" - " 'running'") % current_status) - while (current_status in - objects.Task.NOT_IMPLEMENTED_STAGES_FOR_ABORT): - time.sleep(1) - current_status = objects.Task.get_status(task_uuid) - - objects.Task.get(task_uuid).abort(soft=soft) - - if not async: - LOG.info(_LI("Waiting until the task stops.")) - finished_stages = [consts.TaskStatus.ABORTED, - consts.TaskStatus.FINISHED, - consts.TaskStatus.CRASHED] - while objects.Task.get_status(task_uuid) not in finished_stages: - time.sleep(1) - - @api_wrapper(path=API_REQUEST_PREFIX + "/task/delete", method="DELETE") - def delete(self, task_uuid, force=False): - """Delete the task. - - :param task_uuid: The UUID of the task - :param force: If set to True, then delete the task despite to the - status - :raises TaskInvalidStatus: when the status of the task is not - in FINISHED, FAILED or ABORTED and - the force argument is not True - """ - if force: - objects.Task.delete_by_uuid(task_uuid, status=None) - elif objects.Task.get_status(task_uuid) in ( - consts.TaskStatus.ABORTED, - consts.TaskStatus.FINISHED, - consts.TaskStatus.CRASHED): - objects.Task.delete_by_uuid(task_uuid, status=None) - else: - objects.Task.delete_by_uuid( - task_uuid, status=consts.TaskStatus.FINISHED) - - @api_wrapper(path=API_REQUEST_PREFIX + "/task/import_results", - method="POST") - def import_results(self, deployment, task_results, tags=None): - """Import json results of a task into rally database""" - deployment = objects.Deployment.get(deployment) - if deployment["status"] != consts.DeployStatus.DEPLOY_FINISHED: - raise exceptions.DeploymentNotFinishedStatus( - name=deployment["name"], - uuid=deployment["uuid"], - status=deployment["status"]) - - task_inst = objects.Task(deployment_uuid=deployment["uuid"], - tags=tags) - task_inst.update_status(consts.TaskStatus.RUNNING) - for subtask in task_results["subtasks"]: - subtask_obj = task_inst.add_subtask(title=subtask.get("title")) - for workload in subtask["workloads"]: - workload_obj = subtask_obj.add_workload( - name=workload["name"], description=workload["description"], - position=workload["position"], runner=workload["runner"], - context=workload["context"], hooks=workload["hooks"], - sla=workload["sla"], args=workload["args"]) - chunk_size = CONF.raw_result_chunk_size - workload_data_count = 0 - while len(workload["data"]) > chunk_size: - results_chunk = workload["data"][:chunk_size] - workload["data"] = workload["data"][chunk_size:] - results_chunk.sort(key=lambda x: x["timestamp"]) - workload_obj.add_workload_data(workload_data_count, - {"raw": results_chunk}) - workload_data_count += 1 - workload_obj.add_workload_data(workload_data_count, - {"raw": workload["data"]}) - workload_obj.set_results( - sla_results=workload["sla_results"].get("sla"), - hooks_results=workload["hooks"], - start_time=workload["start_time"], - full_duration=workload["full_duration"], - load_duration=workload["load_duration"]) - subtask_obj.update_status(consts.SubtaskStatus.FINISHED) - task_inst.update_status(consts.SubtaskStatus.FINISHED) - - LOG.info("Task results have been successfully imported.") - - return task_inst.to_dict() - - @api_wrapper(path=API_REQUEST_PREFIX + "/task/export", - method="POST") - def export(self, tasks_uuids, output_type, output_dest=None): - """Generate a report for a task or a few tasks. 
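import_results above persists raw iteration data in fixed-size chunks, sorting each full chunk by timestamp before storing it. The same pattern in isolation (illustrative; in the real code chunk_size comes from CONF.raw_result_chunk_size):

```python
# Standalone restatement of the chunking loop in import_results (illustrative):
def chunk_results(data, chunk_size):
    chunks = []
    while len(data) > chunk_size:
        chunk, data = data[:chunk_size], data[chunk_size:]
        chunk.sort(key=lambda x: x["timestamp"])
        chunks.append(chunk)
    chunks.append(data)  # trailing partial chunk, left unsorted as in the original
    return chunks

rows = [{"timestamp": t} for t in (3, 1, 2, 5, 4)]
# -> chunks with timestamps [1, 3], [2, 5], [4]
print(chunk_results(rows, chunk_size=2))
```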
- - :param tasks_uuids: List of tasks UUIDs - :param output_type: Plugin name of task reporter - :param output_dest: Destination for task report - """ - - tasks_results = [] - for task_uuid in tasks_uuids: - tasks_results.append(self.get(task_id=task_uuid, detailed=True)) - - reporter_cls = texporter.TaskExporter.get(output_type) - reporter_cls.validate(output_dest) - - LOG.info("Building '%s' report for the following task(s): " - "'%s'.", output_type, "', '".join(tasks_uuids)) - result = texporter.TaskExporter.make(reporter_cls, - tasks_results, - output_dest, - api=self.api) - LOG.info("The report has been successfully built.") - return result - - -class _Verifier(APIGroup): - - @api_wrapper(path=API_REQUEST_PREFIX + "/verifier/list_plugins", - method="GET") - def list_plugins(self, namespace=None): - """List all plugins for verifiers management. - - :param namespace: Verifier plugin namespace - """ - return [{"name": p.get_name(), - "namespace": p.get_platform(), - "description": p.get_info()["title"], - "location": "%s.%s" % (p.__module__, p.__name__)} - for p in vmanager.VerifierManager.get_all(platform=namespace)] - - @api_wrapper(path=API_REQUEST_PREFIX + "/verifier/create", method="POST") - def create(self, name, vtype, namespace=None, source=None, version=None, - system_wide=False, extra_settings=None): - """Create a verifier. - - :param name: Verifier name - :param vtype: Verifier plugin name - :param namespace: Verifier plugin namespace. Should be specified when - there are two verifier plugins with equal names but - in different namespaces - :param source: Path or URL to the repo to clone verifier from - :param version: Branch, tag or commit ID to checkout before - verifier installation - :param system_wide: Whether or not to use the system-wide environment - for verifier instead of a virtual environment - :param extra_settings: Extra installation settings for verifier - """ - # check that the specified verifier type exists - vmanager.VerifierManager.get(vtype, platform=namespace) - - LOG.info("Creating verifier '%s'.", name) - - try: - verifier = self._get(name) - except exceptions.ResourceNotFound: - verifier = objects.Verifier.create( - name=name, source=source, system_wide=system_wide, - version=version, vtype=vtype, namespace=namespace, - extra_settings=extra_settings) - else: - raise exceptions.RallyException( - "Verifier with name '%s' already exists! Please, specify " - "another name for verifier and try again." % verifier.name) - - properties = {} - - default_namespace = verifier.manager.get_platform() - if not namespace and default_namespace: - properties["namespace"] = default_namespace - - default_source = verifier.manager._meta_get("default_repo") - if not source and default_source: - properties["source"] = default_source - - if properties: - verifier.update_properties(**properties) - - verifier.update_status(consts.VerifierStatus.INSTALLING) - try: - verifier.manager.install() - except Exception: - verifier.update_status(consts.VerifierStatus.FAILED) - raise - verifier.update_status(consts.VerifierStatus.INSTALLED) - - LOG.info("Verifier %s has been successfully created!", verifier) - - return verifier.uuid - - def _get(self, verifier_id): - """Get a verifier. - - :param verifier_id: Verifier name or UUID - """ - return objects.Verifier.get(verifier_id) - - @api_wrapper(path=API_REQUEST_PREFIX + "/verifier/get", method="GET") - def get(self, verifier_id): - return self._get(verifier_id).to_dict() - - def _list(self, status=None): - """List all verifiers. 
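create() above drives a verifier through its install states (installing, then installed or failed). A hedged sketch of the lifecycle calls; "tempest", the source URL, and the version tag are illustrative values, and update()/delete() are defined further below:

```python
# Hedged usage sketch of the verifier API group (values illustrative):
from rally import api

rapi = api.API()
verifier_uuid = rapi.verifier.create(
    name="my-tempest",
    vtype="tempest",   # verifier plugin name (illustrative)
    source="https://git.openstack.org/openstack/tempest")
rapi.verifier.update(verifier_id="my-tempest",
                     version="17.2.0",   # illustrative tag
                     update_venv=True)
rapi.verifier.delete(verifier_id="my-tempest", force=True)
```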
- - :param status: Status to filter verifiers by - """ - return objects.Verifier.list(status) - - @api_wrapper(path=API_REQUEST_PREFIX + "/verifier/list", method="GET") - def list(self, status=None): - return [item.to_dict() for item in self._list(status)] - - @api_wrapper(path=API_REQUEST_PREFIX + "/verifier/delete", method="DELETE") - def delete(self, verifier_id, deployment_id=None, force=False): - """Delete a verifier. - - :param verifier_id: Verifier name or UUID - :param deployment_id: Deployment name or UUID. If specified, - only the deployment-specific data will be deleted - for verifier - :param force: Delete all stored verifier verifications. - If deployment_id specified, only verifications of this - deployment will be deleted - """ - verifier = self._get(verifier_id) - verifications = self.api.verification.list( - verifier_id=verifier_id, - deployment_id=deployment_id) - if verifications: - d_msg = ((" for deployment '%s'" % deployment_id) - if deployment_id else "") - if force: - LOG.info("Deleting all verifications created by verifier " - "%s%s.", verifier, d_msg) - for verification in verifications: - self.api.verification.delete( - verification_uuid=verification["uuid"]) - else: - raise exceptions.RallyException( - "Failed to delete verifier {0} because there are stored " - "verifier verifications{1}! Please, make sure that they " - "are not important to you. Use 'force' flag if you would " - "like to delete verifications{1} as well." - .format(verifier, d_msg)) - - if deployment_id: - LOG.info("Deleting deployment-specific data for verifier %s.", - verifier) - verifier.set_deployment(deployment_id) - verifier.manager.uninstall() - LOG.info("Deployment-specific data has been successfully deleted!") - else: - LOG.info("Deleting verifier %s.", verifier) - verifier.manager.uninstall(full=True) - objects.Verifier.delete(verifier_id) - LOG.info("Verifier has been successfully deleted!") - - @api_wrapper(path=API_REQUEST_PREFIX + "/verifier/update", method="PUT") - def update(self, verifier_id, system_wide=None, version=None, - update_venv=False): - """Update a verifier. - - :param verifier_id: Verifier name or UUID - :param system_wide: Switch to using the system-wide environment - :param version: Branch, tag or commit ID to checkout - :param update_venv: Update the virtual environment for verifier - """ - if system_wide is None and version is None and not update_venv: - # nothing to update - raise exceptions.RallyException( - "At least one of the following parameters should be " - "specified: 'system_wide', 'version', 'update_venv'.") - - verifier = self._get(verifier_id) - LOG.info("Updating verifier %s.", verifier) - - if verifier.status != consts.VerifierStatus.INSTALLED: - raise exceptions.RallyException( - "Failed to update verifier %s because verifier is in '%s' " - "status, but should be in '%s'." % ( - verifier, verifier.status, consts.VerifierStatus.INSTALLED) - ) - - system_wide_in_use = (system_wide or - (system_wide is None and verifier.system_wide)) - if update_venv and system_wide_in_use: - raise exceptions.RallyException( - "It is impossible to update the virtual environment for " - "verifier %s when it uses the system-wide environment." 
- % verifier) - - # store original status to set it again after updating or rollback - original_status = verifier.status - verifier.update_status(consts.VerifierStatus.UPDATING) - - properties = {} # store new verifier properties to update old ones - - sw_is_checked = False - - if version: - properties["version"] = version - - backup = utils.BackupHelper() - rollback_msg = ("Failed to update verifier %s. It has been " - "rollbacked to the previous state." % verifier) - backup.add_rollback_action(LOG.info, rollback_msg) - backup.add_rollback_action(verifier.update_status, original_status) - with backup(verifier.manager.repo_dir): - verifier.manager.checkout(version) - - if system_wide_in_use: - verifier.manager.check_system_wide() - sw_is_checked = True - - if system_wide is not None: - if system_wide == verifier.system_wide: - LOG.info( - "Verifier %s is already switched to system_wide=%s. " - "Nothing will be changed.", verifier, verifier.system_wide) - else: - properties["system_wide"] = system_wide - if not system_wide: - update_venv = True # we need to install a virtual env - else: - # NOTE(andreykurilin): should we remove previously created - # virtual environment?! - if not sw_is_checked: - verifier.manager.check_system_wide() - - if update_venv: - backup = utils.BackupHelper() - rollback_msg = ("Failed to update the virtual environment for " - "verifier %s. It has been rollbacked to the " - "previous state." % verifier) - backup.add_rollback_action(LOG.info, rollback_msg) - backup.add_rollback_action(verifier.update_status, original_status) - with backup(verifier.manager.venv_dir): - verifier.manager.install_venv() - - properties["status"] = original_status # change verifier status back - verifier.update_properties(**properties) - - LOG.info("Verifier %s has been successfully updated!", verifier) - - return verifier.uuid - - @api_wrapper(path=API_REQUEST_PREFIX + "/verifier/configure", method="PUT") - def configure(self, verifier, deployment_id, extra_options=None, - reconfigure=False): - """Configure a verifier. - - :param verifier: Verifier object or (name or UUID) - :param deployment_id: Deployment name or UUID - :param extra_options: Extend verifier configuration with extra options - :param reconfigure: Reconfigure verifier - """ - if not isinstance(verifier, objects.Verifier): - verifier = self._get(verifier) - verifier.set_deployment(deployment_id) - LOG.info( - "Configuring verifier %s for deployment '%s' (UUID=%s).", - verifier, verifier.deployment["name"], verifier.deployment["uuid"]) - - if verifier.status != consts.VerifierStatus.INSTALLED: - raise exceptions.RallyException( - "Failed to configure verifier %s for deployment '%s' " - "(UUID=%s) because verifier is in '%s' status, but should be " - "in '%s'." % (verifier, verifier.deployment["name"], - verifier.deployment["uuid"], verifier.status, - consts.VerifierStatus.INSTALLED)) - - msg = ("Verifier %s has been successfully configured for deployment " - "'%s' (UUID=%s)!" % (verifier, verifier.deployment["name"], - verifier.deployment["uuid"])) - vm = verifier.manager - if vm.is_configured(): - LOG.info("Verifier is already configured!") - if not reconfigure: - if not extra_options: - return vm.get_configuration() - else: - # Just add extra options to the config file. 
- if logging.is_debug(): - LOG.debug("Adding the following extra options: %s " - "to verifier configuration.", extra_options) - else: - LOG.info( - "Adding extra options to verifier configuration.") - vm.extend_configuration(extra_options) - LOG.info(msg) - return vm.get_configuration() - - LOG.info("Reconfiguring verifier.") - - raw_config = vm.configure(extra_options=extra_options) - - LOG.info(msg) - - return raw_config - - @api_wrapper(path=API_REQUEST_PREFIX + "/verifier/override_configuration", - method="PUT") - def override_configuration(self, verifier_id, deployment_id, - new_configuration): - """Override verifier configuration (e.g., rewrite the config file). - - :param verifier_id: Verifier name or UUID - :param deployment_id: Deployment name or UUID - :param new_configuration: New configuration for verifier - """ - verifier = self._get(verifier_id) - if verifier.status != consts.VerifierStatus.INSTALLED: - raise exceptions.RallyException( - "Failed to override verifier configuration for deployment " - "'%s' (UUID=%s) because verifier %s is in '%s' status, but " - "should be in '%s'." % ( - verifier.deployment["name"], verifier.deployment["uuid"], - verifier, verifier.status, consts.VerifierStatus.INSTALLED) - ) - - verifier.set_deployment(deployment_id) - LOG.info("Overriding configuration of verifier %s for deployment '%s' " - "(UUID=%s).", verifier, verifier.deployment["name"], - verifier.deployment["uuid"]) - verifier.manager.override_configuration(new_configuration) - LOG.info("Configuration of verifier %s has been successfully " - "overridden for deployment '%s' (UUID=%s)!", verifier, - verifier.deployment["name"], verifier.deployment["uuid"]) - - @api_wrapper(path=API_REQUEST_PREFIX + "/verifier/list_tests", - method="GET") - def list_tests(self, verifier_id, pattern=""): - """List all verifier tests. - - :param verifier_id: Verifier name or UUID - :param pattern: Pattern which will be used for matching - """ - verifier = self._get(verifier_id) - if verifier.status != consts.VerifierStatus.INSTALLED: - raise exceptions.RallyException( - "Failed to list verifier tests because verifier %s is in '%s' " - "status, but should be in '%s'." % ( - verifier, verifier.status, consts.VerifierStatus.INSTALLED) - ) - - if pattern: - verifier.manager.validate_args({"pattern": pattern}) - - return verifier.manager.list_tests(pattern) - - @api_wrapper(path=API_REQUEST_PREFIX + "/verifier/add_extension", - method="POST") - def add_extension(self, verifier_id, source, version=None, - extra_settings=None): - """Add a verifier extension. - - :param verifier_id: Verifier name or UUID - :param source: Path or URL to the repo to clone verifier extension from - :param version: Branch, tag or commit ID to checkout before - installation of the verifier extension - :param extra_settings: Extra installation settings for verifier - extension - """ - verifier = self._get(verifier_id) - if verifier.status != consts.VerifierStatus.INSTALLED: - raise exceptions.RallyException( - "Failed to add verifier extension because verifier %s " - "is in '%s' status, but should be in '%s'." 
% ( - verifier, verifier.status, consts.VerifierStatus.INSTALLED) - ) - - LOG.info("Adding extension for verifier %s.", verifier) - - # store original status to rollback it after failure - original_status = verifier.status - verifier.update_status(consts.VerifierStatus.EXTENDING) - try: - verifier.manager.install_extension(source, version=version, - extra_settings=extra_settings) - finally: - verifier.update_status(original_status) - - LOG.info("Extension for verifier %s has been successfully added!", - verifier) - - @api_wrapper(path=API_REQUEST_PREFIX + "/verifier/list_extensions", - method="GET") - def list_extensions(self, verifier_id): - """List all verifier extensions. - - :param verifier_id: Verifier name or UUID - """ - verifier = self._get(verifier_id) - if verifier.status != consts.VerifierStatus.INSTALLED: - raise exceptions.RallyException( - "Failed to list verifier extensions because verifier %s " - "is in '%s' status, but should be in '%s.'" % ( - verifier, verifier.status, consts.VerifierStatus.INSTALLED) - ) - - return verifier.manager.list_extensions() - - @api_wrapper(path=API_REQUEST_PREFIX + "/verifier/delete_extension", - method="DELETE") - def delete_extension(self, verifier_id, name): - """Delete a verifier extension. - - :param verifier_id: Verifier name or UUID - :param name: Verifier extension name - """ - verifier = self._get(verifier_id) - if verifier.status != consts.VerifierStatus.INSTALLED: - raise exceptions.RallyException( - "Failed to delete verifier extension because verifier %s " - "is in '%s' status, but should be in '%s'." % ( - verifier, verifier.status, consts.VerifierStatus.INSTALLED) - ) - - LOG.info("Deleting extension for verifier %s.", verifier) - verifier.manager.uninstall_extension(name) - LOG.info("Extension for verifier %s has been successfully deleted!", - verifier) - - -class _Verification(APIGroup): - - @api_wrapper(path=API_REQUEST_PREFIX + "/verification/start", - method="POST") - def start(self, verifier_id, deployment_id, tags=None, **run_args): - """Start a verification. - - :param verifier_id: Verifier name or UUID - :param deployment_id: Deployment name or UUID - :param tags: List of tags to assign them to verification - :param run_args: Dictionary with run arguments for verification - """ - # TODO(ylobankov): Add an ability to skip tests by specifying only test - # names (without test IDs). Also, it would be nice to - # skip the whole test suites. For example, all tests - # in the class or module. - - deployment = objects.Deployment.get(deployment_id) - - if deployment["status"] != consts.DeployStatus.DEPLOY_FINISHED: - raise exceptions.DeploymentNotFinishedStatus( - name=deployment["name"], - uuid=deployment["uuid"], - status=deployment["status"]) - - verifier = self.api.verifier._get(verifier_id) - if verifier.status != consts.VerifierStatus.INSTALLED: - raise exceptions.RallyException( - "Failed to start verification because verifier %s is in '%s' " - "status, but should be in '%s'." 
% ( - verifier, verifier.status, consts.VerifierStatus.INSTALLED) - ) - - verifier.set_deployment(deployment_id) - if not verifier.manager.is_configured(): - self.api.verifier.configure(verifier=verifier, - deployment_id=deployment_id) - - # TODO(andreykurilin): save validation results to db - verifier.manager.validate(run_args) - - verification = objects.Verification.create( - verifier_id=verifier_id, deployment_id=deployment_id, tags=tags, - run_args=run_args) - LOG.info("Starting verification (UUID=%s) for deployment '%s' " - "(UUID=%s) by verifier %s.", verification.uuid, - verifier.deployment["name"], verifier.deployment["uuid"], - verifier) - verification.update_status(consts.VerificationStatus.RUNNING) - - context = {"config": verifier.manager._meta_get("context"), - "run_args": run_args, - "verification": verification, - "verifier": verifier} - try: - with vcontext.ContextManager(context): - results = verifier.manager.run(context) - except Exception as e: - verification.set_error(e) - raise - - # TODO(ylobankov): Check that verification exists in the database - # because users may delete verification before tests - # finish. - verification.finish(results.totals, results.tests) - - LOG.info("Verification (UUID=%s) has been successfully finished for " - "deployment '%s' (UUID=%s)!", verification.uuid, - verifier.deployment["name"], verifier.deployment["uuid"]) - - return {"verification": verification.to_dict(), - "totals": results.totals, - "tests": results.tests} - - @api_wrapper(path=API_REQUEST_PREFIX + "/verification/rerun", - method="POST") - def rerun(self, verification_uuid, deployment_id=None, failed=False, - tags=None, concurrency=0): - """Rerun tests from a verification. - - :param verification_uuid: Verification UUID - :param deployment_id: Deployment name or UUID - :param failed: Rerun only failed tests - :param tags: List of tags to assign them to verification - :param concurrency: The number of processes to use to run verifier - tests - """ - # TODO(ylobankov): Improve this method in the future: put some - # information about re-run in run_args. - run_args = {} - if concurrency: - run_args["concurrency"] = concurrency - - verification = self._get(verification_uuid) - tests = verification.tests - - if failed: - tests = [t for t, r in tests.items() if r["status"] == "fail"] - if not tests: - raise exceptions.RallyException( - "There are no failed tests from verification (UUID=%s)." - % verification_uuid) - else: - tests = tests.keys() - - deployment = (deployment_id if deployment_id - else verification.deployment_uuid) - deployment = self.api.deployment.get(deployment=deployment) - LOG.info("Re-running %stests from verification (UUID=%s) for " - "deployment '%s' (UUID=%s).", "failed " if failed else "", - verification.uuid, deployment["name"], deployment["uuid"]) - return self.start(verifier_id=verification.verifier_uuid, - deployment_id=deployment["uuid"], - load_list=tests, tags=tags, **run_args) - - def _get(self, verification_uuid): - """Get a verification. - - :param verification_uuid: Verification UUID - """ - return objects.Verification.get(verification_uuid) - - @api_wrapper(path=API_REQUEST_PREFIX + "/verification/get", method="GET") - def get(self, verification_uuid): - return self._get(verification_uuid).to_dict() - - @api_wrapper(path=API_REQUEST_PREFIX + "/verification/list", - method="GET") - def list(self, verifier_id=None, deployment_id=None, - tags=None, status=None): - """List all verifications. 
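The start/rerun pair above is the core verification workflow: configure the verifier for the deployment if needed, validate the run arguments, run under the verification context, then persist totals and per-test results. A hedged call sketch; the identifiers are illustrative and the "pattern" run argument is a hypothetical example of what gets passed through to the verifier:

```python
# Hedged usage sketch of the verification API group (values illustrative):
from rally import api

rapi = api.API()
result = rapi.verification.start(
    verifier_id="my-tempest",
    deployment_id="demo-deployment",
    tags=["smoke"],
    pattern="set=smoke")   # hypothetical run_args entry
print(result["verification"]["uuid"], result["totals"])

# Re-run only the failed tests of that verification:
rapi.verification.rerun(
    verification_uuid=result["verification"]["uuid"],
    failed=True)
```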
- - :param verifier_id: Verifier name or UUID - :param deployment_id: Deployment name or UUID - :param tags: Tags to filter verifications by - :param status: Status to filter verifications by - """ - return [item.to_dict() for item in objects.Verification.list( - verifier_id, deployment_id=deployment_id, - tags=tags, status=status)] - - @api_wrapper(path=API_REQUEST_PREFIX + "/verification/delete", - method="DELETE") - def delete(self, verification_uuid): - """Delete a verification. - - :param verification_uuid: Verification UUID - """ - verification = self._get(verification_uuid) - LOG.info("Deleting verification (UUID=%s).", verification.uuid) - verification.delete() - LOG.info("Verification has been successfully deleted!") - - @api_wrapper(path=API_REQUEST_PREFIX + "/verification/report", - method="GET") - def report(self, uuids, output_type, output_dest=None): - """Generate a report for a verification or a few verifications. - - :param uuids: List of verifications UUIDs - :param output_type: Plugin name of verification reporter - :param output_dest: Destination for verification report - """ - verifications = [self._get(uuid) for uuid in uuids] - - reporter_cls = vreporter.VerificationReporter.get(output_type) - reporter_cls.validate(output_dest) - - LOG.info("Building '%s' report for the following verification(s): " - "'%s'.", output_type, "', '".join(uuids)) - result = vreporter.VerificationReporter.make(reporter_cls, - verifications, - output_dest) - LOG.info(_LI("The report has been successfully built.")) - return result - - @api_wrapper(path=API_REQUEST_PREFIX + "/verification/import_results", - method="POST") - def import_results(self, verifier_id, deployment_id, data, **run_args): - """Import results of a test run into Rally database. - - :param verifier_id: Verifier name or UUID - :param deployment_id: Deployment name or UUID - :param data: Results data of a test run to import - :param run_args: Dictionary with run arguments - """ - # TODO(aplanas): Create an external deployment if this is missing, as - # required in the blueprint [1]. - # [1] https://blueprints.launchpad.net/rally/+spec/verification-import - - verifier = self.api.verifier._get(verifier_id) - verifier.set_deployment(deployment_id) - LOG.info("Importing test results into a new verification for " - "deployment '%s' (UUID=%s), using verifier %s.", - verifier.deployment["name"], verifier.deployment["uuid"], - verifier) - - verifier.manager.validate_args(run_args) - - verification = objects.Verification.create(verifier_id, - deployment_id=deployment_id, - run_args=run_args) - verification.update_status(consts.VerificationStatus.RUNNING) - - try: - results = verifier.manager.parse_results(data) - except Exception as e: - verification.set_failed(e) - raise - verification.finish(results.totals, results.tests) - - LOG.info("Test results have been successfully imported.") - - return {"verification": verification.to_dict(), - "totals": results.totals, - "tests": results.tests} - - -class API(object): - - CONFIG_SEARCH_PATHS = [sys.prefix + "/etc/rally", "~/.rally", "/etc/rally"] - CONFIG_FILE_NAME = "rally.conf" - - def __init__(self, config_file=None, config_args=None, - rally_endpoint=None, plugin_paths=None, skip_db_check=False): - """Initialize Rally API instance - - :param config_file: Path to rally configuration file. 
If None, default
-                            path will be selected
-        :type config_file: str
-        :param config_args: Arguments used to initialize the current
-                            configuration
-        :type config_args: list
-        :param rally_endpoint: [Restricted] Rally endpoint connection string.
-        :type rally_endpoint: str
-        :param plugin_paths: Additional custom plugin locations
-        :type plugin_paths: list
-        :param skip_db_check: Allows skipping the db revision check
-        :type skip_db_check: bool
-        """
-
-        self.endpoint_url = rally_endpoint
-        if rally_endpoint:
-            raise NotImplementedError(_("Sorry, but Rally-as-a-Service is "
-                                        "not ready yet."))
-
-        try:
-            config_files = ([config_file] if config_file else
-                            self._default_config_file())
-            CONF(config_args or [],
-                 project="rally",
-                 version=rally_version.version_string(),
-                 default_config_files=config_files)
-
-            opts.register()
-
-            logging.setup("rally")
-            if not CONF.get("log_config_append"):
-                # The two lines below disable noise from the requests
-                # module. Ideally such settings would be made on the root
-                # rally logger, but current oslo code does not provide an
-                # interface for that, so this workaround silences INFO logs
-                # from the requests module whenever the user has not given
-                # a specific log configuration. It can be removed once
-                # oslo.log grows such an interface.
-                LOG.debug(
-                    "INFO logs from the urllib3 and requests modules are "
-                    "hidden.")
-                requests_log = logging.getLogger("requests").logger
-                requests_log.setLevel(logging.WARNING)
-                urllib3_log = logging.getLogger("urllib3").logger
-                urllib3_log.setLevel(logging.WARNING)
-
-                LOG.debug("urllib3 insecure warnings are hidden.")
-                for warning in ("InsecurePlatformWarning",
-                                "SNIMissingWarning",
-                                "InsecureRequestWarning"):
-                    warning_cls = getattr(urllib3.exceptions, warning, None)
-                    if warning_cls is not None:
-                        urllib3.disable_warnings(warning_cls)
-
-                # NOTE(wtakase): This is for suppressing boto error logging.
-                LOG.debug("ERROR logs from the boto module are hidden.")
-                boto_log = logging.getLogger("boto").logger
-                boto_log.setLevel(logging.CRITICAL)
-
-                # Set the alembic log level to ERROR
-                alembic_log = logging.getLogger("alembic").logger
-                alembic_log.setLevel(logging.ERROR)
-
-        except cfg.ConfigFilesNotFoundError as e:
-            cfg_files = e.config_files
-            raise exceptions.RallyException(_(
-                "Failed to read configuration file(s): %s") % cfg_files)
-
-        # Check that the db is upgraded to the latest revision
-        if not skip_db_check:
-            self.check_db_revision()
-
-        # Load plugins
-        plugin_paths = plugin_paths or []
-        if "plugin_paths" in CONF:
-            plugin_paths.extend(CONF.get("plugin_paths") or [])
-        for path in plugin_paths:
-            discover.load_plugins(path)
-
-        # NOTE(andreykurilin): There is no reason to auto-discover APIs. We
-        #     have only 4 classes, so let's do it the good old way -
-        #     hardcode them :)
-        self._deployment = _Deployment(self)
-        self._task = _Task(self)
-        self._verifier = _Verifier(self)
-        self._verification = _Verification(self)
-
-    def _default_config_file(self):
-        for path in self.CONFIG_SEARCH_PATHS:
-            abspath = os.path.abspath(os.path.expanduser(path))
-            fpath = os.path.join(abspath, self.CONFIG_FILE_NAME)
-            if os.path.isfile(fpath):
-                return [fpath]
-
-    def check_db_revision(self):
-        rev = rally_version.database_revision()
-
-        # Check that the db exists
-        if rev["revision"] is None:
-            raise exceptions.RallyException(_(
-                "Database is missing.
Create database by command " - "`rally-manage db create'")) - - # Check that db is updated - if rev["revision"] != rev["current_head"]: - raise exceptions.RallyException(_( - "Database seems to be outdated. Run upgrade from " - "revision %(revision)s to %(current_head)s by command " - "`rally-manage db upgrade'") % rev) - - def _request(self, path, method, **kwargs): - headers = { - "RALLY-CLIENT-VERSION": rally_version.version_string(), - "RALLY-API": "1.0" - } - response = requests.request(method, path, - json=kwargs, headers=headers) - if response.status_code != 200: - raise exceptions.find_exception(response) - - return response.json( - object_pairs_hook=collections.OrderedDict)["result"] - - @property - def deployment(self): - return self._deployment - - @property - def task(self): - return self._task - - @property - def verifier(self): - return self._verifier - - @property - def verification(self): - return self._verification - - @property - def version(self): - return 1 diff --git a/rally/cli/__init__.py b/rally/cli/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/rally/cli/cliutils.py b/rally/cli/cliutils.py deleted file mode 100644 index 49db3da4..00000000 --- a/rally/cli/cliutils.py +++ /dev/null @@ -1,752 +0,0 @@ -# Copyright 2013: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from __future__ import print_function - -import argparse -import inspect -import json -import os -import sys -import textwrap -import warnings - -import decorator -import jsonschema -from oslo_config import cfg -from oslo_utils import encodeutils -import prettytable -import six -import sqlalchemy.exc - -from rally import api -from rally.common.i18n import _ -from rally.common import logging -from rally.common.plugin import info -from rally import exceptions - - -CONF = cfg.CONF -LOG = logging.getLogger(__name__) - - -# Some CLI-specific constants -MARGIN = 3 - - -class MissingArgs(Exception): - """Supplied arguments are not sufficient for calling a function.""" - def __init__(self, missing): - self.missing = missing - msg = _("Missing arguments: %s") % ", ".join(missing) - super(MissingArgs, self).__init__(msg) - - -def validate_args(fn, *args, **kwargs): - """Check that the supplied args are sufficient for calling a function. - - >>> validate_args(lambda a: None) - Traceback (most recent call last): - ... - MissingArgs: Missing argument(s): a - >>> validate_args(lambda a, b, c, d: None, 0, c=1) - Traceback (most recent call last): - ... 
- MissingArgs: Missing argument(s): b, d - - :param fn: the function to check - :param args: the positional arguments supplied - :param kwargs: the keyword arguments supplied - """ - argspec = inspect.getargspec(fn) - - num_defaults = len(argspec.defaults or []) - required_args = argspec.args[:len(argspec.args) - num_defaults] - - if getattr(fn, "__self__", None): - required_args.pop(0) - - missing_required_args = required_args[len(args):] - missing = [arg for arg in missing_required_args if arg not in kwargs] - if missing: - raise MissingArgs(missing) - - -def print_list(objs, fields, formatters=None, sortby_index=0, - mixed_case_fields=None, field_labels=None, - normalize_field_names=False, - table_label=None, print_header=True, print_border=True, - out=sys.stdout): - """Print a list or objects as a table, one row per object. - - :param objs: iterable of :class:`Resource` - :param fields: attributes that correspond to columns, in order - :param formatters: `dict` of callables for field formatting - :param sortby_index: index of the field for sorting table rows - :param mixed_case_fields: fields corresponding to object attributes that - have mixed case names (e.g., 'serverId') - :param field_labels: Labels to use in the heading of the table, default to - fields. - :param normalize_field_names: If True, field names will be transformed, - e.g. "Field Name" -> "field_name", otherwise they will be used - unchanged. - :param table_label: Label to use as header for the whole table. - :param print_header: print table header. - :param print_border: print table border. - :param out: stream to write output to. - - """ - formatters = formatters or {} - mixed_case_fields = mixed_case_fields or [] - field_labels = field_labels or fields - if len(field_labels) != len(fields): - raise ValueError(_("Field labels list %(labels)s has different number " - "of elements than fields list %(fields)s"), - {"labels": field_labels, "fields": fields}) - - if sortby_index is None: - kwargs = {} - else: - kwargs = {"sortby": field_labels[sortby_index]} - pt = prettytable.PrettyTable(field_labels) - pt.align = "l" - - for o in objs: - row = [] - for field in fields: - if field in formatters: - row.append(formatters[field](o)) - else: - field_name = field - - if normalize_field_names: - if field_name not in mixed_case_fields: - field_name = field_name.lower() - field_name = field_name.replace(" ", "_").replace("-", "_") - - if isinstance(o, dict): - data = o.get(field_name, "") - else: - data = getattr(o, field_name, "") - row.append(data) - pt.add_row(row) - - if not print_border or not print_header: - pt.set_style(prettytable.PLAIN_COLUMNS) - pt.left_padding_width = 0 - pt.right_padding_width = 1 - - table_body = pt.get_string(header=print_header, - border=print_border, - **kwargs) + "\n" - - table_header = "" - - if table_label: - table_width = table_body.index("\n") - table_header = make_table_header(table_label, table_width) - table_header += "\n" - - if six.PY3: - if table_header: - out.write(encodeutils.safe_encode(table_header).decode()) - out.write(encodeutils.safe_encode(table_body).decode()) - else: - if table_header: - out.write(encodeutils.safe_encode(table_header)) - out.write(encodeutils.safe_encode(table_body)) - - -def print_dict(obj, fields=None, formatters=None, mixed_case_fields=False, - normalize_field_names=False, property_label="Property", - value_label="Value", table_label=None, print_header=True, - print_border=True, wrap=0, out=sys.stdout): - """Print dict as a table. 
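A quick usage sketch of the print_list() helper above; the sample rows, field names and formatter are invented for illustration.

```python
import sys

from rally.cli import cliutils

rows = [
    {"uuid": "6fd9097d", "name": "boot-server", "status": "finished"},
    {"uuid": "a31f2f03", "name": "list-images", "status": "running"},
]

# One table row per object; plain columns are looked up as dict keys (or
# attributes), while "status" goes through a formatter callable.
cliutils.print_list(rows,
                    fields=["uuid", "name", "status"],
                    formatters={"status": lambda o: o["status"].upper()},
                    sortby_index=1,  # sort by the "name" column
                    out=sys.stdout)
```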
- - :param obj: dict to print - :param fields: `dict` of keys to print from d. Defaults to all keys - :param formatters: `dict` of callables for field formatting - :param mixed_case_fields: fields corresponding to object attributes that - have mixed case names (e.g., 'serverId') - :param normalize_field_names: If True, field names will be transformed, - e.g. "Field Name" -> "field_name", otherwise they will be used - unchanged. - :param property_label: label of "property" column - :param value_label: label of "value" column - :param table_label: Label to use as header for the whole table. - :param print_header: print table header. - :param print_border: print table border. - :param out: stream to write output to. - """ - formatters = formatters or {} - mixed_case_fields = mixed_case_fields or [] - if not fields: - if isinstance(obj, dict): - fields = sorted(obj.keys()) - else: - fields = [name for name in dir(obj) - if (not name.startswith("_") and - not callable(getattr(obj, name)))] - - pt = prettytable.PrettyTable([property_label, value_label], caching=False) - pt.align = "l" - for field_name in fields: - if field_name in formatters: - data = formatters[field_name](obj) - else: - field = field_name - if normalize_field_names: - if field not in mixed_case_fields: - field = field_name.lower() - field = field.replace(" ", "_").replace("-", "_") - - if isinstance(obj, dict): - data = obj.get(field, "") - else: - data = getattr(obj, field, "") - - # convert dict to str to check length - if isinstance(data, (dict, list)): - data = json.dumps(data) - if wrap > 0: - data = textwrap.fill(six.text_type(data), wrap) - # if value has a newline, add in multiple rows - # e.g. fault with stacktrace - if (data and - isinstance(data, six.string_types) and - (r"\n" in data or "\r" in data)): - # "\r" would break the table, so remove it. - if "\r" in data: - data = data.replace("\r", "") - lines = data.strip().split(r"\n") - col1 = field_name - for line in lines: - pt.add_row([col1, line]) - col1 = "" - else: - if data is None: - data = "-" - pt.add_row([field_name, data]) - - table_body = pt.get_string(header=print_header, - border=print_border) + "\n" - - table_header = "" - - if table_label: - table_width = table_body.index("\n") - table_header = make_table_header(table_label, table_width) - table_header += "\n" - - if six.PY3: - if table_header: - out.write(encodeutils.safe_encode(table_header).decode()) - out.write(encodeutils.safe_encode(table_body).decode()) - else: - if table_header: - out.write(encodeutils.safe_encode(table_header)) - out.write(encodeutils.safe_encode(table_body)) - - -def make_table_header(table_label, table_width, - junction_char="+", horizontal_char="-", - vertical_char="|"): - """Generalized way make a table header string. - - :param table_label: label to print on header - :param table_width: total width of table - :param junction_char: character used where vertical and - horizontal lines meet. - :param horizontal_char: character used for horizontal lines. - :param vertical_char: character used for vertical lines. 
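And a matching sketch for print_dict(): one mapping rendered as a two-column Property/Value table. The deployment dict is a made-up example.

```python
from rally.cli import cliutils

deployment = {
    "uuid": "6fd9097d-2a2e-4e8b-bb04-3d6f5f0a7e11",
    "name": "devstack",
    "status": "deploy->finished",
}

# Long values are wrapped at 40 characters; table_label draws an extra
# header row above the table.
cliutils.print_dict(deployment, wrap=40, table_label="Deployment")
```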
-
-    :returns: string
-    """
-
-    if len(table_label) >= (table_width - 2):
-        raise ValueError(_("Table header %s is longer than the total "
-                           "width of the table.") % table_label)
-
-    label_and_space_width = table_width - len(table_label) - 2
-    padding = 0 if label_and_space_width % 2 == 0 else 1
-
-    half_table_width = label_and_space_width // 2
-    left_spacing = (" " * half_table_width)
-    right_spacing = (" " * (half_table_width + padding))
-
-    border_line = "".join((junction_char,
-                           (horizontal_char * (table_width - 2)),
-                           junction_char,))
-
-    label_line = "".join((vertical_char,
-                          left_spacing,
-                          table_label,
-                          right_spacing,
-                          vertical_char,))
-
-    return "\n".join((border_line, label_line,))
-
-
-def make_header(text, size=80, symbol="-"):
-    """Unified way to make a header message for the CLI.
-
-    :param text: what text to write
-    :param size: length of the header decorative line
-    :param symbol: what symbol to use to create the header
-    """
-    header = symbol * size + "\n"
-    header += "%s\n" % text
-    header += symbol * size + "\n"
-    return header
-
-
-def suppress_warnings(f):
-    f._suppress_warnings = True
-    return f
-
-
-@decorator.decorator
-def process_keystone_exc(f, *args, **kwargs):
-    from keystoneclient import exceptions as keystone_exc
-
-    try:
-        return f(*args, **kwargs)
-    except keystone_exc.Unauthorized as e:
-        print(_("User credentials are wrong! \n%s") % e)
-        return 1
-    except keystone_exc.AuthorizationFailure as e:
-        print(_("Failed to authorize! \n%s") % e)
-        return 1
-    except keystone_exc.ConnectionRefused as e:
-        print(_("Rally can't reach the Keystone service! \n%s") % e)
-        return 1
-
-
-class CategoryParser(argparse.ArgumentParser):
-
-    """Customized argument parser.
-
-    We need this one to override hardcoded behavior: we want to print the
-    item's help instead of 'error: too few arguments', and we do not want
-    positional arguments printed in the help message.
-    """
-
-    def format_help(self):
-        formatter = self._get_formatter()
-
-        # usage
-        formatter.add_usage(self.usage, self._actions,
-                            self._mutually_exclusive_groups)
-
-        # description
-        formatter.add_text(self.description)
-
-        # positionals, optionals and user-defined groups
-        # INFO(oanufriev) _action_groups[0] contains positional arguments.
-        for action_group in self._action_groups[1:]:
-            formatter.start_section(action_group.title)
-            formatter.add_text(action_group.description)
-            formatter.add_arguments(action_group._group_actions)
-            formatter.end_section()
-
-        # epilog
-        formatter.add_text(self.epilog)
-
-        # determine help from format above
-        return formatter.format_help()
-
-    def error(self, message):
-        self.print_help(sys.stderr)
-        if message.startswith("argument") and message.endswith("is required"):
-            # NOTE(pirsriva): argparse currently reports only one missing
-            # argument at a time, i.e. the error message will NOT list ALL
-            # the missing arguments at once.
-            missing_arg = message.split()[1]
-            print(_("Missing argument:\n%s") % missing_arg)
-        sys.exit(2)
-
-
-def pretty_float_formatter(field, ndigits=None):
-    """Create a float value formatter function for the given field.
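For reference, a small demo of the two header helpers defined above; the labels and widths are arbitrary.

```python
from rally.cli import cliutils

# Prints a 40-character wide bordered header: a +---...---+ line followed
# by the label centered between vertical bars.
print(cliutils.make_table_header("Response Times", 40))

# Prints a plain header: a line of symbols, the text, another line of
# symbols.
print(cliutils.make_header("Preparing input task", size=30, symbol="-"))
```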
- - :param field: str name of an object, which value should be formatted - :param ndigits: int number of digits after decimal point to round - default is None - this disables rounding - :returns: field formatter function - """ - def _formatter(obj): - value = obj[field] if type(obj) == dict else getattr(obj, field) - if type(value) in (int, float): - if ndigits: - return round(value, ndigits) - return value - return "n/a" - return _formatter - - -def args(*args, **kwargs): - def _decorator(func): - func.__dict__.setdefault("args", []).insert(0, (args, kwargs)) - if "metavar" not in kwargs and "action" not in kwargs: - # NOTE(andreykurilin): argparse constructs awful metavars... - kwargs["metavar"] = "<%s>" % args[0].replace( - "--", "").replace("-", "_") - return func - return _decorator - - -def alias(command_name): - """Allow cli to use alias command name instead of function name. - - :param command_name: desired command name - """ - def decorator(func): - func.alias = command_name - return func - return decorator - - -def deprecated_args(*args, **kwargs): - def _decorator(func): - if "release" not in kwargs: - raise ValueError("'release' is required keyword argument of " - "'deprecated_args' decorator.") - func.__dict__.setdefault("args", []).insert(0, (args, kwargs)) - func.__dict__.setdefault("deprecated_args", []) - func.deprecated_args.append(args[0]) - - help_msg = "[Deprecated since Rally %s] " % kwargs.pop("release") - if "alternative" in kwargs: - help_msg += "Use '%s' instead. " % kwargs.pop("alternative") - if "help" in kwargs: - help_msg += kwargs["help"] - kwargs["help"] = help_msg - return func - return _decorator - - -def help_group(uuid): - """Label cli method with specific group. - - Joining methods by groups allows to compose more user-friendly help - messages in CLI. - - :param uuid: Name of group to find common methods. It will be used for - sorting groups in help message, so you can start uuid with - some number (i.e "1_launcher", "2_management") to put groups in proper - order. Note: default group had "0" uuid. - """ - - def wrapper(func): - func.help_group = uuid - return func - return wrapper - - -def _methods_of(cls): - """Get all callable methods of a class that don't start with underscore. - - :returns: a list of tuples of the form (method_name, method) - """ - # The idea of unbound methods exists in Python 2 and was removed in - # Python 3, so "inspect.ismethod" is used here for Python 2 and - # "inspect.isfunction" for Python 3. 
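To see how the decorators above compose, here is a hypothetical command method; none of these names come from the patch itself.

```python
from rally.cli import cliutils


class ExampleCommands(object):

    @cliutils.args("--uuid", type=str, dest="task_id", help="UUID of task.")
    @cliutils.deprecated_args("--task-id", dest="task_id", type=str,
                              release="0.10.0", alternative="--uuid")
    @cliutils.alias("show-status")
    @cliutils.help_group("1_query")
    def status(self, api, task_id=None):
        """Display the current status of a task."""
        print(api.task.get(task_id=task_id)["status"])
```

Each `args`/`deprecated_args` call prepends an argparse spec to `func.args`; `alias` renames the generated subcommand, and `help_group` only affects how methods are grouped in the composed help message.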
- all_methods = inspect.getmembers( - cls, predicate=lambda x: inspect.ismethod(x) or inspect.isfunction(x)) - methods = [m for m in all_methods if not m[0].startswith("_")] - - help_groups = {} - for m in methods: - group = getattr(m[1], "help_group", "0") - help_groups.setdefault(group, []).append(m) - - if len(help_groups) > 1: - # we should sort methods by groups - methods = [] - for group in sorted(help_groups.items(), key=lambda x: x[0]): - if methods: - # None -> empty line between groups - methods.append((None, None)) - methods.extend(group[1]) - return methods - - -def _compose_category_description(category): - - descr_pairs = _methods_of(category) - - description = "" - doc = category.__doc__ - if doc: - description = doc.strip() - if descr_pairs: - description += "\n\nCommands:\n" - sublen = lambda item: len(item[0]) if item[0] else 0 - first_column_len = max(map(sublen, descr_pairs)) + MARGIN - for item in descr_pairs: - if item[0] is None: - description += "\n" - continue - name = getattr(item[1], "alias", item[0].replace("_", "-")) - if item[1].__doc__: - doc = info.parse_docstring( - item[1].__doc__)["short_description"] - else: - doc = "" - name += " " * (first_column_len - len(name)) - description += " %s%s\n" % (name, doc) - - return description - - -def _compose_action_description(action_fn): - description = "" - if action_fn.__doc__: - parsed_doc = info.parse_docstring(action_fn.__doc__) - short = parsed_doc.get("short_description") - long = parsed_doc.get("long_description") - - description = "%s\n\n%s" % (short, long) if long else short - - return description - - -def _add_command_parsers(categories, subparsers): - - # INFO(oanufriev) This monkey patching makes our custom parser class to be - # used instead of native. This affects all subparsers down from - # 'subparsers' parameter of this function (categories and actions). - subparsers._parser_class = CategoryParser - - parser = subparsers.add_parser("version") - - parser = subparsers.add_parser("bash-completion") - parser.add_argument("query_category", nargs="?") - - for category in categories: - command_object = categories[category]() - descr = _compose_category_description(categories[category]) - parser = subparsers.add_parser( - category, description=descr, - formatter_class=argparse.RawDescriptionHelpFormatter) - parser.set_defaults(command_object=command_object) - - category_subparsers = parser.add_subparsers(dest="action") - - for method_name, method in _methods_of(command_object): - if method is None: - continue - method_name = method_name.replace("_", "-") - descr = _compose_action_description(method) - parser = category_subparsers.add_parser( - getattr(method, "alias", method_name), - formatter_class=argparse.RawDescriptionHelpFormatter, - description=descr, help=descr) - - action_kwargs = [] - for args, kwargs in getattr(method, "args", []): - # FIXME(markmc): hack to assume dest is the arg name without - # the leading hyphens if no dest is supplied - kwargs.setdefault("dest", args[0][2:]) - action_kwargs.append(kwargs["dest"]) - kwargs["dest"] = "action_kwarg_" + kwargs["dest"] - parser.add_argument(*args, **kwargs) - - parser.set_defaults(action_fn=method) - parser.set_defaults(action_kwargs=action_kwargs) - parser.add_argument("action_args", nargs="*") - - -def validate_deprecated_args(argv, fn): - if (len(argv) > 3 - and (argv[2] == fn.__name__) - and getattr(fn, "deprecated_args", None)): - for item in fn.deprecated_args: - if item in argv[3:]: - LOG.warning("Deprecated argument %s for %s." 
% (item, - fn.__name__)) - - -def run(argv, categories): - parser = lambda subparsers: _add_command_parsers(categories, subparsers) - category_opt = cfg.SubCommandOpt("category", - title="Command categories", - help="Available categories", - handler=parser) - - CONF.register_cli_opt(category_opt) - help_msg = ("Additional custom plugin locations. Multiple files or " - "directories may be specified. All plugins in the specified" - " directories and subdirectories will be imported. Plugins in" - " /opt/rally/plugins and ~/.rally/plugins will always be " - "imported.") - - CONF.register_cli_opt(cfg.ListOpt("plugin-paths", - default=os.environ.get( - "RALLY_PLUGIN_PATHS"), - help=help_msg)) - - try: - rapi = api.API(config_args=argv[1:], skip_db_check=True) - except exceptions.RallyException as e: - print(e) - return(2) - - if CONF.category.name == "version": - print(CONF.version) - return(0) - - if CONF.category.name == "bash-completion": - print(_generate_bash_completion_script()) - return(0) - - fn = CONF.category.action_fn - fn_args = [encodeutils.safe_decode(arg) - for arg in CONF.category.action_args] - # api instance always is the first argument - fn_args.insert(0, rapi) - fn_kwargs = {} - for k in CONF.category.action_kwargs: - v = getattr(CONF.category, "action_kwarg_" + k) - if v is None: - continue - if isinstance(v, six.string_types): - v = encodeutils.safe_decode(v) - fn_kwargs[k] = v - - # call the action with the remaining arguments - # check arguments - try: - validate_args(fn, *fn_args, **fn_kwargs) - except MissingArgs as e: - # NOTE(mikal): this isn't the most helpful error message ever. It is - # long, and tells you a lot of things you probably don't want to know - # if you just got a single arg wrong. - print(fn.__doc__) - CONF.print_help() - print("Missing arguments:") - for missing in e.missing: - for arg in fn.args: - if arg[1].get("dest", "").endswith(missing): - print(" " + arg[0][0]) - break - return(1) - - try: - validate_deprecated_args(argv, fn) - - # skip db check for db and plugin commands - if CONF.category.name not in ("db", "plugin"): - rapi.check_db_revision() - - if getattr(fn, "_suppress_warnings", False): - with warnings.catch_warnings(): - warnings.simplefilter("ignore") - ret = fn(*fn_args, **fn_kwargs) - else: - ret = fn(*fn_args, **fn_kwargs) - return(ret) - - except (IOError, TypeError, ValueError, - exceptions.RallyException, jsonschema.ValidationError) as e: - if logging.is_debug(): - LOG.exception(e) - else: - print(e) - return 1 - except sqlalchemy.exc.OperationalError as e: - if logging.is_debug(): - LOG.exception(e) - print(e) - print("Looks like Rally can't connect to its DB.") - print("Make sure that connection string in rally.conf is proper:") - print(CONF.database.connection) - return 1 - except Exception: - print(_("Command failed, please check log for more info")) - raise - - -def _generate_bash_completion_script(): - from rally.cli import main - bash_data = """#!/bin/bash - -# Standalone _filedir() alternative. 
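The run() dispatcher above is driven by a console entry point; the following is a simplified sketch in the spirit of rally/cli/main.py, not the verbatim module.

```python
import sys

from rally.cli import cliutils
from rally.cli.commands import deployment, plugin, task

categories = {
    "deployment": deployment.DeploymentCommands,
    "plugin": plugin.PluginCommands,
    "task": task.TaskCommands,
}


def main():
    # run() registers the CLI options, builds the category/action
    # subparsers and returns the exit code of the selected action.
    return cliutils.run(sys.argv, categories)


if __name__ == "__main__":
    sys.exit(main())
```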
-# This exempts from dependence of bash completion routines -function _rally_filedir() -{ - test "${1}" \\ - && COMPREPLY=( \\ - $(compgen -f -- "${cur}" | grep -E "${1}") \\ - $(compgen -o plusdirs -- "${cur}") ) \\ - || COMPREPLY=( \\ - $(compgen -o plusdirs -f -- "${cur}") \\ - $(compgen -d -- "${cur}") ) -} - -_rally() -{ - declare -A SUBCOMMANDS - declare -A OPTS - -%(data)s - for OPT in ${!OPTS[*]} ; do - CMD=${OPT%%%%_*} - CMDSUB=${OPT#*_} - SUBCOMMANDS[${CMD}]+="${CMDSUB} " - done - - COMMANDS="${!SUBCOMMANDS[*]}" - COMPREPLY=() - - local cur="${COMP_WORDS[COMP_CWORD]}" - local prev="${COMP_WORDS[COMP_CWORD-1]}" - - if [[ $cur =~ ^(\.|\~|\/) ]] || [[ $prev =~ ^--out(|put-file)$ ]] ; then - _rally_filedir - elif [[ $prev =~ ^--(task|filename)$ ]] ; then - _rally_filedir "\\.json|\\.yaml|\\.yml" - elif [ $COMP_CWORD == "1" ] ; then - COMPREPLY=($(compgen -W "$COMMANDS" -- ${cur})) - elif [ $COMP_CWORD == "2" ] ; then - COMPREPLY=($(compgen -W "${SUBCOMMANDS[${prev}]}" -- ${cur})) - else - COMMAND="${COMP_WORDS[1]}_${COMP_WORDS[2]}" - COMPREPLY=($(compgen -W "${OPTS[$COMMAND]}" -- ${cur})) - fi - return 0 -} - -complete -o filenames -F _rally rally -""" - completion = [] - for category, cmds in main.categories.items(): - for name, command in _methods_of(cmds): - if name is None: - continue - command_name = getattr(command, "alias", name.replace("_", "-")) - args_list = [] - for arg in getattr(command, "args", []): - if getattr(command, "deprecated_args", []): - if arg[0][0] not in command.deprecated_args: - args_list.append(arg[0][0]) - else: - args_list.append(arg[0][0]) - args = " ".join(args_list) - - completion.append(""" OPTS["{cat}_{cmd}"]="{args}"\n""".format( - cat=category, cmd=command_name, args=args)) - return bash_data % {"data": "".join(sorted(completion))} diff --git a/rally/cli/commands/__init__.py b/rally/cli/commands/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/rally/cli/commands/deployment.py b/rally/cli/commands/deployment.py deleted file mode 100644 index 27f0e3c2..00000000 --- a/rally/cli/commands/deployment.py +++ /dev/null @@ -1,347 +0,0 @@ -# Copyright 2013: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
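A hypothetical helper (not part of the patch) that regenerates the completion script above and installs it for the current user; the target path is an assumption.

```python
import os

from rally.cli import cliutils


def install_completion(path="~/.bash_completion.d/rally"):
    # Each generated data line has the form:
    #   OPTS["<category>_<command>"]="--arg1 --arg2"
    script = cliutils._generate_bash_completion_script()
    path = os.path.expanduser(path)
    os.makedirs(os.path.dirname(path), exist_ok=True)  # Python 3 only
    with open(path, "w") as f:
        f.write(script)
    return path
```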
- -"""Rally command: deployment""" - -from __future__ import print_function - -import json -import os -import sys - -import jsonschema - -from rally.cli import cliutils -from rally.cli import envutils -from rally.common import fileutils -from rally.common.i18n import _ -from rally.common import logging -from rally.common import utils -from rally.common import yamlutils as yaml -from rally import exceptions -from rally import plugins - - -class DeploymentCommands(object): - """Set of commands that allow you to manage deployments.""" - - @cliutils.args("--name", type=str, required=True, - help="Name of the deployment.") - @cliutils.args("--fromenv", action="store_true", - help="Read environment variables instead of config file.") - @cliutils.args("--filename", type=str, required=False, metavar="", - help="Path to the configuration file of the deployment.") - @cliutils.args("--no-use", action="store_false", dest="do_use", - help="Don't set new deployment as default for" - " future operations.") - @plugins.ensure_plugins_are_loaded - def create(self, api, name, fromenv=False, filename=None, do_use=False): - """Create new deployment. - - This command will create a new deployment record in rally - database. In the case of ExistingCloud deployment engine, it - will use the cloud represented in the configuration. If the - cloud doesn't exist, Rally can deploy a new one for you with - Devstack or Fuel. Different deployment engines exist for these - cases (see `rally plugin list --plugin-base Engine` for - more details). - - If you use the ExistingCloud deployment engine, you can pass - the deployment config by environment variables with ``--fromenv``: - - OS_USERNAME - OS_PASSWORD - OS_AUTH_URL - OS_TENANT_NAME or OS_PROJECT_NAME - OS_ENDPOINT_TYPE or OS_INTERFACE - OS_ENDPOINT - OS_REGION_NAME - OS_CACERT - OS_INSECURE - OS_IDENTITY_API_VERSION - - All other deployment engines need more complex configuration - data, so it should be stored in a configuration file. - - You can use physical servers, LXC containers, KVM virtual - machines or virtual machines in OpenStack for deploying the - cloud. Except physical servers, Rally can create cluster nodes - for you. Interaction with virtualization software, OpenStack - cloud or physical servers is provided by server providers. 
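To make the description above concrete, here is an illustrative ExistingCloud configuration; every credential value is a placeholder.

```python
config = {
    "type": "ExistingCloud",
    "creds": {
        "openstack": {
            "auth_url": "http://example.com:5000/v3",
            "region_name": "RegionOne",
            "endpoint_type": "public",
            "admin": {
                "username": "admin",
                "password": "secret",
                "project_name": "admin",
                "user_domain_name": "Default",
                "project_domain_name": "Default",
            },
        }
    },
}

# Roughly what `rally deployment create --filename existing.json` does:
# api.deployment.create(config=config, name="existing-cloud")
```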
- - :param fromenv: boolean, read environment instead of config file - :param filename: path to the configuration file - :param name: name of the deployment - """ - - if fromenv: - # TODO(astudenov): move this to Credential plugin - config = { - "type": "ExistingCloud", - "creds": {"openstack": envutils.get_creds_from_env_vars()}} - else: - if not filename: - print("Either --filename or --fromenv is required.") - return 1 - filename = os.path.expanduser(filename) - with open(filename, "rb") as deploy_file: - config = yaml.safe_load(deploy_file.read()) - - try: - deployment = api.deployment.create(config=config, name=name) - except jsonschema.ValidationError: - print(_("Config schema validation error: %s.") % sys.exc_info()[1]) - return 1 - except exceptions.DeploymentNameExists: - print(_("Error: %s") % sys.exc_info()[1]) - return 1 - - self.list(api, deployment_list=[deployment]) - if do_use: - self.use(api, deployment) - - @cliutils.args("--filename", type=str, required=False, metavar="", - help="Path to the configuration file of the deployment.") - @cliutils.args("--deployment", dest="deployment", type=str, - metavar="", required=False, - help="UUID or name of the deployment.") - @envutils.with_default_deployment() - @plugins.ensure_plugins_are_loaded - def recreate(self, api, deployment=None, filename=None): - """Destroy and create an existing deployment. - - Unlike 'deployment destroy', the deployment database record - will not be deleted, so the deployment UUID stays the same. - - :param deployment: UUID or name of the deployment - """ - config = None - if filename: - with open(filename, "rb") as deploy_file: - config = yaml.safe_load(deploy_file.read()) - - api.deployment.recreate(deployment=deployment, config=config) - - @cliutils.args("--deployment", dest="deployment", type=str, - metavar="", required=False, - help="UUID or name of the deployment.") - @envutils.with_default_deployment() - @plugins.ensure_plugins_are_loaded - def destroy(self, api, deployment=None): - """Destroy existing deployment. - - This will delete all containers, virtual machines, OpenStack - instances or Fuel clusters created during Rally deployment - creation. Also it will remove the deployment record from the - Rally database. - - :param deployment: UUID or name of the deployment - """ - api.deployment.destroy(deployment=deployment) - - def list(self, api, deployment_list=None): - """List existing deployments.""" - - headers = ["uuid", "created_at", "name", "status", "active"] - current_deployment = envutils.get_global("RALLY_DEPLOYMENT") - deployment_list = deployment_list or api.deployment.list() - - table_rows = [] - if deployment_list: - for t in deployment_list: - r = [str(t[column]) for column in headers[:-1]] - r.append("" if t["uuid"] != current_deployment else "*") - table_rows.append(utils.Struct(**dict(zip(headers, r)))) - cliutils.print_list(table_rows, headers, - sortby_index=headers.index("created_at")) - else: - print(_("There are no deployments. " - "To create a new deployment, use:" - "\nrally deployment create")) - - @cliutils.args("--deployment", dest="deployment", type=str, - metavar="", required=False, - help="UUID or name of the deployment.") - @envutils.with_default_deployment() - @cliutils.suppress_warnings - def config(self, api, deployment=None): - """Display configuration of the deployment. - - Output is the configuration of the deployment in a - pretty-printed JSON format. 
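The config command above boils down to a get-and-dump; a minimal equivalent through the Python API (the deployment name is invented):

```python
import json

from rally import api

rapi = api.API(config_args=[])
deploy = rapi.deployment.get(deployment="devstack")
print(json.dumps(deploy["config"], sort_keys=True, indent=4))
```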
- - :param deployment: UUID or name of the deployment - """ - deploy = api.deployment.get(deployment=deployment) - result = deploy["config"] - print(json.dumps(result, sort_keys=True, indent=4)) - - @cliutils.args("--deployment", dest="deployment", type=str, - metavar="", required=False, - help="UUID or name of the deployment.") - @envutils.with_default_deployment() - @plugins.ensure_plugins_are_loaded - def show(self, api, deployment=None): - """Show the credentials of the deployment. - - :param deployment: UUID or name of the deployment - """ - # TODO(astudenov): make this method platform independent - - headers = ["auth_url", "username", "password", "tenant_name", - "region_name", "endpoint_type"] - table_rows = [] - - deployment = api.deployment.get(deployment=deployment) - - creds = deployment["credentials"]["openstack"][0] - users = creds["users"] - admin = creds["admin"] - credentials = users + [admin] if admin else users - for ep in credentials: - data = ["***" if m == "password" else ep.get(m, "") - for m in headers] - table_rows.append(utils.Struct(**dict(zip(headers, data)))) - cliutils.print_list(table_rows, headers) - - @cliutils.args("--deployment", dest="deployment", type=str, - metavar="", required=False, - help="UUID or name of the deployment.") - @envutils.with_default_deployment() - @plugins.ensure_plugins_are_loaded - def check(self, api, deployment=None): - """Check all credentials and list all available services. - - :param deployment: UUID or name of the deployment - """ - - def is_field_there(lst, field): - return bool([item for item in lst if field in item]) - - def print_error(user_type, error): - print(_("Error while checking %s credentials:") % user_type) - if logging.is_debug(): - print(error["trace"]) - else: - print("\t%s: %s" % (error["etype"], error["msg"])) - - exit_code = 0 - - info = api.deployment.check(deployment=deployment) - for platform in info: - for i, credentials in enumerate(info[platform]): - failed = False - - n = "" if len(info[platform]) == 1 else " #%s" % (i + 1) - header = "Platform %s%s:" % (platform, n) - print(cliutils.make_header(header)) - if "admin_error" in credentials: - print_error("admin", credentials["admin_error"]) - failed = True - if "user_error" in credentials: - print_error("users", credentials["user_error"]) - failed = True - - if not failed: - print("Available services:") - formatters = { - "Service": lambda x: x.get("name"), - "Service Type": lambda x: x.get("type"), - "Status": lambda x: x.get("status", "Available")} - if (is_field_there(credentials["services"], "type") and - is_field_there(credentials["services"], "name")): - headers = ["Service", "Service Type", "Status"] - else: - headers = ["Service", "Status"] - - if is_field_there(credentials["services"], "version"): - headers.append("Version") - - if is_field_there(credentials["services"], "description"): - headers.append("Description") - - cliutils.print_list(credentials["services"], headers, - normalize_field_names=True, - formatters=formatters) - else: - exit_code = 1 - print("\n") - - return exit_code - - def _update_openrc_deployment_file(self, deployment, credential): - openrc_path = os.path.expanduser("~/.rally/openrc-%s" % deployment) - with open(openrc_path, "w+") as env_file: - env_file.write("export OS_AUTH_URL='%(auth_url)s'\n" - "export OS_USERNAME='%(username)s'\n" - "export OS_PASSWORD='%(password)s'\n" - "export OS_TENANT_NAME='%(tenant_name)s'\n" - "export OS_PROJECT_NAME='%(tenant_name)s'\n" - % credential) - if credential.get("region_name"): - 
env_file.write("export OS_REGION_NAME='%s'\n" % - credential["region_name"]) - if credential.get("endpoint_type"): - env_file.write("export OS_ENDPOINT_TYPE='%sURL'\n" % - credential["endpoint_type"]) - env_file.write("export OS_INTERFACE='%s'\n" % - credential["endpoint_type"]) - if credential.get("endpoint"): - env_file.write("export OS_ENDPOINT='%s'\n" % - credential["endpoint"]) - if credential.get("https_cacert"): - env_file.write("export OS_CACERT='%s'\n" % - credential["https_cacert"]) - if credential.get("project_domain_name"): - env_file.write("export OS_IDENTITY_API_VERSION=3\n" - "export OS_USER_DOMAIN_NAME='%s'\n" - "export OS_PROJECT_DOMAIN_NAME='%s'\n" % - (credential["user_domain_name"], - credential["project_domain_name"])) - expanded_path = os.path.expanduser("~/.rally/openrc") - if os.path.exists(expanded_path): - os.remove(expanded_path) - os.symlink(openrc_path, expanded_path) - - @cliutils.args("--deployment", dest="deployment", type=str, - metavar="", required=False, - help="UUID or name of a deployment.") - @plugins.ensure_plugins_are_loaded - def use(self, api, deployment): - """Set active deployment. - - :param deployment: UUID or name of the deployment - """ - # TODO(astudenov): make this method platform independent - try: - if not isinstance(deployment, dict): - deployment = api.deployment.get(deployment=deployment) - except exceptions.DeploymentNotFound: - print("Deployment %s is not found." % deployment) - return 1 - print("Using deployment: %s" % deployment["uuid"]) - - fileutils.update_globals_file("RALLY_DEPLOYMENT", - deployment["uuid"]) - - if "openstack" in deployment["credentials"]: - creds = deployment["credentials"]["openstack"][0] - self._update_openrc_deployment_file( - deployment["uuid"], creds["admin"] or creds["users"][0]) - print("~/.rally/openrc was updated\n\nHINTS:\n" - "\n* To use standard OpenStack clients, set up your env by " - "running:\n\tsource ~/.rally/openrc\n" - " OpenStack clients are now configured, e.g run:\n\t" - "openstack image list") diff --git a/rally/cli/commands/plugin.py b/rally/cli/commands/plugin.py deleted file mode 100644 index 4e9120b1..00000000 --- a/rally/cli/commands/plugin.py +++ /dev/null @@ -1,111 +0,0 @@ -# Copyright 2015: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
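A short sketch of invoking the use() command above programmatically; the deployment name is illustrative.

```python
from rally import api
from rally.cli.commands import deployment as deployment_cli

rapi = api.API(config_args=[])
cmd = deployment_cli.DeploymentCommands()

# Records "devstack" as the default deployment, writes
# ~/.rally/openrc-<uuid> and re-points the ~/.rally/openrc symlink at it.
cmd.use(rapi, "devstack")
```

After that, standard OpenStack clients can be configured with `source ~/.rally/openrc`, as the hint printed by the command says.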
- -from __future__ import print_function - -from rally.cli import cliutils -from rally.common.plugin import plugin -from rally.common import utils -from rally import plugins - - -class PluginCommands(object): - """Set of commands that allow you to manage Rally plugins.""" - - @staticmethod - def _print_plugins_list(plugin_list): - formatters = { - "Name": lambda p: p.get_name(), - "Namespace": lambda p: p.get_platform(), - "Title": lambda p: p.get_info()["title"], - "Plugin base": lambda p: p._get_base().__name__ - } - - cliutils.print_list(plugin_list, formatters=formatters, - normalize_field_names=True, - fields=["Plugin base", "Name", "Namespace", - "Title"]) - - @cliutils.args("--name", dest="name", type=str, - help="Plugin name.") - @cliutils.args("--namespace", dest="namespace", type=str, - help="Plugin namespace.") - @plugins.ensure_plugins_are_loaded - def show(self, api, name, namespace=None): - """Show detailed information about a Rally plugin.""" - name_lw = name.lower() - all_plugins = plugin.Plugin.get_all(platform=namespace) - found = [p for p in all_plugins if name_lw in p.get_name().lower()] - exact_match = [p for p in found if name_lw == p.get_name().lower()] - - if not found: - if namespace: - print( - "There is no plugin: %(name)s in %(namespace)s namespace" - % {"name": name, "namespace": namespace} - ) - else: - print("There is no plugin: %s" % name) - - elif len(found) == 1 or exact_match: - plugin_ = found[0] if len(found) == 1 else exact_match[0] - plugin_info = plugin_.get_info() - print(cliutils.make_header(plugin_info["title"])) - print("NAME\n\t%s" % plugin_info["name"]) - print("NAMESPACE\n\t%s" % plugin_info["namespace"]) - print("MODULE\n\t%s" % plugin_info["module"]) - if plugin_info["description"]: - print("DESCRIPTION\n\t", end="") - print("\n\t".join(plugin_info["description"].split("\n"))) - if plugin_info["parameters"]: - print("PARAMETERS") - rows = [utils.Struct(name=p["name"], - description=p["doc"]) - for p in plugin_info["parameters"]] - cliutils.print_list(rows, fields=["name", "description"], - sortby_index=None) - else: - print("Multiple plugins found:") - self._print_plugins_list(found) - - @cliutils.args( - "--name", dest="name", type=str, - help="List only plugins that match the given name.") - @cliutils.args( - "--namespace", dest="namespace", type=str, - help="List only plugins that are in the specified namespace.") - @cliutils.args( - "--plugin-base", dest="base_cls", type=str, - help="Plugin base class.") - @plugins.ensure_plugins_are_loaded - def list(self, api, name=None, namespace=None, base_cls=None): - """List all Rally plugins that match name and namespace.""" - all_plugins = plugin.Plugin.get_all(platform=namespace) - matched = all_plugins - if name: - name_lw = name.lower() - matched = [p for p in all_plugins - if name_lw in p.get_name().lower()] - - if base_cls: - matched = [p for p in matched - if p._get_base().__name__ == base_cls] - - if not all_plugins: - print("There is no plugin namespace: %s" % namespace) - elif not matched: - print("There is no plugin: %s" % name) - else: - self._print_plugins_list(matched) diff --git a/rally/cli/commands/task.py b/rally/cli/commands/task.py deleted file mode 100644 index b5541549..00000000 --- a/rally/cli/commands/task.py +++ /dev/null @@ -1,1034 +0,0 @@ -# Copyright 2013: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Rally command: task""" - -from __future__ import print_function -import collections -import datetime as dt -import itertools -import json -import os -import sys -import webbrowser - -import jsonschema -from oslo_utils import uuidutils -import six - -from rally.cli import cliutils -from rally.cli import envutils -from rally.common import fileutils -from rally.common.i18n import _ -from rally.common.io import junit -from rally.common import logging -from rally.common import utils as rutils -from rally.common import version -from rally.common import yamlutils as yaml -from rally import consts -from rally import exceptions -from rally import plugins -from rally.task import atomic -from rally.task.processing import charts -from rally.task.processing import plot -from rally.task.processing import utils as putils -from rally.task import utils as tutils - - -LOG = logging.getLogger(__name__) - - -class FailedToLoadTask(exceptions.RallyException): - error_code = 472 - msg_fmt = _("Invalid %(source)s passed:\n\n\t %(msg)s") - - -class FailedToLoadResults(exceptions.RallyException): - error_code = 529 - msg_fmt = _("ERROR: Invalid task result format in %(source)s\n\n\t%(msg)s") - - -class TaskCommands(object): - """Set of commands that allow you to manage benchmarking tasks and results. - - """ - - def _load_and_validate_task(self, api, task_file, args_file=None, - raw_args=None): - """Load, render and validate tasks template from file with passed args. - - :param task_file: Path to file with input task - :param raw_args: JSON or YAML representation of dict with args that - will be used to render input task with jinja2 - :param args_file: Path to file with JSON or YAML representation - of dict, that will be used to render input with jinja2. If both - specified task_args and task_args_file they will be merged. - raw_args has bigger priority so it will update values - from args_file. - :returns: Str with loaded and rendered task - """ - - print(cliutils.make_header("Preparing input task")) - - if not os.path.isfile(task_file): - raise FailedToLoadTask(source="--task", - msg="File '%s' doesn't exist." % task_file) - with open(task_file) as f: - input_task = f.read() - task_dir = os.path.expanduser(os.path.dirname(task_file)) or "./" - - task_args = {} - if args_file: - if not os.path.isfile(args_file): - raise FailedToLoadTask( - source="--task-args-file", - msg="File '%s' doesn't exist." % args_file) - with open(args_file) as f: - try: - task_args.update(yaml.safe_load(f.read())) - except yaml.ParserError as e: - raise FailedToLoadTask( - source="--task-args-file", - msg="File '%s' has to be YAML or JSON. Details:\n\n%s" - % (args_file, e)) - if raw_args: - try: - data = yaml.safe_load(raw_args) - if isinstance(data, (six.text_type, six.string_types)): - raise yaml.ParserError("String '%s' doesn't look like a " - "dictionary." 
% raw_args) - task_args.update(data) - except yaml.ParserError as e: - args = [keypair.split("=", 1) - for keypair in raw_args.split(",")] - if len([a for a in args if len(a) != 1]) != len(args): - raise FailedToLoadTask( - source="--task-args", - msg="Value has to be YAML or JSON. Details:\n\n%s" % e) - else: - task_args.update(dict(args)) - - try: - rendered_task = api.task.render_template(task_template=input_task, - template_dir=task_dir, - **task_args) - except Exception as e: - raise FailedToLoadTask( - source="--task", - msg="Failed to render task template.\n\n%s" % e) - - print(_("Task is:\n%s\n") % rendered_task.strip()) - try: - parsed_task = yaml.safe_load(rendered_task) - except Exception as e: - raise FailedToLoadTask( - source="--task", - msg="Wrong format of rendered input task. It should be YAML or" - " JSON. Details:\n\n%s" % e) - - print(_("Task syntax is correct :)")) - return parsed_task - - @cliutils.args("--deployment", dest="deployment", type=str, - metavar="", required=False, - help="UUID or name of a deployment.") - @cliutils.args("--task", "--filename", metavar="", - dest="task_file", - help="Path to the input task file.") - @cliutils.args("--task-args", metavar="", dest="task_args", - help="Input task args (JSON dict). These args are used " - "to render the Jinja2 template in the input task.") - @cliutils.args("--task-args-file", metavar="", dest="task_args_file", - help="Path to the file with input task args (dict in " - "JSON/YAML). These args are used " - "to render the Jinja2 template in the input task.") - @envutils.with_default_deployment(cli_arg_name="deployment") - @plugins.ensure_plugins_are_loaded - def validate(self, api, task_file, deployment=None, task_args=None, - task_args_file=None): - """Validate a task configuration file. - - This will check that task configuration file has valid syntax and - all required options of scenarios, contexts, SLA and runners are set. - - If both task_args and task_args_file are specified, they will - be merged. task_args has a higher priority so it will override - values from task_args_file. - - :param task_file: Path to the input task file. - :param task_args: Input task args (JSON dict). These args are - used to render the Jinja2 template in the - input task. - :param task_args_file: Path to the file with input task args - (dict in JSON/YAML). These args are - used to render the Jinja2 template in - the input task. - :param deployment: UUID or name of the deployment - """ - - task = self._load_and_validate_task(api, task_file, raw_args=task_args, - args_file=task_args_file) - - api.task.validate(deployment=deployment, config=task) - - print(_("Task config is valid :)")) - - @cliutils.args("--deployment", dest="deployment", type=str, - metavar="", required=False, - help="UUID or name of a deployment.") - @cliutils.args("--task", "--filename", metavar="", - dest="task_file", - help="Path to the input task file.") - @cliutils.args("--task-args", dest="task_args", metavar="", - help="Input task args (JSON dict). These args are used " - "to render the Jinja2 template in the input task.") - @cliutils.args("--task-args-file", dest="task_args_file", metavar="", - help="Path to the file with input task args (dict in " - "JSON/YAML). 
These args are used " - "to render the Jinja2 template in the input task.") - @cliutils.args("--tag", nargs="+", dest="tags", type=str, required=False, - help="Mark the task with a tag or a few tags.") - @cliutils.args("--no-use", action="store_false", dest="do_use", - help="Don't set new task as default for future operations.") - @cliutils.args("--abort-on-sla-failure", action="store_true", - dest="abort_on_sla_failure", - help="Abort the execution of a benchmark scenario when" - "any SLA check for it fails.") - @envutils.with_default_deployment(cli_arg_name="deployment") - @plugins.ensure_plugins_are_loaded - def start(self, api, task_file, deployment=None, task_args=None, - task_args_file=None, tags=None, do_use=False, - abort_on_sla_failure=False): - """Start benchmark task. - - If both task_args and task_args_file are specified, they will - be merged. task_args has a higher priority so it will override - values from task_args_file. - - :param task_file: Path to the input task file. - :param task_args: Input task args (JSON dict). These args are - used to render the Jinja2 template in the - input task. - :param task_args_file: Path to the file with input task args - (dict in JSON/YAML). These args are - used to render the Jinja2 template in - the input task. - :param deployment: UUID or name of the deployment - :param tags: optional tag for this task - :param do_use: if True, the new task will be stored as the default one - for future operations - :param abort_on_sla_failure: if True, the execution of a benchmark - scenario will stop when any SLA check - for it fails - """ - input_task = self._load_and_validate_task(api, task_file, - raw_args=task_args, - args_file=task_args_file) - print("Running Rally version", version.version_string()) - - try: - task_instance = api.task.create(deployment=deployment, tags=tags) - tags = "[tags: '%s']" % "', '".join(tags) if tags else "" - - print(cliutils.make_header( - _("Task %(tags)s %(uuid)s: started") - % {"uuid": task_instance["uuid"], "tags": tags})) - print("Benchmarking... This can take a while...\n") - print("To track task status use:\n") - print("\trally task status\n\tor\n\trally task detailed\n") - - if do_use: - self.use(api, task_instance["uuid"]) - - api.task.start(deployment=deployment, config=input_task, - task=task_instance["uuid"], - abort_on_sla_failure=abort_on_sla_failure) - - except exceptions.DeploymentNotFinishedStatus as e: - print(_("Cannot start a task on unfinished deployment: %s") % e) - return 1 - - self.detailed(api, task_id=task_instance["uuid"]) - - @cliutils.args("--uuid", type=str, dest="task_id", help="UUID of task.") - @envutils.with_default_task_id - @cliutils.args( - "--soft", action="store_true", - help="Abort task after current scenario finishes execution.") - def abort(self, api, task_id=None, soft=False): - """Abort a running benchmarking task. - - :param task_id: Task uuid - :param soft: if set to True, task should be aborted after execution of - current scenario - """ - if soft: - print("INFO: please be informed that soft abort won't stop " - "a running scenario, but will prevent new ones from " - "starting. If you are running task with only one " - "scenario, soft abort will not help at all.") - - api.task.abort(task_uuid=task_id, soft=soft, async=False) - - print("Task %s successfully stopped." % task_id) - - @cliutils.args("--uuid", type=str, dest="task_id", help="UUID of task") - @envutils.with_default_task_id - def status(self, api, task_id=None): - """Display the current status of a task. 
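The start command above reduces to a create-then-start pair on the Python API. A minimal sketch follows, with an invented deployment name and the trivial Dummy.dummy scenario as the task body:

```python
from rally import api

rapi = api.API(config_args=[])

input_task = {
    "Dummy.dummy": [{
        "runner": {"type": "constant", "times": 1, "concurrency": 1},
    }],
}

task = rapi.task.create(deployment="devstack", tags=["smoke"])
rapi.task.start(deployment="devstack",
                config=input_task,
                task=task["uuid"],
                abort_on_sla_failure=False)
```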
-
-        :param task_id: Task uuid
-
-        Returns the current status of the task.
-        """
-
-        task = api.task.get(task_id=task_id)
-        print(_("Task %(task_id)s: %(status)s")
-              % {"task_id": task_id, "status": task["status"]})
-
-    @cliutils.args("--uuid", type=str, dest="task_id",
-                   help=("UUID of task. If --uuid is \"last\", the results of "
-                         "the most recently created task will be displayed."))
-    @cliutils.args("--iterations-data", dest="iterations_data",
-                   action="store_true",
-                   help="Print detailed results for each iteration.")
-    @envutils.with_default_task_id
-    def detailed(self, api, task_id=None, iterations_data=False):
-        """Print detailed information about a given task.
-
-        :param task_id: str, task uuid
-        :param iterations_data: bool, include results for each iteration
-        """
-        task = api.task.get(task_id=task_id, detailed=True)
-
-        if not task:
-            print("The task %s cannot be found" % task_id)
-            return 1
-
-        print()
-        print("-" * 80)
-        print(_("Task %(task_id)s: %(status)s")
-              % {"task_id": task_id, "status": task["status"]})
-
-        if task["status"] == consts.TaskStatus.CRASHED or task["status"] == (
-                consts.TaskStatus.VALIDATION_FAILED):
-            print("-" * 80)
-            validation = task["validation_result"]
-            if logging.is_debug():
-                print(yaml.safe_load(validation["trace"]))
-            else:
-                print(validation["etype"])
-                print(validation["msg"])
-            print(_("\nFor more details run:\nrally -d task detailed %s")
-                  % task["uuid"])
-            return 0
-        elif task["status"] not in [consts.TaskStatus.FINISHED,
-                                    consts.TaskStatus.ABORTED]:
-            print("-" * 80)
-            print(_("\nThe task %s is marked as '%s'. Results are "
-                    "available when it is '%s'.") % (
-                task_id, task["status"], consts.TaskStatus.FINISHED))
-            return 0
-
-        for workload in itertools.chain(
-                *[s["workloads"] for s in task["subtasks"]]):
-            print("-" * 80)
-            print()
-            print("test scenario %s" % workload["name"])
-            print("args position %s" % workload["position"])
-            print("args values:")
-            print(json.dumps(
-                {"args": workload["args"],
-                 "runner": workload["runner"],
-                 "context": workload["context"],
-                 "sla": workload["sla"],
-                 "hooks": [r["config"] for r in workload["hooks"]]},
-                indent=2))
-            print()
-
-            iterations = []
-            iterations_headers = ["iteration", "duration"]
-            iterations_actions = []
-            output = []
-            task_errors = []
-            if iterations_data:
-                atomic_merger = putils.AtomicMerger(
-                    workload["statistics"]["atomics"])
-                atomic_names = atomic_merger.get_merged_names()
-                for i, atomic_name in enumerate(atomic_names, 1):
-                    action = "%i. 
%s" % (i, atomic_name) - iterations_headers.append(action) - iterations_actions.append((atomic_name, action)) - - for idx, itr in enumerate(workload["data"], 1): - - if iterations_data: - row = {"iteration": idx, "duration": itr["duration"]} - for name, action in iterations_actions: - atomic_actions = ( - atomic_merger.merge_atomic_actions( - itr["atomic_actions"])) - row[action] = atomic_actions.get(name, 0) - iterations.append(row) - - if "output" in itr: - iteration_output = itr["output"] - else: - iteration_output = {"additive": [], "complete": []} - - # NOTE(amaretskiy): "scenario_output" is supported - # for backward compatibility - if ("scenario_output" in itr - and itr["scenario_output"]["data"]): - iteration_output["additive"].append( - {"data": itr["scenario_output"]["data"].items(), - "title": "Scenario output", - "description": "", - "chart_plugin": "StackedArea"}) - - for idx, additive in enumerate(iteration_output["additive"]): - if len(output) <= idx + 1: - output_table = plot.charts.OutputStatsTable( - workload, title=additive["title"]) - output.append(output_table) - output[idx].add_iteration(additive["data"]) - - if itr.get("error"): - task_errors.append(TaskCommands._format_task_error(itr)) - - self._print_task_errors(task_id, task_errors) - - cols = plot.charts.MainStatsTable.columns - duration_stats = workload["statistics"]["durations"] - formatters = { - "Action": lambda x: x["name"], - "Min (sec)": cliutils.pretty_float_formatter("min", 3), - "Median (sec)": cliutils.pretty_float_formatter("median", 3), - "90%ile (sec)": cliutils.pretty_float_formatter("90%ile", 3), - "95%ile (sec)": cliutils.pretty_float_formatter("95%ile", 3), - "Max (sec)": cliutils.pretty_float_formatter("max", 3), - "Avg (sec)": cliutils.pretty_float_formatter("avg", 3) - } - rows = duration_stats["atomics"] - rows.append(duration_stats["total"]) - cliutils.print_list(rows, - fields=cols, - formatters=formatters, - normalize_field_names=True, - table_label="Response Times (sec)", - sortby_index=None) - print() - - if iterations_data: - formatters = dict(zip(iterations_headers[1:], - [cliutils.pretty_float_formatter(col, 3) - for col in iterations_headers[1:]])) - cliutils.print_list(iterations, - fields=iterations_headers, - table_label="Atomics per iteration", - formatters=formatters) - print() - - if output: - cols = plot.charts.OutputStatsTable.columns - float_cols = cols[1:7] - formatters = dict(zip(float_cols, - [cliutils.pretty_float_formatter(col, 3) - for col in float_cols])) - - for out in output: - data = out.render() - rows = [dict(zip(cols, r)) for r in data["data"]["rows"]] - if rows: - # NOTE(amaretskiy): print title explicitly because - # prettytable fails if title length is too long - print(data["title"]) - cliutils.print_list(rows, fields=cols, - formatters=formatters) - print() - - print(_("Load duration: %s") % rutils.format_float_to_str( - workload["load_duration"])) - print(_("Full duration: %s") % rutils.format_float_to_str( - workload["full_duration"])) - - print("\nHINTS:") - print(_("* To plot HTML graphics with this data, run:")) - print("\trally task report %s --out output.html\n" % task["uuid"]) - print(_("* To generate a JUnit report, run:")) - print("\trally task export %s --type junit --to output.xml\n" % - task["uuid"]) - print(_("* To get raw JSON output of task results, run:")) - print("\trally task results %s\n" % task["uuid"]) - - @cliutils.args("--uuid", type=str, dest="task_id", help="UUID of task.") - @envutils.with_default_task_id - 
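-    # NOTE: an illustrative sketch (all values invented) of the old-format
-    # JSON that the `results` command below prints; the keys mirror the
-    # `results` list built in the method body:
-    #   [{"key": {"name": "Dummy.dummy", "description": "", "pos": 0,
-    #             "kw": {"args": {}, "runner": {}, "context": {},
-    #                    "sla": {}, "hooks": []}},
-    #     "result": [...], "sla": [], "hooks": [],
-    #     "load_duration": 2.5, "full_duration": 3.1,
-    #     "created_at": "2017-01-01T00:00:00"}]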
@cliutils.suppress_warnings - def results(self, api, task_id=None): - """Display raw task results. - - This will produce a lot of output data about every iteration. - - :param task_id: Task uuid - """ - task = api.task.get(task_id=task_id, detailed=True) - finished_statuses = (consts.TaskStatus.FINISHED, - consts.TaskStatus.ABORTED) - if task["status"] not in finished_statuses: - print(_("Task status is %s. Results available when it is one " - "of %s.") % (task["status"], ", ".join(finished_statuses))) - return 1 - - # TODO(chenhb): Ensure `rally task results` puts out old format. - for workload in itertools.chain( - *[s["workloads"] for s in task["subtasks"]]): - for itr in workload["data"]: - itr["atomic_actions"] = collections.OrderedDict( - tutils.WrapperForAtomicActions( - itr["atomic_actions"]).items() - ) - - results = [ - { - "key": { - "name": w["name"], - "description": w["description"], - "pos": w["position"], - "kw": { - "args": w["args"], - "runner": w["runner"], - "context": w["context"], - "sla": w["sla"], - "hooks": [r["config"] for r in w["hooks"]], - } - }, - "result": w["data"], - "sla": w["sla_results"].get("sla", []), - "hooks": w["hooks"], - "load_duration": w["load_duration"], - "full_duration": w["full_duration"], - "created_at": w["created_at"]} - for w in itertools.chain( - *[s["workloads"] for s in task["subtasks"]])] - - print(json.dumps(results, sort_keys=False, indent=4)) - - @cliutils.args("--deployment", dest="deployment", type=str, - metavar="", required=False, - help="UUID or name of a deployment.") - @cliutils.args("--all-deployments", action="store_true", - dest="all_deployments", - help="List tasks from all deployments.") - @cliutils.args("--status", type=str, dest="status", - help="List tasks with specified status." - " Available statuses: %s" % ", ".join(consts.TaskStatus)) - @cliutils.args("--tag", nargs="+", dest="tags", type=str, required=False, - help="Tags to filter tasks by.") - @cliutils.args("--uuids-only", action="store_true", - dest="uuids_only", help="List task UUIDs only.") - @envutils.with_default_deployment(cli_arg_name="deployment") - def list(self, api, deployment=None, all_deployments=False, status=None, - tags=None, uuids_only=False): - """List tasks, started and finished. - - Displayed tasks can be filtered by status or deployment. By - default 'rally task list' will display tasks from the active - deployment without filtering by status. - - :param deployment: UUID or name of deployment - :param status: task status to filter by. 
- Available task statuses are in rally.consts.TaskStatus - :param all_deployments: display tasks from all deployments - :param uuids_only: list task UUIDs only - """ - - filters = {} - headers = ["UUID", "Deployment name", "Created at", "Load duration", - "Status", "Tag(s)"] - - if status in consts.TaskStatus: - filters["status"] = status - elif status: - print(_("Error: Invalid task status '%s'.\n" - "Available statuses: %s") % ( - status, ", ".join(consts.TaskStatus)), - file=sys.stderr) - return(1) - - if not all_deployments: - filters["deployment"] = deployment - - if tags: - filters["tags"] = tags - - task_list = api.task.list(**filters) - - if uuids_only: - if task_list: - print("\n".join([t["uuid"] for t in task_list])) - elif task_list: - def tags_formatter(t): - if not t["tags"]: - return "" - return "'%s'" % "', '".join(t["tags"]) - - formatters = { - "Tag(s)": tags_formatter, - "Load duration": cliutils.pretty_float_formatter( - "task_duration", 3), - "Created at": lambda t: t["created_at"].replace("T", " ") - } - - cliutils.print_list( - task_list, fields=headers, normalize_field_names=True, - sortby_index=headers.index("Created at"), - formatters=formatters) - else: - if status: - print(_("There are no tasks in '%s' status. " - "To run a new task, use:\n" - "\trally task start") % status) - else: - print(_("There are no tasks. To run a new task, use:\n" - "\trally task start")) - - def _load_task_results_file(self, api, task_id): - """Load the json file which is created by `rally task results` """ - with open(os.path.expanduser(task_id)) as inp_js: - tasks_results = yaml.safe_load(inp_js) - - if type(tasks_results) == list: - # it is an old format: - - task = {"subtasks": []} - - start_time = None - - for result in tasks_results: - try: - jsonschema.validate( - result, api.task.TASK_RESULT_SCHEMA) - except jsonschema.ValidationError as e: - raise FailedToLoadResults(source=task_id, - msg=six.text_type(e)) - - iter_count = 0 - failed_iter_count = 0 - min_duration = None - max_duration = None - - atomics = collections.OrderedDict() - - for itr in result["result"]: - if start_time is None or itr["timestamp"] < start_time: - start_time = itr["timestamp"] - # NOTE(chenhb): back compatible for atomic_actions - itr["atomic_actions"] = list( - tutils.WrapperForAtomicActions(itr["atomic_actions"], - itr["timestamp"])) - - iter_count += 1 - if itr.get("error"): - failed_iter_count += 1 - - duration = itr.get("duration", 0) - - if max_duration is None or duration > max_duration: - max_duration = duration - - if min_duration is None or min_duration > duration: - min_duration = duration - - merged_atomic = atomic.merge_atomic(itr["atomic_actions"]) - for key, value in merged_atomic.items(): - duration = value["duration"] - count = value["count"] - if key not in atomics or count > atomics[key]["count"]: - atomics[key] = {"min_duration": duration, - "max_duration": duration, - "count": count} - elif count == atomics[key]["count"]: - if duration < atomics[key]["min_duration"]: - atomics[key]["min_duration"] = duration - if duration > atomics[key]["max_duration"]: - atomics[key]["max_duration"] = duration - - durations_stat = charts.MainStatsTable( - {"total_iteration_count": iter_count, - "statistics": {"atomics": atomics}}) - - for itr in result["result"]: - durations_stat.add_iteration(itr) - - updated_at = dt.datetime.strptime(result["created_at"], - "%Y-%m-%dT%H:%M:%S") - updated_at += dt.timedelta(seconds=result["full_duration"]) - updated_at = 
updated_at.strftime(consts.TimeFormat.ISO8601) - pass_sla = all(s.get("success") for s in result["sla"]) - workload = {"name": result["key"]["name"], - "position": result["key"]["pos"], - "description": result["key"].get("description", - ""), - "full_duration": result["full_duration"], - "load_duration": result["load_duration"], - "total_iteration_count": iter_count, - "failed_iteration_count": failed_iter_count, - "min_duration": min_duration, - "max_duration": max_duration, - "start_time": start_time, - "created_at": result["created_at"], - "updated_at": updated_at, - "args": result["key"]["kw"]["args"], - "runner": result["key"]["kw"]["runner"], - "hooks": [{"config": h} - for h in result["key"]["kw"]["hooks"]], - "sla": result["key"]["kw"]["sla"], - "sla_results": {"sla": result["sla"]}, - "pass_sla": pass_sla, - "context": result["key"]["kw"]["context"], - "data": sorted(result["result"], - key=lambda x: x["timestamp"]), - "statistics": { - "durations": durations_stat.to_dict(), - "atomics": atomics}, - } - task["subtasks"].append({"workloads": [workload]}) - return task - else: - raise FailedToLoadResults( - source=task_id, msg="Wrong format") - - @cliutils.args("--out", metavar="", - type=str, dest="out", required=False, - help="Path to output file.") - @cliutils.args("--open", dest="open_it", action="store_true", - help="Open the output in a browser.") - @cliutils.args("--tasks", dest="tasks", nargs="+", - help="UUIDs of tasks, or JSON files with task results") - @cliutils.suppress_warnings - def trends(self, api, *args, **kwargs): - """Generate workloads trends HTML report.""" - tasks = kwargs.get("tasks", []) or list(args) - - if not tasks: - print(_("ERROR: At least one task must be specified"), - file=sys.stderr) - return 1 - - results = [] - for task_id in tasks: - if os.path.exists(os.path.expanduser(task_id)): - task_results = self._load_task_results_file(api, task_id) - elif uuidutils.is_uuid_like(task_id): - task_results = api.task.get(task_id=task_id, detailed=True) - else: - print(_("ERROR: Invalid UUID or file name passed: %s") - % task_id, file=sys.stderr) - return 1 - - results.append(task_results) - - result = plot.trends(results) - - out = kwargs.get("out") - if out: - output_file = os.path.expanduser(out) - - with open(output_file, "w+") as f: - f.write(result) - if kwargs.get("open_it"): - webbrowser.open_new_tab("file://" + os.path.realpath(out)) - else: - print(result) - - @cliutils.deprecated_args("--tasks", dest="task_id", nargs="+", - release="0.10.0", alternative="--uuid") - @cliutils.args("--out", metavar="", - type=str, dest="out", required=False, - help="Report destination. Can be a path to a file (in case" - " of HTML, HTML-STATIC, etc. 
types) to save the"
-                        " report to or a connection string.")
-    @cliutils.args("--open", dest="open_it", action="store_true",
-                   help="Open the output in a browser.")
-    @cliutils.args("--html", dest="out_format",
-                   action="store_const", const="html")
-    @cliutils.args("--html-static", dest="out_format",
-                   action="store_const", const="html-static")
-    @cliutils.deprecated_args("--junit", dest="out_format",
-                              action="store_const", const="junit-xml",
-                              release="0.10.0",
-                              alternative=("rally task export "
-                                           "--type junit-xml"))
-    @cliutils.args("--uuid", dest="task_id", nargs="+", type=str,
-                   help="UUIDs of tasks")
-    @envutils.with_default_task_id
-    @cliutils.suppress_warnings
-    def report(self, api, task_id=None, out=None,
-               open_it=False, out_format="html"):
-        """Generate a report file or string for the specified task."""
-
-        if [task for task in task_id if os.path.exists(
-                os.path.expanduser(task))]:
-            self._old_report(api, tasks=task_id, out=out,
-                             open_it=open_it, out_format=out_format)
-        else:
-            self.export(api, task_id=task_id,
-                        output_type=out_format,
-                        output_dest=out,
-                        open_it=open_it)
-
-    def _old_report(self, api, tasks=None, out=None, open_it=False,
-                    out_format="html"):
-        """Generate a report file for the specified task.
-
-        :param tasks: list, UUIDs of tasks or paths to files with task
-                      results
-        :param out: str, output file name
-        :param open_it: bool, whether to open the output file in a web browser
-        :param out_format: output format (junit-xml, html or html-static)
-        """
-
-        tasks = tasks if isinstance(tasks, list) else [tasks]
-
-        results = []
-        processed_names = {}
-        for task_file_or_uuid in tasks:
-            if os.path.exists(os.path.expanduser(task_file_or_uuid)):
-                task = self._load_task_results_file(api, task_file_or_uuid)
-            elif uuidutils.is_uuid_like(task_file_or_uuid):
-                task = api.task.get(task_id=task_file_or_uuid, detailed=True)
-            else:
-                print(_("ERROR: Invalid UUID or file name passed: %s"
-                        ) % task_file_or_uuid,
-                      file=sys.stderr)
-                return 1
-
-            for workload in itertools.chain(
-                    *[s["workloads"] for s in task["subtasks"]]):
-                if workload["name"] in processed_names:
-                    processed_names[workload["name"]] += 1
-                    workload["position"] = processed_names[workload["name"]]
-                else:
-                    processed_names[workload["name"]] = 0
-            results.append(task)
-
-        if out_format.startswith("html"):
-            result = plot.plot(results,
-                               include_libs=(out_format == "html-static"))
-        elif out_format == "junit-xml":
-            test_suite = junit.JUnit("Rally test suite")
-            for task in results:
-                for workload in itertools.chain(
-                        *[s["workloads"] for s in task["subtasks"]]):
-                    message = ""
-                    w_sla = workload["sla_results"].get("sla", [])
-                    if w_sla:
-                        message = ",".join([sla["detail"] for sla in w_sla
-                                            if not sla["success"]])
-                    if message:
-                        outcome = junit.JUnit.FAILURE
-                    else:
-                        outcome = junit.JUnit.SUCCESS
-                    test_suite.add_test(workload["name"],
-                                        workload["full_duration"], outcome,
-                                        message)
-            result = test_suite.to_xml()
-        else:
-            print(_("Invalid output format: %s") % out_format, file=sys.stderr)
-            return 1
-
-        if out:
-            output_file = os.path.expanduser(out)
-
-            with open(output_file, "w+") as f:
-                f.write(result)
-            if open_it:
-                webbrowser.open_new_tab("file://" + os.path.realpath(out))
-        else:
-            print(result)
-
-    @cliutils.args("--force", action="store_true", help="Force delete.")
-    @cliutils.args("--uuid", type=str, dest="task_id", nargs="*",
-                   metavar="",
-                   help="UUID of task or a list of task UUIDs.")
-    @envutils.with_default_task_id
-    def delete(self, api, task_id=None, force=False):
-        """Delete a task and its results.
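-
-        A minimal usage sketch (UUIDs are illustrative); several UUIDs can be
-        passed at once, and --force deletes tasks in a vague state:
-
-            $ rally task delete --uuid 6fd9a19f-5cf9-4f4e-9b2e-1b2c3d4e5f60
-            $ rally task delete --force --uuid UUID-1 UUID-2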
- - :param task_id: Task uuid or a list of task uuids - :param force: Force delete or not - """ - def _delete_single_task(tid, force): - try: - api.task.delete(task_uuid=tid, force=force) - print("Successfully deleted task `%s`" % tid) - except exceptions.TaskInvalidStatus as e: - print(e) - print("Use '--force' option to delete the task with vague " - "state.") - - if isinstance(task_id, list): - for tid in task_id: - _delete_single_task(tid, force) - else: - _delete_single_task(task_id, force) - - @cliutils.args("--uuid", type=str, dest="task_id", help="UUID of task.") - @cliutils.args("--json", dest="tojson", - action="store_true", - help="Output in JSON format.") - @envutils.with_default_task_id - @cliutils.alias("sla_check") - def sla_check_deprecated(self, api, task_id=None, tojson=False): - """DEPRECATED since Rally 0.8.0, use `rally task sla-check` instead.""" - return self.sla_check(api, task_id=task_id, tojson=tojson) - - @cliutils.args("--uuid", type=str, dest="task_id", help="UUID of task.") - @cliutils.args("--json", dest="tojson", - action="store_true", - help="Output in JSON format.") - @envutils.with_default_task_id - def sla_check(self, api, task_id=None, tojson=False): - """Display SLA check results table. - - :param task_id: Task uuid. - :returns: Number of failed criteria. - """ - task = api.task.get(task_id=task_id, detailed=True) - failed_criteria = 0 - data = [] - STATUS_PASS = "PASS" - STATUS_FAIL = "FAIL" - for workload in itertools.chain( - *[s["workloads"] for s in task["subtasks"]]): - for sla in sorted(workload["sla_results"].get("sla", []), - key=lambda x: x["criterion"]): - success = sla.pop("success") - sla["status"] = success and STATUS_PASS or STATUS_FAIL - sla["benchmark"] = workload["name"] - sla["pos"] = workload["position"] - failed_criteria += int(not success) - data.append(sla if tojson else rutils.Struct(**sla)) - if tojson: - print(json.dumps(data, sort_keys=False)) - else: - cliutils.print_list(data, ("benchmark", "pos", "criterion", - "status", "detail")) - return failed_criteria - - @cliutils.args("--uuid", type=str, dest="task_id", - help="UUID of the task") - @cliutils.deprecated_args("--task", dest="task_id", type=str, - release="0.2.0", alternative="--uuid") - def use(self, api, task_id): - """Set active task. - - :param task_id: Task uuid. - """ - print("Using task: %s" % task_id) - api.task.get(task_id=task_id) - fileutils.update_globals_file("RALLY_TASK", task_id) - - @cliutils.args("--uuid", dest="task_id", nargs="+", type=str, - help="UUIDs of tasks") - @cliutils.args("--type", dest="output_type", type=str, - required=True, - help="Report type (Defaults to HTML). Out-of-the-box " - "types: HTML, HTML-Static, JUnit-XML. " - "HINT: You can list all types, executing `rally " - "plugin list --plugin-base TaskExporter` " - "command.") - @cliutils.args("--to", dest="output_dest", type=str, - metavar="", required=False, - help="Report destination. Can be a path to a file (in case" - " of HTML, HTML-Static, JUnit-XML, etc. types) to" - " save the report to or a connection string." - " It depends on the report type." - ) - @envutils.with_default_task_id - @plugins.ensure_plugins_are_loaded - def export(self, api, task_id=None, output_type=None, output_dest=None, - open_it=False): - """Export task results to the custom task's exporting system. 
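-
-        A minimal usage sketch (the UUID and path are illustrative):
-
-            $ rally task export --uuid 6fd9a19f-5cf9-4f4e-9b2e-1b2c3d4e5f60 \
-                --type junit-xml --to output.xml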
-
-        :param task_id: UUID of the task
-        :param output_type: str, report type (html, html-static, junit-xml,
-                            etc.)
-        :param output_dest: report destination; a path to a file or a
-                            connection string, depending on the report type
-        """
-        task_id = task_id if isinstance(task_id, list) else [task_id]
-        report = api.task.export(tasks_uuids=task_id,
-                                 output_type=output_type,
-                                 output_dest=output_dest)
-        if "files" in report:
-            for path in report["files"]:
-                output_file = os.path.expanduser(path)
-                with open(output_file, "w+") as f:
-                    f.write(report["files"][path])
-            if open_it:
-                if "open" in report:
-                    webbrowser.open_new_tab(report["open"])
-
-        if "print" in report:
-            print(report["print"])
-
-    @staticmethod
-    def _print_task_errors(task_id, task_errors):
-        print(cliutils.make_header("Task %s has %d error(s)" %
-                                   (task_id, len(task_errors))))
-        for err_data in task_errors:
-            print(*err_data, sep="\n")
-            print("-" * 80)
-
-    @staticmethod
-    def _format_task_error(data):
-        error_type = _("Unknown type")
-        error_message = _("Rally hasn't caught anything yet")
-        error_traceback = _("No traceback available.")
-        try:
-            error_type = data["error"][0]
-            error_message = data["error"][1]
-            error_traceback = data["error"][2]
-        except IndexError:
-            pass
-        return ("%(error_type)s: %(error_message)s\n" %
-                {"error_type": error_type, "error_message": error_message},
-                error_traceback)
-
-    @cliutils.args("--file", dest="task_file", type=str, metavar="",
-                   required=True, help="JSON file with task results.")
-    @cliutils.args("--deployment", dest="deployment", type=str,
-                   metavar="", required=False,
-                   help="UUID or name of a deployment.")
-    @cliutils.args("--tag", nargs="+", dest="tags", type=str, required=False,
-                   help="Mark the task with a tag or a few tags.")
-    @envutils.with_default_deployment(cli_arg_name="deployment")
-    @cliutils.alias("import")
-    @cliutils.suppress_warnings
-    def import_results(self, api, deployment=None, task_file=None, tags=None):
-        """Import JSON results of a test run into the Rally database.
-
-        :param task_file: path to a file with task results
-        :param deployment: UUID or name of the deployment
-        :param tags: optional tag(s) for this task
-        """
-
-        if os.path.exists(os.path.expanduser(task_file)):
-            tasks_results = self._load_task_results_file(api, task_file)
-            task = api.task.import_results(deployment=deployment,
-                                           task_results=tasks_results,
-                                           tags=tags)
-            print(_("Task UUID: %s.") % task["uuid"])
-        else:
-            print(_("ERROR: Invalid file name passed: %s") % task_file,
-                  file=sys.stderr)
-            return 1
diff --git a/rally/cli/commands/verify.py b/rally/cli/commands/verify.py
deleted file mode 100644
index bf6a12f6..00000000
--- a/rally/cli/commands/verify.py
+++ /dev/null
@@ -1,809 +0,0 @@
-# Copyright 2014: Mirantis Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
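-# NOTE: an illustrative end-to-end workflow built from the commands defined
-# in this module (the verifier name and report path are examples, not
-# defaults):
-#   rally verify create-verifier --name my-tempest --type tempest
-#   rally verify start --pattern set=smoke
-#   rally verify report --type html --to ./report.html --open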
- -"""Rally command: verify""" - -from __future__ import print_function - -import datetime as dt -import json -import os -import webbrowser - -from six.moves import configparser - -from rally.cli import cliutils -from rally.cli import envutils -from rally.common import fileutils -from rally.common.i18n import _ -from rally.common import logging -from rally.common import yamlutils as yaml -from rally import exceptions -from rally import plugins - -TIME_FORMAT = "%Y-%m-%dT%H:%M:%S" - -LIST_VERIFIERS_HINT = ("HINT: You can list all verifiers, executing " - "command `rally verify list-verifiers`.") -LIST_DEPLOYMENTS_HINT = ("HINT: You can list all deployments, executing " - "command `rally deployment list`.") -LIST_VERIFICATIONS_HINT = ("HINT: You can list all verifications, executing " - "command `rally verify list`.") - -DEFAULT_REPORT_TYPES = ("HTML", "HTML-Static", "JSON", "JUnit-XML") - - -class VerifyCommands(object): - """Verify an OpenStack cloud via a verifier.""" - - @staticmethod - def _print_totals(totals): - print("\n======\n" - "Totals" - "\n======\n" - "\nRan: %(tests_count)s tests in %(tests_duration)s sec.\n" - " - Success: %(success)s\n" - " - Skipped: %(skipped)s\n" - " - Expected failures: %(expected_failures)s\n" - " - Unexpected success: %(unexpected_success)s\n" - " - Failures: %(failures)s\n" % totals) - - @staticmethod - def _print_failures(h_text, failures, symbol="-"): - print("\n%s" % cliutils.make_header( - h_text, size=len(h_text), symbol=symbol).strip()) - for f in failures: - header = "%s\n%s\n" % (f["name"], "-" * len(f["name"])) - failure = "\n%s%s\n" % (header, f["traceback"].strip()) - print(failure) - - def _print_details_after_run(self, results): - failures = [t for t in results["tests"].values() - if t["status"] == "fail"] - if failures: - h_text = "Failed %d %s - output below:" % ( - len(failures), "tests" if len(failures) > 1 else "test") - self._print_failures(h_text, failures, "=") - else: - print(_("\nCongratulations! Verification doesn't have failed " - "tests! :)")) - - @staticmethod - def _base_dir(uuid): - return os.path.expanduser( - "~/.rally/verification/verifier-%s" % uuid) - - def _get_location(self, uuid, loc): - return os.path.join(self._base_dir(uuid), loc) - - @cliutils.args("--namespace", dest="namespace", type=str, metavar="", - required=False, - help="Namespace name (for example, openstack).") - @plugins.ensure_plugins_are_loaded - def list_plugins(self, api, namespace=None): - """List all plugins for verifiers management.""" - if namespace: - namespace = namespace.lower() - verifier_plugins = api.verifier.list_plugins(namespace=namespace) - - fields = ["Plugin name", "Namespace", "Description"] - if logging.is_debug(): - fields.append("Location") - - cliutils.print_list(verifier_plugins, fields, - formatters={"Plugin name": lambda p: p["name"]}, - normalize_field_names=True) - - @cliutils.help_group("verifier") - @cliutils.args("--name", dest="name", type=str, required=True, - help="Verifier name (for example, 'My verifier').") - @cliutils.args("--type", dest="vtype", type=str, required=True, - help="Verifier plugin name. HINT: You can list all " - "verifier plugins, executing command `rally verify " - "list-plugins`.") - @cliutils.args("--namespace", dest="namespace", type=str, metavar="", - required=False, - help="Verifier plugin namespace. 
Should be specified in " - "case of two verifier plugins with equal names but " - "in different namespaces.") - @cliutils.args("--source", dest="source", type=str, required=False, - help="Path or URL to the repo to clone verifier from.") - @cliutils.args("--version", dest="version", type=str, required=False, - help="Branch, tag or commit ID to checkout before " - "verifier installation (the 'master' branch is used " - "by default).") - @cliutils.args("--system-wide", dest="system_wide", action="store_true", - required=False, - help="Use the system-wide environment for verifier instead " - "of a virtual environment.") - @cliutils.args("--extra-settings", dest="extra", type=str, required=False, - help="Extra installation settings for verifier.") - @cliutils.args("--no-use", dest="do_use", action="store_false", - help="Not to set the created verifier as the default " - "verifier for future operations.") - @plugins.ensure_plugins_are_loaded - def create_verifier(self, api, name, vtype, namespace="", source=None, - version=None, system_wide=False, extra=None, - do_use=True): - """Create a verifier.""" - verifier_uuid = api.verifier.create( - name=name, vtype=vtype, namespace=namespace, source=source, - version=version, system_wide=system_wide, extra_settings=extra) - - if do_use: - self.use_verifier(api, verifier_uuid) - - @cliutils.help_group("verifier") - @cliutils.args("--id", dest="verifier_id", type=str, required=True, - help="Verifier name or UUID. " + LIST_VERIFIERS_HINT) - def use_verifier(self, api, verifier_id): - """Choose a verifier to use for the future operations.""" - verifier = api.verifier.get(verifier_id=verifier_id) - fileutils.update_globals_file(envutils.ENV_VERIFIER, verifier["uuid"]) - print(_("Using verifier '%s' (UUID=%s) as the default verifier " - "for the future operations.") % (verifier["name"], - verifier["uuid"])) - - @cliutils.help_group("verifier") - @cliutils.args("--status", dest="status", type=str, required=False, - help="Status to filter verifiers by.") - @plugins.ensure_plugins_are_loaded - def list_verifiers(self, api, status=None): - """List all verifiers.""" - verifiers = api.verifier.list(status=status) - if verifiers: - fields = ["UUID", "Name", "Type", "Namespace", "Created at", - "Updated at", "Status", "Version", "System-wide", - "Active"] - cv = envutils.get_global(envutils.ENV_VERIFIER) - formatters = { - "Created at": lambda v: v["created_at"], - "Updated at": lambda v: v["updated_at"], - "Active": lambda v: u"\u2714" if v["uuid"] == cv else "", - } - cliutils.print_list(verifiers, fields, formatters=formatters, - normalize_field_names=True, sortby_index=4) - elif status: - print(_("There are no verifiers with status '%s'.") % status) - else: - print(_("There are no verifiers. You can create verifier, using " - "command `rally verify create-verifier`.")) - - @cliutils.help_group("verifier") - @cliutils.args("--id", dest="verifier_id", type=str, - help="Verifier name or UUID. 
" + LIST_VERIFIERS_HINT) - @envutils.with_default_verifier_id() - @plugins.ensure_plugins_are_loaded - def show_verifier(self, api, verifier_id=None): - """Show detailed information about a verifier.""" - verifier = api.verifier.get(verifier_id=verifier_id) - fields = ["UUID", "Status", "Created at", "Updated at", "Active", - "Name", "Description", "Type", "Namespace", "Source", - "Version", "System-wide", "Extra settings", "Location", - "Venv location"] - used_verifier = envutils.get_global(envutils.ENV_VERIFIER) - formatters = { - "Created at": lambda v: v["created_at"].replace("T", " "), - "Updated at": lambda v: v["updated_at"].replace("T", " "), - "Active": lambda v: u"\u2714" - if v["uuid"] == used_verifier else None, - "Extra settings": lambda v: (json.dumps(v["extra_settings"], - indent=4) - if v["extra_settings"] else None), - "Location": lambda v: self._get_location((v["uuid"]), "repo") - } - if not verifier["system_wide"]: - formatters["Venv location"] = lambda v: self._get_location( - v["uuid"], ".venv") - cliutils.print_dict(verifier, fields=fields, formatters=formatters, - normalize_field_names=True, print_header=False, - table_label="Verifier") - print(_("Attention! All you do in the verifier repository or " - "verifier virtual environment, you do it at your own risk!")) - - @cliutils.help_group("verifier") - @cliutils.args("--id", dest="verifier_id", type=str, required=True, - help="Verifier name or UUID. " + LIST_VERIFIERS_HINT) - @cliutils.args("--deployment-id", dest="deployment", type=str, - metavar="", required=False, - help="Deployment name or UUID. If specified, only the " - "deployment-specific data will be deleted for " - "verifier. " + LIST_DEPLOYMENTS_HINT) - @cliutils.args("--force", dest="force", action="store_true", - required=False, - help="Delete all stored verifications of the specified " - "verifier. If a deployment specified, only " - "verifications of this deployment will be deleted. " - "Use this argument carefully! You can delete " - "verifications that may be important to you.") - @plugins.ensure_plugins_are_loaded - def delete_verifier(self, api, verifier_id, deployment=None, force=False): - """Delete a verifier.""" - api.verifier.delete(verifier_id=verifier_id, - deployment_id=deployment, - force=force) - - @cliutils.help_group("verifier") - @cliutils.args("--id", dest="verifier_id", type=str, - help="Verifier name or UUID. " + LIST_VERIFIERS_HINT) - @cliutils.args("--update-venv", dest="update_venv", action="store_true", - required=False, - help="Update the virtual environment for verifier.") - @cliutils.args("--version", dest="version", type=str, required=False, - help="Branch, tag or commit ID to checkout. HINT: Specify " - "the same version to pull the latest repo code.") - @cliutils.args("--system-wide", dest="system_wide", action="store_true", - required=False, - help="Switch to using the system-wide environment.") - @cliutils.args("--no-system-wide", dest="no_system_wide", - action="store_true", required=False, - help="Switch to using the virtual environment. 
" - "If the virtual environment doesn't exist, " - "it will be created.") - @envutils.with_default_verifier_id() - @plugins.ensure_plugins_are_loaded - def update_verifier(self, api, verifier_id=None, version=None, - system_wide=None, no_system_wide=None, - update_venv=None): - """Update a verifier.""" - if not (version or system_wide or no_system_wide or update_venv): - print(_("At least one of the following arguments should be " - "provided: '--update-venv', '--version', '--system-wide', " - "'--no-system-wide'.")) - return 1 - - msg = _("Arguments '--%s' and '--%s' cannot be used simultaneously. " - "You can use only one of the mentioned arguments.") - if update_venv and system_wide: - print(msg % ("update-venv", "system-wide")) - return 1 - if system_wide and no_system_wide: - print(msg % ("system-wide", "no-system-wide")) - return 1 - - system_wide = False if no_system_wide else (system_wide or None) - api.verifier.update(verifier_id=verifier_id, - system_wide=system_wide, - version=version, - update_venv=update_venv) - - print(_("HINT: In some cases the verifier config file should be " - "updated as well. Use `rally verify configure-verifier` " - "command to update the config file.")) - - @cliutils.help_group("verifier") - @cliutils.args("--id", dest="verifier_id", type=str, - help="Verifier name or UUID. " + LIST_VERIFIERS_HINT) - @cliutils.args("--deployment-id", dest="deployment", type=str, - metavar="", - help="Deployment name or UUID. " + LIST_DEPLOYMENTS_HINT) - @cliutils.args("--reconfigure", dest="reconfigure", action="store_true", - required=False, help="Reconfigure verifier.") - @cliutils.args("--extend", dest="extra_options", type=str, - metavar="", required=False, - help="Extend verifier configuration with extra options. " - "If options are already present, the given ones will " - "override them. Can be a path to a regular config " - "file or just a json/yaml.") - @cliutils.args("--override", dest="new_configuration", type=str, - metavar="", required=False, - help="Override verifier configuration by another one " - "from a given source.") - @cliutils.args("--show", dest="show", action="store_true", required=False, - help="Show verifier configuration.") - @envutils.with_default_deployment(cli_arg_name="deployment-id") - @envutils.with_default_verifier_id() - @plugins.ensure_plugins_are_loaded - def configure_verifier(self, api, verifier_id=None, deployment=None, - reconfigure=False, extra_options=None, - new_configuration=None, show=False): - """Configure a verifier for a specific deployment.""" - - # TODO(ylobankov): Add an ability to read extra options from - # a json or yaml file. 
- - if new_configuration and (extra_options or reconfigure): - print(_("Argument '--override' cannot be used with arguments " - "'--reconfigure' and '--extend'.")) - return 1 - - if new_configuration: - if not os.path.exists(new_configuration): - print(_("File '%s' not found.") % new_configuration) - return 1 - - with open(new_configuration) as f: - config = f.read() - api.verifier.override_configuration(verifier_id=verifier_id, - deployment_id=deployment, - new_configuration=config) - else: - if extra_options: - if os.path.isfile(extra_options): - conf = configparser.ConfigParser() - conf.read(extra_options) - extra_options = dict(conf._sections) - for s in extra_options: - extra_options[s] = dict(extra_options[s]) - extra_options[s].pop("__name__", None) - - defaults = dict(conf.defaults()) - if defaults: - extra_options["DEFAULT"] = dict(conf.defaults()) - else: - extra_options = yaml.safe_load(extra_options) - - config = api.verifier.configure(verifier=verifier_id, - deployment_id=deployment, - extra_options=extra_options, - reconfigure=reconfigure) - - if show: - print("\n%s\n" % config.strip()) - - @cliutils.help_group("verifier") - @cliutils.args("--id", dest="verifier_id", type=str, - help="Verifier name or UUID. " + LIST_VERIFIERS_HINT) - @cliutils.args("--pattern", dest="pattern", type=str, required=False, - help="Pattern which will be used for matching. Can be a " - "regexp or a verifier-specific entity (for example, " - "in case of Tempest you can specify 'set=smoke').") - @envutils.with_default_verifier_id() - @plugins.ensure_plugins_are_loaded - def list_verifier_tests(self, api, verifier_id=None, pattern=""): - """List all verifier tests.""" - tests = api.verifier.list_tests(verifier_id=verifier_id, - pattern=pattern) - if tests: - for test in tests: - print(test) - else: - print(_("No tests found.")) - - @cliutils.help_group("verifier-ext") - @cliutils.args("--id", dest="verifier_id", type=str, - help="Verifier name or UUID. " + LIST_VERIFIERS_HINT) - @cliutils.args("--source", dest="source", type=str, required=True, - help="Path or URL to the repo to clone verifier " - "extension from.") - @cliutils.args("--version", dest="version", type=str, required=False, - help="Branch, tag or commit ID to checkout before " - "installation of the verifier extension (the " - "'master' branch is used by default).") - @cliutils.args("--extra-settings", dest="extra", type=str, required=False, - help="Extra installation settings for verifier extension.") - @envutils.with_default_verifier_id() - @plugins.ensure_plugins_are_loaded - def add_verifier_ext(self, api, verifier_id=None, source=None, - version=None, extra=None): - """Add a verifier extension.""" - api.verifier.add_extension(verifier_id=verifier_id, source=source, - version=version, extra_settings=extra) - - @cliutils.help_group("verifier-ext") - @cliutils.args("--id", dest="verifier_id", type=str, - help="Verifier name or UUID. " + LIST_VERIFIERS_HINT) - @envutils.with_default_verifier_id() - @plugins.ensure_plugins_are_loaded - def list_verifier_exts(self, api, verifier_id=None): - """List all verifier extensions.""" - verifier_exts = api.verifier.list_extensions(verifier_id=verifier_id) - if verifier_exts: - fields = ["Name", "Entry point"] - if logging.is_debug(): - fields.append("Location") - cliutils.print_list(verifier_exts, fields, - normalize_field_names=True) - else: - print(_("There are no verifier extensions. 
You can add " - "verifier extension, using command `rally verify " - "add-verifier-ext`.")) - - @cliutils.help_group("verifier-ext") - @cliutils.args("--id", dest="verifier_id", type=str, - help="Verifier name or UUID. " + LIST_VERIFIERS_HINT) - @cliutils.args("--name", type=str, required=True, - help="Verifier extension name.") - @envutils.with_default_verifier_id() - @plugins.ensure_plugins_are_loaded - def delete_verifier_ext(self, api, verifier_id=None, name=None): - """Delete a verifier extension.""" - api.verifier.delete_extension(verifier_id=verifier_id, name=name) - - @cliutils.help_group("verification") - @cliutils.args("--id", dest="verifier_id", type=str, - help="Verifier name or UUID. " + LIST_VERIFIERS_HINT) - @cliutils.args("--deployment-id", dest="deployment", type=str, - metavar="", - help="Deployment name or UUID. " + LIST_DEPLOYMENTS_HINT) - @cliutils.args("--tag", nargs="+", dest="tags", type=str, required=False, - help="Mark verification with a tag or a few tags.") - @cliutils.args("--pattern", dest="pattern", type=str, required=False, - help="Pattern which will be used for running tests. Can be " - "a regexp or a verifier-specific entity (for example, " - "in case of Tempest you can specify 'set=smoke').") - @cliutils.args("--concurrency", dest="concur", type=int, metavar="", - required=False, - help="How many processes to be used for running verifier " - "tests. The default value (0) auto-detects your CPU " - "count.") - @cliutils.args("--load-list", dest="load_list", type=str, metavar="", - required=False, - help="Path to a file with a list of tests to run.") - @cliutils.args("--skip-list", dest="skip_list", type=str, metavar="", - required=False, - help="Path to a file with a list of tests to skip. " - "Format: json or yaml like a dictionary where keys " - "are test names and values are reasons.") - @cliutils.args("--xfail-list", dest="xfail_list", type=str, - metavar="", required=False, - help="Path to a file with a list of tests that will be " - "considered as expected failures. " - "Format: json or yaml like a dictionary where keys " - "are test names and values are reasons.") - @cliutils.args("--detailed", dest="detailed", action="store_true", - required=False, - help="Show verification details such as errors of failed " - "tests.") - @cliutils.args("--no-use", dest="do_use", action="store_false", - help="Not to set the finished verification as the default " - "verification for future operations.") - @envutils.with_default_deployment(cli_arg_name="deployment-id") - @envutils.with_default_verifier_id() - @plugins.ensure_plugins_are_loaded - def start(self, api, verifier_id=None, deployment=None, tags=None, - pattern=None, concur=0, load_list=None, skip_list=None, - xfail_list=None, detailed=False, do_use=True): - """Start a verification (run verifier tests).""" - if pattern and load_list: - print(_("Arguments '--pattern' and '--load-list' cannot be used " - "simultaneously. 
You can use only one of the mentioned "
-                    "arguments."))
-            return 1
-
-        def parse(filename):
-            with open(filename, "r") as f:
-                return yaml.safe_load(f.read())
-
-        if load_list:
-            if not os.path.exists(load_list):
-                print(_("File '%s' not found.") % load_list)
-                return 1
-            with open(load_list, "r") as f:
-                load_list = [test for test in f.read().split("\n") if test]
-
-        if skip_list:
-            if not os.path.exists(skip_list):
-                print(_("File '%s' not found.") % skip_list)
-                return 1
-            skip_list = parse(skip_list)
-
-        if xfail_list:
-            if not os.path.exists(xfail_list):
-                print(_("File '%s' not found.") % xfail_list)
-                return 1
-            xfail_list = parse(xfail_list)
-
-        run_args = {key: value for key, value in (
-            ("pattern", pattern), ("load_list", load_list),
-            ("skip_list", skip_list), ("xfail_list", xfail_list),
-            ("concurrency", concur)) if value}
-
-        try:
-            results = api.verification.start(
-                verifier_id=verifier_id, deployment_id=deployment,
-                tags=tags, **run_args)
-            verification_uuid = results["verification"]["uuid"]
-        except exceptions.DeploymentNotFinishedStatus as e:
-            print(_("Cannot start a verification on an "
-                    "unfinished deployment: %s") % e)
-            return 1
-
-        if detailed:
-            self._print_details_after_run(results)
-
-        self._print_totals(results["totals"])
-
-        if do_use:
-            self.use(api, verification_uuid)
-        else:
-            print(_("Verification UUID: %s.") % verification_uuid)
-
-    @cliutils.help_group("verification")
-    @cliutils.args("--uuid", dest="verification_uuid", type=str, required=True,
-                   help="Verification UUID. " + LIST_VERIFICATIONS_HINT)
-    def use(self, api, verification_uuid):
-        """Choose a verification to use for the future operations."""
-        verification = api.verification.get(
-            verification_uuid=verification_uuid)
-        fileutils.update_globals_file(
-            envutils.ENV_VERIFICATION, verification["uuid"])
-        print(_("Using verification (UUID=%s) as the default verification "
-                "for the future operations.") % verification["uuid"])
-
-    @cliutils.help_group("verification")
-    @cliutils.args("--uuid", dest="verification_uuid", type=str,
-                   help="Verification UUID. " + LIST_VERIFICATIONS_HINT)
-    @cliutils.args("--deployment-id", dest="deployment", type=str,
-                   metavar="",
-                   help="Deployment name or UUID. " + LIST_DEPLOYMENTS_HINT)
-    @cliutils.args("--failed", dest="failed", required=False,
-                   help="Rerun only failed tests.", action="store_true")
-    @cliutils.args("--tag", nargs="+", dest="tags", type=str, required=False,
-                   help="Mark verification with a tag or a few tags.")
-    @cliutils.args("--concurrency", dest="concur", type=int, metavar="",
-                   required=False,
-                   help="How many processes to be used for running verifier "
-                        "tests. 
The default value (0) auto-detects your CPU " - "count.") - @cliutils.args("--detailed", dest="detailed", action="store_true", - required=False, - help="Show verification details such as errors of failed " - "tests.") - @cliutils.args("--no-use", dest="do_use", action="store_false", - help="Not to set the finished verification as the default " - "verification for future operations.") - @envutils.with_default_verification_uuid - @envutils.with_default_deployment(cli_arg_name="deployment-id") - @plugins.ensure_plugins_are_loaded - def rerun(self, api, verification_uuid=None, deployment=None, tags=None, - concur=None, failed=False, detailed=False, do_use=True): - """Rerun tests from a verification for a specific deployment.""" - results = api.verification.rerun(verification_uuid=verification_uuid, - deployment_id=deployment, - failed=failed, - tags=tags, - concurrency=concur) - if detailed: - self._print_details_after_run(results) - - self._print_totals(results["totals"]) - - if do_use: - self.use(api, results["verification"]["uuid"]) - else: - print(_("Verification UUID: %s.") - % results["verification"]["uuid"]) - - @cliutils.help_group("verification") - @cliutils.args("--uuid", dest="verification_uuid", type=str, - help="Verification UUID. " + LIST_VERIFICATIONS_HINT) - @cliutils.args("--sort-by", metavar="", dest="sort_by", type=str, - required=False, choices=("name", "duration", "status"), - help="Sort tests by 'name', 'duration' or 'status'.") - @cliutils.args("--detailed", dest="detailed", action="store_true", - required=False, - help="Show verification details such as run arguments " - "and errors of failed tests.") - @envutils.with_default_verification_uuid - def show(self, api, verification_uuid=None, sort_by="name", - detailed=False): - """Show detailed information about a verification.""" - verification = api.verification.get( - verification_uuid=verification_uuid) - verifier = api.verifier.get(verifier_id=verification["verifier_uuid"]) - deployment = api.deployment.get( - deployment=verification["deployment_uuid"]) - - def run_args_formatter(v): - run_args = [] - for k in sorted(v["run_args"]): - if k in ("load_list", "skip_list", "xfail_list"): - value = "(value is too long, %s)" - if detailed: - value %= "will be displayed separately" - else: - value %= "use 'detailed' flag to display it" - else: - value = v["run_args"][k] - run_args.append("%s: %s" % (k, value)) - return "\n".join(run_args) - - # Main table - fields = ["UUID", "Status", "Started at", "Finished at", "Duration", - "Run arguments", "Tags", "Verifier name", "Verifier type", - "Deployment name", "Tests count", "Tests duration, sec", - "Success", "Skipped", "Expected failures", - "Unexpected success", "Failures"] - formatters = { - "Started at": lambda v: v["created_at"].replace("T", " "), - "Finished at": lambda v: v["updated_at"].replace("T", " "), - "Duration": lambda v: (dt.datetime.strptime(v["updated_at"], - TIME_FORMAT) - - dt.datetime.strptime(v["created_at"], - TIME_FORMAT)), - "Run arguments": run_args_formatter, - "Tags": lambda v: ", ".join(v["tags"]) or None, - "Verifier name": lambda v: "%s (UUID: %s)" % (verifier["name"], - verifier["uuid"]), - "Verifier type": ( - lambda v: "%s (namespace: %s)" % (verifier["type"], - verifier["namespace"])), - "Deployment name": ( - lambda v: "%s (UUID: %s)" % (deployment["name"], - deployment["uuid"])), - "Tests duration, sec": lambda v: v["tests_duration"] - } - cliutils.print_dict(verification, fields, formatters=formatters, - normalize_field_names=True, 
print_header=False, - table_label="Verification") - - if detailed: - h = _("Run arguments") - print("\n%s" % cliutils.make_header(h, len(h)).strip()) - print("\n%s\n" % json.dumps(verification["run_args"], indent=4)) - - # Tests table - tests = verification["tests"] - values = [tests[test_id] for test_id in tests] - fields = ["Name", "Duration, sec", "Status"] - formatters = {"Duration, sec": lambda v: v["duration"]} - index = ("name", "duration", "status").index(sort_by) - cliutils.print_list(values, fields, formatters=formatters, - table_label="Tests", normalize_field_names=True, - sortby_index=index) - - if detailed: - failures = [t for t in tests.values() if t["status"] == "fail"] - if failures: - self._print_failures("Failures", failures) - else: - print(_("\nCongratulations! Verification doesn't have failed " - "tests! :)")) - - @cliutils.help_group("verification") - @cliutils.args("--id", dest="verifier_id", type=str, required=False, - help="Verifier name or UUID. " + LIST_VERIFIERS_HINT) - @cliutils.args("--deployment-id", dest="deployment", type=str, - metavar="", required=False, - help="Deployment name or UUID. " + LIST_DEPLOYMENTS_HINT) - @cliutils.args("--tag", nargs="+", dest="tags", type=str, required=False, - help="Tags to filter verifications by.") - @cliutils.args("--status", dest="status", type=str, required=False, - help="Status to filter verifications by.") - def list(self, api, verifier_id=None, deployment=None, tags=None, - status=None): - """List all verifications.""" - verifications = api.verification.list(verifier_id=verifier_id, - deployment_id=deployment, - tags=tags, status=status) - if verifications: - fields = ["UUID", "Tags", "Verifier name", "Deployment name", - "Started at", "Finished at", "Duration", "Status"] - formatters = { - "Tags": lambda v: ", ".join(v["tags"]) or "-", - "Verifier name": (lambda v: api.verifier.get( - verifier_id=v["verifier_uuid"])["name"]), - "Deployment name": (lambda v: api.deployment.get( - deployment=v["deployment_uuid"])["name"]), - "Started at": lambda v: v["created_at"], - "Finished at": lambda v: v["updated_at"], - "Duration": lambda v: (dt.datetime.strptime(v["updated_at"], - TIME_FORMAT) - - dt.datetime.strptime(v["created_at"], - TIME_FORMAT)) - } - cliutils.print_list(verifications, fields, formatters=formatters, - normalize_field_names=True, sortby_index=4) - elif verifier_id or deployment or status or tags: - print(_("There are no verifications that meet specified filter " - "arguments.")) - else: - print(_("There are no verifications. You can start verification, " - "using command `rally verify start`.")) - - @cliutils.help_group("verification") - @cliutils.args("--uuid", nargs="+", dest="verification_uuid", type=str, - required=True, - help="UUIDs of verifications. " + LIST_VERIFICATIONS_HINT) - def delete(self, api, verification_uuid): - """Delete a verification or a few verifications.""" - if not isinstance(verification_uuid, list): - verification_uuid = [verification_uuid] - for v_uuid in verification_uuid: - api.verification.delete(verification_uuid=v_uuid) - - @cliutils.help_group("verification") - @cliutils.args("--uuid", nargs="+", dest="verification_uuid", type=str, - help="UUIDs of verifications. " + LIST_VERIFICATIONS_HINT) - @cliutils.args("--type", dest="output_type", type=str, - required=False, default="json", - help="Report type (Defaults to JSON). Out-of-the-box types:" - " %s. HINT: You can list all types, executing `rally " - "plugin list --plugin-base VerificationReporter` " - "command." 
% ", ".join(DEFAULT_REPORT_TYPES)) - @cliutils.args("--to", dest="output_dest", type=str, - metavar="", required=False, - help="Report destination. Can be a path to a file (in case " - "of HTML, JSON, etc. types) to save the report to or " - "a connection string. It depends on the report type.") - @cliutils.args("--open", dest="open_it", action="store_true", - required=False, help="Open the output file in a browser.") - @envutils.with_default_verification_uuid - @plugins.ensure_plugins_are_loaded - def report(self, api, verification_uuid=None, output_type=None, - output_dest=None, open_it=None): - """Generate a report for a verification or a few verifications.""" - if not isinstance(verification_uuid, list): - verification_uuid = [verification_uuid] - - result = api.verification.report(uuids=verification_uuid, - output_type=output_type, - output_dest=output_dest) - if "files" in result: - print(_("Saving the report to '%s' file. It may take some time.") - % output_dest) - for path in result["files"]: - full_path = os.path.abspath(os.path.expanduser(path)) - if not os.path.exists(os.path.dirname(full_path)): - os.makedirs(os.path.dirname(full_path)) - with open(full_path, "w") as f: - f.write(result["files"][path]) - print(_("The report has been successfully saved.")) - - if open_it: - if "open" not in result: - print(_("Cannot open '%s' report in the browser because " - "report type doesn't support it.") % output_type) - return 1 - webbrowser.open_new_tab( - "file://" + os.path.abspath(result["open"])) - - if "print" in result: - # NOTE(andreykurilin): we need a separation between logs and - # printed information to be able to parse output - h = _("Verification Report") - print("\n%s\n%s" % (cliutils.make_header(h, len(h)), - result["print"])) - - @cliutils.help_group("verification") - @cliutils.args("--id", dest="verifier_id", type=str, required=False, - help="Verifier name or UUID. " + LIST_VERIFIERS_HINT) - @cliutils.args("--deployment-id", dest="deployment", type=str, - metavar="", required=False, - help="Deployment name or UUID. " + LIST_DEPLOYMENTS_HINT) - @cliutils.args("--file", dest="file_to_parse", type=str, metavar="", - required=True, - help="File to import test results from.") - @cliutils.args("--run-args", dest="run_args", type=str, required=False, - help="Arguments that might be used when running tests. 
For " - "example, '{concurrency: 2, pattern: set=identity}'.") - @cliutils.args("--no-use", dest="do_use", action="store_false", - help="Not to set the created verification as the default " - "verification for future operations.") - @cliutils.alias("import") - @envutils.with_default_deployment(cli_arg_name="deployment-id") - @envutils.with_default_verifier_id() - @plugins.ensure_plugins_are_loaded - def import_results(self, api, verifier_id=None, deployment=None, - file_to_parse=None, run_args=None, do_use=True): - """Import results of a test run into the Rally database.""" - if not os.path.exists(file_to_parse): - print(_("File '%s' not found.") % file_to_parse) - return 1 - with open(file_to_parse, "r") as f: - data = f.read() - - run_args = yaml.safe_load(run_args) if run_args else {} - verification, results = api.verification.import_results( - verifier_id=verifier_id, deployment_id=deployment, - data=data, **run_args) - self._print_totals(results["totals"]) - - verification_uuid = verification["uuid"] - if do_use: - self.use(api, verification_uuid) - else: - print(_("Verification UUID: %s.") % verification_uuid) diff --git a/rally/cli/envutils.py b/rally/cli/envutils.py deleted file mode 100644 index 37d7698a..00000000 --- a/rally/cli/envutils.py +++ /dev/null @@ -1,149 +0,0 @@ -# Copyright 2013: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
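-# NOTE: a short sketch of how per-user defaults flow through this module
-# (the file contents shown are illustrative): commands persist choices via
-# fileutils.update_globals_file(), which writes lines such as
-#   RALLY_DEPLOYMENT=6fd9a19f-5cf9-4f4e-9b2e-1b2c3d4e5f60
-# into ~/.rally/globals; get_global() below loads that file into os.environ
-# so the decorators can fall back to it when a CLI argument is omitted.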
- -import os - -import decorator -from oslo_utils import strutils - -from rally.common import fileutils -from rally.common.i18n import _ -from rally import exceptions - -ENV_DEPLOYMENT = "RALLY_DEPLOYMENT" -ENV_TASK = "RALLY_TASK" -ENV_VERIFIER = "RALLY_VERIFIER" -ENV_VERIFICATION = "RALLY_VERIFICATION" -ENVVARS = [ENV_DEPLOYMENT, ENV_TASK, ENV_VERIFIER, ENV_VERIFICATION] - -MSG_MISSING_ARG = _("Missing argument: --%(arg_name)s") - - -def clear_global(global_key): - path = os.path.expanduser("~/.rally/globals") - if os.path.exists(path): - fileutils.update_env_file(path, global_key, "\n") - if global_key in os.environ: - os.environ.pop(global_key) - - -def clear_env(): - for envvar in ENVVARS: - clear_global(envvar) - - -def get_global(global_key, do_raise=False): - if global_key not in os.environ: - fileutils.load_env_file(os.path.expanduser("~/.rally/globals")) - value = os.environ.get(global_key) - if not value and do_raise: - raise exceptions.InvalidArgumentsException("%s env is missing" - % global_key) - return value - - -def default_from_global(arg_name, env_name, - cli_arg_name, - message=MSG_MISSING_ARG): - def default_from_global(f, *args, **kwargs): - id_arg_index = f.__code__.co_varnames.index(arg_name) - args = list(args) - if args[id_arg_index] is None: - args[id_arg_index] = get_global(env_name) - if not args[id_arg_index]: - print(message % {"arg_name": cli_arg_name}) - return(1) - return f(*args, **kwargs) - return decorator.decorator(default_from_global) - - -def with_default_deployment(cli_arg_name="uuid"): - return default_from_global("deployment", ENV_DEPLOYMENT, cli_arg_name, - message=_("There is no default deployment.\n" - "\tPlease use command:\n" - "\trally deployment use " - "|" - "\nor pass uuid of deployment to " - "the --%(arg_name)s argument of " - "this command")) - - -def with_default_verifier_id(cli_arg_name="id"): - return default_from_global("verifier_id", ENV_VERIFIER, cli_arg_name) - - -with_default_task_id = default_from_global("task_id", ENV_TASK, "uuid") -with_default_verification_uuid = default_from_global("verification_uuid", - ENV_VERIFICATION, "uuid") - - -def get_creds_from_env_vars(): - required_env_vars = ["OS_AUTH_URL", "OS_USERNAME", "OS_PASSWORD"] - missing_env_vars = [v for v in required_env_vars if v not in os.environ] - if missing_env_vars: - msg = ("The following environment variables are " - "required but not set: %s" % " ".join(missing_env_vars)) - raise exceptions.ValidationError(message=msg) - - creds = { - "auth_url": os.environ["OS_AUTH_URL"], - "admin": { - "username": os.environ["OS_USERNAME"], - "password": os.environ["OS_PASSWORD"], - "tenant_name": get_project_name_from_env() - }, - "endpoint_type": get_endpoint_type_from_env(), - "endpoint": os.environ.get("OS_ENDPOINT"), - "region_name": os.environ.get("OS_REGION_NAME", ""), - "https_cacert": os.environ.get("OS_CACERT", ""), - "https_insecure": strutils.bool_from_string( - os.environ.get("OS_INSECURE")), - "profiler_hmac_key": os.environ.get("OSPROFILER_HMAC_KEY") - } - - user_domain_name = os.environ.get("OS_USER_DOMAIN_NAME") - project_domain_name = os.environ.get("OS_PROJECT_DOMAIN_NAME") - identity_api_version = os.environ.get( - "OS_IDENTITY_API_VERSION", os.environ.get("IDENTITY_API_VERSION")) - if (identity_api_version == "3" or - (identity_api_version is None and - (user_domain_name or project_domain_name))): - # it is Keystone v3 and it has another config scheme - creds["admin"]["project_name"] = creds["admin"].pop("tenant_name") - 
creds["admin"]["user_domain_name"] = user_domain_name or "Default" - project_domain_name = project_domain_name or "Default" - creds["admin"]["project_domain_name"] = project_domain_name - - return creds - - -def get_project_name_from_env(): - tenant_name = os.environ.get("OS_PROJECT_NAME", - os.environ.get("OS_TENANT_NAME")) - if tenant_name is None: - raise exceptions.ValidationError("Either the OS_PROJECT_NAME or " - "OS_TENANT_NAME environment variable " - "is required, but neither is set.") - - return tenant_name - - -def get_endpoint_type_from_env(): - endpoint_type = os.environ.get("OS_ENDPOINT_TYPE", - os.environ.get("OS_INTERFACE")) - if endpoint_type and "URL" in endpoint_type: - endpoint_type = endpoint_type.replace("URL", "") - - return endpoint_type diff --git a/rally/cli/main.py b/rally/cli/main.py deleted file mode 100644 index d5e50422..00000000 --- a/rally/cli/main.py +++ /dev/null @@ -1,41 +0,0 @@ -# Copyright 2013: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""CLI interface for Rally.""" - -from __future__ import print_function - -import sys - -from rally.cli import cliutils -from rally.cli.commands import deployment -from rally.cli.commands import plugin -from rally.cli.commands import task -from rally.cli.commands import verify - - -categories = { - "deployment": deployment.DeploymentCommands, - "plugin": plugin.PluginCommands, - "task": task.TaskCommands, - "verify": verify.VerifyCommands -} - - -def main(): - return cliutils.run(sys.argv, categories) - -if __name__ == "__main__": - sys.exit(main()) diff --git a/rally/cli/manage.py b/rally/cli/manage.py deleted file mode 100644 index cf4eb5b1..00000000 --- a/rally/cli/manage.py +++ /dev/null @@ -1,76 +0,0 @@ -# Copyright 2013: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""CLI interface for Rally DB management.""" - -from __future__ import print_function - -import contextlib -import sys - -from rally.cli import cliutils -from rally.cli import envutils -from rally.common import db - - -@contextlib.contextmanager -def output_migration_result(method_name): - """Print migration result.""" - print("%s started." % method_name.capitalize()) - start_revision = db.schema_revision() - yield - print("%s processed." 
% method_name.capitalize()) - current_revision = db.schema_revision() - if start_revision != current_revision: - print("Database migrated successfully " - "from {start} to {end} revision.".format(start=start_revision, - end=current_revision)) - else: - print("Database is already up to date.") - - -class DBCommands(object): - """Commands for DB management.""" - - def recreate(self, api): - """Drop and create Rally database. - - This will delete all existing data. - """ - db.schema_cleanup() - db.schema_create() - envutils.clear_env() - - def create(self, api): - """Create Rally database.""" - db.schema_create() - - def upgrade(self, api): - """Upgrade Rally database to the latest state.""" - with output_migration_result("upgrade"): - db.schema_upgrade() - - def revision(self, api): - """Print current Rally database revision UUID.""" - print(db.schema_revision()) - - -def main(): - categories = {"db": DBCommands} - return cliutils.run(sys.argv, categories) - - -if __name__ == "__main__": - sys.exit(main()) diff --git a/rally/common/__init__.py b/rally/common/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/rally/common/broker.py b/rally/common/broker.py deleted file mode 100644 index 9c424a68..00000000 --- a/rally/common/broker.py +++ /dev/null @@ -1,86 +0,0 @@ -# Copyright 2014: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import collections -import threading - -from rally.common.i18n import _LW -from rally.common import logging - - -LOG = logging.getLogger(__name__) - - -def _consumer(consume, queue): - """Infinite worker that consumes tasks from the queue. - - :param consume: method that consumes an object removed from the queue - :param queue: deque object to popleft() objects from - """ - cache = {} - while True: - if not queue: - break - else: - try: - args = queue.popleft() - except IndexError: - # consumed by another thread - continue - try: - consume(cache, args) - except Exception as e: - LOG.warning(_LW("Failed to consume a task from the queue: %s") % e) - if logging.is_debug(): - LOG.exception(e) - - -def _publisher(publish, queue): - """Calls a publish method that fills the queue with jobs. - - :param publish: method that fills the queue - :param queue: deque object to be filled by the publish() method - """ - try: - publish(queue) - except Exception as e: - LOG.warning(_LW("Failed to publish a task to the queue: %s") % e) - if logging.is_debug(): - LOG.exception(e) - - -def run(publish, consume, consumers_count=1): - """Run broker. - - publish() puts jobs into the queue; consume() processes one element - from the queue. - - Once publish() has finished and all queued elements have been processed, - the run is complete and all consumer threads are joined.
- - :param publish: Function that puts values to the queue - :param consume: Function that processes a single value from the queue - :param consumers_count: Number of consumers - """ - queue = collections.deque() - _publisher(publish, queue) - - consumers = [] - for i in range(consumers_count): - consumer = threading.Thread(target=_consumer, args=(consume, queue)) - consumer.start() - consumers.append(consumer) - - for consumer in consumers: - consumer.join() diff --git a/rally/common/db/__init__.py b/rally/common/db/__init__.py deleted file mode 100644 index f09aa7a2..00000000 --- a/rally/common/db/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ -# Copyright 2013: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally.common.db.api import * # noqa diff --git a/rally/common/db/api.py b/rally/common/db/api.py deleted file mode 100644 index 396b7df7..00000000 --- a/rally/common/db/api.py +++ /dev/null @@ -1,525 +0,0 @@ -# Copyright 2013: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Defines interface for DB access. - -The underlying driver is loaded as a :class:`LazyPluggable`. - -Functions in this module are imported into the rally.common.db namespace. -Call these functions from rally.common.db namespace, not the -rally.common.db.api namespace. - -All functions in this module return objects that implement a dictionary-like -interface. Currently, many of these objects are sqlalchemy objects that -implement a dictionary interface. However, a future goal is to have all of -these objects be simple dictionaries. - - -**Related Flags** - -:backend: string to lookup in the list of LazyPluggable backends. - `sqlalchemy` is the only supported backend right now. - -:connection: string specifying the sqlalchemy connection to use, like: - `sqlite:///var/lib/cinder/cinder.sqlite`. 
- -:enable_new_services: when adding a new service to the database, is it in the - pool of available hardware (Default: True) - -""" - -from oslo_config import cfg -from oslo_db import api as db_api -from oslo_db import options as db_options - - -CONF = cfg.CONF - - -db_options.set_defaults(CONF, connection="sqlite:////tmp/rally.sqlite") - - -IMPL = None - - -def get_impl(): - global IMPL - - if not IMPL: - _BACKEND_MAPPING = {"sqlalchemy": "rally.common.db.sqlalchemy.api"} - IMPL = db_api.DBAPI.from_config(CONF, backend_mapping=_BACKEND_MAPPING) - - return IMPL - - -def engine_reset(): - """Reset DB engine.""" - get_impl().engine_reset() - - -def schema_cleanup(): - """Drop DB schema. This method drops existing database.""" - get_impl().schema_cleanup() - - -def schema_upgrade(revision=None): - """Migrate the database to `revision` or the most recent revision.""" - return get_impl().schema_upgrade(revision) - - -def schema_create(): - """Create database schema from models description.""" - return get_impl().schema_create() - - -def schema_revision(detailed=False): - """Return the schema revision.""" - return get_impl().schema_revision(detailed=detailed) - - -def schema_stamp(revision): - """Stamps database with provided revision.""" - return get_impl().schema_stamp(revision) - - -def task_get(uuid, detailed=False): - """Returns task by uuid. - - :param uuid: UUID of the task. - :param detailed: whether return results of task or not (Defaults to False). - :raises TaskNotFound: if the task does not exist. - :returns: task dict with data on the task. - """ - task = get_impl().task_get(uuid, detailed=detailed) - if detailed: - for subtask in task["subtasks"]: - for workload in subtask["workloads"]: - del workload["context_execution"] - return task - - -def task_get_status(uuid): - """Returns task by uuid. - - :param uuid: UUID of the task. - :raises TaskNotFound: if the task does not exist. - :returns: task dict with data on the task. - """ - return get_impl().task_get_status(uuid) - - -def task_create(values): - """Create task record in DB. - - :param values: dict with record values. - :returns: task dict with data on the task. - """ - return get_impl().task_create(values) - - -def task_update(uuid, values): - """Update task by values. - - :param uuid: UUID of the task. - :param values: dict with record values. - :raises TaskNotFound: if the task does not exist. - :returns: new updated task dict with data on the task. - """ - return get_impl().task_update(uuid, values) - - -def task_update_status(task_uuid, status, allowed_statuses): - """Update task status with specified value. - - :param task_uuid: string with UUID of Task instance. - :param status: new value to be written into db instead of status. - :param allowed_statuses: list of expected statuses to update in db. - :raises RallyException: if task not found with specified status. - :returns: the count of rows match as returned by the database's - "row count" feature - """ - return get_impl().task_update_status(task_uuid, allowed_statuses, - status) - - -def task_list(status=None, deployment=None, tags=None): - """Get a list of tasks. - - :param status: Task status to filter the returned list on. If set to - None, all the tasks will be returned. - :param deployment: Deployment UUID to filter the returned list on. - If set to None, tasks from all deployments will be - returned. - :param tags: A list of tags to filter tasks by. - :returns: A list of dicts with data on the tasks. 
- """ - return get_impl().task_list(status=status, - deployment=deployment, - tags=tags) - - -def task_delete(uuid, status=None): - """Delete a task. - - This method removes the task by the uuid, but if the status - argument is specified, then the task is removed only when these - statuses are equal otherwise an exception is raised. - - :param uuid: UUID of the task. - :raises TaskNotFound: if the task does not exist. - :raises TaskInvalidStatus: if the status of the task does not - equal to the status argument. - """ - return get_impl().task_delete(uuid, status=status) - - -def subtask_create(task_uuid, title, description=None, context=None): - """Create a subtask. - - :param task_uuid: string with UUID of Task instance. - :param title: subtask title. - :param description: subtask description. - :param context: subtask context dict. - :returns: a dict with data on the subtask. - """ - return get_impl().subtask_create(task_uuid, title, description, context) - - -def subtask_update(subtask_uuid, values): - """Update a subtask. - - :param subtask_uuid: string with UUID of Subtask instance. - :param values: dict with record values. - :returns: a dict with data on the subtask. - """ - return get_impl().subtask_update(subtask_uuid, values) - - -def workload_create(task_uuid, subtask_uuid, name, description, position, - runner, runner_type, hooks, context, sla, args, - context_execution=None, statistics=None): - """Create a workload. - - :param task_uuid: string with UUID of Task instance. - :param subtask_uuid: string with UUID of Subtask instance. - :param name: string with the name of Workload - :param description: string with the description of Workload - :param position: integer with an order of Workload in Subtask - :param runner: a dict with config of Workload runner - :param runner_type: a type of Workload runner - :param hooks: a list with Workload hooks config and results - :param context: a dict with config of Workload context - :param sla: a dict with config of Workload sla - :param args: a dict with arguments of Workload - :param context_execution: reserved for further refactoring - :param statistics: reserved for further refactoring - :returns: a dict with data on the workload. - """ - return get_impl().workload_create( - task_uuid, subtask_uuid, name=name, description=description, - position=position, runner=runner, runner_type=runner_type, - hooks=hooks, context=context, sla=sla, args=args, - context_execution=None, statistics=None) - - -def workload_get(workload_uuid): - """Get a workload. - - :param workload_uuid: string with UUID of Workload to fetch. - :returns: a dict with data on the workload. - """ - return get_impl().workload_get(workload_uuid) - - -def workload_data_create(task_uuid, workload_uuid, chunk_order, data): - """Create a workload data. - - :param task_uuid: string with UUID of Task instance. - :param workload_uuid: string with UUID of Workload instance. - :param chunk_order: ordinal index of workload data. - :param data: dict with record values on the workload data. - :returns: a dict with data on the workload data. - """ - return get_impl().workload_data_create(task_uuid, workload_uuid, - chunk_order, data) - - -def workload_set_results(workload_uuid, subtask_uuid, task_uuid, load_duration, - full_duration, start_time, sla_results, - hooks_results=None): - """Set workload results. - - :param workload_uuid: string with UUID of Workload instance. - :param subtask_uuid: string with UUID of Workload's parent Subtask. 
- :param task_uuid: string with UUID of Workload's parent Task. - :param load_duration: float value of Workload's load duration - :param full_duration: float value of Workload's full duration ( - generating load, executing contexts and etc) - :param start_time: a timestamp of load start - :param sla_results: a list with Workload's SLA results - :param hooks_results: a list with Workload's Hooks results - :returns: a dict with data on the workload. - """ - return get_impl().workload_set_results(workload_uuid=workload_uuid, - subtask_uuid=subtask_uuid, - task_uuid=task_uuid, - load_duration=load_duration, - full_duration=full_duration, - start_time=start_time, - sla_results=sla_results, - hooks_results=hooks_results) - - -def deployment_create(values): - """Create a deployment from the values dictionary. - - :param values: dict with record values on the deployment. - :returns: a dict with data on the deployment. - """ - return get_impl().deployment_create(values) - - -def deployment_delete(uuid): - """Delete a deployment by UUID. - - :param uuid: UUID of the deployment. - :raises DeploymentNotFound: if the deployment does not exist. - :raises DeploymentIsBusy: if the resource is not enough. - """ - return get_impl().deployment_delete(uuid) - - -def deployment_get(deployment): - """Get a deployment by UUID. - - :param deployment: UUID or name of the deployment. - :raises DeploymentNotFound: if the deployment does not exist. - :returns: a dict with data on the deployment. - """ - return get_impl().deployment_get(deployment) - - -def deployment_update(uuid, values): - """Update a deployment by values. - - :param uuid: UUID of the deployment. - :param values: dict with items to update. - :raises DeploymentNotFound: if the deployment does not exist. - :returns: a dict with data on the deployment. - """ - return get_impl().deployment_update(uuid, values) - - -def deployment_list(status=None, parent_uuid=None, name=None): - """Get list of deployments. - - :param status: if None returns any deployments with any status. - :param parent_uuid: filter by parent. If None, return only "root" - deployments. - :param name: name of deployment. - :returns: a list of dicts with data on the deployments. - """ - return get_impl().deployment_list(status=status, parent_uuid=parent_uuid, - name=name) - - -def resource_create(values): - """Create a resource from the values dictionary. - - :param values: a dict with data on the resource. - :returns: a dict with updated data on the resource. - """ - return get_impl().resource_create(values) - - -def resource_get_all(deployment_uuid, provider_name=None, type=None): - """Return resources of a deployment. - - :param deployment_uuid: filter by uuid of a deployment - :param provider_name: filter by provider_name, if is None, then - return all providers - :param type: filter by type, if is None, then return all types - :returns: a list of dicts with data on a resource - """ - return get_impl().resource_get_all(deployment_uuid, - provider_name=provider_name, - type=type) - - -def resource_delete(id): - """Delete a resource. - - :param id: ID of a resource. - :raises ResourceNotFound: if the resource does not exist. - """ - return get_impl().resource_delete(id) - - -def verifier_create(name, vtype, namespace, source, version, system_wide, - extra_settings=None): - """Create a verifier record. 
- - :param name: verifier name - :param vtype: verifier plugin name - :param namespace: verifier plugin namespace - :param source: path or URL to a verifier repo - :param version: branch, tag or commit ID of a verifier repo - :param system_wide: whether or not to use the system-wide environment - :param extra: verifier-specific installation options - :returns: a dict with verifier data - """ - return get_impl().verifier_create(name=name, vtype=vtype, - namespace=namespace, source=source, - version=version, system_wide=system_wide, - extra_settings=extra_settings) - - -def verifier_get(verifier_id): - """Get a verifier record. - - :param verifier_id: verifier name or UUID - :raises ResourceNotFound: if verifier does not exist - :returns: a dict with verifier data - """ - return get_impl().verifier_get(verifier_id) - - -def verifier_list(status=None): - """Get all verifier records. - - :param status: status to filter verifiers by - :returns: a list of dicts with verifiers data - """ - return get_impl().verifier_list(status) - - -def verifier_delete(verifier_id): - """Delete a verifier record. - - :param verifier_id: verifier name or UUID - :raises ResourceNotFound: if verifier does not exist - """ - get_impl().verifier_delete(verifier_id) - - -def verifier_update(verifier_id, **properties): - """Update a verifier record. - - :param verifier_id: verifier name or UUID - :param properties: a dict with new properties to update verifier record - :raises ResourceNotFound: if verifier does not exist - :returns: the updated dict with verifier data - """ - return get_impl().verifier_update(verifier_id, properties) - - -def verification_create(verifier_uuid, deployment_uuid, tags=None, - run_args=None): - """Create a verification record. - - :param verifier_uuid: verification UUID - :param deployment_uuid: deployment UUID - :param tags: a list of tags to assign them to verification - :param run_args: a dict with run arguments for verification - :returns: a dict with verification data - """ - return get_impl().verification_create(verifier_uuid, deployment_uuid, - tags, run_args) - - -def verification_get(verification_uuid): - """Get a verification record. - - :param verification_uuid: verification UUID - :raises ResourceNotFound: if verification does not exist - :returns: a dict with verification data - """ - return get_impl().verification_get(verification_uuid) - - -def verification_list(verifier_id=None, deployment_id=None, tags=None, - status=None): - """List all verification records. - - :param verifier_id: verifier name or UUID to filter verifications by - :param deployment_id: deployment name or UUID to filter verifications by - :param tags: tags to filter verifications by - :param status: status to filter verifications by - :returns: a list of dicts with verifications data - """ - return get_impl().verification_list(verifier_id, deployment_id, tags, - status) - - -def verification_delete(verification_uuid): - """Delete a verification record. - - :param verification_uuid: verification UUID - :raises ResourceNotFound: if verification does not exist - """ - return get_impl().verification_delete(verification_uuid) - - -def verification_update(uuid, **properties): - """Update a verification record. 
- - :param uuid: verification UUID - :param properties: a dict with new properties to update verification record - :raises ResourceNotFound: if verification does not exist - :returns: the updated dict with verification data - """ - return get_impl().verification_update(uuid, properties) - - -def register_worker(values): - """Register a new worker service at the specified hostname. - - :param values: A dict of values which must contain the following: - { - "hostname": the unique hostname which identifies - this worker service. - } - :returns: A worker. - :raises WorkerAlreadyRegistered: if worker already registered - """ - return get_impl().register_worker(values) - - -def get_worker(hostname): - """Retrieve a worker service record from the database. - - :param hostname: The hostname of the worker service. - :returns: A worker. - :raises WorkerNotFound: if worker not found - """ - return get_impl().get_worker(hostname) - - -def unregister_worker(hostname): - """Unregister this worker with the service registry. - - :param hostname: The hostname of the worker service. - :raises WorkerNotFound: if worker not found - """ - get_impl().unregister_worker(hostname) - - -def update_worker(hostname): - """Mark a worker as active by updating its "updated_at" property. - - :param hostname: The hostname of this worker service. - :raises WorkerNotFound: if worker not found - """ - get_impl().update_worker(hostname) diff --git a/rally/common/db/sqlalchemy/__init__.py b/rally/common/db/sqlalchemy/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/rally/common/db/sqlalchemy/alembic.ini b/rally/common/db/sqlalchemy/alembic.ini deleted file mode 100644 index 389e9f35..00000000 --- a/rally/common/db/sqlalchemy/alembic.ini +++ /dev/null @@ -1,68 +0,0 @@ -# A generic, single database configuration. - -[alembic] -# path to migration scripts -script_location = rally.common.db.sqlalchemy:migrations - -# template used to generate migration files -# file_template = %%(rev)s_%%(slug)s - -# max length of characters to apply to the -# "slug" field -#truncate_slug_length = 40 - -# set to 'true' to run the environment during -# the 'revision' command, regardless of autogenerate -# revision_environment = false - -# set to 'true' to allow .pyc and .pyo files without -# a source .py file to be detected as revisions in the -# versions/ directory -# sourceless = false - -# version location specification; this defaults -# to alembic/versions. 
When using multiple version -# directories, initial revisions must be specified with --version-path -# version_locations = %(here)s/bar %(here)s/bat alembic/versions - -# the output encoding used when revision files -# are written from script.py.mako -# output_encoding = utf-8 - -sqlalchemy.url = driver://user:pass@localhost/dbname - - -# Logging configuration -[loggers] -keys = root,sqlalchemy,alembic - -[handlers] -keys = console - -[formatters] -keys = generic - -[logger_root] -level = WARN -handlers = console -qualname = - -[logger_sqlalchemy] -level = WARN -handlers = -qualname = sqlalchemy.engine - -[logger_alembic] -level = INFO -handlers = -qualname = alembic - -[handler_console] -class = StreamHandler -args = (sys.stderr,) -level = NOTSET -formatter = generic - -[formatter_generic] -format = %(levelname)-5.5s [%(name)s] %(message)s -datefmt = %H:%M:%S diff --git a/rally/common/db/sqlalchemy/api.py b/rally/common/db/sqlalchemy/api.py deleted file mode 100644 index 0c7e907a..00000000 --- a/rally/common/db/sqlalchemy/api.py +++ /dev/null @@ -1,842 +0,0 @@ -# Copyright 2013: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -SQLAlchemy implementation for DB.API -""" - -import collections -import datetime as dt -import os -import time - -import alembic -from alembic import config as alembic_config -import alembic.migration as alembic_migration -from alembic import script as alembic_script -from oslo_config import cfg -from oslo_db import exception as db_exc -from oslo_db.sqlalchemy import session as db_session -from oslo_utils import timeutils -import six -from sqlalchemy import or_ -from sqlalchemy.orm.exc import NoResultFound -from sqlalchemy.orm import load_only as sa_loadonly - -from rally.common.db.sqlalchemy import models -from rally.common.i18n import _ -from rally import consts -from rally import exceptions -from rally.task import atomic -from rally.task.processing import charts - - -CONF = cfg.CONF - -_FACADE = None - -INITIAL_REVISION_UUID = "ca3626f62937" - - -def serialize_data(data): - if data is None: - return None - if isinstance(data, (six.integer_types, - six.string_types, - six.text_type, - dt.date, - dt.time, - float, - )): - return data - if isinstance(data, dict): - return {k: serialize_data(v) for k, v in data.items()} - if isinstance(data, (list, tuple)): - return [serialize_data(i) for i in data] - if hasattr(data, "_as_dict"): - # NOTE(andreykurilin): it is an instance of the Model. It support a - # method `_as_dict`, which should transform an object into dict - # (quite logical as from the method name), BUT it does some extra - # work - tries to load properties which were marked to not be loaded - # in particular request and fails since the session object is not - # present. That is why the code bellow makes a custom transformation. 
- result = {} - for key in data.__dict__: - if not key.startswith("_"): - result[key] = serialize_data(getattr(data, key)) - return result - - raise ValueError(_("Can not serialize %s") % data) - - -def serialize(fn): - def wrapper(*args, **kwargs): - result = fn(*args, **kwargs) - return serialize_data(result) - return wrapper - - -def _create_facade_lazily(): - global _FACADE - - if _FACADE is None: - _FACADE = db_session.EngineFacade.from_config(CONF) - - return _FACADE - - -def get_engine(): - facade = _create_facade_lazily() - return facade.get_engine() - - -def get_session(**kwargs): - facade = _create_facade_lazily() - return facade.get_session(**kwargs) - - -def get_backend(): - """The backend is this module itself.""" - return Connection() - - -def _alembic_config(): - path = os.path.join(os.path.dirname(__file__), "alembic.ini") - config = alembic_config.Config(path) - return config - - -class Connection(object): - - def engine_reset(self): - global _FACADE - - _FACADE = None - - def schema_cleanup(self): - models.drop_db() - - def schema_revision(self, config=None, engine=None, detailed=False): - """Current database revision. - - :param config: Instance of alembic config - :param engine: Instance of DB engine - :param detailed: whether to return a dict with detailed data - :rtype detailed: bool - :returns: Database revision - :rtype: string - :rtype: dict - """ - engine = engine or get_engine() - with engine.connect() as conn: - context = alembic_migration.MigrationContext.configure(conn) - revision = context.get_current_revision() - if detailed: - config = config or _alembic_config() - sc_dir = alembic_script.ScriptDirectory.from_config(config) - return {"revision": revision, - "current_head": sc_dir.get_current_head()} - return revision - - def schema_upgrade(self, revision=None, config=None, engine=None): - """Used for upgrading database. - - :param revision: Desired database version - :type revision: string - :param config: Instance of alembic config - :param engine: Instance of DB engine - """ - revision = revision or "head" - config = config or _alembic_config() - engine = engine or get_engine() - - if self.schema_revision() is None: - self.schema_stamp(INITIAL_REVISION_UUID, config=config) - - alembic.command.upgrade(config, revision or "head") - - def schema_create(self, config=None, engine=None): - """Create database schema from models description. - - Can be used for initial installation instead of upgrade('head'). - :param config: Instance of alembic config - :param engine: Instance of DB engine - """ - engine = engine or get_engine() - - # NOTE(viktors): If we will use metadata.create_all() for non empty db - # schema, it will only add the new tables, but leave - # existing as is. So we should avoid of this situation. - if self.schema_revision(engine=engine) is not None: - raise db_exc.DbMigrationError("DB schema is already under version" - " control. Use upgrade() instead") - - models.BASE.metadata.create_all(engine) - self.schema_stamp("head", config=config) - - def schema_stamp(self, revision, config=None): - """Stamps database with provided revision. - - Don't run any migrations. - :param revision: Should match one from repository or head - to stamp - database with most recent revision - :type revision: string - :param config: Instance of alembic config - """ - config = config or _alembic_config() - return alembic.command.stamp(config, revision=revision) - - def model_query(self, model, session=None): - """The helper method to create query. 
- - :param model: The instance of - :class:`rally.common.db.sqlalchemy.models.RallyBase` to - request it. - :param session: Reuse the session object or get new one if it is - None. - :returns: The query object. - :raises Exception: when the model is not a sublcass of - :class:`rally.common.db.sqlalchemy.models.RallyBase`. - """ - session = session or get_session() - query = session.query(model) - - def issubclassof_rally_base(obj): - return isinstance(obj, type) and issubclass(obj, models.RallyBase) - - if not issubclassof_rally_base(model): - raise Exception(_("The model should be a subclass of RallyBase")) - - return query - - def _tags_get(self, uuid, tag_type, session=None): - tags = (self.model_query(models.Tag, session=session). - filter_by(uuid=uuid, type=tag_type).all()) - - return list(set(t.tag for t in tags)) - - def _uuids_by_tags_get(self, tag_type, tags): - tags = (self.model_query(models.Tag). - filter(models.Tag.type == tag_type, - models.Tag.tag.in_(tags)).all()) - - return list(set(tag.uuid for tag in tags)) - - def _task_get(self, uuid, load_only=None, session=None): - pre_query = self.model_query(models.Task, session=session) - if load_only: - pre_query = pre_query.options(sa_loadonly(load_only)) - - task = pre_query.filter_by(uuid=uuid).first() - if not task: - raise exceptions.TaskNotFound(uuid=uuid) - task.tags = sorted(self._tags_get(uuid, consts.TagType.TASK, session)) - return task - - def _task_workload_data_get_all(self, workload_uuid): - session = get_session() - with session.begin(): - results = (self.model_query(models.WorkloadData, session=session). - filter_by(workload_uuid=workload_uuid). - order_by(models.WorkloadData.chunk_order.asc())) - - return sorted([raw for workload_data in results - for raw in workload_data.chunk_data["raw"]], - key=lambda x: x["timestamp"]) - - @serialize - def task_get(self, uuid=None, detailed=False): - session = get_session() - task = serialize_data(self._task_get(uuid, session=session)) - - if detailed: - task["subtasks"] = self._subtasks_get_all_by_task_uuid( - uuid, session=session) - - return task - - @serialize - def task_get_status(self, uuid): - return self._task_get(uuid, load_only="status").status - - @serialize - def task_create(self, values): - tags = values.pop("tags", None) - # TODO(ikhudoshyn): currently 'input_task' - # does not come in 'values' - # After completely switching to the new - # DB schema in API we should reconstruct - # input_task's from associated workloads - # the same is true for 'pass_sla', - # 'task_duration', 'validation_result' - # and 'validation_duration' - task = models.Task() - task.update(values) - task.save() - - if tags: - for t in set(tags): - tag = models.Tag() - tag.update({"uuid": task.uuid, - "type": consts.TagType.TASK, - "tag": t}) - tag.save() - task.tags = sorted(self._tags_get(task.uuid, consts.TagType.TASK)) - return task - - @serialize - def task_update(self, uuid, values): - session = get_session() - values.pop("uuid", None) - tags = values.pop("tags", None) - with session.begin(): - task = self._task_get(uuid, session=session) - task.update(values) - - if tags: - for t in set(tags): - tag = models.Tag() - tag.update({"uuid": task.uuid, - "type": consts.TagType.TASK, - "tag": t}) - tag.save() - # take an updated instance of task - task = self._task_get(uuid, session=session) - return task - - def task_update_status(self, uuid, statuses, status_value): - session = get_session() - result = ( - session.query(models.Task).filter( - models.Task.uuid == uuid, 
models.Task.status.in_( - statuses)). - update({"status": status_value}, synchronize_session=False) - ) - if not result: - status = " or ".join(statuses) - msg = _("Task with uuid='%(uuid)s' and in statuses:'" - "%(statuses)s' not found.'") % {"uuid": uuid, - "statuses": status} - raise exceptions.RallyException(msg) - return result - - @serialize - def task_list(self, status=None, deployment=None, tags=None): - session = get_session() - tasks = [] - with session.begin(): - query = self.model_query(models.Task) - - filters = {} - if status is not None: - filters["status"] = status - if deployment is not None: - filters["deployment_uuid"] = self.deployment_get( - deployment)["uuid"] - if filters: - query = query.filter_by(**filters) - - if tags: - uuids = self._uuids_by_tags_get( - consts.TagType.TASK, tags) - query = query.filter(models.Task.uuid.in_(uuids)) - - for task in query.all(): - task.tags = sorted( - self._tags_get(task.uuid, consts.TagType.TASK, session)) - tasks.append(task) - - return tasks - - def task_delete(self, uuid, status=None): - session = get_session() - with session.begin(): - query = base_query = (self.model_query(models.Task). - filter_by(uuid=uuid)) - if status is not None: - query = base_query.filter_by(status=status) - - (self.model_query(models.WorkloadData).filter_by(task_uuid=uuid). - delete(synchronize_session=False)) - - (self.model_query(models.Workload).filter_by(task_uuid=uuid). - delete(synchronize_session=False)) - - (self.model_query(models.Subtask).filter_by(task_uuid=uuid). - delete(synchronize_session=False)) - - (self.model_query(models.Tag).filter_by( - uuid=uuid, type=consts.TagType.TASK). - delete(synchronize_session=False)) - - count = query.delete(synchronize_session=False) - if not count: - if status is not None: - task = base_query.first() - if task: - raise exceptions.TaskInvalidStatus(uuid=uuid, - require=status, - actual=task.status) - raise exceptions.TaskNotFound(uuid=uuid) - - def _subtasks_get_all_by_task_uuid(self, task_uuid, session=None): - result = (self.model_query(models.Subtask, session=session).filter_by( - task_uuid=task_uuid).all()) - subtasks = [] - for subtask in result: - subtask = serialize_data(subtask) - subtask["workloads"] = [] - workloads = (self.model_query(models.Workload, session=session). 
- filter_by(subtask_uuid=subtask["uuid"]).all()) - for workload in workloads: - workload.data = self._task_workload_data_get_all( - workload.uuid) - subtask["workloads"].append(serialize_data(workload)) - subtasks.append(subtask) - return subtasks - - @serialize - def subtask_create(self, task_uuid, title, description=None, context=None): - subtask = models.Subtask(task_uuid=task_uuid) - subtask.update({ - "title": title, - "description": description or "", - "context": context or {}, - }) - subtask.save() - return subtask - - @serialize - def subtask_update(self, subtask_uuid, values): - subtask = self.model_query(models.Subtask).filter_by( - uuid=subtask_uuid).first() - subtask.update(values) - subtask.save() - return subtask - - @serialize - def workload_get(self, workload_uuid): - return self.model_query(models.Workload).filter_by( - uuid=workload_uuid).first() - - @serialize - def workload_create(self, task_uuid, subtask_uuid, name, description, - position, runner, runner_type, hooks, context, sla, - args, context_execution, statistics): - workload = models.Workload(task_uuid=task_uuid, - subtask_uuid=subtask_uuid, - name=name, - description=description, - position=position, - runner=runner, - runner_type=runner_type, - hooks=hooks, - context=context, - sla=sla, - args=args, - context_execution=context_execution, - statistics=statistics) - workload.save() - return workload - - @serialize - def workload_data_create(self, task_uuid, workload_uuid, chunk_order, - data): - workload_data = models.WorkloadData(task_uuid=task_uuid, - workload_uuid=workload_uuid) - - raw_data = data.get("raw", []) - iter_count = len(raw_data) - - failed_iter_count = 0 - - started_at = float("inf") - finished_at = 0 - for d in raw_data: - if d.get("error"): - failed_iter_count += 1 - - timestamp = d["timestamp"] - duration = d["duration"] - finished = timestamp + duration - - if timestamp < started_at: - started_at = timestamp - - if finished > finished_at: - finished_at = finished - - now = time.time() - if started_at == float("inf"): - started_at = now - if finished_at == 0: - finished_at = now - - workload_data.update({ - "task_uuid": task_uuid, - "workload_uuid": workload_uuid, - "chunk_order": chunk_order, - "iteration_count": iter_count, - "failed_iteration_count": failed_iter_count, - "chunk_data": {"raw": raw_data}, - # TODO(ikhudoshyn) - "chunk_size": 0, - "compressed_chunk_size": 0, - "started_at": dt.datetime.fromtimestamp(started_at), - "finished_at": dt.datetime.fromtimestamp(finished_at) - }) - workload_data.save() - return workload_data - - @serialize - def workload_set_results(self, workload_uuid, subtask_uuid, task_uuid, - load_duration, full_duration, start_time, - sla_results, hooks_results): - session = get_session() - with session.begin(): - workload_results = self._task_workload_data_get_all(workload_uuid) - - iter_count = len(workload_results) - - failed_iter_count = 0 - max_duration = None - min_duration = None - - for d in workload_results: - if d.get("error"): - failed_iter_count += 1 - - duration = d.get("duration", 0) - - if max_duration is None or duration > max_duration: - max_duration = duration - - if min_duration is None or min_duration > duration: - min_duration = duration - - atomics = collections.OrderedDict() - - for itr in workload_results: - merged_atomic = atomic.merge_atomic(itr["atomic_actions"]) - for name, value in merged_atomic.items(): - duration = value["duration"] - count = value["count"] - if name not in atomics or count > atomics[name]["count"]: - atomics[name] 
= {"min_duration": duration, - "max_duration": duration, - "count": count} - elif count == atomics[name]["count"]: - if duration < atomics[name]["min_duration"]: - atomics[name]["min_duration"] = duration - if duration > atomics[name]["max_duration"]: - atomics[name]["max_duration"] = duration - - durations_stat = charts.MainStatsTable( - {"total_iteration_count": iter_count, - "statistics": {"atomics": atomics}}) - - for itr in workload_results: - durations_stat.add_iteration(itr) - - sla = sla_results or [] - # NOTE(ikhudoshyn): we call it 'pass_sla' - # for the sake of consistency with other models - # so if no SLAs were specified, then we assume pass_sla == True - success = all([s.get("success") for s in sla]) - - session.query(models.Workload).filter_by( - uuid=workload_uuid).update( - { - "sla_results": {"sla": sla}, - "context_execution": {}, - "hooks": hooks_results or [], - "load_duration": load_duration, - "full_duration": full_duration, - "min_duration": min_duration, - "max_duration": max_duration, - "total_iteration_count": iter_count, - "failed_iteration_count": failed_iter_count, - "start_time": start_time, - "statistics": {"durations": durations_stat.to_dict(), - "atomics": atomics}, - "pass_sla": success} - ) - task_values = { - "task_duration": models.Task.task_duration + load_duration} - if not success: - task_values["pass_sla"] = False - - subtask_values = { - "duration": models.Subtask.duration + load_duration} - if not success: - subtask_values["pass_sla"] = False - session.query(models.Task).filter_by(uuid=task_uuid).update( - task_values) - - session.query(models.Subtask).filter_by(uuid=subtask_uuid).update( - subtask_values) - - def _deployment_get(self, deployment, session=None): - stored_deployment = self.model_query( - models.Deployment, - session=session).filter_by(name=deployment).first() - if not stored_deployment: - stored_deployment = self.model_query( - models.Deployment, - session=session).filter_by(uuid=deployment).first() - - if not stored_deployment: - raise exceptions.DeploymentNotFound(deployment=deployment) - return stored_deployment - - @serialize - def deployment_create(self, values): - deployment = models.Deployment() - try: - deployment.update(values) - deployment.save() - except db_exc.DBDuplicateEntry: - raise exceptions.DeploymentNameExists(deployment=values["name"]) - return deployment - - def deployment_delete(self, uuid): - session = get_session() - with session.begin(): - count = (self.model_query(models.Resource, session=session). - filter_by(deployment_uuid=uuid).count()) - if count: - raise exceptions.DeploymentIsBusy(uuid=uuid) - - count = (self.model_query(models.Deployment, session=session). - filter_by(uuid=uuid).delete(synchronize_session=False)) - if not count: - raise exceptions.DeploymentNotFound(deployment=uuid) - - @serialize - def deployment_get(self, deployment): - return self._deployment_get(deployment) - - @serialize - def deployment_update(self, deployment, values): - session = get_session() - values.pop("uuid", None) - with session.begin(): - dpl = self._deployment_get(deployment, session=session) - dpl.update(values) - return dpl - - @serialize - def deployment_list(self, status=None, parent_uuid=None, name=None): - query = (self.model_query(models.Deployment). 
- filter_by(parent_uuid=parent_uuid)) - - if name: - query = query.filter_by(name=name) - if status: - query = query.filter_by(status=status) - return query.all() - - @serialize - def resource_create(self, values): - resource = models.Resource() - resource.update(values) - resource.save() - return resource - - @serialize - def resource_get_all(self, deployment_uuid, provider_name=None, type=None): - query = (self.model_query(models.Resource). - filter_by(deployment_uuid=deployment_uuid)) - if provider_name is not None: - query = query.filter_by(provider_name=provider_name) - if type is not None: - query = query.filter_by(type=type) - return query.all() - - def resource_delete(self, id): - count = (self.model_query(models.Resource). - filter_by(id=id).delete(synchronize_session=False)) - if not count: - raise exceptions.ResourceNotFound(id=id) - - @serialize - def verifier_create(self, name, vtype, namespace, source, version, - system_wide, extra_settings=None): - verifier = models.Verifier() - properties = {"name": name, "type": vtype, "namespace": namespace, - "source": source, "extra_settings": extra_settings, - "version": version, "system_wide": system_wide} - verifier.update(properties) - verifier.save() - return verifier - - @serialize - def verifier_get(self, verifier_id): - return self._verifier_get(verifier_id) - - def _verifier_get(self, verifier_id, session=None): - verifier = self.model_query( - models.Verifier, session=session).filter( - or_(models.Verifier.name == verifier_id, - models.Verifier.uuid == verifier_id)).first() - if not verifier: - raise exceptions.ResourceNotFound(id=verifier_id) - return verifier - - @serialize - def verifier_list(self, status=None): - query = self.model_query(models.Verifier) - if status: - query = query.filter_by(status=status) - return query.all() - - def verifier_delete(self, verifier_id): - session = get_session() - with session.begin(): - query = self.model_query( - models.Verifier, session=session).filter( - or_(models.Verifier.name == verifier_id, - models.Verifier.uuid == verifier_id)) - count = query.delete(synchronize_session=False) - if not count: - raise exceptions.ResourceNotFound(id=verifier_id) - - @serialize - def verifier_update(self, verifier_id, properties): - session = get_session() - with session.begin(): - verifier = self._verifier_get(verifier_id) - verifier.update(properties) - verifier.save() - return verifier - - @serialize - def verification_create(self, verifier_id, deployment_id, tags=None, - run_args=None): - verifier = self._verifier_get(verifier_id) - deployment = self._deployment_get(deployment_id) - verification = models.Verification() - verification.update({"verifier_uuid": verifier.uuid, - "deployment_uuid": deployment["uuid"], - "run_args": run_args}) - verification.save() - - if tags: - for t in set(tags): - tag = models.Tag() - tag.update({"uuid": verification.uuid, - "type": consts.TagType.VERIFICATION, - "tag": t}) - tag.save() - - return verification - - @serialize - def verification_get(self, verification_uuid): - verification = self._verification_get(verification_uuid) - verification.tags = sorted(self._tags_get(verification.uuid, - consts.TagType.VERIFICATION)) - return verification - - def _verification_get(self, verification_uuid, session=None): - verification = self.model_query( - models.Verification, session=session).filter_by( - uuid=verification_uuid).first() - if not verification: - raise exceptions.ResourceNotFound(id=verification_uuid) - return verification - - @serialize - def 
verification_list(self, verifier_id=None, deployment_id=None, - tags=None, status=None): - session = get_session() - with session.begin(): - filter_by = {} - if verifier_id: - verifier = self._verifier_get(verifier_id, session=session) - filter_by["verifier_uuid"] = verifier.uuid - if deployment_id: - deployment = self._deployment_get(deployment_id, - session=session) - filter_by["deployment_uuid"] = deployment.uuid - if status: - filter_by["status"] = status - - query = self.model_query(models.Verification, session=session) - if filter_by: - query = query.filter_by(**filter_by) - - def add_tags_to_verifications(verifications): - for verification in verifications: - verification.tags = sorted(self._tags_get( - verification.uuid, consts.TagType.VERIFICATION)) - return verifications - - if tags: - uuids = self._uuids_by_tags_get( - consts.TagType.VERIFICATION, tags) - query = query.filter(models.Verification.uuid.in_(uuids)) - - return add_tags_to_verifications(query.all()) - - def verification_delete(self, verification_uuid): - session = get_session() - with session.begin(): - count = self.model_query( - models.Verification, session=session).filter_by( - uuid=verification_uuid).delete(synchronize_session=False) - if not count: - raise exceptions.ResourceNotFound(id=verification_uuid) - - @serialize - def verification_update(self, verification_uuid, properties): - session = get_session() - with session.begin(): - verification = self._verification_get(verification_uuid) - verification.update(properties) - verification.save() - return verification - - @serialize - def register_worker(self, values): - try: - worker = models.Worker() - worker.update(values) - worker.update({"updated_at": timeutils.utcnow()}) - worker.save() - return worker - except db_exc.DBDuplicateEntry: - raise exceptions.WorkerAlreadyRegistered( - worker=values["hostname"]) - - @serialize - def get_worker(self, hostname): - try: - return (self.model_query(models.Worker). - filter_by(hostname=hostname).one()) - except NoResultFound: - raise exceptions.WorkerNotFound(worker=hostname) - - def unregister_worker(self, hostname): - count = (self.model_query(models.Worker). - filter_by(hostname=hostname).delete()) - if count == 0: - raise exceptions.WorkerNotFound(worker=hostname) - - def update_worker(self, hostname): - count = (self.model_query(models.Worker). - filter_by(hostname=hostname). - update({"updated_at": timeutils.utcnow()})) - if count == 0: - raise exceptions.WorkerNotFound(worker=hostname) diff --git a/rally/common/db/sqlalchemy/migrations/README.rst b/rally/common/db/sqlalchemy/migrations/README.rst deleted file mode 100644 index 26af212e..00000000 --- a/rally/common/db/sqlalchemy/migrations/README.rst +++ /dev/null @@ -1,87 +0,0 @@ -.. - Copyright 2016 Mirantis Inc. All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -.. 
_db_migrations: - -Database upgrade in Rally -========================= - -Information for users ---------------------- - -Rally supports DB schema versioning (schema versions are called *revisions*) -and migration (upgrade to the latest revision). - -End users have the following options: - -- Print the current DB revision. - - .. code-block:: shell - - rally-manage db revision - -- Upgrade an existing DB to the latest state. - - This is needed when an existing Rally installation is being - upgraded to a newer version. In this case the user should run - - .. code-block:: shell - - rally-manage db upgrade - - **AFTER** upgrading the Rally package. The DB schema - will be upgraded to the latest state and all existing data will be kept. - - **WARNING** Rally does NOT support DB schema downgrade. Consider - backing up the existing database so the change can be rolled back. - -Information for developers --------------------------- - -DB migration in Rally is implemented via the *alembic* package. - -It is highly recommended to get familiar with its documentation, -available at the link_, before proceeding. - -A developer who is about to change the existing DB schema should -create a new DB revision and a migration script with one of the -following commands. - -.. code-block:: shell - - alembic --config rally/common/db/sqlalchemy/alembic.ini revision -m <message> - -or - -.. code-block:: shell - - alembic --config rally/common/db/sqlalchemy/alembic.ini revision --autogenerate -m <message> - -It will generate a migration script -- a file named `<rev>_<slug>.py` -located in `rally/common/db/sqlalchemy/migrations/versions`. - -With the ``--autogenerate`` parameter, alembic does some routine work for -the developer; for example, it emits SQLite-compatible batch expressions for -migrations. - -The generated script should then be reviewed, edited if necessary, -and added to the Rally source tree. - -**WARNING** Even though alembic supports schema downgrade, the migration -scripts shipped with Rally do not contain actual downgrade code. - -.. references: - -.. _link: https://alembic.readthedocs.org diff --git a/rally/common/db/sqlalchemy/migrations/env.py b/rally/common/db/sqlalchemy/migrations/env.py deleted file mode 100644 index b8246989..00000000 --- a/rally/common/db/sqlalchemy/migrations/env.py +++ /dev/null @@ -1,47 +0,0 @@ -# Copyright (c) 2016 Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from alembic import context - -from rally.common.db.sqlalchemy import api -from rally.common.db.sqlalchemy import models - -# add your model's MetaData object here -# for 'autogenerate' support -# from myapp import mymodel -target_metadata = models.BASE.metadata - -# other values from the config, defined by the needs of env.py, -# can be acquired: -# my_important_option = config.get_main_option("my_important_option") -# ... etc. - - -def run_migrations_online(): - """Run migrations in 'online' mode.
- - In this scenario we need to create an Engine - and associate a connection with the context. - """ - engine = api.get_engine() - with engine.connect() as connection: - context.configure(connection=connection, - render_as_batch=True, - target_metadata=target_metadata) - with context.begin_transaction(): - context.run_migrations() - - -run_migrations_online() diff --git a/rally/common/db/sqlalchemy/migrations/script.py.mako b/rally/common/db/sqlalchemy/migrations/script.py.mako deleted file mode 100644 index bb44cbfa..00000000 --- a/rally/common/db/sqlalchemy/migrations/script.py.mako +++ /dev/null @@ -1,41 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""${message} - -Revision ID: ${up_revision} -Revises: ${down_revision | comma,n} -Create Date: ${create_date} - -""" - -from alembic import op -import sqlalchemy as sa -${imports if imports else ""} - -${"from rally import exceptions" if not downgrades else ""} - -# revision identifiers, used by Alembic. -revision = "${up_revision}" -down_revision = "${down_revision}" -branch_labels = ${repr(branch_labels)} -depends_on = ${repr(depends_on)} - - -def upgrade(): - ${upgrades if upgrades else "pass"} - - -def downgrade(): - ${downgrades if downgrades else "raise exceptions.DowngradeNotSupported()"} diff --git a/rally/common/db/sqlalchemy/migrations/versions/08e1515a576c_fix_invalid_verification_logs.py b/rally/common/db/sqlalchemy/migrations/versions/08e1515a576c_fix_invalid_verification_logs.py deleted file mode 100644 index f9eb973e..00000000 --- a/rally/common/db/sqlalchemy/migrations/versions/08e1515a576c_fix_invalid_verification_logs.py +++ /dev/null @@ -1,112 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""fix invalid verification logs - -Revision ID: 08e1515a576c -Revises: 54e844ebfbc3 -Create Date: 2016-09-12 15:47:11.279610 - -""" - -# revision identifiers, used by Alembic. 
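
For orientation, here is a minimal sketch of what a hand-written migration built from the ``script.py.mako`` template above could look like; the revision id and the added column are invented for illustration, and ``downgrade()`` follows the Rally convention of refusing to run.

.. code-block:: python

    # Hypothetical migration built from the script.py.mako template above.
    from alembic import op
    import sqlalchemy as sa

    from rally import exceptions

    # revision identifiers, used by Alembic.
    revision = "0123456789ab"      # invented id, for illustration only
    down_revision = "08e1515a576c"
    branch_labels = None
    depends_on = None


    def upgrade():
        # render_as_batch=True in env.py (above) makes this emit the
        # copy-table-and-swap workaround that SQLite needs for ALTER TABLE.
        with op.batch_alter_table("tasks") as batch_op:
            batch_op.add_column(sa.Column("notes", sa.Text(), nullable=True))


    def downgrade():
        raise exceptions.DowngradeNotSupported()
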
-revision = "08e1515a576c" -down_revision = "54e844ebfbc3" -branch_labels = None -depends_on = None - - -import json -import uuid - -from alembic import op -import sqlalchemy as sa - -from rally import consts -from rally import exceptions - - -def UUID(): - return str(uuid.uuid4()) - - -task_helper = sa.Table( - "tasks", - sa.MetaData(), - sa.Column("id", sa.Integer, primary_key=True, autoincrement=True), - sa.Column("uuid", sa.String(36), default=UUID, nullable=False), - sa.Column("status", sa.Enum(*list(consts.TaskStatus), - name="enum_tasks_status"), - default=consts.TaskStatus.INIT, nullable=False), - sa.Column("verification_log", sa.Text, default=""), - sa.Column("tag", sa.String(64), default=""), - sa.Column("deployment_uuid", sa.String(36), nullable=False) -) - - -def _make_trace(etype, emsg, raw_trace=None): - trace = "Traceback (most recent call last):\n" - if raw_trace is None: - trace += "\n\t\t...n/a..\n\n" - else: - trace += "".join(json.loads(raw_trace)) - - trace += "%s: %s" % (etype, emsg) - return trace - - -def upgrade(): - connection = op.get_bind() - for task in connection.execute(task_helper.select()): - verification_log = task.verification_log - - if not verification_log: - continue - - new_value = None - - verification_log = json.loads(verification_log) - if isinstance(verification_log, list): - new_value = {"etype": verification_log[0], - "msg": verification_log[1], - "trace": verification_log[2]} - if new_value["trace"].startswith("["): - # NOTE(andreykurilin): For several cases traceback was - # transmitted as list instead of string. - new_value["trace"] = _make_trace(*verification_log) - else: - if verification_log.startswith("No such file"): - new_value = {"etype": IOError.__name__, - "msg": verification_log} - new_value["trace"] = _make_trace(new_value["etype"], - new_value["msg"]) - elif verification_log.startswith("Task config is invalid"): - new_value = {"etype": exceptions.InvalidTaskException.__name__, - "msg": verification_log} - new_value["trace"] = _make_trace(new_value["etype"], - new_value["msg"]) - elif verification_log.startswith("Failed to load task"): - new_value = {"etype": "FailedToLoadTask", - "msg": verification_log} - new_value["trace"] = _make_trace(new_value["etype"], - new_value["msg"]) - - if new_value: - connection.execute(task_helper.update().where( - task_helper.c.id == task.id).values( - verification_log=json.dumps(new_value))) - - -def downgrade(): - raise exceptions.DowngradeNotSupported() diff --git a/rally/common/db/sqlalchemy/migrations/versions/3177d36ea270_merge_credentials_from_users_and_admin.py b/rally/common/db/sqlalchemy/migrations/versions/3177d36ea270_merge_credentials_from_users_and_admin.py deleted file mode 100644 index 9e7ec463..00000000 --- a/rally/common/db/sqlalchemy/migrations/versions/3177d36ea270_merge_credentials_from_users_and_admin.py +++ /dev/null @@ -1,76 +0,0 @@ -# Copyright (c) 2016 Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -"""Merge credentials from users and admin - -Revision ID: 3177d36ea270 -Revises: ca3626f62937 -Create Date: 2016-03-01 16:01:38.747048 - -""" - -# revision identifiers, used by Alembic. -revision = "3177d36ea270" -down_revision = "ca3626f62937" -branch_labels = None -depends_on = None - - -from alembic import op -import sqlalchemy as sa - -from rally import exceptions - - -deployments_helper = sa.Table( - "deployments", - sa.MetaData(), - sa.Column("id", sa.Integer, primary_key=True, autoincrement=True), - sa.Column("admin", sa.types.PickleType, nullable=True), - sa.Column("users", sa.types.PickleType, default=[], nullable=False), - sa.Column("credentials", sa.types.PickleType, nullable=True), -) - - -def upgrade(): - with op.batch_alter_table("deployments", schema=None) as batch_op: - batch_op.add_column( - sa.Column("credentials", sa.PickleType(), nullable=True)) - - connection = op.get_bind() - for deployment in connection.execute(deployments_helper.select()): - creds = [ - ["openstack", - { - "admin": deployment.admin, - "users": deployment.users - }] - ] - connection.execute( - deployments_helper.update().where( - deployments_helper.c.id == deployment.id).values( - credentials=creds)) - - with op.batch_alter_table("deployments", schema=None) as batch_op: - batch_op.alter_column("credentials", - existing_type=sa.PickleType, - existing_nullable=True, - nullable=False) - batch_op.drop_column("admin") - batch_op.drop_column("users") - - -def downgrade(): - raise exceptions.DowngradeNotSupported() diff --git a/rally/common/db/sqlalchemy/migrations/versions/32fada9b2fde_remove_admin_domain_name.py b/rally/common/db/sqlalchemy/migrations/versions/32fada9b2fde_remove_admin_domain_name.py deleted file mode 100644 index 4edd6f81..00000000 --- a/rally/common/db/sqlalchemy/migrations/versions/32fada9b2fde_remove_admin_domain_name.py +++ /dev/null @@ -1,75 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Remove admin domain name - -Revision ID: 32fada9b2fde -Revises: 5b983f0c9b9a -Create Date: 2016-08-29 08:32:30.818019 - -""" - -# revision identifiers, used by Alembic. 
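
The credentials merge above follows a three-step dance that recurs throughout these migrations: add the new column as nullable so existing rows stay valid, backfill it row by row, and only then tighten it to NOT NULL and drop the superseded columns. A skeleton of that pattern (the ``items`` table and its columns are invented):

.. code-block:: python

    from alembic import op
    import sqlalchemy as sa

    items = sa.Table("items", sa.MetaData(),
                     sa.Column("id", sa.Integer, primary_key=True),
                     sa.Column("old_a", sa.Text),
                     sa.Column("old_b", sa.Text),
                     sa.Column("merged", sa.Text))


    def upgrade():
        # 1. Add the new column as nullable; existing rows stay valid.
        with op.batch_alter_table("items") as batch_op:
            batch_op.add_column(sa.Column("merged", sa.Text(), nullable=True))

        # 2. Backfill it from the columns being replaced.
        conn = op.get_bind()
        for row in conn.execute(items.select()):
            conn.execute(items.update().where(items.c.id == row.id).values(
                merged="%s|%s" % (row.old_a, row.old_b)))

        # 3. Only now enforce NOT NULL and drop the superseded columns.
        with op.batch_alter_table("items") as batch_op:
            batch_op.alter_column("merged", existing_type=sa.Text(),
                                  nullable=False)
            batch_op.drop_column("old_a")
            batch_op.drop_column("old_b")
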
-revision = "32fada9b2fde" -down_revision = "6ad4f426f005" -branch_labels = None -depends_on = None - -from alembic import op -import sqlalchemy as sa - -from rally.common.db.sqlalchemy import types as sa_types -from rally import exceptions - - -deployments_helper = sa.Table( - "deployments", - sa.MetaData(), - sa.Column("id", sa.Integer, primary_key=True, autoincrement=True), - sa.Column( - "config", - sa_types.MutableJSONEncodedDict, - default={}, - nullable=False, - ) -) - - -def upgrade(): - connection = op.get_bind() - for deployment in connection.execute(deployments_helper.select()): - conf = deployment.config - if conf["type"] != "ExistingCloud": - continue - - should_update = False - - if "admin_domain_name" in conf["admin"]: - del conf["admin"]["admin_domain_name"] - should_update = True - if "users" in conf: - for user in conf["users"]: - if "admin_domain_name" in user: - del user["admin_domain_name"] - should_update = True - - if should_update: - connection.execute( - deployments_helper.update().where( - deployments_helper.c.id == deployment.id).values( - config=conf)) - - -def downgrade(): - raise exceptions.DowngradeNotSupported() diff --git a/rally/common/db/sqlalchemy/migrations/versions/35fe16d4ab1c_update_tasks_based_on_workloads.py b/rally/common/db/sqlalchemy/migrations/versions/35fe16d4ab1c_update_tasks_based_on_workloads.py deleted file mode 100644 index 0b3c911a..00000000 --- a/rally/common/db/sqlalchemy/migrations/versions/35fe16d4ab1c_update_tasks_based_on_workloads.py +++ /dev/null @@ -1,104 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""update-tasks-based-on-workloads - -Update "pass_sla" and "duration" fields of tasks and subtasks based on -workloads. - -Revision ID: 35fe16d4ab1c -Revises: 92aaaa2a6bb3 -Create Date: 2017-06-07 19:50:03.572493 - -""" - -from alembic import op -import sqlalchemy as sa - -from rally.common.db.sqlalchemy import types as sa_types -from rally import exceptions - -# revision identifiers, used by Alembic. 
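
A detail worth copying from the migration above is the ``should_update`` flag: rows are rewritten only when the config actually changed, which keeps the migration cheap on large tables. The key-stripping logic, reduced to a pure function on an abbreviated config layout:

.. code-block:: python

    def strip_admin_domain_name(conf):
        """Drop 'admin_domain_name' everywhere; return True if conf changed."""
        changed = False
        for entry in [conf.get("admin", {})] + conf.get("users", []):
            if "admin_domain_name" in entry:
                del entry["admin_domain_name"]
                changed = True
        return changed

    conf = {"type": "ExistingCloud",
            "admin": {"username": "admin", "admin_domain_name": "Default"},
            "users": [{"username": "u1", "admin_domain_name": "Default"}]}
    assert strip_admin_domain_name(conf) is True
    assert "admin_domain_name" not in conf["admin"]
    assert strip_admin_domain_name(conf) is False   # second pass: no-op
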
-revision = "35fe16d4ab1c" -down_revision = "92aaaa2a6bb3" -branch_labels = None -depends_on = None - - -task_helper = sa.Table( - "tasks", - sa.MetaData(), - sa.Column("id", sa.Integer, primary_key=True, autoincrement=True), - sa.Column("uuid", sa.String(36), nullable=False), - sa.Column("task_duration", sa.Float()), - sa.Column("pass_sla", sa.Boolean()) -) - -subtask_helper = sa.Table( - "subtasks", - sa.MetaData(), - sa.Column("id", sa.Integer, primary_key=True, autoincrement=True), - sa.Column("uuid", sa.String(36), nullable=False), - sa.Column("duration", sa.Float()), - sa.Column("pass_sla", sa.Boolean()) -) - -workload_helper = sa.Table( - "workloads", - sa.MetaData(), - sa.Column("id", sa.Integer, primary_key=True, autoincrement=True), - sa.Column("uuid", sa.String(36), nullable=False), - sa.Column("task_uuid", sa.String(length=36), nullable=False), - sa.Column("subtask_uuid", sa.String(length=36), nullable=False), - sa.Column("load_duration", sa.Float()), - sa.Column("pass_sla", sa.Boolean()), -) - - -def upgrade(): - tasks = {} - subtasks = {} - - with op.batch_alter_table("workloads") as batch_op: - # change type of column - batch_op.drop_column("start_time") - batch_op.add_column(sa.Column("start_time", sa_types.TimeStamp)) - - connection = op.get_bind() - - for w in connection.execute(workload_helper.select()): - tasks.setdefault(w.task_uuid, {"task_duration": 0, "pass_sla": True}) - subtasks.setdefault(w.subtask_uuid, {"duration": 0, "pass_sla": True}) - tasks[w.task_uuid]["task_duration"] += w.load_duration - subtasks[w.subtask_uuid]["duration"] += w.load_duration - - if not w.pass_sla: - tasks[w.task_uuid]["pass_sla"] = False - subtasks[w.subtask_uuid]["pass_sla"] = False - - for subtask in connection.execute(subtask_helper.select()): - values = subtasks.get(subtask.uuid, {"duration": 0.0, - "pass_sla": True}) - connection.execute(subtask_helper.update().where( - subtask_helper.c.id == subtask.id).values(**values)) - - for task in connection.execute(task_helper.select()): - values = tasks.get(task.uuid, {"task_duration": 0.0, - "pass_sla": True}) - connection.execute(task_helper.update().where( - task_helper.c.id == task.id).values(**values)) - - -def downgrade(): - raise exceptions.DowngradeNotSupported() diff --git a/rally/common/db/sqlalchemy/migrations/versions/37fdbb373e8d_fix_test_results_for_verifications.py b/rally/common/db/sqlalchemy/migrations/versions/37fdbb373e8d_fix_test_results_for_verifications.py deleted file mode 100644 index 2079a7f7..00000000 --- a/rally/common/db/sqlalchemy/migrations/versions/37fdbb373e8d_fix_test_results_for_verifications.py +++ /dev/null @@ -1,59 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Fix test results for verifications - -Revision ID: 37fdbb373e8d -Revises: 484cd9413e66 -Create Date: 2016-12-29 19:54:23.804525 - -""" - -# revision identifiers, used by Alembic. 
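
The roll-up above can be read as a simple fold over workloads: a task's ``task_duration`` is the sum of its workloads' ``load_duration``, and its ``pass_sla`` is the logical AND of theirs; subtasks are handled identically. In miniature, with invented data:

.. code-block:: python

    workloads = [
        {"task_uuid": "t1", "load_duration": 12.5, "pass_sla": True},
        {"task_uuid": "t1", "load_duration": 3.0, "pass_sla": False},
    ]

    tasks = {}
    for w in workloads:
        t = tasks.setdefault(w["task_uuid"],
                             {"task_duration": 0.0, "pass_sla": True})
        t["task_duration"] += w["load_duration"]
        if not w["pass_sla"]:
            t["pass_sla"] = False

    assert tasks["t1"] == {"task_duration": 15.5, "pass_sla": False}
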
-revision = "37fdbb373e8d" -down_revision = "484cd9413e66" -branch_labels = None -depends_on = None - - -from alembic import op -import sqlalchemy as sa - -from rally.common.db.sqlalchemy import types as sa_types -from rally import exceptions - - -verifications_helper = sa.Table( - "verifications", - sa.MetaData(), - sa.Column("id", sa.Integer, primary_key=True, autoincrement=True), - sa.Column("tests", sa_types.MutableJSONEncodedDict, default={}) -) - - -def upgrade(): - connection = op.get_bind() - for v in connection.execute(verifications_helper.select()): - tests = v.tests - for test in tests.values(): - duration = test.pop("time") - test["duration"] = duration - - connection.execute( - verifications_helper.update().where( - verifications_helper.c.id == v.id).values(tests=tests)) - - -def downgrade(): - raise exceptions.DowngradeNotSupported() diff --git a/rally/common/db/sqlalchemy/migrations/versions/484cd9413e66_new_db_schema_for_verification_component.py b/rally/common/db/sqlalchemy/migrations/versions/484cd9413e66_new_db_schema_for_verification_component.py deleted file mode 100644 index 87fa1c1e..00000000 --- a/rally/common/db/sqlalchemy/migrations/versions/484cd9413e66_new_db_schema_for_verification_component.py +++ /dev/null @@ -1,216 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Provide new db schema for verification component - -Revision ID: 484cd9413e66 -Revises: e654a0648db0 -Create Date: 2016-11-04 17:04:24.614075 - -""" - -# revision identifiers, used by Alembic. 
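
Because ``tests`` is stored as a JSON-encoded document, the migration above cannot rename the key in SQL; it loads the whole document, renames ``time`` to ``duration`` in every test entry, and writes the whole document back. The in-memory part of that transformation, with invented test entries:

.. code-block:: python

    tests = {"test_a": {"status": "success", "time": "1.111"},
             "test_b": {"status": "fail", "time": "0.222"}}

    for test in tests.values():
        test["duration"] = test.pop("time")

    assert all("time" not in t and "duration" in t for t in tests.values())
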
-revision = "484cd9413e66" -down_revision = "e654a0648db0" -branch_labels = None -depends_on = None - -import uuid - -from alembic import op -from oslo_utils import timeutils -import sqlalchemy as sa - -from rally.common.db.sqlalchemy import types as sa_types -from rally import exceptions - - -TASK_STATUSES = ["aborted", "aborting", "cleaning up", "failed", "finished", - "init", "paused", "running", "setting up", "soft_aborting", - "verifying"] - -_MAP_OLD_TO_NEW_TEST_STATUSES = { - "OK": "success", - "FAIL": "fail", - "SKIP": "skip" -} - - -def UUID(): - return str(uuid.uuid4()) - - -verification_helper = sa.Table( - "verifications", - sa.MetaData(), - sa.Column("id", sa.Integer, primary_key=True, autoincrement=True), - sa.Column("uuid", sa.String(36), nullable=False), - sa.Column("deployment_uuid", sa.String(36), nullable=False), - sa.Column("status", sa.Enum(*TASK_STATUSES, name="enum_tasks_status"), - default="init", nullable=False), - sa.Column("set_name", sa.String(20)), - sa.Column("tests", sa.Integer), - sa.Column("errors", sa.Integer), - sa.Column("failures", sa.Integer), - sa.Column("time", sa.Float), - sa.Column("created_at", sa.DateTime), - sa.Column("updated_at", sa.DateTime) -) - - -results_helper = sa.Table( - "verification_results", - sa.MetaData(), - sa.Column("id", sa.Integer, primary_key=True, autoincrement=True), - sa.Column("verification_uuid", sa.String(36), nullable=False), - sa.Column("data", sa_types.MutableJSONEncodedDict, nullable=False, - default={}), - sa.Column("created_at", sa.DateTime), - sa.Column("updated_at", sa.DateTime) -) - - -def upgrade(): - connection = op.get_bind() - - # create new table to store all verifiers - verifiers_table = op.create_table( - "verifiers", - sa.Column("id", sa.Integer, primary_key=True, autoincrement=True), - sa.Column("uuid", sa.String(36), default=UUID, nullable=False), - - sa.Column("name", sa.String(255), unique=True), - sa.Column("description", sa.Text), - - sa.Column("type", sa.String(255), nullable=False), - sa.Column("namespace", sa.String(255)), - - sa.Column("source", sa.String(255)), - sa.Column("version", sa.String(255)), - sa.Column("system_wide", sa.Boolean), - - sa.Column("status", sa.String(36), default="init", nullable=False), - - sa.Column("extra_settings", sa_types.MutableJSONEncodedDict, - nullable=True), - - sa.Column("created_at", sa.DateTime), - sa.Column("updated_at", sa.DateTime) - ) - - op.create_index("verifier_uuid", "verifiers", ["uuid"], unique=True) - - verifications_table = op.create_table( - "verifications_new", - sa.Column("id", sa.Integer, primary_key=True, autoincrement=True), - sa.Column("uuid", sa.String(36), default=UUID, nullable=False), - - sa.Column("verifier_uuid", sa.String(36), nullable=False), - sa.Column("deployment_uuid", sa.String(36), nullable=False), - - sa.Column("run_args", sa_types.MutableJSONEncodedDict), - - sa.Column("status", sa.String(36), default="init", nullable=False), - - sa.Column("tests_count", sa.Integer, default=0), - sa.Column("failures", sa.Integer, default=0), - sa.Column("skipped", sa.Integer, default=0), - sa.Column("success", sa.Integer, default=0), - sa.Column("unexpected_success", sa.Integer, default=0), - sa.Column("expected_failures", sa.Integer, default=0), - sa.Column("tests_duration", sa.Float, default=0.0), - - sa.Column("tests", sa_types.MutableJSONEncodedDict, default={}), - - sa.Column("created_at", sa.DateTime), - sa.Column("updated_at", sa.DateTime), - - sa.ForeignKeyConstraint(["verifier_uuid"], ["verifiers.uuid"]), - 
sa.ForeignKeyConstraint(["deployment_uuid"], ["deployments.uuid"]) - ) - - default_verifier = None - for vresult in connection.execute(results_helper.select()): - if default_verifier is None: - vuuid = UUID() - connection.execute( - verifiers_table.insert(), - [{ - "uuid": vuuid, - "name": "DefaultTempestVerifier", - "description": "It is the default verifier to assign all " - "migrated verification results for", - "type": "tempest", - "namespace": "openstack", - "source": "n/a", - "version": "n/a", - "system_wide": False, - "status": "init", - "created_at": timeutils.utcnow(), - "updated_at": timeutils.utcnow() - }] - ) - default_verifier = connection.execute( - verifiers_table.select().where( - verifiers_table.c.uuid == vuuid)).first() - - data = vresult.data - if "errors" in data: - # it is a very old format... - for test in data["test_cases"].keys(): - old_status = data["test_cases"][test]["status"] - new_status = _MAP_OLD_TO_NEW_TEST_STATUSES.get( - old_status, old_status.lower()) - data["test_cases"][test]["status"] = new_status - - if "failure" in data["test_cases"][test]: - data["test_cases"][test]["traceback"] = data[ - "test_cases"][test]["failure"]["log"] - data["test_cases"][test].pop("failure") - - verifications = connection.execute( - verification_helper.select().where( - verification_helper.c.uuid == vresult.verification_uuid)) - # for each verification result we have single verification object - verification = verifications.first() - - connection.execute( - verifications_table.insert(), - [{"uuid": verification.uuid, - "verifier_uuid": default_verifier.uuid, - "deployment_uuid": verification.deployment_uuid, - "run_args": {"pattern": "set=%s" % verification.set_name}, - "status": verification.status, - "tests": data["test_cases"], - "tests_count": data["tests"], - "failures": data["failures"], - "skipped": data["skipped"], - "success": data["success"], - "unexpected_success": data.get("unexpected_success", 0), - "expected_failures": data.get("expected_failures", 0), - "tests_duration": data["time"], - "created_at": vresult.created_at, - "updated_at": vresult.updated_at - }]) - - op.drop_table("verification_results") - op.drop_table("verifications") - op.rename_table("verifications_new", "verifications") - - op.create_index( - "verification_uuid", "verifications", ["uuid"], unique=True) - - -def downgrade(): - raise exceptions.DowngradeNotSupported() diff --git a/rally/common/db/sqlalchemy/migrations/versions/4ef544102ba7_change_task_status_enum.py b/rally/common/db/sqlalchemy/migrations/versions/4ef544102ba7_change_task_status_enum.py deleted file mode 100644 index 291c9267..00000000 --- a/rally/common/db/sqlalchemy/migrations/versions/4ef544102ba7_change_task_status_enum.py +++ /dev/null @@ -1,168 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Change task status enum - -Revision ID: 4ef544102ba7 -Revises: 3177d36ea270 -Create Date: 2016-04-22 21:28:50.745316 - -""" - -# revision identifiers, used by Alembic. 
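
When a table changes shape this drastically, the migration above rebuilds it rather than altering it: create the replacement under a temporary name, copy and transform every row, drop the old table, and rename the new one into place. A stripped-down skeleton of that create/copy/drop/rename sequence (the ``things`` table and its transform are invented):

.. code-block:: python

    from alembic import op
    import sqlalchemy as sa


    def upgrade():
        new_table = op.create_table(
            "things_new",
            sa.Column("id", sa.Integer, primary_key=True),
            sa.Column("status", sa.String(36), nullable=False))

        old = sa.Table("things", sa.MetaData(),
                       sa.Column("id", sa.Integer, primary_key=True),
                       sa.Column("state", sa.String(36)))
        conn = op.get_bind()
        for row in conn.execute(old.select()):
            conn.execute(new_table.insert(),
                         [{"id": row.id, "status": row.state.lower()}])

        op.drop_table("things")
        op.rename_table("things_new", "things")
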
-revision = "4ef544102ba7" -down_revision = "f33f4610dcda" -branch_labels = None -depends_on = None - -from alembic import op -import sqlalchemy as sa - -from rally.common.db.sqlalchemy import types as sa_types -from rally import consts -from rally import exceptions - - -OLD_STATUS = [ - "aborted", "aborting", "cleaning up", "failed", "finished", - "init", "paused", "running", "setting up", "soft_aborting", "verifying" -] -OLD_ENUM = sa.Enum(*OLD_STATUS, name="enum_tasks_status") - -WITHOUT_CHANGES = ( - "init", "running", "aborted", "aborting", "soft_aborting", "paused", - "finished" -) - -OLD_TO_NEW = [ - ("verifying", "validating",), - ("failed", "crashed",) -] - - -task = sa.Table( - "tasks", - sa.MetaData(), - sa.Column("created_at", sa.DateTime(), nullable=True), - sa.Column("updated_at", sa.DateTime(), nullable=True), - sa.Column("id", sa.Integer(), nullable=False), - sa.Column("uuid", sa.String(length=36), nullable=False), - sa.Column("deployment_uuid", sa.String(length=36), nullable=False), - sa.Column("title", sa.String(length=64), default=""), - sa.Column("description", sa.Text(), default=""), - sa.Column("input_task", sa.Text(), default=""), - sa.Column("validation_duration", sa.Float()), - sa.Column("task_duration", sa.Float()), - sa.Column("pass_sla", sa.Boolean()), - sa.Column("status", OLD_ENUM, nullable=False), - sa.Column("new_status", sa.String(36), - default=consts.TaskStatus.INIT), - sa.Column( - "validation_result", - sa_types.MutableJSONEncodedDict(), - default={}, - nullable=False - ) -) - -subtask = sa.Table( - "subtasks", - sa.MetaData(), - sa.Column("created_at", sa.DateTime()), - sa.Column("updated_at", sa.DateTime()), - sa.Column("id", sa.Integer(), nullable=False, autoincrement=True), - sa.Column("uuid", sa.String(length=36), nullable=False), - sa.Column("task_uuid", sa.String(length=36), nullable=False), - sa.Column("title", sa.String(length=64), default=""), - sa.Column("description", sa.Text(), default=""), - sa.Column( - "context", - sa_types.MutableJSONEncodedDict(), - default={}, - nullable=False), - sa.Column( - "sla", - sa_types.MutableJSONEncodedDict(), - default={}, - nullable=False), - sa.Column("duration", sa.Float()), - sa.Column( - "run_in_parallel", - sa.Boolean(), - default=False, - nullable=False), - sa.Column("pass_sla", sa.Boolean()), - sa.Column("status", OLD_ENUM, nullable=False), - sa.Column("new_status", sa.String(36), - default=consts.SubtaskStatus.RUNNING), - sa.ForeignKeyConstraint(["task_uuid"], ["tasks.uuid"], ), - sa.PrimaryKeyConstraint("id") -) - - -def upgrade(): - # Workaround for Alemic bug #89 - # https://bitbucket.org/zzzeek/alembic/issue/89 - - with op.batch_alter_table("tasks") as batch_op: - batch_op.add_column(sa.Column("new_status", sa.String(36), - default=consts.TaskStatus.INIT)) - with op.batch_alter_table("subtasks") as batch_op: - batch_op.add_column(sa.Column("new_status", sa.String(36), - default=consts.SubtaskStatus.RUNNING)) - - op.execute( - task.update() - .where(task.c.status.in_(WITHOUT_CHANGES)) - .values({"new_status": task.c.status})) - - for old, new in OLD_TO_NEW: - op.execute( - task.update() - .where(task.c.status == op.inline_literal(old)) - .values({"new_status": new})) - - # NOTE(rvasilets): Assume that set_failed was used only in causes of - # validation failed - op.execute( - task.update().where( - (task.c.status == op.inline_literal("failed")) & - (task.c.validation_result == {})).values( - {"new_status": "crashed", "validation_result": {}})) - op.execute( - task.update().where( - 
(task.c.status == op.inline_literal("failed")) & - (task.c.validation_result != {})).values( - {"new_status": "validation_failed", - "validation_result": task.c.validation_result})) - - op.drop_index("task_status", "tasks") - op.drop_index("subtask_status", "subtasks") - - # NOTE(boris-42): Statuses "setting up", "cleaning up" were not used - with op.batch_alter_table("tasks") as batch_op: - batch_op.drop_column("status") - batch_op.alter_column("new_status", new_column_name="status", - existing_type=sa.String(36)) - with op.batch_alter_table("subtasks") as batch_op: - batch_op.drop_column("status") - batch_op.alter_column("new_status", new_column_name="status", - existing_type=sa.String(36)) - - op.create_index("task_status", "tasks", ["status"]) - op.create_index("subtask_status", "subtasks", ["status"]) - - -def downgrade(): - raise exceptions.DowngradeNotSupported() diff --git a/rally/common/db/sqlalchemy/migrations/versions/54e844ebfbc3_update_deployment_configs.py b/rally/common/db/sqlalchemy/migrations/versions/54e844ebfbc3_update_deployment_configs.py deleted file mode 100644 index 3b0f2db3..00000000 --- a/rally/common/db/sqlalchemy/migrations/versions/54e844ebfbc3_update_deployment_configs.py +++ /dev/null @@ -1,98 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Update_deployment_configs - -Previously we had bad deployment config validation - -Revision ID: 54e844ebfbc3 -Revises: 3177d36ea270 -Create Date: 2016-07-24 14:53:39.323105 - -""" - -# revision identifiers, used by Alembic. 
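
The status conversion above cannot rename enum values in place, so it stages them through a plain-string ``new_status`` column: unchanged values are copied straight across, renamed ones go through a mapping, and the columns are then swapped. The remapping step in isolation, runnable against in-memory SQLite (statuses abbreviated):

.. code-block:: python

    import sqlalchemy as sa

    OLD_TO_NEW = {"verifying": "validating", "failed": "crashed"}

    engine = sa.create_engine("sqlite://")
    meta = sa.MetaData()
    tasks = sa.Table("tasks", meta,
                     sa.Column("id", sa.Integer, primary_key=True),
                     sa.Column("status", sa.String(36)),
                     sa.Column("new_status", sa.String(36)))
    meta.create_all(engine)

    with engine.connect() as conn:
        conn.execute(tasks.insert(), [{"status": "verifying"},
                                      {"status": "finished"}])
        for old, new in OLD_TO_NEW.items():
            conn.execute(tasks.update().where(tasks.c.status == old)
                         .values(new_status=new))
        # Everything not remapped keeps its old value.
        conn.execute(tasks.update().where(tasks.c.new_status.is_(None))
                     .values(new_status=tasks.c.status))
        rows = conn.execute(tasks.select().order_by(tasks.c.id)).fetchall()
        assert [r.new_status for r in rows] == ["validating", "finished"]
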
-revision = "54e844ebfbc3" -down_revision = "3177d36ea270" -branch_labels = None -depends_on = None - -from alembic import op # noqa -import sqlalchemy as sa # noqa - -from rally.common.db.sqlalchemy import types as sa_types -from rally import exceptions - - -deployments_helper = sa.Table( - "deployments", - sa.MetaData(), - sa.Column("id", sa.Integer, primary_key=True, autoincrement=True), - sa.Column( - "config", - sa_types.MutableJSONEncodedDict, - default={}, - nullable=False, - ) -) - - -def _check_user_entry(user): - """Fixes wrong format of users.""" - if "tenant_name" in user: - keys = set(user.keys()) - if keys == {"username", "password", "tenant_name", - "project_domain_name", "user_domain_name"}: - if (user["user_domain_name"] == "" and - user["project_domain_name"] == ""): - # it is credentials of keystone v2 and they were created - # --fromenv - del user["user_domain_name"] - del user["project_domain_name"] - return True - else: - # it looks like keystone v3 credentials - user["project_name"] = user.pop("tenant_name") - return True - - -def upgrade(): - connection = op.get_bind() - for deployment in connection.execute(deployments_helper.select()): - conf = deployment.config - if conf["type"] != "ExistingCloud": - continue - - should_update = False - - if _check_user_entry(conf["admin"]): - should_update = True - if "users" in conf: - for user in conf["users"]: - if _check_user_entry(user): - should_update = True - - if conf.get("endpoint_type") == "public": - del conf["endpoint_type"] - should_update = True - - if should_update: - connection.execute( - deployments_helper.update().where( - deployments_helper.c.id == deployment.id).values( - config=conf)) - - -def downgrade(): - raise exceptions.DowngradeNotSupported() diff --git a/rally/common/db/sqlalchemy/migrations/versions/6ad4f426f005_add_hooks_to_task_result.py b/rally/common/db/sqlalchemy/migrations/versions/6ad4f426f005_add_hooks_to_task_result.py deleted file mode 100644 index 15136fa8..00000000 --- a/rally/common/db/sqlalchemy/migrations/versions/6ad4f426f005_add_hooks_to_task_result.py +++ /dev/null @@ -1,58 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""add hooks to task result - -Adds empty hooks list to existing task results - -Revision ID: 6ad4f426f005 -Revises: 08e1515a576c -Create Date: 2016-09-13 18:11:47.703023 - -""" - -# revision identifiers, used by Alembic. 
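
``_check_user_entry()`` above distinguishes two malformed credential shapes by their keys alone. Re-stated as a standalone function and exercised on invented sample credentials:

.. code-block:: python

    def fix_user_entry(user):
        """Mirrors _check_user_entry() above; True if the entry was fixed."""
        if "tenant_name" not in user:
            return False
        keys = set(user)
        if keys == {"username", "password", "tenant_name",
                    "project_domain_name", "user_domain_name"}:
            if (user["user_domain_name"] == "" and
                    user["project_domain_name"] == ""):
                # keystone v2 credentials created with --fromenv
                del user["user_domain_name"]
                del user["project_domain_name"]
                return True
            return False
        # it looks like keystone v3 credentials
        user["project_name"] = user.pop("tenant_name")
        return True

    v2 = {"username": "u", "password": "p", "tenant_name": "demo",
          "project_domain_name": "", "user_domain_name": ""}
    assert fix_user_entry(v2) and "user_domain_name" not in v2

    v3 = {"username": "u", "password": "p", "tenant_name": "demo"}
    assert fix_user_entry(v3) and v3["project_name"] == "demo"
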
-revision = "6ad4f426f005" -down_revision = "08e1515a576c" -branch_labels = None -depends_on = None - -from alembic import op # noqa -import sqlalchemy as sa # noqa - -from rally.common.db.sqlalchemy import types as sa_types -from rally import exceptions - - -task_results_helper = sa.Table( - "task_results", - sa.MetaData(), - sa.Column("id", sa.Integer(), nullable=False), - sa.Column("data", sa_types.MutableJSONEncodedDict(), nullable=False), -) - - -def upgrade(): - connection = op.get_bind() - for task_result in connection.execute(task_results_helper.select()): - data = task_result.data - data["hooks"] = [] - connection.execute( - task_results_helper.update().where( - task_results_helper.c.id == task_result.id).values( - data=data)) - - -def downgrade(): - raise exceptions.DowngradeNotSupported() diff --git a/rally/common/db/sqlalchemy/migrations/versions/7948b83229f6_workload_min_max_durations.py b/rally/common/db/sqlalchemy/migrations/versions/7948b83229f6_workload_min_max_durations.py deleted file mode 100644 index 530d2a8d..00000000 --- a/rally/common/db/sqlalchemy/migrations/versions/7948b83229f6_workload_min_max_durations.py +++ /dev/null @@ -1,80 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""workload-min-max-durations - -Revision ID: 7948b83229f6 -Revises: c517b0011857 -Create Date: 2017-07-22 08:45:25.726422 - -""" - -from alembic import op -import sqlalchemy as sa - - -from rally import exceptions - -# revision identifiers, used by Alembic. -revision = "7948b83229f6" -down_revision = "c517b0011857" -branch_labels = None -depends_on = None - - -workload_helper = sa.Table( - "workloads", - sa.MetaData(), - sa.Column("id", sa.Integer, primary_key=True, autoincrement=True), - sa.Column("uuid", sa.String(36), nullable=False), - sa.Column("min_duration", sa.Float), - sa.Column("max_duration", sa.Float) -) - -workload_data_helper = sa.Table( - "workloaddata", - sa.MetaData(), - sa.Column("id", sa.Integer, primary_key=True, autoincrement=True), - sa.Column("uuid", sa.String(36), nullable=False), - sa.Column("workload_uuid", sa.String(length=36), nullable=False) -) - - -def upgrade(): - connection = op.get_bind() - for workload in connection.execute(workload_helper.select()): - # NOTE(andreykurilin): the cases of "wrong" values for min_duration - # and max_duration are equal. Let's check everything for - # min_duration and apply for both. 
- should_update = False - if workload.min_duration == -1: - # it is left from the migration to Task Format V2 - should_update = True - elif workload.min_duration == 0: - # should check existence of workload data to ensure where 0 is a - # real min_duration or it is just previous default value - r = (connection.execute(workload_data_helper.select().where( - workload_data_helper.c.workload_uuid == workload.uuid)) - .first()) - if not r: - should_update = True - - if should_update: - connection.execute(workload_helper.update().where( - workload_helper.c.uuid == workload.uuid).values( - min_duration=None, max_duration=None)) - - -def downgrade(): - raise exceptions.DowngradeNotSupported() diff --git a/rally/common/db/sqlalchemy/migrations/versions/92aaaa2a6bb3_refactor_credentials.py b/rally/common/db/sqlalchemy/migrations/versions/92aaaa2a6bb3_refactor_credentials.py deleted file mode 100644 index 1bb7e696..00000000 --- a/rally/common/db/sqlalchemy/migrations/versions/92aaaa2a6bb3_refactor_credentials.py +++ /dev/null @@ -1,73 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""refactor_credentials - -Revision ID: 92aaaa2a6bb3 -Revises: 4ef544102ba7 -Create Date: 2017-02-01 12:52:43.499663 - -""" - -# revision identifiers, used by Alembic. 
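
The subtlety in the migration above is that ``0`` is ambiguous: it may be a genuine duration or just the previous column default. ``-1`` always means "no real measurement", while ``0`` is reset only when the workload has no data rows at all. The decision rule as a small predicate, runnable as-is:

.. code-block:: python

    def should_reset(min_duration, has_workload_data):
        if min_duration == -1:
            # left over from the migration to Task Format V2
            return True
        if min_duration == 0 and not has_workload_data:
            # 0 was just the previous column default
            return True
        return False

    assert should_reset(-1, True)
    assert should_reset(0, False)
    assert not should_reset(0, True)    # a genuine zero duration is kept
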
-revision = "92aaaa2a6bb3" -down_revision = "4ef544102ba7" -branch_labels = None -depends_on = None - -from alembic import op -import sqlalchemy as sa - -from rally.common.db.sqlalchemy import types as sa_types -from rally import exceptions - - -deployments_helper = sa.Table( - "deployments", - sa.MetaData(), - sa.Column("name", sa.String(255), unique=True), - sa.Column("id", sa.Integer, primary_key=True, autoincrement=True), - sa.Column("credentials", sa.PickleType, nullable=True), - sa.Column("new_credentials", sa_types.MutableJSONEncodedDict, - default={}, nullable=False) -) - - -def upgrade(): - with op.batch_alter_table("deployments") as batch_op: - batch_op.add_column( - sa.Column("new_credentials", sa_types.MutableJSONEncodedDict, - default={})) - - connection = op.get_bind() - for deployment in connection.execute(deployments_helper.select()): - creds = {} - for cred_type, cred_obj in deployment.credentials: - creds.setdefault(cred_type, []) - creds[cred_type].append(cred_obj) - - connection.execute( - deployments_helper.update().where( - deployments_helper.c.id == deployment.id).values( - new_credentials=creds)) - - with op.batch_alter_table("deployments") as batch_op: - batch_op.drop_column("credentials") - batch_op.alter_column("new_credentials", new_column_name="credentials", - existing_type=sa_types.MutableJSONEncodedDict, - nullable=False) - - -def downgrade(): - raise exceptions.DowngradeNotSupported() diff --git a/rally/common/db/sqlalchemy/migrations/versions/a6f364988fc2_change_tag_type_enum.py b/rally/common/db/sqlalchemy/migrations/versions/a6f364988fc2_change_tag_type_enum.py deleted file mode 100644 index 0f238451..00000000 --- a/rally/common/db/sqlalchemy/migrations/versions/a6f364988fc2_change_tag_type_enum.py +++ /dev/null @@ -1,66 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Change tag type enum - -Revision ID: a6f364988fc2 -Revises: 37fdbb373e8d -Create Date: 2017-01-17 18:47:10.700459 - -""" - -# revision identifiers, used by Alembic. 
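
Besides swapping the column type from ``PickleType`` to a JSON-encoded dict, the migration above regroups the stored value: a flat list of ``(type, object)`` pairs becomes a dict of lists keyed by credential type. The reshaping step alone, with invented pairs:

.. code-block:: python

    pairs = [("openstack", {"admin": "a"}),
             ("openstack", {"admin": "b"})]

    creds = {}
    for cred_type, cred_obj in pairs:
        creds.setdefault(cred_type, []).append(cred_obj)

    assert creds == {"openstack": [{"admin": "a"}, {"admin": "b"}]}
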
-revision = "a6f364988fc2" -down_revision = "37fdbb373e8d" -branch_labels = None -depends_on = None - - -from alembic import op -import sqlalchemy as sa - -from rally import exceptions - - -TAG_TYPES = ["task", "subtask"] - -tag_helper = sa.Table( - "tags", - sa.MetaData(), - sa.Column("id", sa.Integer(), nullable=False), - sa.Column("type", sa.Enum(*TAG_TYPES, name="enum_tag_types"), - nullable=False), - sa.Column("new_type", sa.String(36), nullable=False) -) - - -def upgrade(): - with op.batch_alter_table("tags") as batch_op: - batch_op.add_column( - sa.Column("new_type", sa.String(36))) - - op.execute(tag_helper.update().values(new_type=tag_helper.c.type)) - - op.drop_index("d_type_tag", "tags") - - with op.batch_alter_table("tags") as batch_op: - batch_op.drop_column("type") - batch_op.alter_column("new_type", new_column_name="type", - existing_type=sa.String(36), nullable=False) - - op.create_index("d_type_tag", "tags", ["uuid", "type", "tag"], unique=True) - - -def downgrade(): - raise exceptions.DowngradeNotSupported() diff --git a/rally/common/db/sqlalchemy/migrations/versions/c517b0011857_fill_missed_workload_info.py b/rally/common/db/sqlalchemy/migrations/versions/c517b0011857_fill_missed_workload_info.py deleted file mode 100644 index f9676c72..00000000 --- a/rally/common/db/sqlalchemy/migrations/versions/c517b0011857_fill_missed_workload_info.py +++ /dev/null @@ -1,140 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""fill_missed_workload_info - -Revision ID: c517b0011857 -Revises: 35fe16d4ab1c -Create Date: 2017-06-22 18:46:09.281312 - -""" - -import collections - -from alembic import op -import sqlalchemy as sa - -from rally.common.db.sqlalchemy import types as sa_types -from rally import exceptions -from rally.task import atomic -from rally.task.processing import charts - -# revision identifiers, used by Alembic. 
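
Unlike the row-by-row loops in the other migrations, the tag conversion above copies the old column into the new one with a single server-side UPDATE (``values(new_type=tag_helper.c.type)``), with no Python loop at all. The same idiom in isolation, against in-memory SQLite:

.. code-block:: python

    import sqlalchemy as sa

    engine = sa.create_engine("sqlite://")
    meta = sa.MetaData()
    tags = sa.Table("tags", meta,
                    sa.Column("id", sa.Integer, primary_key=True),
                    sa.Column("type", sa.String(36)),
                    sa.Column("new_type", sa.String(36)))
    meta.create_all(engine)

    with engine.connect() as conn:
        conn.execute(tags.insert(), [{"type": "task"}, {"type": "subtask"}])
        # One UPDATE, evaluated in the database, copies the whole column.
        conn.execute(tags.update().values(new_type=tags.c.type))
        rows = conn.execute(tags.select().order_by(tags.c.id)).fetchall()
        assert [r.new_type for r in rows] == ["task", "subtask"]
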
-revision = "c517b0011857" -down_revision = "35fe16d4ab1c" -branch_labels = None -depends_on = None - - -workload_helper = sa.Table( - "workloads", - sa.MetaData(), - sa.Column("id", sa.Integer, primary_key=True, autoincrement=True), - sa.Column("uuid", sa.String(36), nullable=False), - sa.Column("start_time", sa_types.TimeStamp), - sa.Column("statistics", sa_types.MutableJSONEncodedDict, default={}, - nullable=False), -) - -workload_data_helper = sa.Table( - "workloaddata", - sa.MetaData(), - sa.Column("id", sa.Integer, primary_key=True, autoincrement=True), - sa.Column("uuid", sa.String(36), nullable=False), - sa.Column("workload_uuid", sa.String(length=36), nullable=False), - sa.Column("chunk_data", sa_types.MutableJSONEncodedDict(), nullable=False) -) - - -def upgrade(): - connection = op.get_bind() - workloads = {} - for wdata in connection.execute(workload_data_helper.select()): - workloads.setdefault(wdata.workload_uuid, []) - - chunk_data = wdata.chunk_data["raw"] - - require_updating = False - for itr in chunk_data: - if "output" not in itr: - itr["output"] = {"additive": [], "complete": []} - if "scenario_output" in itr and itr["scenario_output"]["data"]: - itr["output"]["additive"].append( - {"items": list(itr["scenario_output"]["data"].items()), - "title": "Scenario output", - "description": "", - "chart": "OutputStackedAreaChart"}) - del itr["scenario_output"] - require_updating = True - if isinstance(itr["atomic_actions"], dict): - new_atomic_actions = [] - started_at = itr["timestamp"] - for name, d in itr["atomic_actions"].items(): - finished_at = started_at + d - new_atomic_actions.append( - {"name": name, "children": [], - "started_at": started_at, - "finished_at": finished_at}) - started_at = finished_at - itr["atomic_actions"] = new_atomic_actions - require_updating = True - - if require_updating: - connection.execute(workload_data_helper.update().where( - workload_data_helper.c.uuid == wdata.uuid).values( - chunk_data={"raw": chunk_data})) - - workloads[wdata.workload_uuid].extend(chunk_data) - - for workload in connection.execute(workload_helper.select()): - if workload.uuid not in workloads or not workloads[workload.uuid]: - continue - data = sorted(workloads[workload.uuid], - key=lambda itr: itr["timestamp"]) - - start_time = data[0]["timestamp"] - - atomics = collections.OrderedDict() - - for itr in workloads[workload.uuid]: - merged_atomic = atomic.merge_atomic(itr["atomic_actions"]) - for name, value in merged_atomic.items(): - duration = value["duration"] - count = value["count"] - if name not in atomics or count > atomics[name]["count"]: - atomics[name] = {"min_duration": duration, - "max_duration": duration, - "count": count} - elif count == atomics[name]["count"]: - if duration < atomics[name]["min_duration"]: - atomics[name]["min_duration"] = duration - if duration > atomics[name]["max_duration"]: - atomics[name]["max_duration"] = duration - - durations_stat = charts.MainStatsTable( - {"total_iteration_count": len(workloads[workload.uuid]), - "statistics": {"atomics": atomics}}) - - for itr in workloads[workload.uuid]: - durations_stat.add_iteration(itr) - - connection.execute(workload_helper.update().where( - workload_helper.c.uuid == workload.uuid).values( - start_time=start_time, - statistics={"durations": durations_stat.render(), - "atomics": atomics})) - - -def downgrade(): - raise exceptions.DowngradeNotSupported() diff --git a/rally/common/db/sqlalchemy/migrations/versions/ca3626f62937_init_migration.py 
b/rally/common/db/sqlalchemy/migrations/versions/ca3626f62937_init_migration.py deleted file mode 100644 index a6009eed..00000000 --- a/rally/common/db/sqlalchemy/migrations/versions/ca3626f62937_init_migration.py +++ /dev/null @@ -1,206 +0,0 @@ -# Copyright (c) 2016 Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -"""Init migration - -Revision ID: ca3626f62937 -Revises: -Create Date: 2016-01-07 00:27:39.687814 - -""" - -# revision identifiers, used by Alembic. -revision = "ca3626f62937" -down_revision = None -branch_labels = None -depends_on = None - - -from alembic import op -import sqlalchemy as sa - -import rally -from rally.common.db.sqlalchemy import api -from rally import exceptions - - -def upgrade(): - dialect = api.get_engine().dialect - - deployments_columns = [ - sa.Column("created_at", sa.DateTime(), nullable=True), - sa.Column("updated_at", sa.DateTime(), nullable=True), - sa.Column("id", sa.Integer(), nullable=False), - sa.Column("uuid", sa.String(length=36), nullable=False), - sa.Column("parent_uuid", sa.String(length=36), nullable=True), - sa.Column("name", sa.String(length=255), nullable=True), - sa.Column("started_at", sa.DateTime(), nullable=True), - sa.Column("completed_at", sa.DateTime(), nullable=True), - sa.Column( - "config", - rally.common.db.sqlalchemy.types.MutableJSONEncodedDict(), - nullable=False), - sa.Column("admin", sa.PickleType(), nullable=True), - sa.Column("users", sa.PickleType(), nullable=False), - sa.Column("enum_deployments_status", sa.Enum( - "cleanup->failed", "cleanup->finished", "cleanup->started", - "deploy->failed", "deploy->finished", "deploy->inconsistent", - "deploy->init", "deploy->started", "deploy->subdeploy", - name="enum_deploy_status"), nullable=False), - sa.PrimaryKeyConstraint("id"), - sa.UniqueConstraint("name") - ] - - if dialect.name.startswith("sqlite"): - deployments_columns.append( - sa.ForeignKeyConstraint( - ["parent_uuid"], [u"deployments.uuid"], - name="fk_parent_uuid", use_alter=True) - ) - - # commands auto generated by Alembic - please adjust! 
- op.create_table("deployments", *deployments_columns) - - op.create_index("deployment_parent_uuid", "deployments", - ["parent_uuid"], unique=False) - - op.create_index("deployment_uuid", "deployments", ["uuid"], unique=True) - - if not dialect.name.startswith("sqlite"): - op.create_foreign_key("fk_parent_uuid", "deployments", "deployments", - ["parent_uuid"], ["uuid"]) - - op.create_table( - "workers", - sa.Column("created_at", sa.DateTime(), nullable=True), - sa.Column("updated_at", sa.DateTime(), nullable=True), - sa.Column("id", sa.Integer(), nullable=False), - sa.Column("hostname", sa.String(length=255), nullable=True), - sa.PrimaryKeyConstraint("id"), - sa.UniqueConstraint("hostname", name="uniq_worker@hostname") - ) - - op.create_table( - "resources", - sa.Column("created_at", sa.DateTime(), nullable=True), - sa.Column("updated_at", sa.DateTime(), nullable=True), - sa.Column("id", sa.Integer(), nullable=False), - sa.Column("provider_name", sa.String(length=255), nullable=True), - sa.Column("type", sa.String(length=255), nullable=True), - sa.Column( - "info", - rally.common.db.sqlalchemy.types.MutableJSONEncodedDict(), - nullable=False), - sa.Column("deployment_uuid", sa.String(length=36), nullable=False), - sa.ForeignKeyConstraint(["deployment_uuid"], [u"deployments.uuid"]), - sa.PrimaryKeyConstraint("id") - ) - op.create_index("resource_deployment_uuid", "resources", - ["deployment_uuid"], unique=False) - - op.create_index("resource_provider_name", "resources", - ["deployment_uuid", "provider_name"], unique=False) - - op.create_index("resource_provider_name_and_type", "resources", - ["deployment_uuid", "provider_name", "type"], - unique=False) - - op.create_index("resource_type", "resources", - ["deployment_uuid", "type"], unique=False) - - op.create_table( - "tasks", - sa.Column("created_at", sa.DateTime(), nullable=True), - sa.Column("updated_at", sa.DateTime(), nullable=True), - sa.Column("id", sa.Integer(), nullable=False), - sa.Column("uuid", sa.String(length=36), nullable=False), - sa.Column("status", sa.Enum( - "aborted", "aborting", "cleaning up", "failed", "finished", - "init", "paused", "running", "setting up", "soft_aborting", - "verifying", name="enum_tasks_status"), nullable=False), - sa.Column("verification_log", sa.Text(), nullable=True), - sa.Column("tag", sa.String(length=64), nullable=True), - sa.Column("deployment_uuid", sa.String(length=36), nullable=False), - sa.ForeignKeyConstraint(["deployment_uuid"], [u"deployments.uuid"], ), - sa.PrimaryKeyConstraint("id") - ) - - op.create_index("task_deployment", "tasks", ["deployment_uuid"], - unique=False) - - op.create_index("task_status", "tasks", ["status"], unique=False) - - op.create_index("task_uuid", "tasks", ["uuid"], unique=True) - - op.create_table( - "verifications", - sa.Column("created_at", sa.DateTime(), nullable=True), - sa.Column("updated_at", sa.DateTime(), nullable=True), - sa.Column("id", sa.Integer(), nullable=False), - sa.Column("uuid", sa.String(length=36), nullable=False), - sa.Column("deployment_uuid", sa.String(length=36), nullable=False), - sa.Column("status", sa.Enum( - "aborted", "aborting", "cleaning up", "failed", "finished", - "init", "paused", "running", "setting up", "soft_aborting", - "verifying", name="enum_tasks_status"), nullable=False), - sa.Column("set_name", sa.String(length=20), nullable=True), - sa.Column("tests", sa.Integer(), nullable=True), - sa.Column("errors", sa.Integer(), nullable=True), - sa.Column("failures", sa.Integer(), nullable=True), - sa.Column("time", sa.Float(), 
nullable=True), - sa.ForeignKeyConstraint(["deployment_uuid"], [u"deployments.uuid"], ), - sa.PrimaryKeyConstraint("id") - ) - - op.create_index("verification_uuid", "verifications", ["uuid"], - unique=True) - - op.create_table( - "task_results", - sa.Column("created_at", sa.DateTime(), nullable=True), - sa.Column("updated_at", sa.DateTime(), nullable=True), - sa.Column("id", sa.Integer(), nullable=False), - sa.Column( - "key", - rally.common.db.sqlalchemy.types.MutableJSONEncodedDict(), - nullable=False), - sa.Column( - "data", - rally.common.db.sqlalchemy.types.MutableJSONEncodedDict(), - nullable=False), - sa.Column("task_uuid", sa.String(length=36), nullable=True), - sa.ForeignKeyConstraint(["task_uuid"], ["tasks.uuid"], ), - sa.PrimaryKeyConstraint("id") - ) - - op.create_table( - "verification_results", - sa.Column("created_at", sa.DateTime(), nullable=True), - sa.Column("updated_at", sa.DateTime(), nullable=True), - sa.Column("id", sa.Integer(), nullable=False), - sa.Column("verification_uuid", sa.String(length=36), nullable=True), - sa.Column( - "data", - rally.common.db.sqlalchemy.types.MutableJSONEncodedDict(), - nullable=False), - sa.ForeignKeyConstraint(["verification_uuid"], ["verifications.uuid"]), - sa.PrimaryKeyConstraint("id") - ) - # end Alembic commands - - -def downgrade(): - raise exceptions.DowngradeNotSupported() diff --git a/rally/common/db/sqlalchemy/migrations/versions/e654a0648db0_refactor_task_results.py b/rally/common/db/sqlalchemy/migrations/versions/e654a0648db0_refactor_task_results.py deleted file mode 100644 index 64a71b0b..00000000 --- a/rally/common/db/sqlalchemy/migrations/versions/e654a0648db0_refactor_task_results.py +++ /dev/null @@ -1,470 +0,0 @@ -# Copyright (c) 2016 Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Refactor task results - -Revision ID: e654a0648db0 -Revises: 3177d36ea270 -Create Date: 2016-04-01 14:36:56.373349 - -""" - -# revision identifiers, used by Alembic. 
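
One detail of the init migration above deserves a note: the self-referential foreign key on ``deployments.parent_uuid`` is declared inline with ``use_alter=True`` on SQLite, but added afterwards with ``op.create_foreign_key()`` on other backends, because SQLite cannot add a foreign key to an existing table. A reduced sketch of that dialect split (the ``nodes`` table is invented, and ``op.get_bind()`` stands in for the engine lookup used above):

.. code-block:: python

    from alembic import op
    import sqlalchemy as sa


    def upgrade():
        dialect = op.get_bind().dialect
        columns = [
            sa.Column("id", sa.Integer, primary_key=True),
            sa.Column("uuid", sa.String(36), nullable=False, unique=True),
            sa.Column("parent_uuid", sa.String(36), nullable=True)]
        if dialect.name.startswith("sqlite"):
            # Declare the FK inline; use_alter defers its creation
            # within the DDL run.
            columns.append(sa.ForeignKeyConstraint(
                ["parent_uuid"], ["nodes.uuid"],
                name="fk_parent_uuid", use_alter=True))
        op.create_table("nodes", *columns)
        if not dialect.name.startswith("sqlite"):
            op.create_foreign_key("fk_parent_uuid", "nodes", "nodes",
                                  ["parent_uuid"], ["uuid"])
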
-revision = "e654a0648db0" -down_revision = "32fada9b2fde" -branch_labels = None -depends_on = None - -import datetime as dt -import json -import uuid - -from alembic import op -import sqlalchemy as sa - -from rally.common.db.sqlalchemy import types as sa_types -from rally import exceptions - -taskhelper = sa.Table( - "tasks", - sa.MetaData(), - sa.Column("created_at", sa.DateTime(), nullable=True), - sa.Column("updated_at", sa.DateTime(), nullable=True), - sa.Column("id", sa.Integer(), nullable=False), - sa.Column("uuid", sa.String(length=36), nullable=False), - sa.Column("status", sa.Enum( - "aborted", "aborting", "cleaning up", "failed", "finished", - "init", "paused", "running", "setting up", "soft_aborting", - "verifying", name="enum_tasks_status"), nullable=False), - sa.Column("verification_log", sa.Text(), nullable=True), - sa.Column("tag", sa.String(length=64), nullable=True), - sa.Column("deployment_uuid", sa.String(length=36), nullable=False), - sa.Column("title", sa.String(length=64), default=""), - sa.Column("description", sa.Text(), default=""), - sa.Column("input_task", sa.Text(), default=""), - sa.Column("validation_duration", sa.Float()), - sa.Column("task_duration", sa.Float()), - sa.Column("pass_sla", sa.Boolean()), - sa.Column( - "validation_result", - sa_types.MutableJSONEncodedDict(), - default={}, - nullable=False - ) -) - -task_result_helper = sa.Table( - "task_results", - sa.MetaData(), - sa.Column("created_at", sa.DateTime()), - sa.Column("updated_at", sa.DateTime()), - sa.Column("id", sa.Integer(), nullable=False, autoincrement=True), - sa.Column( - "key", - sa_types.MutableJSONEncodedDict(), - nullable=False), - sa.Column( - "data", - sa_types.MutableJSONEncodedDict(), - nullable=False), - sa.Column("task_uuid", sa.String(length=36), nullable=True) -) - -taghelper = sa.Table( - "tags", - sa.MetaData(), - sa.Column("created_at", sa.DateTime()), - sa.Column("updated_at", sa.DateTime()), - sa.Column("id", sa.Integer(), nullable=False, autoincrement=True), - sa.Column("uuid", sa.String(length=36), nullable=False), - sa.Column("tag", sa.String(length=255), nullable=False), - - sa.Column( - "type", - sa.Enum( - "task", "subtask", - name="enum_tag_types"), - nullable=False) -) - - -def upgrade(): - conn = op.get_bind() - - subtask_table = op.create_table( - "subtasks", - sa.Column("created_at", sa.DateTime()), - sa.Column("updated_at", sa.DateTime()), - sa.Column("id", sa.Integer(), nullable=False, autoincrement=True), - sa.Column("uuid", sa.String(length=36), nullable=False), - sa.Column("task_uuid", sa.String(length=36), nullable=False), - sa.Column("title", sa.String(length=64), default=""), - sa.Column("description", sa.Text(), default=""), - sa.Column( - "context", - sa_types.MutableJSONEncodedDict(), - default={}, - nullable=False), - - sa.Column( - "sla", - sa_types.MutableJSONEncodedDict(), - default={}, - nullable=False), - - sa.Column("duration", sa.Float()), - - sa.Column( - "run_in_parallel", - sa.Boolean(), - default=False, - nullable=False), - - sa.Column("pass_sla", sa.Boolean()), - - sa.Column( - "status", - sa.Enum( - "finished", "running", "crashed", - name="enum_subtasks_status"), - nullable=False), - - sa.ForeignKeyConstraint(["task_uuid"], ["tasks.uuid"], ), - sa.PrimaryKeyConstraint("id") - - ) - - op.create_index("subtask_uuid", "subtasks", ["uuid"], unique=True) - op.create_index("subtask_status", "subtasks", ["status"], unique=False) - - workload_table = op.create_table( - "workloads", - sa.Column("created_at", sa.DateTime()), - 
sa.Column("updated_at", sa.DateTime()), - sa.Column("id", sa.Integer(), nullable=False, autoincrement=True), - sa.Column("uuid", sa.String(length=36), nullable=False), - sa.Column("task_uuid", sa.String(length=36), nullable=False), - sa.Column("subtask_uuid", sa.String(length=36), nullable=False), - sa.Column("name", sa.String(length=64), nullable=False), - sa.Column("description", sa.Text(), default=""), - sa.Column("position", sa.Integer(), default=0, nullable=False), - - sa.Column( - "runner_type", - sa.String(length=64), - nullable=False), - - sa.Column( - "runner", - sa_types.MutableJSONEncodedDict(), - default={}, - nullable=False), - - sa.Column( - "args", - sa_types.MutableJSONEncodedDict(), - default={}, - nullable=False), - - sa.Column( - "context", - sa_types.MutableJSONEncodedDict(), - default={}, - nullable=False), - - sa.Column( - "hooks", - sa_types.MutableJSONEncodedList(), - default=[], - nullable=False), - - sa.Column( - "sla", - sa_types.MutableJSONEncodedDict(), - default={}, - nullable=False), - - sa.Column( - "sla_results", - sa_types.MutableJSONEncodedDict(), - default={}, - nullable=False), - - sa.Column( - "context_execution", - sa_types.MutableJSONEncodedDict(), - default={}, - nullable=False), - - sa.Column("load_duration", sa.Float(), default=0), - sa.Column("full_duration", sa.Float(), default=0), - sa.Column("min_duration", sa.Float(), default=0), - sa.Column("max_duration", sa.Float(), default=0), - sa.Column("total_iteration_count", sa.Integer(), default=0), - sa.Column("failed_iteration_count", sa.Integer(), default=0), - - sa.Column("pass_sla", sa.Boolean()), - - sa.Column( - "statistics", - sa_types.MutableJSONEncodedDict(), - default={}, - nullable=False), - - - sa.Column("start_time", sa.DateTime()), - sa.Column("_profiling_data", sa.Text(), default=""), - - sa.ForeignKeyConstraint(["task_uuid"], ["tasks.uuid"], ), - sa.ForeignKeyConstraint(["subtask_uuid"], ["subtasks.uuid"], ), - sa.PrimaryKeyConstraint("id") - ) - - op.create_index("workload_uuid", "workloads", ["uuid"], unique=True) - - workloaddata_table = op.create_table( - "workloaddata", - sa.Column("created_at", sa.DateTime()), - sa.Column("updated_at", sa.DateTime()), - sa.Column("id", sa.Integer(), nullable=False, autoincrement=True), - sa.Column("uuid", sa.String(length=36), nullable=False), - sa.Column("task_uuid", sa.String(length=36), nullable=False), - sa.Column("workload_uuid", sa.String(length=36), nullable=False), - sa.Column("chunk_order", sa.Integer(), nullable=False), - sa.Column("iteration_count", sa.Integer(), nullable=False), - sa.Column("failed_iteration_count", sa.Integer(), nullable=False), - sa.Column("chunk_size", sa.Integer(), nullable=False), - - sa.Column( - "compressed_chunk_size", - sa.Integer(), - nullable=False), - - sa.Column("started_at", sa.DateTime(), nullable=False), - sa.Column("finished_at", sa.DateTime(), nullable=False), - sa.Column( - "chunk_data", - sa_types.MutableJSONEncodedDict(), - default={}, - nullable=False), - - sa.ForeignKeyConstraint(["task_uuid"], ["tasks.uuid"], ), - sa.ForeignKeyConstraint(["workload_uuid"], ["workloads.uuid"], ), - sa.PrimaryKeyConstraint("id") - ) - - op.create_index( - "workload_data_uuid", "workloaddata", ["uuid"], unique=True) - - tag_table = op.create_table( - "tags", - sa.Column("created_at", sa.DateTime()), - sa.Column("updated_at", sa.DateTime()), - sa.Column("id", sa.Integer(), nullable=False, autoincrement=True), - sa.Column("uuid", sa.String(length=36), nullable=False), - sa.Column("tag", sa.String(length=255), 
nullable=False), - - sa.Column( - "type", - sa.Enum( - "task", "subtask", - name="enum_tag_types"), - nullable=False), - - sa.PrimaryKeyConstraint("id") - ) - - op.create_index( - "d_type_tag", "tags", ["uuid", "type", "tag"], unique=True) - - with op.batch_alter_table("tasks") as batch_op: - batch_op.add_column( - sa.Column("title", sa.String(length=64), default="") - ) - - batch_op.add_column( - sa.Column("description", sa.Text(), default="") - ) - - batch_op.add_column( - sa.Column("input_task", sa.Text(), default="") - ) - - batch_op.add_column( - sa.Column("validation_duration", sa.Float()) - ) - - batch_op.add_column( - sa.Column("task_duration", sa.Float()) - ) - - batch_op.add_column( - sa.Column("pass_sla", sa.Boolean()) - ) - - batch_op.add_column( - sa.Column( - "validation_result", - sa_types.MutableJSONEncodedDict(), - default={}) - ) - - for task in conn.execute(taskhelper.select()): - if task.tag: - conn.execute( - tag_table.insert(), - [{ - "uuid": task.uuid, - "type": "task", - "tag": task.tag, - "created_at": task.created_at, - "updated_at": task.updated_at - }] - ) - - task_results = conn.execute( - task_result_helper.select(). - where(task_result_helper.c.task_uuid == task.uuid) - ) - - pass_sla = True - task_duration = 0 - - for task_result in task_results: - raw_data = task_result.data.get("raw", []) - iter_count = len(raw_data) - - failed_iter_count = 0 - max_duration = 0 - min_duration = -1 - - for d in raw_data: - if d.get("error"): - failed_iter_count += 1 - - duration = d.get("duration", 0) - - if duration > max_duration: - max_duration = duration - - if min_duration < 0 or min_duration > duration: - min_duration = duration - - sla = task_result.data.get("sla", []) - success = all([s.get("success") for s in sla]) - - if not success: - pass_sla = False - - task_duration += task_result.data.get("full_duration", 0) - - delta = dt.timedelta( - seconds=task_result.data.get("full_duration", 0)) - start = task_result.created_at - delta - - subtask_uuid = str(uuid.uuid4()) - - conn.execute( - subtask_table.insert(), - [{ - "uuid": subtask_uuid, - "task_uuid": task.uuid, - "created_at": task_result.created_at, - "updated_at": task_result.updated_at, - # NOTE(ikhudoshyn) We don't have info on subtask status - "status": "finished", - "duration": task_result.data.get("full_duration", 0), - "pass_sla": success - }] - ) - - workload_uuid = str(uuid.uuid4()) - - conn.execute( - workload_table.insert(), - [{ - "created_at": task_result.created_at, - "updated_at": task_result.updated_at, - "uuid": workload_uuid, - "task_uuid": task.uuid, - "subtask_uuid": subtask_uuid, - "name": task_result.key["name"], - "position": task_result.key["pos"], - "runner_type": task_result.key["kw"]["runner"]["type"], - "runner": task_result.key["kw"]["runner"], - "context": task_result.key["kw"].get("context", {}), - "sla": task_result.key["kw"].get("sla", {}), - "args": task_result.key["kw"].get("args", {}), - "sla_results": {"sla": sla}, - "context_execution": {}, - "load_duration": task_result.data.get("load_duration", 0), - "full_duration": task_result.data.get("full_duration", 0), - "min_duration": min_duration, - "max_duration": max_duration, - "total_iteration_count": iter_count, - "failed_iteration_count": failed_iter_count, - "pass_sla": success, - "statistics": {}, - "start_time": start, - }] - ) - - conn.execute( - workloaddata_table.insert(), - [{ - "uuid": str(uuid.uuid4()), - "task_uuid": task.uuid, - "workload_uuid": workload_uuid, - "chunk_order": 0, - "iteration_count": iter_count, 
- "failed_iteration_count": failed_iter_count, - "chunk_data": {"raw": raw_data}, - # TODO(ikhudoshyn) - "chunk_size": 0, - "compressed_chunk_size": 0, - "started_at": start, - "finished_at": task_result.created_at - }] - ) - - task_verification_log = {} - if task.verification_log: - task_verification_log = json.loads(task.verification_log) - - conn.execute( - taskhelper.update().where(taskhelper.c.uuid == task.uuid), - { - "pass_sla": pass_sla, - "task_duration": task_duration, - "validation_duration": 0, - "validation_result": task_verification_log - } - ) - - # TODO(ikhudoshyn) update workload's statistics - - with op.batch_alter_table("tasks") as batch_op: - batch_op.drop_column("tag") - batch_op.drop_column("verification_log") - batch_op.alter_column( - "validation_result", - existing_type=sa_types.MutableJSONEncodedDict(), - nullable=False) - - op.drop_table("task_results") - - -def downgrade(): - raise exceptions.DowngradeNotSupported() diff --git a/rally/common/db/sqlalchemy/migrations/versions/f33f4610dcda_change_verification_statuses.py b/rally/common/db/sqlalchemy/migrations/versions/f33f4610dcda_change_verification_statuses.py deleted file mode 100644 index 7565a79f..00000000 --- a/rally/common/db/sqlalchemy/migrations/versions/f33f4610dcda_change_verification_statuses.py +++ /dev/null @@ -1,64 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Change verification statuses - -Revision ID: f33f4610dcda -Revises: a6f364988fc2 -Create Date: 2017-01-23 13:56:30.999593 - -""" - -# revision identifiers, used by Alembic. -revision = "f33f4610dcda" -down_revision = "a6f364988fc2" -branch_labels = None -depends_on = None - -from alembic import op -import sqlalchemy as sa - -from rally import exceptions - - -verifications_helper = sa.Table( - "verifications", - sa.MetaData(), - sa.Column("id", sa.Integer, primary_key=True, autoincrement=True), - sa.Column("failures", sa.Integer, default=0), - sa.Column("unexpected_success", sa.Integer, default=0), - sa.Column("status", sa.String(36), nullable=False) -) - - -def upgrade(): - connection = op.get_bind() - for v in connection.execute(verifications_helper.select()): - new_status = v.status - if v.status == "finished" and ( - v.failures != 0 or v.unexpected_success != 0): - new_status = "failed" - elif v.status == "failed": - new_status = "crashed" - else: - pass - - if new_status != v.status: - connection.execute(verifications_helper.update().where( - verifications_helper.c.id == v.id).values( - status=new_status)) - - -def downgrade(): - raise exceptions.DowngradeNotSupported() diff --git a/rally/common/db/sqlalchemy/models.py b/rally/common/db/sqlalchemy/models.py deleted file mode 100644 index d228827c..00000000 --- a/rally/common/db/sqlalchemy/models.py +++ /dev/null @@ -1,456 +0,0 @@ -# Copyright 2013: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -SQLAlchemy models for rally data. -""" - -import uuid - -from oslo_db.sqlalchemy.compat import utils as compat_utils -from oslo_db.sqlalchemy import models -from oslo_utils import timeutils -import sqlalchemy as sa -from sqlalchemy.ext.declarative import declarative_base -from sqlalchemy import schema - -from rally.common.db.sqlalchemy import types as sa_types -from rally import consts - - -BASE = declarative_base() - - -def UUID(): - return str(uuid.uuid4()) - - -class RallyBase(models.ModelBase): - - metadata = None - created_at = sa.Column(sa.DateTime, default=lambda: timeutils.utcnow()) - updated_at = sa.Column(sa.DateTime, default=lambda: timeutils.utcnow(), - onupdate=lambda: timeutils.utcnow()) - - def save(self, session=None): - # NOTE(LimingWu): We can't direct import the api module. That will - # result in the cyclic reference import since the api has imported - # this module. - from rally.common.db.sqlalchemy import api as sa_api - - if session is None: - session = sa_api.get_session() - - super(RallyBase, self).save(session=session) - - -class Deployment(BASE, RallyBase): - """Represent a deployment of OpenStack.""" - __tablename__ = "deployments" - __table_args__ = ( - sa.Index("deployment_uuid", "uuid", unique=True), - sa.Index("deployment_parent_uuid", "parent_uuid"), - ) - - id = sa.Column(sa.Integer, primary_key=True, autoincrement=True) - uuid = sa.Column(sa.String(36), default=UUID, nullable=False) - parent_uuid = sa.Column( - sa.String(36), - sa.ForeignKey(uuid, use_alter=True, name="fk_parent_uuid"), - default=None, - ) - name = sa.Column(sa.String(255), unique=True) - started_at = sa.Column(sa.DateTime) - completed_at = sa.Column(sa.DateTime) - # XXX(akscram): Do we need to explicitly store a name of the - # deployment engine? 
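One detail of the Deployment model above that is easy to miss: the parent/child link goes through the string uuid column rather than the integer primary key, so the foreign key needs use_alter (the table references itself) and the relationship needs explicit remote_side and foreign_keys. A self-contained sketch of that pattern, with illustrative names ("Node"/"nodes" are not part of Rally):

# Self-contained sketch of the self-referential FK pattern used by
# Deployment; "Node" and "nodes" are illustrative names only.
import uuid as uuidlib

import sqlalchemy as sa
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import orm

Base = declarative_base()


def make_uuid():
    return str(uuidlib.uuid4())


class Node(Base):
    __tablename__ = "nodes"

    id = sa.Column(sa.Integer, primary_key=True, autoincrement=True)
    uuid = sa.Column(sa.String(36), default=make_uuid, unique=True,
                     nullable=False)
    # use_alter defers FK creation, which a self-referencing table needs.
    parent_uuid = sa.Column(
        sa.String(36),
        sa.ForeignKey("nodes.uuid", use_alter=True, name="fk_parent_uuid"),
        default=None)

    # remote_side tells SQLAlchemy which end of the self-join is the parent.
    parent = orm.relationship(
        "Node",
        backref=orm.backref("children"),
        remote_side=[uuid],
        foreign_keys=[parent_uuid])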
- # engine_name = sa.Column(sa.String(36)) - - config = sa.Column( - sa_types.MutableJSONEncodedDict, - default={}, - nullable=False, - ) - - credentials = sa.Column( - sa_types.MutableJSONEncodedDict, default={}, nullable=False) - - status = sa.Column( - sa.Enum(*consts.DeployStatus, name="enum_deploy_status"), - name="enum_deployments_status", - default=consts.DeployStatus.DEPLOY_INIT, - nullable=False, - ) - - parent = sa.orm.relationship( - "Deployment", - backref=sa.orm.backref("subdeploys"), - remote_side=[uuid], - foreign_keys=parent_uuid, - ) - - -class Resource(BASE, RallyBase): - """Represent a resource of a deployment.""" - __tablename__ = "resources" - __table_args__ = ( - sa.Index("resource_deployment_uuid", "deployment_uuid"), - sa.Index("resource_provider_name", "deployment_uuid", "provider_name"), - sa.Index("resource_type", "deployment_uuid", "type"), - sa.Index("resource_provider_name_and_type", "deployment_uuid", - "provider_name", "type"), - ) - - id = sa.Column(sa.Integer, primary_key=True, autoincrement=True) - provider_name = sa.Column(sa.String(255)) - type = sa.Column(sa.String(255)) - - info = sa.Column( - sa_types.MutableJSONEncodedDict, - default={}, - nullable=False, - ) - - deployment_uuid = sa.Column( - sa.String(36), - sa.ForeignKey(Deployment.uuid), - nullable=False, - ) - deployment = sa.orm.relationship( - Deployment, - backref=sa.orm.backref("resources"), - foreign_keys=deployment_uuid, - primaryjoin=(deployment_uuid == Deployment.uuid), - ) - - -class Task(BASE, RallyBase): - """Represents a task.""" - __tablename__ = "tasks" - __table_args__ = ( - sa.Index("task_uuid", "uuid", unique=True), - sa.Index("task_status", "status"), - sa.Index("task_deployment", "deployment_uuid"), - ) - - id = sa.Column(sa.Integer, primary_key=True, autoincrement=True) - uuid = sa.Column(sa.String(36), default=UUID, nullable=False) - - deployment_uuid = sa.Column( - sa.String(36), - sa.ForeignKey(Deployment.uuid), - nullable=False, - ) - - deployment = sa.orm.relationship( - Deployment, - backref=sa.orm.backref("tasks"), - foreign_keys=deployment_uuid, - primaryjoin=(deployment_uuid == Deployment.uuid), - ) - - input_task = sa.Column(sa.Text, default="") - title = sa.Column(sa.String(64), default="") - description = sa.Column(sa.Text, default="") - - validation_result = sa.Column( - sa_types.MutableJSONEncodedDict, default={}, nullable=False) - - validation_duration = sa.Column(sa.Float) - task_duration = sa.Column(sa.Float, default=0.0) - pass_sla = sa.Column(sa.Boolean, default=True) - status = sa.Column(sa.String(36), default=consts.TaskStatus.INIT) - - -class Subtask(BASE, RallyBase): - __tablename__ = "subtasks" - __table_args__ = ( - sa.Index("subtask_uuid", "uuid", unique=True), - sa.Index("subtask_status", "status"), - ) - - id = sa.Column(sa.Integer, primary_key=True, autoincrement=True) - uuid = sa.Column(sa.String(36), default=UUID, nullable=False) - - task_uuid = sa.Column( - sa.String(36), - sa.ForeignKey(Task.uuid), - nullable=False, - ) - - task = sa.orm.relationship( - Task, - backref=sa.orm.backref("subtasks"), - foreign_keys=task_uuid, - primaryjoin=(task_uuid == Task.uuid), - ) - - title = sa.Column(sa.String(64), default="") - description = sa.Column(sa.Text, default="") - - context = sa.Column( - sa_types.JSONEncodedDict, default={}, nullable=False) - - sla = sa.Column( - sa_types.JSONEncodedDict, default={}, nullable=False) - - run_in_parallel = sa.Column(sa.Boolean, default=False, nullable=False) - duration = sa.Column(sa.Float, default=0.0) - 
pass_sla = sa.Column(sa.Boolean, default=True) - status = sa.Column(sa.String(36), default=consts.SubtaskStatus.RUNNING) - - -class Workload(BASE, RallyBase): - __tablename__ = "workloads" - __table_args__ = ( - sa.Index("workload_uuid", "uuid", unique=True), - ) - - id = sa.Column(sa.Integer, primary_key=True, autoincrement=True) - uuid = sa.Column(sa.String(36), default=UUID, nullable=False) - - task_uuid = sa.Column( - sa.String(36), - sa.ForeignKey(Task.uuid), - nullable=False, - ) - - subtask_uuid = sa.Column( - sa.String(36), - sa.ForeignKey(Subtask.uuid), - nullable=False, - ) - - subtask = sa.orm.relationship( - Subtask, - backref=sa.orm.backref("workloads"), - foreign_keys=subtask_uuid, - primaryjoin=(subtask_uuid == Subtask.uuid), - ) - - name = sa.Column(sa.String(64), nullable=False) - description = sa.Column(sa.Text, default="") - position = sa.Column(sa.Integer, default=0, nullable=False) - - runner = sa.Column( - sa_types.JSONEncodedDict, default={}, nullable=False) - - runner_type = sa.Column(sa.String(64), nullable=False) - - context = sa.Column( - sa_types.JSONEncodedDict, default={}, nullable=False) - - sla = sa.Column( - sa_types.JSONEncodedDict, default={}, nullable=False) - - args = sa.Column( - sa_types.JSONEncodedDict, default={}, nullable=False) - - hooks = sa.Column( - sa_types.JSONEncodedList, default=[], nullable=False) - - sla_results = sa.Column( - sa_types.MutableJSONEncodedDict, default={}, nullable=False) - - context_execution = sa.Column( - sa_types.MutableJSONEncodedDict, default={}, nullable=False) - - start_time = sa.Column(sa_types.TimeStamp) - - load_duration = sa.Column(sa.Float, default=0) - full_duration = sa.Column(sa.Float, default=0) - min_duration = sa.Column(sa.Float) - max_duration = sa.Column(sa.Float) - total_iteration_count = sa.Column(sa.Integer, default=0) - failed_iteration_count = sa.Column(sa.Integer, default=0) - - statistics = sa.Column( - sa_types.MutableJSONEncodedDict, default={}, nullable=False) - - pass_sla = sa.Column(sa.Boolean, default=True) - _profiling_data = sa.Column(sa.Text, default="") - - -class WorkloadData(BASE, RallyBase): - __tablename__ = "workloaddata" - __table_args__ = ( - sa.Index("workload_data_uuid", "uuid", unique=True), - ) - - id = sa.Column(sa.Integer, primary_key=True, autoincrement=True) - uuid = sa.Column(sa.String(36), default=UUID, nullable=False) - - task_uuid = sa.Column( - sa.String(36), - sa.ForeignKey(Task.uuid), - nullable=False, - ) - - workload_uuid = sa.Column( - sa.String(36), - sa.ForeignKey(Workload.uuid), - nullable=False, - ) - - workload = sa.orm.relationship( - Workload, - backref=sa.orm.backref("workload_data"), - foreign_keys=workload_uuid, - primaryjoin=(workload_uuid == Workload.uuid), - ) - - chunk_order = sa.Column(sa.Integer, nullable=False) - iteration_count = sa.Column(sa.Integer, nullable=False) - failed_iteration_count = sa.Column(sa.Integer, nullable=False) - chunk_size = sa.Column(sa.Integer, nullable=False) - compressed_chunk_size = sa.Column(sa.Integer, nullable=False) - started_at = sa.Column(sa.DateTime, default=lambda: timeutils.utcnow(), - nullable=False) - finished_at = sa.Column(sa.DateTime, default=lambda: timeutils.utcnow(), - nullable=False) - chunk_data = sa.Column( - sa_types.MutableJSONEncodedDict, default={}, nullable=False) - - -class Tag(BASE, RallyBase): - __tablename__ = "tags" - __table_args__ = ( - sa.Index("d_type_tag", "uuid", "type", "tag", unique=True), - ) - - id = sa.Column(sa.Integer, primary_key=True, autoincrement=True) - uuid = 
sa.Column(sa.String(36), default=UUID, nullable=False) - - type = sa.Column(sa.String(36), nullable=False) - - tag = sa.Column(sa.String(255), nullable=False) - - -class Verifier(BASE, RallyBase): - """Represents a verifier.""" - - __tablename__ = "verifiers" - __table_args__ = ( - sa.Index("verifier_uuid", "uuid", unique=True), - ) - - id = sa.Column(sa.Integer, primary_key=True, autoincrement=True) - uuid = sa.Column(sa.String(36), default=UUID, nullable=False) - - name = sa.Column(sa.String(255), unique=True) - description = sa.Column(sa.Text) - - type = sa.Column(sa.String(255), nullable=False) - namespace = sa.Column(sa.String(255)) - - source = sa.Column(sa.String(255)) - version = sa.Column(sa.String(255)) - system_wide = sa.Column(sa.Boolean) - - status = sa.Column(sa.String(36), default=consts.VerifierStatus.INIT, - nullable=False) - - extra_settings = sa.Column(sa_types.MutableJSONEncodedDict) - - -class Verification(BASE, RallyBase): - """Represents a verification.""" - - __tablename__ = "verifications" - __table_args__ = ( - sa.Index("verification_uuid", "uuid", unique=True), - ) - - id = sa.Column(sa.Integer, primary_key=True, autoincrement=True) - uuid = sa.Column(sa.String(36), default=UUID, nullable=False) - - verifier_uuid = sa.Column(sa.String(36), - sa.ForeignKey(Verifier.uuid), - nullable=False) - deployment_uuid = sa.Column(sa.String(36), - sa.ForeignKey(Deployment.uuid), - nullable=False) - - run_args = sa.Column(sa_types.MutableJSONEncodedDict) - - status = sa.Column(sa.String(36), default=consts.VerificationStatus.INIT, - nullable=False) - - tests_count = sa.Column(sa.Integer, default=0) - failures = sa.Column(sa.Integer, default=0) - skipped = sa.Column(sa.Integer, default=0) - success = sa.Column(sa.Integer, default=0) - unexpected_success = sa.Column(sa.Integer, default=0) - expected_failures = sa.Column(sa.Integer, default=0) - tests_duration = sa.Column(sa.Float, default=0.0) - - tests = sa.Column(sa_types.MutableJSONEncodedDict, default={}) - - -class Worker(BASE, RallyBase): - __tablename__ = "workers" - __table_args__ = ( - schema.UniqueConstraint("hostname", name="uniq_worker@hostname"), - ) - id = sa.Column(sa.Integer, primary_key=True, autoincrement=True) - hostname = sa.Column(sa.String(255)) - - -# TODO(boris-42): Remove it after oslo.db > 1.4.1 will be released. -def drop_all_objects(engine): - """Drop all database objects. - - Drops all database objects remaining on the default schema of the given - engine. Per-db implementations will also need to drop items specific to - those systems, such as sequences, custom types (e.g. pg ENUM), etc. - """ - with engine.begin() as conn: - inspector = sa.inspect(engine) - metadata = schema.MetaData() - tbs = [] - all_fks = [] - - for table_name in inspector.get_table_names(): - fks = [] - for fk in inspector.get_foreign_keys(table_name): - if not fk["name"]: - continue - fks.append( - schema.ForeignKeyConstraint((), (), name=fk["name"])) - table = schema.Table(table_name, metadata, *fks) - tbs.append(table) - all_fks.extend(fks) - - if engine.name != "sqlite": - for fkc in all_fks: - conn.execute(schema.DropConstraint(fkc)) - for table in tbs: - conn.execute(schema.DropTable(table)) - - if engine.name == "postgresql": - if compat_utils.sqla_100: - enums = [e["name"] for e in sa.inspect(conn).get_enums()] - else: - enums = conn.dialect._load_enums(conn).keys() - - for e in enums: - conn.execute("DROP TYPE %s" % e) - - -def drop_db(): - # NOTE(LimingWu): We can't direct import the api module. 
That will
-    # result in the cyclic reference import since the api has imported
-    # this module.
-    from rally.common.db.sqlalchemy import api as sa_api
-    drop_all_objects(sa_api.get_engine())
diff --git a/rally/common/db/sqlalchemy/types.py b/rally/common/db/sqlalchemy/types.py
deleted file mode 100644
index 8eaea6c1..00000000
--- a/rally/common/db/sqlalchemy/types.py
+++ /dev/null
@@ -1,163 +0,0 @@
-# Copyright 2013: Mirantis Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import collections
-import json
-
-from sqlalchemy.dialects import mysql as mysql_types
-from sqlalchemy.ext import mutable
-from sqlalchemy import types as sa_types
-
-
-class TimeStamp(sa_types.TypeDecorator):
-    """Represents a datetime/time timestamp object as a bigint value.
-
-    Although timestamps are represented as float values in Python, a Float
-    column cannot be used to store them, since timestamp values can exceed
-    the limit of Float columns on some back-ends (in which case the value
-    would be truncated). Using a DateTime type is not convenient either,
-    since it is not accurate to microseconds.
-    """
-
-    impl = sa_types.BigInteger
-    _coefficient = 1000000.0
-
-    def process_bind_param(self, value, dialect):
-        if value is None:
-            return None
-        return value * self._coefficient
-
-    def process_result_value(self, value, dialect):
-        if value is None:
-            return None
-        return value / self._coefficient
-
-    def compare_against_backend(self, dialect, conn_type):
-        return isinstance(conn_type, sa_types.BIGINT)
-
-
-class LongText(sa_types.TypeDecorator):
-    """Text column that falls back to LONGTEXT on MySQL.
-
-    MySQL can store only 64KB in a Text column, while e.g. PostgreSQL and
-    SQLite can store more than 1GB. In some cases, such as storing task
-    results, 64KB is not enough, so on MySQL this type uses LONGTEXT,
-    which allows us to store up to 4GiB.
- """ - - def load_dialect_impl(self, dialect): - if dialect.name == "mysql": - return dialect.type_descriptor(mysql_types.LONGTEXT) - else: - return dialect.type_descriptor(sa_types.Text) - - -class JSONEncodedDict(LongText): - """Represents an immutable structure as a json-encoded string.""" - - impl = sa_types.Text - - def process_bind_param(self, value, dialect): - if value is not None: - value = json.dumps(value, sort_keys=False) - return value - - def process_result_value(self, value, dialect): - if value is not None: - value = json.loads( - value, object_pairs_hook=collections.OrderedDict) - return value - - -class JSONEncodedList(JSONEncodedDict): - """Represents an immutable structure as a json-encoded string.""" - - def process_result_value(self, value, dialect): - if value is not None: - value = json.loads(value) - return value - - -class MutableDict(mutable.Mutable, dict): - @classmethod - def coerce(cls, key, value): - """Convert plain dictionaries to MutableDict.""" - - if not isinstance(value, MutableDict): - if isinstance(value, dict): - return MutableDict(value) - - # this call will raise ValueError - return mutable.Mutable.coerce(key, value) - else: - return value - - def __setitem__(self, key, value): - """Detect dictionary set events and emit change events.""" - - dict.__setitem__(self, key, value) - self.changed() - - def __delitem__(self, key): - """Detect dictionary del events and emit change events.""" - - dict.__delitem__(self, key) - self.changed() - - -class MutableList(mutable.Mutable, list): - @classmethod - def coerce(cls, key, value): - """Convert plain lists to MutableList.""" - if not isinstance(value, MutableList): - if isinstance(value, list): - return MutableList(value) - - # this call will raise ValueError - return mutable.Mutable.coerce(key, value) - else: - return value - - def append(self, value): - """Detect list add events and emit change events.""" - list.append(self, value) - self.changed() - - def remove(self, value): - """Removes an item by value and emit change events.""" - list.remove(self, value) - self.changed() - - def __setitem__(self, key, value): - """Detect list set events and emit change events.""" - list.__setitem__(self, key, value) - self.changed() - - def __delitem__(self, i): - """Detect list del events and emit change events.""" - list.__delitem__(self, i) - self.changed() - - -class MutableJSONEncodedList(JSONEncodedList): - """Represent a mutable structure as a json-encoded string.""" - - -class MutableJSONEncodedDict(JSONEncodedDict): - """Represent a mutable structure as a json-encoded string.""" - - -MutableDict.associate_with(MutableJSONEncodedDict) -MutableList.associate_with(MutableJSONEncodedList) diff --git a/rally/common/fileutils.py b/rally/common/fileutils.py deleted file mode 100644 index 8f70a01c..00000000 --- a/rally/common/fileutils.py +++ /dev/null @@ -1,117 +0,0 @@ -# Copyright 2013: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import os -import tempfile -import zipfile - - -def _read_env_file(path, except_env=None): - """Read the environment variable file. - - :param path: the path of the file - :param except_env: the environment variable to avoid in the output - - :returns: the content of the original file except the line starting with - the except_env parameter - """ - output = [] - if os.path.exists(path): - with open(path, "r") as env_file: - content = env_file.readlines() - for line in content: - if except_env is None or not line.startswith("%s=" % - except_env): - output.append(line) - return output - - -def load_env_file(path): - """Load the environment variable file into os.environ. - - :param path: the path of the file - """ - if os.path.exists(path): - content = _read_env_file(path) - for line in content: - (key, sep, value) = line.partition("=") - os.environ[key] = value.rstrip() - - -def _rewrite_env_file(path, initial_content): - """Rewrite the environment variable file. - - :param path: the path of the file - :param initial_content: the original content of the file - """ - with open(path, "w+") as env_file: - for line in initial_content: - env_file.write(line) - - -def update_env_file(path, env_key, env_value): - """Update the environment variable file. - - :param path: the path of the file - :param env_key: the key to update - :param env_value: the value of the property to update - """ - output = _read_env_file(path, env_key) - output.append("%s=%s" % (env_key, env_value)) - _rewrite_env_file(path, output) - - -def update_globals_file(key, value): - """Update the globals variables file. - - :param key: the key to update - :param value: the value to update - """ - dir = os.path.expanduser("~/.rally/") - if not os.path.exists(dir): - os.makedirs(dir) - expanded_path = os.path.join(dir, "globals") - update_env_file(expanded_path, key, "%s\n" % value) - - -def pack_dir(source_directory, zip_name=None): - """Archive content of the directory into .zip - - Zip content of the source folder excluding root directory - into zip archive. When zip_name is specified, it would be used - as a destination for the archive. Otherwise method would - try to use temporary file as a destination for the archive. - - :param source_directory: root of the newly created archive. - Directory is added recursively. - :param zip_name: destination zip file name. - :raises IOError: whenever there are IO issues. - :returns: path to the newly created zip archive either specified via - zip_name or a temporary one. - """ - - if not zip_name: - fp = tempfile.NamedTemporaryFile(delete=False) - zip_name = fp.name - zipf = zipfile.ZipFile(zip_name, mode="w") - try: - for root, dirs, files in os.walk(source_directory): - for f in files: - abspath = os.path.join(root, f) - relpath = os.path.relpath(abspath, source_directory) - zipf.write(abspath, relpath) - finally: - zipf.close() - return zip_name diff --git a/rally/common/i18n.py b/rally/common/i18n.py deleted file mode 100644 index 0a9d9dec..00000000 --- a/rally/common/i18n.py +++ /dev/null @@ -1,35 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -"""oslo.i18n integration module for rally. - -See https://docs.openstack.org/oslo.i18n/latest/user/usage.html . - -""" - -import oslo_i18n - - -_translators = oslo_i18n.TranslatorFactory(domain="rally") - -# The primary translation function using the well-known name "_" -_ = _translators.primary - -# Translators for log levels. -# -# The abbreviated names are meant to reflect the usual use of a short -# name like '_'. The "L" is for "log" and the other letter comes from -# the level. -_LI = _translators.log_info -_LW = _translators.log_warning -_LE = _translators.log_error -_LC = _translators.log_critical diff --git a/rally/common/io/__init__.py b/rally/common/io/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/rally/common/io/junit.py b/rally/common/io/junit.py deleted file mode 100644 index 4245b888..00000000 --- a/rally/common/io/junit.py +++ /dev/null @@ -1,68 +0,0 @@ -# Copyright 2015: eNovance -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import xml.etree.ElementTree as ET - - -class JUnit(object): - SUCCESS = "success" - FAILURE = "failure" - ERROR = "error" - - def __init__(self, test_suite_name): - self.test_suite_name = test_suite_name - self.test_cases = [] - self.n_tests = 0 - self.n_failures = 0 - self.n_errors = 0 - self.total_time = 0.0 - - def add_test(self, test_name, time, outcome=SUCCESS, message=""): - class_name, name = test_name.split(".", 1) - self.test_cases.append({ - "classname": class_name, - "name": name, - "time": str("%.2f" % time), - "outcome": outcome, - "message": message - }) - - if outcome == JUnit.FAILURE: - self.n_failures += 1 - elif outcome == JUnit.ERROR: - self.n_errors += 1 - elif outcome != JUnit.SUCCESS: - raise ValueError("Unexpected outcome %s" % outcome) - - self.n_tests += 1 - self.total_time += time - - def to_xml(self): - xml = ET.Element("testsuite", { - "name": self.test_suite_name, - "tests": str(self.n_tests), - "time": str("%.2f" % self.total_time), - "failures": str(self.n_failures), - "errors": str(self.n_errors), - }) - for test_case in self.test_cases: - outcome = test_case.pop("outcome") - message = test_case.pop("message") - if outcome in [JUnit.FAILURE, JUnit.ERROR]: - sub = ET.SubElement(xml, "testcase", test_case) - sub.append(ET.Element(outcome, {"message": message})) - else: - xml.append(ET.Element("testcase", test_case)) - return ET.tostring(xml, encoding="utf-8").decode("utf-8") diff --git a/rally/common/io/subunit_v2.py b/rally/common/io/subunit_v2.py deleted file mode 100644 index 44ed7af8..00000000 --- a/rally/common/io/subunit_v2.py +++ /dev/null @@ -1,251 +0,0 @@ -# Copyright 2015: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -from oslo_utils import encodeutils -from subunit import v2 - -from rally.common import logging - - -def prepare_input_args(func): - # NOTE(andreykurilin): Variables 'runnable', 'eof', 'route_code' are not - # used in parser. - def inner(self, test_id=None, test_status=None, timestamp=None, - file_name=None, file_bytes=None, mime_type=None, test_tags=None, - runnable=True, eof=False, route_code=None): - if not test_id: - return - - if (test_id.startswith("setUpClass (") or - test_id.startswith("tearDown (")): - test_id = test_id[test_id.find("(") + 1:-1] - - tags = _parse_test_tags(test_id) - - if mime_type: - mime_type, charset = mime_type.split("; ")[:2] - charset = charset.split("=")[1] - else: - charset = None - - func(self, test_id, test_status, timestamp, tags, - file_name, file_bytes, test_tags, mime_type, charset) - - return inner - - -def _parse_test_tags(test_id): - tags = [] - if test_id.find("[") > -1: - tags = test_id.split("[")[1][:-1].split(",") - - return tags - - -class SubunitV2StreamResult(object): - - def __init__(self, expected_failures=None, skipped_tests=None, live=False, - logger_name=None): - self._tests = {} - self._expected_failures = expected_failures or {} - self._skipped_tests = skipped_tests or {} - - self._live = live - self._logger = logging.getLogger(logger_name or __name__) - - self._timestamps = {} - # NOTE(andreykurilin): _first_timestamp and _last_timestamp variables - # are designed to calculate the total time of tests execution. - self._first_timestamp = None - self._last_timestamp = None - - # Store unknown entities and process them later. - self._unknown_entities = {} - self._is_parsed = False - - @staticmethod - def _get_test_name(test_id): - return test_id.split("[")[0] if test_id.find("[") > -1 else test_id - - def _check_expected_failure(self, test_id): - if (test_id in self._expected_failures or - self._get_test_name(test_id) in self._expected_failures): - if self._tests[test_id]["status"] == "fail": - self._tests[test_id]["status"] = "xfail" - if self._expected_failures[test_id]: - self._tests[test_id]["reason"] = ( - self._expected_failures[test_id]) - elif self._tests[test_id]["status"] == "success": - self._tests[test_id]["status"] = "uxsuccess" - - def _process_skipped_tests(self): - for t_id in self._skipped_tests.copy(): - if t_id not in self._tests: - status = "skip" - name = self._get_test_name(t_id) - self._tests[t_id] = {"status": status, - "name": name, - "duration": "%.3f" % 0, - "tags": _parse_test_tags(t_id)} - if self._skipped_tests[t_id]: - self._tests[t_id]["reason"] = self._skipped_tests[t_id] - status += ": %s" % self._tests[t_id]["reason"] - if self._live: - self._logger.info("{-} %s ... %s", name, status) - - self._skipped_tests.pop(t_id) - - def _parse(self): - # NOTE(andreykurilin): When whole test class is marked as skipped or - # failed, there is only one event with reason and status. So we should - # modify all tests of test class manually. - for test_id in self._unknown_entities: - known_test_ids = filter(lambda t: - t == test_id or t.startswith( - "%s." 
% test_id), self._tests) - for t_id in known_test_ids: - if self._tests[t_id]["status"] == "init": - self._tests[t_id]["status"] = ( - self._unknown_entities[test_id]["status"]) - - if self._unknown_entities[test_id].get("reason"): - self._tests[t_id]["reason"] = ( - self._unknown_entities[test_id]["reason"]) - elif self._unknown_entities[test_id].get("traceback"): - self._tests[t_id]["traceback"] = ( - self._unknown_entities[test_id]["traceback"]) - - # decode data - for test_id in self._tests: - for file_name in ["traceback", "reason"]: - # TODO(andreykurilin): decode fields based on mime_type - if file_name in self._tests[test_id]: - self._tests[test_id][file_name] = ( - encodeutils.safe_decode( - self._tests[test_id][file_name])) - - self._is_parsed = True - - @property - def tests(self): - if not self._is_parsed: - self._parse() - return self._tests - - @property - def totals(self): - td = 0 - if self._first_timestamp: - td = (self._last_timestamp - self._first_timestamp).total_seconds() - - return {"tests_count": len(self.tests), - "tests_duration": "%.3f" % td, - "failures": len(self.filter_tests("fail")), - "skipped": len(self.filter_tests("skip")), - "success": len(self.filter_tests("success")), - "unexpected_success": len(self.filter_tests("uxsuccess")), - "expected_failures": len(self.filter_tests("xfail"))} - - @prepare_input_args - def status(self, test_id=None, test_status=None, timestamp=None, tags=None, - file_name=None, file_bytes=None, worker=None, mime_type=None, - charset=None): - if timestamp: - if not self._first_timestamp: - self._first_timestamp = timestamp - self._last_timestamp = timestamp - - if test_status == "exists": - self._tests[test_id] = {"status": "init", - "name": self._get_test_name(test_id), - "duration": "%.3f" % 0, - "tags": tags if tags else []} - elif test_id in self._tests: - if test_status == "inprogress": - # timestamp of test start - self._timestamps[test_id] = timestamp - self._tests[test_id]["timestamp"] = timestamp.strftime( - "%Y-%m-%dT%H:%M:%S%z") - elif test_status: - self._tests[test_id]["duration"] = "%.3f" % ( - timestamp - self._timestamps[test_id]).total_seconds() - self._tests[test_id]["status"] = test_status - - self._check_expected_failure(test_id) - else: - if file_name in ["traceback", "reason"]: - if file_name not in self._tests[test_id]: - self._tests[test_id][file_name] = file_bytes - else: - self._tests[test_id][file_name] += file_bytes - else: - self._unknown_entities.setdefault(test_id, {"name": test_id}) - self._unknown_entities[test_id]["status"] = test_status - if file_name in ["traceback", "reason"]: - if file_name not in self._unknown_entities[test_id]: - self._unknown_entities[test_id][file_name] = file_bytes - else: - self._unknown_entities[test_id][file_name] += file_bytes - - if self._skipped_tests: - self._process_skipped_tests() - - if self._live and test_status not in (None, "exists", "inprogress"): - duration = "" - if test_id in self._tests: - status = self._tests[test_id]["status"] - duration = " [%ss]" % self._tests[test_id]["duration"] - else: - status = test_status - - status += duration - - if "xfail" in status or "skip" in status: - if test_id in self._tests: - reason = self._tests[test_id].get("reason") - else: - reason = self._unknown_entities[test_id].get("reason") - if reason: - status += ": %s" % reason - - w = "{%s} " % worker.pop().split("-")[1] if worker else "-" - self._logger.info( - "%s ... 
%s", w + self._get_test_name(test_id), status) - - def filter_tests(self, status): - """Filter tests by given status.""" - filtered_tests = {} - for test in self.tests: - if self.tests[test]["status"] == status: - filtered_tests[test] = self.tests[test] - - return filtered_tests - - -def parse(stream, expected_failures=None, skipped_tests=None, live=False, - logger_name=None): - results = SubunitV2StreamResult(expected_failures, skipped_tests, live, - logger_name) - v2.ByteStreamToStreamResult(stream, "non-subunit").run(results) - - return results - - -def parse_file(filename, expected_failures=None, skipped_tests=None, - live=False, logger_name=None): - with open(filename, "rb") as stream: - return parse(stream, expected_failures, skipped_tests, live, - logger_name) diff --git a/rally/common/logging.py b/rally/common/logging.py deleted file mode 100644 index 042bd9b2..00000000 --- a/rally/common/logging.py +++ /dev/null @@ -1,286 +0,0 @@ -# Copyright 2014: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import functools -import traceback - -from oslo_config import cfg -from oslo_log import handlers -from oslo_log import log as oslogging - -from rally.common.i18n import _ - -log = __import__("logging") - -DEBUG_OPTS = [cfg.BoolOpt( - "rally-debug", - default=False, - help="Print debugging output only for Rally. " - "Off-site components stay quiet.")] - -CONF = cfg.CONF -CONF.register_cli_opts(DEBUG_OPTS) -oslogging.register_options(CONF) - -log.RDEBUG = log.DEBUG + 1 -log.addLevelName(log.RDEBUG, "RALLYDEBUG") - -CRITICAL = log.CRITICAL -DEBUG = log.DEBUG -ERROR = log.ERROR -FATAL = log.FATAL -INFO = log.INFO -NOTSET = log.NOTSET -RDEBUG = log.RDEBUG -WARN = log.WARN -WARNING = log.WARNING - - -def setup(product_name, version="unknown"): - dbg_color = handlers.ColorHandler.LEVEL_COLORS[log.DEBUG] - handlers.ColorHandler.LEVEL_COLORS[log.RDEBUG] = dbg_color - - oslogging.setup(CONF, product_name, version) - - if CONF.rally_debug: - oslogging.getLogger( - project=product_name).logger.setLevel(log.RDEBUG) - - -class RallyContextAdapter(oslogging.KeywordArgumentAdapter): - - def debug(self, msg, *args, **kwargs): - self.log(log.RDEBUG, msg, *args, **kwargs) - - -def getLogger(name="unknown", version="unknown"): - - if name not in oslogging._loggers: - oslogging._loggers[name] = RallyContextAdapter(log.getLogger(name), - {"project": "rally", - "version": version}) - return oslogging._loggers[name] - - -LOG = getLogger(__name__) - - -class ExceptionLogger(object): - """Context that intercepts and logs exceptions. - - Usage:: - LOG = logging.getLogger(__name__) - ... 
- - def foobar(): - with ExceptionLogger(LOG, "foobar warning") as e: - return house_of_raising_exception() - - if e.exception: - raise e.exception # remove if not required - """ - - def __init__(self, logger, warn=None): - self.logger = logger - self.warn = warn - self.exception = None - - def __enter__(self): - return self - - def __exit__(self, type_, value, traceback): - if value: - self.exception = value - - if self.warn: - self.logger.warning(self.warn) - self.logger.debug(value) - if is_debug(): - self.logger.exception(value) - return True - - -class CatcherHandler(log.handlers.BufferingHandler): - def __init__(self): - log.handlers.BufferingHandler.__init__(self, 0) - - def shouldFlush(self): - return False - - def emit(self, record): - self.buffer.append(record) - - -class LogCatcher(object): - """Context manager that catches log messages. - - User can make an assertion on their content or fetch them all. - - Usage:: - LOG = logging.getLogger(__name__) - ... - - def foobar(): - with LogCatcher(LOG) as catcher_in_rye: - LOG.warning("Running Kids") - - catcher_in_rye.assertInLogs("Running Kids") - """ - def __init__(self, logger): - self.logger = getattr(logger, "logger", logger) - self.handler = CatcherHandler() - - def __enter__(self): - self.logger.addHandler(self.handler) - return self - - def __exit__(self, type_, value, traceback): - self.logger.removeHandler(self.handler) - - def assertInLogs(self, msg): - """Assert that `msg' is a substring at least of one logged message. - - :param msg: Substring to look for. - :return: Log messages where the `msg' was found. - Raises AssertionError if none. - """ - in_logs = [record.msg - for record in self.handler.buffer if msg in record.msg] - if not in_logs: - raise AssertionError("Expected `%s' is not in logs" % msg) - return in_logs - - def fetchLogRecords(self): - """Returns all logged Records.""" - return self.handler.buffer - - def fetchLogs(self): - """Returns all logged messages.""" - return [record.msg for record in self.handler.buffer] - - -def _log_wrapper(obj, log_function, msg, **kw): - """A logging wrapper for any method of a class. - - Class instances that use this decorator should have self.task or - self.deployment attribute. The wrapper produces logs messages both - before and after the method execution, in the following format - (example for tasks): - - "Task | Starting: " - [Method execution...] - "Task | Completed: " - - :param obj: task or deployment which must be attribute of "self" - :param log_function: Logging method to be used, e.g. 
LOG.info - :param msg: Text message (possibly parameterized) to be put to the log - :param **kw: Parameters for msg - """ - def decorator(f): - @functools.wraps(f) - def wrapper(self, *args, **kwargs): - params = {"msg": msg % kw, "obj_name": obj.title(), - "uuid": getattr(self, obj)["uuid"]} - log_function(_("%(obj_name)s %(uuid)s | Starting: %(msg)s") % - params) - result = f(self, *args, **kwargs) - log_function(_("%(obj_name)s %(uuid)s | Completed: %(msg)s") % - params) - return result - return wrapper - return decorator - - -def log_task_wrapper(log_function, msg, **kw): - return _log_wrapper("task", log_function, msg, **kw) - - -def log_deploy_wrapper(log_function, msg, **kw): - return _log_wrapper("deployment", log_function, msg, **kw) - - -def log_verification_wrapper(log_function, msg, **kw): - return _log_wrapper("verification", log_function, msg, **kw) - - -def log_deprecated(message, rally_version, log_function=None, once=False): - """A wrapper marking a certain method as deprecated. - - :param message: Message that describes why the method was deprecated - :param rally_version: version of Rally when the method was deprecated - :param log_function: Logging method to be used, e.g. LOG.info - :param once: Show only once (default is each) - """ - log_function = log_function or LOG.warning - msg = ("`%(func)s()' is deprecated in v%(version)s: %(msg)s." - " Used at %(caller)s") - - def decorator(f): - @functools.wraps(f) - def wrapper(*args, **kwargs): - if not (once and getattr(f, "_warned_dep_method", False)): - log_function(msg % { - "msg": message, - "version": rally_version, - "func": f.__name__, - "caller": str(traceback.extract_stack()[-2]) - }) - - f._warned_dep_method = True - return f(*args, **kwargs) - - return wrapper - return decorator - - -def log_deprecated_args(message, rally_version, deprecated_args, - log_function=None, once=False): - """A wrapper marking certain arguments as deprecated. - - :param message: Message that describes why the arguments were deprecated - :param rally_version: version of Rally when the arguments were deprecated - :param deprecated_args: List of deprecated args. - :param log_function: Logging method to be used, e.g. LOG.info - :param once: Show only once (default is each) - """ - log_function = log_function or LOG.warning - msg = ("Argument(s): %(args)s of `%(func)s()' are deprecated in " - "v%(version)s: %(msg)s. Used at %(caller)s") - - def decorator(f): - - @functools.wraps(f) - def wrapper(*args, **kwargs): - if not (once and getattr(f, "_warned_dep_args", False)): - deprecated = ", ".join([ - "`%s'" % x for x in deprecated_args if x in kwargs]) - if deprecated: - log_function(msg % { - "msg": message, - "version": rally_version, - "args": deprecated, - "func": f.__name__, - "caller": str(traceback.extract_stack()[-2]) - }) - - f._warned_dep_args = True - return f(*args, **kwargs) - - return wrapper - return decorator - - -def is_debug(): - return CONF.debug or CONF.rally_debug diff --git a/rally/common/objects/__init__.py b/rally/common/objects/__init__.py deleted file mode 100644 index 8eb6995e..00000000 --- a/rally/common/objects/__init__.py +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright 2013: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Contains the Rally objects.""" - -from rally.common.objects.credential import Credential # noqa -from rally.common.objects.deploy import Deployment # noqa -from rally.common.objects.task import Subtask # noqa -from rally.common.objects.task import Task # noqa -from rally.common.objects.task import Workload # noqa -from rally.common.objects.verification import Verification # noqa -from rally.common.objects.verifier import Verifier # noqa diff --git a/rally/common/objects/credential.py b/rally/common/objects/credential.py deleted file mode 100644 index e02c8413..00000000 --- a/rally/common/objects/credential.py +++ /dev/null @@ -1,28 +0,0 @@ -# Copyright 2014: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally.plugins.openstack import credential - -# TODO(astudenov): remove this class in future releases - - -class Credential(credential.OpenStackCredential): - """Deprecated version of OpenStackCredential class""" - - def to_dict(self, include_permission=False): - dct = super(Credential, self).to_dict() - if not include_permission: - dct.pop("permission") - return dct diff --git a/rally/common/objects/deploy.py b/rally/common/objects/deploy.py deleted file mode 100644 index a6ecb333..00000000 --- a/rally/common/objects/deploy.py +++ /dev/null @@ -1,171 +0,0 @@ -# Copyright 2013: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
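The deprecated Credential class in objects/credential.py above illustrates a small backward-compatibility pattern: a subclass kept alive purely so old callers keep seeing the old dict shape, with the new field hidden by default. A self-contained sketch of the idea; class and field names here are illustrative, not Rally's:

# Self-contained sketch of the backward-compatibility shim pattern used by
# objects/credential.py above; names are illustrative.
class NewStyleCredential(object):
    def __init__(self, auth_url, username, permission="user"):
        self.auth_url = auth_url
        self.username = username
        self.permission = permission

    def to_dict(self):
        return {"auth_url": self.auth_url,
                "username": self.username,
                "permission": self.permission}


class LegacyCredential(NewStyleCredential):
    """Deprecated wrapper kept only so old callers see the old shape."""

    def to_dict(self, include_permission=False):
        dct = super(LegacyCredential, self).to_dict()
        if not include_permission:
            # the legacy representation never exposed "permission"
            dct.pop("permission")
        return dct


# Old callers keep working unchanged:
# LegacyCredential("http://keystone", "admin").to_dict()
# -> {"auth_url": "http://keystone", "username": "admin"}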
- -import datetime as dt - -import jsonschema - -from rally.common.i18n import _, _LW -from rally.common import db -from rally.common import logging -from rally import consts -from rally.deployment import credential -from rally import exceptions - - -LOG = logging.getLogger(__name__) - -CREDENTIALS_SCHEMA = { - "type": "object", - "patternProperties": { - ".*": { - "type": "array", - "items": { - "type": "object", - "properties": { - "admin": {"type": ["object", "null"]}, - "users": { - "type": "array", - "items": {"type": "object"} - } - }, - "required": ["admin", "users"], - "additionalProperties": False, - }, - } - } -} - - -class Deployment(object): - """Represents a deployment object.""" - TIME_FORMAT = consts.TimeFormat.ISO8601 - - def __init__(self, deployment=None, **attributes): - if deployment: - self.deployment = deployment - else: - self.deployment = db.deployment_create(attributes) - - def __getitem__(self, key): - # TODO(astudenov): remove this in future releases - if key == "admin" or key == "users": - LOG.warning(_LW("deployment.%s is deprecated in Rally 0.9.0. " - "Use deployment.get_credentials_for('openstack')" - "['%s'] to get credentials.") % (key, key)) - return self.get_credentials_for("openstack")[key] - return self.deployment[key] - - def to_dict(self): - result = {} - formatters = ["created_at", "completed_at", "started_at", "updated_at"] - for field, value in self.deployment.items(): - if field in formatters: - if value is None: - value = "n/a" - else: - value = value.strftime(self.TIME_FORMAT) - result[field] = value - return result - - @staticmethod - def get(deploy): - return Deployment(db.deployment_get(deploy)) - - @staticmethod - def list(status=None, parent_uuid=None, name=None): - return [Deployment(deployment) for deployment in - db.deployment_list(status, parent_uuid, name)] - - @staticmethod - def delete_by_uuid(uuid): - db.deployment_delete(uuid) - - def _update(self, values): - self.deployment = db.deployment_update(self.deployment["uuid"], values) - - def update_status(self, status): - self._update({"status": status}) - - def update_name(self, name): - self._update({"name": name}) - - def update_config(self, config): - self._update({"config": config}) - - def update_credentials(self, credentials): - jsonschema.validate(credentials, CREDENTIALS_SCHEMA) - self._update({"credentials": credentials}) - - def get_platforms(self): - return self.deployment["credentials"].keys() - - def get_all_credentials(self): - all_credentials = {} - for platform in self.get_platforms(): - all_credentials[platform] = [] - credential_cls = credential.get(platform) - for credentials in self.deployment["credentials"][platform]: - try: - admin = credentials["admin"] - except Exception: - raise KeyError(credentials) - all_credentials[platform].append({ - "admin": credential_cls(**admin) if admin else None, - "users": [credential_cls(**user) for user in - credentials["users"]]}) - return all_credentials - - def get_credentials_for(self, namespace): - if namespace == "default": - return {"admin": None, "users": []} - try: - creds = self.deployment["credentials"][namespace][0] - except (KeyError, IndexError) as e: - LOG.exception(e) - raise exceptions.RallyException(_( - "No credentials found for %s") % namespace) - - admin = creds["admin"] - credential_cls = credential.get(namespace) - return {"admin": credential_cls(**admin) if admin else None, - "users": [credential_cls(**user) for user in creds["users"]]} - - def set_started(self): - self._update({"started_at": 
dt.datetime.now(), - "status": consts.DeployStatus.DEPLOY_STARTED}) - - def set_completed(self): - self._update({"completed_at": dt.datetime.now(), - "status": consts.DeployStatus.DEPLOY_FINISHED}) - - def add_resource(self, provider_name, type=None, info=None): - return db.resource_create({ - "deployment_uuid": self.deployment["uuid"], - "provider_name": provider_name, - "type": type, - "info": info, - }) - - def get_resources(self, provider_name=None, type=None): - return db.resource_get_all(self.deployment["uuid"], - provider_name=provider_name, type=type) - - @staticmethod - def delete_resource(resource_id): - db.resource_delete(resource_id) - - def delete(self): - db.deployment_delete(self.deployment["uuid"]) diff --git a/rally/common/objects/task.py b/rally/common/objects/task.py deleted file mode 100644 index 29ae7f0c..00000000 --- a/rally/common/objects/task.py +++ /dev/null @@ -1,504 +0,0 @@ -# Copyright 2013: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import datetime as dt -import uuid - -from rally.common import db -from rally.common.i18n import _LE -from rally.common import logging -from rally import consts -from rally import exceptions - - -LOG = logging.getLogger(__name__) - - -OUTPUT_SCHEMA = { - "type": "object", - "properties": { - "additive": { - "type": "array", - "items": { - "type": "object", - "properties": { - "title": {"type": "string"}, - "description": {"type": "string"}, - "chart_plugin": {"type": "string"}, - "data": { - "type": "array", - "items": { - "type": "array", - "items": [{"type": "string"}, - {"type": "number"}], - "additionalItems": False}}, - "label": {"type": "string"}, - "axis_label": {"type": "string"}}, - "required": ["title", "chart_plugin", "data"], - "additionalProperties": False - } - }, - "complete": { - "type": "array", - "items": { - "type": "object", - "properties": { - "title": {"type": "string"}, - "description": {"type": "string"}, - "chart_plugin": {"type": "string"}, - "data": {"anyOf": [ - {"type": "array", - "items": { - "type": "array", - "items": [ - {"type": "string"}, - {"anyOf": [ - {"type": "array", - "items": {"type": "array", - "items": [{"type": "number"}, - {"type": "number"}] - }}, - {"type": "number"}]}]}}, - {"type": "object", - "properties": { - "cols": {"type": "array", - "items": {"type": "string"}}, - "rows": { - "type": "array", - "items": { - "type": "array", - "items": {"anyOf": [{"type": "string"}, - {"type": "number"}]}} - } - }, - "required": ["cols", "rows"], - "additionalProperties": False}, - {"type": "array", "items": {"type": "string"}}, - ]}, - "label": {"type": "string"}, - "axis_label": {"type": "string"} - }, - "required": ["title", "chart_plugin", "data"], - "additionalProperties": False - } - } - }, - "required": ["additive", "complete"], - "additionalProperties": False -} - -HOOK_RUN_RESULT_SCHEMA = { - "type": "object", - "properties": { - "started_at": {"type": "number"}, - "finished_at": {"type": "number"}, - "triggered_by": { - "type": "object", - 
"properties": {"event_type": {"type": "string"}, - "value": {}}, - "required": ["event_type", "value"], - "additionalProperties": False - }, - "status": {"type": "string"}, - "error": { - "type": "array", - "minItems": 3, - "maxItems": 3, - "items": {"type": "string"}, - }, - "output": OUTPUT_SCHEMA, - }, - "required": ["finished_at", "triggered_by", "status"], - "additionalProperties": False -} - -HOOK_RESULTS_SCHEMA = { - "type": "object", - "properties": { - "config": {"type": "object"}, - "results": {"type": "array", - "items": HOOK_RUN_RESULT_SCHEMA}, - "summary": {"type": "object"} - }, - "required": ["config", "results", "summary"], - "additionalProperties": False, -} - -TASK_RESULT_SCHEMA = { - "type": "object", - "$schema": consts.JSON_SCHEMA, - "properties": { - "key": { - "type": "object", - "properties": { - "kw": { - "type": "object" - }, - "name": { - "type": "string" - }, - "pos": { - "type": "integer" - }, - }, - "required": ["kw", "name", "pos"] - }, - "sla": { - "type": "array", - "items": { - "type": "object", - "properties": { - "criterion": { - "type": "string" - }, - "detail": { - "type": "string" - }, - "success": { - "type": "boolean" - } - } - } - }, - "hooks": {"type": "array", "items": HOOK_RESULTS_SCHEMA}, - "result": { - "type": "array", - "items": { - "type": "object", - "properties": { - "atomic_actions": { - # NOTE(chenhb): back compatible, old format is dict - "oneOf": [{"type": "array"}, - {"type": "object"}] - }, - "duration": { - "type": "number" - }, - "error": { - "type": "array" - }, - "idle_duration": { - "type": "number" - }, - # NOTE(amaretskiy): "scenario_output" is deprecated - # in favor of "output" - "scenario_output": { - "type": "object", - "properties": { - "data": { - "type": "object" - }, - "errors": { - "type": "string" - }, - }, - "required": ["data", "errors"] - }, - "output": OUTPUT_SCHEMA - }, - "required": ["atomic_actions", "duration", "error", - "idle_duration"] - } - }, - "load_duration": { - "type": "number", - }, - "full_duration": { - "type": "number", - }, - "created_at": { - "type": "string" - } - }, - "required": ["key", "sla", "result", "load_duration", "full_duration"], - "additionalProperties": False -} - - -TASK_EXTENDED_RESULT_SCHEMA = { - "type": "object", - "$schema": consts.JSON_SCHEMA, - "properties": { - "id": {"type": "integer"}, - "position": {"type": "integer"}, - "task_uuid": {"type": "string"}, - "name": {"type": "string"}, - "load_duration": {"type": "number"}, - "full_duration": {"type": "number"}, - "data": {"type": "array"}, - "sla": { - "type": "array", - "items": { - "type": "object", - "properties": { - "criterion": { - "type": "string" - }, - "detail": { - "type": "string" - }, - "success": { - "type": "boolean" - } - } - } - }, - "hooks": {"type": "array", "items": HOOK_RESULTS_SCHEMA}, - "iterations": { - "type": "array", - "items": { - "type": "object", - "properties": { - "timestamp": { - "type": "number" - }, - "atomic_actions": { - "type": "array" - }, - "duration": { - "type": "number" - }, - "error": { - "type": "array" - }, - "idle_duration": { - "type": "number" - }, - "output": OUTPUT_SCHEMA - }, - "required": ["atomic_actions", "duration", "error", - "idle_duration", "output"] - }, - "minItems": 1 - }, - "created_at": { - "anyOf": [ - {"type": "string", "format": "date-time"} - ] - }, - "updated_at": { - "anyOf": [ - {"type": "string", "format": "date-time"} - ] - }, - "info": { - "type": "object", - "properties": { - "atomic": {"type": "object"}, - "iterations_count": {"type": "integer"}, 
- "iterations_failed": {"type": "integer"}, - "min_duration": {"type": "number"}, - "max_duration": {"type": "number"}, - "tstamp_start": {"type": "number"}, - "full_duration": {"type": "number"}, - "load_duration": {"type": "number"} - } - } - }, - "required": ["name", "position", "sla", "iterations", "info"], - "additionalProperties": False -} - - -class Task(object): - """Represents a task object. - - Task states graph - - INIT -> VALIDATING |-> VALIDATION_FAILED - |-> ABORTING -> ABORTED - |-> SOFT_ABORTING -> ABORTED - |-> CRASHED - |-> VALIDATED |-> RUNNING |-> FINISHED - |-> ABORTING -> ABORTED - |-> SOFT_ABORTING -> ABORTED - |-> CRASHED - """ - - # NOTE(andreykurilin): The following stages doesn't contain check for - # current status of task. We should add it in the future, since "abort" - # cmd should work everywhere. - # TODO(andreykurilin): allow abort for each state. - NOT_IMPLEMENTED_STAGES_FOR_ABORT = [consts.TaskStatus.VALIDATING, - consts.TaskStatus.INIT] - - def __init__(self, task=None, temporary=False, **attributes): - """Task object init - - :param task: dictionary like object, that represents a task - :param temporary: whenever this param is True the task will be created - with a random UUID and no database record. Used for special - purposes, like task config validation. - """ - - self.is_temporary = temporary - - if self.is_temporary: - self.task = task or {"uuid": str(uuid.uuid4())} - self.task.update(attributes) - else: - self.task = task or db.task_create(attributes) - - def __getitem__(self, key): - return self.task[key] - - @staticmethod - def _serialize_dt(obj): - if isinstance(obj["created_at"], dt.datetime): - obj["created_at"] = obj["created_at"].strftime( - consts.TimeFormat.ISO8601) - obj["updated_at"] = obj["updated_at"].strftime( - consts.TimeFormat.ISO8601) - - def to_dict(self): - db_task = self.task - deployment_name = db.deployment_get( - self.task["deployment_uuid"])["name"] - db_task["deployment_name"] = deployment_name - self._serialize_dt(db_task) - for subtask in db_task.get("subtasks", []): - self._serialize_dt(subtask) - for workload in subtask["workloads"]: - self._serialize_dt(workload) - return db_task - - @classmethod - def get(cls, uuid, detailed=False): - return cls(db.api.task_get(uuid, detailed=detailed)) - - @staticmethod - def get_status(uuid): - return db.task_get_status(uuid) - - @staticmethod - def list(status=None, deployment=None, tags=None): - return [Task(db_task) for db_task in db.task_list( - status, deployment=deployment, tags=tags)] - - @staticmethod - def delete_by_uuid(uuid, status=None): - db.task_delete(uuid, status=status) - - def _update(self, values): - if not self.is_temporary: - self.task = db.task_update(self.task["uuid"], values) - else: - self.task.update(values) - - def update_status(self, status, allowed_statuses=None): - if allowed_statuses: - db.task_update_status(self.task["uuid"], status, allowed_statuses) - else: - self._update({"status": status}) - - def set_validation_failed(self, log): - self._update({"status": consts.TaskStatus.VALIDATION_FAILED, - "validation_result": log}) - - def set_failed(self, etype, msg, etraceback): - self._update({"status": consts.TaskStatus.CRASHED, - "validation_result": { - "etype": etype, "msg": msg, "trace": etraceback}}) - - def add_subtask(self, **subtask): - return Subtask(self.task["uuid"], **subtask) - - def delete(self, status=None): - db.task_delete(self.task["uuid"], status=status) - - def abort(self, soft=False): - current_status = 
self.get_status(self.task["uuid"])
-
-        if current_status in self.NOT_IMPLEMENTED_STAGES_FOR_ABORT:
-            raise exceptions.RallyException(
-                _LE("Failed to abort task '%(uuid)s'. It is not implemented "
-                    "for the '%(stages)s' stages. Current task status is "
-                    "'%(status)s'.") %
-                {"uuid": self.task["uuid"], "status": current_status,
-                 "stages": ", ".join(self.NOT_IMPLEMENTED_STAGES_FOR_ABORT)})
-        elif current_status in [consts.TaskStatus.FINISHED,
-                                consts.TaskStatus.CRASHED,
-                                consts.TaskStatus.ABORTED]:
-            raise exceptions.RallyException(
-                _LE("Failed to abort task '%s', since it has already "
-                    "finished.") % self.task["uuid"])
-
-        new_status = (consts.TaskStatus.SOFT_ABORTING
-                      if soft else consts.TaskStatus.ABORTING)
-        self.update_status(new_status, allowed_statuses=(
-            consts.TaskStatus.RUNNING, consts.TaskStatus.SOFT_ABORTING))
-
-
-class Subtask(object):
-    """Represents a subtask object."""
-
-    def __init__(self, task_uuid, **attributes):
-        self.subtask = db.subtask_create(task_uuid, **attributes)
-
-    def __getitem__(self, key):
-        return self.subtask[key]
-
-    def _update(self, values):
-        self.subtask = db.subtask_update(self.subtask["uuid"], values)
-
-    def update_status(self, status):
-        self._update({"status": status})
-
-    def add_workload(self, name, description, position, runner, context,
-                     hooks, sla, args):
-        return Workload(task_uuid=self.subtask["task_uuid"],
-                        subtask_uuid=self.subtask["uuid"], name=name,
-                        description=description, position=position,
-                        runner=runner, hooks=hooks, context=context, sla=sla,
-                        args=args)
-
-
-class Workload(object):
-    """Represents a workload object."""
-
-    def __init__(self, task_uuid, subtask_uuid, name, description, position,
-                 runner, hooks, context, sla, args):
-        self.workload = db.workload_create(
-            task_uuid=task_uuid, subtask_uuid=subtask_uuid, name=name,
-            description=description, position=position, runner=runner,
-            runner_type=runner["type"], hooks=hooks, context=context, sla=sla,
-            args=args)
-
-    def __getitem__(self, key):
-        return self.workload[key]
-
-    def add_workload_data(self, chunk_order, workload_data):
-        db.workload_data_create(self.workload["task_uuid"],
-                                self.workload["uuid"], chunk_order,
-                                workload_data)
-
-    def set_results(self, load_duration, full_duration, start_time,
-                    sla_results, hooks_results=None):
-        db.workload_set_results(workload_uuid=self.workload["uuid"],
-                                subtask_uuid=self.workload["subtask_uuid"],
-                                task_uuid=self.workload["task_uuid"],
-                                load_duration=load_duration,
-                                full_duration=full_duration,
-                                start_time=start_time,
-                                sla_results=sla_results,
-                                hooks_results=hooks_results)
-
-    @classmethod
-    def format_workload_config(cls, workload):
-        return {"args": workload["args"],
-                "runner": workload["runner"],
-                "context": workload["context"],
-                "sla": workload["sla"],
-                "hooks": [r["config"] for r in workload["hooks"]]}
diff --git a/rally/common/objects/verification.py b/rally/common/objects/verification.py
deleted file mode 100644
index dd841f5c..00000000
--- a/rally/common/objects/verification.py
+++ /dev/null
@@ -1,87 +0,0 @@
-# Copyright 2014: Mirantis Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License.
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally.common import db -from rally import consts - - -class Verification(object): - """Represents a verification object.""" - TIME_FORMAT = consts.TimeFormat.ISO8601 - - def __init__(self, verification): - """Init a verification object. - - :param verification: Dict representation of a verification - in the database - """ - self._db_entry = verification - - def __getattr__(self, attr): - return self._db_entry[attr] - - def __getitem__(self, item): - return self._db_entry[item] - - def to_dict(self, item=None): - data = {} - formatters = ["created_at", "updated_at"] - fields = ["deployment_uuid", "verifier_uuid", "uuid", "id", - "unexpected_success", "status", "tests", "skipped", - "tags", "tests_duration", "run_args", "success", - "expected_failures", "tests_count", "failures"] - for field in fields: - data[field] = self._db_entry.get(field, "") - for field in formatters: - data[field] = self._db_entry.get(field, "").strftime( - self.TIME_FORMAT) - return data - - @classmethod - def create(cls, verifier_id, deployment_id, tags=None, run_args=None): - return cls(db.verification_create( - verifier_id, deployment_id, tags, run_args)) - - @classmethod - def get(cls, verification_uuid): - return cls(db.verification_get(verification_uuid)) - - @classmethod - def list(cls, verifier_id=None, deployment_id=None, tags=None, - status=None): - verification_list = db.verification_list(verifier_id, deployment_id, - tags, status) - return [cls(db_entry) for db_entry in verification_list] - - def delete(self): - db.verification_delete(self.uuid) - - def _update(self, **properties): - self._db_entry = db.verification_update(self.uuid, **properties) - - def update_status(self, status): - self._update(status=status) - - def finish(self, totals, tests): - if (totals.get("failures", 0) == 0 and - totals.get("unexpected_success", 0) == 0): - status = consts.VerificationStatus.FINISHED - else: - status = consts.VerificationStatus.FAILED - self._update(status=status, tests=tests, **totals) - - def set_error(self, error_message): - # TODO(andreykurilin): Save error message in the database. - self.update_status(consts.VerificationStatus.CRASHED) diff --git a/rally/common/objects/verifier.py b/rally/common/objects/verifier.py deleted file mode 100644 index d5ecfb7f..00000000 --- a/rally/common/objects/verifier.py +++ /dev/null @@ -1,104 +0,0 @@ -# Copyright 2016: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
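For context, a rough sketch of the verification lifecycle that the class above implements; the IDs and the exact totals keys are assumptions for illustration only:

    from rally.common import objects

    verification = objects.Verification.create(
        verifier_id="verifier-uuid", deployment_id="deployment-uuid",
        tags=["smoke"], run_args={"pattern": "some.test"})
    verification.update_status("running")
    # finish() derives the final status: FINISHED only when there are
    # neither failures nor unexpected successes, otherwise FAILED.
    verification.finish(
        totals={"tests_count": 10, "tests_duration": 42.0, "success": 10,
                "skipped": 0, "expected_failures": 0,
                "unexpected_success": 0, "failures": 0},
        tests={})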
- -from rally.common import db -from rally import consts -from rally import exceptions -from rally.verification import manager - - -class Verifier(object): - """Represents a verifier object.""" - TIME_FORMAT = consts.TimeFormat.ISO8601 - - def __init__(self, verifier): - """Init a verifier object. - - :param verifier: Dict representation of a verifier in the database - """ - self._db_entry = verifier - - self._deployment = None - self._manager = None - - def __getattr__(self, attr): - return self._db_entry[attr] - - def __getitem__(self, item): - return self._db_entry[item] - - def __str__(self): - return "'%s' (UUID=%s)" % (self.name, self.uuid) - - def to_dict(self, item=None): - data = {} - formatters = ["created_at", "updated_at"] - fields = ["status", "system_wide", "uuid", "type", "namespace", - "name", "source", "version", "extra_settings", - "id", "description"] - for field in fields: - data[field] = self._db_entry.get(field, "") - for field in formatters: - data[field] = self._db_entry.get(field, "").strftime( - self.TIME_FORMAT) - return data - - @classmethod - def create(cls, name, vtype, namespace, source, version, system_wide, - extra_settings=None): - db_entry = db.verifier_create(name=name, vtype=vtype, - namespace=namespace, source=source, - version=version, system_wide=system_wide, - extra_settings=extra_settings) - return cls(db_entry) - - @classmethod - def get(cls, verifier_id): - return cls(db.verifier_get(verifier_id)) - - @classmethod - def list(cls, status=None): - return [cls(db_entry) for db_entry in db.verifier_list(status)] - - @staticmethod - def delete(verifier_id): - db.verifier_delete(verifier_id) - - def update_status(self, status): - self.update_properties(status=status) - - def update_properties(self, **properties): - self._db_entry = db.verifier_update(self.uuid, **properties) - - def set_deployment(self, deployment_id): - from rally.common import objects - self._deployment = objects.Deployment.get(deployment_id) - - @property - def deployment(self): - if self._deployment is None: - raise exceptions.RallyException( - "Verifier is not linked to any deployment. Please, call " - "`set_deployment` method.") - return self._deployment - - @property - def manager(self): - # lazy load manager to be able to use non-plugin related stuff without - # loading plugins - if not self._manager: - self._manager = manager.VerifierManager.get(self.type, - self.namespace)(self) - return self._manager diff --git a/rally/common/opts.py b/rally/common/opts.py deleted file mode 100644 index 35d5df3d..00000000 --- a/rally/common/opts.py +++ /dev/null @@ -1,44 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
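The ``manager`` property above lazy-loads the verifier plugin so that code paths which never touch plugins do not pay the import cost. The same pattern in isolation, as a self-contained sketch:

    class LazyHolder(object):
        def __init__(self, loader):
            self._loader = loader   # expensive factory, called at most once
            self._manager = None

        @property
        def manager(self):
            if self._manager is None:
                self._manager = self._loader()
            return self._manager

    holder = LazyHolder(lambda: object())
    first = holder.manager           # the factory runs here
    assert holder.manager is first   # cached on later access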
-
-import itertools
-
-from oslo_config import cfg
-
-from rally.common import logging
-from rally import osclients
-from rally.plugins.openstack.cfg import opts as openstack_opts
-from rally.task import engine
-
-CONF = cfg.CONF
-
-
-def list_opts():
-
-    merged_opts = {}
-    for category, options in openstack_opts.list_opts().items():
-        merged_opts.setdefault(category, [])
-        merged_opts[category].extend(options)
-    merged_opts["DEFAULT"] = itertools.chain(logging.DEBUG_OPTS,
-                                             osclients.OSCLIENTS_OPTS,
-                                             engine.TASK_ENGINE_OPTS)
-    return merged_opts.items()
-
-
-def register():
-    for category, options in list_opts():
-        group = cfg.OptGroup(name=category, title="%s options" % category)
-        if category != "DEFAULT":
-            CONF.register_group(group)
-        CONF.register_opts(options, group=group)
diff --git a/rally/common/plugin/__init__.py b/rally/common/plugin/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/rally/common/plugin/discover.py b/rally/common/plugin/discover.py
deleted file mode 100644
index 51467af0..00000000
--- a/rally/common/plugin/discover.py
+++ /dev/null
@@ -1,137 +0,0 @@
-# Copyright 2015: Mirantis Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import imp
-import importlib
-import os
-import pkg_resources
-import pkgutil
-import sys
-
-from oslo_utils import importutils
-import six
-
-import rally
-from rally.common.i18n import _
-from rally.common import logging
-
-LOG = logging.getLogger(__name__)
-
-
-def itersubclasses(cls, seen=None):
-    """Generator over all subclasses of a given class in depth first order.
-
-    NOTE: Use 'seen' to exclude classes that were already found, since a
-    subclass can be reached more than once when it has multiple plugin
-    super classes.
-    """
-
-    seen = seen or set()
-    try:
-        subs = cls.__subclasses__()
-    except TypeError:   # fails only when cls is type
-        subs = cls.__subclasses__(cls)
-    for sub in subs:
-        if sub not in seen:
-            seen.add(sub)
-            yield sub
-            for sub in itersubclasses(sub, seen):
-                yield sub
-
-
-def import_modules_from_package(package):
-    """Import modules from package and append into sys.modules
-
-    :param package: Full package name. For example: rally.deployment.engines
-    """
-    path = [os.path.dirname(rally.__file__), ".."] + package.split(".")
-    path = os.path.join(*path)
-    for root, dirs, files in os.walk(path):
-        for filename in files:
-            if filename.startswith("__") or not filename.endswith(".py"):
-                continue
-            new_package = ".".join(root.split(os.sep)).split("....")[1]
-            module_name = "%s.%s" % (new_package, filename[:-3])
-            if module_name not in sys.modules:
-                sys.modules[module_name] = importutils.import_module(
-                    module_name)
-
-
-def import_modules_by_entry_point():
-    """Import plugins by entry-point 'rally_plugins'."""
-    for ep in pkg_resources.iter_entry_points("rally_plugins"):
-        if ep.name == "path":
-            try:
-                m = ep.load()
-                if hasattr(m, "__path__"):
-                    path = pkgutil.extend_path(m.__path__, m.__name__)
-                else:
-                    path = [m.__file__]
-                prefix = m.__name__ + "."
- for loader, name, _is_pkg in pkgutil.walk_packages( - path, prefix=prefix): - sys.modules[name] = importlib.import_module(name) - except Exception as e: - msg = ("\t Failed to load plugins from module '%(module)s' " - "(package: '%(package)s')" % - {"module": ep.module_name, - "package": "%s %s" % (ep.dist.project_name, - ep.dist.version)}) - if logging.is_debug(): - LOG.exception(msg) - else: - LOG.warning(msg + (": %s" % six.text_type(e))) - - -def load_plugins(dir_or_file): - if os.path.isdir(dir_or_file): - directory = dir_or_file - LOG.info(_("Loading plugins from directories %s/*") % - directory.rstrip("/")) - - to_load = [] - for root, dirs, files in os.walk(directory, followlinks=True): - to_load.extend((plugin[:-3], root) - for plugin in files if plugin.endswith(".py")) - for plugin, directory in to_load: - if directory not in sys.path: - sys.path.append(directory) - - fullpath = os.path.join(directory, plugin) - try: - fp, pathname, descr = imp.find_module(plugin, [directory]) - imp.load_module(plugin, fp, pathname, descr) - fp.close() - LOG.info(_("\t Loaded module with plugins: %s.py") % fullpath) - except Exception as e: - LOG.warning( - "\t Failed to load module with plugins %(path)s.py: %(e)s" - % {"path": fullpath, "e": e}) - if logging.is_debug(): - LOG.exception(e) - elif os.path.isfile(dir_or_file): - plugin_file = dir_or_file - LOG.info(_("Loading plugins from file %s") % plugin_file) - if plugin_file not in sys.path: - sys.path.append(plugin_file) - try: - plugin_name = os.path.splitext(plugin_file.split("/")[-1])[0] - imp.load_source(plugin_name, plugin_file) - LOG.info(_("\t Loaded module with plugins: %s.py") % plugin_name) - except Exception as e: - LOG.warning(_( - "\t Failed to load module with plugins %(path)s: %(e)s") - % {"path": plugin_file, "e": e}) - if logging.is_debug(): - LOG.exception(e) diff --git a/rally/common/plugin/info.py b/rally/common/plugin/info.py deleted file mode 100644 index 8fdc3f6c..00000000 --- a/rally/common/plugin/info.py +++ /dev/null @@ -1,138 +0,0 @@ -# Copyright 2015: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
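Taken together, the helpers above give three ways to pull out-of-tree plugins into a running process; the paths below are hypothetical:

    from rally.common.plugin import discover

    # every *.py file under a directory (symlinks are followed)
    discover.load_plugins("/opt/rally/plugins")
    # a single plugin module
    discover.load_plugins("/opt/rally/plugins/my_plugin.py")
    # packages that advertise a "rally_plugins" entry point named "path"
    discover.import_modules_by_entry_point()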
-
-import re
-import sys
-
-PARAM_OR_RETURNS_REGEX = re.compile(":(?:param|returns)")
-RETURNS_REGEX = re.compile(":returns: (?P<doc>.*)", re.S)
-PARAM_REGEX = re.compile(":param (?P<name>[\*\w]+): (?P<doc>.*?)"
-                         "(?:(?=:param)|(?=:return)|(?=:raises)|\Z)", re.S)
-
-
-def trim(docstring):
-    """trim function from PEP-257"""
-    if not docstring:
-        return ""
-    # Convert tabs to spaces (following the normal Python rules)
-    # and split into a list of lines:
-    lines = docstring.expandtabs().splitlines()
-    # Determine minimum indentation (first line doesn't count):
-    indent = sys.maxsize
-    for line in lines[1:]:
-        stripped = line.lstrip()
-        if stripped:
-            indent = min(indent, len(line) - len(stripped))
-    # Remove indentation (first line is special):
-    trimmed = [lines[0].strip()]
-    if indent < sys.maxsize:
-        for line in lines[1:]:
-            trimmed.append(line[indent:].rstrip())
-    # Strip off trailing and leading blank lines:
-    while trimmed and not trimmed[-1]:
-        trimmed.pop()
-    while trimmed and not trimmed[0]:
-        trimmed.pop(0)
-
-    # Current code/unittests expects a line return at
-    # end of multiline docstrings
-    # workaround expected behavior from unittests
-    if "\n" in docstring:
-        trimmed.append("")
-
-    # Return a single string:
-    return "\n".join(trimmed)
-
-
-def reindent(string):
-    return "\n".join(l.strip() for l in string.strip().split("\n"))
-
-
-def parse_docstring(docstring):
-    """Parse the docstring into its components.
-
-    :returns: a dictionary of form
-              {
-                  "short_description": ...,
-                  "long_description": ...,
-                  "params": [{"name": ..., "doc": ...}, ...],
-                  "returns": ...
-              }
-    """
-
-    short_description = long_description = returns = ""
-    params = []
-
-    if docstring:
-        docstring = trim(docstring)
-
-        lines = docstring.split("\n", 1)
-        short_description = lines[0]
-
-        if len(lines) > 1:
-            long_description = lines[1].strip()
-
-            params_returns_desc = None
-
-            match = PARAM_OR_RETURNS_REGEX.search(long_description)
-            if match:
-                long_desc_end = match.start()
-                params_returns_desc = long_description[long_desc_end:].strip()
-                long_description = long_description[:long_desc_end].rstrip()
-
-            if params_returns_desc:
-                params = [
-                    {"name": name, "doc": trim(doc)}
-                    for name, doc in PARAM_REGEX.findall(params_returns_desc)
-                ]
-
-                match = RETURNS_REGEX.search(params_returns_desc)
-                if match:
-                    returns = reindent(match.group("doc"))
-
-    return {
-        "short_description": short_description,
-        "long_description": long_description,
-        "params": params,
-        "returns": returns
-    }
-
-
-class InfoMixin(object):
-
-    @classmethod
-    def _get_doc(cls):
-        """Return documentation for the class.
-
-        By default it returns the docstring of the class, but it can be
-        overridden, for example, to merge a plugin's own docstring with
-        its parent's.
-        """
-        return cls.__doc__
-
-    @classmethod
-    def get_info(cls):
-        doc = parse_docstring(cls._get_doc())
-
-        return {
-            "name": cls.get_name(),
-            "platform": cls.get_platform(),
-            "namespace": cls.get_platform(),
-            "module": cls.__module__,
-            "title": doc["short_description"],
-            "description": doc["long_description"],
-            "parameters": doc["params"],
-            "schema": getattr(cls, "CONFIG_SCHEMA", None),
-            "returns": doc["returns"]
-        }
diff --git a/rally/common/plugin/meta.py b/rally/common/plugin/meta.py
deleted file mode 100644
index 6af725ac..00000000
--- a/rally/common/plugin/meta.py
+++ /dev/null
@@ -1,122 +0,0 @@
-# Copyright 2015: Mirantis Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License.
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import copy - - -DEFAULT_META_CONCATENATION = { - "context": "dict", - "validators": "list", -} - - -class MetaMixin(object): - """Safe way to store meta information related to class object. - - We are storing information in class object instead of the instance. - Information is stored in dict that is initialized only once during the - load of module, it means that all subclasses of this class will point to - the same dict object with the information. - - Sample that explains why it's important to use MetaMixin: - - >>> # Using direct fields - >>> - >>> class A(object): - >>> _meta = {} - >>> - >>> class B(A): - >>> pass - >>> - >>> B._meta["a"] = 10 - >>> assert A._meta["a"] == 10 # We changed meta of base class, which - # is going to produce nasty bugs - - >>> # MetaMixin in action - >>> - >>> class A(MetaMixin): - >>> pass - >>> - >>> class B(A): - >>> pass - >>> - >>> A._meta_set("a", 10) # Raises ReferenceError - >>> A._meta_init() - >>> A._meta_set("a", 10) # Set meta field "a" - >>> - >>> B._meta_get("a") # Raises ReferenceError - >>> B._meta_init() - >>> B._meta_set("a", 20) # Set meta field "a" - >>> - >>> assert A._meta_get("a") == 10 - >>> assert B._meta_get("a") == 20 - """ - - @classmethod - def _meta_init(cls): - """Initialize meta for this class.""" - cls._meta = {} - - # set default values defined in all parent classes - for class_ in reversed(cls.__mro__): - default_meta = vars(class_).get("DEFAULT_META", {}) - for key, value in default_meta.items(): - if key in DEFAULT_META_CONCATENATION: - if DEFAULT_META_CONCATENATION[key] == "list": - cls._meta.setdefault(key, []) - cls._meta[key].extend(value) - elif DEFAULT_META_CONCATENATION[key] == "dict": - cls._meta.setdefault(key, {}) - cls._meta[key].update(value) - else: - cls._meta[key] = copy.deepcopy(value) - - @classmethod - def _meta_clear(cls): - cls._meta.clear() # NOTE(boris-42): make sure that meta is deleted - delattr(cls, "_meta") - - @classmethod - def _meta_is_inited(cls, raise_exc=True): - """Check if meta is initialized. - - It means that this class has own cls._meta object (not pointer - to parent cls._meta) - """ - if vars(cls).get("_meta") is None: - if raise_exc: - raise ReferenceError( - "Trying to use MetaMixin before initialization %s. " - "Call _meta_init() before using it" % cls) - return False - return True - - @classmethod - def _meta_get(cls, key, default=None): - """Get value corresponding to key in meta data.""" - cls._meta_is_inited() - return cls._meta.get(key, default) - - @classmethod - def _meta_set(cls, key, value): - """Set value for key in meta.""" - cls._meta_is_inited() - cls._meta[key] = value - - @classmethod - def _meta_setdefault(cls, key, value): - """Set default value for key in meta.""" - cls._meta_is_inited() - cls._meta.setdefault(key, value) diff --git a/rally/common/plugin/plugin.py b/rally/common/plugin/plugin.py deleted file mode 100644 index f55fa84d..00000000 --- a/rally/common/plugin/plugin.py +++ /dev/null @@ -1,216 +0,0 @@ -# Copyright 2015: Mirantis Inc. -# All Rights Reserved. 
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import sys
-
-from rally.common.i18n import _
-from rally.common.i18n import _LE
-from rally.common.plugin import discover
-from rally.common.plugin import info
-from rally.common.plugin import meta
-from rally import exceptions
-
-
-def base():
-    """Mark Plugin as a base.
-
-    Base Plugins are used to have better organization of plugins.
-
-    It solves the following problems:
-
-    - Having different types of plugins (e.g. Scenario, Context, SLA, ...)
-    - Auto generation of plugin reference with splitting plugins by their base
-    - Plugin lookup - one can easily get all plugins from some base.
-
-    .. warning:: This decorator should be added on the line before
-        six.add_metaclass if that is used.
-    """
-    def wrapper(cls):
-        if not issubclass(cls, Plugin):
-            raise exceptions.RallyException(_(
-                "Plugin's Base can be only a subclass of Plugin class."))
-
-        parent = cls._get_base()
-        if parent != Plugin:
-            raise exceptions.RallyException(_(
-                "'%(plugin_cls)s' can not be marked as plugin base, since it "
-                "inherits from '%(parent)s' which is also plugin base.") % {
-                "plugin_cls": cls.__name__,
-                "parent": parent.__name__})
-
-        cls.base_ref = cls
-        return cls
-    return wrapper
-
-
-def configure(name, platform="default", hidden=False):
-    """Use this decorator to configure plugin's attributes.
-
-    Plugin is not discoverable until configure() is performed.
-
-    :param name: name of plugin that is used for searching purpose
-    :param platform: platform name that plugin belongs to
-    :param hidden: if True the plugin will be marked as hidden and can be
-        loaded only explicitly
-    """
-
-    def decorator(plugin):
-        if name is None:
-            plugin_id = "%s.%s" % (plugin.__module__, plugin.__name__)
-            raise ValueError("The name of the plugin %s cannot be None." %
-                             plugin_id)
-
-        plugin._meta_init()
-        try:
-            existing_plugin = plugin._get_base().get(
-                name=name, platform=platform, allow_hidden=True,
-                fallback_to_default=False)
-        except exceptions.PluginNotFound:
-            plugin._meta_set("name", name)
-            plugin._meta_set("platform", platform)
-        else:
-            plugin.unregister()
-            raise exceptions.PluginWithSuchNameExists(
-                name=name, platform=existing_plugin.get_platform(),
-                existing_path=(
-                    sys.modules[existing_plugin.__module__].__file__),
-                new_path=sys.modules[plugin.__module__].__file__
-            )
-        plugin._meta_set("hidden", hidden)
-        return plugin
-
-    return decorator
-
-
-def deprecated(reason, rally_version):
-    """Mark plugin as deprecated.
-
-    :param reason: Message that describes the reason of plugin deprecation
-    :param rally_version: Deprecated since this version of Rally
-    """
-    def decorator(plugin):
-        plugin._meta_set("deprecated", {
-            "reason": reason,
-            "rally_version": rally_version
-        })
-        return plugin
-
-    return decorator
-
-
-class Plugin(meta.MetaMixin, info.InfoMixin):
-    """Base class for all Plugins in Rally."""
-
-    @classmethod
-    def unregister(cls):
-        """Removes all plugin meta information and makes it undiscoverable."""
-        cls._meta_clear()
-
-    @classmethod
-    def _get_base(cls):
-        return getattr(cls, "base_ref", Plugin)
-
-    @classmethod
-    def get(cls, name, platform=None, allow_hidden=False,
-            fallback_to_default=True):
-        """Return plugin by its name for specified platform.
-
-        This method iterates over all subclasses of cls and returns plugin
-        by name for specified platform.
-
-        If platform is not specified, the first plugin found on any
-        platform is returned.
-
-        :param name: Plugin's name
-        :param platform: Plugin's platform
-        :param allow_hidden: if False and the found plugin is hidden then
-            PluginNotFound will be raised
-        :param fallback_to_default: if True, then it tries to find the
-            plugin within the "default" platform
-        """
-
-        potential_result = cls.get_all(name=name, platform=platform,
-                                       allow_hidden=True)
-
-        if fallback_to_default and len(potential_result) == 0:
-            # try to find in default platform
-            potential_result = cls.get_all(name=name, platform="default",
-                                           allow_hidden=True)
-
-        if len(potential_result) == 1:
-            plugin = potential_result[0]
-            if allow_hidden or not plugin.is_hidden():
-                return plugin
-
-        elif potential_result:
-            hint = _LE("Try to choose the correct Plugin base or platform to "
                       "search in.")
-            if platform:
-                needle = "%s at %s platform" % (name, platform)
-            else:
-                needle = "%s at any platform" % name
-            raise exceptions.MultipleMatchesFound(
-                needle=needle,
-                haystack=", ".join(p.get_name() for p in potential_result),
-                hint=hint)
-
-        raise exceptions.PluginNotFound(
-            name=name, platform=platform or "any")
-
-    @classmethod
-    def get_all(cls, platform=None, allow_hidden=False, name=None):
-        """Return all subclass plugins of this plugin.
-
-        All plugins that are not configured will be ignored.
-
-        :param platform: return only plugins for specific platform.
-        :param name: return only plugins with specified name.
-        :param allow_hidden: if False, return only non-hidden plugins
-        """
-        plugins = []
-
-        for p in discover.itersubclasses(cls):
-            if not issubclass(p, Plugin):
-                continue
-            if not p._meta_is_inited(raise_exc=False):
-                continue
-            if name and name != p.get_name():
-                continue
-            if platform and platform != p.get_platform():
-                continue
-            if not allow_hidden and p.is_hidden():
-                continue
-            plugins.append(p)
-
-        return plugins
-
-    @classmethod
-    def get_name(cls):
-        """Return plugin's name."""
-        return cls._meta_get("name")
-
-    @classmethod
-    def get_platform(cls):
-        """Return plugin's platform name."""
-        return cls._meta_get("platform")
-
-    @classmethod
-    def is_hidden(cls):
-        """Return whether the plugin is hidden."""
-        return cls._meta_get("hidden", False)
-
-    @classmethod
-    def is_deprecated(cls):
-        """Return deprecation details if the plugin is deprecated."""
-        return cls._meta_get("deprecated", False)
diff --git a/rally/common/sshutils.py b/rally/common/sshutils.py
deleted file mode 100644
index 15f946de..00000000
--- a/rally/common/sshutils.py
+++ /dev/null
@@ -1,290 +0,0 @@
-# Copyright 2013: Mirantis Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-"""High level ssh library.
-
-Usage examples:
-
-Execute a command and get its output:
-
-    ssh = sshclient.SSH("root", "example.com", port=33)
-    status, stdout, stderr = ssh.execute("ps ax")
-    if status:
-        raise Exception("Command failed with non-zero status.")
-    print stdout.splitlines()
-
-Execute a command with huge output:
-
-    class PseudoFile(object):
-        def write(chunk):
-            if "error" in chunk:
-                email_admin(chunk)
-
-    ssh = sshclient.SSH("root", "example.com")
-    ssh.run("tail -f /var/log/syslog", stdout=PseudoFile(), timeout=False)
-
-Execute a local script on the remote side:
-
-    ssh = sshclient.SSH("user", "example.com")
-    status, out, err = ssh.execute("/bin/sh -s arg1 arg2",
-                                   stdin=open("~/myscript.sh", "r"))
-
-Upload a file:
-
-    ssh = sshclient.SSH("user", "example.com")
-    ssh.run("cat > ~/upload/file.gz", stdin=open("/store/file.gz", "rb"))
-
-Eventlet:
-
-    eventlet.monkey_patch(select=True, time=True)
-    or
-    eventlet.monkey_patch()
-    or
-    sshclient = eventlet.import_patched("openstack.common.sshclient")
-
-"""
-
-import os
-import select
-import socket
-import time
-
-import paramiko
-import six
-
-from rally.common import logging
-from rally import exceptions
-
-LOG = logging.getLogger(__name__)
-
-
-class SSH(object):
-    """Represents an ssh connection."""
-
-    def __init__(self, user, host, port=22, pkey=None,
-                 key_filename=None, password=None):
-        """Initialize SSH client.
-
-        :param user: ssh username
-        :param host: hostname or ip address of remote ssh server
-        :param port: remote ssh port
-        :param pkey: RSA or DSS private key string or file object
-        :param key_filename: private key filename
-        :param password: password
-        """
-
-        self.user = user
-        self.host = host
-        self.port = port
-        self.pkey = self._get_pkey(pkey) if pkey else None
-        self.password = password
-        self.key_filename = key_filename
-        self._client = False
-
-    def _get_pkey(self, key):
-        if isinstance(key, six.string_types):
-            key = six.moves.StringIO(key)
-        errors = []
-        for key_class in (paramiko.rsakey.RSAKey, paramiko.dsskey.DSSKey):
-            try:
-                return key_class.from_private_key(key)
-            except paramiko.SSHException as e:
-                errors.append(e)
-        raise exceptions.SSHError("Invalid pkey: %s" % (errors))
-
-    def _get_client(self):
-        if self._client:
-            return self._client
-        try:
-            self._client = paramiko.SSHClient()
-            self._client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
-            self._client.connect(self.host, username=self.user,
-                                 port=self.port, pkey=self.pkey,
-                                 key_filename=self.key_filename,
-                                 password=self.password, timeout=1)
-            return self._client
-        except Exception as e:
-            message = ("Exception %(exception_type)s was raised "
-                       "during connect to %(user)s@%(host)s:%(port)s.
" - "Exception value is: %(exception)r") - self._client = False - raise exceptions.SSHError(message % {"exception": e, - "user": self.user, - "host": self.host, - "port": self.port, - "exception_type": type(e)}) - - def close(self): - self._client.close() - self._client = False - - def run(self, cmd, stdin=None, stdout=None, stderr=None, - raise_on_error=True, timeout=3600): - """Execute specified command on the server. - - :param cmd: Command to be executed. - :param stdin: Open file or string to pass to stdin. - :param stdout: Open file to connect to stdout. - :param stderr: Open file to connect to stderr. - :param raise_on_error: If False then exit code will be return. If True - then exception will be raised if non-zero code. - :param timeout: Timeout in seconds for command execution. - Default 1 hour. No timeout if set to 0. - """ - - client = self._get_client() - - if isinstance(stdin, six.string_types): - stdin = six.moves.StringIO(stdin) - - return self._run(client, cmd, stdin=stdin, stdout=stdout, - stderr=stderr, raise_on_error=raise_on_error, - timeout=timeout) - - def _run(self, client, cmd, stdin=None, stdout=None, stderr=None, - raise_on_error=True, timeout=3600): - - if isinstance(cmd, (list, tuple)): - cmd = " ".join(six.moves.shlex_quote(str(p)) for p in cmd) - - transport = client.get_transport() - session = transport.open_session() - session.exec_command(cmd) - start_time = time.time() - - data_to_send = "" - stderr_data = None - - # If we have data to be sent to stdin then `select' should also - # check for stdin availability. - if stdin and not stdin.closed: - writes = [session] - else: - writes = [] - - while True: - # Block until data can be read/write. - r, w, e = select.select([session], writes, [session], 1) - - if session.recv_ready(): - data = session.recv(4096) - LOG.debug("stdout: %r" % data) - if stdout is not None: - stdout.write(data.decode("utf8")) - continue - - if session.recv_stderr_ready(): - stderr_data = session.recv_stderr(4096) - LOG.debug("stderr: %r" % stderr_data) - if stderr is not None: - stderr.write(stderr_data.decode("utf8")) - continue - - if session.send_ready(): - if stdin is not None and not stdin.closed: - if not data_to_send: - data_to_send = stdin.read(4096) - if not data_to_send: - stdin.close() - session.shutdown_write() - writes = [] - continue - sent_bytes = session.send(data_to_send) - LOG.debug("sent: %s" % data_to_send[:sent_bytes]) - data_to_send = data_to_send[sent_bytes:] - - if session.exit_status_ready(): - break - - if timeout and (time.time() - timeout) > start_time: - args = {"cmd": cmd, "host": self.host} - raise exceptions.SSHTimeout("Timeout executing command " - "'%(cmd)s' on host %(host)s" - % args) - if e: - raise exceptions.SSHError("Socket error.") - - exit_status = session.recv_exit_status() - if 0 != exit_status and raise_on_error: - fmt = "Command '%(cmd)s' failed with exit_status %(status)d." - details = fmt % {"cmd": cmd, "status": exit_status} - if stderr_data: - details += " Last stderr data: '%s'." % stderr_data - raise exceptions.SSHError(details) - return exit_status - - def execute(self, cmd, stdin=None, timeout=3600): - """Execute the specified command on the server. - - :param cmd: Command to be executed, can be a list. - :param stdin: Open file to be sent on process stdin. - :param timeout: Timeout for execution of the command. 
-
-        :returns: tuple (exit_status, stdout, stderr)
-        """
-        stdout = six.moves.StringIO()
-        stderr = six.moves.StringIO()
-
-        exit_status = self.run(cmd, stderr=stderr,
-                               stdout=stdout, stdin=stdin,
-                               timeout=timeout, raise_on_error=False)
-        stdout.seek(0)
-        stderr.seek(0)
-        return (exit_status, stdout.read(), stderr.read())
-
-    def wait(self, timeout=120, interval=1):
-        """Wait until the host becomes available via ssh."""
-        start_time = time.time()
-        while True:
-            try:
-                return self.execute("uname")
-            except (socket.error, exceptions.SSHError) as e:
-                LOG.debug("Ssh is still unavailable: %r" % e)
-                time.sleep(interval)
-            if time.time() > (start_time + timeout):
-                raise exceptions.SSHTimeout("Timeout waiting for '%s'" %
-                                            self.host)
-
-    def _put_file_sftp(self, localpath, remotepath, mode=None):
-        client = self._get_client()
-
-        with client.open_sftp() as sftp:
-            sftp.put(localpath, remotepath)
-            if mode is None:
-                mode = 0o777 & os.stat(localpath).st_mode
-            sftp.chmod(remotepath, mode)
-
-    def _put_file_shell(self, localpath, remotepath, mode=None):
-        cmd = ["cat > %s" % remotepath]
-        if mode is not None:
-            cmd.append("chmod 0%o %s" % (mode, remotepath))
-
-        with open(localpath, "rb") as localfile:
-            cmd = "; ".join(cmd)
-            self.run(cmd, stdin=localfile)
-
-    def put_file(self, localpath, remotepath, mode=None):
-        """Copy specified local file to the server.
-
-        :param localpath: Local filename.
-        :param remotepath: Remote filename.
-        :param mode: Permissions to set after upload
-        """
-        try:
-            self._put_file_sftp(localpath, remotepath, mode=mode)
-        except (paramiko.SSHException, socket.error):
-            self._put_file_shell(localpath, remotepath, mode=mode)
diff --git a/rally/common/streaming_algorithms.py b/rally/common/streaming_algorithms.py
deleted file mode 100644
index 5535112b..00000000
--- a/rally/common/streaming_algorithms.py
+++ /dev/null
@@ -1,241 +0,0 @@
-# Copyright 2015: Mirantis Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
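A typical end-to-end session with the SSH helper above; the host, key and paths are placeholders, and a reachable server plus paramiko are assumed:

    from rally.common import sshutils

    ssh = sshutils.SSH("rally", "192.0.2.10",
                       key_filename="/home/rally/.ssh/id_rsa")
    ssh.wait(timeout=120, interval=1)  # block until sshd answers
    status, out, err = ssh.execute(["uname", "-a"])  # lists are shell-quoted
    if status:
        raise RuntimeError("command failed: %s" % err)
    ssh.put_file("/tmp/report.html", "/home/rally/report.html", mode=0o644)
    ssh.close()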
- -from __future__ import division - -import abc -import math - -import six - -from rally.task.processing import utils - - -@six.add_metaclass(abc.ABCMeta) -class StreamingAlgorithm(object): - """Base class for streaming computations that scale.""" - - @abc.abstractmethod - def add(self, value): - """Process a single value from the input stream.""" - - @abc.abstractmethod - def merge(self, other): - """Merge results processed by another instance.""" - - @abc.abstractmethod - def result(self): - """Return the result based on the values processed so far.""" - - def _cast_to_float(self, value): - try: - return float(value) - except (TypeError, ValueError): - raise TypeError("Non-numerical value: %r" % value) - - -class MeanComputation(StreamingAlgorithm): - """Compute mean for a stream of numbers.""" - - def __init__(self): - self.total = 0.0 - self.count = 0 - - def add(self, value): - self.count += 1 - self.total += value - - def merge(self, other): - self.count += other.count - self.total += other.total - - def result(self): - if self.count: - return self.total / self.count - return None - - -class StdDevComputation(StreamingAlgorithm): - """Compute standard deviation for a stream of numbers.""" - - def __init__(self): - self.count = 0 - # NOTE(msdubov): To compute std, we need the auxiliary variables below. - self.dev_sum = 0.0 - self.mean_computation = MeanComputation() - self.mean = 0.0 - - def add(self, value): - # NOTE(msdubov): This streaming method for std computation appears - # in "The Art of Computer Programming" by D. Knuth, - # Vol 2, p. 232, 3rd edition. - self.count += 1 - mean_prev = self.mean - self.mean_computation.add(value) - self.mean = self.mean_computation.result() - self.dev_sum = self.dev_sum + (value - mean_prev) * (value - self.mean) - - def merge(self, other): - if not other.mean_computation.count: - return - dev_sum1 = self.dev_sum - count1 = self.count - mean1 = self.mean - - dev_sum2 = other.dev_sum - count2 = other.count - mean2 = other.mean - - self.mean_computation.merge(other.mean_computation) - self.mean = self.mean_computation.result() - self.count += other.count - - self.dev_sum = (dev_sum1 + count1 * mean1 ** 2 + - dev_sum2 + count2 * mean2 ** 2 - - self.count * self.mean ** 2) - - def result(self): - # NOTE(amaretskiy): Need at least two values to be processed - if self.count < 2: - return None - return math.sqrt(self.dev_sum / (self.count - 1)) - - -class MinComputation(StreamingAlgorithm): - """Compute minimal value from a stream of numbers.""" - - def __init__(self): - self._value = None - - def add(self, value): - value = self._cast_to_float(value) - - if self._value is None or value < self._value: - self._value = value - - def merge(self, other): - if other._value is not None: - self.add(other._value) - - def result(self): - return self._value - - -class MaxComputation(StreamingAlgorithm): - """Compute maximal value from a stream of numbers.""" - - def __init__(self): - self._value = None - - def add(self, value): - value = self._cast_to_float(value) - - if self._value is None or value > self._value: - self._value = value - - def merge(self, other): - if other._value is not None: - self.add(other._value) - - def result(self): - return self._value - - -class PercentileComputation(StreamingAlgorithm): - """Compute percentile value from a stream of numbers.""" - - def __init__(self, percent, length): - """Init streaming computation. - - :param percent: numeric percent (from 0.00..1 to 0.999..) 
- :param length: count of the measurements - """ - if not 0 < percent < 1: - raise ValueError("Unexpected percent: %s" % percent) - self._percent = percent - - self._graph_zipper = utils.GraphZipper(length, 10000) - - def add(self, value): - self._graph_zipper.add_point(value) - - def merge(self, other): - # TODO(ikhudoshyn): Implement me - raise NotImplementedError() - - def result(self): - results = list( - map(lambda x: x[1], self._graph_zipper.get_zipped_graph())) - if results: - # NOTE(amaretskiy): Calculate percentile of a list of values - results.sort() - k = (len(results) - 1) * self._percent - f = math.floor(k) - c = math.ceil(k) - if f == c: - return results[int(k)] - d0 = results[int(f)] * (c - k) - d1 = results[int(c)] * (k - f) - return (d0 + d1) - return None - - -class IncrementComputation(StreamingAlgorithm): - """Simple incremental counter.""" - - def __init__(self): - self._count = 0 - - def add(self, *args): - self._count += 1 - - def merge(self, other): - self._count += other._count - - def result(self): - return self._count - - -class DegradationComputation(StreamingAlgorithm): - """Calculates degradation from a stream of numbers - - Finds min and max values from a stream and then calculates - ratio between them in percentage. Works only with positive numbers. - """ - - def __init__(self): - self.min_value = MinComputation() - self.max_value = MaxComputation() - - def add(self, value): - if value <= 0.0: - raise ValueError("Unexpected value: %s" % value) - self.min_value.add(value) - self.max_value.add(value) - - def merge(self, other): - min_result = other.min_value.result() - if min_result is not None: - self.min_value.add(min_result) - max_result = other.max_value.result() - if max_result is not None: - self.max_value.add(max_result) - - def result(self): - min_result = self.min_value.result() - max_result = self.max_value.result() - if min_result is None or max_result is None: - return 0.0 - return (max_result / min_result - 1) * 100.0 diff --git a/rally/common/utils.py b/rally/common/utils.py deleted file mode 100644 index dc65a873..00000000 --- a/rally/common/utils.py +++ /dev/null @@ -1,813 +0,0 @@ -# Copyright 2013: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
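To make the merge() contract above concrete, a small sketch that combines results computed by two independent workers; the sample values are arbitrary:

    from rally.common import streaming_algorithms as sa

    left, right = sa.MeanComputation(), sa.MeanComputation()
    for v in (1.0, 2.0, 3.0):
        left.add(v)
    for v in (4.0, 5.0):
        right.add(v)
    left.merge(right)
    assert left.result() == 3.0  # mean of 1..5 across both workers

    degradation = sa.DegradationComputation()
    for duration in (0.5, 0.75, 1.0):
        degradation.add(duration)
    assert degradation.result() == 100.0  # (1.0 / 0.5 - 1) * 100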
- -import bisect -import collections -import copy -import ctypes -import heapq -import inspect -import multiprocessing -import os -import random -import re -import shutil -import string -import sys -import tempfile -import time -import uuid - -from six import moves - -from rally.common.i18n import _, _LE -from rally.common import logging -from rally import exceptions - -LOG = logging.getLogger(__name__) - - -class ImmutableMixin(object): - _inited = False - - def __init__(self): - self._inited = True - - def __setattr__(self, key, value): - if self._inited: - raise AttributeError(_("This object is immutable.")) - super(ImmutableMixin, self).__setattr__(key, value) - - -class EnumMixin(object): - def __iter__(self): - for k, v in moves.map(lambda x: (x, getattr(self, x)), dir(self)): - if not k.startswith("_"): - yield v - - -class StdOutCapture(object): - def __init__(self): - self.stdout = sys.stdout - - def __enter__(self): - sys.stdout = moves.StringIO() - return sys.stdout - - def __exit__(self, type, value, traceback): - sys.stdout = self.stdout - - -class StdErrCapture(object): - def __init__(self): - self.stderr = sys.stderr - - def __enter__(self): - sys.stderr = moves.StringIO() - return sys.stderr - - def __exit__(self, type, value, traceback): - sys.stderr = self.stderr - - -class Timer(object): - """Timer based on context manager interface.""" - - def __enter__(self): - self.error = None - self.start = time.time() - return self - - def timestamp(self): - return self.start - - def finish_timestamp(self): - return self.finish - - def __exit__(self, type, value, tb): - self.finish = time.time() - if type: - self.error = (type, value, tb) - - def duration(self): - return self.finish - self.start - - -class Struct(object): - def __init__(self, **entries): - self.__dict__.update(entries) - - def __getitem__(self, item, default=None): - return getattr(self, item, default) - - -class RAMInt(object): - """Share RAM integer, for IPC. - - This class represents iterable which refers directly to an integer value - stored in RAM. Being a true system-level singleton, this allows safely - share integer among processes and threads. - """ - - def __init__(self, base_value=0): - self.__int = multiprocessing.Value("I", base_value) - - def __int__(self): - return self.__int.value - - def __str__(self): - return str(self.__int.value) - - def __iter__(self): - return self - - def __next__(self): - with self.__int._lock: - value = self.__int.value - self.__int.value += 1 - if self.__int.value > value: - return value - raise StopIteration - - def next(self): - return self.__next__() - - def reset(self): - with self.__int._lock: - self.__int.value = 0 - - -def get_method_class(func): - """Return the class that defined the given method. - - :param func: function to get the class for. - :returns: class object or None if func is not an instance method. - """ - if hasattr(func, "im_class"): - # this check works in Python 2 - for cls in inspect.getmro(func.im_class): - if func.__name__ in cls.__dict__: - return cls - elif hasattr(func, "__qualname__") and inspect.isfunction(func): - # this check works in Python 3 - cls = getattr( - inspect.getmodule(func), - func.__qualname__.split("..", 1)[0].rsplit(".", 1)[0]) - if isinstance(cls, type): - return cls - else: - return None - - -def first_index(lst, predicate): - """Return the index of the first element that matches a predicate. - - :param lst: list to find the matching element in. - :param predicate: predicate object. 
-    :returns: the index of the first matching element or None if no element
-              matches the predicate.
-    """
-    for i, e in enumerate(lst):
-        if predicate(e):
-            return i
-    return None
-
-
-@logging.log_deprecated(message="It is not used anywhere else in Rally.",
-                        rally_version="0.4.1")
-def distance(s1, s2):
-    """Computes the edit distance between two strings.
-
-    The edit distance is the Levenshtein distance. The larger the return
-    value, the more edits are required to transform one string into the
-    other.
-
-    :param s1: First string to compare
-    :param s2: Second string to compare
-    :returns: Integer distance between two strings
-    """
-    n = range(0, len(s1) + 1)
-    for y in range(1, len(s2) + 1):
-        l, n = n, [y]
-        for x in moves.range(1, len(s1) + 1):
-            n.append(min(l[x] + 1, n[-1] + 1,
-                         l[x - 1] + (s2[y - 1] != s1[x - 1])))
-    return n[-1]
-
-
-def retry(times, func, *args, **kwargs):
-    """Try to execute a function multiple times, mitigating exceptions.
-
-    :param times: Number of attempts to execute func
-    :param func: Function that should be executed
-    :param args: *args that are passed to func
-    :param kwargs: **kwargs that are passed to func
-
-    :raises Exception: Any exception that func can raise
-    :returns: Result of func(*args, **kwargs)
-    """
-
-    for i in range(times):
-        try:
-            return func(*args, **kwargs)
-        except Exception:
-            if i == times - 1:
-                raise
-
-
-def iterate_per_tenants(users):
-    """Iterate over a single arbitrary user from each tenant
-
-    :type users: list of users
-    :return: iterator of a single user from each tenant
-    """
-    processed_tenants = set()
-    for user in users:
-        if user["tenant_id"] not in processed_tenants:
-            processed_tenants.add(user["tenant_id"])
-            yield (user, user["tenant_id"])
-
-
-class RandomNameGeneratorMixin(object):
-    """Mixin for objects that need to generate random names.
-
-    This mixin provides one method,
-    ``generate_random_name()``. Classes that include it must provide a
-    ``self.task`` attribute that references a task dict or a
-    ``self.verification`` attribute that references a verification dict.
-    Classes that use this mixin may set two class variables to alter the
-    behavior of ``generate_random_name()``:
-
-    * ``RESOURCE_NAME_FORMAT``: A mktemp(1)-like format string that
-      will be used to pattern the generated random string. It must
-      contain two separate segments of at least three 'X's; the first
-      one will be replaced by a portion of the task ID, and the second
-      will be replaced with a random string.
-    * ``RESOURCE_NAME_ALLOWED_CHARACTERS``: A string consisting of the
-      characters allowed in the random portions of the name.
-    """
-    _resource_name_placeholder_re = re.compile(
-        "^(?P<prefix>.*?)(?P<task>X{3,})(?P<sep>[^X]+?)(?P<rand>X{3,})"
-        "(?P<suffix>.*)$")
-
-    RESOURCE_NAME_FORMAT = "rally_XXXXXXXX_XXXXXXXX"
-    RESOURCE_NAME_ALLOWED_CHARACTERS = string.ascii_letters + string.digits
-
-    @classmethod
-    def _generate_random_part(cls, length):
-        """Generate a random string.
-
-        :param length: The length of the random string.
-        :returns: string, randomly-generated string of the specified length
-                  containing only characters from
-                  cls.RESOURCE_NAME_ALLOWED_CHARACTERS
-        """
-        return "".join(random.choice(cls.RESOURCE_NAME_ALLOWED_CHARACTERS)
-                       for i in range(length))
-
-    @classmethod
-    def _generate_task_id_part(cls, task_id, length):
-        # NOTE(stpierre): the first part of the random name is a
-        # subset of the task ID
-        task_id_part = task_id.replace("-", "")[0:length]
-
-        if len(task_id_part) < length:
-            LOG.debug("Task ID %(task_id)s cannot be included in a random "
-                      "name because it is too short. Format: %(format)s" %
-                      {"task_id": task_id,
-                       "format": cls.RESOURCE_NAME_FORMAT})
-        elif any(char not in cls.RESOURCE_NAME_ALLOWED_CHARACTERS
-                 for char in task_id_part):
-            LOG.debug("Task ID %(task_id)s cannot be included in a random "
-                      "name because it includes disallowed characters. "
-                      "Allowed characters are: %(chars)s" %
-                      {"task_id": task_id,
-                       "chars": cls.RESOURCE_NAME_ALLOWED_CHARACTERS})
-        else:
-            return task_id_part
-
-        # NOTE(stpierre): either the task UUID is shorter than the
-        # task portion; or the portion of the task ID that we
-        # would use contains characters that are not in
-        # RESOURCE_NAME_ALLOWED_CHARACTERS.
-        try:
-            # NOTE(stpierre): seed pRNG with task ID so that all random
-            # names with the same task ID have the same task ID part
-            random.seed(task_id)
-            return cls._generate_random_part(length)
-        finally:
-            random.seed()
-
-    def get_owner_id(self):
-        if hasattr(self, "task"):
-            return self.task["uuid"]
-        elif hasattr(self, "verification"):
-            return self.verification["uuid"]
-
-    def generate_random_name(self):
-        """Generate pseudo-random resource name for scenarios.
-
-        The name follows a deterministic pattern, which helps support
-        out-of-band cleanup of Rally-created resources.
-
-        If possible, a portion of the task ID will be used in the
-        random name. If the task ID contains characters that are not
-        allowed by the 'RESOURCE_NAME_ALLOWED_CHARACTERS' class
-        variable, then a random string, seeded with the task ID, will
-        be generated for the task portion of the random name.
-
-        :returns: str, pseudo-random name
-        """
-        task_id = self.get_owner_id()
-
-        match = self._resource_name_placeholder_re.match(
-            self.RESOURCE_NAME_FORMAT)
-        if match is None:
-            raise ValueError("%s is not a valid resource name format" %
-                             self.RESOURCE_NAME_FORMAT)
-        parts = match.groupdict()
-        return "".join([
-            parts["prefix"],
-            self._generate_task_id_part(task_id, len(parts["task"])),
-            parts["sep"],
-            self._generate_random_part(len(parts["rand"])),
-            parts["suffix"]])
-
-    @classmethod
-    def name_matches_object(cls, name, task_id=None, exact=True):
-        """Determine if a resource name could have been created by this class.
-
-        :param name: The resource name to check against this class's
-                     RESOURCE_NAME_FORMAT.
-        :param task_id: The task ID that must match the task portion of
-                        the random name
-        :param exact: If False, then additional information may follow
-                      the expected name. (For instance, this is useful
-                      when bulk creating instances, since Nova
-                      automatically appends a UUID to each instance
-                      created thusly.)
- :returns: bool - """ - match = cls._resource_name_placeholder_re.match( - cls.RESOURCE_NAME_FORMAT) - parts = match.groupdict() - subst = { - "prefix": re.escape(parts["prefix"]), - "sep": re.escape(parts["sep"]), - "suffix": re.escape(parts["suffix"]), - "chars": re.escape(cls.RESOURCE_NAME_ALLOWED_CHARACTERS), - "rand_length": len(parts["rand"])} - if task_id: - subst["task_id"] = cls._generate_task_id_part(task_id, - len(parts["task"])) - else: - subst["task_id"] = "[%s]{%s}" % (subst["chars"], - len(parts["task"])) - subst["extra"] = "" if exact else ".*" - name_re = re.compile( - "%(prefix)s%(task_id)s%(sep)s" - "[%(chars)s]{%(rand_length)s}%(suffix)s%(extra)s$" % subst) - return bool(name_re.match(name)) - - -def name_matches_object(name, *objects, **kwargs): - """Determine if a resource name could have been created by given objects. - - The object(s) must implement RandomNameGeneratorMixin. - - It will often be more efficient to pass a list of classes to - name_matches_object() than to perform multiple - name_matches_object() calls, since this function will deduplicate - identical name generation options. - - :param name: The resource name to check against the object's - RESOURCE_NAME_FORMAT. - :param *objects: Classes or objects to fetch random name - generation parameters from. - :param **kwargs: Additional keyword args. See the docstring for - RandomNameGenerator.name_matches_object() for - details on what args are recognized. - :returns: bool - """ - unique_rng_options = {} - for obj in objects: - key = (obj.RESOURCE_NAME_FORMAT, obj.RESOURCE_NAME_ALLOWED_CHARACTERS) - if key not in unique_rng_options: - unique_rng_options[key] = obj - return any(obj.name_matches_object(name, **kwargs) - for obj in unique_rng_options.values()) - - -def make_name_matcher(*names): - """Construct a matcher for custom names - - In case of contexts, there can be custom names. To reuse common cleanup - mechanism for cleaning up such resources, this method creates a subclass of - RandomNameGeneratorMixin with customized `name_matches_object` method. - """ - class CustomNameMatcher(RandomNameGeneratorMixin): - # generate unique string to guarantee processing that custom names - RESOURCE_NAME_FORMAT = str(uuid.uuid4()) - - NAMES = names - - @classmethod - def name_matches_object(cls, name, task_id=None, exact=True): - return name in cls.NAMES - - return CustomNameMatcher - - -def merge(length, *sources): - """Merge lists of lists. - - Each source produces (or contains) lists of ordered items. - Items of each list must be greater or equal to all items of - the previous list (that implies that items must be comparable). - - The function merges the sources into lists with the length - equal to given one, except the last list which can be shorter. - - Example: - it1 = iter([[1, 3, 5], [5, 7, 9, 14], [17, 21, 36, 41]]) - it2 = iter([[2, 2, 4], [9, 10], [16, 19, 23, 26, 91]]) - it3 = iter([[5], [5, 7, 11, 14, 14, 19, 23]]) - - it = merge(10, it1, it2, it3) - - for i in it: - print i - - prints out: - [1, 2, 2, 3, 4, 5, 5, 5, 5, 7, 7, 9, 9, 10] - [11, 14, 14, 14, 16, 17, 19, 19, 21, 23, 23] - [26, 36, 41, 91] - - :param: length, length of generated lists, except the last one. 
-    :param: sources, generators that produce lists of items to merge
-    """
-
-    streams = [
-        {"data": [], "gen": src}
-        for src in sources]
-
-    out_chunk = []
-    while True:
-        while len(out_chunk) < length:
-
-            # Least right item among streams
-            lri = None
-
-            # Refresh data if needed
-            for s in streams:
-                if s["gen"] and not s["data"]:
-                    try:
-                        while not s["data"]:
-                            s["data"] = next(s["gen"])
-                    except StopIteration:
-                        s["gen"] = None
-
-                # ... and define least right item
-                if s["data"]:
-                    rightmost_item = s["data"][-1]
-                    if (lri is None) or (rightmost_item < lri):
-                        lri = rightmost_item
-
-            # No more data to merge
-            if lri is None:
-                break
-
-            to_merge = []
-            for s in streams:
-                if s["data"]:
-                    pos = bisect.bisect_right(s["data"], lri)
-                    to_merge.append(s["data"][:pos])
-                    s["data"] = s["data"][pos:]
-
-            out_chunk += heapq.merge(*to_merge)
-
-        if out_chunk:
-            if len(out_chunk) > length:
-                yield out_chunk[:length]
-                out_chunk = out_chunk[length:]
-            else:
-                yield out_chunk
-                out_chunk = []
-        else:
-            return
-
-
-def interruptable_sleep(sleep_time, atomic_delay=0.1):
-    """Return after sleep_time seconds.
-
-    Divide sleep_time by atomic_delay, and call time.sleep N times.
-    This gives other code a chance to interrupt the current thread.
-
-    :param sleep_time: idle time of method (in seconds).
-    :param atomic_delay: parameter with which time.sleep would be called
-                         int(sleep_time / atomic_delay) times.
-    """
-    if atomic_delay <= 0:
-        raise ValueError("atomic_delay should be > 0")
-
-    if sleep_time >= 0:
-        if sleep_time < 1:
-            return time.sleep(sleep_time)
-
-        for x in moves.xrange(int(sleep_time / atomic_delay)):
-            time.sleep(atomic_delay)
-
-        left = sleep_time - (int(sleep_time / atomic_delay)) * atomic_delay
-        if left:
-            time.sleep(left)
-    else:
-        raise ValueError("sleep_time should be >= 0")
-
-
-def terminate_thread(thread_ident, exc_type=exceptions.ThreadTimeoutException):
-    """Terminate a python thread.
-
-    Use PyThreadState_SetAsyncExc to terminate thread.
-
-    :param thread_ident: threading.Thread.ident value
-    :param exc_type: an Exception type to be raised
-    """
-
-    ctypes.pythonapi.PyThreadState_SetAsyncExc(
-        ctypes.c_long(thread_ident), ctypes.py_object(exc_type))
-
-
-def timeout_thread(queue):
-    """Terminate threads by timeout.
-
-    This function needs to be run in a separate thread. It is designed to
-    terminate threads which have been running longer than a timeout.
-
-    The parent thread will put tuples (thread, deadline) in the queue,
-    where `thread` is the Thread object to watch and `deadline` is the
-    timestamp at which that thread should be terminated. The tuple
-    (None, None) should be put when all threads have exited and there are
-    no more threads to watch.
-
-    :param queue: Queue object to communicate with parent thread.
-    """
-
-    all_threads = collections.deque()
-    while True:
-        if not all_threads:
-            timeout = None
-        else:
-            thread, deadline = all_threads[0]
-            timeout = deadline - time.time()
-        try:
-            next_thread = queue.get(timeout=timeout)
-            all_threads.append(next_thread)
-        except (moves.queue.Empty, ValueError):
-            # NOTE(rvasilets): Empty means that a timeout occurred.
-            # ValueError means that the timeout was lower than 0.
-            if thread.isAlive():
-                LOG.info("Thread %s is timed out. Terminating." % thread.ident)
-                terminate_thread(thread.ident)
-            all_threads.popleft()
-
-        if next_thread == (None, None,):
-            return
-
-
-class LockedDict(dict):
-    """Represents a dict which can be locked for updates.
-
-    It is read-only by default, but it can be updated via context manager
-    interface:
-
-      d = LockedDict(foo="bar")
-      d["spam"] = 42  # RuntimeError
-      with d.unlocked():
-          d["spam"] = 42  # Works
-    """
-
-    def __init__(self, *args, **kwargs):
-        super(LockedDict, self).__init__(*args, **kwargs)
-        self._is_locked = True
-        self._is_ready_to_be_unlocked = False
-
-        def lock(obj):
-            if isinstance(obj, dict):
-                return LockedDict(obj)
-            elif isinstance(obj, list):
-                return tuple([lock(v) for v in obj])
-            return obj
-
-        with self.unlocked():
-            for k, v in self.items():
-                self[k] = lock(v)
-
-    def _check_is_unlocked(self):
-        if self._is_locked:
-            raise RuntimeError("Trying to change read-only dict %r" % self)
-
-    def unlocked(self):
-        self._is_ready_to_be_unlocked = True
-        return self
-
-    def __deepcopy__(self, memo=None):
-        def unlock(obj):
-            if isinstance(obj, LockedDict):
-                obj = dict(obj)
-                for k, v in obj.items():
-                    obj[k] = unlock(v)
-            elif type(obj) == tuple:
-                obj = tuple([unlock(v) for v in obj])
-            return obj
-        return copy.deepcopy(unlock(self), memo=memo)
-
-    def __enter__(self, *args):
-        if self._is_ready_to_be_unlocked:
-            self._is_locked = False
-
-    def __exit__(self, *args):
-        self._is_ready_to_be_unlocked = False
-        self._is_locked = True
-
-    def __setitem__(self, *args, **kwargs):
-        self._check_is_unlocked()
-        return super(LockedDict, self).__setitem__(*args, **kwargs)
-
-    def __delitem__(self, *args, **kwargs):
-        self._check_is_unlocked()
-        return super(LockedDict, self).__delitem__(*args, **kwargs)
-
-    def pop(self, *args, **kwargs):
-        self._check_is_unlocked()
-        return super(LockedDict, self).pop(*args, **kwargs)
-
-    def popitem(self, *args, **kwargs):
-        self._check_is_unlocked()
-        return super(LockedDict, self).popitem(*args, **kwargs)
-
-    def update(self, *args, **kwargs):
-        self._check_is_unlocked()
-        return super(LockedDict, self).update(*args, **kwargs)
-
-    def setdefault(self, *args, **kwargs):
-        self._check_is_unlocked()
-        return super(LockedDict, self).setdefault(*args, **kwargs)
-
-    def clear(self, *args, **kwargs):
-        self._check_is_unlocked()
-        return super(LockedDict, self).clear(*args, **kwargs)
-
-
-def format_float_to_str(num):
-    """Format number into human-readable float format.
-
-    More precisely, it converts the float into a string and removes
-    redundant trailing zeros from the fractional part.
-    It will format the number as in the following examples:
-      0.0000001 -> 0.0
-      0.000000  -> 0.0
-      37        -> 37.0
-      1.0000001 -> 1.0
-      1.0000011 -> 1.000001
-      1.0000019 -> 1.000002
-
-    :param num: Number to be formatted
-    :return: string representation of the number
-    """
-
-    num_str = "%f" % num
-    float_part = num_str.split(".")[1].rstrip("0") or "0"
-    return num_str.split(".")[0] + "." + float_part
-
-
-class DequeAsQueue(object):
-    """Allows using some Queue methods on a collections.deque."""
-
-    def __init__(self, deque):
-        self.deque = deque
-
-    def qsize(self):
-        return len(self.deque)
-
-    def put(self, value):
-        self.deque.append(value)
-
-    def get(self):
-        return self.deque.popleft()
-
-    def empty(self):
-        # Queue.empty() semantics: True when there is nothing to get
-        return not self.deque
-
-
-class Stopwatch(object):
-    """Allows sleeping until a specified time since start."""
-
-    def __init__(self, stop_event=None):
-        """Creates Stopwatch.
-
-        :param stop_event: optional threading.Event to use for waiting;
-            allows the sleep to be interrupted. If not provided, time.sleep
-            will be used instead.
- """ - self._stop_event = stop_event - - def start(self): - self._start_time = time.time() - - def sleep(self, sec): - """Sleeps till specified second since start.""" - target_time = self._start_time + sec - current_time = time.time() - if current_time >= target_time: - return - time_to_sleep = target_time - current_time - self._sleep(time_to_sleep) - - def _sleep(self, sec): - if self._stop_event: - self._stop_event.wait(sec) - else: - interruptable_sleep(sec) - - -def generate_random_path(root_dir=None): - """Generates a vacant name for a file or dir at the specified place. - - :param root_dir: Name of a directory to generate path in. If None (default - behaviour), temporary directory (i.e /tmp in linux) will be used. - """ - root_dir = root_dir or tempfile.gettempdir() - path = None - while path is None: - candidate = os.path.join(root_dir, str(uuid.uuid4())) - if not os.path.exists(candidate): - path = candidate - return path - - -class BackupHelper(object): - def __init__(self): - self._tempdir = generate_random_path() - - os.mkdir(self._tempdir) - - self._stored_data = {} - self._rollback_actions = [] - - def backup(self, original_path): - if original_path in self._stored_data: - raise exceptions.RallyException( - _LE("Failed to back up %s since it was already stored.") % - original_path) - backup_path = generate_random_path(self._tempdir) - LOG.debug("Creating backup of %s in %s" % (original_path, backup_path)) - try: - shutil.copytree(original_path, backup_path, symlinks=True) - except Exception: - # Ooops. something went wrong - self.rollback() - raise - self._stored_data[original_path] = backup_path - - def rollback(self): - LOG.debug("Performing rollback of stored data.") - for original_path, stored_path in self._stored_data.copy().items(): - if os.path.exists(original_path): - shutil.rmtree(original_path) - shutil.copytree(stored_path, original_path, symlinks=True) - # not to delete the same path in __del__ method - self._stored_data.pop(original_path) - - for m, args, kwargs in self._rollback_actions: - m(*args, **kwargs) - - def add_rollback_action(self, method, *args, **kwargs): - self._rollback_actions.append((method, args, kwargs)) - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_val, exc_tb): - if exc_type is not None: - self.rollback() - - def __call__(self, path): - self.backup(path) - return self - - def __del__(self): - for path in self._stored_data.values(): - if os.path.exists(path): - LOG.debug("Deleting %s" % path) - shutil.rmtree(path) diff --git a/rally/common/validation.py b/rally/common/validation.py deleted file mode 100644 index cbe3add8..00000000 --- a/rally/common/validation.py +++ /dev/null @@ -1,280 +0,0 @@ -# Copyright 2017: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import abc -import traceback - -import six - -from rally.common import logging -from rally.common.plugin import plugin -from rally import exceptions - -LOG = logging.getLogger(__name__) - - -@logging.log_deprecated_args("Use 'platform' arg instead", "0.10.0", - ["namespace"], log_function=LOG.warning) -def configure(name, platform="default", namespace=None): - - if namespace: - platform = namespace - - def wrapper(cls): - return plugin.configure(name=name, platform=platform)(cls) - - return wrapper - - -@plugin.base() -@six.add_metaclass(abc.ABCMeta) -class Validator(plugin.Plugin): - - def __init__(self): - pass - - @abc.abstractmethod - def validate(self, credentials, config, plugin_cls, plugin_cfg): - """Method that validates something. - - :param credentials: credentials dict for all platforms - :param config: dict, configuration of workload - :param plugin_cls: plugin class - :param plugin_cfg: dict, with exact configuration of the plugin - :returns: ValidationResult instances - """ - - def fail(self, msg): - return ValidationResult(False, msg=msg) - - @classmethod - def _get_doc(cls): - doc = "" - if cls.__doc__ is not None: - doc = cls.__doc__ - if cls.__init__.__doc__ is not None: - if not cls.__init__.__doc__.startswith("\n"): - doc += "\n" - doc += cls.__init__.__doc__ - return doc - - -@configure(name="required_platform") -class RequiredPlatformValidator(Validator): - - def __init__(self, platform, admin=False, users=False): - """Validates credentials for specified platform. - - This allows us to create 4 kind of benchmarks: - 1) platform independent (validator is not specified) - 2) requires platform with admin - 3) requires platform with admin + users - 4) requires platform with users - - :param platform: name of the platform - :param admin: requires admin credential - :param users: requires user credentials - """ - super(RequiredPlatformValidator, self).__init__() - self.platform = platform - self.admin = admin - self.users = users - - def validate(self, credentials, config, plugin_cls, plugin_cfg): - if not (self.admin or self.users): - return self.fail( - "You should specify admin=True or users=True or both.") - - if credentials is None: - credentials = {} - credentials = credentials.get(self.platform, {}) - - if self.admin and credentials.get("admin") is None: - return self.fail("No admin credential for %s" % self.platform) - if self.users and len(credentials.get("users", ())) == 0: - if credentials.get("admin") is None: - return self.fail("No user credentials for %s" % self.platform) - else: - # NOTE(andreykurilin): It is a case whem the plugin requires - # 'users' for launching, but there are no specified users in - # deployment. Let's assume that 'users' context can create - # them via admin user and do not fail." - pass - - -def add(name, **kwargs): - """Add validator to the plugin class meta. - - Add validator name and arguments to validators list stored in the - plugin meta by 'validators' key. This would be used to iterate - and execute through all validators during execution of subtask. 
- - :param name: str, name of the validator plugin - :param kwargs: dict, arguments used to initialize validator class - instance - """ - - def wrapper(plugin): - if issubclass(plugin, RequiredPlatformValidator): - raise exceptions.RallyException( - "Cannot add a validator to RequiredPlatformValidator") - elif issubclass(plugin, Validator) and name != "required_platform": - raise exceptions.RallyException( - "Only RequiredPlatformValidator can be added " - "to other validators as a validator") - - plugin._meta_setdefault("validators", []) - plugin._meta_get("validators").append((name, (), kwargs,)) - return plugin - - return wrapper - - -def add_default(name, **kwargs): - """Add validator to the plugin class default meta. - - Validator will be added to all subclasses by default - - :param name: str, name of the validator plugin - :param kwargs: dict, arguments used to initialize validator class - instance - """ - - def wrapper(plugin): - if not hasattr(plugin, "DEFAULT_META"): - plugin.DEFAULT_META = {} - plugin.DEFAULT_META.setdefault("validators", []) - plugin.DEFAULT_META["validators"].append((name, (), kwargs,)) - return plugin - return wrapper - - -class ValidationResult(object): - - def __init__(self, is_valid, msg="", etype=None, etraceback=None): - self.is_valid = is_valid - self.msg = msg - self.etype = etype - self.etraceback = etraceback - - def __str__(self): - if self.is_valid: - return "validation success" - if self.etype: - return ("---------- Exception in validator ----------\n" + - self.etraceback) - return self.msg - - -class ValidatablePluginMixin(object): - - @staticmethod - def _load_validators(plugin): - validators = plugin._meta_get("validators", default=[]) - return [(Validator.get(name), args, kwargs) - for name, args, kwargs in validators] - - @classmethod - def validate(cls, name, credentials, config, plugin_cfg, - namespace=None, allow_hidden=False, vtype=None): - """Execute all validators stored in meta of plugin. - - Iterate during all validators stored in the meta of Validator - and execute proper validate() method and add validation result - to the list. - - :param name: name of the plugin to validate - :param namespace: namespace of the plugin - :param credentials: credentials dict for all platforms - :param config: dict with configuration of specified workload - :param plugin_cfg: dict, with exact configuration of the plugin - :param allow_hidden: do not ignore hidden plugins - :param vtype: Type of validation. Allowed types: syntax, platform, - semantic. 
HINT: To specify several types use tuple or list with - types - :returns: list of ValidationResult(is_valid=False) instances - """ - try: - plugin = cls.get(name, allow_hidden=allow_hidden, - platform=namespace) - except exceptions.PluginNotFound: - msg = "There is no %s plugin with name: '%s'" % ( - cls.__name__, name) - return [ValidationResult(is_valid=False, msg=msg)] - - if vtype is None: - semantic = True - syntax = True - platform = True - else: - if not isinstance(vtype, (list, tuple)): - vtype = [vtype] - wrong_types = set(vtype) - {"semantic", "syntax", "platform"} - if wrong_types: - raise ValueError("Wrong type of validation: %s" % - ", ".join(wrong_types)) - semantic = "semantic" in vtype - syntax = "syntax" in vtype - platform = "platform" in vtype - - syntax_validators = [] - platform_validators = [] - regular_validators = [] - - plugin_validators = cls._load_validators(plugin) - for validator, args, kwargs in plugin_validators: - if issubclass(validator, RequiredPlatformValidator): - if platform: - platform_validators.append((validator, args, kwargs)) - else: - validators_of_validators = cls._load_validators(validator) - if validators_of_validators: - if semantic: - regular_validators.append((validator, args, kwargs)) - if platform: - # Load platform validators from each validator - platform_validators.extend(validators_of_validators) - else: - if syntax: - syntax_validators.append((validator, args, kwargs)) - - results = [] - for validators in (syntax_validators, platform_validators, - regular_validators): - for validator_cls, args, kwargs in validators: - try: - validator = validator_cls(*args, **kwargs) - - # NOTE(amaretskiy): validator is successful by default - result = (validator.validate(credentials=credentials, - config=config, - plugin_cls=plugin, - plugin_cfg=plugin_cfg) - or ValidationResult(True)) - except Exception as exc: - result = ValidationResult( - is_valid=False, - msg=str(exc), - etype=type(exc).__name__, - etraceback=traceback.format_exc()) - if not result.is_valid: - LOG.debug("Result of validator '%s' is not successful for " - "plugin %s.", validator_cls.get_name(), name) - results.append(result) - - if results: - break - - return results diff --git a/rally/common/version.py b/rally/common/version.py deleted file mode 100644 index 0f48f39c..00000000 --- a/rally/common/version.py +++ /dev/null @@ -1,33 +0,0 @@ -# Copyright 2013: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
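Putting the validation pieces above together: add() stores (name, (), kwargs) triples in the plugin meta, and ValidatablePluginMixin.validate() later instantiates each validator, running the groups in syntax, then platform, then semantic order and stopping at the first group that reports failures. A rough sketch under those assumptions; scenario.configure and scenario.Scenario come from elsewhere in Rally, and the "number" validator name is illustrative:

    from rally.common import validation
    from rally.task import scenario

    @validation.add("required_platform", platform="openstack", users=True)
    @validation.add("number", param_name="volumes", minval=1)
    @scenario.configure(name="Dummy.make_volumes")
    class MakeVolumes(scenario.Scenario):
        pass

    results = MakeVolumes.validate(
        name="Dummy.make_volumes", credentials=None,
        config=None, plugin_cfg={"args": {"volumes": 2}})
    # an empty list means every validator passed; otherwise each entry
    # is a ValidationResult with is_valid=False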
- -from pbr import version as pbr_version - -from rally.common.db import api - -RALLY_VENDOR = "OpenStack Foundation" -RALLY_PRODUCT = "OpenStack Rally" -RALLY_PACKAGE = None # OS distro package version suffix - -loaded = False -version_info = pbr_version.VersionInfo("rally") - - -def version_string(): - return version_info.semantic_version().debian_string() - - -def database_revision(): - return api.schema_revision(detailed=True) diff --git a/rally/common/yamlutils.py b/rally/common/yamlutils.py deleted file mode 100644 index 47da7b34..00000000 --- a/rally/common/yamlutils.py +++ /dev/null @@ -1,68 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import collections -import json - -import yaml -from yaml import constructor -from yaml import loader -from yaml import nodes -from yaml import parser -from yaml import resolver - -ParserError = parser.ParserError - - -# NOTE(andreykurilin): Jinja2 uses __repr__ methods of objects while rendering -# templates. Such behaviour converts OrderedDict to the string like -# "OrderedDict([('foo', 'xxx'), ('bar', 'yyy')])" -# which breaks json/yaml load. -# In 99% of cases, we are rendering templates based on the dicts obtained -# after yaml.safe_load which uses collections.OrderedDict , so writing here -# the workaround with overridden __repr__ method looks like the best choice. -class OrderedDict(collections.OrderedDict): - """collections.OrderedDict with __repr__ like in the regular dict.""" - def __repr__(self): - return json.dumps(self, sort_keys=False) - - -def _construct_mapping(loader, node, deep=False): - keys = [] - if isinstance(node, nodes.MappingNode): - for key_node, value_node in node.value: - key = loader.construct_object(key_node, deep=deep) - if key in keys: - raise constructor.ConstructorError( - "while constructing a mapping", - node.start_mark, - "the key (%s) is redefined" % key, - key_node.start_mark) - keys.append(key) - return OrderedDict(loader.construct_pairs(node)) - - -class _SafeLoader(loader.SafeLoader): - pass - - -_SafeLoader.add_constructor(resolver.BaseResolver.DEFAULT_MAPPING_TAG, - _construct_mapping) - - -def safe_load(stream): - """Load stream to create python object - - :param stream: json/yaml stream. - :returns: dict object - """ - return yaml.load(stream, _SafeLoader) diff --git a/rally/consts.py b/rally/consts.py deleted file mode 100644 index 028d5a87..00000000 --- a/rally/consts.py +++ /dev/null @@ -1,233 +0,0 @@ -# Copyright 2013: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -There is a lot of situations when we would like to work with Enum or Const. -E.g. work around Tasks. We would like to use Enum in DB to store status of task -and also in migration that creates DB and in business logic to set some status -so as to avoid copy paste or direct usage of enums values we create singletons -for each enum. (e.g. TaskStatus) -""" - -from rally.common import utils - - -JSON_SCHEMA = "http://json-schema.org/draft-04/schema" - - -class _TaskStatus(utils.ImmutableMixin, utils.EnumMixin): - - """Consts that represents task possible states.""" - INIT = "init" - VALIDATING = "validating" - VALIDATED = "validated" - VALIDATION_FAILED = "validation_failed" - RUNNING = "running" - FINISHED = "finished" - CRASHED = "crashed" - ABORTING = "aborting" - SLA_FAILED = "sla_failed" - SOFT_ABORTING = "soft_aborting" - ABORTED = "aborted" - PAUSED = "paused" - - -class _SubtaskStatus(utils.ImmutableMixin, utils.EnumMixin): - - """Consts that represents task possible states.""" - INIT = "init" - VALIDATING = "validating" - VALIDATED = "validated" - VALIDATION_FAILED = "validation_failed" - RUNNING = "running" - FINISHED = "finished" - CRASHED = "crashed" - ABORTING = "aborting" - SLA_FAILED = "sla_failed" - SOFT_ABORTING = "soft_aborting" - ABORTED = "aborted" - PAUSED = "paused" - - -class _DeployStatus(utils.ImmutableMixin, utils.EnumMixin): - DEPLOY_INIT = "deploy->init" - DEPLOY_STARTED = "deploy->started" - DEPLOY_SUBDEPLOY = "deploy->subdeploy" - DEPLOY_FINISHED = "deploy->finished" - DEPLOY_FAILED = "deploy->failed" - - DEPLOY_INCONSISTENT = "deploy->inconsistent" - - CLEANUP_STARTED = "cleanup->started" - CLEANUP_FINISHED = "cleanup->finished" - CLEANUP_FAILED = "cleanup->failed" - - -class _EndpointPermission(utils.ImmutableMixin, utils.EnumMixin): - ADMIN = "admin" - USER = "user" - - -class _EndpointType(utils.ImmutableMixin, utils.EnumMixin): - INTERNAL = "internal" - ADMIN = "admin" - PUBLIC = "public" - - -class _Service(utils.ImmutableMixin, utils.EnumMixin): - """OpenStack services names, by rally convention.""" - - NOVA = "nova" - NOVA_NET = "nova-network" - CINDER = "cinder" - MANILA = "manila" - EC2 = "ec2" - GLANCE = "glance" - CLOUD = "cloud" - HEAT = "heat" - KEYSTONE = "keystone" - NEUTRON = "neutron" - DESIGNATE = "designate" - CEILOMETER = "ceilometer" - MONASCA = "monasca" - S3 = "s3" - SENLIN = "senlin" - TROVE = "trove" - SAHARA = "sahara" - SWIFT = "swift" - MISTRAL = "mistral" - MURANO = "murano" - IRONIC = "ironic" - GNOCCHI = "gnocchi" - MAGNUM = "magnum" - WATCHER = "watcher" - - -class _ServiceType(utils.ImmutableMixin, utils.EnumMixin): - """OpenStack services types, mapped to service names.""" - - VOLUME = "volume" - SHARE = "share" - EC2 = "ec2" - IMAGE = "image" - CLOUD = "cloudformation" - ORCHESTRATION = "orchestration" - IDENTITY = "identity" - CLUSTERING = "clustering" - COMPUTE = "compute" - NETWORK = "network" - DNS = "dns" - METERING = "metering" - MONITORING = "monitoring" - S3 = "s3" - DATABASE = "database" - DATA_PROCESSING = "data-processing" - DATA_PROCESSING_MOS = "data_processing" - OBJECT_STORE = "object-store" - WORKFLOW_EXECUTION = "workflowv2" - APPLICATION_CATALOG = "application-catalog" - BARE_METAL = "baremetal" - METRIC = "metric" - CONTAINER_INFRA = "container-infra" - INFRA_OPTIM = "infra-optim" - - def __init__(self): - self.__names = { - self.CLUSTERING: _Service.SENLIN, - self.COMPUTE: _Service.NOVA, - 
self.VOLUME: _Service.CINDER, - self.SHARE: _Service.MANILA, - self.EC2: _Service.EC2, - self.IMAGE: _Service.GLANCE, - self.CLOUD: _Service.CLOUD, - self.ORCHESTRATION: _Service.HEAT, - self.IDENTITY: _Service.KEYSTONE, - self.NETWORK: _Service.NEUTRON, - self.DNS: _Service.DESIGNATE, - self.METERING: _Service.CEILOMETER, - self.MONITORING: _Service.MONASCA, - self.S3: _Service.S3, - self.DATABASE: _Service.TROVE, - self.DATA_PROCESSING: _Service.SAHARA, - self.DATA_PROCESSING_MOS: _Service.SAHARA, - self.OBJECT_STORE: _Service.SWIFT, - self.WORKFLOW_EXECUTION: _Service.MISTRAL, - self.APPLICATION_CATALOG: _Service.MURANO, - self.BARE_METAL: _Service.IRONIC, - self.METRIC: _Service.GNOCCHI, - self.CONTAINER_INFRA: _Service.MAGNUM, - self.INFRA_OPTIM: _Service.WATCHER, - } - - def __getitem__(self, service_type): - """Mapping protocol to service names. - - :param name: str, service name - :returns: str, service type - """ - return self.__names[service_type] - - -class _HookStatus(utils.ImmutableMixin, utils.EnumMixin): - """Hook result statuses.""" - SUCCESS = "success" - FAILED = "failed" - VALIDATION_FAILED = "validation_failed" - - -class _TagType(utils.ImmutableMixin, utils.EnumMixin): - TASK = "task" - SUBTASK = "subtask" - VERIFICATION = "verification" - - -class _VerifierStatus(utils.ImmutableMixin, utils.EnumMixin): - """Verifier statuses.""" - INIT = "init" - INSTALLING = "installing" - INSTALLED = "installed" - UPDATING = "updating" - EXTENDING = "extending" - FAILED = "failed" - - -# NOTE(andreykurilin): In case of updating these statuses, please do not forget -# to update doc reference too -class _VerificationStatus(utils.ImmutableMixin, utils.EnumMixin): - """Verification statuses.""" - INIT = "init" - RUNNING = "running" - FINISHED = "finished" - FAILED = "failed" - CRASHED = "crashed" - - -class _TimeFormat(utils.ImmutableMixin, utils.EnumMixin): - """International time formats""" - ISO8601 = "%Y-%m-%dT%H:%M:%S%z" - - -TaskStatus = _TaskStatus() -SubtaskStatus = _SubtaskStatus() -DeployStatus = _DeployStatus() -EndpointPermission = _EndpointPermission() -ServiceType = _ServiceType() -Service = _Service() -EndpointType = _EndpointType() -HookStatus = _HookStatus() -TagType = _TagType() -VerifierStatus = _VerifierStatus() -VerificationStatus = _VerificationStatus() -TimeFormat = _TimeFormat() diff --git a/rally/deployment/__init__.py b/rally/deployment/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/rally/deployment/credential.py b/rally/deployment/credential.py deleted file mode 100644 index d9037c79..00000000 --- a/rally/deployment/credential.py +++ /dev/null @@ -1,89 +0,0 @@ -# Copyright 2017: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
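The enum singletons deleted from rally/consts.py above get their behavior from ImmutableMixin and EnumMixin in rally/common/utils.py: each is an iterable, write-protected bag of string constants. A small sketch of what that buys:

    from rally import consts

    # EnumMixin makes each singleton iterable over its values
    assert "finished" in list(consts.TaskStatus)
    # ImmutableMixin rejects mutation once __init__ has run
    try:
        consts.TaskStatus.FINISHED = "broken"
    except AttributeError:
        pass  # "This object is immutable."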
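Similarly, a sketch of the two behaviors that motivate the custom loader in rally/common/yamlutils.py above: duplicate-key detection, plus ordered mappings whose repr stays JSON-friendly for Jinja2 rendering:

    from rally.common import yamlutils

    doc = yamlutils.safe_load('{"b": 1, "a": 2}')
    print(repr(doc))  # {"b": 1, "a": 2}: insertion order kept, dict-style repr

    yamlutils.safe_load('{"a": 1, "a": 2}')
    # raises yaml.constructor.ConstructorError: "the key (a) is redefined"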
- -import abc - -import jsonschema -import six - -from rally.common.plugin import plugin - - -def configure(namespace): - def wrapper(cls): - cls = plugin.configure(name="credential", platform=namespace)(cls) - return cls - return wrapper - - -def get(namespace): - return Credential.get(name="credential", platform=namespace) - - -@plugin.base() -@six.add_metaclass(abc.ABCMeta) -class Credential(plugin.Plugin): - """Base class for credentials.""" - - @abc.abstractmethod - def to_dict(self): - """Converts credential object to dict. - - :returns: dict that can be used to recreate credential using - constructor: Credential(**credential.to_dict()) - """ - - @abc.abstractmethod - def verify_connection(self): - """Verifies that credential can be used for connection.""" - - def list_services(self): - """Returns available services. - - :returns: dict - """ - return {} - - -def configure_builder(namespace): - def wrapper(cls): - cls = plugin.configure(name="credential_builder", - platform=namespace)(cls) - return cls - return wrapper - - -def get_builder(namespace): - return CredentialBuilder.get(name="credential_builder", - platform=namespace) - - -@plugin.base() -@six.add_metaclass(abc.ABCMeta) -class CredentialBuilder(plugin.Plugin): - """Base class for extensions of ExistingCloud deployment.""" - - CONFIG_SCHEMA = {"type": "null"} - - def __init__(self, config): - self.config = config - - @classmethod - def validate(cls, config): - jsonschema.validate(config, cls.CONFIG_SCHEMA) - - @abc.abstractmethod - def build_credentials(self): - """Builds credentials from provided configuration""" diff --git a/rally/deployment/engine.py b/rally/deployment/engine.py deleted file mode 100644 index 9c892c1d..00000000 --- a/rally/deployment/engine.py +++ /dev/null @@ -1,138 +0,0 @@ -# Copyright 2013: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import abc - -import jsonschema -import six - -from rally.common.i18n import _, _LE -from rally.common import logging -from rally.common.plugin import plugin -from rally import consts -from rally import exceptions - - -LOG = logging.getLogger(__name__) -configure = plugin.configure - - -@plugin.base() -@six.add_metaclass(abc.ABCMeta) -class Engine(plugin.Plugin): - """Base class of all deployment engines. - - It's a base class with self-discovery of subclasses. Each subclass - has to implement deploy() and cleanup() methods. By default, each engine - located as a submodule of the package rally.deployment.engines is - auto-discovered. - - Example of usage with a simple engine: - - # Add new engine with __name__ == "A" - class A(Engine): - def __init__(self, deployment): - # do something - - def deploy(self): - # Make a deployment and return OpenStack credentials. - # The credentials may have either admin or ordinary users - # permissions (depending on how the deploy engine has been - # initialized). - return [credential_1, credential_2, ...] 
- - def cleanup(self): - # Destroy OpenStack deployment and free resource - - An instance of this class used as a context manager on any unsafe - operations to a deployment. Any unhandled exceptions bring a status - of the deployment to the inconsistent state. - - with Engine.get_engine("A", deployment) as deploy: - # deploy is an instance of the A engine - # perform all usage operations on your cloud - """ - def __init__(self, deployment): - self.deployment = deployment - - @property - def config(self): - return self.deployment["config"] - - def validate(self, config=None): - # TODO(sskripnick): remove this checking when config schema - # is done for all available engines - if hasattr(self, "CONFIG_SCHEMA"): - jsonschema.validate(config or self.config, self.CONFIG_SCHEMA) - - # FIXME(boris-42): Get rid of this method - @staticmethod - def get_engine(name, deployment): - """Returns instance of a deploy engine with corresponding name.""" - try: - engine_cls = Engine.get(name) - return engine_cls(deployment) - except exceptions.PluginNotFound: - LOG.error(_LE("Deployment %(uuid)s: Deploy engine for %(name)s " - "does not exist.") % - {"uuid": deployment["uuid"], "name": name}) - deployment.update_status(consts.DeployStatus.DEPLOY_FAILED) - raise - - @abc.abstractmethod - def deploy(self): - """Deploy OpenStack cloud and return credentials.""" - - @abc.abstractmethod - def cleanup(self): - """Cleanup OpenStack deployment.""" - - @logging.log_deploy_wrapper(LOG.info, _("OpenStack cloud deployment.")) - def make_deploy(self): - self.deployment.set_started() - credentials = self.deploy() - self.deployment.set_completed() - return credentials - - @logging.log_deploy_wrapper(LOG.info, _("Destroy cloud and free " - "allocated resources.")) - def make_cleanup(self): - self.deployment.update_status(consts.DeployStatus.CLEANUP_STARTED) - self.cleanup() - self.deployment.update_status(consts.DeployStatus.CLEANUP_FINISHED) - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_value, exc_traceback): - if exc_type is not None: - exc_info = None - if not issubclass(exc_type, exceptions.InvalidArgumentsException): - exc_info = (exc_type, exc_value, exc_traceback) - LOG.error(_LE("Deployment %(uuid)s: Error has occurred into " - "context of the deployment"), - {"uuid": self.deployment["uuid"]}, - exc_info=exc_info) - status = self.deployment["status"] - if status in (consts.DeployStatus.DEPLOY_INIT, - consts.DeployStatus.DEPLOY_STARTED): - self.deployment.update_status( - consts.DeployStatus.DEPLOY_FAILED) - elif status == consts.DeployStatus.DEPLOY_FINISHED: - self.deployment.update_status( - consts.DeployStatus.DEPLOY_INCONSISTENT) - elif status == consts.DeployStatus.CLEANUP_STARTED: - self.deployment.update_status( - consts.DeployStatus.CLEANUP_FAILED) diff --git a/rally/deployment/engines/__init__.py b/rally/deployment/engines/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/rally/deployment/engines/existing.py b/rally/deployment/engines/existing.py deleted file mode 100644 index c651ed98..00000000 --- a/rally/deployment/engines/existing.py +++ /dev/null @@ -1,261 +0,0 @@ -# Copyright 2013: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import copy
-
-from rally.common import logging
-from rally import consts
-from rally.deployment import credential
-from rally.deployment import engine
-
-LOG = logging.getLogger(__name__)
-
-
-@engine.configure(name="ExistingCloud")
-class ExistingCloud(engine.Engine):
-    """Platform independent deployment engine.
-
-    This deployment engine allows specifying a list of credentials for one
-    or more platforms.
-
-    Example configuration:
-
-    .. code-block:: json
-
-        {
-            "type": "ExistingCloud",
-            "creds": {
-                "openstack": {
-                    "auth_url": "http://localhost:5000/v3/",
-                    "region_name": "RegionOne",
-                    "endpoint_type": "public",
-                    "admin": {
-                        "username": "admin",
-                        "password": "admin",
-                        "user_domain_name": "admin",
-                        "project_name": "admin",
-                        "project_domain_name": "admin"
-                    },
-                    "https_insecure": false,
-                    "https_cacert": ""
-                }
-            }
-        }
-
-    To specify extra options you can use the special "extra" parameter:
-
-    .. code-block:: json
-
-        {
-            "type": "ExistingCloud",
-            ...
-            "extra": {"some_var": "some_value"}
-        }
-
-    It also supports a deprecated configuration format that covers
-    only OpenStack.
-
-    keystone v2:
-
-    .. code-block:: json
-
-        {
-            "type": "ExistingCloud",
-            "auth_url": "http://localhost:5000/v2.0/",
-            "region_name": "RegionOne",
-            "endpoint_type": "public",
-            "admin": {
-                "username": "admin",
-                "password": "password",
-                "tenant_name": "demo"
-            },
-            "https_insecure": false,
-            "https_cacert": ""
-        }
-
-    keystone v3 API endpoint:
-
-    ..
code-block:: json - - { - "type": "ExistingCloud", - "auth_url": "http://localhost:5000/v3/", - "region_name": "RegionOne", - "endpoint_type": "public", - "admin": { - "username": "admin", - "password": "admin", - "user_domain_name": "admin", - "project_name": "admin", - "project_domain_name": "admin", - }, - "https_insecure": False, - "https_cacert": "", - } - - """ - - USER_SCHEMA = { - "type": "object", - "oneOf": [ - { - "description": "Keystone V2.0", - "properties": { - "username": {"type": "string"}, - "password": {"type": "string"}, - "tenant_name": {"type": "string"}, - }, - "required": ["username", "password", "tenant_name"], - "additionalProperties": False - }, - { - "description": "Keystone V3.0", - "properties": { - "username": {"type": "string"}, - "password": {"type": "string"}, - "domain_name": {"type": "string"}, - "user_domain_name": {"type": "string"}, - "project_name": {"type": "string"}, - "project_domain_name": {"type": "string"}, - }, - "required": ["username", "password", "project_name"], - "additionalProperties": False - } - ], - } - - OLD_CONFIG_SCHEMA = { - "type": "object", - "description": "Deprecated schema (openstack only)", - "properties": { - "type": {"type": "string"}, - "auth_url": {"type": "string"}, - "region_name": {"type": "string"}, - # NOTE(andreykurilin): it looks like we do not use endpoint - # var at all - "endpoint": {"type": ["string", "null"]}, - "endpoint_type": {"enum": [consts.EndpointType.ADMIN, - consts.EndpointType.INTERNAL, - consts.EndpointType.PUBLIC, - None]}, - "https_insecure": {"type": "boolean"}, - "https_cacert": {"type": "string"}, - "profiler_hmac_key": {"type": ["string", "null"]}, - "admin": USER_SCHEMA, - "users": {"type": "array", "items": USER_SCHEMA, "minItems": 1}, - "extra": {"type": "object", "additionalProperties": True} - }, - "anyOf": [ - {"description": "The case when the admin is specified and the " - "users can be created via 'users' context or " - "'existing_users' will be used.", - "required": ["type", "auth_url", "admin"]}, - {"description": "The case when the only existing users are " - "specified.", - "required": ["type", "auth_url", "users"]} - ], - "additionalProperties": False - } - - NEW_CONFIG_SCHEMA = { - "type": "object", - "description": "New schema for multiplatform deployment", - "properties": { - "type": {"enum": ["ExistingCloud"]}, - "creds": { - "type": "object", - "patternProperties": { - "^[a-z0-9_-]+$": { - "oneOf": [ - { - "description": "Single credential", - "type": "object" - }, - { - "description": "List of credentials", - "type": "array", - "items": {"type": "object"} - }, - ] - } - } - }, - "extra": {"type": "object", "additionalProperties": True} - }, - "required": ["type", "creds"], - "additionalProperties": False - } - - CONFIG_SCHEMA = {"type": "object", - "oneOf": [OLD_CONFIG_SCHEMA, NEW_CONFIG_SCHEMA]} - - def validate(self, config=None): - config = config or self.config - super(ExistingCloud, self).validate(config) - - creds_config = self._get_creds(config) - for platform, config in creds_config.items(): - builder_cls = credential.get_builder(platform) - for creds in config: - builder_cls.validate(creds) - - def _get_creds(self, config): - # NOTE(astudenov): copy config to prevent compatibility changes - # from saving to database - config = copy.deepcopy(config) - if "creds" not in config: - # backward compatibility with old schema - del config["type"] - creds_config = {"openstack": [config]} - else: - creds_config = config["creds"] - - # convert all credentials to list - for 
platform, config in creds_config.items(): - if isinstance(config, dict): - creds_config[platform] = [config] - return creds_config - - def make_deploy(self): - platforms = (["openstack"] if "creds" not in self.config - else self.config["creds"].keys()) - LOG.info("Save deployment '%(name)s' (uuid=%(uuid)s) with " - "'%(platforms)s' platform%(plural)s." % - {"name": self.deployment["name"], - "uuid": self.deployment["uuid"], - "platforms": "', '".join(platforms), - "plural": "s" if len(platforms) > 1 else ""}) - self.deployment.set_started() - credentials = self.deploy() - self.deployment.set_completed() - return credentials - - def deploy(self): - if "creds" not in self.config: - LOG.warning("Old config schema is deprecated since Rally 0.10.0. " - "Please use new config schema for ExistingCloud") - creds_config = self._get_creds(self.config) - parsed_credentials = {} - for platform, config in creds_config.items(): - builder_cls = credential.get_builder(platform) - credentials = [] - for creds in config: - builder = builder_cls(creds) - credentials.append(builder.build_credentials()) - parsed_credentials[platform] = credentials - return parsed_credentials - - def cleanup(self): - pass diff --git a/rally/exceptions.py b/rally/exceptions.py deleted file mode 100644 index a1f951ba..00000000 --- a/rally/exceptions.py +++ /dev/null @@ -1,249 +0,0 @@ -# Copyright 2013: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import six - -from rally.common.i18n import _ -from rally.common.plugin import discover - - -_exception_map = None - - -class RallyException(Exception): - """Base Rally Exception - - To correctly use this class, inherit from it and define - a "msg_fmt" property. That msg_fmt will get printf'd - with the keyword arguments provided to the constructor. 
- - """ - msg_fmt = _("%(message)s") - error_code = 500 - - def __init__(self, message=None, **kwargs): - self.kwargs = kwargs - - if "%(message)s" in self.msg_fmt: - kwargs.update({"message": message}) - - super(RallyException, self).__init__(self.msg_fmt % kwargs) - - def format_message(self): - return six.text_type(self) - - -def find_exception(response): - """Discover a proper exception class based on response object.""" - global _exception_map - if _exception_map is None: - _exception_map = dict( - (e.error_code, e) for e in discover.itersubclasses(RallyException)) - exc_class = _exception_map.get(response.status_code, RallyException) - - error_data = response.json()["error"] - if error_data["args"]: - return exc_class(error_data["args"]) - return exc_class(error_data["msg"]) - - -def make_exception(exc): - """Check a class of exception and convert it to rally-like if needed.""" - if isinstance(exc, RallyException): - return exc - return RallyException(str(exc)) - - -class InvalidArgumentsException(RallyException): - error_code = 455 - msg_fmt = _("Invalid arguments: '%(message)s'") - - -class InvalidConfigException(RallyException): - error_code = 456 - msg_fmt = _("This config has invalid schema: `%(message)s`") - - -class InvalidTaskException(InvalidConfigException): - error_code = 457 - msg_fmt = _("Task config is invalid: `%(message)s`") - - -class InvalidTaskConfig(InvalidTaskException): - error_code = 458 - msg_fmt = _("Input task is invalid!\n\n" - "Subtask %(name)s[%(pos)s] has wrong configuration" - "\nSubtask configuration:\n%(config)s\n" - "\nReason(s):\n %(reason)s") - - -class NotFoundException(RallyException): - error_code = 404 - msg_fmt = _("The resource can not be found: %(message)s") - - -class ThreadTimeoutException(RallyException): - error_code = 515 - msg_fmt = _("Iteration interrupted due to timeout.") - - -class PluginNotFound(NotFoundException): - error_code = 459 - msg_fmt = _("There is no plugin with name: `%(name)s` in " - "%(platform)s platform.") - - -class PluginWithSuchNameExists(RallyException): - error_code = 516 - msg_fmt = _("Plugin with such name: %(name)s already exists in " - "%(platform)s platform. It's module allocates at " - "%(existing_path)s. 
You are trying to add plugin whose module " - "allocates at %(new_path)s.") - - -class TaskNotFound(NotFoundException): - error_code = 460 - msg_fmt = _("Task with uuid=%(uuid)s not found.") - - -class DeploymentNotFound(NotFoundException): - error_code = 461 - msg_fmt = _("Deployment %(deployment)s not found.") - - -class DeploymentNameExists(RallyException): - error_code = 462 - msg_fmt = _("Deployment name '%(deployment)s' already registered.") - - -class DeploymentNotFinishedStatus(RallyException): - error_code = 463 - msg_fmt = _("Deployment '%(name)s' (UUID=%(uuid)s) is in" - " '%(status)s' status.") - - -class DeploymentIsBusy(RallyException): - error_code = 464 - msg_fmt = _("There are allocated resources for the deployment with " - "uuid=%(uuid)s.") - - -class RallyAssertionError(RallyException): - msg_fmt = _("Assertion error: %(message)s") - - -class ResourceNotFound(NotFoundException): - error_code = 465 - msg_fmt = _("Resource with id=%(id)s not found.") - - -class TimeoutException(RallyException): - error_code = 517 - msg_fmt = _("Rally tired waiting for %(resource_type)s %(resource_name)s:" - "%(resource_id)s to become %(desired_status)s current " - "status %(resource_status)s") - - -class GetResourceFailure(RallyException): - error_code = 518 - msg_fmt = _("Failed to get the resource %(resource)s: %(err)s") - - -class GetResourceNotFound(GetResourceFailure): - error_code = 519 - msg_fmt = _("Resource %(resource)s is not found.") - - -class GetResourceErrorStatus(GetResourceFailure): - error_code = 520 - msg_fmt = _("Resource %(resource)s has %(status)s status.\n" - "Fault: %(fault)s") - - -class ScriptError(RallyException): - msg_fmt = _("Script execution failed: %(message)s") - - -class TaskInvalidStatus(RallyException): - error_code = 466 - msg_fmt = _("Task `%(uuid)s` in `%(actual)s` status but `%(require)s` is " - "required.") - - -class InvalidAdminException(InvalidArgumentsException): - error_code = 521 - msg_fmt = _("user '%(username)s' doesn't have 'admin' role") - - -class AuthenticationFailed(InvalidArgumentsException): - error_code = 401 - msg_fmt = _("Failed to authenticate to %(url)s for user '%(username)s'" - " in project '%(project)s': %(etype)s: %(error)s") - - -class InvalidScenarioArgument(RallyException): - error_code = 467 - msg_fmt = _("Invalid scenario argument: '%(message)s'") - - -class ContextSetupFailure(RallyException): - error_code = 524 - msg_fmt = _("Unable to setup context '%(ctx_name)s': '%(msg)s'") - - -class ValidationError(RallyException): - error_code = 468 - msg_fmt = _("Validation error: %(message)s") - - -class WorkerNotFound(NotFoundException): - error_code = 469 - msg_fmt = _("Worker %(worker)s could not be found") - - -class WorkerAlreadyRegistered(RallyException): - error_code = 525 - msg_fmt = _("Worker %(worker)s already registered") - - -class MultipleMatchesFound(RallyException): - error_code = 470 - msg_fmt = _("Found multiple %(needle)s: %(haystack)s") - - def __init__(self, **kwargs): - if "hint" in kwargs: - self.msg_fmt += ". Hint: %(hint)s" - super(MultipleMatchesFound, self).__init__(**kwargs) - - -class SSHTimeout(RallyException): - error_code = 526 - pass - - -class SSHError(RallyException): - error_code = 527 - pass - - -class InvalidConnectionString(RallyException): - error_code = 471 - msg_fmt = _("The connection string is not valid: %(message)s. 
Please " - "check your connection string.") - - -class DowngradeNotSupported(RallyException): - error_code = 528 - msg_fmt = _("Database schema downgrade is not supported.") diff --git a/rally/osclients.py b/rally/osclients.py deleted file mode 100644 index baf75797..00000000 --- a/rally/osclients.py +++ /dev/null @@ -1,855 +0,0 @@ -# Copyright 2013: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import abc -import os - -from oslo_config import cfg -from six.moves.urllib import parse - -from rally.cli import envutils -from rally.common.i18n import _ -from rally.common import logging -from rally.common.plugin import plugin -from rally import consts -from rally import exceptions - - -LOG = logging.getLogger(__name__) - -CONF = cfg.CONF - -OSCLIENTS_OPTS = [ - cfg.FloatOpt("openstack_client_http_timeout", default=180.0, - help="HTTP timeout for any of OpenStack service in seconds") -] -CONF.register_opts(OSCLIENTS_OPTS) - - -def configure(name, default_version=None, default_service_type=None, - supported_versions=None): - """OpenStack client class wrapper. - - Each client class has to be wrapped by configure() wrapper. It - sets essential configuration of client classes. - - :param name: Name of the client - :param default_version: Default version for client - :param default_service_type: Default service type of endpoint(If this - variable is not specified, validation will assume that your client - doesn't allow to specify service type. - :param supported_versions: List of supported versions(If this variable is - not specified, `OSClients.validate_version` method will raise an - exception that client doesn't support setting any versions. If this - logic is wrong for your client, you should override `validate_version` - in client object) - """ - def wrapper(cls): - # openstack platform is hardcoded in OSclients.get() method - cls = plugin.configure(name=name)(cls) - cls._meta_set("default_version", default_version) - cls._meta_set("default_service_type", default_service_type) - cls._meta_set("supported_versions", supported_versions or []) - return cls - - return wrapper - - -@plugin.base() -class OSClient(plugin.Plugin): - """Base class for openstack clients""" - - def __init__(self, credential, api_info, cache_obj): - self.credential = credential - self.api_info = api_info - self.cache = cache_obj - - def choose_version(self, version=None): - """Return version string. - - Choose version between transmitted(preferable value if present), - version from api_info(configured from a context) and default. - """ - # NOTE(andreykurilin): The result of choose is converted to string, - # since most of clients contain map for versioned modules, where a key - # is a string value of version. Example of map and its usage: - # - # from oslo_utils import importutils - # ... 
- # version_map = {"1": "someclient.v1.client.Client", - # "2": "someclient.v2.client.Client"} - # - # def Client(version, *args, **kwargs): - # cls = importutils.import_class(version_map[version]) - # return cls(*args, **kwargs) - # - # That is why type of version so important and we should ensure that - # version is a string object. - # For those clients which doesn't accept string value(for example - # zaqarclient), this method should be overridden. - version = (version - or self.api_info.get(self.get_name(), {}).get("version") - or self._meta_get("default_version")) - if version is not None: - version = str(version) - return version - - @classmethod - def get_supported_versions(cls): - return cls._meta_get("supported_versions") - - @classmethod - def validate_version(cls, version): - supported_versions = cls.get_supported_versions() - if supported_versions: - if str(version) not in supported_versions: - raise exceptions.ValidationError(_( - "'%(vers)s' is not supported. Should be one of " - "'%(supported)s'") % {"vers": version, - "supported": supported_versions}) - else: - raise exceptions.RallyException( - _("Setting version is not supported.")) - try: - float(version) - except ValueError: - raise exceptions.ValidationError(_( - "'%s' is invalid. Should be numeric value.") % version) - - def choose_service_type(self, service_type=None): - """Return service_type string. - - Choose service type between transmitted(preferable value if present), - service type from api_info(configured from a context) and default. - """ - return (service_type - or self.api_info.get(self.get_name(), {}).get("service_type") - or self._meta_get("default_service_type")) - - @classmethod - def is_service_type_configurable(cls): - """Just checks that client supports setting service type.""" - if cls._meta_get("default_service_type") is None: - raise exceptions.RallyException(_( - "Setting service type is not supported.")) - - @property - def keystone(self): - return OSClient.get("keystone")(self.credential, self.api_info, - self.cache) - - def _get_session(self, auth_url=None, version=None): - LOG.warning( - "Method `rally.osclient.OSClient._get_session` is deprecated since" - " Rally 0.6.0. 
Use " - "`rally.osclient.OSClient.keystone.get_session` instead.") - return self.keystone.get_session(version) - - def _get_endpoint(self, service_type=None): - kw = {"service_type": self.choose_service_type(service_type), - "region_name": self.credential.region_name} - if self.credential.endpoint_type: - kw["interface"] = self.credential.endpoint_type - api_url = self.keystone.service_catalog.url_for(**kw) - return api_url - - def _get_auth_info(self, user_key="username", - password_key="password", - auth_url_key="auth_url", - project_name_key="project_id", - domain_name_key="domain_name", - user_domain_name_key="user_domain_name", - project_domain_name_key="project_domain_name", - cacert_key="cacert", - endpoint_type="endpoint_type", - ): - kw = { - user_key: self.credential.username, - password_key: self.credential.password, - auth_url_key: self.credential.auth_url, - cacert_key: self.credential.https_cacert, - } - if project_name_key: - kw.update({project_name_key: self.credential.tenant_name}) - - if "v2.0" not in self.credential.auth_url: - kw.update({ - domain_name_key: self.credential.domain_name}) - kw.update({ - user_domain_name_key: - self.credential.user_domain_name or "Default"}) - kw.update({ - project_domain_name_key: - self.credential.project_domain_name or "Default"}) - if self.credential.endpoint_type: - kw[endpoint_type] = self.credential.endpoint_type - return kw - - @abc.abstractmethod - def create_client(self, *args, **kwargs): - """Create new instance of client.""" - - def __call__(self, *args, **kwargs): - """Return initialized client instance.""" - key = "{0}{1}{2}".format(self.get_name(), - str(args) if args else "", - str(kwargs) if kwargs else "") - if key not in self.cache: - self.cache[key] = self.create_client(*args, **kwargs) - return self.cache[key] - - @classmethod - def get(cls, name, **kwargs): - # NOTE(boris-42): Remove this after we finish rename refactoring. - kwargs.pop("platform", None) - kwargs.pop("namespace", None) - return super(OSClient, cls).get(name, platform="openstack", **kwargs) - - -@configure("keystone", supported_versions=("2", "3")) -class Keystone(OSClient): - """Wrapper for KeystoneClient which hides OpenStack auth details.""" - - @property - def keystone(self): - raise exceptions.RallyException(_("Method 'keystone' is restricted " - "for keystoneclient. 
:)")) - - @property - def service_catalog(self): - return self.auth_ref.service_catalog - - @property - def auth_ref(self): - try: - if "keystone_auth_ref" not in self.cache: - sess, plugin = self.get_session() - self.cache["keystone_auth_ref"] = plugin.get_access(sess) - except Exception as e: - if logging.is_debug(): - LOG.exception("Unable to authenticate for user" - " %(username)s in project" - " %(tenant_name)s" % - {"username": self.credential.username, - "tenant_name": self.credential.tenant_name}) - raise exceptions.AuthenticationFailed( - username=self.credential.username, - project=self.credential.tenant_name, - url=self.credential.auth_url, - etype=e.__class__.__name__, - error=str(e)) - return self.cache["keystone_auth_ref"] - - def get_session(self, version=None): - key = "keystone_session_and_plugin_%s" % version - if key not in self.cache: - from keystoneauth1 import discover - from keystoneauth1 import identity - from keystoneauth1 import session - - version = self.choose_version(version) - auth_url = self.credential.auth_url - if version is not None: - auth_url = self._remove_url_version() - - password_args = { - "auth_url": auth_url, - "username": self.credential.username, - "password": self.credential.password, - "tenant_name": self.credential.tenant_name - } - - if version is None: - # NOTE(rvasilets): If version not specified than we discover - # available version with the smallest number. To be able to - # discover versions we need session - temp_session = session.Session( - verify=(self.credential.https_cacert or - not self.credential.https_insecure), - timeout=CONF.openstack_client_http_timeout) - version = str(discover.Discover( - temp_session, - password_args["auth_url"]).version_data()[0]["version"][0]) - - if "v2.0" not in password_args["auth_url"] and ( - version != "2"): - password_args.update({ - "user_domain_name": self.credential.user_domain_name, - "domain_name": self.credential.domain_name, - "project_domain_name": self.credential.project_domain_name - }) - identity_plugin = identity.Password(**password_args) - sess = session.Session( - auth=identity_plugin, - verify=(self.credential.https_cacert or - not self.credential.https_insecure), - timeout=CONF.openstack_client_http_timeout) - self.cache[key] = (sess, identity_plugin) - return self.cache[key] - - def _remove_url_version(self): - """Remove any version from the auth_url. - - The keystone Client code requires that auth_url be the root url - if a version override is used. - """ - url = parse.urlparse(self.credential.auth_url) - path = url.path.rstrip("/") - if path.endswith("v2.0") or path.endswith("v3"): - path = os.path.join(*os.path.split(path)[:-1]) - parts = (url.scheme, url.netloc, path, url.params, url.query, - url.fragment) - return parse.urlunparse(parts) - return self.credential.auth_url - - def create_client(self, version=None): - """Return a keystone client. - - :param version: Keystone API version, can be one of: - ("2", "3") - - If this object was constructed with a version in the api_info - then that will be used unless the version parameter is passed. - """ - import keystoneclient - from keystoneclient import client - - # Use the version in the api_info if provided, otherwise fall - # back to the passed version (which may be None, in which case - # keystoneclient chooses). 
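For reference, the fallback that choose_version implements (here and in every other wrapper) can be sketched standalone. This is an illustrative stand-in with hypothetical names, not the code under deletion:

    def pick_version(explicit, api_info, client_name, default):
        # Preference order: explicit argument, then the per-client
        # "version" configured via api_info, then the plugin default.
        version = (explicit
                   or api_info.get(client_name, {}).get("version")
                   or default)
        # Most clients key their versioned modules by string, so
        # normalize before returning.
        return str(version) if version is not None else None

    # pick_version(None, {"keystone": {"version": 3}}, "keystone", "2") == "3"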
- version = self.choose_version(version) - - sess = self.get_session(version=version)[0] - - kw = {"version": version, "session": sess, - "timeout": CONF.openstack_client_http_timeout} - if keystoneclient.__version__[0] == "1": - # NOTE(andreykurilin): let's leave this hack for envs which uses - # old(<2.0.0) keystoneclient version. Upstream fix: - # https://github.com/openstack/python-keystoneclient/commit/d9031c252848d89270a543b67109a46f9c505c86 - from keystoneauth1 import plugin - kw["auth_url"] = sess.get_endpoint(interface=plugin.AUTH_INTERFACE) - if self.credential.endpoint_type: - kw["interface"] = self.credential.endpoint_type - - # NOTE(amyge): - # In auth_ref(), plugin.get_access(sess) only returns a auth_ref object - # and won't check the authentication access until it is actually being - # called. To catch the authentication failure in auth_ref(), we will - # have to call self.auth_ref.auth_token here to actually use auth_ref. - self.auth_ref # noqa - - return client.Client(**kw) - - -@configure("nova", default_version="2", default_service_type="compute") -class Nova(OSClient): - """Wrapper for NovaClient which returns a authenticated native client.""" - - @classmethod - def validate_version(cls, version): - from novaclient import api_versions - from novaclient import exceptions as nova_exc - - try: - api_versions.get_api_version(version) - except nova_exc.UnsupportedVersion: - raise exceptions.RallyException( - "Version string '%s' is unsupported." % version) - - def create_client(self, version=None, service_type=None): - """Return nova client.""" - from novaclient import client as nova - - client = nova.Client( - session=self.keystone.get_session()[0], - version=self.choose_version(version), - endpoint_override=self._get_endpoint(service_type)) - return client - - -@configure("neutron", default_version="2.0", default_service_type="network", - supported_versions=["2.0"]) -class Neutron(OSClient): - """Wrapper for NeutronClient which returns an authenticated native client. - - """ - - def create_client(self, version=None, service_type=None): - """Return neutron client.""" - from neutronclient.neutron import client as neutron - - kw_args = {} - if self.credential.endpoint_type: - kw_args["endpoint_type"] = self.credential.endpoint_type - - client = neutron.Client( - self.choose_version(version), - session=self.keystone.get_session()[0], - endpoint_override=self._get_endpoint(service_type), - **kw_args) - return client - - -@configure("glance", default_version="2", default_service_type="image", - supported_versions=["1", "2"]) -class Glance(OSClient): - """Wrapper for GlanceClient which returns an authenticated native client. - - """ - - def create_client(self, version=None, service_type=None): - """Return glance client.""" - import glanceclient as glance - - session = self.keystone.get_session()[0] - client = glance.Client( - version=self.choose_version(version), - endpoint_override=self._get_endpoint(service_type), - session=session) - return client - - -@configure("heat", default_version="1", default_service_type="orchestration", - supported_versions=["1"]) -class Heat(OSClient): - """Wrapper for HeatClient which returns an authenticated native client.""" - def create_client(self, version=None, service_type=None): - """Return heat client.""" - from heatclient import client as heat - - # ToDo: Remove explicit endpoint_type or interface initialization - # when heatclient no longer uses it. 
- kw_args = {} - if self.credential.endpoint_type: - kw_args["endpoint_type"] = self.credential.endpoint_type - kw_args["interface"] = self.credential.endpoint_type - - client = heat.Client( - self.choose_version(version), - session=self.keystone.get_session()[0], - # Remove endpoint once requirement is python-heatclient>=1.6 - endpoint=self._get_endpoint(service_type), - endpoint_override=self._get_endpoint(service_type), - **kw_args) - return client - - -@configure("cinder", default_version="2", default_service_type="volumev2", - supported_versions=["1", "2"]) -class Cinder(OSClient): - """Wrapper for CinderClient which returns an authenticated native client. - - """ - def create_client(self, version=None, service_type=None): - """Return cinder client.""" - from cinderclient import client as cinder - - client = cinder.Client( - self.choose_version(version), - session=self.keystone.get_session()[0], - endpoint_override=self._get_endpoint(service_type)) - return client - - -@configure("manila", default_version="1", default_service_type="share") -class Manila(OSClient): - """Wrapper for ManilaClient which returns an authenticated native client. - - """ - @classmethod - def validate_version(cls, version): - from manilaclient import api_versions - from manilaclient import exceptions as manila_exc - - try: - api_versions.get_api_version(version) - except manila_exc.UnsupportedVersion: - raise exceptions.RallyException( - "Version string '%s' is unsupported." % version) - - def create_client(self, version=None, service_type=None): - """Return manila client.""" - from manilaclient import client as manila - manila_client = manila.Client( - self.choose_version(version), - session=self.keystone.get_session()[0], - service_catalog_url=self._get_endpoint(service_type)) - return manila_client - - -@configure("ceilometer", default_version="2", default_service_type="metering", - supported_versions=["1", "2"]) -class Ceilometer(OSClient): - """Wrapper for CeilometerClient which returns authenticated native client. - - """ - def create_client(self, version=None, service_type=None): - """Return ceilometer client.""" - from ceilometerclient import client as ceilometer - - client = ceilometer.get_client( - self.choose_version(version), - session=self.keystone.get_session()[0], - endpoint_override=self._get_endpoint(service_type)) - return client - - -@configure("gnocchi", default_service_type="metric", default_version="1", - supported_versions=["1"]) -class Gnocchi(OSClient): - """Wrapper for GnocchiClient which returns an authenticated native client. - - """ - - def create_client(self, version=None, service_type=None): - """Return gnocchi client.""" - # NOTE(sumantmurke): gnocchiclient requires keystoneauth1 for - # authenticating and creating a session. - from gnocchiclient import client as gnocchi - - service_type = self.choose_service_type(service_type) - sess = self.keystone.get_session()[0] - gclient = gnocchi.Client(version=self.choose_version( - version), session=sess, service_type=service_type) - return gclient - - -@configure("ironic", default_version="1", default_service_type="baremetal", - supported_versions=["1"]) -class Ironic(OSClient): - """Wrapper for IronicClient which returns an authenticated native client. 
- - """ - - def create_client(self, version=None, service_type=None): - """Return Ironic client.""" - from ironicclient import client as ironic - - client = ironic.get_client( - self.choose_version(version), - session=self.keystone.get_session()[0], - endpoint=self._get_endpoint(service_type)) - return client - - -@configure("sahara", default_version="1.1", supported_versions=["1.0", "1.1"], - default_service_type="data-processing") -class Sahara(OSClient): - """Wrapper for SaharaClient which returns an authenticated native client. - - """ - - # NOTE(andreykurilin): saharaclient supports "1.0" version and doesn't - # support "1". `choose_version` and `validate_version` methods are written - # as a hack to covert 1 -> 1.0, which can simplify setting saharaclient - # for end-users. - def choose_version(self, version=None): - return float(super(Sahara, self).choose_version(version)) - - @classmethod - def validate_version(cls, version): - super(Sahara, cls).validate_version(float(version)) - - def create_client(self, version=None, service_type=None): - """Return Sahara client.""" - from saharaclient import client as sahara - - client = sahara.Client( - self.choose_version(version), - session=self.keystone.get_session()[0], - sahara_url=self._get_endpoint(service_type)) - - return client - - -@configure("zaqar", default_version="1.1", default_service_type="messaging", - supported_versions=["1", "1.1"]) -class Zaqar(OSClient): - """Wrapper for ZaqarClient which returns an authenticated native client. - - """ - def choose_version(self, version=None): - # zaqarclient accepts only int or float obj as version - return float(super(Zaqar, self).choose_version(version)) - - def create_client(self, version=None, service_type=None): - """Return Zaqar client.""" - from zaqarclient.queues import client as zaqar - client = zaqar.Client(url=self._get_endpoint(), - version=self.choose_version(version), - session=self.keystone.get_session()[0]) - return client - - -@configure("murano", default_version="1", - default_service_type="application-catalog", - supported_versions=["1"]) -class Murano(OSClient): - """Wrapper for MuranoClient which returns an authenticated native client. - - """ - def create_client(self, version=None, service_type=None): - """Return Murano client.""" - from muranoclient import client as murano - - client = murano.Client(self.choose_version(version), - endpoint=self._get_endpoint(service_type), - token=self.keystone.auth_ref.auth_token) - - return client - - -@configure("designate", default_version="1", default_service_type="dns", - supported_versions=["1", "2"]) -class Designate(OSClient): - """Wrapper for DesignateClient which returns authenticated native client. - - """ - def create_client(self, version=None, service_type=None): - """Return designate client.""" - from designateclient import client - - version = self.choose_version(version) - - api_url = self._get_endpoint(service_type) - api_url += "/v%s" % version - - session = self.keystone.get_session()[0] - if version == "2": - return client.Client(version, session=session, - endpoint_override=api_url) - return client.Client(version, session=session, - endpoint=api_url) - - -@configure("trove", default_version="1.0", supported_versions=["1.0"], - default_service_type="database") -class Trove(OSClient): - """Wrapper for TroveClient which returns an authenticated native client. 
- - """ - def create_client(self, version=None, service_type=None): - """Returns trove client.""" - from troveclient import client as trove - - client = trove.Client(self.choose_version(version), - session=self.keystone.get_session()[0], - endpoint=self._get_endpoint(service_type)) - return client - - -@configure("mistral", default_service_type="workflowv2") -class Mistral(OSClient): - """Wrapper for MistralClient which returns an authenticated native client. - - """ - def create_client(self, service_type=None): - """Return Mistral client.""" - from mistralclient.api import client as mistral - - client = mistral.client( - mistral_url=self._get_endpoint(service_type), - service_type=self.choose_service_type(service_type), - auth_token=self.keystone.auth_ref.auth_token) - return client - - -@configure("swift", default_service_type="object-store") -class Swift(OSClient): - """Wrapper for SwiftClient which returns an authenticated native client. - - """ - def create_client(self, service_type=None): - """Return swift client.""" - from swiftclient import client as swift - - auth_token = self.keystone.auth_ref.auth_token - client = swift.Connection(retries=1, - preauthurl=self._get_endpoint(service_type), - preauthtoken=auth_token, - insecure=self.credential.https_insecure, - cacert=self.credential.https_cacert, - user=self.credential.username, - tenant_name=self.credential.tenant_name, - ) - return client - - -@configure("ec2") -class EC2(OSClient): - """Wrapper for EC2Client which returns an authenticated native client. - - """ - def create_client(self): - """Return ec2 client.""" - LOG.warning("rally.osclient.EC2 is deprecated since Rally 0.10.0.") - - import boto - - kc = self.keystone() - - if kc.version != "v2.0": - raise exceptions.RallyException( - _("Rally EC2 benchmark currently supports only" - "Keystone version 2")) - ec2_credential = kc.ec2.create(user_id=kc.auth_user_id, - tenant_id=kc.auth_tenant_id) - client = boto.connect_ec2_endpoint( - url=self._get_endpoint(), - aws_access_key_id=ec2_credential.access, - aws_secret_access_key=ec2_credential.secret, - is_secure=self.credential.https_insecure) - return client - - -@configure("monasca", default_version="2_0", - default_service_type="monitoring", supported_versions=["2_0"]) -class Monasca(OSClient): - """Wrapper for MonascaClient which returns an authenticated native client. - - """ - def create_client(self, version=None, service_type=None): - """Return monasca client.""" - from monascaclient import client as monasca - - # Change this to use session once it's supported by monascaclient - client = monasca.Client( - self.choose_version(version), - self._get_endpoint(service_type), - token=self.keystone.auth_ref.auth_token, - timeout=CONF.openstack_client_http_timeout, - insecure=self.credential.https_insecure, - **self._get_auth_info(project_name_key="tenant_name")) - return client - - -@configure("senlin", default_version="1", default_service_type="clustering", - supported_versions=["1"]) -class Senlin(OSClient): - """Wrapper for SenlinClient which returns an authenticated native client. 
- - """ - def create_client(self, version=None, service_type=None): - """Return senlin client.""" - from senlinclient import client as senlin - - return senlin.Client( - self.choose_version(version), - **self._get_auth_info(project_name_key="project_name", - cacert_key="cert", - endpoint_type="interface")) - - -@configure("magnum", default_version="1", supported_versions=["1"], - default_service_type="container-infra",) -class Magnum(OSClient): - """Wrapper for MagnumClient which returns an authenticated native client. - - """ - def create_client(self, version=None, service_type=None): - """Return magnum client.""" - from magnumclient import client as magnum - - api_url = self._get_endpoint(service_type) - session = self.keystone.get_session()[0] - - return magnum.Client( - session=session, - interface=self.credential.endpoint_type, - magnum_url=api_url) - - -@configure("watcher", default_version="1", default_service_type="infra-optim", - supported_versions=["1"]) -class Watcher(OSClient): - """Wrapper for WatcherClient which returns an authenticated native client. - - """ - def create_client(self, version=None, service_type=None): - """Return watcher client.""" - from watcherclient import client as watcher_client - watcher_api_url = self._get_endpoint( - self.choose_service_type(service_type)) - client = watcher_client.Client( - self.choose_version(version), - endpoint=watcher_api_url, - session=self.keystone.get_session()[0]) - return client - - -class Clients(object): - """This class simplify and unify work with OpenStack python clients.""" - - def __init__(self, credential, api_info=None, cache=None): - self.credential = credential - self.api_info = api_info or {} - self.cache = cache or {} - - def __getattr__(self, client_name): - """Lazy load of clients.""" - return OSClient.get(client_name)(self.credential, self.api_info, - self.cache) - - @classmethod - def create_from_env(cls): - creds = envutils.get_creds_from_env_vars() - from rally.plugins.openstack import credential - oscred = credential.OpenStackCredential( - auth_url=creds["auth_url"], - username=creds["admin"]["username"], - password=creds["admin"]["password"], - tenant_name=creds["admin"]["tenant_name"], - endpoint_type=creds["endpoint_type"], - user_domain_name=creds["admin"].get("user_domain_name"), - project_domain_name=creds["admin"].get("project_domain_name"), - endpoint=creds["endpoint"], - region_name=creds["region_name"], - https_cacert=creds["https_cacert"], - https_insecure=creds["https_insecure"]) - return cls(oscred) - - def clear(self): - """Remove all cached client handles.""" - self.cache = {} - - def verified_keystone(self): - """Ensure keystone endpoints are valid and then authenticate - - :returns: Keystone Client - """ - # Ensure that user is admin - if "admin" not in [role.lower() for role in - self.keystone.auth_ref.role_names]: - raise exceptions.InvalidAdminException( - username=self.credential.username) - return self.keystone() - - def services(self): - """Return available services names and types. 
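As a usage sketch of the Clients facade defined above (credential construction elided; `cred` stands for an OpenStackCredential such as create_from_env builds):

    clients = Clients(cred)

    # __getattr__ resolves "nova" to the Nova OSClient plugin; calling
    # the plugin builds and caches the native novaclient instance.
    nova = clients.nova()
    print([server.name for server in nova.servers.list()])

    # Repeated calls with the same arguments are served from the
    # shared cache, so the same native client object comes back.
    assert clients.nova() is nova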
- - :returns: dict, {"service_type": "service_name", ...} - """ - if "services_data" not in self.cache: - services_data = {} - available_services = self.keystone.service_catalog.get_endpoints() - for stype in available_services.keys(): - if stype in consts.ServiceType: - services_data[stype] = consts.ServiceType[stype] - else: - services_data[stype] = "__unknown__" - self.cache["services_data"] = services_data - - return self.cache["services_data"] diff --git a/rally/plugins/__init__.py b/rally/plugins/__init__.py deleted file mode 100644 index 8eed28ca..00000000 --- a/rally/plugins/__init__.py +++ /dev/null @@ -1,51 +0,0 @@ -# Copyright 2015: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import os - -import decorator - -from rally.common.plugin import discover - - -PLUGINS_LOADED = False - - -def load(): - global PLUGINS_LOADED - - if not PLUGINS_LOADED: - discover.import_modules_from_package("rally.deployment.engines") - discover.import_modules_from_package("rally.deployment.serverprovider") - discover.import_modules_from_package("rally.plugins.common") - try: - import rally_openstack # noqa - except ImportError: - # print warnings when rally_openstack will be released - discover.import_modules_from_package("rally.plugins.openstack") - discover.import_modules_from_package("rally.plugins.workload") - - discover.import_modules_by_entry_point() - - discover.load_plugins("/opt/rally/plugins/") - discover.load_plugins(os.path.expanduser("~/.rally/plugins/")) - - PLUGINS_LOADED = True - - -@decorator.decorator -def ensure_plugins_are_loaded(f, *args, **kwargs): - load() - return f(*args, **kwargs) diff --git a/rally/plugins/common/__init__.py b/rally/plugins/common/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/rally/plugins/common/context/__init__.py b/rally/plugins/common/context/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/rally/plugins/common/context/dummy.py b/rally/plugins/common/context/dummy.py deleted file mode 100644 index a02772f2..00000000 --- a/rally/plugins/common/context/dummy.py +++ /dev/null @@ -1,39 +0,0 @@ -# Copyright 2015: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
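The load() helper above is consumed through the ensure_plugins_are_loaded decorator; a minimal sketch of that usage (the decorated function is hypothetical):

    from rally import plugins

    @plugins.ensure_plugins_are_loaded
    def list_loaded_scenarios():
        # By the time this body runs, discovery has populated the
        # plugin registry exactly once, no matter how often the
        # function is called.
        ...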
- -from rally import consts -from rally import exceptions -from rally.task import context - - -@context.configure(name="dummy_context", order=750) -class DummyContext(context.Context): - """Dummy context.""" - CONFIG_SCHEMA = { - "type": "object", - "$schema": consts.JSON_SCHEMA, - "properties": { - "fail_setup": {"type": "boolean"}, - "fail_cleanup": {"type": "boolean"} - }, - } - - def setup(self): - if self.config.get("fail_setup", False): - raise exceptions.RallyException("Oops...setup is failed") - - def cleanup(self): - if self.config.get("fail_cleanup", False): - raise exceptions.RallyException("Oops...cleanup is failed") diff --git a/rally/plugins/common/context/users.py b/rally/plugins/common/context/users.py deleted file mode 100644 index 6c01f3d5..00000000 --- a/rally/plugins/common/context/users.py +++ /dev/null @@ -1,29 +0,0 @@ -# Copyright 2017: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally.task import context - - -@context.configure(name="users", order=100, hidden=True) -class NoUsers(context.Context): - """Context that provides empty list of users for default namespace.""" - - CONFIG_SCHEMA = {"type": "null"} - - def setup(self): - self.context["users"] = [] - - def cleanup(self): - pass diff --git a/rally/plugins/common/exporter/__init__.py b/rally/plugins/common/exporter/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/rally/plugins/common/exporter/reporters.py b/rally/plugins/common/exporter/reporters.py deleted file mode 100644 index 6c1997ea..00000000 --- a/rally/plugins/common/exporter/reporters.py +++ /dev/null @@ -1,120 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import itertools -import os - -from rally.common.io import junit -from rally.task import exporter -from rally.task.processing import plot - - -@exporter.configure("html") -class HTMLExporter(exporter.TaskExporter): - """Generates task report in HTML format.""" - INCLUDE_LIBS = False - - @classmethod - def validate(cls, output_destination): - """Validate destination of report. 
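The context plugins above are exercised from a task file; an illustrative fragment triggering a setup failure with dummy_context (expressed as a Python dict, though real task files are JSON or YAML with the same shape):

    task = {
        "Dummy.dummy": [{
            "runner": {"type": "serial", "times": 1},
            "context": {
                # DummyContext.setup() raises when fail_setup is true,
                # which makes it handy for testing error handling paths.
                "dummy_context": {"fail_setup": True}
            }
        }]
    }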
- - :param output_destination: Destination of report - """ - # nothing to check :) - pass - - def generate(self): - results = [] - processed_names = {} - for task in self.tasks_results: - for workload in itertools.chain( - *[s["workloads"] for s in task["subtasks"]]): - if workload["name"] in processed_names: - processed_names[workload["name"]] += 1 - workload["position"] = processed_names[workload["name"]] - else: - processed_names[workload["name"]] = 0 - results.append(task) - - report = plot.plot(results, include_libs=self.INCLUDE_LIBS) - - if self.output_destination: - return {"files": {self.output_destination: report}, - "open": "file://" + os.path.abspath( - self.output_destination)} - else: - return {"print": report} - - -@exporter.configure("html-static") -class HTMLStaticExporter(HTMLExporter): - """Generates task report in HTML format with embedded JS/CSS.""" - INCLUDE_LIBS = True - - -@exporter.configure("junit-xml") -class JUnitXMLExporter(exporter.TaskExporter): - """Generates task report in JUnit-XML format. - - An example of the report (All dates, numbers, names appearing in this - example are fictitious. Any resemblance to real things is purely - coincidental): - - .. code-block:: xml - - - - - """ - - @classmethod - def validate(cls, output_destination): - """Validate destination of report. - - :param output_destination: Destination of report - """ - # nothing to check :) - pass - - def generate(self): - test_suite = junit.JUnit("Rally test suite") - for task in self.tasks_results: - for workload in itertools.chain( - *[s["workloads"] for s in task["subtasks"]]): - w_sla = workload["sla_results"].get("sla", []) - - message = ",".join([sla["detail"] for sla in w_sla - if not sla["success"]]) - if message: - outcome = junit.JUnit.FAILURE - else: - outcome = junit.JUnit.SUCCESS - test_suite.add_test(workload["name"], - workload["full_duration"], outcome, - message) - - result = test_suite.to_xml() - - if self.output_destination: - return {"files": {self.output_destination: result}, - "open": "file://" + os.path.abspath( - self.output_destination)} - else: - return {"print": result} diff --git a/rally/plugins/common/hook/__init__.py b/rally/plugins/common/hook/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/rally/plugins/common/hook/sys_call.py b/rally/plugins/common/hook/sys_call.py deleted file mode 100644 index aa2540bb..00000000 --- a/rally/plugins/common/hook/sys_call.py +++ /dev/null @@ -1,68 +0,0 @@ -# Copyright 2016: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import json -import shlex -import subprocess - -from rally.common import logging -from rally import consts -from rally import exceptions -from rally.task import hook - - -LOG = logging.getLogger(__name__) - - -@hook.configure(name="sys_call") -class SysCallHook(hook.Hook): - """Performs system call.""" - - CONFIG_SCHEMA = { - "$schema": consts.JSON_SCHEMA, - "type": "string", - "description": "Command to execute." 
- } - - def run(self): - LOG.debug("sys_call hook: Running command %s", self.config) - proc = subprocess.Popen(shlex.split(self.config), - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - universal_newlines=True) - out, err = proc.communicate() - LOG.debug("sys_call hook: Command %s returned %s", - self.config, proc.returncode) - if proc.returncode: - self.set_error( - exception_name="n/a", # no exception class - description="Subprocess returned {}".format(proc.returncode), - details=(err or "stdout: %s" % out)) - - # NOTE(amaretskiy): Try to load JSON for charts, - # otherwise save output as-is - try: - output = json.loads(out) - for arg in ("additive", "complete"): - for out_ in output.get(arg, []): - self.add_output(**{arg: out_}) - except (TypeError, ValueError, exceptions.RallyException): - self.add_output( - complete={"title": "System call", - "chart_plugin": "TextArea", - "description": "Args: %s" % self.config, - "data": ["RetCode: %i" % proc.returncode, - "StdOut: %s" % (out or "(empty)"), - "StdErr: %s" % (err or "(empty)")]}) diff --git a/rally/plugins/common/runners/__init__.py b/rally/plugins/common/runners/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/rally/plugins/common/runners/constant.py b/rally/plugins/common/runners/constant.py deleted file mode 100644 index 53c81e58..00000000 --- a/rally/plugins/common/runners/constant.py +++ /dev/null @@ -1,356 +0,0 @@ -# Copyright 2014: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import collections -import multiprocessing -import threading -import time - -from six.moves import queue as Queue - -from rally.common import utils -from rally.common import validation -from rally import consts -from rally.task import runner -from rally.task import utils as butils - - -def _worker_process(queue, iteration_gen, timeout, concurrency, times, - context, cls, method_name, args, event_queue, aborted, - info): - """Start the scenario within threads. - - Spawn threads to support scenario execution for a fixed number of times. - This generates a constant load on the cloud under test by executing each - scenario iteration without pausing between iterations. Each thread runs - the scenario method once with passed scenario arguments and context. - After execution the result is appended to the queue. 
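Stripped of timeouts and abort handling, the loop this docstring describes amounts to the following sketch (illustrative only; the real worker below also spreads iterations across processes):

    import threading
    import time

    def constant_load(run_once, times, concurrency):
        threads = []
        for _ in range(times):
            # Block while `concurrency` iterations are already in flight.
            while sum(t.is_alive() for t in threads) >= concurrency:
                time.sleep(0.001)
            thread = threading.Thread(target=run_once)
            thread.start()
            threads.append(thread)
        for thread in threads:
            thread.join()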
-
-    :param queue: queue object to append results
-    :param iteration_gen: next iteration number generator
-    :param timeout: operation's timeout
-    :param concurrency: number of concurrently running scenario iterations
-    :param times: total number of scenario iterations to be run
-    :param context: scenario context object
-    :param cls: scenario class
-    :param method_name: scenario method name
-    :param args: scenario args
-    :param event_queue: queue object to append events
-    :param aborted: multiprocessing.Event that aborts load generation if
-        the flag is set
-    :param info: dict with the total process count and the counter of the
-        launched process
-    """
-
-    pool = collections.deque()
-    alive_threads_in_pool = 0
-    finished_threads_in_pool = 0
-
-    runner._log_worker_info(times=times, concurrency=concurrency,
-                            timeout=timeout, cls=cls, method_name=method_name,
-                            args=args)
-
-    if timeout:
-        timeout_queue = Queue.Queue()
-        collector_thr_by_timeout = threading.Thread(
-            target=utils.timeout_thread,
-            args=(timeout_queue, )
-        )
-        collector_thr_by_timeout.start()
-
-    iteration = next(iteration_gen)
-    while iteration < times and not aborted.is_set():
-        scenario_context = runner._get_scenario_context(iteration, context)
-        worker_args = (
-            queue, cls, method_name, scenario_context, args, event_queue)
-
-        thread = threading.Thread(target=runner._worker_thread,
-                                  args=worker_args)
-
-        thread.start()
-        if timeout:
-            timeout_queue.put((thread, time.time() + timeout))
-        pool.append(thread)
-        alive_threads_in_pool += 1
-
-        while alive_threads_in_pool == concurrency:
-            prev_finished_threads_in_pool = finished_threads_in_pool
-            finished_threads_in_pool = 0
-            for t in pool:
-                if not t.isAlive():
-                    finished_threads_in_pool += 1
-
-            alive_threads_in_pool -= finished_threads_in_pool
-            alive_threads_in_pool += prev_finished_threads_in_pool
-
-            if alive_threads_in_pool < concurrency:
-                # NOTE(boris-42): clean up the pool; otherwise its length
-                # would grow up to `times`, which can be arbitrarily large.
-                while pool and not pool[0].isAlive():
-                    pool.popleft().join()
-                    finished_threads_in_pool -= 1
-                break
-
-            # Sleep briefly so that these checks do not busy-wait.
-            time.sleep(0.001)
-        iteration = next(iteration_gen)
-
-    # Wait until all threads are done.
-    while pool:
-        pool.popleft().join()
-
-    if timeout:
-        timeout_queue.put((None, None,))
-        collector_thr_by_timeout.join()
-
-
-@validation.configure("check_constant")
-class CheckConstantValidator(validation.Validator):
-    """Additional schema validation for the constant runner."""
-
-    def validate(self, credentials, config, plugin_cls, plugin_cfg):
-        if plugin_cfg.get("concurrency", 1) > plugin_cfg.get("times", 1):
-            return self.fail(
-                "Parameter 'concurrency' means the number of parallel "
-                "executions of iterations. Parameter 'times' means the total "
-                "number of iteration executions. It is redundant (and "
-                "restricted) to have the number of parallel iterations "
-                "bigger than the total number of iterations.")
-
-
-@validation.add("check_constant")
-@runner.configure(name="constant")
-class ConstantScenarioRunner(runner.ScenarioRunner):
-    """Creates constant load executing a scenario a specified number of times.
-
-    This runner will place a constant load on the cloud under test by
-    executing each scenario iteration without pausing between iterations
-    up to the number of times specified in the scenario config. 
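A runner section that satisfies the schema below would look like this (values are illustrative):

    runner = {
        "type": "constant",
        "times": 100,       # total iterations across all workers
        "concurrency": 10,  # iterations in flight at any moment
        "timeout": 60.0     # optional per-iteration timeout, in seconds
    }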
- - The concurrency parameter of the scenario config controls the - number of concurrent iterations which execute during a single - scenario in order to simulate the activities of multiple users - placing load on the cloud under test. - """ - - CONFIG_SCHEMA = { - "type": "object", - "$schema": consts.JSON_SCHEMA, - "properties": { - "type": { - "type": "string", - "description": "Type of Runner." - }, - "concurrency": { - "type": "integer", - "minimum": 1, - "description": "The number of parallel iteration executions." - }, - "times": { - "type": "integer", - "minimum": 1, - "description": "Total number of iteration executions." - }, - "timeout": { - "type": "number", - "description": "Operation's timeout." - }, - "max_cpu_count": { - "type": "integer", - "minimum": 1, - "description": "The maximum number of processes to create load" - " from." - } - }, - "required": ["type"], - "additionalProperties": False - } - - def _run_scenario(self, cls, method_name, context, args): - """Runs the specified benchmark scenario with given arguments. - - This method generates a constant load on the cloud under test by - executing each scenario iteration using a pool of processes without - pausing between iterations up to the number of times specified - in the scenario config. - - :param cls: The Scenario class where the scenario is implemented - :param method_name: Name of the method that implements the scenario - :param context: Benchmark context that contains users, admin & other - information, that was created before benchmark started. - :param args: Arguments to call the scenario method with - - :returns: List of results fore each single scenario iteration, - where each result is a dictionary - """ - timeout = self.config.get("timeout", 0) # 0 means no timeout - times = self.config.get("times", 1) - concurrency = self.config.get("concurrency", 1) - iteration_gen = utils.RAMInt() - - cpu_count = multiprocessing.cpu_count() - max_cpu_used = min(cpu_count, - self.config.get("max_cpu_count", cpu_count)) - - processes_to_start = min(max_cpu_used, times, concurrency) - concurrency_per_worker, concurrency_overhead = divmod( - concurrency, processes_to_start) - - self._log_debug_info(times=times, concurrency=concurrency, - timeout=timeout, max_cpu_used=max_cpu_used, - processes_to_start=processes_to_start, - concurrency_per_worker=concurrency_per_worker, - concurrency_overhead=concurrency_overhead) - - result_queue = multiprocessing.Queue() - event_queue = multiprocessing.Queue() - - def worker_args_gen(concurrency_overhead): - while True: - yield (result_queue, iteration_gen, timeout, - concurrency_per_worker + (concurrency_overhead and 1), - times, context, cls, method_name, args, event_queue, - self.aborted) - if concurrency_overhead: - concurrency_overhead -= 1 - - process_pool = self._create_process_pool( - processes_to_start, _worker_process, - worker_args_gen(concurrency_overhead)) - self._join_processes(process_pool, result_queue, event_queue) - - -def _run_scenario_once_with_unpack_args(args): - # NOTE(andreykurilin): `pool.imap` is used in - # ConstantForDurationScenarioRunner. It does not want to work with - # instance-methods, class-methods and static-methods. Also, it can't - # transmit positional or keyword arguments to destination function. - # While original `rally.task.runner._run_scenario_once` accepts - # multiple arguments instead of one big tuple with all arguments, we - # need to hardcode unpacking here(all other runners are able to - # transmit arguments in proper way). 
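The limitation this note describes is the usual multiprocessing pickling restriction; the general workaround pattern, independent of the deleted code and with illustrative names, looks like this:

    import multiprocessing

    def _apply(packed):
        # pool.imap passes a single argument, so bundle the callable
        # with its arguments into one tuple and unpack it here, at
        # module level, where pickling works.
        func = packed[0]
        return func(*packed[1:])

    def square(x):
        return x * x

    if __name__ == "__main__":
        pool = multiprocessing.Pool(2)
        print(list(pool.imap(_apply, [(square, 3), (square, 4)])))  # [9, 16]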
-    return runner._run_scenario_once(*args)
-
-
-@runner.configure(name="constant_for_duration")
-class ConstantForDurationScenarioRunner(runner.ScenarioRunner):
-    """Creates constant load executing a scenario for an interval of time.
-
-    This runner will place a constant load on the cloud under test by
-    executing each scenario iteration without pausing between iterations
-    until a specified interval of time has elapsed.
-
-    The concurrency parameter of the scenario config controls the
-    number of concurrent iterations which execute during a single
-    scenario in order to simulate the activities of multiple users
-    placing load on the cloud under test.
-    """
-
-    CONFIG_SCHEMA = {
-        "type": "object",
-        "$schema": consts.JSON_SCHEMA,
-        "properties": {
-            "type": {
-                "type": "string",
-                "description": "Type of Runner."
-            },
-            "concurrency": {
-                "type": "integer",
-                "minimum": 1,
-                "description": "The number of parallel iteration executions."
-            },
-            "duration": {
-                "type": "number",
-                "minimum": 0.0,
-                "description": "The number of seconds during which to generate"
-                               " a load."
-            },
-            "timeout": {
-                "type": "number",
-                "minimum": 1,
-                "description": "Operation's timeout."
-            }
-        },
-        "required": ["type", "duration"],
-        "additionalProperties": False
-    }
-
-    @staticmethod
-    def _iter_scenario_args(cls, method, ctx, args, event_queue, aborted):
-        def _scenario_args(i):
-            if aborted.is_set():
-                raise StopIteration()
-            return (cls, method, runner._get_scenario_context(i, ctx), args,
-                    event_queue)
-        return _scenario_args
-
-    def _run_scenario(self, cls, method, context, args):
-        """Runs the specified benchmark scenario with given arguments.
-
-        :param cls: The Scenario class where the scenario is implemented
-        :param method: Name of the method that implements the scenario
-        :param context: Benchmark context that contains users, admin & other
-            information that was created before the benchmark started.
-        :param args: Arguments to call the scenario method with
-
-        :returns: List of results for each single scenario iteration,
-            where each result is a dictionary
-        """
-        timeout = self.config.get("timeout", 600)
-        concurrency = self.config.get("concurrency", 1)
-        duration = self.config.get("duration")
-
-        # FIXME(andreykurilin): unify `_worker_process`, use it here, and
-        #     remove the usage of `multiprocessing.Pool` (a separate process
-        #     for each concurrent iteration is redundant). 
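For comparison with the constant runner above, a duration-bound configuration matching this schema (values illustrative):

    runner = {
        "type": "constant_for_duration",
        "duration": 120.0,  # seconds of sustained load; required
        "concurrency": 5,
        "timeout": 600      # per-result wait, in seconds
    }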
- pool = multiprocessing.Pool(concurrency) - manager = multiprocessing.Manager() - event_queue = manager.Queue() - stop_event_listener = threading.Event() - - def event_listener(): - while not stop_event_listener.isSet(): - while not event_queue.empty(): - self.send_event(**event_queue.get()) - else: - time.sleep(0.01) - - event_listener_thread = threading.Thread(target=event_listener) - event_listener_thread.start() - - run_args = butils.infinite_run_args_generator( - self._iter_scenario_args( - cls, method, context, args, event_queue, self.aborted)) - iter_result = pool.imap(_run_scenario_once_with_unpack_args, run_args) - - start = time.time() - while True: - try: - result = iter_result.next(timeout) - except multiprocessing.TimeoutError as e: - result = runner.format_result_on_timeout(e, timeout) - except StopIteration: - break - - self._send_result(result) - - if time.time() - start > duration: - break - - stop_event_listener.set() - event_listener_thread.join() - pool.terminate() - pool.join() - self._flush_results() diff --git a/rally/plugins/common/runners/rps.py b/rally/plugins/common/runners/rps.py deleted file mode 100644 index 55ab460e..00000000 --- a/rally/plugins/common/runners/rps.py +++ /dev/null @@ -1,298 +0,0 @@ -# Copyright 2014: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import collections -import multiprocessing -import threading -import time - -from six.moves import queue as Queue - -from rally.common import logging -from rally.common import utils -from rally.common import validation -from rally import consts -from rally.task import runner - -LOG = logging.getLogger(__name__) - - -def _worker_process(queue, iteration_gen, timeout, times, max_concurrent, - context, cls, method_name, args, event_queue, aborted, - runs_per_second, rps_cfg, processes_to_start, info): - """Start scenario within threads. - - Spawn N threads per second. Each thread runs the scenario once, and appends - result to queue. A maximum of max_concurrent threads will be ran - concurrently. 
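The pacing rule described here reduces to a simple invariant: never let started/elapsed exceed the requested rate, and never exceed the concurrency cap. A single-process sketch, illustrative only:

    import threading
    import time

    def rps_load(run_once, times, rps, max_concurrent):
        threads = []
        start = time.time()
        for i in range(times):
            # Hold back while ahead of the requested rate or at the cap.
            while (i / max(time.time() - start, 1e-9) > rps
                   or sum(t.is_alive() for t in threads) >= max_concurrent):
                time.sleep(0.001)
            thread = threading.Thread(target=run_once)
            thread.start()
            threads.append(thread)
        for thread in threads:
            thread.join()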
- - :param queue: queue object to append results - :param iteration_gen: next iteration number generator - :param timeout: operation's timeout - :param times: total number of scenario iterations to be run - :param max_concurrent: maximum worker concurrency - :param context: scenario context object - :param cls: scenario class - :param method_name: scenario method name - :param args: scenario args - :param aborted: multiprocessing.Event that aborts load generation if - the flag is set - :param runs_per_second: function that should return desired rps value - :param rps_cfg: rps section from task config - :param processes_to_start: int, number of started processes for scenario - execution - :param info: info about all processes count and counter of runned process - """ - - pool = collections.deque() - if isinstance(rps_cfg, dict): - rps = rps_cfg["start"] - else: - rps = rps_cfg - sleep = 1.0 / rps - - runner._log_worker_info(times=times, rps=rps, timeout=timeout, - cls=cls, method_name=method_name, args=args) - - time.sleep( - (sleep * info["processes_counter"]) / info["processes_to_start"]) - - start = time.time() - timeout_queue = Queue.Queue() - - if timeout: - collector_thr_by_timeout = threading.Thread( - target=utils.timeout_thread, - args=(timeout_queue, ) - ) - collector_thr_by_timeout.start() - - i = 0 - while i < times and not aborted.is_set(): - scenario_context = runner._get_scenario_context(next(iteration_gen), - context) - worker_args = ( - queue, cls, method_name, scenario_context, args, event_queue) - thread = threading.Thread(target=runner._worker_thread, - args=worker_args) - - i += 1 - thread.start() - if timeout: - timeout_queue.put((thread, time.time() + timeout)) - pool.append(thread) - - time_gap = time.time() - start - real_rps = i / time_gap if time_gap else "Infinity" - - LOG.debug("Worker: %s rps: %s (requested rps: %s)" % - (i, real_rps, runs_per_second( - rps_cfg, start, processes_to_start))) - - # try to join latest thread(s) until it finished, or until time to - # start new thread (if we have concurrent slots available) - while i / (time.time() - start) > runs_per_second( - rps_cfg, start, processes_to_start) or ( - len(pool) >= max_concurrent): - if pool: - pool[0].join(0.001) - if not pool[0].isAlive(): - pool.popleft() - else: - time.sleep(0.001) - - while pool: - pool.popleft().join() - - if timeout: - timeout_queue.put((None, None,)) - collector_thr_by_timeout.join() - - -@validation.configure("check_rps") -class CheckPRSValidator(validation.Validator): - """Additional schema validation for rps runner""" - - def validate(self, credentials, config, plugin_cls, plugin_cfg): - if isinstance(plugin_cfg["rps"], dict): - if plugin_cfg["rps"]["end"] < plugin_cfg["rps"]["start"]: - msg = "rps end value must not be less than rps start value." - return self.fail(msg) - - -@validation.add("check_rps") -@runner.configure(name="rps") -class RPSScenarioRunner(runner.ScenarioRunner): - """Scenario runner that does the job with specified frequency. - - Every single benchmark scenario iteration is executed with specified - frequency (runs per second) in a pool of processes. The scenario will be - launched for a fixed number of times in total (specified in the config). - - An example of a rps scenario is booting 1 VM per second. This - execution type is thus very helpful in understanding the maximal load that - a certain cloud can handle. 
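Both shapes accepted by the "rps" property in the schema below, side by side (values illustrative):

    # Flat rate: two new iterations per second until 500 have started.
    flat = {"type": "rps", "times": 500, "rps": 2}

    # Ramp: start at 1 rps, add 1 rps every 30 seconds, cap at 10 rps.
    ramped = {
        "type": "rps",
        "times": 500,
        "rps": {"start": 1, "end": 10, "step": 1, "duration": 30}
    }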
- """ - - CONFIG_SCHEMA = { - "type": "object", - "$schema": consts.JSON_SCHEMA, - "properties": { - "type": { - "type": "string" - }, - "times": { - "type": "integer", - "minimum": 1 - }, - "rps": { - "anyOf": [ - { - "description": "Generate constant requests per second " - "during the whole workload.", - "type": "number", - "exclusiveMinimum": True, - "minimum": 0 - }, - { - "type": "object", - "description": "Increase requests per second for " - "specified value each time after a " - "certain number of seconds.", - "properties": { - "start": { - "type": "number", - "minimum": 1 - }, - "end": { - "type": "number", - "minimum": 1 - }, - "step": { - "type": "number", - "minimum": 1 - }, - "duration": { - "type": "number", - "minimum": 1 - } - }, - "required": ["start", "end", "step"] - } - ], - }, - "timeout": { - "type": "number", - }, - "max_concurrency": { - "type": "integer", - "minimum": 1 - }, - "max_cpu_count": { - "type": "integer", - "minimum": 1 - } - }, - "required": ["type", "times", "rps"], - "additionalProperties": False - } - - def _run_scenario(self, cls, method_name, context, args): - """Runs the specified benchmark scenario with given arguments. - - Every single benchmark scenario iteration is executed with specified - frequency (runs per second) in a pool of processes. The scenario will - be launched for a fixed number of times in total (specified in the - config). - - :param cls: The Scenario class where the scenario is implemented - :param method_name: Name of the method that implements the scenario - :param context: Benchmark context that contains users, admin & other - information, that was created before benchmark started. - :param args: Arguments to call the scenario method with - - :returns: List of results fore each single scenario iteration, - where each result is a dictionary - """ - times = self.config["times"] - timeout = self.config.get("timeout", 0) # 0 means no timeout - iteration_gen = utils.RAMInt() - - cpu_count = multiprocessing.cpu_count() - max_cpu_used = min(cpu_count, - self.config.get("max_cpu_count", cpu_count)) - - def runs_per_second(rps_cfg, start_timer, number_of_processes): - """At the given second return desired rps.""" - - if not isinstance(rps_cfg, dict): - return float(rps_cfg) / number_of_processes - stage_order = (time.time() - start_timer) / rps_cfg.get( - "duration", 1) - 1 - rps = (float(rps_cfg["start"] + rps_cfg["step"] * stage_order) / - number_of_processes) - - return min(rps, float(rps_cfg["end"])) - - processes_to_start = min(max_cpu_used, times, - self.config.get("max_concurrency", times)) - times_per_worker, times_overhead = divmod(times, processes_to_start) - - # Determine concurrency per worker - concurrency_per_worker, concurrency_overhead = divmod( - self.config.get("max_concurrency", times), processes_to_start) - - self._log_debug_info(times=times, timeout=timeout, - max_cpu_used=max_cpu_used, - processes_to_start=processes_to_start, - times_per_worker=times_per_worker, - times_overhead=times_overhead, - concurrency_per_worker=concurrency_per_worker, - concurrency_overhead=concurrency_overhead) - - result_queue = multiprocessing.Queue() - event_queue = multiprocessing.Queue() - - def worker_args_gen(times_overhead, concurrency_overhead): - """Generate arguments for process worker. - - Remainder of threads per process division is distributed to - process workers equally - one thread per each process worker - until the remainder equals zero. The same logic is applied - to concurrency overhead. 
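Concretely, the divmod-based distribution described above behaves as follows (a worked stand-in, not the deleted code):

    def distribute(total, workers):
        # Every worker gets the base share; the first `overhead`
        # workers get one extra unit until the remainder is spent.
        base, overhead = divmod(total, workers)
        return [base + (1 if i < overhead else 0) for i in range(workers)]

    # distribute(10, 4) == [3, 3, 2, 2]
    # The same split is applied independently to `times` and to
    # `max_concurrency` in the generator above.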
- :param times_overhead: remaining number of threads to be - distributed to workers - :param concurrency_overhead: remaining number of maximum - concurrent threads to be - distributed to workers - """ - while True: - yield ( - result_queue, iteration_gen, timeout, - times_per_worker + (times_overhead and 1), - concurrency_per_worker + (concurrency_overhead and 1), - context, cls, method_name, args, event_queue, - self.aborted, runs_per_second, self.config["rps"], - processes_to_start - ) - if times_overhead: - times_overhead -= 1 - if concurrency_overhead: - concurrency_overhead -= 1 - - process_pool = self._create_process_pool( - processes_to_start, _worker_process, - worker_args_gen(times_overhead, concurrency_overhead)) - self._join_processes(process_pool, result_queue, event_queue) diff --git a/rally/plugins/common/runners/serial.py b/rally/plugins/common/runners/serial.py deleted file mode 100644 index 27e0a117..00000000 --- a/rally/plugins/common/runners/serial.py +++ /dev/null @@ -1,79 +0,0 @@ -# Copyright (C) 2014 Yahoo! Inc. All Rights Reserved. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally.common import utils as rutils -from rally import consts -from rally.task import runner - - -@runner.configure(name="serial") -class SerialScenarioRunner(runner.ScenarioRunner): - """Scenario runner that executes benchmark scenarios serially. - - Unlike scenario runners that execute in parallel, the serial scenario - runner executes scenarios one-by-one in the same python interpreter process - as Rally. This allows you to benchmark your scenario without introducing - any concurrent operations as well as interactively debug the scenario - from the same command that you use to start Rally. - """ - - # NOTE(mmorais): additionalProperties is set True to allow switching - # between parallel and serial runners by modifying only *type* property - CONFIG_SCHEMA = { - "type": "object", - "$schema": consts.JSON_SCHEMA, - "properties": { - "type": { - "type": "string" - }, - "times": { - "type": "integer", - "minimum": 1 - } - }, - "additionalProperties": True - } - - def _run_scenario(self, cls, method_name, context, args): - """Runs the specified benchmark scenario with given arguments. - - The scenario iterations are executed one-by-one in the same python - interpreter process as Rally. This allows you to benchmark your - scenario without introducing any concurrent operations as well as - interactively debug the scenario from the same command that you use - to start Rally. - - :param cls: The Scenario class where the scenario is implemented - :param method_name: Name of the method that implements the scenario - :param context: Benchmark context that contains users, admin & other - information, that was created before benchmark started. 
- :param args: Arguments to call the scenario method with - - :returns: List of results fore each single scenario iteration, - where each result is a dictionary - """ - times = self.config.get("times", 1) - - event_queue = rutils.DequeAsQueue(self.event_queue) - - for i in range(times): - if self.aborted.is_set(): - break - result = runner._run_scenario_once( - cls, method_name, runner._get_scenario_context(i, context), - args, event_queue) - self._send_result(result) - - self._flush_results() diff --git a/rally/plugins/common/scenarios/__init__.py b/rally/plugins/common/scenarios/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/rally/plugins/common/scenarios/dummy/__init__.py b/rally/plugins/common/scenarios/dummy/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/rally/plugins/common/scenarios/dummy/dummy.py b/rally/plugins/common/scenarios/dummy/dummy.py deleted file mode 100644 index edab847e..00000000 --- a/rally/plugins/common/scenarios/dummy/dummy.py +++ /dev/null @@ -1,243 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import random - -from rally.common.i18n import _ -from rally.common import utils -from rally.common import validation -from rally import exceptions -from rally.task import atomic -from rally.task import scenario - - -"""Dummy scenarios for testing Rally engine at scale.""" - - -class DummyScenarioException(exceptions.RallyException): - error_code = 530 - msg_fmt = _("Dummy scenario expected exception: '%(message)s'") - - -@scenario.configure(name="Dummy.failure") -class DummyFailure(scenario.Scenario): - - def run(self, sleep=0.1, from_iteration=0, to_iteration=0, each=1): - """Raise errors in some iterations. - - :param sleep: float iteration sleep time in seconds - :param from_iteration: int iteration number which starts range - of failed iterations - :param to_iteration: int iteration number which ends range of - failed iterations - :param each: int cyclic number of iteration which actually raises - an error in selected range. For example, each=3 will - raise error in each 3rd iteration. - """ - utils.interruptable_sleep(sleep) - if from_iteration <= self.context["iteration"] <= to_iteration: - if each and not self.context["iteration"] % each: - raise DummyScenarioException(_("Expected failure")) - - -@scenario.configure(name="Dummy.dummy") -class Dummy(scenario.Scenario): - - def run(self, sleep=0): - """Do nothing and sleep for the given number of seconds (0 by default). - - Dummy.dummy can be used for testing performance of different - ScenarioRunners and of the ability of rally to store a large - amount of results. - - :param sleep: idle time of method (in seconds). - """ - utils.interruptable_sleep(sleep) - - -@validation.add("number", param_name="size_of_message", minval=1, - integer_only=True, nullable=True) -@scenario.configure(name="Dummy.dummy_exception") -class DummyException(scenario.Scenario): - - def run(self, size_of_message=1, sleep=1, message=""): - """Throw an exception. 
- - Dummy.dummy_exception can be used for test if exceptions are processed - properly by ScenarioRunners and benchmark and analyze rally - results storing process. - - :param size_of_message: int size of the exception message - :param sleep: idle time of method (in seconds). - :param message: message of the exception - :raises DummyScenarioException: raise exception for test - """ - utils.interruptable_sleep(sleep) - - message = message or "M" * size_of_message - raise DummyScenarioException(message) - - -@validation.add("number", param_name="exception_probability", - minval=0, maxval=1, integer_only=False, nullable=True) -@scenario.configure(name="Dummy.dummy_exception_probability") -class DummyExceptionProbability(scenario.Scenario): - - def run(self, exception_probability=0.5): - """Throw an exception with given probability. - - Dummy.dummy_exception_probability can be used to test if exceptions - are processed properly by ScenarioRunners. This scenario will throw - an exception sometimes, depending on the given exception probability. - - :param exception_probability: Sets how likely it is that an exception - will be thrown. Float between 0 and 1 - 0=never 1=always. - """ - if random.random() < exception_probability: - raise DummyScenarioException( - "Dummy Scenario Exception: Probability: %s" - % exception_probability) - - -@scenario.configure(name="Dummy.dummy_output") -class DummyOutput(scenario.Scenario): - - def run(self, random_range=25): - """Generate dummy output. - - This scenario generates example of output data. - :param random_range: max int limit for generated random values - """ - rand = lambda n: [n, random.randint(1, random_range)] - desc = "This is a description text for %s" - - self.add_output(additive={"title": "Additive StatsTable", - "description": desc % "Additive StatsTable", - "chart_plugin": "StatsTable", - "data": [rand("foo stat"), rand("bar stat"), - rand("spam stat")]}) - - self.add_output(additive={"title": ("Additive StackedArea " - "(no description)"), - "chart_plugin": "StackedArea", - "data": [rand("foo %d" % i) - for i in range(1, 7)], - "label": "Measure this in Foo units"}) - - self.add_output(additive={"title": "Additive Lines", - "description": ( - desc % "Additive Lines"), - "chart_plugin": "Lines", - "data": [rand("bar %d" % i) - for i in range(1, 4)], - "label": "Measure this in Bar units"}) - self.add_output(additive={"title": "Additive Pie", - "description": desc % "Additive Pie", - "chart_plugin": "Pie", - "data": [rand("spam %d" % i) - for i in range(1, 4)]}, - complete={"title": "Complete Lines", - "description": desc % "Complete Lines", - "chart_plugin": "Lines", - "data": [ - [name, [rand(i) for i in range(1, 8)]] - for name in ("Foo", "Bar", "Spam")], - "label": "Measure this is some units", - "axis_label": ("This is a custom " - "X-axis label")}) - self.add_output(complete={"title": "Complete StackedArea", - "description": desc % "Complete StackedArea", - "chart_plugin": "StackedArea", - "data": [ - [name, [rand(i) for i in range(50)]] - for name in ("alpha", "beta", "gamma")], - "label": "Yet another measurement units", - "axis_label": ("This is a custom " - "X-axis label")}) - self.add_output( - complete={"title": "Arbitrary Text", - "chart_plugin": "TextArea", - "data": ["Lorem ipsum dolor sit amet, consectetur " - "adipiscing elit, sed do eiusmod tempor " - "incididunt ut labore et dolore magna " - "aliqua." 
* 2] * 4}) - self.add_output( - complete={"title": "Complete Pie (no description)", - "chart_plugin": "Pie", - "data": [rand("delta"), rand("epsilon"), rand("zeta"), - rand("theta"), rand("lambda"), rand("omega")]}) - - data = {"cols": ["mu column", "xi column", "pi column", - "tau column", "chi column"], - "rows": [([name + " row"] + [rand(i)[1] for i in range(4)]) - for name in ("iota", "nu", "rho", "phi", "psi")]} - self.add_output(complete={"title": "Complete Table", - "description": desc % "Complete Table", - "chart_plugin": "Table", - "data": data}) - - -@scenario.configure(name="Dummy.dummy_random_fail_in_atomic") -class DummyRandomFailInAtomic(scenario.Scenario): - """Randomly throw exceptions in atomic actions.""" - - @atomic.action_timer("dummy_fail_test") - def _random_fail_emitter(self, exception_probability): - """Throw an exception with given probability. - - :raises KeyError: when exception_probability is bigger - """ - if random.random() < exception_probability: - raise KeyError("Dummy test exception") - - def run(self, exception_probability=0.5): - """Dummy.dummy_random_fail_in_atomic in dummy actions. - - Can be used to test atomic actions - failures processing. - - :param exception_probability: Probability with which atomic actions - fail in this dummy scenario (0 <= p <= 1) - """ - self._random_fail_emitter(exception_probability) - self._random_fail_emitter(exception_probability) - - -@scenario.configure(name="Dummy.dummy_random_action") -class DummyRandomAction(scenario.Scenario): - - def run(self, actions_num=5, sleep_min=0, sleep_max=2): - """Sleep random time in dummy actions. - - :param actions_num: int number of actions to generate - :param sleep_min: minimal time to sleep, numeric seconds - :param sleep_max: maximum time to sleep, numeric seconds - """ - for idx in range(actions_num): - duration = random.uniform(sleep_min, sleep_max) - with atomic.ActionTimer(self, "action_%d" % idx): - utils.interruptable_sleep(duration) - - -@scenario.configure(name="Dummy.dummy_timed_atomic_actions") -class DummyTimedAtomicAction(scenario.Scenario): - - def run(self, number_of_actions=5, sleep_factor=1): - """Run some sleepy atomic actions for SLA atomic action tests. - - :param number_of_actions: int number of atomic actions to create - :param sleep_factor: int multiplier for number of seconds to sleep - """ - for sleeptime in range(number_of_actions): - with atomic.ActionTimer(self, "action_%d" % sleeptime): - utils.interruptable_sleep(sleeptime * sleep_factor) diff --git a/rally/plugins/common/scenarios/requests/__init__.py b/rally/plugins/common/scenarios/requests/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/rally/plugins/common/scenarios/requests/http_requests.py b/rally/plugins/common/scenarios/requests/http_requests.py deleted file mode 100644 index e28f2aec..00000000 --- a/rally/plugins/common/scenarios/requests/http_requests.py +++ /dev/null @@ -1,56 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
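All of the dummy scenarios above follow the same plugin pattern; the sketch
below condenses it into one standalone example ("DummyLike" and "my_action"
are invented names for illustration, not part of Rally):

.. code-block:: python

    import random

    from rally.common import utils
    from rally.task import atomic
    from rally.task import scenario


    @scenario.configure(name="DummyLike.timed_action")
    class DummyLike(scenario.Scenario):

        def run(self, sleep_max=2):
            # each ActionTimer block is recorded as a separately timed
            # atomic action in the iteration results
            with atomic.ActionTimer(self, "my_action"):
                utils.interruptable_sleep(random.uniform(0, sleep_max))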
-
-import random
-
-from rally.plugins.common.scenarios.requests import utils
-from rally.task import scenario
-
-
-"""Scenarios for HTTP requests."""
-
-
-@scenario.configure(name="HttpRequests.check_request")
-class HttpRequestsCheckRequest(utils.RequestScenario):
-
-    def run(self, url, method, status_code, **kwargs):
-        """A standard way to benchmark web services.
-
-        This benchmark makes a request and checks it against the expected
-        response.
-
-        :param url: url for the Request object
-        :param method: method for the Request object
-        :param status_code: expected response code
-        :param kwargs: optional additional request parameters
-        """
-
-        self._check_request(url, method, status_code, **kwargs)
-
-
-@scenario.configure(name="HttpRequests.check_random_request")
-class HttpRequestsCheckRandomRequest(utils.RequestScenario):
-
-    def run(self, requests, status_code):
-        """Benchmark a list of requests.
-
-        This scenario takes a random url from the list of requests and
-        raises an exception if the response is not the expected one.
-
-        :param requests: list of request dicts
-        :param status_code: expected response code; it is used only if a
-                            request does not specify its own status code
-        """
-
-        request = random.choice(requests)
-        request.setdefault("status_code", status_code)
-        self._check_request(**request)
diff --git a/rally/plugins/common/scenarios/requests/utils.py b/rally/plugins/common/scenarios/requests/utils.py
deleted file mode 100644
index c34c81df..00000000
--- a/rally/plugins/common/scenarios/requests/utils.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import requests
-
-from rally.common.i18n import _
-from rally.task import atomic
-from rally.task import scenario
-
-
-class RequestScenario(scenario.Scenario):
-    """Base class for Request scenarios with basic atomic actions."""
-
-    @atomic.action_timer("requests.check_request")
-    def _check_request(self, url, method, status_code, **kwargs):
-        """Compare request status code with the specified code.
-
-        :param status_code: expected status code of request
-        :param url: uniform resource locator
-        :param method: type of request method (GET | POST ..)
-        :param kwargs: optional additional request parameters
-        :raises ValueError: if the returned HTTP status code is not equal
-            to the expected status code
-        """
-
-        resp = requests.request(method, url, **kwargs)
-        if status_code != resp.status_code:
-            error_msg = _("Expected HTTP status code is `%s`, actual `%s`")
-            raise ValueError(
-                error_msg % (status_code, resp.status_code))
diff --git a/rally/plugins/common/sla/__init__.py b/rally/plugins/common/sla/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/rally/plugins/common/sla/failure_rate.py b/rally/plugins/common/sla/failure_rate.py
deleted file mode 100644
index 0a89b061..00000000
--- a/rally/plugins/common/sla/failure_rate.py
+++ /dev/null
@@ -1,68 +0,0 @@
-# Copyright 2014: Mirantis Inc.
-# All Rights Reserved.
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -""" -SLA (Service-level agreement) is set of details for determining compliance -with contracted values such as maximum error rate or minimum response time. -""" - -from rally.common.i18n import _ -from rally import consts -from rally.task import sla - - -@sla.configure(name="failure_rate") -class FailureRate(sla.SLA): - """Failure rate minimum and maximum in percents.""" - CONFIG_SCHEMA = { - "type": "object", - "$schema": consts.JSON_SCHEMA, - "properties": { - "min": {"type": "number", "minimum": 0.0, "maximum": 100.0}, - "max": {"type": "number", "minimum": 0.0, "maximum": 100.0} - }, - "minProperties": 1, - "additionalProperties": False, - } - - def __init__(self, criterion_value): - super(FailureRate, self).__init__(criterion_value) - self.min_percent = self.criterion_value.get("min", 0) - self.max_percent = self.criterion_value.get("max", 100) - self.errors = 0 - self.total = 0 - self.error_rate = 0.0 - - def add_iteration(self, iteration): - self.total += 1 - if iteration["error"]: - self.errors += 1 - self.error_rate = self.errors * 100.0 / self.total - self.success = self.min_percent <= self.error_rate <= self.max_percent - return self.success - - def merge(self, other): - self.total += other.total - self.errors += other.errors - if self.total: - self.error_rate = self.errors * 100.0 / self.total - self.success = self.min_percent <= self.error_rate <= self.max_percent - return self.success - - def details(self): - return (_("Failure rate criteria %.2f%% <= %.2f%% <= %.2f%% - %s") % - (self.min_percent, self.error_rate, self.max_percent, - self.status())) diff --git a/rally/plugins/common/sla/iteration_time.py b/rally/plugins/common/sla/iteration_time.py deleted file mode 100644 index 5512dc1b..00000000 --- a/rally/plugins/common/sla/iteration_time.py +++ /dev/null @@ -1,50 +0,0 @@ -# Copyright 2014: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -""" -SLA (Service-level agreement) is set of details for determining compliance -with contracted values such as maximum error rate or minimum response time. 
-""" - -from rally.common.i18n import _ -from rally.task import sla - - -@sla.configure(name="max_seconds_per_iteration") -class IterationTime(sla.SLA): - """Maximum time for one iteration in seconds.""" - CONFIG_SCHEMA = {"type": "number", "minimum": 0.0, - "exclusiveMinimum": True} - - def __init__(self, criterion_value): - super(IterationTime, self).__init__(criterion_value) - self.max_iteration_time = 0.0 - - def add_iteration(self, iteration): - if iteration["duration"] > self.max_iteration_time: - self.max_iteration_time = iteration["duration"] - self.success = self.max_iteration_time <= self.criterion_value - return self.success - - def merge(self, other): - if other.max_iteration_time > self.max_iteration_time: - self.max_iteration_time = other.max_iteration_time - self.success = self.max_iteration_time <= self.criterion_value - return self.success - - def details(self): - return (_("Maximum seconds per iteration %.2fs <= %.2fs - %s") % - (self.max_iteration_time, self.criterion_value, self.status())) diff --git a/rally/plugins/common/sla/max_average_duration.py b/rally/plugins/common/sla/max_average_duration.py deleted file mode 100644 index 4ada4413..00000000 --- a/rally/plugins/common/sla/max_average_duration.py +++ /dev/null @@ -1,53 +0,0 @@ -# Copyright 2014: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -""" -SLA (Service-level agreement) is set of details for determining compliance -with contracted values such as maximum error rate or minimum response time. -""" - -from rally.common.i18n import _ -from rally.common import streaming_algorithms -from rally.task import sla - - -@sla.configure(name="max_avg_duration") -class MaxAverageDuration(sla.SLA): - """Maximum average duration of one iteration in seconds.""" - CONFIG_SCHEMA = {"type": "number", "minimum": 0.0, - "exclusiveMinimum": True} - - def __init__(self, criterion_value): - super(MaxAverageDuration, self).__init__(criterion_value) - self.avg = 0.0 - self.avg_comp = streaming_algorithms.MeanComputation() - - def add_iteration(self, iteration): - if not iteration.get("error"): - self.avg_comp.add(iteration["duration"]) - self.avg = self.avg_comp.result() - self.success = self.avg <= self.criterion_value - return self.success - - def merge(self, other): - self.avg_comp.merge(other.avg_comp) - self.avg = self.avg_comp.result() or 0.0 - self.success = self.avg <= self.criterion_value - return self.success - - def details(self): - return (_("Average duration of one iteration %.2fs <= %.2fs - %s") % - (self.avg, self.criterion_value, self.status())) diff --git a/rally/plugins/common/sla/max_average_duration_per_atomic.py b/rally/plugins/common/sla/max_average_duration_per_atomic.py deleted file mode 100644 index 0773c597..00000000 --- a/rally/plugins/common/sla/max_average_duration_per_atomic.py +++ /dev/null @@ -1,73 +0,0 @@ -# Copyright 2016: Mirantis Inc. -# All Rights Reserved. 
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-"""
-SLA (Service-level agreement) is a set of details for determining compliance
-with contracted values such as maximum error rate or minimum response time.
-"""
-
-import collections
-
-from rally.common.i18n import _
-from rally.common import streaming_algorithms
-from rally import consts
-from rally.task import sla
-
-
-@sla.configure(name="max_avg_duration_per_atomic")
-class MaxAverageDurationPerAtomic(sla.SLA):
-    """Maximum average duration of one iteration's atomic actions, in seconds."""
-    CONFIG_SCHEMA = {"type": "object", "$schema": consts.JSON_SCHEMA,
-                     "patternProperties": {".*": {
-                         "type": "number",
-                         "description": "The name of atomic action."}},
-                     "minProperties": 1,
-                     "additionalProperties": False}
-
-    def __init__(self, criterion_value):
-        super(MaxAverageDurationPerAtomic, self).__init__(criterion_value)
-        self.avg_by_action = collections.defaultdict(float)
-        self.avg_comp_by_action = collections.defaultdict(
-            streaming_algorithms.MeanComputation)
-        self.criterion_items = self.criterion_value.items()
-
-    def add_iteration(self, iteration):
-        if not iteration.get("error"):
-            for action, value in iteration["atomic_actions"].items():
-                self.avg_comp_by_action[action].add(value)
-                result = self.avg_comp_by_action[action].result()
-                self.avg_by_action[action] = result
-        self.success = all(self.avg_by_action[atom] <= val
-                           for atom, val in self.criterion_items)
-        return self.success
-
-    def merge(self, other):
-        for atom, comp in self.avg_comp_by_action.items():
-            if atom in other.avg_comp_by_action:
-                comp.merge(other.avg_comp_by_action[atom])
-        self.avg_by_action = {a: comp.result() or 0.0
-                              for a, comp in self.avg_comp_by_action.items()}
-        self.success = all(self.avg_by_action[atom] <= val
-                           for atom, val in self.criterion_items)
-        return self.success
-
-    def details(self):
-        strs = [_("Action: '%s'. %.2fs <= %.2fs") %
                (atom, self.avg_by_action[atom], val)
-                for atom, val in self.criterion_items]
-        head = _("Average duration of one iteration for atomic actions:")
-        end = _("Status: %s") % self.status()
-        return "\n".join([head] + strs + [end])
diff --git a/rally/plugins/common/sla/outliers.py b/rally/plugins/common/sla/outliers.py
deleted file mode 100644
index 8f88072c..00000000
--- a/rally/plugins/common/sla/outliers.py
+++ /dev/null
@@ -1,112 +0,0 @@
-# Copyright 2014: Mirantis Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
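For orientation, a sketch of how SLA plugins like those above are selected
in the "sla" section of a task config (the plugin names are the ones
registered above; the threshold values and the atomic action name are
illustrative):

.. code-block:: json

    "sla": {
        "failure_rate": {"max": 0},
        "max_seconds_per_iteration": 4.0,
        "max_avg_duration_per_atomic": {"some_atomic_action": 2.5}
    }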
- - -""" -SLA (Service-level agreement) is set of details for determining compliance -with contracted values such as maximum error rate or minimum response time. -""" - -from rally.common.i18n import _ -from rally.common import streaming_algorithms -from rally import consts -from rally.task import sla - - -@sla.configure(name="outliers") -class Outliers(sla.SLA): - """Limit the number of outliers (iterations that take too much time). - - The outliers are detected automatically using the computation of the mean - and standard deviation (std) of the data. - """ - CONFIG_SCHEMA = { - "type": "object", - "$schema": consts.JSON_SCHEMA, - "properties": { - "max": {"type": "integer", "minimum": 0}, - "min_iterations": {"type": "integer", "minimum": 3}, - "sigmas": {"type": "number", "minimum": 0.0, - "exclusiveMinimum": True} - }, - "additionalProperties": False, - } - - def __init__(self, criterion_value): - super(Outliers, self).__init__(criterion_value) - self.max_outliers = self.criterion_value.get("max", 0) - # NOTE(msdubov): Having 3 as default is reasonable (need enough data). - self.min_iterations = self.criterion_value.get("min_iterations", 3) - self.sigmas = self.criterion_value.get("sigmas", 3.0) - self.iterations = 0 - self.outliers = 0 - self.threshold = None - self.mean_comp = streaming_algorithms.MeanComputation() - self.std_comp = streaming_algorithms.StdDevComputation() - - def add_iteration(self, iteration): - # NOTE(ikhudoshyn): This method can not be implemented properly. - # After adding a new iteration, both mean and standard deviation - # may change. Hence threshold will change as well. In this case we - # should again compare durations of all accounted iterations - # to the threshold. Unfortunately we can not do it since - # we do not store durations. - # Implementation provided here only gives rough approximation - # of outliers number. - if not iteration.get("error"): - duration = iteration["duration"] - self.iterations += 1 - - # NOTE(msdubov): First check if the current iteration is an outlier - if ((self.iterations >= self.min_iterations and self.threshold and - duration > self.threshold)): - self.outliers += 1 - - # NOTE(msdubov): Then update the threshold value - self.mean_comp.add(duration) - self.std_comp.add(duration) - if self.iterations >= 2: - mean = self.mean_comp.result() - std = self.std_comp.result() - self.threshold = mean + self.sigmas * std - - self.success = self.outliers <= self.max_outliers - return self.success - - def merge(self, other): - # NOTE(ikhudoshyn): This method can not be implemented properly. - # After merge, both mean and standard deviation may change. - # Hence threshold will change as well. In this case we - # should again compare durations of all accounted iterations - # to the threshold. Unfortunately we can not do it since - # we do not store durations. - # Implementation provided here only gives rough approximation - # of outliers number. 
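-        # (Illustrative numbers: with mean=10s, std=1s and the default
-        # sigmas=3, the threshold is 10 + 3 * 1 = 13s; any subsequent
-        # iteration longer than 13s is counted as an outlier.)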
- self.iterations += other.iterations - self.outliers += other.outliers - self.mean_comp.merge(other.mean_comp) - self.std_comp.merge(other.std_comp) - - if self.iterations >= 2: - mean = self.mean_comp.result() - std = self.std_comp.result() - self.threshold = mean + self.sigmas * std - - self.success = self.outliers <= self.max_outliers - return self.success - - def details(self): - return (_("Maximum number of outliers %i <= %i - %s") % - (self.outliers, self.max_outliers, self.status())) diff --git a/rally/plugins/common/sla/performance_degradation.py b/rally/plugins/common/sla/performance_degradation.py deleted file mode 100644 index aede2a02..00000000 --- a/rally/plugins/common/sla/performance_degradation.py +++ /dev/null @@ -1,74 +0,0 @@ -# Copyright 2016: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -""" -SLA (Service-level agreement) is set of details for determining compliance -with contracted values such as maximum error rate or minimum response time. -""" - -from __future__ import division - -from rally.common.i18n import _ -from rally.common import streaming_algorithms -from rally.common import utils -from rally import consts -from rally.task import sla - - -@sla.configure(name="performance_degradation") -class PerformanceDegradation(sla.SLA): - """Calculates performance degradation based on iteration time - - This SLA plugin finds minimum and maximum duration of - iterations completed without errors during Rally task execution. - Assuming that minimum duration is 100%, it calculates - performance degradation against maximum duration. - """ - CONFIG_SCHEMA = { - "type": "object", - "$schema": consts.JSON_SCHEMA, - "properties": { - "max_degradation": { - "type": "number", - "minimum": 0.0, - }, - }, - "required": [ - "max_degradation", - ], - "additionalProperties": False, - } - - def __init__(self, criterion_value): - super(PerformanceDegradation, self).__init__(criterion_value) - self.max_degradation = self.criterion_value["max_degradation"] - self.degradation = streaming_algorithms.DegradationComputation() - - def add_iteration(self, iteration): - if not iteration.get("error"): - self.degradation.add(iteration["duration"]) - self.success = self.degradation.result() <= self.max_degradation - return self.success - - def merge(self, other): - self.degradation.merge(other.degradation) - self.success = self.degradation.result() <= self.max_degradation - return self.success - - def details(self): - return (_("Current degradation: %s%% - %s") % - (utils.format_float_to_str(self.degradation.result() or 0.0), - self.status())) diff --git a/rally/plugins/common/trigger/__init__.py b/rally/plugins/common/trigger/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/rally/plugins/common/trigger/event.py b/rally/plugins/common/trigger/event.py deleted file mode 100644 index dc5471bf..00000000 --- a/rally/plugins/common/trigger/event.py +++ /dev/null @@ -1,74 +0,0 @@ -# Copyright 2016: Mirantis Inc. -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally import consts -from rally.task import trigger - - -@trigger.configure(name="event") -class EventTrigger(trigger.Trigger): - """Triggers hook on specified event and list of values.""" - - CONFIG_SCHEMA = { - "type": "object", - "$schema": consts.JSON_SCHEMA, - "oneOf": [ - { - "description": "Triage hook based on specified seconds after " - "start of workload.", - "properties": { - "unit": {"enum": ["time"]}, - "at": { - "type": "array", - "minItems": 1, - "uniqueItems": True, - "items": { - "type": "integer", - "minimum": 0, - } - }, - }, - "required": ["unit", "at"], - "additionalProperties": False, - }, - { - "description": "Triage hook based on specific iterations.", - "properties": { - "unit": {"enum": ["iteration"]}, - "at": { - "type": "array", - "minItems": 1, - "uniqueItems": True, - "items": { - "type": "integer", - "minimum": 1, - } - }, - }, - "required": ["unit", "at"], - "additionalProperties": False, - }, - ] - } - - def get_listening_event(self): - return self.config["unit"] - - def on_event(self, event_type, value=None): - if not (event_type == self.get_listening_event() - and value in self.config["at"]): - # do nothing - return - super(EventTrigger, self).on_event(event_type, value) diff --git a/rally/plugins/common/trigger/periodic.py b/rally/plugins/common/trigger/periodic.py deleted file mode 100644 index 01135fec..00000000 --- a/rally/plugins/common/trigger/periodic.py +++ /dev/null @@ -1,69 +0,0 @@ -# Copyright 2016: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
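For reference, a trigger configuration fragment that would satisfy the
EventTrigger schema above (a sketch with illustrative values; the
surrounding hook configuration is elided):

.. code-block:: json

    {"unit": "iteration", "at": [1, 5, 10]}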
- -from rally import consts -from rally.task import trigger - - -@trigger.configure(name="periodic") -class PeriodicTrigger(trigger.Trigger): - """Periodically triggers hook with specified range and step.""" - - CONFIG_SCHEMA = { - "type": "object", - "$schema": consts.JSON_SCHEMA, - "oneOf": [ - { - "description": "Periodically triage hook based on elapsed time" - " after start of workload.", - "properties": { - "unit": {"enum": ["time"]}, - "start": {"type": "integer", "minimum": 0}, - "end": {"type": "integer", "minimum": 1}, - "step": {"type": "integer", "minimum": 1}, - }, - "required": ["unit", "step"], - "additionalProperties": False, - }, - { - "description": "Periodically triage hook based on iterations.", - "properties": { - "unit": {"enum": ["iteration"]}, - "start": {"type": "integer", "minimum": 1}, - "end": {"type": "integer", "minimum": 1}, - "step": {"type": "integer", "minimum": 1}, - }, - "required": ["unit", "step"], - "additionalProperties": False, - }, - ] - } - - def __init__(self, context, task, hook_cls): - super(PeriodicTrigger, self).__init__(context, task, hook_cls) - self.config.setdefault( - "start", 0 if self.config["unit"] == "time" else 1) - self.config.setdefault("end", float("Inf")) - - def get_listening_event(self): - return self.config["unit"] - - def on_event(self, event_type, value=None): - if not (event_type == self.get_listening_event() and - self.config["start"] <= value <= self.config["end"] and - (value - self.config["start"]) % self.config["step"] == 0): - # do nothing - return - super(PeriodicTrigger, self).on_event(event_type, value) diff --git a/rally/plugins/common/types.py b/rally/plugins/common/types.py deleted file mode 100644 index 39785ac8..00000000 --- a/rally/plugins/common/types.py +++ /dev/null @@ -1,103 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import os - -import requests - -from rally.common.plugin import plugin -from rally import exceptions -from rally.task import types - - -@plugin.configure(name="path_or_url") -class PathOrUrl(types.ResourceType): - - @classmethod - def transform(cls, clients, resource_config): - """Check whether file exists or url available. - - :param clients: openstack admin client handles - :param resource_config: path or url - - :returns: url or expanded file path - """ - - path = os.path.expanduser(resource_config) - if os.path.isfile(path): - return path - try: - head = requests.head(path) - if head.status_code == 200: - return path - raise exceptions.InvalidScenarioArgument( - "Url %s unavailable (code %s)" % (path, head.status_code)) - except Exception as ex: - raise exceptions.InvalidScenarioArgument( - "Url error %s (%s)" % (path, ex)) - - -@plugin.configure(name="file") -class FileType(types.ResourceType): - - @classmethod - def transform(cls, clients, resource_config): - """Return content of the file by its path. 
-
-        :param clients: openstack admin client handles
-        :param resource_config: path to file
-
-        :returns: content of the file
-        """
-
-        with open(os.path.expanduser(resource_config), "r") as f:
-            return f.read()
-
-
-@plugin.configure(name="expand_user_path")
-class ExpandUserPath(types.ResourceType):
-
-    @classmethod
-    def transform(cls, clients, resource_config):
-        """Return the path with the user directory expanded.
-
-        :param clients: openstack admin client handles
-        :param resource_config: path to expand
-
-        :returns: expanded path
-        """
-
-        return os.path.expanduser(resource_config)
-
-
-@plugin.configure(name="file_dict")
-class FileTypeDict(types.ResourceType):
-
-    @classmethod
-    def transform(cls, clients, resource_config):
-        """Return the dictionary of items with file path and file content.
-
-        :param clients: openstack admin client handles
-        :param resource_config: list of file paths
-
-        :returns: dictionary {file_path: file_content, ...}
-        """
-
-        file_type_dict = {}
-        for file_path in resource_config:
-            file_path = os.path.expanduser(file_path)
-            with open(file_path, "r") as f:
-                file_type_dict[file_path] = f.read()
-
-        return file_type_dict
diff --git a/rally/plugins/common/validators.py b/rally/plugins/common/validators.py
deleted file mode 100644
index 34c0079b..00000000
--- a/rally/plugins/common/validators.py
+++ /dev/null
@@ -1,367 +0,0 @@
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import inspect
-import os
-
-import jsonschema
-import six
-
-from rally.common import logging
-from rally.common import validation
-
-LOG = logging.getLogger(__name__)
-
-
-class ValidatorUtils(object):
-
-    @staticmethod
-    def _file_access_ok(filename, mode, param_name, required=True):
-        if not filename:
-            return validation.ValidationResult(
-                not required,
-                "Parameter %s required" % param_name)
-        if not os.access(os.path.expanduser(filename), mode):
-            return validation.ValidationResult(
-                False, "Could not open %(filename)s with mode %(mode)s "
-                       "for parameter %(param_name)s"
-                       % {"filename": filename, "mode": mode,
-                          "param_name": param_name})
-        return validation.ValidationResult(True)
-
-
-@validation.configure(name="jsonschema")
-class JsonSchemaValidator(validation.Validator):
-    """JSON schema validator"""
-
-    def validate(self, credentials, config, plugin_cls, plugin_cfg):
-        try:
-            jsonschema.validate(plugin_cfg, plugin_cls.CONFIG_SCHEMA)
-        except jsonschema.ValidationError as err:
-            return self.fail(str(err))
-
-
-@validation.configure(name="args-spec")
-class ArgsValidator(validation.Validator):
-    """Scenario arguments validator"""
-
-    def validate(self, credentials, config, plugin_cls, plugin_cfg):
-        scenario = plugin_cls
-        name = scenario.get_name()
-        namespace = scenario.get_platform()
-        scenario = scenario().run
-        args, _varargs, varkwargs, defaults = inspect.getargspec(scenario)
-
-        hint_msg = (" Use `rally plugin show --name %s --namespace %s` "
-                    "to display scenario description."
% (name, namespace)) - - # scenario always accepts an instance of scenario cls as a first arg - missed_args = args[1:] - if defaults: - # do not require args with default values - missed_args = missed_args[:-len(defaults)] - if "args" in config: - missed_args = set(missed_args) - set(config["args"]) - if missed_args: - msg = ("Argument(s) '%(args)s' should be specified in task config." - "%(hint)s" % {"args": "', '".join(missed_args), - "hint": hint_msg}) - return self.fail(msg) - - if varkwargs is None and "args" in config: - redundant_args = set(config["args"]) - set(args[1:]) - if redundant_args: - msg = ("Unexpected argument(s) found ['%(args)s'].%(hint)s" % - {"args": "', '".join(redundant_args), - "hint": hint_msg}) - return self.fail(msg) - - -@validation.configure(name="required_params") -class RequiredParameterValidator(validation.Validator): - """Scenario required parameter validator. - - This allows us to search required parameters in subdict of config. - - :param subdict: sub-dict of "config" to search. if - not defined - will search in "config" - :param params: list of required parameters - """ - - def __init__(self, params=None, subdict=None): - super(RequiredParameterValidator, self).__init__() - self.subdict = subdict - self.params = params - - def validate(self, credentials, config, plugin_cls, plugin_cfg): - missing = [] - args = config.get("args", {}) - if self.subdict: - args = args.get(self.subdict, {}) - for arg in self.params: - if isinstance(arg, (tuple, list)): - for case in arg: - if case in args: - break - else: - missing.append(case) - else: - if arg not in args: - missing.append(arg) - - if missing: - msg = ("%s parameters are not defined in " - "the benchmark config file") % ", ".join(missing) - return self.fail(msg) - - -@validation.configure(name="number") -class NumberValidator(validation.Validator): - """Checks that parameter is a number that pass specified condition. - - Ensure a parameter is within the range [minval, maxval]. This is a - closed interval so the end points are included. - - :param param_name: Name of parameter to validate - :param minval: Lower endpoint of valid interval - :param maxval: Upper endpoint of valid interval - :param nullable: Allow parameter not specified, or parameter=None - :param integer_only: Only accept integers - """ - - def __init__(self, param_name, minval=None, maxval=None, nullable=False, - integer_only=False): - self.param_name = param_name - self.minval = minval - self.maxval = maxval - self.nullable = nullable - self.integer_only = integer_only - - def validate(self, credentials, config, plugin_cls, plugin_cfg): - - value = config.get("args", {}).get(self.param_name) - - num_func = float - if self.integer_only: - # NOTE(boris-42): Force check that passed value is not float, this - # is important cause int(float_numb) won't raise exception - if type(value) == float: - return self.fail("%(name)s is %(val)s which hasn't int type" - % {"name": self.param_name, "val": value}) - num_func = int - - # None may be valid if the scenario sets a sensible default. 
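-        # (Illustrative: a validator declared as
-        # NumberValidator("sleep", minval=0, nullable=True) accepts both
-        # a missing "sleep" argument and an explicit sleep=None.)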
-        if self.nullable and value is None:
-            return
-
-        try:
-            number = num_func(value)
-            if self.minval is not None and number < self.minval:
-                return self.fail(
-                    "%(name)s is %(val)s which is less than the minimum "
-                    "(%(min)s)" % {"name": self.param_name,
-                                   "val": number,
-                                   "min": self.minval})
-            if self.maxval is not None and number > self.maxval:
-                return self.fail(
-                    "%(name)s is %(val)s which is greater than the maximum "
-                    "(%(max)s)" % {"name": self.param_name,
-                                   "val": number,
-                                   "max": self.maxval})
-        except (ValueError, TypeError):
-            return self.fail("%(name)s is %(val)s which is not a valid "
-                             "%(type)s" % {"name": self.param_name,
                                           "val": value,
-                                           "type": num_func.__name__})
-
-
-@validation.configure(name="enum")
-class EnumValidator(validation.Validator):
-    """Checks that a parameter value is in a given list.
-
-    Ensure a parameter has the right value. This value needs to be defined
-    in a list.
-
-    :param param_name: Name of parameter to validate
-    :param values: List of values accepted
-    :param missed: Allow the parameter to be missing
-    :param case_insensitive: Ignore case in enum values
-    """
-
-    def __init__(self, param_name, values, missed=False,
-                 case_insensitive=False):
-        self.param_name = param_name
-        self.missed = missed
-        self.case_insensitive = case_insensitive
-        if self.case_insensitive:
-            self.values = []
-            for value in values:
-                if isinstance(value, (six.text_type, six.string_types)):
-                    value = value.lower()
-                self.values.append(value)
-        else:
-            self.values = values
-
-    def validate(self, credentials, config, plugin_cls, plugin_cfg):
-        value = config.get("args", {}).get(self.param_name)
-        if value:
-            if self.case_insensitive:
-                if isinstance(value, (six.text_type, six.string_types)):
-                    value = value.lower()
-
-            if value not in self.values:
-                return self.fail("%(name)s is %(val)s which is not a "
-                                 "valid value from %(list)s"
-                                 % {"name": self.param_name,
-                                    "val": value,
-                                    "list": self.values})
-        else:
-            if not self.missed:
-                return self.fail("%s parameter is not defined in the "
-                                 "task config file" % self.param_name)
-
-
-@validation.configure(name="restricted_parameters")
-class RestrictedParametersValidator(validation.Validator):
-
-    def __init__(self, param_names, subdict=None):
-        """Validates that the specified parameters are not set.
-
-        :param param_names: parameter or list of parameters to be validated
-        :param subdict: sub-dict of "config" to search for param_names; if
-                        not defined, the search is done in "config"
-        """
-        super(RestrictedParametersValidator, self).__init__()
-        if isinstance(param_names, (list, tuple)):
-            self.params = param_names
-        else:
-            self.params = [param_names]
-        self.subdict = subdict
-
-    def validate(self, config, credentials, plugin_cls, plugin_cfg):
-        restricted_params = []
-        for param_name in self.params:
-            args = config.get("args", {})
-            a_dict, a_key = (args, self.subdict) if self.subdict else (
-                config, "args")
-            if param_name in a_dict.get(a_key, {}):
-                restricted_params.append(param_name)
-        if restricted_params:
-            msg = "You can't specify parameters '{}' in '{}'"
-            return self.fail(msg.format(
-                ", ".join(restricted_params),
-                self.subdict if self.subdict else "args"))
-
-
-@validation.configure(name="required_contexts")
-class RequiredContextsValidator(validation.Validator):
-
-    def __init__(self, contexts, *args):
-        """Validator checks if required benchmark contexts are specified.
-
-        :param contexts: list of strings and tuples with context names that
-                         should be specified. A tuple represents
-                         'at least one of'.
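-                         For example (names are illustrative),
-                         contexts=["users", ("c1", "c2")] requires the
-                         "users" context plus at least one of "c1" or "c2".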
-        """
-        super(RequiredContextsValidator, self).__init__()
-        if isinstance(contexts, (list, tuple)):
-            # the contexts argument is a list, so this is the new style of
-            # validator usage; args should not be provided in this case
-            self.contexts = contexts
-            if args:
-                LOG.warning("Positional argument is not what "
-                            "'required_context' decorator expects. "
-                            "Use `contexts` argument instead")
-        else:
-            # it is an old-style validator
-            self.contexts = [contexts]
-            self.contexts.extend(args)
-
-    def validate(self, config, credentials, plugin_cls, plugin_cfg):
-        missing_contexts = []
-        context = config.get("context", {})
-
-        for name in self.contexts:
-            if isinstance(name, tuple):
-                if not set(name) & set(context):
-                    # formatted string like: 'foo or bar or baz'
-                    formatted_names = "'{}'".format(" or ".join(name))
-                    missing_contexts.append(formatted_names)
-            else:
-                if name not in context:
-                    missing_contexts.append(name)
-
-        if missing_contexts:
-            msg = ("The following context(s) are required but missing from "
-                   "the benchmark configuration file: {}").format(
-                ", ".join(missing_contexts))
-            return self.fail(msg)
-
-
-@validation.configure(name="required_param_or_context")
-class RequiredParamOrContextValidator(validation.Validator):
-
-    def __init__(self, param_name, ctx_name):
-        """Validator checks that either the parameter or the context is set.
-
-        :param param_name: name of parameter
-        :param ctx_name: name of context
-        """
-        super(RequiredParamOrContextValidator, self).__init__()
-        self.param_name = param_name
-        self.ctx_name = ctx_name
-
-    def validate(self, config, credentials, plugin_cls, plugin_cfg):
-        msg = ("You should specify either scenario argument {} or"
-               " use context {}.").format(self.param_name,
-                                          self.ctx_name)
-
-        if self.ctx_name in config.get("context", {}):
-            return
-        if self.param_name in config.get("args", {}):
-            return
-        return self.fail(msg)
-
-
-@validation.configure(name="file_exists")
-class FileExistsValidator(validation.Validator):
-
-    def __init__(self, param_name, mode=os.R_OK, required=True):
-        """Validator checks parameter is a proper path to a file with proper mode.
-
-        Ensure a file exists and can be accessed with the specified mode.
-        Note that the path to the file will be expanded before access
-        checking.
-
-        :param param_name: Name of parameter to validate
-        :param mode: Access mode to test for. This should be one of:
-            * os.F_OK (file exists)
-            * os.R_OK (file is readable)
-            * os.W_OK (file is writable)
-            * os.X_OK (file is executable)
-
-            If multiple modes are required, they can be added, e.g.:
-            mode=os.R_OK+os.W_OK
-        :param required: Boolean indicating whether this argument is required.
-        """
-        super(FileExistsValidator, self).__init__()
-
-        self.param_name = param_name
-        self.mode = mode
-        self.required = required
-
-    def validate(self, config, credentials, plugin_cls, plugin_cfg):
-
-        return ValidatorUtils._file_access_ok(
-            config.get("args", {}).get(self.param_name),
-            self.mode, self.param_name, self.required)
diff --git a/rally/plugins/common/verification/__init__.py b/rally/plugins/common/verification/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/rally/plugins/common/verification/reporters.py b/rally/plugins/common/verification/reporters.py
deleted file mode 100644
index 2018eac8..00000000
--- a/rally/plugins/common/verification/reporters.py
+++ /dev/null
@@ -1,505 +0,0 @@
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import collections
-import datetime as dt
-import json
-import re
-import xml.etree.ElementTree as ET
-
-from rally.common import version
-from rally import consts
-from rally.ui import utils
-from rally.verification import reporter
-
-
-SKIP_RE = re.compile("Skipped until Bug: ?(?P<bug_number>\d+) is resolved.")
-LP_BUG_LINK = "https://launchpad.net/bugs/%s"
-TIME_FORMAT = consts.TimeFormat.ISO8601
-
-
-@reporter.configure("json")
-class JSONReporter(reporter.VerificationReporter):
-    """Generates verification report in JSON format.
-
-    An example of the report (All dates, numbers, names appearing in this
-    example are fictitious. Any resemblance to real things is purely
-    coincidental):
-
-    .. code-block:: json
-
-        {"verifications": {
-            "verification-uuid-1": {
-                "status": "finished",
-                "skipped": 1,
-                "started_at": "2001-01-01T00:00:00",
-                "finished_at": "2001-01-01T00:05:00",
-                "tests_duration": 5,
-                "run_args": {
-                    "pattern": "set=smoke",
-                    "xfail_list": {"some.test.TestCase.test_xfail":
-                                       "Some reason why it is expected."},
-                    "skip_list": {"some.test.TestCase.test_skipped":
-                                      "This test was skipped intentionally"},
-                },
-                "success": 1,
-                "expected_failures": 1,
-                "tests_count": 3,
-                "failures": 0,
-                "unexpected_success": 0
-            },
-            "verification-uuid-2": {
-                "status": "finished",
-                "skipped": 1,
-                "started_at": "2002-01-01T00:00:00",
-                "finished_at": "2002-01-01T00:05:00",
-                "tests_duration": 5,
-                "run_args": {
-                    "pattern": "set=smoke",
-                    "xfail_list": {"some.test.TestCase.test_xfail":
-                                       "Some reason why it is expected."},
-                    "skip_list": {"some.test.TestCase.test_skipped":
-                                      "This test was skipped intentionally"},
-                },
-                "success": 1,
-                "expected_failures": 1,
-                "tests_count": 3,
-                "failures": 1,
-                "unexpected_success": 0
-            }
-        },
-        "tests": {
-            "some.test.TestCase.test_foo[tag1,tag2]": {
-                "name": "some.test.TestCase.test_foo",
-                "tags": ["tag1","tag2"],
-                "by_verification": {
-                    "verification-uuid-1": {
-                        "status": "success",
-                        "duration": "1.111"
-                    },
-                    "verification-uuid-2": {
-                        "status": "success",
-                        "duration": "22.222"
-                    }
-                }
-            },
-            "some.test.TestCase.test_skipped[tag1]": {
-                "name": "some.test.TestCase.test_skipped",
-                "tags": ["tag1"],
-                "by_verification": {
-                    "verification-uuid-1": {
-                        "status": "skipped",
-                        "duration": "0",
-                        "details": "Skipped until Bug: 666 is resolved."
-                    },
-                    "verification-uuid-2": {
-                        "status": "skipped",
-                        "duration": "0",
-                        "details": "Skipped until Bug: 666 is resolved."
-                    }
-                }
-            },
-            "some.test.TestCase.test_xfail": {
-                "name": "some.test.TestCase.test_xfail",
-                "tags": [],
-                "by_verification": {
-                    "verification-uuid-1": {
-                        "status": "xfail",
-                        "duration": "3",
-                        "details": "Some reason why it is expected.\\n\\n"
-                                   "Traceback (most recent call last): \\n"
-                                   "  File "fake.py", line 13, in <module>\\n"
-                                   "    yyy()\\n"
-                                   "  File "fake.py", line 11, in yyy\\n"
-                                   "    xxx()\\n"
-                                   "  File "fake.py", line 8, in xxx\\n"
-                                   "    bar()\\n"
-                                   "  File "fake.py", line 5, in bar\\n"
-                                   "    foo()\\n"
-                                   "  File "fake.py", line 2, in foo\\n"
-                                   "    raise Exception()\\n"
-                                   "Exception"
-                    },
-                    "verification-uuid-2": {
-                        "status": "xfail",
-                        "duration": "3",
-                        "details": "Some reason why it is expected.\\n\\n"
-                                   "Traceback (most recent call last): \\n"
-                                   "  File "fake.py", line 13, in <module>\\n"
-                                   "    yyy()\\n"
-                                   "  File "fake.py", line 11, in yyy\\n"
-                                   "    xxx()\\n"
-                                   "  File "fake.py", line 8, in xxx\\n"
-                                   "    bar()\\n"
-                                   "  File "fake.py", line 5, in bar\\n"
-                                   "    foo()\\n"
-                                   "  File "fake.py", line 2, in foo\\n"
-                                   "    raise Exception()\\n"
-                                   "Exception"
-                    }
-                }
-            },
-            "some.test.TestCase.test_failed": {
-                "name": "some.test.TestCase.test_failed",
-                "tags": [],
-                "by_verification": {
-                    "verification-uuid-2": {
-                        "status": "fail",
-                        "duration": "4",
-                        "details": "Some reason why it is expected.\\n\\n"
-                                   "Traceback (most recent call last): \\n"
-                                   "  File "fake.py", line 13, in <module>\\n"
-                                   "    yyy()\\n"
-                                   "  File "fake.py", line 11, in yyy\\n"
-                                   "    xxx()\\n"
-                                   "  File "fake.py", line 8, in xxx\\n"
-                                   "    bar()\\n"
-                                   "  File "fake.py", line 5, in bar\\n"
-                                   "    foo()\\n"
-                                   "  File "fake.py", line 2, in foo\\n"
-                                   "    raise Exception()\\n"
-                                   "Exception"
-                    }
-                }
-            }
-        }
-        }
-
-    """
-
-    @classmethod
-    def validate(cls, output_destination):
-        """Validate destination of report.
-
-        :param output_destination: Destination of report
-        """
-        # nothing to check :)
-        pass
-
-    def _generate(self):
-        """Prepare raw report."""
-
-        verifications = collections.OrderedDict()
-        tests = {}
-
-        for v in self.verifications:
-            verifications[v.uuid] = {
-                "started_at": v.created_at.strftime(TIME_FORMAT),
-                "finished_at": v.updated_at.strftime(TIME_FORMAT),
-                "status": v.status,
-                "run_args": v.run_args,
-                "tests_count": v.tests_count,
-                "tests_duration": v.tests_duration,
-                "skipped": v.skipped,
-                "success": v.success,
-                "expected_failures": v.expected_failures,
-                "unexpected_success": v.unexpected_success,
-                "failures": v.failures,
-            }
-
-            for test_id, result in v.tests.items():
-                if test_id not in tests:
-                    # NOTE(ylobankov): It is more convenient to see test ID
-                    #                  at the first place in the report.
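-                    #                  (Sorting on the boolean
-                    #                  startswith("id-") key with
-                    #                  reverse=True places "id-..." tags
-                    #                  before all other tags.)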
- tags = sorted(result.get("tags", []), reverse=True, - key=lambda tag: tag.startswith("id-")) - tests[test_id] = {"tags": tags, - "name": result["name"], - "by_verification": {}} - - tests[test_id]["by_verification"][v.uuid] = { - "status": result["status"], - "duration": result["duration"] - } - - reason = result.get("reason", "") - if reason: - match = SKIP_RE.match(reason) - if match: - link = LP_BUG_LINK % match.group("bug_number") - reason = re.sub(match.group("bug_number"), link, - reason) - traceback = result.get("traceback", "") - sep = "\n\n" if reason and traceback else "" - d = (reason + sep + traceback.strip()) or None - if d: - tests[test_id]["by_verification"][v.uuid]["details"] = d - - return {"verifications": verifications, "tests": tests} - - def generate(self): - raw_report = json.dumps(self._generate(), indent=4) - - if self.output_destination: - return {"files": {self.output_destination: raw_report}, - "open": self.output_destination} - else: - return {"print": raw_report} - - -@reporter.configure("html") -class HTMLReporter(JSONReporter): - """Generates verification report in HTML format.""" - INCLUDE_LIBS = False - - # "T" separator of ISO 8601 is not user-friendly enough. - TIME_FORMAT = "%Y-%m-%d %H:%M:%S" - - def generate(self): - report = self._generate() - uuids = report["verifications"].keys() - show_comparison_note = False - - for test in report["tests"].values(): - # make as much as possible processing here to reduce processing - # at JS side - test["has_details"] = False - for test_info in test["by_verification"].values(): - if "details" not in test_info: - test_info["details"] = None - elif not test["has_details"]: - test["has_details"] = True - - durations = [] - # iter by uuids to store right order for comparison - for uuid in uuids: - if uuid in test["by_verification"]: - durations.append(test["by_verification"][uuid]["duration"]) - if float(durations[-1]) < 0.001: - durations[-1] = "0" - # not to display such little duration in the report - test["by_verification"][uuid]["duration"] = "" - - if len(durations) > 1 and not ( - durations[0] == "0" and durations[-1] == "0"): - # compare result with result of the first verification - diff = float(durations[-1]) - float(durations[0]) - result = "%s (" % durations[-1] - if diff >= 0: - result += "+" - result += "%s)" % diff - test["by_verification"][uuid]["duration"] = result - - if not show_comparison_note and len(durations) > 2: - # NOTE(andreykurilin): only in case of comparison of more - # than 2 results of the same test we should display a note - # about the comparison strategy - show_comparison_note = True - - template = utils.get_template("verification/report.html") - context = {"uuids": uuids, - "verifications": report["verifications"], - "tests": report["tests"], - "show_comparison_note": show_comparison_note} - - raw_report = template.render(data=json.dumps(context), - include_libs=self.INCLUDE_LIBS) - - # in future we will support html_static and will need to save more - # files - if self.output_destination: - return {"files": {self.output_destination: raw_report}, - "open": self.output_destination} - else: - return {"print": raw_report} - - -@reporter.configure("html-static") -class HTMLStaticReporter(HTMLReporter): - """Generates verification report in HTML format with embedded JS/CSS.""" - INCLUDE_LIBS = True - - -@reporter.configure("junit-xml") -class JUnitXMLReporter(reporter.VerificationReporter): - """Generates verification report in JUnit-XML format. 
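Two small transformations above are easy to miss in the flow of the code: skip reasons matching SKIP_RE get their bug number rewritten into a Launchpad link (the pattern relies on a named group, referenced as match.group("bug_number")), and the HTML report annotates the last run's duration with its delta against the first run. A self-contained sketch of both:

.. code-block:: python

    import re

    SKIP_RE = re.compile(
        r"Skipped until Bug: ?(?P<bug_number>\d+) is resolved\.")
    LP_BUG_LINK = "https://launchpad.net/bugs/%s"

    def linkify(reason):
        # 666 -> https://launchpad.net/bugs/666, as in _generate() above
        match = SKIP_RE.match(reason)
        if match:
            link = LP_BUG_LINK % match.group("bug_number")
            reason = re.sub(match.group("bug_number"), link, reason)
        return reason

    def annotate(durations):
        # mirrors the HTML duration comparison against the first run
        diff = float(durations[-1]) - float(durations[0])
        return "%s (%s%s)" % (durations[-1], "+" if diff >= 0 else "", diff)

    print(linkify("Skipped until Bug: 666 is resolved."))
    print(annotate(["1.0", "3.5"]))  # 3.5 (+2.5)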
- - An example of the report (All dates, numbers, names appearing in this - example are fictitious. Any resemblance to real things is purely - coincidental): - - .. code-block:: xml - - - - - - - Skipped until Bug: 666 is resolved. - - - - - - - - It is an unexpected success. The test should fail due to: - It should fail, I said! - - - - - - - HEEEEEEELP - - - Skipped until Bug: 666 is resolved. - - - - - - - - - """ - - @classmethod - def validate(cls, output_destination): - pass - - def _prettify_xml(self, elem, level=0): - """Adds indents. - - Code of this method was copied from - http://effbot.org/zone/element-lib.htm#prettyprint - - """ - i = "\n" + level * " " - if len(elem): - if not elem.text or not elem.text.strip(): - elem.text = i + " " - if not elem.tail or not elem.tail.strip(): - elem.tail = i - for elem in elem: - self._prettify_xml(elem, level + 1) - if not elem.tail or not elem.tail.strip(): - elem.tail = i - else: - if level and (not elem.tail or not elem.tail.strip()): - elem.tail = i - - def generate(self): - root = ET.Element("testsuites") - - root.append(ET.Comment("Report is generated by Rally %s at %s" % ( - version.version_string(), - dt.datetime.utcnow().strftime(TIME_FORMAT)))) - - for v in self.verifications: - verification = ET.SubElement(root, "testsuite", { - "id": v.uuid, - "time": str(v.tests_duration), - "tests": str(v.tests_count), - "errors": "0", - "skipped": str(v.skipped), - "failures": str(v.failures + v.unexpected_success), - "timestamp": v.created_at.strftime(TIME_FORMAT) - }) - tests = sorted(v.tests.values(), - key=lambda t: (t.get("timestamp", ""), t["name"])) - for result in tests: - class_name, name = result["name"].rsplit(".", 1) - test_case = { - "time": result["duration"], - "name": name, "classname": class_name - } - - test_id = [tag[3:] for tag in result.get("tags", []) - if tag.startswith("id-")] - if test_id: - test_case["id"] = test_id[0] - if "timestamp" in result: - test_case["timestamp"] = result["timestamp"] - - test_case_element = ET.SubElement(verification, "testcase", - test_case) - if result["status"] == "success": - # nothing to add - pass - elif result["status"] == "uxsuccess": - # NOTE(andreykurilin): junit doesn't support uxsuccess - # status, so let's display it like "fail" with proper - # comment. - failure = ET.SubElement(test_case_element, "failure") - failure.text = ("It is an unexpected success. The test " - "should fail due to: %s" % - result.get("reason", "Unknown reason")) - elif result["status"] == "fail": - failure = ET.SubElement(test_case_element, "failure") - failure.text = result.get("traceback", None) - elif result["status"] == "xfail": - # NOTE(andreykurilin): junit doesn't support xfail status, - # so let's display it like "success" with proper comment - test_case_element.append(ET.Comment( - "It is an expected failure due to: %s" % - result.get("reason", "Unknown reason"))) - trace = result.get("traceback", None) - if trace: - test_case_element.append(ET.Comment( - "Traceback:\n%s" % trace)) - elif result["status"] == "skip": - skipped = ET.SubElement(test_case_element, "skipped") - skipped.text = result.get("reason", "Unknown reason") - else: - # wtf is it?! we should add validation of results... 
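A runnable distillation of the element structure this generate() assembles: one testsuite per verification, one testcase per test, with failure/skipped children depending on status. Attribute values here are illustrative:

.. code-block:: python

    import xml.etree.ElementTree as ET

    root = ET.Element("testsuites")
    suite = ET.SubElement(root, "testsuite", {
        "id": "verification-uuid-1", "tests": "3", "time": "5",
        "errors": "0", "failures": "1", "skipped": "1",
        "timestamp": "2001-01-01T00:00:00"})
    case = ET.SubElement(suite, "testcase", {
        "classname": "some.test.TestCase", "name": "test_skipped",
        "time": "0"})
    ET.SubElement(case, "skipped").text = (
        "Skipped until Bug: 666 is resolved.")
    print(ET.tostring(root, encoding="utf-8").decode("utf-8"))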
- pass - - self._prettify_xml(root) - - raw_report = ET.tostring(root, encoding="utf-8").decode("utf-8") - if self.output_destination: - return {"files": {self.output_destination: raw_report}, - "open": self.output_destination} - else: - return {"print": raw_report} diff --git a/rally/plugins/common/verification/testr.py b/rally/plugins/common/verification/testr.py deleted file mode 100644 index 9c82fc36..00000000 --- a/rally/plugins/common/verification/testr.py +++ /dev/null @@ -1,140 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import os -import re -import shutil -import subprocess - -from rally.common.i18n import _LE -from rally.common.io import subunit_v2 -from rally.common import logging -from rally.common import utils as common_utils -from rally import exceptions -from rally.verification import context -from rally.verification import manager -from rally.verification import utils - - -LOG = logging.getLogger(__name__) - -TEST_NAME_RE = re.compile(r"^[a-zA-Z_.0-9]+(\[[a-zA-Z-_,=0-9]*\])?$") - - -@context.configure("testr", order=999) -class TestrContext(context.VerifierContext): - """Context to transform 'run_args' into CLI arguments for testr.""" - - def __init__(self, ctx): - super(TestrContext, self).__init__(ctx) - self._tmp_files = [] - - def setup(self): - self.context["testr_cmd"] = ["testr", "run", "--subunit"] - run_args = self.verifier.manager.prepare_run_args( - self.context.get("run_args", {})) - - concurrency = run_args.get("concurrency", 0) - if concurrency == 0 or concurrency > 1: - self.context["testr_cmd"].append("--parallel") - if concurrency >= 1: - self.context["testr_cmd"].extend( - ["--concurrency", str(concurrency)]) - - load_list = run_args.get("load_list") - skip_list = run_args.get("skip_list") - - if skip_list: - if not load_list: - load_list = self.verifier.manager.list_tests() - load_list = set(load_list) - set(skip_list) - if load_list: - load_list_file = common_utils.generate_random_path() - with open(load_list_file, "w") as f: - f.write("\n".join(load_list)) - self._tmp_files.append(load_list_file) - self.context["testr_cmd"].extend(["--load-list", load_list_file]) - - if run_args.get("failed"): - self.context["testr_cmd"].append("--failing") - - if run_args.get("pattern"): - self.context["testr_cmd"].append(run_args.get("pattern")) - - def cleanup(self): - for f in self._tmp_files: - if os.path.exists(f): - os.remove(f) - - -class TestrLauncher(manager.VerifierManager): - """Testr wrapper.""" - - @property - def run_environ(self): - return self.environ - - def _init_testr(self): - """Initialize testr.""" - test_repository_dir = os.path.join(self.base_dir, ".testrepository") - # NOTE(andreykurilin): Is there any possibility that .testrepository - # presents in clear repo?! 
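For a concrete feel of what TestrContext.setup() above produces, the command assembled for a typical set of run arguments (values illustrative):

.. code-block:: python

    run_args = {"concurrency": 2, "pattern": "set=smoke"}

    cmd = ["testr", "run", "--subunit"]
    if run_args["concurrency"] == 0 or run_args["concurrency"] > 1:
        cmd.append("--parallel")
    if run_args["concurrency"] >= 1:
        cmd.extend(["--concurrency", str(run_args["concurrency"])])
    if run_args.get("pattern"):
        cmd.append(run_args["pattern"])
    print(" ".join(cmd))
    # testr run --subunit --parallel --concurrency 2 set=smoke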
- if not os.path.isdir(test_repository_dir): - LOG.debug("Initializing testr.") - try: - utils.check_output(["testr", "init"], cwd=self.repo_dir, - env=self.environ) - except (subprocess.CalledProcessError, OSError): - if os.path.exists(test_repository_dir): - shutil.rmtree(test_repository_dir) - raise exceptions.RallyException( - _LE("Failed to initialize testr.")) - - def install(self): - super(TestrLauncher, self).install() - self._init_testr() - - def list_tests(self, pattern=""): - """List all tests.""" - output = utils.check_output(["testr", "list-tests", pattern], - cwd=self.repo_dir, env=self.environ, - debug_output=False) - return [t for t in output.split("\n") if TEST_NAME_RE.match(t)] - - def run(self, context): - """Run tests.""" - testr_cmd = context["testr_cmd"] - run_args = context.get("run_args", {}) - LOG.debug("Test(s) started by the command: '%s'.", " ".join(testr_cmd)) - stream = subprocess.Popen(testr_cmd, env=self.run_environ, - cwd=self.repo_dir, - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT) - xfail_list = run_args.get("xfail_list") - skip_list = run_args.get("skip_list") - results = subunit_v2.parse(stream.stdout, live=True, - expected_failures=xfail_list, - skipped_tests=skip_list, - logger_name=self.verifier.name) - stream.wait() - - return results - - def prepare_run_args(self, run_args): - """Prepare 'run_args' for testr context. - - This method is called by TestrContext before transforming 'run_args' - into CLI arguments for testr. - """ - return run_args diff --git a/rally/plugins/openstack/__init__.py b/rally/plugins/openstack/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/rally/plugins/openstack/cfg/__init__.py b/rally/plugins/openstack/cfg/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/rally/plugins/openstack/cfg/cinder.py b/rally/plugins/openstack/cfg/cinder.py deleted file mode 100644 index 6eae57f7..00000000 --- a/rally/plugins/openstack/cfg/cinder.py +++ /dev/null @@ -1,44 +0,0 @@ -# Copyright 2013: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
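The TEST_NAME_RE filter in list_tests() above drops testr's status noise and keeps only well-formed test IDs (with an optional [tag,...] suffix); a quick demonstration:

.. code-block:: python

    import re

    TEST_NAME_RE = re.compile(r"^[a-zA-Z_.0-9]+(\[[a-zA-Z-_,=0-9]*\])?$")

    output = "some.test.TestCase.test_foo[tag1,tag2]\nrunning=subunit trace\n"
    print([t for t in output.split("\n") if TEST_NAME_RE.match(t)])
    # ['some.test.TestCase.test_foo[tag1,tag2]']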
- -from oslo_config import cfg - -OPTS = {"benchmark": [ - cfg.FloatOpt("cinder_volume_create_prepoll_delay", - default=2.0, - help="Time to sleep after creating a resource before" - " polling for it status"), - cfg.FloatOpt("cinder_volume_create_timeout", - default=600.0, - help="Time to wait for cinder volume to be created."), - cfg.FloatOpt("cinder_volume_create_poll_interval", - default=2.0, - help="Interval between checks when waiting for volume" - " creation."), - cfg.FloatOpt("cinder_volume_delete_timeout", - default=600.0, - help="Time to wait for cinder volume to be deleted."), - cfg.FloatOpt("cinder_volume_delete_poll_interval", - default=2.0, - help="Interval between checks when waiting for volume" - " deletion."), - cfg.FloatOpt("cinder_backup_restore_timeout", - default=600.0, - help="Time to wait for cinder backup to be restored."), - cfg.FloatOpt("cinder_backup_restore_poll_interval", - default=2.0, - help="Interval between checks when waiting for backup" - " restoring."), -]} diff --git a/rally/plugins/openstack/cfg/cleanup.py b/rally/plugins/openstack/cfg/cleanup.py deleted file mode 100644 index 05ef75c5..00000000 --- a/rally/plugins/openstack/cfg/cleanup.py +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright 2013: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_config import cfg - -OPTS = {"cleanup": [ - cfg.IntOpt("resource_deletion_timeout", default=600, - help="A timeout in seconds for deleting resources"), - cfg.IntOpt("cleanup_threads", default=20, - help="Number of cleanup threads to run") -]} diff --git a/rally/plugins/openstack/cfg/ec2.py b/rally/plugins/openstack/cfg/ec2.py deleted file mode 100644 index f6b585fe..00000000 --- a/rally/plugins/openstack/cfg/ec2.py +++ /dev/null @@ -1,34 +0,0 @@ -# Copyright 2013: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_config import cfg - -OPTS = {"benchmark": [ - cfg.FloatOpt( - "ec2_server_boot_prepoll_delay", - default=1.0, - help="Time to sleep after boot before polling for status" - ), - cfg.FloatOpt( - "ec2_server_boot_timeout", - default=300.0, - help="Server boot timeout" - ), - cfg.FloatOpt( - "ec2_server_boot_poll_interval", - default=1.0, - help="Server boot poll interval" - ) -]} diff --git a/rally/plugins/openstack/cfg/glance.py b/rally/plugins/openstack/cfg/glance.py deleted file mode 100644 index b9fa0410..00000000 --- a/rally/plugins/openstack/cfg/glance.py +++ /dev/null @@ -1,45 +0,0 @@ -# Copyright 2013: Mirantis Inc. 
-# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_config import cfg - -OPTS = {"benchmark": [ - cfg.FloatOpt("glance_image_delete_timeout", - default=120.0, - help="Time to wait for glance image to be deleted."), - cfg.FloatOpt("glance_image_delete_poll_interval", - default=1.0, - help="Interval between checks when waiting for image " - "deletion."), - cfg.FloatOpt("glance_image_create_prepoll_delay", - default=2.0, - help="Time to sleep after creating a resource before " - "polling for it status"), - cfg.FloatOpt("glance_image_create_timeout", - default=120.0, - help="Time to wait for glance image to be created."), - cfg.FloatOpt("glance_image_create_poll_interval", - default=1.0, - help="Interval between checks when waiting for image " - "creation."), - cfg.FloatOpt("glance_image_create_prepoll_delay", - default=2.0, - help="Time to sleep after creating a resource before " - "polling for it status"), - cfg.FloatOpt("glance_image_create_poll_interval", - default=1.0, - help="Interval between checks when waiting for image " - "creation.") -]} diff --git a/rally/plugins/openstack/cfg/heat.py b/rally/plugins/openstack/cfg/heat.py deleted file mode 100644 index c53dd16d..00000000 --- a/rally/plugins/openstack/cfg/heat.py +++ /dev/null @@ -1,92 +0,0 @@ -# Copyright 2013: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
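Each of the cfg modules in this directory follows the same contract: a module-level OPTS dict mapping an option-group name to a list of oslo.config options. A minimal sketch of how one of them is registered and read:

.. code-block:: python

    from oslo_config import cfg

    from rally.plugins.openstack.cfg import cinder

    CONF = cfg.CONF
    for group, options in cinder.OPTS.items():
        CONF.register_opts(options, group=group)

    # once registered, values resolve as attributes, e.g.:
    # CONF.benchmark.cinder_volume_create_timeout -> 600.0 by default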
- -from oslo_config import cfg - -OPTS = {"benchmark": [ - cfg.FloatOpt("heat_stack_create_prepoll_delay", - default=2.0, - help="Time(in sec) to sleep after creating a resource before " - "polling for it status."), - cfg.FloatOpt("heat_stack_create_timeout", - default=3600.0, - help="Time(in sec) to wait for heat stack to be created."), - cfg.FloatOpt("heat_stack_create_poll_interval", - default=1.0, - help="Time interval(in sec) between checks when waiting for " - "stack creation."), - cfg.FloatOpt("heat_stack_delete_timeout", - default=3600.0, - help="Time(in sec) to wait for heat stack to be deleted."), - cfg.FloatOpt("heat_stack_delete_poll_interval", - default=1.0, - help="Time interval(in sec) between checks when waiting for " - "stack deletion."), - cfg.FloatOpt("heat_stack_check_timeout", - default=3600.0, - help="Time(in sec) to wait for stack to be checked."), - cfg.FloatOpt("heat_stack_check_poll_interval", - default=1.0, - help="Time interval(in sec) between checks when waiting for " - "stack checking."), - cfg.FloatOpt("heat_stack_update_prepoll_delay", - default=2.0, - help="Time(in sec) to sleep after updating a resource before " - "polling for it status."), - cfg.FloatOpt("heat_stack_update_timeout", - default=3600.0, - help="Time(in sec) to wait for stack to be updated."), - cfg.FloatOpt("heat_stack_update_poll_interval", - default=1.0, - help="Time interval(in sec) between checks when waiting for " - "stack update."), - cfg.FloatOpt("heat_stack_suspend_timeout", - default=3600.0, - help="Time(in sec) to wait for stack to be suspended."), - cfg.FloatOpt("heat_stack_suspend_poll_interval", - default=1.0, - help="Time interval(in sec) between checks when waiting for " - "stack suspend."), - cfg.FloatOpt("heat_stack_resume_timeout", - default=3600.0, - help="Time(in sec) to wait for stack to be resumed."), - cfg.FloatOpt("heat_stack_resume_poll_interval", - default=1.0, - help="Time interval(in sec) between checks when waiting for " - "stack resume."), - cfg.FloatOpt("heat_stack_snapshot_timeout", - default=3600.0, - help="Time(in sec) to wait for stack snapshot to " - "be created."), - cfg.FloatOpt("heat_stack_snapshot_poll_interval", - default=1.0, - help="Time interval(in sec) between checks when waiting for " - "stack snapshot to be created."), - cfg.FloatOpt("heat_stack_restore_timeout", - default=3600.0, - help="Time(in sec) to wait for stack to be restored from " - "snapshot."), - cfg.FloatOpt("heat_stack_restore_poll_interval", - default=1.0, - help="Time interval(in sec) between checks when waiting for " - "stack to be restored."), - cfg.FloatOpt("heat_stack_scale_timeout", - default=3600.0, - help="Time (in sec) to wait for stack to scale up or down."), - cfg.FloatOpt("heat_stack_scale_poll_interval", - default=1.0, - help="Time interval (in sec) between checks when waiting for " - "a stack to scale up or down.") -]} diff --git a/rally/plugins/openstack/cfg/ironic.py b/rally/plugins/openstack/cfg/ironic.py deleted file mode 100644 index 9ee598d2..00000000 --- a/rally/plugins/openstack/cfg/ironic.py +++ /dev/null @@ -1,32 +0,0 @@ -# Copyright 2013: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_config import cfg - -OPTS = {"benchmark": [ - cfg.FloatOpt("ironic_node_create_poll_interval", - default=1.0, - help="Interval(in sec) between checks when waiting for node " - "creation."), - cfg.FloatOpt("ironic_node_create_timeout", - default=300, - help="Ironic node create timeout"), - cfg.FloatOpt("ironic_node_poll_interval", - default=1.0, - help="Ironic node poll interval"), - cfg.FloatOpt("ironic_node_delete_timeout", - default=300, - help="Ironic node create timeout") -]} diff --git a/rally/plugins/openstack/cfg/keystone_roles.py b/rally/plugins/openstack/cfg/keystone_roles.py deleted file mode 100644 index 857b4ff3..00000000 --- a/rally/plugins/openstack/cfg/keystone_roles.py +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright 2013: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_config import cfg - -OPTS = {"roles_context": [ - cfg.IntOpt("resource_management_workers", - default=30, - help="How many concurrent threads to use for serving roles " - "context"), -]} diff --git a/rally/plugins/openstack/cfg/keystone_users.py b/rally/plugins/openstack/cfg/keystone_users.py deleted file mode 100644 index d69eed35..00000000 --- a/rally/plugins/openstack/cfg/keystone_users.py +++ /dev/null @@ -1,37 +0,0 @@ -# Copyright 2013: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_config import cfg - -RESOURCE_MANAGEMENT_WORKERS_DESCR = ("The number of concurrent threads to use " - "for serving users context.") -PROJECT_DOMAIN_DESCR = "ID of domain in which projects will be created." -USER_DOMAIN_DESCR = "ID of domain in which users will be created." 
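The *_timeout/*_poll_interval pairs defined throughout these modules all parameterize the same waiting pattern. A generic sketch of that pattern (this is not Rally's actual helper, which lives in rally.task.utils):

.. code-block:: python

    import time

    def wait_for(resource, is_ready, timeout, poll_interval,
                 prepoll_delay=0.0):
        # sleep once before the first check, then poll until ready
        # or until the timeout expires
        time.sleep(prepoll_delay)
        deadline = time.time() + timeout
        while time.time() < deadline:
            if is_ready(resource):
                return resource
            time.sleep(poll_interval)
        raise RuntimeError("resource not ready within %ss" % timeout)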
- -OPTS = {"users_context": [ - cfg.IntOpt("resource_management_workers", - default=20, - help=RESOURCE_MANAGEMENT_WORKERS_DESCR), - cfg.StrOpt("project_domain", - default="default", - help=PROJECT_DOMAIN_DESCR), - cfg.StrOpt("user_domain", - default="default", - help=USER_DOMAIN_DESCR), - cfg.StrOpt("keystone_default_role", - default="member", - help="The default role name of the keystone to assign to " - "users."), -]} diff --git a/rally/plugins/openstack/cfg/magnum.py b/rally/plugins/openstack/cfg/magnum.py deleted file mode 100644 index b0619980..00000000 --- a/rally/plugins/openstack/cfg/magnum.py +++ /dev/null @@ -1,45 +0,0 @@ -# Copyright 2013: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_config import cfg - -OPTS = {"benchmark": [ - cfg.FloatOpt("magnum_cluster_create_prepoll_delay", - default=5.0, - help="Time(in sec) to sleep after creating a resource before " - "polling for the status."), - cfg.FloatOpt("magnum_cluster_create_timeout", - default=1200.0, - help="Time(in sec) to wait for magnum cluster to be " - "created."), - cfg.FloatOpt("magnum_cluster_create_poll_interval", - default=1.0, - help="Time interval(in sec) between checks when waiting for " - "cluster creation."), - cfg.FloatOpt("k8s_pod_create_timeout", - default=600.0, - help="Time(in sec) to wait for k8s pod to be created."), - cfg.FloatOpt("k8s_pod_create_poll_interval", - default=1.0, - help="Time interval(in sec) between checks when waiting for " - "k8s pod creation."), - cfg.FloatOpt("k8s_rc_create_timeout", - default=600.0, - help="Time(in sec) to wait for k8s rc to be created."), - cfg.FloatOpt("k8s_rc_create_poll_interval", - default=1.0, - help="Time interval(in sec) between checks when waiting for " - "k8s rc creation."), -]} diff --git a/rally/plugins/openstack/cfg/manila.py b/rally/plugins/openstack/cfg/manila.py deleted file mode 100644 index 4786601e..00000000 --- a/rally/plugins/openstack/cfg/manila.py +++ /dev/null @@ -1,42 +0,0 @@ -# Copyright 2013: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
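Any of these defaults can be overridden in Rally's configuration file, using the option group as the section name; for example, for the users context options above (the path shown is the conventional one):

.. code-block:: ini

    # /etc/rally/rally.conf
    [users_context]
    resource_management_workers = 50
    keystone_default_role = member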
- -from oslo_config import cfg - -OPTS = {"benchmark": [ - cfg.FloatOpt( - "manila_share_create_prepoll_delay", - default=2.0, - help="Delay between creating Manila share and polling for its " - "status."), - cfg.FloatOpt( - "manila_share_create_timeout", - default=300.0, - help="Timeout for Manila share creation."), - cfg.FloatOpt( - "manila_share_create_poll_interval", - default=3.0, - help="Interval between checks when waiting for Manila share " - "creation."), - cfg.FloatOpt( - "manila_share_delete_timeout", - default=180.0, - help="Timeout for Manila share deletion."), - cfg.FloatOpt( - "manila_share_delete_poll_interval", - default=2.0, - help="Interval between checks when waiting for Manila share " - "deletion.") -]} diff --git a/rally/plugins/openstack/cfg/mistral.py b/rally/plugins/openstack/cfg/mistral.py deleted file mode 100644 index 344537ad..00000000 --- a/rally/plugins/openstack/cfg/mistral.py +++ /dev/null @@ -1,9 +0,0 @@ - -from oslo_config import cfg - -OPTS = {"benchmark": [ - cfg.IntOpt( - "mistral_execution_timeout", - default=200, - help="mistral execution timeout") -]} diff --git a/rally/plugins/openstack/cfg/monasca.py b/rally/plugins/openstack/cfg/monasca.py deleted file mode 100644 index c289ac44..00000000 --- a/rally/plugins/openstack/cfg/monasca.py +++ /dev/null @@ -1,10 +0,0 @@ - -from oslo_config import cfg - -OPTS = {"benchmark": [ - cfg.FloatOpt( - "monasca_metric_create_prepoll_delay", - default=15.0, - help="Delay between creating Monasca metrics and polling for " - "its elements.") -]} diff --git a/rally/plugins/openstack/cfg/murano.py b/rally/plugins/openstack/cfg/murano.py deleted file mode 100644 index e77182f1..00000000 --- a/rally/plugins/openstack/cfg/murano.py +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright 2013: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_config import cfg - -OPTS = {"benchmark": [ - cfg.IntOpt("murano_deploy_environment_timeout", default=1200, - deprecated_name="deploy_environment_timeout", - help="A timeout in seconds for an environment deploy"), - cfg.IntOpt("murano_deploy_environment_check_interval", default=5, - deprecated_name="deploy_environment_check_interval", - help="Deploy environment check interval in seconds"), -]} diff --git a/rally/plugins/openstack/cfg/neutron.py b/rally/plugins/openstack/cfg/neutron.py deleted file mode 100644 index ff20fc7a..00000000 --- a/rally/plugins/openstack/cfg/neutron.py +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright 2013: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_config import cfg - -OPTS = {"benchmark": [ - cfg.FloatOpt("neutron_create_loadbalancer_timeout", - default=float(500), - help="Neutron create loadbalancer timeout"), - cfg.FloatOpt("neutron_create_loadbalancer_poll_interval", - default=float(2), - help="Neutron create loadbalancer poll interval") -]} diff --git a/rally/plugins/openstack/cfg/nova.py b/rally/plugins/openstack/cfg/nova.py deleted file mode 100644 index 904c9357..00000000 --- a/rally/plugins/openstack/cfg/nova.py +++ /dev/null @@ -1,258 +0,0 @@ -# Copyright 2013: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_config import cfg - -OPTS = {"benchmark": [ - # prepoll delay, timeout, poll interval - # "start": (0, 300, 1) - cfg.FloatOpt("nova_server_%s_prepoll_delay" % "start", - default=float(0), - help="Time to sleep after %s before polling" - " for status" % "start"), - cfg.FloatOpt("nova_server_%s_timeout" % "start", - default=float(300), - help="Server %s timeout" % "start"), - cfg.FloatOpt("nova_server_%s_poll_interval" % "start", - default=float(1), - help="Server %s poll interval" % "start"), - # "stop": (0, 300, 2) - cfg.FloatOpt("nova_server_%s_prepoll_delay" % "stop", - default=float(0), - help="Time to sleep after %s before polling" - " for status" % "stop"), - cfg.FloatOpt("nova_server_%s_timeout" % "stop", - default=float(300), - help="Server %s timeout" % "stop"), - cfg.FloatOpt("nova_server_%s_poll_interval" % "stop", - default=float(2), - help="Server %s poll interval" % "stop"), - # "boot": (1, 300, 1) - cfg.FloatOpt("nova_server_%s_prepoll_delay" % "boot", - default=float(1), - help="Time to sleep after %s before polling" - " for status" % "boot"), - cfg.FloatOpt("nova_server_%s_timeout" % "boot", - default=float(300), - help="Server %s timeout" % "boot"), - cfg.FloatOpt("nova_server_%s_poll_interval" % "boot", - default=float(2), - help="Server %s poll interval" % "boot"), - # "delete": (2, 300, 2) - cfg.FloatOpt("nova_server_%s_prepoll_delay" % "delete", - default=float(2), - help="Time to sleep after %s before polling" - " for status" % "delete"), - cfg.FloatOpt("nova_server_%s_timeout" % "delete", - default=float(300), - help="Server %s timeout" % "delete"), - cfg.FloatOpt("nova_server_%s_poll_interval" % "delete", - default=float(2), - help="Server %s poll interval" % "delete"), - # "reboot": (2, 300, 2) - cfg.FloatOpt("nova_server_%s_prepoll_delay" % "reboot", - default=float(2), - help="Time to sleep after %s before polling" - " for status" % "reboot"), - cfg.FloatOpt("nova_server_%s_timeout" % "reboot", - default=float(300), - help="Server %s timeout" % "reboot"), - cfg.FloatOpt("nova_server_%s_poll_interval" % "reboot", - default=float(2), - help="Server %s poll interval" % "reboot"), - # "rebuild": (1, 300, 1) - cfg.FloatOpt("nova_server_%s_prepoll_delay" % "rebuild", - default=float(1), - help="Time to sleep after %s before polling" - " 
for status" % "rebuild"), - cfg.FloatOpt("nova_server_%s_timeout" % "rebuild", - default=float(300), - help="Server %s timeout" % "rebuild"), - cfg.FloatOpt("nova_server_%s_poll_interval" % "rebuild", - default=float(1), - help="Server %s poll interval" % "rebuild"), - # "rescue": (2, 300, 2) - cfg.FloatOpt("nova_server_%s_prepoll_delay" % "rescue", - default=float(2), - help="Time to sleep after %s before polling" - " for status" % "rescue"), - cfg.FloatOpt("nova_server_%s_timeout" % "rescue", - default=float(300), - help="Server %s timeout" % "rescue"), - cfg.FloatOpt("nova_server_%s_poll_interval" % "rescue", - default=float(2), - help="Server %s poll interval" % "rescue"), - # "unrescue": (2, 300, 2) - cfg.FloatOpt("nova_server_%s_prepoll_delay" % "unrescue", - default=float(2), - help="Time to sleep after %s before polling" - " for status" % "unrescue"), - cfg.FloatOpt("nova_server_%s_timeout" % "unrescue", - default=float(300), - help="Server %s timeout" % "unrescue"), - cfg.FloatOpt("nova_server_%s_poll_interval" % "unrescue", - default=float(2), - help="Server %s poll interval" % "unrescue"), - # "suspend": (2, 300, 2) - cfg.FloatOpt("nova_server_%s_prepoll_delay" % "suspend", - default=float(2), - help="Time to sleep after %s before polling" - " for status" % "suspend"), - cfg.FloatOpt("nova_server_%s_timeout" % "suspend", - default=float(300), - help="Server %s timeout" % "suspend"), - cfg.FloatOpt("nova_server_%s_poll_interval" % "suspend", - default=float(2), - help="Server %s poll interval" % "suspend"), - # "resume": (2, 300, 2) - cfg.FloatOpt("nova_server_%s_prepoll_delay" % "resume", - default=float(2), - help="Time to sleep after %s before polling" - " for status" % "resume"), - cfg.FloatOpt("nova_server_%s_timeout" % "resume", - default=float(300), - help="Server %s timeout" % "resume"), - cfg.FloatOpt("nova_server_%s_poll_interval" % "resume", - default=float(2), - help="Server %s poll interval" % "resume"), - # "pause": (2, 300, 2) - cfg.FloatOpt("nova_server_%s_prepoll_delay" % "pause", - default=float(2), - help="Time to sleep after %s before polling" - " for status" % "pause"), - cfg.FloatOpt("nova_server_%s_timeout" % "pause", - default=float(300), - help="Server %s timeout" % "pause"), - cfg.FloatOpt("nova_server_%s_poll_interval" % "pause", - default=float(2), - help="Server %s poll interval" % "pause"), - # "unpause": (2, 300, 2) - cfg.FloatOpt("nova_server_%s_prepoll_delay" % "unpause", - default=float(2), - help="Time to sleep after %s before polling" - " for status" % "unpause"), - cfg.FloatOpt("nova_server_%s_timeout" % "unpause", - default=float(300), - help="Server %s timeout" % "unpause"), - cfg.FloatOpt("nova_server_%s_poll_interval" % "unpause", - default=float(2), - help="Server %s poll interval" % "unpause"), - # "shelve": (2, 300, 2) - cfg.FloatOpt("nova_server_%s_prepoll_delay" % "shelve", - default=float(2), - help="Time to sleep after %s before polling" - " for status" % "shelve"), - cfg.FloatOpt("nova_server_%s_timeout" % "shelve", - default=float(300), - help="Server %s timeout" % "shelve"), - cfg.FloatOpt("nova_server_%s_poll_interval" % "shelve", - default=float(2), - help="Server %s poll interval" % "shelve"), - # "unshelve": (2, 300, 2) - cfg.FloatOpt("nova_server_%s_prepoll_delay" % "unshelve", - default=float(2), - help="Time to sleep after %s before polling" - " for status" % "unshelve"), - cfg.FloatOpt("nova_server_%s_timeout" % "unshelve", - default=float(300), - help="Server %s timeout" % "unshelve"), - 
cfg.FloatOpt("nova_server_%s_poll_interval" % "unshelve", - default=float(2), - help="Server %s poll interval" % "unshelve"), - # "image_create": (0, 300, 2) - cfg.FloatOpt("nova_server_%s_prepoll_delay" % "image_create", - default=float(0), - help="Time to sleep after %s before polling" - " for status" % "image_create"), - cfg.FloatOpt("nova_server_%s_timeout" % "image_create", - default=float(300), - help="Server %s timeout" % "image_create"), - cfg.FloatOpt("nova_server_%s_poll_interval" % "image_create", - default=float(2), - help="Server %s poll interval" % "image_create"), - # "image_delete": (0, 300, 2) - cfg.FloatOpt("nova_server_%s_prepoll_delay" % "image_delete", - default=float(0), - help="Time to sleep after %s before polling" - " for status" % "image_delete"), - cfg.FloatOpt("nova_server_%s_timeout" % "image_delete", - default=float(300), - help="Server %s timeout" % "image_delete"), - cfg.FloatOpt("nova_server_%s_poll_interval" % "image_delete", - default=float(2), - help="Server %s poll interval" % "image_delete"), - # "resize": (2, 400, 5) - cfg.FloatOpt("nova_server_%s_prepoll_delay" % "resize", - default=float(2), - help="Time to sleep after %s before polling" - " for status" % "resize"), - cfg.FloatOpt("nova_server_%s_timeout" % "resize", - default=float(400), - help="Server %s timeout" % "resize"), - cfg.FloatOpt("nova_server_%s_poll_interval" % "resize", - default=float(5), - help="Server %s poll interval" % "resize"), - # "resize_confirm": (0, 200, 2) - cfg.FloatOpt("nova_server_%s_prepoll_delay" % "resize_confirm", - default=float(0), - help="Time to sleep after %s before polling" - " for status" % "resize_confirm"), - cfg.FloatOpt("nova_server_%s_timeout" % "resize_confirm", - default=float(200), - help="Server %s timeout" % "resize_confirm"), - cfg.FloatOpt("nova_server_%s_poll_interval" % "resize_confirm", - default=float(2), - help="Server %s poll interval" % "resize_confirm"), - # "resize_revert": (0, 200, 2) - cfg.FloatOpt("nova_server_%s_prepoll_delay" % "resize_revert", - default=float(0), - help="Time to sleep after %s before polling" - " for status" % "resize_revert"), - cfg.FloatOpt("nova_server_%s_timeout" % "resize_revert", - default=float(200), - help="Server %s timeout" % "resize_revert"), - cfg.FloatOpt("nova_server_%s_poll_interval" % "resize_revert", - default=float(2), - help="Server %s poll interval" % "resize_revert"), - # "live_migrate": (1, 400, 2) - cfg.FloatOpt("nova_server_%s_prepoll_delay" % "live_migrate", - default=float(1), - help="Time to sleep after %s before polling" - " for status" % "live_migrate"), - cfg.FloatOpt("nova_server_%s_timeout" % "live_migrate", - default=float(400), - help="Server %s timeout" % "live_migrate"), - cfg.FloatOpt("nova_server_%s_poll_interval" % "live_migrate", - default=float(2), - help="Server %s poll interval" % "live_migrate"), - # "migrate": (1, 400, 2) - cfg.FloatOpt("nova_server_%s_prepoll_delay" % "migrate", - default=float(1), - help="Time to sleep after %s before polling" - " for status" % "migrate"), - cfg.FloatOpt("nova_server_%s_timeout" % "migrate", - default=float(400), - help="Server %s timeout" % "migrate"), - cfg.FloatOpt("nova_server_%s_poll_interval" % "migrate", - default=float(2), - help="Server %s poll interval" % "migrate"), - # "detach": - cfg.FloatOpt("nova_detach_volume_timeout", - default=float(200), - help="Nova volume detach timeout"), - cfg.FloatOpt("nova_detach_volume_poll_interval", - default=float(2), - help="Nova volume detach poll interval") -]} diff --git 
a/rally/plugins/openstack/cfg/opts.py b/rally/plugins/openstack/cfg/opts.py deleted file mode 100644 index 939051b8..00000000 --- a/rally/plugins/openstack/cfg/opts.py +++ /dev/null @@ -1,54 +0,0 @@ -# Copyright 2013: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally.plugins.openstack.cfg import cinder -from rally.plugins.openstack.cfg import ec2 -from rally.plugins.openstack.cfg import glance -from rally.plugins.openstack.cfg import heat -from rally.plugins.openstack.cfg import ironic -from rally.plugins.openstack.cfg import magnum -from rally.plugins.openstack.cfg import manila -from rally.plugins.openstack.cfg import mistral -from rally.plugins.openstack.cfg import monasca -from rally.plugins.openstack.cfg import murano -from rally.plugins.openstack.cfg import neutron -from rally.plugins.openstack.cfg import nova -from rally.plugins.openstack.cfg import profiler -from rally.plugins.openstack.cfg import sahara -from rally.plugins.openstack.cfg import senlin -from rally.plugins.openstack.cfg import vm -from rally.plugins.openstack.cfg import watcher - -from rally.plugins.openstack.cfg import tempest - -from rally.plugins.openstack.cfg import keystone_roles -from rally.plugins.openstack.cfg import keystone_users - -from rally.plugins.openstack.cfg import cleanup - - -def list_opts(): - - opts = {} - for l_opts in (cinder.OPTS, ec2.OPTS, heat.OPTS, ironic.OPTS, magnum.OPTS, - manila.OPTS, mistral.OPTS, monasca.OPTS, murano.OPTS, - nova.OPTS, profiler.OPTS, sahara.OPTS, vm.OPTS, glance.OPTS, - watcher.OPTS, tempest.OPTS, keystone_roles.OPTS, - keystone_users.OPTS, cleanup.OPTS, senlin.OPTS, - neutron.OPTS): - for category, opt in l_opts.items(): - opts.setdefault(category, []) - opts[category].extend(opt) - return opts diff --git a/rally/plugins/openstack/cfg/profiler.py b/rally/plugins/openstack/cfg/profiler.py deleted file mode 100644 index e0c290c4..00000000 --- a/rally/plugins/openstack/cfg/profiler.py +++ /dev/null @@ -1,21 +0,0 @@ -# Copyright 2017: Inria. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_config import cfg - -OPTS = {"benchmark": [ - cfg.BoolOpt("enable_profiler", default=True, - help="Enable or disable osprofiler to trace the scenarios") -]} diff --git a/rally/plugins/openstack/cfg/sahara.py b/rally/plugins/openstack/cfg/sahara.py deleted file mode 100644 index d301d983..00000000 --- a/rally/plugins/openstack/cfg/sahara.py +++ /dev/null @@ -1,36 +0,0 @@ -# Copyright 2013: Mirantis Inc. 
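list_opts() above flattens the per-service OPTS dicts into a single mapping of group name to options, which is what oslo.config consumers (option registration code, config generators) expect. A sketch of inspecting it, assuming the package is importable:

.. code-block:: python

    from rally.plugins.openstack.cfg import opts

    for category, options in opts.list_opts().items():
        print(category, len(options))
    # groups include e.g.: benchmark, cleanup, roles_context,
    # users_context, tempest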
-# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_config import cfg - -OPTS = {"benchmark": [ - cfg.IntOpt("sahara_cluster_create_timeout", default=1800, - deprecated_name="cluster_create_timeout", - help="A timeout in seconds for a cluster create operation"), - cfg.IntOpt("sahara_cluster_delete_timeout", default=900, - deprecated_name="cluster_delete_timeout", - help="A timeout in seconds for a cluster delete operation"), - cfg.IntOpt("sahara_cluster_check_interval", default=5, - deprecated_name="cluster_check_interval", - help="Cluster status polling interval in seconds"), - cfg.IntOpt("sahara_job_execution_timeout", default=600, - deprecated_name="job_execution_timeout", - help="A timeout in seconds for a Job Execution to complete"), - cfg.IntOpt("sahara_job_check_interval", default=5, - deprecated_name="job_check_interval", - help="Job Execution status polling interval in seconds"), - cfg.IntOpt("sahara_workers_per_proxy", default=20, - help="Amount of workers one proxy should serve to.") -]} diff --git a/rally/plugins/openstack/cfg/senlin.py b/rally/plugins/openstack/cfg/senlin.py deleted file mode 100644 index fd88808b..00000000 --- a/rally/plugins/openstack/cfg/senlin.py +++ /dev/null @@ -1,22 +0,0 @@ -# Copyright 2013: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_config import cfg - -OPTS = {"benchmark": [ - cfg.FloatOpt("senlin_action_timeout", - default=3600, - help="Time in seconds to wait for senlin action to finish.") -]} diff --git a/rally/plugins/openstack/cfg/tempest.py b/rally/plugins/openstack/cfg/tempest.py deleted file mode 100644 index d97e611b..00000000 --- a/rally/plugins/openstack/cfg/tempest.py +++ /dev/null @@ -1,63 +0,0 @@ -# Copyright 2013: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
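Note the deprecated_name arguments on the sahara (and murano) options above: oslo.config keeps honoring the old, unprefixed names in existing config files. A sketch:

.. code-block:: python

    from oslo_config import cfg

    CONF = cfg.CONF
    CONF.register_opts(
        [cfg.IntOpt("sahara_cluster_create_timeout", default=1800,
                    deprecated_name="cluster_create_timeout")],
        group="benchmark")

    # a config file section that still says
    #   [benchmark]
    #   cluster_create_timeout = 3600
    # is honored and surfaces as
    # CONF.benchmark.sahara_cluster_create_timeout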
- -from oslo_config import cfg - -OPTS = {"tempest": [ - cfg.StrOpt("img_url", - default="http://download.cirros-cloud.net/" - "0.3.5/cirros-0.3.5-x86_64-disk.img", - help="image URL"), - cfg.StrOpt("img_disk_format", - default="qcow2", - help="Image disk format to use when creating the image"), - cfg.StrOpt("img_container_format", - default="bare", - help="Image container format to use when creating the image"), - cfg.StrOpt("img_name_regex", - default="^.*(cirros|testvm).*$", - help="Regular expression for name of a public image to " - "discover it in the cloud and use it for the tests. " - "Note that when Rally is searching for the image, case " - "insensitive matching is performed. Specify nothing " - "('img_name_regex =') if you want to disable discovering. " - "In this case Rally will create needed resources by " - "itself if the values for the corresponding config " - "options are not specified in the Tempest config file"), - cfg.StrOpt("swift_operator_role", - default="Member", - help="Role required for users " - "to be able to create Swift containers"), - cfg.StrOpt("swift_reseller_admin_role", - default="ResellerAdmin", - help="User role that has reseller admin"), - cfg.StrOpt("heat_stack_owner_role", - default="heat_stack_owner", - help="Role required for users " - "to be able to manage Heat stacks"), - cfg.StrOpt("heat_stack_user_role", - default="heat_stack_user", - help="Role for Heat template-defined users"), - cfg.IntOpt("flavor_ref_ram", - default="64", - help="Primary flavor RAM size used by most of the test cases"), - cfg.IntOpt("flavor_ref_alt_ram", - default="128", - help="Alternate reference flavor RAM size used by test that" - "need two flavors, like those that resize an instance"), - cfg.IntOpt("heat_instance_type_ram", - default="64", - help="RAM size flavor used for orchestration test cases") -]} diff --git a/rally/plugins/openstack/cfg/vm.py b/rally/plugins/openstack/cfg/vm.py deleted file mode 100644 index f5e6b434..00000000 --- a/rally/plugins/openstack/cfg/vm.py +++ /dev/null @@ -1,10 +0,0 @@ - -from oslo_config import cfg - -OPTS = {"benchmark": [ - cfg.FloatOpt("vm_ping_poll_interval", default=1.0, - help="Interval between checks when waiting for a VM to " - "become pingable"), - cfg.FloatOpt("vm_ping_timeout", default=120.0, - help="Time to wait for a VM to become pingable") -]} diff --git a/rally/plugins/openstack/cfg/watcher.py b/rally/plugins/openstack/cfg/watcher.py deleted file mode 100644 index 90807b27..00000000 --- a/rally/plugins/openstack/cfg/watcher.py +++ /dev/null @@ -1,9 +0,0 @@ - -from oslo_config import cfg - -OPTS = {"benchmark": [ - cfg.FloatOpt("watcher_audit_launch_poll_interval", default=2.0, - help="Watcher audit launch interval"), - cfg.IntOpt("watcher_audit_launch_timeout", default=300, - help="Watcher audit launch timeout") -]} diff --git a/rally/plugins/openstack/cleanup/__init__.py b/rally/plugins/openstack/cleanup/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/rally/plugins/openstack/cleanup/base.py b/rally/plugins/openstack/cleanup/base.py deleted file mode 100644 index 5b198c00..00000000 --- a/rally/plugins/openstack/cleanup/base.py +++ /dev/null @@ -1,138 +0,0 @@ -# Copyright 2014: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-from oslo_config import cfg
-
-from rally.task import utils
-
-
-from rally.common import opts
-opts.register()
-
-CONF = cfg.CONF
-
-cleanup_group = cfg.OptGroup(name="cleanup", title="Cleanup Options")
-
-
-# NOTE(andreykurilin): There are cases when there is no way to use any kind
-# of "name" for a resource as an identifier tying it to a particular task
-# run, or even to Rally itself. Previously, we used empty strings as a
-# workaround for name matching specific templates, but theoretically such
-# behaviour can hide other cases when a resource should have a name
-# property but it is missing.
-# Let's use instances of a specific class as the name of resources
-# which do not have names at all.
-class NoName(object):
-    def __init__(self, resource_type):
-        self.resource_type = resource_type
-
-    def __repr__(self):
-        return "<NoName %s>" % self.resource_type
-
-
-def resource(service, resource, order=0, admin_required=False,
-             perform_for_admin_only=False, tenant_resource=False,
-             max_attempts=3, timeout=CONF.cleanup.resource_deletion_timeout,
-             interval=1, threads=CONF.cleanup.cleanup_threads):
-    """Decorator that overrides resource specification.
-
-    Just put it on top of your resource class and specify the arguments
-    that you need.
-
-    :param service: It is equal to the client name for the corresponding
-                    service. E.g. "nova", "cinder" or "zaqar"
-    :param resource: Client manager name for the resource. E.g. in case of
-                     nova.servers you should write here "servers"
-    :param order: Used to adjust the priority of cleanup for different
-                  resource types
-    :param admin_required: Admin user is required
-    :param perform_for_admin_only: Perform cleanup for admin user only
-    :param tenant_resource: Perform deletion only once per tenant
-    :param max_attempts: Max number of attempts to delete a single resource
-    :param timeout: Max duration of deletion in seconds
-    :param interval: Resource status polling interval
-    :param threads: Number of threads (workers) that delete resources
-                    simultaneously
-    """
-
-    def inner(cls):
-        # TODO(boris-42): This can be written better I believe =)
-        cls._service = service
-        cls._resource = resource
-        cls._order = order
-        cls._admin_required = admin_required
-        cls._perform_for_admin_only = perform_for_admin_only
-        cls._max_attempts = max_attempts
-        cls._timeout = timeout
-        cls._interval = interval
-        cls._threads = threads
-        cls._tenant_resource = tenant_resource
-
-        return cls
-
-    return inner
-
-
-@resource(service=None, resource=None)
-class ResourceManager(object):
-    """Base class for cleanup plugins for specific resources.
-
-    You should use the @resource decorator to specify the major
-    configuration of the resource manager. Usually you should specify:
-    service, resource and order.
-
-    If the project's python client is very specific, you can override the
-    delete(), list() and is_deleted() methods to make them fit your case.
- """ - - def __init__(self, resource=None, admin=None, user=None, tenant_uuid=None): - self.admin = admin - self.user = user - self.raw_resource = resource - self.tenant_uuid = tenant_uuid - - def _manager(self): - client = self._admin_required and self.admin or self.user - return getattr(getattr(client, self._service)(), self._resource) - - def id(self): - """Returns id of resource.""" - return self.raw_resource.id - - def name(self): - """Returns name of resource.""" - return self.raw_resource.name - - def is_deleted(self): - """Checks if the resource is deleted. - - Fetch resource by id from service and check it status. - In case of NotFound or status is DELETED or DELETE_COMPLETE returns - True, otherwise False. - """ - try: - resource = self._manager().get(self.id()) - except Exception as e: - return getattr(e, "code", getattr(e, "http_status", 400)) == 404 - - return utils.get_status(resource) in ("DELETED", "DELETE_COMPLETE") - - def delete(self): - """Delete resource that corresponds to instance of this class.""" - self._manager().delete(self.id()) - - def list(self): - """List all resources specific for admin or user.""" - return self._manager().list() diff --git a/rally/plugins/openstack/cleanup/manager.py b/rally/plugins/openstack/cleanup/manager.py deleted file mode 100644 index 7c8b6679..00000000 --- a/rally/plugins/openstack/cleanup/manager.py +++ /dev/null @@ -1,289 +0,0 @@ -# Copyright 2014: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import time - -from rally.common import broker -from rally.common.i18n import _ -from rally.common import logging -from rally.common.plugin import discover -from rally.common.plugin import plugin -from rally.common import utils as rutils -from rally.plugins.openstack.cleanup import base - - -LOG = logging.getLogger(__name__) - - -class SeekAndDestroy(object): - - def __init__(self, manager_cls, admin, users, api_versions=None, - resource_classes=None, task_id=None): - """Resource deletion class. - - This class contains method exterminate() that finds and deletes - all resources created by Rally. 
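A hedged usage sketch for the base class above; the service/resource names mirror the decorator docstring's own examples, and the order value is illustrative:

.. code-block:: python

    from rally.plugins.openstack.cleanup import base


    @base.resource("nova", "servers", order=300, tenant_resource=True)
    class Server(base.ResourceManager):
        """Cleanup plugin for Nova servers (illustrative)."""
        # list()/delete()/is_deleted() are inherited and only need
        # overriding when the service client deviates from the
        # manager.get/delete/list convention.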
- - :param manager_cls: subclass of base.ResourceManager - :param admin: admin credential like in context["admin"] - :param users: users credentials like in context["users"] - :param api_versions: dict of client API versions - :param resource_classes: Resource classes to match resource names - against - :param task_id: The UUID of task to match resource names against - """ - self.manager_cls = manager_cls - self.admin = admin - self.users = users or [] - self.api_versions = api_versions - self.resource_classes = resource_classes or [ - rutils.RandomNameGeneratorMixin] - self.task_id = task_id - - def _get_cached_client(self, user): - """Simplifies initialization and caching OpenStack clients.""" - if not user: - return None - # NOTE(astudenov): Credential now supports caching by default - return user["credential"].clients(api_info=self.api_versions) - - def _delete_single_resource(self, resource): - """Safe resource deletion with retries and timeouts. - - Send request to delete resource, in case of failures repeat it few - times. After that pull status of resource until it's deleted. - - Writes in LOG warning with UUID of resource that wasn't deleted - - :param resource: instance of resource manager initiated with resource - that should be deleted. - """ - - msg_kw = { - "uuid": resource.id(), - "name": resource.name() or "", - "service": resource._service, - "resource": resource._resource - } - - LOG.debug( - "Deleting %(service)s %(resource)s object %(name)s (%(uuid)s)" % - msg_kw) - - try: - rutils.retry(resource._max_attempts, resource.delete) - except Exception as e: - msg_kw["reason"] = e - LOG.warning( - _("Resource deletion failed, max retries exceeded for " - "%(service)s.%(resource)s: %(uuid)s. Reason: %(reason)s") - % msg_kw) - if logging.is_debug(): - LOG.exception(e) - else: - started = time.time() - failures_count = 0 - while time.time() - started < resource._timeout: - try: - if resource.is_deleted(): - return - except Exception as e: - LOG.warning( - _("Seems like %s.%s.is_deleted(self) method is broken " - "It shouldn't raise any exceptions.") - % (resource.__module__, type(resource).__name__)) - LOG.exception(e) - - # NOTE(boris-42): Avoid LOG spamming in case of bad - # is_deleted() method - failures_count += 1 - if failures_count > resource._max_attempts: - break - - finally: - rutils.interruptable_sleep(resource._interval) - - LOG.warning(_("Resource deletion failed, timeout occurred for " - "%(service)s.%(resource)s: %(uuid)s.") - % msg_kw) - - def _publisher(self, queue): - """Publisher for deletion jobs. - - This method iterates over all users, lists all resources - (using manager_cls) and puts jobs for deletion. - - Every deletion job contains tuple with two values: user and resource - uuid that should be deleted. - - In case of tenant based resource, uuids are fetched only from one user - per tenant. - """ - def _publish(admin, user, manager): - try: - for raw_resource in rutils.retry(3, manager.list): - queue.append((admin, user, raw_resource)) - except Exception as e: - LOG.warning( - _("Seems like %s.%s.list(self) method is broken. 
" - "It shouldn't raise any exceptions.") - % (manager.__module__, type(manager).__name__)) - LOG.exception(e) - - if self.admin and (not self.users - or self.manager_cls._perform_for_admin_only): - manager = self.manager_cls( - admin=self._get_cached_client(self.admin)) - _publish(self.admin, None, manager) - - else: - visited_tenants = set() - admin_client = self._get_cached_client(self.admin) - for user in self.users: - if (self.manager_cls._tenant_resource - and user["tenant_id"] in visited_tenants): - continue - - visited_tenants.add(user["tenant_id"]) - manager = self.manager_cls( - admin=admin_client, - user=self._get_cached_client(user), - tenant_uuid=user["tenant_id"]) - _publish(self.admin, user, manager) - - def _consumer(self, cache, args): - """Method that consumes single deletion job.""" - admin, user, raw_resource = args - - manager = self.manager_cls( - resource=raw_resource, - admin=self._get_cached_client(admin), - user=self._get_cached_client(user), - tenant_uuid=user and user["tenant_id"]) - - if (isinstance(manager.name(), base.NoName) or - rutils.name_matches_object( - manager.name(), *self.resource_classes, - task_id=self.task_id, exact=False)): - self._delete_single_resource(manager) - - def exterminate(self): - """Delete all resources for passed users, admin and resource_mgr.""" - - broker.run(self._publisher, self._consumer, - consumers_count=self.manager_cls._threads) - - -def list_resource_names(admin_required=None): - """List all resource managers names. - - Returns all service names and all combination of service.resource names. - - :param admin_required: None -> returns all ResourceManagers - True -> returns only admin ResourceManagers - False -> returns only non admin ResourceManagers - """ - res_mgrs = discover.itersubclasses(base.ResourceManager) - if admin_required is not None: - res_mgrs = filter(lambda cls: cls._admin_required == admin_required, - res_mgrs) - - names = set() - for cls in res_mgrs: - names.add(cls._service) - names.add("%s.%s" % (cls._service, cls._resource)) - - return names - - -def find_resource_managers(names=None, admin_required=None): - """Returns resource managers. - - :param names: List of names in format or . - that is used for filtering resource manager classes - :param admin_required: None -> returns all ResourceManagers - True -> returns only admin ResourceManagers - False -> returns only non admin ResourceManagers - """ - names = set(names or []) - - resource_managers = [] - for manager in discover.itersubclasses(base.ResourceManager): - if admin_required is not None: - if admin_required != manager._admin_required: - continue - - if (manager._service in names - or "%s.%s" % (manager._service, manager._resource) in names): - resource_managers.append(manager) - - resource_managers.sort(key=lambda x: x._order) - - found_names = set() - for mgr in resource_managers: - found_names.add(mgr._service) - found_names.add("%s.%s" % (mgr._service, mgr._resource)) - - missing = names - found_names - if missing: - LOG.warning("Missing resource managers: %s" % ", ".join(missing)) - - return resource_managers - - -def cleanup(names=None, admin_required=None, admin=None, users=None, - api_versions=None, superclass=plugin.Plugin, task_id=None): - """Generic cleaner. - - This method goes through all plugins. Filter those and left only plugins - with _service from services or _resource from resources. - - Then goes through all passed users and using cleaners cleans all related - resources. 
- - -def cleanup(names=None, admin_required=None, admin=None, users=None, - api_versions=None, superclass=plugin.Plugin, task_id=None): - """Generic cleaner. - - This method goes through all cleanup plugins, keeping only those whose - _service or _resource matches the requested names. It then goes through - all passed users and, using those cleaners, deletes all related resources. - - :param names: Use only resource managers that have names in this list; - entries may be given as <service> or as - <service>.<resource> - :param admin_required: If None -> use all plugins - If True -> use only admin plugins - If False -> use only non-admin plugins - :param admin: rally.deployment.credential.Credential that corresponds to - the OpenStack admin. - :param users: List of OpenStack users that were used during benchmarking. - Every user has the following structure: - { - "id": <user uuid>, - "tenant_id": <tenant uuid>, - "credential": <rally.deployment.credential.Credential> - } - :param superclass: The plugin superclass to perform cleanup - for. E.g., this could be - ``rally.task.scenario.Scenario`` to clean up all - Scenario resources. - :param task_id: The UUID of the task - """ - resource_classes = [cls for cls in discover.itersubclasses(superclass) - if issubclass(cls, rutils.RandomNameGeneratorMixin)] - if not resource_classes and issubclass(superclass, - rutils.RandomNameGeneratorMixin): - resource_classes.append(superclass) - for manager in find_resource_managers(names, admin_required): - LOG.debug("Cleaning up %(service)s %(resource)s objects" % - {"service": manager._service, - "resource": manager._resource}) - SeekAndDestroy(manager, admin, users, - api_versions=api_versions, - resource_classes=resource_classes, - task_id=task_id).exterminate()
diff --git a/rally/plugins/openstack/cleanup/resources.py b/rally/plugins/openstack/cleanup/resources.py deleted file mode 100755 index 92e9487f..00000000 --- a/rally/plugins/openstack/cleanup/resources.py +++ /dev/null @@ -1,985 +0,0 @@ -# Copyright 2014: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -
-from boto import exception as boto_exception -from neutronclient.common import exceptions as neutron_exceptions -from novaclient import exceptions as nova_exc -from oslo_config import cfg -from saharaclient.api import base as saharaclient_base - -from rally.common import logging -from rally import consts -from rally.plugins.openstack.cleanup import base -from rally.plugins.openstack.services.identity import identity -from rally.plugins.openstack.services.image import image -from rally.task import utils as task_utils - -CONF = cfg.CONF -CONF.import_opt("glance_image_delete_timeout", - "rally.plugins.openstack.scenarios.glance.utils", - "benchmark") -CONF.import_opt("glance_image_delete_poll_interval", - "rally.plugins.openstack.scenarios.glance.utils", - "benchmark") - -LOG = logging.getLogger(__name__) - -
-def get_order(start): - return iter(range(start, start + 99)) - - -class SynchronizedDeletion(object): - - def is_deleted(self): - return True - - -class QuotaMixin(SynchronizedDeletion, base.ResourceManager): - # NOTE(andreykurilin): Quota resources are quite complex in terms of - # cleanup. First of all, they do not have name or id fields at all. The - # only identifier is a reference to a Keystone Project/Tenant. Also, - # we should remove them in the existing-users case, too.
To cover both - # cases we should use project name as name field (it will allow to pass - # existing users case) and project id as id of resource - - def list(self): - if not self.tenant_uuid: - return [] - client = self._admin_required and self.admin or self.user - project = identity.Identity(client).get_project(self.tenant_uuid) - return [project] - - -# MAGNUM - -_magnum_order = get_order(80) - - -@base.resource(service=None, resource=None) -class MagnumMixin(base.ResourceManager): - - def id(self): - """Returns id of resource.""" - return self.raw_resource.uuid - - def list(self): - result = [] - marker = None - while True: - resources = self._manager().list(marker=marker) - if not resources: - break - result.extend(resources) - marker = resources[-1].uuid - return result - - -@base.resource("magnum", "clusters", order=next(_magnum_order), - tenant_resource=True) -class MagnumCluster(MagnumMixin): - """Resource class for Magnum cluster.""" - - -@base.resource("magnum", "cluster_templates", order=next(_magnum_order), - tenant_resource=True) -class MagnumClusterTemplate(MagnumMixin): - """Resource class for Magnum cluster_template.""" - - -# HEAT - -@base.resource("heat", "stacks", order=100, tenant_resource=True) -class HeatStack(base.ResourceManager): - def name(self): - return self.raw_resource.stack_name - - -# SENLIN - -_senlin_order = get_order(150) - - -@base.resource(service=None, resource=None, admin_required=True) -class SenlinMixin(base.ResourceManager): - - def id(self): - return self.raw_resource["id"] - - def _manager(self): - client = self._admin_required and self.admin or self.user - return getattr(client, self._service)() - - def list(self): - return getattr(self._manager(), self._resource)() - - def delete(self): - # make singular form of resource name from plural form - res_name = self._resource[:-1] - return getattr(self._manager(), "delete_%s" % res_name)(self.id()) - - -@base.resource("senlin", "clusters", - admin_required=True, order=next(_senlin_order)) -class SenlinCluster(SenlinMixin): - """Resource class for Senlin Cluster.""" - - -@base.resource("senlin", "profiles", order=next(_senlin_order), - admin_required=False, tenant_resource=True) -class SenlinProfile(SenlinMixin): - """Resource class for Senlin Profile.""" - - -# NOVA - -_nova_order = get_order(200) - - -@base.resource("nova", "servers", order=next(_nova_order), - tenant_resource=True) -class NovaServer(base.ResourceManager): - def list(self): - """List all servers.""" - return self._manager().list(limit=-1) - - def delete(self): - if getattr(self.raw_resource, "OS-EXT-STS:locked", False): - self.raw_resource.unlock() - super(NovaServer, self).delete() - - -@base.resource("nova", "server_groups", order=next(_nova_order), - tenant_resource=True) -class NovaServerGroups(base.ResourceManager): - pass - - -@base.resource("nova", "keypairs", order=next(_nova_order)) -class NovaKeypair(SynchronizedDeletion, base.ResourceManager): - pass - - -@base.resource("nova", "quotas", order=next(_nova_order), - admin_required=True, tenant_resource=True) -class NovaQuotas(QuotaMixin): - pass - - -@base.resource("nova", "flavors", order=next(_nova_order), - admin_required=True, perform_for_admin_only=True) -class NovaFlavors(base.ResourceManager): - pass - - def is_deleted(self): - try: - self._manager().get(self.name()) - except nova_exc.NotFound: - return True - - return False - - -@base.resource("nova", "aggregates", order=next(_nova_order), - admin_required=True, perform_for_admin_only=True) -class 
NovaAggregate(SynchronizedDeletion, base.ResourceManager): - - def delete(self): - for host in self.raw_resource.hosts: - self.raw_resource.remove_host(host) - super(NovaAggregate, self).delete() - - -# EC2 - -_ec2_order = get_order(250) - - -class EC2Mixin(object): - - def _manager(self): - return getattr(self.user, self._service)() - - -@base.resource("ec2", "servers", order=next(_ec2_order)) -class EC2Server(EC2Mixin, base.ResourceManager): - - def is_deleted(self): - try: - instances = self._manager().get_only_instances( - instance_ids=[self.id()]) - except boto_exception.EC2ResponseError as e: - # NOTE(wtakase): Nova EC2 API returns 'InvalidInstanceID.NotFound' - # if instance not found. In this case, we consider - # instance has already been deleted. - return getattr(e, "error_code") == "InvalidInstanceID.NotFound" - - # NOTE(wtakase): After instance deletion, instance can be 'terminated' - # state. If all instance states are 'terminated', this - # returns True. And if get_only_instances() returns an - # empty list, this also returns True because we consider - # instance has already been deleted. - return all(map(lambda i: i.state == "terminated", instances)) - - def delete(self): - self._manager().terminate_instances(instance_ids=[self.id()]) - - def list(self): - return self._manager().get_only_instances() - - -# NEUTRON - -_neutron_order = get_order(300) - - -@base.resource(service=None, resource=None, admin_required=True) -class NeutronMixin(SynchronizedDeletion, base.ResourceManager): - # Neutron has the best client ever, so we need to override everything - - def supports_extension(self, extension): - exts = self._manager().list_extensions().get("extensions", []) - if any(ext.get("alias") == extension for ext in exts): - return True - return False - - def _manager(self): - client = self._admin_required and self.admin or self.user - return getattr(client, self._service)() - - def id(self): - return self.raw_resource["id"] - - def name(self): - return self.raw_resource["name"] - - def delete(self): - delete_method = getattr(self._manager(), "delete_%s" % self._resource) - delete_method(self.id()) - - def list(self): - if self._resource.endswith("y"): - resources = self._resource[:-1] + "ies" - else: - resources = self._resource + "s" - list_method = getattr(self._manager(), "list_%s" % resources) - result = list_method(tenant_id=self.tenant_uuid)[resources] - if self.tenant_uuid: - result = [r for r in result if r["tenant_id"] == self.tenant_uuid] - - return result - - -class NeutronLbaasV1Mixin(NeutronMixin): - - def list(self): - if self.supports_extension("lbaas"): - return super(NeutronLbaasV1Mixin, self).list() - return [] - - -@base.resource("neutron", "vip", order=next(_neutron_order), - tenant_resource=True) -class NeutronV1Vip(NeutronLbaasV1Mixin): - pass - - -@base.resource("neutron", "health_monitor", order=next(_neutron_order), - tenant_resource=True) -class NeutronV1Healthmonitor(NeutronLbaasV1Mixin): - pass - - -@base.resource("neutron", "pool", order=next(_neutron_order), - tenant_resource=True) -class NeutronV1Pool(NeutronLbaasV1Mixin): - pass - - -class NeutronLbaasV2Mixin(NeutronMixin): - - def list(self): - if self.supports_extension("lbaasv2"): - return super(NeutronLbaasV2Mixin, self).list() - return [] - - -@base.resource("neutron", "loadbalancer", order=next(_neutron_order), - tenant_resource=True) -class NeutronV2Loadbalancer(NeutronLbaasV2Mixin): - - def is_deleted(self): - try: - self._manager().show_loadbalancer(self.id()) - except Exception as e: - 
return getattr(e, "status_code", 400) == 404 - - return False - - -@base.resource("neutron", "bgpvpn", order=next(_neutron_order), - admin_required=True, perform_for_admin_only=True) -class NeutronBgpvpn(NeutronMixin): - def list(self): - if self.supports_extension("bgpvpn"): - return self._manager().list_bgpvpns()["bgpvpns"] - return [] - - -# NOTE(andreykurilin): There are scenarios which uses unified way for creating -# and associating floating ips. They do not care about nova-net and neutron. -# We should clean floating IPs for them, but hardcoding "neutron.floatingip" -# cleanup resource should not work in case of Nova-Net. -# Since we are planning to abandon support of Nova-Network in next rally -# release, let's apply dirty workaround to handle all resources. -@base.resource("neutron", "floatingip", order=next(_neutron_order), - tenant_resource=True) -class NeutronFloatingIP(NeutronMixin): - def name(self): - return base.NoName(self._resource) - - def list(self): - if consts.ServiceType.NETWORK not in self.user.services(): - return [] - return super(NeutronFloatingIP, self).list() - - -@base.resource("neutron", "port", order=next(_neutron_order), - tenant_resource=True) -class NeutronPort(NeutronMixin): - # NOTE(andreykurilin): port is the kind of resource that can be created - # automatically. In this case it doesn't have name field which matches - # our resource name templates. But we still need to identify such - # resources, so let's do it by using parent resources. - - ROUTER_INTERFACE_OWNERS = ("network:router_interface", - "network:router_interface_distributed", - "network:ha_router_replicated_interface") - - ROUTER_GATEWAY_OWNER = "network:router_gateway" - - def __init__(self, *args, **kwargs): - super(NeutronPort, self).__init__(*args, **kwargs) - self._cache = {} - - def _get_resources(self, resource): - if resource not in self._cache: - resources = getattr(self._manager(), "list_%s" % resource)() - self._cache[resource] = [r for r in resources[resource] - if r["tenant_id"] == self.tenant_uuid] - return self._cache[resource] - - def list(self): - ports = self._get_resources("ports") - for port in ports: - if not port.get("name"): - parent_name = None - if (port["device_owner"] in self.ROUTER_INTERFACE_OWNERS or - port["device_owner"] == self.ROUTER_GATEWAY_OWNER): - # first case is a port created while adding an interface to - # the subnet - # second case is a port created while adding gateway for - # the network - port_router = [r for r in self._get_resources("routers") - if r["id"] == port["device_id"]] - if port_router: - parent_name = port_router[0]["name"] - # NOTE(andreykurilin): in case of existing network usage, - # there is no way to identify ports that was created - # automatically. 
- # FIXME(andreykurilin): find the way to filter ports created - # by rally - # elif port["device_owner"] == "network:dhcp": - # # port created while attaching a floating-ip to the VM - # if port.get("fixed_ips"): - # port_subnets = [] - # for fixedip in port["fixed_ips"]: - # port_subnets.extend( - # [sn for sn in self._get_resources("subnets") - # if sn["id"] == fixedip["subnet_id"]]) - # if port_subnets: - # parent_name = port_subnets[0]["name"] - - # NOTE(andreykurilin): the same case as for floating ips - # if not parent_name: - # port_net = [net for net in self._get_resources("networks") - # if net["id"] == port["network_id"]] - # if port_net: - # parent_name = port_net[0]["name"] - - if parent_name: - port["parent_name"] = parent_name - return ports - - def name(self): - name = self.raw_resource.get("parent_name", - self.raw_resource.get("name", "")) - return name or base.NoName(self._resource) - - def delete(self): - device_owner = self.raw_resource["device_owner"] - if (device_owner in self.ROUTER_INTERFACE_OWNERS or - device_owner == self.ROUTER_GATEWAY_OWNER): - if device_owner == self.ROUTER_GATEWAY_OWNER: - self._manager().remove_gateway_router( - self.raw_resource["device_id"]) - - self._manager().remove_interface_router( - self.raw_resource["device_id"], {"port_id": self.id()}) - else: - try: - self._manager().delete_port(self.id()) - except neutron_exceptions.PortNotFoundClient: - # Port can be already auto-deleted, skip silently - LOG.debug("Port %s was not deleted. Skip silently because " - "port can be already auto-deleted." - % self.id()) - - -@base.resource("neutron", "subnet", order=next(_neutron_order), - tenant_resource=True) -class NeutronSubnet(NeutronMixin): - pass - - -@base.resource("neutron", "network", order=next(_neutron_order), - tenant_resource=True) -class NeutronNetwork(NeutronMixin): - pass - - -@base.resource("neutron", "router", order=next(_neutron_order), - tenant_resource=True) -class NeutronRouter(NeutronMixin): - pass - - -@base.resource("neutron", "security_group", order=next(_neutron_order), - tenant_resource=True) -class NeutronSecurityGroup(NeutronMixin): - def list(self): - tenant_sgs = super(NeutronSecurityGroup, self).list() - # NOTE(pirsriva): Filter out "default" security group deletion - # by non-admin role user - return filter(lambda r: r["name"] != "default", - tenant_sgs) - - -@base.resource("neutron", "quota", order=next(_neutron_order), - admin_required=True, tenant_resource=True) -class NeutronQuota(QuotaMixin): - - def delete(self): - self.admin.neutron().delete_quota(self.tenant_uuid) - - -# CINDER - -_cinder_order = get_order(400) - - -@base.resource("cinder", "backups", order=next(_cinder_order), - tenant_resource=True) -class CinderVolumeBackup(base.ResourceManager): - pass - - -@base.resource("cinder", "volume_types", order=next(_cinder_order), - admin_required=True, perform_for_admin_only=True) -class CinderVolumeType(base.ResourceManager): - pass - - -@base.resource("cinder", "volume_snapshots", order=next(_cinder_order), - tenant_resource=True) -class CinderVolumeSnapshot(base.ResourceManager): - pass - - -@base.resource("cinder", "transfers", order=next(_cinder_order), - tenant_resource=True) -class CinderVolumeTransfer(base.ResourceManager): - pass - - -@base.resource("cinder", "volumes", order=next(_cinder_order), - tenant_resource=True) -class CinderVolume(base.ResourceManager): - pass - - -@base.resource("cinder", "image_volumes_cache", order=next(_cinder_order), - admin_required=True, perform_for_admin_only=True) 
-class CinderImageVolumeCache(base.ResourceManager): - - def _glance(self): - return image.Image(self.admin) - - def _manager(self): - return self.admin.cinder().volumes - - def list(self): - images = dict(("image-%s" % i.id, i) - for i in self._glance().list_images()) - return [{"volume": v, "image": images[v.name]} - for v in self._manager().list(search_opts={"all_tenants": 1}) - if v.name in images] - - def name(self): - return self.raw_resource["image"].name - - def id(self): - return self.raw_resource["volume"].id - - -@base.resource("cinder", "quotas", order=next(_cinder_order), - admin_required=True, tenant_resource=True) -class CinderQuotas(QuotaMixin, base.ResourceManager): - pass - - -@base.resource("cinder", "qos_specs", order=next(_cinder_order), - admin_required=True, perform_for_admin_only=True) -class CinderQos(base.ResourceManager): - pass - -# MANILA - -_manila_order = get_order(450) - - -@base.resource("manila", "shares", order=next(_manila_order), - tenant_resource=True) -class ManilaShare(base.ResourceManager): - pass - - -@base.resource("manila", "share_networks", order=next(_manila_order), - tenant_resource=True) -class ManilaShareNetwork(base.ResourceManager): - pass - - -@base.resource("manila", "security_services", order=next(_manila_order), - tenant_resource=True) -class ManilaSecurityService(base.ResourceManager): - pass - - -# GLANCE - -@base.resource("glance", "images", order=500, tenant_resource=True) -class GlanceImage(base.ResourceManager): - - def _client(self): - return image.Image(self.admin or self.user) - - def list(self): - return self._client().list_images(owner=self.tenant_uuid) - - def delete(self): - client = self._client() - client.delete_image(self.raw_resource.id) - task_utils.wait_for_status( - self.raw_resource, ["deleted"], - check_deletion=True, - update_resource=self._client().get_image, - timeout=CONF.benchmark.glance_image_delete_timeout, - check_interval=CONF.benchmark.glance_image_delete_poll_interval) - - -# SAHARA - -_sahara_order = get_order(600) - - -@base.resource("sahara", "job_executions", order=next(_sahara_order), - tenant_resource=True) -class SaharaJobExecution(SynchronizedDeletion, base.ResourceManager): - pass - - -@base.resource("sahara", "jobs", order=next(_sahara_order), - tenant_resource=True) -class SaharaJob(SynchronizedDeletion, base.ResourceManager): - pass - - -@base.resource("sahara", "job_binary_internals", order=next(_sahara_order), - tenant_resource=True) -class SaharaJobBinaryInternals(SynchronizedDeletion, base.ResourceManager): - pass - - -@base.resource("sahara", "job_binaries", order=next(_sahara_order), - tenant_resource=True) -class SaharaJobBinary(SynchronizedDeletion, base.ResourceManager): - pass - - -@base.resource("sahara", "data_sources", order=next(_sahara_order), - tenant_resource=True) -class SaharaDataSource(SynchronizedDeletion, base.ResourceManager): - pass - - -@base.resource("sahara", "clusters", order=next(_sahara_order), - tenant_resource=True) -class SaharaCluster(base.ResourceManager): - - # Need special treatment for Sahara Cluster because of the way the - # exceptions are described in: - # https://github.com/openstack/python-saharaclient/blob/master/ - # saharaclient/api/base.py#L145 - - def is_deleted(self): - try: - self._manager().get(self.id()) - return False - except saharaclient_base.APIException as e: - return e.error_code == 404 - - -@base.resource("sahara", "cluster_templates", order=next(_sahara_order), - tenant_resource=True) -class 
SaharaClusterTemplate(SynchronizedDeletion, base.ResourceManager): - pass - - -@base.resource("sahara", "node_group_templates", order=next(_sahara_order), - tenant_resource=True) -class SaharaNodeGroup(SynchronizedDeletion, base.ResourceManager): - pass - - -# CEILOMETER - -@base.resource("ceilometer", "alarms", order=700, tenant_resource=True) -class CeilometerAlarms(SynchronizedDeletion, base.ResourceManager): - - def id(self): - return self.raw_resource.alarm_id - - def list(self): - query = [{ - "field": "project_id", - "op": "eq", - "value": self.tenant_uuid - }] - return self._manager().list(q=query) - - -# ZAQAR - -@base.resource("zaqar", "queues", order=800) -class ZaqarQueues(SynchronizedDeletion, base.ResourceManager): - - def list(self): - return self.user.zaqar().queues() - - -# DESIGNATE -_designate_order = get_order(900) - - -class DesignateResource(SynchronizedDeletion, base.ResourceManager): - - # TODO(boris-42): This should be handled somewhere else. - NAME_PREFIX = "s_rally_" - - def _manager(self, resource=None): - # Map resource names to api / client version - resource = resource or self._resource - version = { - "domains": "1", - "servers": "1", - "records": "1", - "recordsets": "2", - "zones": "2" - }[resource] - - client = self._admin_required and self.admin or self.user - return getattr(getattr(client, self._service)(version), resource) - - def id(self): - """Returns id of resource.""" - return self.raw_resource["id"] - - def name(self): - """Returns name of resource.""" - return self.raw_resource["name"] - - def list(self): - return [item for item in self._manager().list() - if item["name"].startswith(self.NAME_PREFIX)] - - -@base.resource("designate", "domains", order=next(_designate_order), - tenant_resource=True, threads=1) -class DesignateDomain(DesignateResource): - pass - - -@base.resource("designate", "servers", order=next(_designate_order), - admin_required=True, perform_for_admin_only=True, threads=1) -class DesignateServer(DesignateResource): - pass - - -@base.resource("designate", "zones", order=next(_designate_order), - tenant_resource=True, threads=1) -class DesignateZones(DesignateResource): - - def list(self): - marker = None - criterion = {"name": "%s*" % self.NAME_PREFIX} - - while True: - items = self._manager().list(marker=marker, limit=100, - criterion=criterion) - if not items: - break - for item in items: - yield item - marker = items[-1]["id"] - - -# SWIFT - -_swift_order = get_order(1000) - - -class SwiftMixin(SynchronizedDeletion, base.ResourceManager): - - def _manager(self): - client = self._admin_required and self.admin or self.user - return getattr(client, self._service)() - - def id(self): - return self.raw_resource - - def name(self): - # NOTE(stpierre): raw_resource is a list of either [container - # name, object name] (as in SwiftObject) or just [container - # name] (as in SwiftContainer). - return self.raw_resource[-1] - - def delete(self): - delete_method = getattr(self._manager(), "delete_%s" % self._resource) - # NOTE(weiwu): *self.raw_resource is required because for deleting - # container we are passing only container name, to delete object we - # should pass as first argument container and second is object name. 
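# Concretely (container/object names invented for illustration):
#   SwiftContainer: raw_resource == ["c1"]        -> delete_container("c1")
#   SwiftObject:    raw_resource == ["c1", "o1"]  -> delete_object("c1", "o1")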
- delete_method(*self.raw_resource) - - -@base.resource("swift", "object", order=next(_swift_order), - tenant_resource=True) -class SwiftObject(SwiftMixin): - - def list(self): - object_list = [] - containers = self._manager().get_account(full_listing=True)[1] - for con in containers: - objects = self._manager().get_container(con["name"], - full_listing=True)[1] - for obj in objects: - raw_resource = [con["name"], obj["name"]] - object_list.append(raw_resource) - return object_list - - -@base.resource("swift", "container", order=next(_swift_order), - tenant_resource=True) -class SwiftContainer(SwiftMixin): - - def list(self): - containers = self._manager().get_account(full_listing=True)[1] - return [[con["name"]] for con in containers] - - -# MISTRAL - -_mistral_order = get_order(1100) - - -class MistralMixin(SynchronizedDeletion, base.ResourceManager): - - def delete(self): - self._manager().delete(self.raw_resource["id"]) - - -@base.resource("mistral", "workbooks", order=next(_mistral_order), - tenant_resource=True) -class MistralWorkbooks(MistralMixin): - def delete(self): - self._manager().delete(self.raw_resource["name"]) - - -@base.resource("mistral", "workflows", order=next(_mistral_order), - tenant_resource=True) -class MistralWorkflows(MistralMixin): - pass - - -@base.resource("mistral", "executions", order=next(_mistral_order), - tenant_resource=True) -class MistralExecutions(MistralMixin): - pass - - -# MURANO - -_murano_order = get_order(1200) - - -@base.resource("murano", "environments", tenant_resource=True, - order=next(_murano_order)) -class MuranoEnvironments(SynchronizedDeletion, base.ResourceManager): - pass - - -@base.resource("murano", "packages", tenant_resource=True, - order=next(_murano_order)) -class MuranoPackages(base.ResourceManager): - def list(self): - return filter(lambda x: x.name != "Core library", - super(MuranoPackages, self).list()) - - -# IRONIC - -_ironic_order = get_order(1300) - - -@base.resource("ironic", "node", admin_required=True, - order=next(_ironic_order), perform_for_admin_only=True) -class IronicNodes(base.ResourceManager): - - def id(self): - return self.raw_resource.uuid - - -# WATCHER - -_watcher_order = get_order(1500) - - -class WatcherMixin(SynchronizedDeletion, base.ResourceManager): - - def id(self): - return self.raw_resource.uuid - - def list(self): - return self._manager().list(limit=0) - - def is_deleted(self): - from watcherclient.common.apiclient import exceptions - try: - self._manager().get(self.id()) - return False - except exceptions.NotFound: - return True - - -@base.resource("watcher", "audit_template", order=next(_watcher_order), - admin_required=True, perform_for_admin_only=True) -class WatcherTemplate(WatcherMixin): - pass - - -@base.resource("watcher", "action_plan", order=next(_watcher_order), - admin_required=True, perform_for_admin_only=True) -class WatcherActionPlan(WatcherMixin): - - def name(self): - return self.raw_resource.uuid - - -@base.resource("watcher", "audit", order=next(_watcher_order), - admin_required=True, perform_for_admin_only=True) -class WatcherAudit(WatcherMixin): - - def name(self): - return self.raw_resource.uuid - - -# KEYSTONE - -_keystone_order = get_order(9000) - - -class KeystoneMixin(SynchronizedDeletion): - - def _manager(self): - return identity.Identity(self.admin) - - def delete(self): - delete_method = getattr(self._manager(), "delete_%s" % self._resource) - delete_method(self.id()) - - def list(self): - resources = self._resource + "s" - return getattr(self._manager(), "list_%s" % 
resources)() - - -@base.resource("keystone", "user", order=next(_keystone_order), - admin_required=True, perform_for_admin_only=True) -class KeystoneUser(KeystoneMixin, base.ResourceManager): - pass - - -@base.resource("keystone", "project", order=next(_keystone_order), - admin_required=True, perform_for_admin_only=True) -class KeystoneProject(KeystoneMixin, base.ResourceManager): - pass - - -@base.resource("keystone", "service", order=next(_keystone_order), - admin_required=True, perform_for_admin_only=True) -class KeystoneService(KeystoneMixin, base.ResourceManager): - pass - - -@base.resource("keystone", "role", order=next(_keystone_order), - admin_required=True, perform_for_admin_only=True) -class KeystoneRole(KeystoneMixin, base.ResourceManager): - pass - - -# NOTE(andreykurilin): unfortunately, ec2 credentials doesn't have name -# and id fields. It makes impossible to identify resources belonging to -# particular task. -@base.resource("keystone", "ec2", tenant_resource=True, - order=next(_keystone_order)) -class KeystoneEc2(SynchronizedDeletion, base.ResourceManager): - def _manager(self): - return identity.Identity(self.user) - - def id(self): - return "n/a" - - def name(self): - return base.NoName(self._resource) - - @property - def user_id(self): - return self.user.keystone.auth_ref.user_id - - def list(self): - return self._manager().list_ec2credentials(self.user_id) - - def delete(self): - self._manager().delete_ec2credential( - self.user_id, access=self.raw_resource.access) diff --git a/rally/plugins/openstack/context/__init__.py b/rally/plugins/openstack/context/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/rally/plugins/openstack/context/api_versions.py b/rally/plugins/openstack/context/api_versions.py deleted file mode 100644 index 6cbfc8eb..00000000 --- a/rally/plugins/openstack/context/api_versions.py +++ /dev/null @@ -1,265 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
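Stepping back to the KeystoneMixin above: it builds client calls by string concatenation, so each registered resource maps onto a pair of Identity-wrapper methods. A sketch for the "role" resource (admin stands in for the admin clients object the mixin holds; both names are assumptions of the sketch):

from rally.plugins.openstack.services.identity import identity

ident = identity.Identity(admin)   # admin: assumed admin clients object
ident.delete_role(role_id)         # what KeystoneRole.delete() dispatches to
roles = ident.list_roles()         # "role" + "s" -> list_roles()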
- -import random - -from rally.common.i18n import _ -from rally.common import validation -from rally import consts -from rally import exceptions -from rally import osclients -from rally.task import context - - -@validation.configure("check_api_versions") -class CheckOpenStackAPIVersionsValidator(validation.Validator): - """Additional validation for api_versions context""" - - def validate(self, credentials, config, plugin_cls, plugin_cfg): - for client in plugin_cfg: - client_cls = osclients.OSClient.get(client) - try: - if ("service_type" in plugin_cfg[client] or - "service_name" in plugin_cfg[client]): - client_cls.is_service_type_configurable() - - if "version" in plugin_cfg[client]: - client_cls.validate_version(plugin_cfg[client]["version"]) - - except exceptions.RallyException as e: - return self.fail( - "Invalid settings for '%(client)s': %(error)s" % { - "client": client, - "error": e.format_message()}) - - -@validation.add("check_api_versions") -@context.configure(name="api_versions", platform="openstack", order=150) -class OpenStackAPIVersions(context.Context): - """Context for specifying OpenStack clients versions and service types. - - Some OpenStack services support several API versions. To recognize - the endpoints of each version, separate service types are provided in - Keystone service catalog. - - Rally has the map of default service names - service types. But since - service type is an entity, which can be configured manually by admin( - via keystone api) without relation to service name, such map can be - insufficient. - - Also, Keystone service catalog does not provide a map types to name - (this statement is true for keystone < 3.3 ). - - This context was designed for not-default service types and not-default - API versions usage. - - An example of specifying API version: - - .. code-block:: json - - # In this example we will launch NovaKeypair.create_and_list_keypairs - # scenario on 2.2 api version. - { - "NovaKeypair.create_and_list_keypairs": [ - { - "args": { - "key_type": "x509" - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 2 - }, - "api_versions": { - "nova": { - "version": 2.2 - } - } - } - } - ] - } - - An example of specifying API version along with service type: - - .. code-block:: json - - # In this example we will launch CinderVolumes.create_and_attach_volume - # scenario on Cinder V2 - { - "CinderVolumes.create_and_attach_volume": [ - { - "args": { - "size": 10, - "image": { - "name": "^cirros.*-disk$" - }, - "flavor": { - "name": "m1.tiny" - }, - "create_volume_params": { - "availability_zone": "nova" - } - }, - "runner": { - "type": "constant", - "times": 5, - "concurrency": 1 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - }, - "api_versions": { - "cinder": { - "version": 2, - "service_type": "volumev2" - } - } - } - } - ] - } - - Also, it possible to use service name as an identifier of service endpoint, - but an admin user is required (Keystone can return map of service - names - types, but such API is permitted only for admin). An example: - - .. 
code-block:: json - - # Similar to the previous example, but `service_name` argument is used - # instead of `service_type` - { - "CinderVolumes.create_and_attach_volume": [ - { - "args": { - "size": 10, - "image": { - "name": "^cirros.*-disk$" - }, - "flavor": { - "name": "m1.tiny" - }, - "create_volume_params": { - "availability_zone": "nova" - } - }, - "runner": { - "type": "constant", - "times": 5, - "concurrency": 1 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - }, - "api_versions": { - "cinder": { - "version": 2, - "service_name": "cinderv2" - } - } - } - } - ] - } - - """ - VERSION_SCHEMA = { - "anyOf": [ - {"type": "string", "description": "a string-like version."}, - {"type": "number", "description": "a number-like version."} - ] - } - CONFIG_SCHEMA = { - "type": "object", - "$schema": consts.JSON_SCHEMA, - "patternProperties": { - "^[a-z]+$": { - "type": "object", - "oneOf": [ - { - "description": "version only", - "properties": { - "version": VERSION_SCHEMA, - }, - "required": ["version"], - "additionalProperties": False - }, - { - "description": "version and service_name", - "properties": { - "version": VERSION_SCHEMA, - "service_name": {"type": "string"} - }, - "required": ["service_name"], - "additionalProperties": False - }, - { - "description": "version and service_type", - "properties": { - "version": VERSION_SCHEMA, - "service_type": {"type": "string"} - }, - "required": ["service_type"], - "additionalProperties": False - } - ], - } - }, - "minProperties": 1, - "additionalProperties": False - } - - def setup(self): - # FIXME(andreykurilin): move all checks to validate method. - - # use admin only when `service_name` is presented - admin_clients = osclients.Clients( - self.context.get("admin", {}).get("credential")) - clients = osclients.Clients(random.choice( - self.context["users"])["credential"]) - services = clients.keystone.service_catalog.get_endpoints() - services_from_admin = None - for client_name, conf in self.config.items(): - if "service_type" in conf and conf["service_type"] not in services: - raise exceptions.ValidationError(_( - "There is no service with '%s' type in your environment.") - % conf["service_type"]) - elif "service_name" in conf: - if not self.context.get("admin", {}).get("credential"): - raise exceptions.ContextSetupFailure( - ctx_name=self.get_name(), - msg=_("Setting 'service_name' is allowed" - " only for 'admin' user.")) - if not services_from_admin: - services_from_admin = dict( - [(s.name, s.type) - for s in admin_clients.keystone().services.list()]) - if conf["service_name"] not in services_from_admin: - raise exceptions.ValidationError( - _("There is no '%s' service in your environment") % - conf["service_name"]) - - self.context["config"]["api_versions"][client_name][ - "service_type"] = services_from_admin[conf["service_name"]] - - def cleanup(self): - # nothing to do here - pass diff --git a/rally/plugins/openstack/context/ceilometer/__init__.py b/rally/plugins/openstack/context/ceilometer/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/rally/plugins/openstack/context/ceilometer/samples.py b/rally/plugins/openstack/context/ceilometer/samples.py deleted file mode 100644 index 65fff570..00000000 --- a/rally/plugins/openstack/context/ceilometer/samples.py +++ /dev/null @@ -1,155 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from six import moves - -from rally.common.i18n import _ -from rally.common import logging -from rally.common import utils as rutils -from rally.common import validation -from rally import consts -from rally import exceptions -from rally.plugins.openstack.scenarios.ceilometer import utils as ceilo_utils -from rally.task import context - - -LOG = logging.getLogger(__name__) - - -@validation.add("required_platform", platform="openstack", users=True) -@context.configure(name="ceilometer", platform="openstack", order=450) -class CeilometerSampleGenerator(context.Context): - """Context for creating samples and collecting resources for benchmarks.""" - - CONFIG_SCHEMA = { - "type": "object", - "$schema": consts.JSON_SCHEMA, - "properties": { - "counter_name": { - "type": "string" - }, - "counter_type": { - "type": "string" - }, - "counter_unit": { - "type": "string" - }, - "counter_volume": { - "type": "number", - "minimum": 0 - }, - "resources_per_tenant": { - "type": "integer", - "minimum": 1 - }, - "samples_per_resource": { - "type": "integer", - "minimum": 1 - }, - "timestamp_interval": { - "type": "integer", - "minimum": 1 - }, - "metadata_list": { - "type": "array", - "items": { - "type": "object", - "properties": { - "status": { - "type": "string" - }, - "name": { - "type": "string" - }, - "deleted": { - "type": "string" - }, - "created_at": { - "type": "string" - } - } - } - }, - "batch_size": { - "type": "integer", - "minimum": 1 - }, - "batches_allow_lose": { - "type": "integer", - "minimum": 0 - } - }, - "required": ["counter_name", "counter_type", "counter_unit", - "counter_volume"], - "additionalProperties": False - } - - DEFAULT_CONFIG = { - "resources_per_tenant": 5, - "samples_per_resource": 5, - "timestamp_interval": 60 - } - - def _store_batch_samples(self, scenario, batches, batches_allow_lose): - batches_allow_lose = batches_allow_lose or 0 - unsuccess = 0 - for i, batch in enumerate(batches, start=1): - try: - samples = scenario._create_samples(batch) - except Exception: - unsuccess += 1 - LOG.warning(_("Failed to store batch %d of Ceilometer samples" - " during context creation") % i) - if unsuccess > batches_allow_lose: - raise exceptions.ContextSetupFailure( - ctx_name=self.get_name(), - msg=_("Context failed to store too many batches of samples")) - return samples - - @logging.log_task_wrapper(LOG.info, _("Enter context: `Ceilometer`")) - def setup(self): - new_sample = { - "counter_name": self.config["counter_name"], - "counter_type": self.config["counter_type"], - "counter_unit": self.config["counter_unit"], - "counter_volume": self.config["counter_volume"], - } - for user, tenant_id in rutils.iterate_per_tenants( - self.context["users"]): - self.context["tenants"][tenant_id]["samples"] = [] - self.context["tenants"][tenant_id]["resources"] = [] - scenario = ceilo_utils.CeilometerScenario( - context={"user": user, "task": self.context["task"]} - ) - for i in moves.xrange(self.config["resources_per_tenant"]): - samples_to_create = scenario._make_samples( - count=self.config["samples_per_resource"], - interval=self.config["timestamp_interval"], - 
metadata_list=self.config.get("metadata_list"), - batch_size=self.config.get("batch_size"), - **new_sample) - samples = self._store_batch_samples( - scenario, samples_to_create, - self.config.get("batches_allow_lose") - ) - for sample in samples: - self.context["tenants"][tenant_id]["samples"].append( - sample.to_dict()) - self.context["tenants"][tenant_id]["resources"].append( - samples[0].resource_id) - - @logging.log_task_wrapper(LOG.info, _("Exit context: `Ceilometer`")) - def cleanup(self): - # We don't have API for removal of samples and resources - pass diff --git a/rally/plugins/openstack/context/cinder/__init__.py b/rally/plugins/openstack/context/cinder/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/rally/plugins/openstack/context/cinder/volume_types.py b/rally/plugins/openstack/context/cinder/volume_types.py deleted file mode 100644 index dd676978..00000000 --- a/rally/plugins/openstack/context/cinder/volume_types.py +++ /dev/null @@ -1,62 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally.common.i18n import _ -from rally.common import logging -from rally.common import utils -from rally.common import validation -from rally import consts -from rally import osclients -from rally.plugins.openstack.cleanup import manager as resource_manager -from rally.plugins.openstack.services.storage import block -from rally.task import context - - -LOG = logging.getLogger(__name__) - - -@validation.add("required_platform", platform="openstack", admin=True) -@context.configure(name="volume_types", platform="openstack", order=410) -class VolumeTypeGenerator(context.Context): - """Context class for adding volumes types for benchmarks.""" - - CONFIG_SCHEMA = { - "type": "array", - "$schema": consts.JSON_SCHEMA, - "items": {"type": "string"} - } - - @logging.log_task_wrapper(LOG.info, _("Enter context: `volume_types`")) - def setup(self): - admin_clients = osclients.Clients( - self.context.get("admin", {}).get("credential"), - api_info=self.context["config"].get("api_versions")) - cinder_service = block.BlockStorage( - admin_clients, name_generator=self.generate_random_name) - self.context["volume_types"] = [] - for vtype_name in self.config: - LOG.debug("Creating Cinder volume type %s" % vtype_name) - vtype = cinder_service.create_volume_type(vtype_name) - self.context["volume_types"].append({"id": vtype.id, - "name": vtype_name}) - - @logging.log_task_wrapper(LOG.info, _("Exit context: `volume_types`")) - def cleanup(self): - mather = utils.make_name_matcher(*self.config) - resource_manager.cleanup( - names=["cinder.volume_types"], - admin=self.context["admin"], - api_versions=self.context["config"].get("api_versions"), - superclass=mather, - task_id=self.get_owner_id()) diff --git a/rally/plugins/openstack/context/cinder/volumes.py b/rally/plugins/openstack/context/cinder/volumes.py deleted file mode 100644 index f1fc37b8..00000000 --- a/rally/plugins/openstack/context/cinder/volumes.py +++ /dev/null @@ -1,88 +0,0 @@ -# All Rights 
Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally.common.i18n import _ -from rally.common import logging -from rally.common import utils as rutils -from rally import consts -from rally import osclients -from rally.plugins.openstack.cleanup import manager as resource_manager -from rally.plugins.openstack.services.storage import block -from rally.task import context - - -LOG = logging.getLogger(__name__) - - -@context.configure(name="volumes", platform="openstack", order=420) -class VolumeGenerator(context.Context): - """Context class for adding volumes to each user for benchmarks.""" - - CONFIG_SCHEMA = { - "type": "object", - "$schema": consts.JSON_SCHEMA, - "properties": { - "size": { - "type": "integer", - "minimum": 1 - }, - "type": { - "oneOf": [{"type": "string", - "description": "a string-like type of volume to " - "create."}, - {"type": "null", - "description": "Use default type for volume to " - "create."}] - }, - "volumes_per_tenant": { - "type": "integer", - "minimum": 1 - } - }, - "required": ["size"], - "additionalProperties": False - } - - DEFAULT_CONFIG = { - "volumes_per_tenant": 1 - } - - @logging.log_task_wrapper(LOG.info, _("Enter context: `Volumes`")) - def setup(self): - size = self.config["size"] - volume_type = self.config.get("type", None) - volumes_per_tenant = self.config["volumes_per_tenant"] - - for user, tenant_id in rutils.iterate_per_tenants( - self.context["users"]): - self.context["tenants"][tenant_id].setdefault("volumes", []) - clients = osclients.Clients( - user["credential"], - api_info=self.context["config"].get("api_versions")) - cinder_service = block.BlockStorage( - clients, name_generator=self.generate_random_name) - for i in range(volumes_per_tenant): - vol = cinder_service.create_volume(size, - volume_type=volume_type) - self.context["tenants"][tenant_id]["volumes"].append( - vol._asdict()) - - @logging.log_task_wrapper(LOG.info, _("Exit context: `Volumes`")) - def cleanup(self): - resource_manager.cleanup( - names=["cinder.volumes"], - users=self.context.get("users", []), - api_versions=self.context["config"].get("api_versions"), - superclass=self.__class__, - task_id=self.get_owner_id()) diff --git a/rally/plugins/openstack/context/cleanup/__init__.py b/rally/plugins/openstack/context/cleanup/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/rally/plugins/openstack/context/cleanup/admin.py b/rally/plugins/openstack/context/cleanup/admin.py deleted file mode 100644 index 3a300872..00000000 --- a/rally/plugins/openstack/context/cleanup/admin.py +++ /dev/null @@ -1,46 +0,0 @@ -# Copyright 2014: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import sys - -from rally.common.i18n import _ -from rally.common import logging -from rally.common import validation -from rally.plugins.openstack.cleanup import manager -from rally.plugins.openstack.context.cleanup import base -from rally.plugins.openstack import scenario -from rally.task import context - - -LOG = logging.getLogger(__name__) - - -@validation.add(name="check_cleanup_resources", admin_required=True) -# NOTE(amaretskiy): Set order to run this just before UserCleanup -@context.configure(name="admin_cleanup", platform="openstack", - order=(sys.maxsize - 1), hidden=True) -class AdminCleanup(base.CleanupMixin, context.Context): - """Context class for admin resources cleanup.""" - - @logging.log_task_wrapper(LOG.info, _("admin resources cleanup")) - def cleanup(self): - manager.cleanup( - names=self.config, - admin_required=True, - admin=self.context["admin"], - users=self.context.get("users", []), - api_versions=self.context["config"].get("api_versions"), - superclass=scenario.OpenStackScenario, - task_id=self.get_owner_id()) diff --git a/rally/plugins/openstack/context/cleanup/base.py b/rally/plugins/openstack/context/cleanup/base.py deleted file mode 100644 index 9f39a2c3..00000000 --- a/rally/plugins/openstack/context/cleanup/base.py +++ /dev/null @@ -1,53 +0,0 @@ -# Copyright 2014: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
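The names accepted by these cleanup contexts are validated up front; a small sketch of what the check_cleanup_resources validator (defined just below) effectively computes, assuming the standard resource plugins are loaded:

from rally.plugins.openstack.cleanup import manager

requested = {"nova.flavors", "keystone", "not_a_real_service"}
missing = requested - manager.list_resource_names(admin_required=True)
# -> {"not_a_real_service"}: exactly the set the validator reports.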
- -from rally.common import validation -from rally import consts -from rally.plugins.openstack.cleanup import manager - - -@validation.configure("check_cleanup_resources") -class CheckCleanupResourcesValidator(validation.Validator): - - def __init__(self, admin_required): - """Validates that openstack resource managers exist - - :param admin_required: describes access level to resource - """ - super(CheckCleanupResourcesValidator, self).__init__() - self.admin_required = admin_required - - def validate(self, credentials, config, plugin_cls, plugin_cfg): - missing = set(plugin_cfg) - missing -= manager.list_resource_names( - admin_required=self.admin_required) - missing = ", ".join(missing) - if missing: - return self.fail( - "Couldn't find cleanup resource managers: %s" % missing) - - -class CleanupMixin(object): - - CONFIG_SCHEMA = { - "type": "array", - "$schema": consts.JSON_SCHEMA, - "items": { - "type": "string", - } - } - - def setup(self): - pass diff --git a/rally/plugins/openstack/context/cleanup/user.py b/rally/plugins/openstack/context/cleanup/user.py deleted file mode 100644 index fe27bb22..00000000 --- a/rally/plugins/openstack/context/cleanup/user.py +++ /dev/null @@ -1,46 +0,0 @@ -# Copyright 2014: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import sys - -from rally.common.i18n import _ -from rally.common import logging -from rally.common import validation -from rally.plugins.openstack.cleanup import manager -from rally.plugins.openstack.context.cleanup import base -from rally.plugins.openstack import scenario -from rally.task import context - - -LOG = logging.getLogger(__name__) - - -@validation.add(name="check_cleanup_resources", admin_required=False) -# NOTE(amaretskiy): Set maximum order to run this last -@context.configure(name="cleanup", platform="openstack", order=sys.maxsize, - hidden=True) -class UserCleanup(base.CleanupMixin, context.Context): - """Context class for user resources cleanup.""" - - @logging.log_task_wrapper(LOG.info, _("user resources cleanup")) - def cleanup(self): - manager.cleanup( - names=self.config, - admin_required=False, - users=self.context.get("users", []), - api_versions=self.context["config"].get("api_versions"), - superclass=scenario.OpenStackScenario, - task_id=self.get_owner_id() - ) diff --git a/rally/plugins/openstack/context/dataplane/__init__.py b/rally/plugins/openstack/context/dataplane/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/rally/plugins/openstack/context/dataplane/heat.py b/rally/plugins/openstack/context/dataplane/heat.py deleted file mode 100644 index 2cbb49f9..00000000 --- a/rally/plugins/openstack/context/dataplane/heat.py +++ /dev/null @@ -1,154 +0,0 @@ -# Copyright 2016: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import pkgutil
-
-from rally.common.i18n import _
-from rally.common import logging
-from rally.common import utils as rutils
-from rally.common import validation
-from rally import consts
-from rally import exceptions
-from rally import osclients
-from rally.plugins.openstack.cleanup import manager as resource_manager
-from rally.plugins.openstack.scenarios.heat import utils as heat_utils
-from rally.task import context
-
-LOG = logging.getLogger(__name__)
-
-
-def get_data(filename_or_resource):
-    if isinstance(filename_or_resource, list):
-        return pkgutil.get_data(*filename_or_resource)
-    return open(filename_or_resource).read()
-
-
-@validation.add("required_platform", platform="openstack", users=True)
-@context.configure(name="heat_dataplane", platform="openstack", order=435)
-class HeatDataplane(context.Context):
-    """Context class for creating a stack from a given template.
-
-    This context creates a stack from the given template for each tenant
-    and adds the following details to the context: the id of the stack,
-    the template file contents, the files dictionary and the stack
-    parameters.
-
-    The Heat template should define a "gate" node, which will interact
-    with Rally over ssh and with the workload nodes over any protocol.
-    To make this possible, the heat template should accept the following
-    parameters:
-
-      network_id: id of the public network
-      router_id: id of the external router to connect the "gate" node
-      key_name: name of the nova ssh keypair to use for the "gate" node
-    """
-    FILE_SCHEMA = {
-        "description": "",
-        "type": "string",
-    }
-    RESOURCE_SCHEMA = {
-        "description": "",
-        "type": "array",
-        "minItems": 2,
-        "maxItems": 2,
-        "items": {"type": "string"}
-    }
-    CONFIG_SCHEMA = {
-        "type": "object",
-        "$schema": consts.JSON_SCHEMA,
-        "properties": {
-            "stacks_per_tenant": {
-                "type": "integer",
-                "minimum": 1
-            },
-            "template": {
-                "oneOf": [FILE_SCHEMA, RESOURCE_SCHEMA],
-            },
-            "files": {
-                "type": "object",
-            },
-            "parameters": {
-                "type": "object",
-            },
-            "context_parameters": {
-                "type": "object",
-            },
-        },
-        "additionalProperties": False
-    }
-
-    DEFAULT_CONFIG = {
-        "stacks_per_tenant": 1,
-    }
-
-    def _get_context_parameter(self, user, tenant_id, path):
-        value = {"user": user, "tenant": self.context["tenants"][tenant_id]}
-        for key in path.split("."):
-            try:
-                # try to cast the string to int in order to support integer
-                # keys, e.g. 'spam.1.eggs' will be translated to
-                # ["spam"][1]["eggs"]
-                key = int(key)
-            except ValueError:
-                pass
-            try:
-                value = value[key]
-            except KeyError:
-                raise exceptions.RallyException(
-                    "There is no key %s in context" % path)
-        return value
-
-    def _get_public_network_id(self):
-        nc = osclients.Clients(self.context["admin"]["credential"]).neutron()
-        networks = nc.list_networks(**{"router:external": True})["networks"]
-        return networks[0]["id"]
-
-    @logging.log_task_wrapper(LOG.info, _("Enter context: `HeatDataplane`"))
-    def setup(self):
-        template = get_data(self.config["template"])
-        files = {}
-        for key, filename in self.config.get("files", {}).items():
-            files[key] = get_data(filename)
-        parameters = self.config.get("parameters", rutils.LockedDict())
-        with parameters.unlocked():
if "network_id" not in parameters: - parameters["network_id"] = self._get_public_network_id() - for user, tenant_id in rutils.iterate_per_tenants( - self.context["users"]): - for name, path in self.config.get("context_parameters", - {}).items(): - parameters[name] = self._get_context_parameter(user, - tenant_id, - path) - if "router_id" not in parameters: - networks = self.context["tenants"][tenant_id]["networks"] - parameters["router_id"] = networks[0]["router_id"] - if "key_name" not in parameters: - parameters["key_name"] = user["keypair"]["name"] - heat_scenario = heat_utils.HeatScenario( - {"user": user, "task": self.context["task"], - "owner_id": self.context["owner_id"]}) - self.context["tenants"][tenant_id]["stack_dataplane"] = [] - for i in range(self.config["stacks_per_tenant"]): - stack = heat_scenario._create_stack(template, files=files, - parameters=parameters) - tenant_data = self.context["tenants"][tenant_id] - tenant_data["stack_dataplane"].append([stack.id, template, - files, parameters]) - - @logging.log_task_wrapper(LOG.info, _("Exit context: `HeatDataplane`")) - def cleanup(self): - resource_manager.cleanup(names=["heat.stacks"], - users=self.context.get("users", []), - superclass=heat_utils.HeatScenario, - task_id=self.get_owner_id()) diff --git a/rally/plugins/openstack/context/designate/__init__.py b/rally/plugins/openstack/context/designate/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/rally/plugins/openstack/context/designate/zones.py b/rally/plugins/openstack/context/designate/zones.py deleted file mode 100644 index b969374d..00000000 --- a/rally/plugins/openstack/context/designate/zones.py +++ /dev/null @@ -1,67 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from rally.common.i18n import _ -from rally.common import logging -from rally.common import utils as rutils -from rally.common import validation -from rally import consts -from rally.plugins.openstack.cleanup import manager as resource_manager -from rally.plugins.openstack.scenarios.designate import utils -from rally.task import context - - -LOG = logging.getLogger(__name__) - - -@validation.add("required_platform", platform="openstack", users=True) -@context.configure(name="zones", platform="openstack", order=600) -class ZoneGenerator(context.Context): - """Context to add `zones_per_tenant` zones for each tenant.""" - - CONFIG_SCHEMA = { - "type": "object", - "$schema": consts.JSON_SCHEMA, - "properties": { - "zones_per_tenant": { - "type": "integer", - "minimum": 1 - }, - }, - "additionalProperties": False - } - - DEFAULT_CONFIG = { - "zones_per_tenant": 1 - } - - @logging.log_task_wrapper(LOG.info, _("Enter context: `Zones`")) - def setup(self): - for user, tenant_id in rutils.iterate_per_tenants( - self.context["users"]): - self.context["tenants"][tenant_id].setdefault("zones", []) - designate_util = utils.DesignateScenario( - {"user": user, - "task": self.context["task"], - "owner_id": self.context["owner_id"]}) - for i in range(self.config["zones_per_tenant"]): - zone = designate_util._create_zone() - self.context["tenants"][tenant_id]["zones"].append(zone) - - @logging.log_task_wrapper(LOG.info, _("Exit context: `Zones`")) - def cleanup(self): - resource_manager.cleanup(names=["designate.zones"], - users=self.context.get("users", []), - superclass=utils.DesignateScenario, - task_id=self.get_owner_id()) diff --git a/rally/plugins/openstack/context/ec2/__init__.py b/rally/plugins/openstack/context/ec2/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/rally/plugins/openstack/context/ec2/servers.py b/rally/plugins/openstack/context/ec2/servers.py deleted file mode 100644 index a133c7a3..00000000 --- a/rally/plugins/openstack/context/ec2/servers.py +++ /dev/null @@ -1,102 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally.common.i18n import _ -from rally.common import logging -from rally.common import utils as rutils -from rally import consts -from rally import osclients -from rally.plugins.openstack.cleanup import manager as resource_manager -from rally.plugins.openstack.scenarios.ec2 import utils as ec2_utils -from rally.plugins.openstack import types -from rally.task import context - - -LOG = logging.getLogger(__name__) - - -@context.configure(name="ec2_servers", platform="openstack", order=460) -class EC2ServerGenerator(context.Context): - """Context class for adding temporary servers for benchmarks. - - Servers are added for each tenant. 
- """ - - CONFIG_SCHEMA = { - "type": "object", - "$schema": consts.JSON_SCHEMA, - "properties": { - "image": { - "type": "object", - "properties": { - "name": { - "type": "string" - } - } - }, - "flavor": { - "type": "object", - "properties": { - "name": { - "type": "string" - } - } - }, - "servers_per_tenant": { - "type": "integer", - "minimum": 1 - } - }, - "required": ["image", "flavor", "servers_per_tenant"], - "additionalProperties": False - } - - @logging.log_task_wrapper(LOG.info, _("Enter context: `EC2 Servers`")) - def setup(self): - image = self.config["image"] - flavor = self.config["flavor"] - - clients = osclients.Clients(self.context["users"][0]["credential"]) - image_id = types.EC2Image.transform(clients=clients, - resource_config=image) - - for user, tenant_id in rutils.iterate_per_tenants( - self.context["users"]): - LOG.debug("Booting servers for tenant %s " - % (user["tenant_id"])) - ec2_scenario = ec2_utils.EC2Scenario({ - "user": user, - "task": self.context["task"], - "owner_id": self.context["owner_id"]}) - - LOG.debug( - "Calling _boot_servers with " - "image_id={image_id} flavor_name={flavor_name} " - "servers_per_tenant={servers_per_tenant}".format( - image_id=image_id, flavor_name=flavor["name"], - servers_per_tenant=self.config["servers_per_tenant"])) - - servers = ec2_scenario._boot_servers( - image_id, flavor["name"], self.config["servers_per_tenant"]) - - current_servers = [server.id for server in servers] - - self.context["tenants"][tenant_id]["ec2_servers"] = current_servers - - @logging.log_task_wrapper(LOG.info, _("Exit context: `EC2 Servers`")) - def cleanup(self): - resource_manager.cleanup(names=["ec2.servers"], - users=self.context.get("users", []), - superclass=ec2_utils.EC2Scenario, - task_id=self.get_owner_id()) diff --git a/rally/plugins/openstack/context/glance/__init__.py b/rally/plugins/openstack/context/glance/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/rally/plugins/openstack/context/glance/images.py b/rally/plugins/openstack/context/glance/images.py deleted file mode 100644 index de143ea7..00000000 --- a/rally/plugins/openstack/context/glance/images.py +++ /dev/null @@ -1,215 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-
-from oslo_config import cfg
-
-from rally.common.i18n import _
-from rally.common import logging
-from rally.common import utils as rutils
-from rally.common import validation
-from rally import consts
-from rally import osclients
-from rally.plugins.openstack.cleanup import manager as resource_manager
-from rally.plugins.openstack.services.image import image
-from rally.task import context
-
-CONF = cfg.CONF
-CONF.import_opt("glance_image_delete_timeout",
-                "rally.plugins.openstack.scenarios.glance.utils",
-                "benchmark")
-CONF.import_opt("glance_image_delete_poll_interval",
-                "rally.plugins.openstack.scenarios.glance.utils",
-                "benchmark")
-
-LOG = logging.getLogger(__name__)
-
-
-@validation.add("required_platform", platform="openstack", users=True)
-@context.configure(name="images", platform="openstack", order=410)
-class ImageGenerator(context.Context):
-    """Context class for adding images to each user for benchmarks."""
-
-    CONFIG_SCHEMA = {
-        "type": "object",
-        "$schema": consts.JSON_SCHEMA,
-        "properties": {
-            "image_url": {
-                "type": "string",
-                "description": "Location of the source to create the image "
-                               "from."
-            },
-            "disk_format": {
-                "description": "The format of the disk.",
-                "enum": ["qcow2", "raw", "vhd", "vmdk", "vdi", "iso", "aki",
-                         "ari", "ami"]
-            },
-            "container_format": {
-                "description": "Format of the image container.",
-                "enum": ["aki", "ami", "ari", "bare", "docker", "ova", "ovf"]
-            },
-            "image_name": {
-                "type": "string",
-                "description": "The name of the image to create. NOTE: it "
-                               "will be ignored when `images_per_tenant` is "
-                               "bigger than 1."
-            },
-            "min_ram": {
-                "description": "Amount of RAM in MB",
-                "type": "integer",
-                "minimum": 0
-            },
-            "min_disk": {
-                "description": "Amount of disk space in GB",
-                "type": "integer",
-                "minimum": 0
-            },
-            "visibility": {
-                "description": "Visibility for this image ('shared' and "
-                               "'community' are available only with "
-                               "Glance V2).",
-                "enum": ["public", "private", "shared", "community"]
-            },
-            "images_per_tenant": {
-                "description": "The number of images to create per tenant.",
-                "type": "integer",
-                "minimum": 1
-            },
-            "image_args": {
-                "description": "This param is deprecated since Rally 0.10.0; "
-                               "specify the exact arguments in the root "
-                               "section of the context instead.",
-                "type": "object",
-                "additionalProperties": True
-            },
-            "image_container": {
-                "description": "This param is deprecated since Rally 0.10.0; "
-                               "use `container_format` instead.",
-                "type": "string",
-            },
-            "image_type": {
-                "description": "This param is deprecated since Rally 0.10.0; "
-                               "use `disk_format` instead.",
-                "enum": ["qcow2", "raw", "vhd", "vmdk", "vdi", "iso", "aki",
-                         "ari", "ami"],
-            },
-        },
-        "oneOf": [{"description": "The format used since Rally 0.10.0.",
-                   "required": ["image_url", "disk_format",
-                                "container_format"]},
-                  {"description": "A backward-compatible variant.",
-                   "required": ["image_url", "image_type",
-                                "container_format"]},
-                  {"description": "A backward-compatible variant.",
-                   "required": ["image_url", "disk_format",
-                                "image_container"]},
-                  {"description": "A backward-compatible variant.",
-                   "required": ["image_url", "image_type",
-                                "image_container"]}],
-        "additionalProperties": False
-    }
-
-    DEFAULT_CONFIG = {"images_per_tenant": 1}
-
-    @logging.log_task_wrapper(LOG.info, _("Enter context: `Images`"))
-    def setup(self):
-        image_url = self.config.get("image_url")
-        disk_format = self.config.get("disk_format")
-        container_format = self.config.get("container_format")
-        images_per_tenant =
self.config.get("images_per_tenant") - visibility = self.config.get("visibility", "private") - min_disk = self.config.get("min_disk", 0) - min_ram = self.config.get("min_ram", 0) - image_args = self.config.get("image_args", {}) - - if "image_type" in self.config: - LOG.warning(_("The 'image_type' argument is deprecated " - "since Rally 0.10.0, use disk_format " - "arguments instead.")) - if not disk_format: - disk_format = self.config["image_type"] - - if "image_container" in self.config: - LOG.warning(_("The 'image_container' argument is deprecated " - "since Rally 0.10.0; use container_format " - "arguments instead")) - if not container_format: - container_format = self.config["image_container"] - - if image_args: - LOG.warning(_("The 'image_args' argument is deprecated since " - "Rally 0.10.0; specify exact arguments in a root " - "section of context instead.")) - - if "is_public" in image_args: - if "visibility" not in self.config: - visibility = ("public" if image_args["is_public"] - else "private") - if "min_ram" in image_args: - if "min_ram" not in self.config: - min_ram = image_args["min_ram"] - - if "min_disk" in image_args: - if "min_disk" not in self.config: - min_disk = image_args["min_disk"] - - # None image_name means that image.Image will generate a random name - image_name = None - if "image_name" in self.config and images_per_tenant == 1: - image_name = self.config["image_name"] - - for user, tenant_id in rutils.iterate_per_tenants( - self.context["users"]): - current_images = [] - clients = osclients.Clients( - user["credential"], - api_info=self.context["config"].get("api_versions")) - image_service = image.Image( - clients, name_generator=self.generate_random_name) - - for i in range(images_per_tenant): - image_obj = image_service.create_image( - image_name=image_name, - container_format=container_format, - image_location=image_url, - disk_format=disk_format, - visibility=visibility, - min_disk=min_disk, - min_ram=min_ram) - current_images.append(image_obj.id) - - self.context["tenants"][tenant_id]["images"] = current_images - - @logging.log_task_wrapper(LOG.info, _("Exit context: `Images`")) - def cleanup(self): - if self.context.get("admin", {}): - admin = self.context["admin"] - admin_required = None - else: - admin = None - admin_required = False - - if "image_name" in self.config: - matcher = rutils.make_name_matcher(self.config["image_name"]) - else: - matcher = self.__class__ - - resource_manager.cleanup(names=["glance.images", - "cinder.image_volumes_cache"], - admin=admin, - admin_required=admin_required, - users=self.context.get("users", []), - api_versions=self.context["config"].get( - "api_versions"), - superclass=matcher, - task_id=self.get_owner_id()) diff --git a/rally/plugins/openstack/context/heat/__init__.py b/rally/plugins/openstack/context/heat/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/rally/plugins/openstack/context/heat/stacks.py b/rally/plugins/openstack/context/heat/stacks.py deleted file mode 100644 index 38f30879..00000000 --- a/rally/plugins/openstack/context/heat/stacks.py +++ /dev/null @@ -1,94 +0,0 @@ -# Copyright 2015: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from rally.common.i18n import _
-from rally.common import logging
-from rally.common import utils as rutils
-from rally.common import validation
-from rally import consts
-from rally.plugins.openstack.cleanup import manager as resource_manager
-from rally.plugins.openstack.scenarios.heat import utils as heat_utils
-from rally.task import context
-
-LOG = logging.getLogger(__name__)
-
-
-@validation.add("required_platform", platform="openstack", users=True)
-@context.configure(name="stacks", platform="openstack", order=435)
-class StackGenerator(context.Context):
    """Context class for creating temporary stacks with resources.
-
-    The stack generator can create an arbitrary number of stacks for each
-    tenant before the test scenarios run. In addition, it allows defining
-    the number of resources (namely OS::Heat::RandomString) that will be
-    created inside each stack. After test execution the stacks are
-    automatically removed from heat.
-    """
-
-    # The schema of the context configuration format
-    CONFIG_SCHEMA = {
-        "type": "object",
-        "$schema": consts.JSON_SCHEMA,
-
-        "properties": {
-            "stacks_per_tenant": {
-                "type": "integer",
-                "minimum": 1
-            },
-            "resources_per_stack": {
-                "type": "integer",
-                "minimum": 1
-            }
-        },
-        "additionalProperties": False
-    }
-
-    DEFAULT_CONFIG = {
-        "stacks_per_tenant": 2,
-        "resources_per_stack": 10
-    }
-
-    @staticmethod
-    def _prepare_stack_template(res_num):
-        template = {
-            "heat_template_version": "2014-10-16",
-            "description": "Test template for rally",
-            "resources": {}
-        }
-        rand_string = {"type": "OS::Heat::RandomString"}
-        for i in range(res_num):
-            template["resources"]["TestResource%d" % i] = rand_string
-        return template
-
-    @logging.log_task_wrapper(LOG.info, _("Enter context: `Stacks`"))
-    def setup(self):
-        template = self._prepare_stack_template(
-            self.config["resources_per_stack"])
-        for user, tenant_id in rutils.iterate_per_tenants(
-                self.context["users"]):
-            heat_scenario = heat_utils.HeatScenario(
-                {"user": user, "task": self.context["task"],
-                 "owner_id": self.context["owner_id"]})
-            self.context["tenants"][tenant_id]["stacks"] = []
-            for i in range(self.config["stacks_per_tenant"]):
-                stack = heat_scenario._create_stack(template)
-                self.context["tenants"][tenant_id]["stacks"].append(stack.id)
-
-    @logging.log_task_wrapper(LOG.info, _("Exit context: `Stacks`"))
-    def cleanup(self):
-        resource_manager.cleanup(names=["heat.stacks"],
-                                 users=self.context.get("users", []),
-                                 superclass=heat_utils.HeatScenario,
-                                 task_id=self.get_owner_id())
diff --git a/rally/plugins/openstack/context/keystone/__init__.py b/rally/plugins/openstack/context/keystone/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/rally/plugins/openstack/context/keystone/roles.py b/rally/plugins/openstack/context/keystone/roles.py
deleted file mode 100644
index 18c5a62e..00000000
--- a/rally/plugins/openstack/context/keystone/roles.py
+++ /dev/null
@@ -1,112 +0,0 @@
-# Copyright 2014: Mirantis Inc.
-# All Rights Reserved.
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_config import cfg - -from rally.common import broker -from rally.common.i18n import _ -from rally.common import logging -from rally.common import validation -from rally import consts -from rally import exceptions -from rally import osclients -from rally.plugins.openstack.services.identity import identity -from rally.task import context - - -LOG = logging.getLogger(__name__) - -CONF = cfg.CONF - - -@validation.add("required_platform", platform="openstack", users=True) -@context.configure(name="roles", platform="openstack", order=330) -class RoleGenerator(context.Context): - """Context class for assigning roles for users.""" - - CONFIG_SCHEMA = { - "type": "array", - "$schema": consts.JSON_SCHEMA, - "items": { - "type": "string", - "description": "The name of role to assign to user" - } - } - - def __init__(self, ctx): - super(RoleGenerator, self).__init__(ctx) - self.credential = self.context["admin"]["credential"] - self.workers = cfg.CONF.roles_context.resource_management_workers - - def _get_role_object(self, context_role): - """Check if role exists. - - :param context_role: name of existing role. - """ - keystone = identity.Identity(osclients.Clients(self.credential)) - default_roles = keystone.list_roles() - for def_role in default_roles: - if str(def_role.name) == context_role: - return def_role - else: - raise exceptions.NotFoundException(_( - "There is no role with name `%s`") % context_role) - - def _get_consumer(self, func_name): - def consume(cache, args): - role_id, user_id, project_id = args - if "client" not in cache: - clients = osclients.Clients(self.credential) - cache["client"] = identity.Identity(clients) - getattr(cache["client"], func_name)(role_id=role_id, - user_id=user_id, - project_id=project_id) - return consume - - @logging.log_task_wrapper(LOG.info, _("Enter context: `roles`")) - def setup(self): - """Add all roles to users.""" - threads = self.workers - roles_dict = {} - - def publish(queue): - for context_role in self.config: - role = self._get_role_object(context_role) - roles_dict[role.id] = role.name - LOG.debug("Adding role %(role_name)s having ID %(role_id)s " - "to all users using %(threads)s threads" % - {"role_name": role.name, - "role_id": role.id, - "threads": threads}) - for user in self.context["users"]: - args = (role.id, user["id"], user["tenant_id"]) - queue.append(args) - - broker.run(publish, self._get_consumer("add_role"), threads) - self.context["roles"] = roles_dict - - @logging.log_task_wrapper(LOG.info, _("Exit context: `roles`")) - def cleanup(self): - """Remove all roles from users.""" - threads = self.workers - - def publish(queue): - for role_id in self.context["roles"]: - LOG.debug("Removing role %s from all users" % role_id) - for user in self.context["users"]: - args = (role_id, user["id"], user["tenant_id"]) - queue.append(args) - - broker.run(publish, self._get_consumer("revoke_role"), threads) diff --git a/rally/plugins/openstack/context/keystone/users.py 
b/rally/plugins/openstack/context/keystone/users.py deleted file mode 100644 index def8989f..00000000 --- a/rally/plugins/openstack/context/keystone/users.py +++ /dev/null @@ -1,324 +0,0 @@ -# Copyright 2014: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import collections -import uuid - -from oslo_config import cfg - -from rally.common import broker -from rally.common.i18n import _ -from rally.common import logging -from rally.common import objects -from rally.common import utils as rutils -from rally.common import validation -from rally import consts -from rally import exceptions -from rally import osclients -from rally.plugins.openstack import credential -from rally.plugins.openstack.services.identity import identity -from rally.plugins.openstack.wrappers import network -from rally.task import context - -from rally.common import opts -opts.register() - - -LOG = logging.getLogger(__name__) - -CONF = cfg.CONF - -RESOURCE_MANAGEMENT_WORKERS_DESCR = ("The number of concurrent threads to use " - "for serving users context.") -PROJECT_DOMAIN_DESCR = "ID of domain in which projects will be created." -USER_DOMAIN_DESCR = "ID of domain in which users will be created." - - -@validation.add("required_platform", platform="openstack", users=True) -@context.configure(name="users", platform="openstack", order=100) -class UserGenerator(context.Context): - """Context class for generating temporary users/tenants for benchmarks.""" - - CONFIG_SCHEMA = { - "type": "object", - "$schema": consts.JSON_SCHEMA, - "oneOf": [ - {"description": "Create new temporary users and tenants.", - "properties": { - "tenants": { - "type": "integer", - "minimum": 1, - "description": "The number of tenants to create." - }, - "users_per_tenant": { - "type": "integer", - "minimum": 1, - "description": "The number of users to create per one " - "tenant."}, - "resource_management_workers": { - "type": "integer", - "minimum": 1, - "description": RESOURCE_MANAGEMENT_WORKERS_DESCR}, - "project_domain": { - "type": "string", - "description": PROJECT_DOMAIN_DESCR}, - "user_domain": { - "type": "string", - "description": USER_DOMAIN_DESCR}, - "user_choice_method": { - "$ref": "#/definitions/user_choice_method"}}, - "additionalProperties": False}, - # TODO(andreykurilin): add ability to specify users here. 
- {"description": "Use existing users and tenants.", - "properties": { - "user_choice_method": { - "$ref": "#/definitions/user_choice_method"} - }, - "additionalProperties": False} - ], - "definitions": { - "user_choice_method": { - "enum": ["random", "round_robin"], - "description": "The mode of balancing usage of users between " - "scenario iterations."} - - } - } - - DEFAULT_CONFIG = {"user_choice_method": "random"} - - DEFAULT_FOR_NEW_USERS = { - "tenants": 1, - "users_per_tenant": 1, - "resource_management_workers": - cfg.CONF.users_context.resource_management_workers, - } - - def __init__(self, context): - super(UserGenerator, self).__init__(context) - - deployment = objects.Deployment.get(context["task"]["deployment_uuid"]) - existing_users = deployment.get_credentials_for("openstack")["users"] - if existing_users and not (set(self.config) - {"user_choice_method"}): - self.existing_users = existing_users - else: - self.existing_users = [] - self.credential = context["admin"]["credential"] - project_domain = (self.credential.project_domain_name or - cfg.CONF.users_context.project_domain) - user_domain = (self.credential.user_domain_name or - cfg.CONF.users_context.user_domain) - self.DEFAULT_FOR_NEW_USERS["project_domain"] = project_domain - self.DEFAULT_FOR_NEW_USERS["user_domain"] = user_domain - with self.config.unlocked(): - for key, value in self.DEFAULT_FOR_NEW_USERS.items(): - self.config.setdefault(key, value) - - def _remove_default_security_group(self): - """Delete default security group for tenants.""" - clients = osclients.Clients(self.credential) - - if consts.Service.NEUTRON not in clients.services().values(): - return - - use_sg, msg = network.wrap(clients, self).supports_extension( - "security-group") - if not use_sg: - LOG.debug("Security group context is disabled: %s" % msg) - return - - for user, tenant_id in rutils.iterate_per_tenants( - self.context["users"]): - with logging.ExceptionLogger( - LOG, _("Unable to delete default security group")): - uclients = osclients.Clients(user["credential"]) - security_groups = uclients.neutron().list_security_groups() - default = [sg for sg in security_groups["security_groups"] - if sg["name"] == "default"] - if default: - clients.neutron().delete_security_group(default[0]["id"]) - - def _create_tenants(self): - threads = self.config["resource_management_workers"] - - tenants = collections.deque() - - def publish(queue): - for i in range(self.config["tenants"]): - args = (self.config["project_domain"], self.task["uuid"], i) - queue.append(args) - - def consume(cache, args): - domain, task_id, i = args - if "client" not in cache: - clients = osclients.Clients(self.credential) - cache["client"] = identity.Identity( - clients, name_generator=self.generate_random_name) - tenant = cache["client"].create_project(domain_name=domain) - tenant_dict = {"id": tenant.id, "name": tenant.name, "users": []} - tenants.append(tenant_dict) - - # NOTE(msdubov): consume() will fill the tenants list in the closure. - broker.run(publish, consume, threads) - tenants_dict = {} - for t in tenants: - tenants_dict[t["id"]] = t - - return tenants_dict - - def _create_users(self): - # NOTE(msdubov): This should be called after _create_tenants(). 
- threads = self.config["resource_management_workers"] - users_per_tenant = self.config["users_per_tenant"] - default_role = cfg.CONF.users_context.keystone_default_role - - users = collections.deque() - - def publish(queue): - for tenant_id in self.context["tenants"]: - for user_id in range(users_per_tenant): - username = self.generate_random_name() - password = str(uuid.uuid4()) - args = (username, password, self.config["project_domain"], - self.config["user_domain"], tenant_id) - queue.append(args) - - def consume(cache, args): - username, password, project_dom, user_dom, tenant_id = args - if "client" not in cache: - clients = osclients.Clients(self.credential) - cache["client"] = identity.Identity( - clients, name_generator=self.generate_random_name) - client = cache["client"] - user = client.create_user(username, password=password, - project_id=tenant_id, - domain_name=user_dom, - default_role=default_role) - user_credential = credential.OpenStackCredential( - auth_url=self.credential.auth_url, - username=user.name, - password=password, - tenant_name=self.context["tenants"][tenant_id]["name"], - permission=consts.EndpointPermission.USER, - project_domain_name=project_dom, - user_domain_name=user_dom, - endpoint_type=self.credential.endpoint_type, - https_insecure=self.credential.https_insecure, - https_cacert=self.credential.https_cacert, - region_name=self.credential.region_name, - profiler_hmac_key=self.credential.profiler_hmac_key) - users.append({"id": user.id, - "credential": user_credential, - "tenant_id": tenant_id}) - - # NOTE(msdubov): consume() will fill the users list in the closure. - broker.run(publish, consume, threads) - return list(users) - - def _get_consumer_for_deletion(self, func_name): - def consume(cache, resource_id): - if "client" not in cache: - clients = osclients.Clients(self.credential) - cache["client"] = identity.Identity(clients) - getattr(cache["client"], func_name)(resource_id) - return consume - - def _delete_tenants(self): - threads = self.config["resource_management_workers"] - - def publish(queue): - for tenant_id in self.context["tenants"]: - queue.append(tenant_id) - - broker.run(publish, self._get_consumer_for_deletion("delete_project"), - threads) - self.context["tenants"] = {} - - def _delete_users(self): - threads = self.config["resource_management_workers"] - - def publish(queue): - for user in self.context["users"]: - queue.append(user["id"]) - - broker.run(publish, self._get_consumer_for_deletion("delete_user"), - threads) - self.context["users"] = [] - - def create_users(self): - """Create tenants and users, using the broker pattern.""" - threads = self.config["resource_management_workers"] - - LOG.debug("Creating %(tenants)d tenants using %(threads)s threads" % - {"tenants": self.config["tenants"], "threads": threads}) - self.context["tenants"] = self._create_tenants() - - if len(self.context["tenants"]) < self.config["tenants"]: - raise exceptions.ContextSetupFailure( - ctx_name=self.get_name(), - msg=_("Failed to create the requested number of tenants.")) - - users_num = self.config["users_per_tenant"] * self.config["tenants"] - LOG.debug("Creating %(users)d users using %(threads)s threads" % - {"users": users_num, "threads": threads}) - self.context["users"] = self._create_users() - for user in self.context["users"]: - self.context["tenants"][user["tenant_id"]]["users"].append(user) - - if len(self.context["users"]) < users_num: - raise exceptions.ContextSetupFailure( - ctx_name=self.get_name(), - msg=_("Failed to create the 
requested number of users.")) - - def use_existing_users(self): - LOG.debug("Using existing users") - for user_credential in self.existing_users: - user_clients = user_credential.clients() - user_id = user_clients.keystone.auth_ref.user_id - tenant_id = user_clients.keystone.auth_ref.project_id - - if tenant_id not in self.context["tenants"]: - self.context["tenants"][tenant_id] = { - "id": tenant_id, - "name": user_credential.tenant_name - } - - self.context["users"].append({ - "credential": user_credential, - "id": user_id, - "tenant_id": tenant_id - }) - - @logging.log_task_wrapper(LOG.info, _("Enter context: `users`")) - def setup(self): - self.context["users"] = [] - self.context["tenants"] = {} - self.context["user_choice_method"] = self.config["user_choice_method"] - - if self.existing_users: - self.use_existing_users() - else: - self.create_users() - - @logging.log_task_wrapper(LOG.info, _("Exit context: `users`")) - def cleanup(self): - """Delete tenants and users, using the broker pattern.""" - if self.existing_users: - # nothing to do here. - return - else: - self._remove_default_security_group() - self._delete_users() - self._delete_tenants() diff --git a/rally/plugins/openstack/context/magnum/__init__.py b/rally/plugins/openstack/context/magnum/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/rally/plugins/openstack/context/magnum/ca_certs.py b/rally/plugins/openstack/context/magnum/ca_certs.py deleted file mode 100644 index 4b31a264..00000000 --- a/rally/plugins/openstack/context/magnum/ca_certs.py +++ /dev/null @@ -1,138 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
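A minimal sketch of the `users` context above, exercising the "create new temporary users and tenants" branch of its `oneOf` schema:

    .. code-block:: json

        "context": {
            "users": {
                "tenants": 2,
                "users_per_tenant": 3,
                "user_choice_method": "round_robin"
            }
        }

With existing users registered in the deployment, the same context would instead accept only `user_choice_method`.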
- -import os - -from cryptography.hazmat import backends -from cryptography.hazmat.primitives.asymmetric import rsa -from cryptography.hazmat.primitives import hashes -from cryptography.hazmat.primitives import serialization -from cryptography import x509 -from cryptography.x509 import oid - -from rally.common.i18n import _ -from rally.common import logging -from rally.common import utils as rutils -from rally.common import validation -from rally import consts -from rally.plugins.openstack.scenarios.magnum import utils as magnum_utils -from rally.task import context - - -LOG = logging.getLogger(__name__) - - -@validation.add("required_platform", platform="openstack", users=True) -@context.configure(name="ca_certs", platform="openstack", order=490) -class CaCertGenerator(context.Context): - """Context class for generating temporary ca cert for benchmarks.""" - - CONFIG_SCHEMA = { - "type": "object", - "$schema": consts.JSON_SCHEMA, - "properties": { - "directory": { - "type": "string", - } - }, - "additionalProperties": False - } - - def _generate_csr_and_key(self): - """Return a dict with a new csr and key.""" - key = rsa.generate_private_key( - public_exponent=65537, - key_size=2048, - backend=backends.default_backend()) - - csr = x509.CertificateSigningRequestBuilder().subject_name(x509.Name([ - x509.NameAttribute(oid.NameOID.COMMON_NAME, u"Magnum User"), - ])).sign(key, hashes.SHA256(), backends.default_backend()) - - result = { - "csr": csr.public_bytes(encoding=serialization.Encoding.PEM), - "key": key.private_bytes( - encoding=serialization.Encoding.PEM, - format=serialization.PrivateFormat.TraditionalOpenSSL, - encryption_algorithm=serialization.NoEncryption()), - } - - return result - - @logging.log_task_wrapper(LOG.info, _("Enter context: `Ca Cert`")) - def setup(self): - for user, tenant_id in rutils.iterate_per_tenants( - self.context["users"]): - - magnum_scenario = magnum_utils.MagnumScenario({ - "user": user, - "task": self.context["task"], - "config": {"api_versions": self.context["config"].get( - "api_versions", [])} - }) - - # get the cluster and cluster_template - cluster_uuid = str(self.context["tenants"][tenant_id]["cluster"]) - cluster = magnum_scenario._get_cluster(cluster_uuid) - cluster_template = magnum_scenario._get_cluster_template( - cluster.cluster_template_id) - - if not cluster_template.tls_disabled: - tls = self._generate_csr_and_key() - dir = "" - if self.config.get("directory") is not None: - dir = self.config.get("directory") - self.context["ca_certs_directory"] = dir - fname = os.path.join(dir, cluster_uuid + ".key") - with open(fname, "w") as key_file: - key_file.write(tls["key"]) - # get CA certificate for this cluster - ca_cert = magnum_scenario._get_ca_certificate(cluster_uuid) - fname = os.path.join(dir, cluster_uuid + "_ca.crt") - with open(fname, "w") as ca_cert_file: - ca_cert_file.write(ca_cert.pem) - # send csr to Magnum to have it signed - csr_req = {"cluster_uuid": cluster_uuid, - "csr": tls["csr"]} - cert = magnum_scenario._create_ca_certificate(csr_req) - fname = os.path.join(dir, cluster_uuid + ".crt") - with open(fname, "w") as cert_file: - cert_file.write(cert.pem) - - @logging.log_task_wrapper(LOG.info, _("Exit context: `Ca Cert`")) - def cleanup(self): - for user, tenant_id in rutils.iterate_per_tenants( - self.context["users"]): - - magnum_scenario = magnum_utils.MagnumScenario({ - "user": user, - "task": self.context["task"], - "config": {"api_versions": self.context["config"].get( - "api_versions", [])} - }) - - # get the cluster 
and cluster_template - cluster_uuid = str(self.context["tenants"][tenant_id]["cluster"]) - cluster = magnum_scenario._get_cluster(cluster_uuid) - cluster_template = magnum_scenario._get_cluster_template( - cluster.cluster_template_id) - - if not cluster_template.tls_disabled: - dir = self.context["ca_certs_directory"] - fname = os.path.join(dir, cluster_uuid + ".key") - os.remove(fname) - fname = os.path.join(dir, cluster_uuid + "_ca.crt") - os.remove(fname) - fname = os.path.join(dir, cluster_uuid + ".crt") - os.remove(fname) diff --git a/rally/plugins/openstack/context/magnum/cluster_templates.py b/rally/plugins/openstack/context/magnum/cluster_templates.py deleted file mode 100644 index 7919603a..00000000 --- a/rally/plugins/openstack/context/magnum/cluster_templates.py +++ /dev/null @@ -1,130 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally.common.i18n import _ -from rally.common import logging -from rally.common import utils as rutils -from rally.common import validation -from rally import consts -from rally.plugins.openstack.cleanup import manager as resource_manager -from rally.plugins.openstack.scenarios.magnum import utils as magnum_utils -from rally.task import context - - -LOG = logging.getLogger(__name__) - - -@validation.add("required_platform", platform="openstack", users=True) -@context.configure(name="cluster_templates", platform="openstack", order=470) -class ClusterTemplateGenerator(context.Context): - """Context class for generating temporary cluster model for benchmarks.""" - - CONFIG_SCHEMA = { - "type": "object", - "$schema": consts.JSON_SCHEMA, - "properties": { - "image_id": { - "type": "string" - }, - "flavor_id": { - "type": "string" - }, - "master_flavor_id": { - "type": "string" - }, - "external_network_id": { - "type": "string" - }, - "fixed_network": { - "type": "string" - }, - "fixed_subnet": { - "type": "string" - }, - "dns_nameserver": { - "type": "string" - }, - "docker_volume_size": { - "type": "integer" - }, - "labels": { - "type": "string" - }, - "coe": { - "type": "string" - }, - "http_proxy": { - "type": "string" - }, - "https_proxy": { - "type": "string" - }, - "no_proxy": { - "type": "string" - }, - "network_driver": { - "type": "string" - }, - "tls_disabled": { - "type": "boolean" - }, - "public": { - "type": "boolean" - }, - "registry_enabled": { - "type": "boolean" - }, - "volume_driver": { - "type": "string" - }, - "server_type": { - "type": "string" - }, - "docker_storage_driver": { - "type": "string" - }, - "master_lb_enabled": { - "type": "boolean" - } - }, - "required": ["image_id", "external_network_id", "coe"], - "additionalProperties": False - } - - @logging.log_task_wrapper(LOG.info, _("Enter context: `ClusterTemplate`")) - def setup(self): - for user, tenant_id in rutils.iterate_per_tenants( - self.context["users"]): - - magnum_scenario = magnum_utils.MagnumScenario({ - "user": user, - "task": self.context["task"], - "owner_id": self.context["owner_id"], - "config": {"api_versions": 
self.context["config"].get( - "api_versions", [])} - }) - - cluster_template = magnum_scenario._create_cluster_template( - **self.config) - - ct_uuid = cluster_template.uuid - self.context["tenants"][tenant_id]["cluster_template"] = ct_uuid - - @logging.log_task_wrapper(LOG.info, _("Exit context: `ClusterTemplate`")) - def cleanup(self): - resource_manager.cleanup( - names=["magnum.cluster_templates"], - users=self.context.get("users", []), - superclass=magnum_utils.MagnumScenario, - task_id=self.get_owner_id()) diff --git a/rally/plugins/openstack/context/magnum/clusters.py b/rally/plugins/openstack/context/magnum/clusters.py deleted file mode 100644 index a530aa77..00000000 --- a/rally/plugins/openstack/context/magnum/clusters.py +++ /dev/null @@ -1,88 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally.common.i18n import _ -from rally.common import logging -from rally.common import utils as rutils -from rally.common import validation -from rally import consts -from rally.plugins.openstack.cleanup import manager as resource_manager -from rally.plugins.openstack.scenarios.magnum import utils as magnum_utils -from rally.plugins.openstack.scenarios.nova import utils as nova_utils -from rally.task import context - - -LOG = logging.getLogger(__name__) - - -@validation.add("required_platform", platform="openstack", users=True) -@context.configure(name="clusters", platform="openstack", order=480) -class ClusterGenerator(context.Context): - """Context class for generating temporary cluster for benchmarks.""" - - CONFIG_SCHEMA = { - "type": "object", - "$schema": consts.JSON_SCHEMA, - "properties": { - "cluster_template_uuid": { - "type": "string" - }, - "node_count": { - "type": "integer", - "minimum": 1, - }, - }, - "additionalProperties": False - } - - DEFAULT_CONFIG = {"node_count": 1} - - @logging.log_task_wrapper(LOG.info, _("Enter context: `Cluster`")) - def setup(self): - for user, tenant_id in rutils.iterate_per_tenants( - self.context["users"]): - - nova_scenario = nova_utils.NovaScenario({ - "user": user, - "task": self.context["task"], - "config": {"api_versions": self.context["config"].get( - "api_versions", [])} - }) - keypair = nova_scenario._create_keypair() - - magnum_scenario = magnum_utils.MagnumScenario({ - "user": user, - "task": self.context["task"], - "owner_id": self.context["owner_id"], - "config": {"api_versions": self.context["config"].get( - "api_versions", [])} - }) - - # create a cluster - ct_uuid = self.config.get("cluster_template_uuid", None) - if ct_uuid is None: - ctx = self.context["tenants"][tenant_id] - ct_uuid = ctx.get("cluster_template") - cluster = magnum_scenario._create_cluster( - cluster_template=ct_uuid, - node_count=self.config.get("node_count"), keypair=keypair) - self.context["tenants"][tenant_id]["cluster"] = cluster.uuid - - @logging.log_task_wrapper(LOG.info, _("Exit context: `Cluster`")) - def cleanup(self): - resource_manager.cleanup( - names=["magnum.clusters", "nova.keypairs"], - 
users=self.context.get("users", []), - superclass=magnum_utils.MagnumScenario, - task_id=self.get_owner_id()) diff --git a/rally/plugins/openstack/context/manila/__init__.py b/rally/plugins/openstack/context/manila/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/rally/plugins/openstack/context/manila/consts.py b/rally/plugins/openstack/context/manila/consts.py deleted file mode 100644 index f38db74b..00000000 --- a/rally/plugins/openstack/context/manila/consts.py +++ /dev/null @@ -1,18 +0,0 @@ -# Copyright 2015 Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -SHARES_CONTEXT_NAME = "manila_shares" -SHARE_NETWORKS_CONTEXT_NAME = "manila_share_networks" -SECURITY_SERVICES_CONTEXT_NAME = "manila_security_services" diff --git a/rally/plugins/openstack/context/manila/manila_security_services.py b/rally/plugins/openstack/context/manila/manila_security_services.py deleted file mode 100644 index f7760d4b..00000000 --- a/rally/plugins/openstack/context/manila/manila_security_services.py +++ /dev/null @@ -1,99 +0,0 @@ -# Copyright 2015 Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_config import cfg - -from rally.common.i18n import _ -from rally.common import logging -from rally.common import utils -from rally.common import validation -from rally import consts as rally_consts -from rally.plugins.openstack.cleanup import manager as resource_manager -from rally.plugins.openstack.context.manila import consts -from rally.plugins.openstack.scenarios.manila import utils as manila_utils -from rally.task import context - -CONF = cfg.CONF -LOG = logging.getLogger(__name__) - -CONTEXT_NAME = consts.SECURITY_SERVICES_CONTEXT_NAME - - -@validation.add("required_platform", platform="openstack", users=True) -@context.configure(name=CONTEXT_NAME, platform="openstack", order=445) -class SecurityServices(context.Context): - """This context creates 'security services' for Manila project.""" - - CONFIG_SCHEMA = { - "type": "object", - "$schema": rally_consts.JSON_SCHEMA, - "properties": { - "security_services": { - "type": "array", - "description": - "It is expected to be list of dicts with data for creation" - " of security services.", - "items": { - "type": "object", - "properties": {"type": {"enum": ["active_directory", - "kerberos", "ldap"]}}, - "required": ["type"], - "additionalProperties": True, - "description": - "Data for creation of security services. \n " - "Example:\n\n" - " .. 
code-block:: json\n\n"
-                        "      {'type': 'ldap', 'dns_ip': 'foo_ip', \n"
-                        "       'server': 'bar_ip', 'domain': 'quuz_domain',\n"
-                        "       'user': 'ololo', 'password': 'fake_password'}\n"
-                }
-            },
-        },
-        "additionalProperties": False
-    }
-    DEFAULT_CONFIG = {
-        "security_services": [],
-    }
-
-    @logging.log_task_wrapper(
-        LOG.info, _("Enter context: `%s`") % CONTEXT_NAME)
-    def setup(self):
-        for user, tenant_id in (utils.iterate_per_tenants(
-                self.context.get("users", []))):
-            self.context["tenants"][tenant_id][CONTEXT_NAME] = {
-                "security_services": [],
-            }
-            if self.config["security_services"]:
-                manila_scenario = manila_utils.ManilaScenario({
-                    "task": self.task,
-                    "owner_id": self.context["owner_id"],
-                    "user": user,
-                    "config": {
-                        "api_versions": self.context["config"].get(
-                            "api_versions", [])}
-                })
-                for ss in self.config["security_services"]:
-                    inst = manila_scenario._create_security_service(
-                        **ss).to_dict()
-                    self.context["tenants"][tenant_id][CONTEXT_NAME][
-                        "security_services"].append(inst)
-
-    @logging.log_task_wrapper(LOG.info, _("Exit context: `%s`") % CONTEXT_NAME)
-    def cleanup(self):
-        resource_manager.cleanup(
-            names=["manila.security_services"],
-            users=self.context.get("users", []),
-            superclass=manila_utils.ManilaScenario,
-            task_id=self.get_owner_id())
diff --git a/rally/plugins/openstack/context/manila/manila_share_networks.py b/rally/plugins/openstack/context/manila/manila_share_networks.py
deleted file mode 100644
index cb23af6b..00000000
--- a/rally/plugins/openstack/context/manila/manila_share_networks.py
+++ /dev/null
@@ -1,212 +0,0 @@
-# Copyright 2015 Mirantis Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo_config import cfg
-
-from rally.common.i18n import _
-from rally.common import logging
-from rally.common import utils
-from rally.common import validation
-from rally import consts as rally_consts
-from rally import exceptions
-from rally.plugins.openstack.cleanup import manager as resource_manager
-from rally.plugins.openstack.context.manila import consts
-from rally.plugins.openstack.scenarios.manila import utils as manila_utils
-from rally.task import context
-
-CONF = cfg.CONF
-LOG = logging.getLogger(__name__)
-
-CONTEXT_NAME = consts.SHARE_NETWORKS_CONTEXT_NAME
-
-SHARE_NETWORKS_ARG_DESCR = """
-This context arg is used only when the context arg "use_share_networks" is
-set to True.
-
-If the 'share_networks' context arg has values, those share networks will be
-used; otherwise share networks will be autocreated - one for each tenant
-network. If no tenant networks exist, one share network will be created for
-each tenant, without network data.
-
-The expected value is a dict of lists, where a tenant Name or ID is the key
-and a list of share_network Names or IDs is the value. Example:
-
-    .. code-block:: json
-
-        "context": {
-            "manila_share_networks": {
-                "use_share_networks": true,
-                "share_networks": {
-                    "tenant_1_name_or_id": ["share_network_1_name_or_id",
-                                            "share_network_2_name_or_id"],
-                    "tenant_2_name_or_id": ["share_network_3_name_or_id"]}
-            }
-        }
-
-Also, make sure that all 'existing users' in the registered deployment have
-share networks if their usage is enabled; otherwise Rally will randomly pick
-users that do not satisfy the criteria.
-"""
-
-
-@validation.add("required_platform", platform="openstack", users=True)
-@context.configure(name=CONTEXT_NAME, platform="openstack", order=450)
-class ShareNetworks(context.Context):
-    """This context creates share networks for the Manila project."""
-    CONFIG_SCHEMA = {
-        "type": "object",
-        "$schema": rally_consts.JSON_SCHEMA,
-        "properties": {
-            "use_share_networks": {
-                "type": "boolean",
-                "description": "Specifies whether manila should use share "
-                               "networks for share creation or not."},
-
-            "share_networks": {
-                "type": "object",
-                "description": SHARE_NETWORKS_ARG_DESCR
-            },
-        },
-        "additionalProperties": False
-    }
-    DEFAULT_CONFIG = {
-        "use_share_networks": False,
-        "share_networks": {},
-    }
-
-    def _setup_for_existing_users(self):
-        if (self.config["use_share_networks"] and
-                not self.config["share_networks"]):
-            msg = _("Usage of share networks was enabled, but for a "
-                    "deployment with existing users the share networks "
-                    "must also be specified via the 'share_networks' arg")
-            raise exceptions.ContextSetupFailure(
-                ctx_name=self.get_name(), msg=msg)
-
-        for tenant_name_or_id, share_networks in self.config[
-                "share_networks"].items():
-            # Verify project existence
-            for tenant in self.context["tenants"].values():
-                if tenant_name_or_id in (tenant["id"], tenant["name"]):
-                    tenant_id = tenant["id"]
-                    existing_user = None
-                    for user in self.context["users"]:
-                        if user["tenant_id"] == tenant_id:
-                            existing_user = user
-                            break
-                    break
-            else:
-                msg = _("Provided tenant Name or ID '%s' was not found in "
-                        "existing tenants.") % tenant_name_or_id
-                raise exceptions.ContextSetupFailure(
-                    ctx_name=self.get_name(), msg=msg)
-            self.context["tenants"][tenant_id][CONTEXT_NAME] = {}
-            self.context["tenants"][tenant_id][CONTEXT_NAME][
-                "share_networks"] = []
-
-            manila_scenario = manila_utils.ManilaScenario({
-                "user": existing_user,
-                "config": {
-                    "api_versions": self.context["config"].get(
-                        "api_versions", [])}
-            })
-            existing_sns = manila_scenario._list_share_networks(
-                detailed=False, search_opts={"project_id": tenant_id})
-
-            for sn_name_or_id in share_networks:
-                # Verify share network existence
-                for sn in existing_sns:
-                    if sn_name_or_id in (sn.id, sn.name):
-                        break
-                else:
-                    msg = _("Specified share network '%(sn)s' does not "
-                            "exist for tenant '%(tenant_id)s'") % {
-                                "sn": sn_name_or_id, "tenant_id": tenant_id}
-                    raise exceptions.ContextSetupFailure(
-                        ctx_name=self.get_name(), msg=msg)
-
-                # Set the share network for the project
-                self.context["tenants"][tenant_id][CONTEXT_NAME][
-                    "share_networks"].append(sn.to_dict())
-
-    def _setup_for_autocreated_users(self):
-        # Create a share network for each network of the tenant
-        for user, tenant_id in (utils.iterate_per_tenants(
-                self.context.get("users", []))):
-            networks = self.context["tenants"][tenant_id].get("networks")
-            manila_scenario = manila_utils.ManilaScenario({
-                "task": self.task,
-                "owner_id": self.get_owner_id(),
-                "user": user,
-                "config": {
-                    "api_versions": self.context["config"].get(
-                        "api_versions", [])}
-            })
-            manila_scenario.RESOURCE_NAME_FORMAT =
self.RESOURCE_NAME_FORMAT - self.context["tenants"][tenant_id][CONTEXT_NAME] = { - "share_networks": []} - data = {} - - def _setup_share_network(tenant_id, data): - share_network = manila_scenario._create_share_network( - **data).to_dict() - self.context["tenants"][tenant_id][CONTEXT_NAME][ - "share_networks"].append(share_network) - for ss in self.context["tenants"][tenant_id].get( - consts.SECURITY_SERVICES_CONTEXT_NAME, {}).get( - "security_services", []): - manila_scenario._add_security_service_to_share_network( - share_network["id"], ss["id"]) - - if networks: - for network in networks: - if network.get("cidr"): - data["nova_net_id"] = network["id"] - elif network.get("subnets"): - data["neutron_net_id"] = network["id"] - data["neutron_subnet_id"] = network["subnets"][0] - else: - LOG.warning(_( - "Can not determine network service provider. " - "Share network will have no data.")) - _setup_share_network(tenant_id, data) - else: - _setup_share_network(tenant_id, data) - - @logging.log_task_wrapper(LOG.info, _("Enter context: `%s`") - % CONTEXT_NAME) - def setup(self): - self.context[CONTEXT_NAME] = {} - if not self.config["use_share_networks"]: - pass - elif self.context["config"].get("existing_users"): - self._setup_for_existing_users() - else: - self._setup_for_autocreated_users() - - @logging.log_task_wrapper(LOG.info, _("Exit context: `%s`") % CONTEXT_NAME) - def cleanup(self): - if (not self.context["config"].get("existing_users") or - self.config["use_share_networks"]): - resource_manager.cleanup( - names=["manila.share_networks"], - users=self.context.get("users", []), - superclass=self.__class__, - api_versions=self.context["config"].get("api_versions"), - task_id=self.get_owner_id()) - else: - # NOTE(vponomaryov): assume that share networks were not created - # by test run. - return diff --git a/rally/plugins/openstack/context/manila/manila_shares.py b/rally/plugins/openstack/context/manila/manila_shares.py deleted file mode 100644 index f86d4055..00000000 --- a/rally/plugins/openstack/context/manila/manila_shares.py +++ /dev/null @@ -1,111 +0,0 @@ -# Copyright 2016 Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
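For illustration, here is a minimal sketch of a task "context" section that wires the two Manila contexts above together. All tenant, domain and share-network names are hypothetical placeholders, mirroring the examples in the docstrings:

# A minimal, hypothetical "context" section combining the Manila contexts
# above. Every name and credential below is a placeholder.
manila_context = {
    "manila_security_services": {
        "security_services": [
            # Each dict is passed through to _create_security_service(**ss).
            {"type": "LDAP", "server": "bar_ip", "dns_ip": "foo_ip",
             "domain": "quuz_domain", "user": "ololo",
             "password": "fake_password"},
        ]
    },
    "manila_share_networks": {
        "use_share_networks": True,
        # Only consulted for deployments with existing users; otherwise
        # share networks are autocreated, one per tenant network.
        "share_networks": {
            "tenant_1_name_or_id": ["share_network_1_name_or_id"],
        }
    },
}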
- -from oslo_config import cfg - -from rally.common.i18n import _ -from rally.common import logging -from rally.common import utils -from rally.common import validation -from rally import consts as rally_consts -from rally.plugins.openstack.cleanup import manager as resource_manager -from rally.plugins.openstack.context.manila import consts -from rally.plugins.openstack.scenarios.manila import utils as manila_utils -from rally.task import context - -CONF = cfg.CONF -LOG = logging.getLogger(__name__) - -CONTEXT_NAME = consts.SHARES_CONTEXT_NAME - - -@validation.add("required_platform", platform="openstack", users=True) -@context.configure(name=CONTEXT_NAME, platform="openstack", order=455) -class Shares(context.Context): - """This context creates shares for Manila project.""" - - CONFIG_SCHEMA = { - "type": "object", - "$schema": rally_consts.JSON_SCHEMA, - "properties": { - "shares_per_tenant": { - "type": "integer", - "minimum": 1, - }, - "size": { - "type": "integer", - "minimum": 1 - }, - "share_proto": { - "type": "string", - }, - "share_type": { - "type": "string", - }, - }, - "additionalProperties": False - } - - DEFAULT_CONFIG = { - "shares_per_tenant": 1, - "size": 1, - "share_proto": "NFS", - "share_type": None, - } - - def _create_shares(self, manila_scenario, tenant_id, share_proto, size=1, - share_type=None): - tenant_ctxt = self.context["tenants"][tenant_id] - tenant_ctxt.setdefault("shares", []) - for i in range(self.config["shares_per_tenant"]): - kwargs = {"share_proto": share_proto, "size": size} - if share_type: - kwargs["share_type"] = share_type - share_networks = tenant_ctxt.get("manila_share_networks", {}).get( - "share_networks", []) - if share_networks: - kwargs["share_network"] = share_networks[ - i % len(share_networks)]["id"] - share = manila_scenario._create_share(**kwargs) - tenant_ctxt["shares"].append(share.to_dict()) - - @logging.log_task_wrapper( - LOG.info, _("Enter context: `%s`") % CONTEXT_NAME) - def setup(self): - for user, tenant_id in ( - utils.iterate_per_tenants(self.context.get("users", []))): - manila_scenario = manila_utils.ManilaScenario({ - "task": self.task, - "owner_id": self.context["owner_id"], - "user": user, - "config": { - "api_versions": self.context["config"].get( - "api_versions", [])} - }) - self._create_shares( - manila_scenario, - tenant_id, - self.config["share_proto"], - self.config["size"], - self.config["share_type"], - ) - - @logging.log_task_wrapper(LOG.info, _("Exit context: `%s`") % CONTEXT_NAME) - def cleanup(self): - resource_manager.cleanup( - names=["manila.shares"], - users=self.context.get("users", []), - superclass=manila_utils.ManilaScenario, - task_id=self.get_owner_id()) diff --git a/rally/plugins/openstack/context/monasca/__init__.py b/rally/plugins/openstack/context/monasca/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/rally/plugins/openstack/context/monasca/metrics.py b/rally/plugins/openstack/context/monasca/metrics.py deleted file mode 100644 index 29b8d63c..00000000 --- a/rally/plugins/openstack/context/monasca/metrics.py +++ /dev/null @@ -1,108 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from six import moves - -from rally.common.i18n import _ -from rally.common import logging -from rally.common import utils as rutils -from rally.common import validation -from rally import consts -from rally.plugins.openstack.scenarios.monasca import utils as monasca_utils -from rally.task import context - - -LOG = logging.getLogger(__name__) - - -@validation.add("required_platform", platform="openstack", users=True) -@context.configure(name="monasca_metrics", platform="openstack", order=510) -class MonascaMetricGenerator(context.Context): - """Context for creating metrics for benchmarks.""" - - CONFIG_SCHEMA = { - "type": "object", - "$schema": consts.JSON_SCHEMA, - "properties": { - "name": { - "type": "string" - }, - "dimensions": { - "type": "object", - "properties": { - "region": { - "type": "string" - }, - "service": { - "type": "string" - }, - "hostname": { - "type": "string" - }, - "url": { - "type": "string" - } - } - }, - "metrics_per_tenant": { - "type": "integer", - "minimum": 1 - }, - "value_meta": { - "type": "array", - "items": { - "type": "object", - "properties": { - "value_meta_key": { - "type": "string" - }, - "value_meta_value": { - "type": "string" - } - } - } - } - }, - "additionalProperties": False - } - - DEFAULT_CONFIG = { - "metrics_per_tenant": 2 - } - - @logging.log_task_wrapper(LOG.info, _("Enter context: `Monasca`")) - def setup(self): - new_metric = {} - - if "dimensions" in self.config: - new_metric = { - "dimensions": self.config["dimensions"] - } - - for user, tenant_id in rutils.iterate_per_tenants( - self.context["users"]): - scenario = monasca_utils.MonascaScenario( - context={"user": user, "task": self.context["task"]} - ) - for i in moves.xrange(self.config["metrics_per_tenant"]): - scenario._create_metrics(**new_metric) - rutils.interruptable_sleep(0.001) - rutils.interruptable_sleep( - monasca_utils.CONF.benchmark.monasca_metric_create_prepoll_delay, - atomic_delay=1) - - @logging.log_task_wrapper(LOG.info, _("Exit context: `Monasca`")) - def cleanup(self): - # We don't have API for removal of metrics - pass diff --git a/rally/plugins/openstack/context/murano/__init__.py b/rally/plugins/openstack/context/murano/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/rally/plugins/openstack/context/murano/murano_environments.py b/rally/plugins/openstack/context/murano/murano_environments.py deleted file mode 100644 index c21522a4..00000000 --- a/rally/plugins/openstack/context/murano/murano_environments.py +++ /dev/null @@ -1,68 +0,0 @@ -# Copyright 2016: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally.common.i18n import _ -from rally.common import logging -from rally.common import utils -from rally.common import validation -from rally import consts -from rally.plugins.openstack.cleanup import manager as resource_manager -from rally.plugins.openstack.scenarios.murano import utils as murano_utils -from rally.task import context - - -LOG = logging.getLogger(__name__) - - -@validation.add("required_platform", platform="openstack", users=True) -@context.configure(name="murano_environments", platform="openstack", order=402) -class EnvironmentGenerator(context.Context): - """Context class for creating murano environments.""" - - CONFIG_SCHEMA = { - "type": "object", - "$schema": consts.JSON_SCHEMA, - "properties": { - "environments_per_tenant": { - "type": "integer", - "minimum": 1 - }, - }, - "required": ["environments_per_tenant"], - "additionalProperties": False - } - - @logging.log_task_wrapper(LOG.info, - _("Enter context: `Murano environments`")) - def setup(self): - for user, tenant_id in utils.iterate_per_tenants( - self.context["users"]): - self.context["tenants"][tenant_id]["environments"] = [] - for i in range(self.config["environments_per_tenant"]): - murano_util = murano_utils.MuranoScenario( - {"user": user, - "task": self.context["task"], - "owner_id": self.context["owner_id"], - "config": self.context["config"]}) - env = murano_util._create_environment() - self.context["tenants"][tenant_id]["environments"].append(env) - - @logging.log_task_wrapper(LOG.info, - _("Exit context: `Murano environments`")) - def cleanup(self): - resource_manager.cleanup(names=["murano.environments"], - users=self.context.get("users", []), - superclass=murano_utils.MuranoScenario, - task_id=self.get_owner_id()) diff --git a/rally/plugins/openstack/context/murano/murano_packages.py b/rally/plugins/openstack/context/murano/murano_packages.py deleted file mode 100644 index ffa04a53..00000000 --- a/rally/plugins/openstack/context/murano/murano_packages.py +++ /dev/null @@ -1,85 +0,0 @@ -# Copyright 2015: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
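As a usage illustration, a hypothetical task snippet that asks the murano_environments context above to pre-create two environments per tenant:

# Hypothetical task snippet: EnvironmentGenerator.setup() will then call
# _create_environment() twice for every tenant.
murano_environments_context = {
    "murano_environments": {
        "environments_per_tenant": 2
    }
}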
- -import os -import zipfile - -from rally.common import fileutils -from rally.common.i18n import _ -from rally.common.i18n import _LE -from rally.common import logging -from rally.common import utils -from rally.common import validation -from rally import consts -from rally import exceptions -from rally import osclients -from rally.plugins.openstack.cleanup import manager as resource_manager -from rally.task import context - - -LOG = logging.getLogger(__name__) - - -@validation.add("required_platform", platform="openstack", users=True) -@context.configure(name="murano_packages", platform="openstack", order=401) -class PackageGenerator(context.Context): - """Context class for uploading applications for murano.""" - - CONFIG_SCHEMA = { - "type": "object", - "$schema": consts.JSON_SCHEMA, - "properties": { - "app_package": { - "type": "string", - } - }, - "required": ["app_package"], - "additionalProperties": False - } - - @logging.log_task_wrapper(LOG.info, _("Enter context: `Murano packages`")) - def setup(self): - is_config_app_dir = False - pckg_path = os.path.expanduser(self.config["app_package"]) - if zipfile.is_zipfile(pckg_path): - zip_name = pckg_path - elif os.path.isdir(pckg_path): - is_config_app_dir = True - zip_name = fileutils.pack_dir(pckg_path) - else: - msg = (_LE("There is no zip archive or directory by this path:" - " %s") % pckg_path) - raise exceptions.ContextSetupFailure(msg=msg, - ctx_name=self.get_name()) - - for user, tenant_id in utils.iterate_per_tenants( - self.context["users"]): - clients = osclients.Clients(user["credential"]) - self.context["tenants"][tenant_id]["packages"] = [] - if is_config_app_dir: - self.context["tenants"][tenant_id]["murano_ctx"] = zip_name - # TODO(astudenov): use self.generate_random_name() - package = clients.murano().packages.create( - {"categories": ["Web"], "tags": ["tag"]}, - {"file": open(zip_name)}) - - self.context["tenants"][tenant_id]["packages"].append(package) - - @logging.log_task_wrapper(LOG.info, _("Exit context: `Murano packages`")) - def cleanup(self): - resource_manager.cleanup(names=["murano.packages"], - users=self.context.get("users", []), - superclass=self.__class__, - task_id=self.get_owner_id()) diff --git a/rally/plugins/openstack/context/network/__init__.py b/rally/plugins/openstack/context/network/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/rally/plugins/openstack/context/network/allow_ssh.py b/rally/plugins/openstack/context/network/allow_ssh.py deleted file mode 100644 index d536205f..00000000 --- a/rally/plugins/openstack/context/network/allow_ssh.py +++ /dev/null @@ -1,117 +0,0 @@ -# Copyright 2013: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
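For reference, a hypothetical configuration of the murano_packages context above. The path is a placeholder; per PackageGenerator.setup(), it may point either to a zip archive or to a package directory (a directory is zipped on the fly via fileutils.pack_dir()):

# Hypothetical task snippet for the murano_packages context. The path below
# is a placeholder; setup() accepts a zip file or a package directory.
murano_packages_context = {
    "murano_packages": {
        "app_package": "~/murano-packages/HelloWorld.zip"
    }
}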
- -from rally.common.i18n import _ -from rally.common import logging -from rally.common import utils -from rally.common import validation -from rally import osclients -from rally.plugins.openstack.wrappers import network -from rally.task import context - - -LOG = logging.getLogger(__name__) - - -def _prepare_open_secgroup(credential, secgroup_name): - """Generate secgroup allowing all tcp/udp/icmp access. - - In order to run tests on instances it is necessary to have SSH access. - This function generates a secgroup which allows all tcp/udp/icmp access. - - :param credential: clients credential - :param secgroup_name: security group name - - :returns: dict with security group details - """ - neutron = osclients.Clients(credential).neutron() - security_groups = neutron.list_security_groups()["security_groups"] - rally_open = [sg for sg in security_groups if sg["name"] == secgroup_name] - if not rally_open: - descr = "Allow ssh access to VMs created by Rally for benchmarking" - rally_open = neutron.create_security_group( - {"security_group": {"name": secgroup_name, - "description": descr}})["security_group"] - else: - rally_open = rally_open[0] - - rules_to_add = [ - { - "protocol": "tcp", - "port_range_max": 65535, - "port_range_min": 1, - "remote_ip_prefix": "0.0.0.0/0", - "direction": "ingress" - }, - { - "protocol": "udp", - "port_range_max": 65535, - "port_range_min": 1, - "remote_ip_prefix": "0.0.0.0/0", - "direction": "ingress" - }, - { - "protocol": "icmp", - "remote_ip_prefix": "0.0.0.0/0", - "direction": "ingress" - } - ] - - def rule_match(criteria, existing_rule): - return all(existing_rule[key] == value - for key, value in criteria.items()) - - for new_rule in rules_to_add: - if not any(rule_match(new_rule, existing_rule) for existing_rule - in rally_open.get("security_group_rules", [])): - new_rule["security_group_id"] = rally_open["id"] - neutron.create_security_group_rule( - {"security_group_rule": new_rule}) - - return rally_open - - -@validation.add("required_platform", platform="openstack", users=True) -@context.configure(name="allow_ssh", platform="openstack", order=320) -class AllowSSH(context.Context): - """Sets up security groups for all users to access VM via SSH.""" - - @logging.log_task_wrapper(LOG.info, _("Enter context: `allow_ssh`")) - def setup(self): - admin_or_user = (self.context.get("admin") or - self.context.get("users")[0]) - - net_wrapper = network.wrap( - osclients.Clients(admin_or_user["credential"]), - self, config=self.config) - use_sg, msg = net_wrapper.supports_extension("security-group") - if not use_sg: - LOG.info(_("Security group context is disabled: %s") % msg) - return - - secgroup_name = self.generate_random_name() - for user in self.context["users"]: - user["secgroup"] = _prepare_open_secgroup(user["credential"], - secgroup_name) - - @logging.log_task_wrapper(LOG.info, _("Exit context: `allow_ssh`")) - def cleanup(self): - for user, tenant_id in utils.iterate_per_tenants( - self.context["users"]): - with logging.ExceptionLogger( - LOG, _("Unable to delete secgroup: %s.") % - user["secgroup"]["name"]): - clients = osclients.Clients(user["credential"]) - clients.neutron().delete_security_group(user["secgroup"]["id"]) diff --git a/rally/plugins/openstack/context/network/existing_network.py b/rally/plugins/openstack/context/network/existing_network.py deleted file mode 100644 index ca8de99f..00000000 --- a/rally/plugins/openstack/context/network/existing_network.py +++ /dev/null @@ -1,55 +0,0 @@ -# All Rights Reserved. 
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-from rally.common.i18n import _
-from rally.common import logging
-from rally.common import utils
-from rally.common import validation
-from rally import consts
-from rally import osclients
-from rally.plugins.openstack.wrappers import network as network_wrapper
-from rally.task import context
-
-
-LOG = logging.getLogger(__name__)
-
-
-@validation.add("required_platform", platform="openstack", users=True)
-@context.configure(name="existing_network", platform="openstack", order=349)
-class ExistingNetwork(context.Context):
-    """This context supports using existing networks in Rally.
-
-    This context should be used on a deployment with existing users.
-    """
-
-    CONFIG_SCHEMA = {
-        "type": "object",
-        "$schema": consts.JSON_SCHEMA,
-        "additionalProperties": False
-    }
-
-    @logging.log_task_wrapper(LOG.info, _("Enter context: `existing_network`"))
-    def setup(self):
-        for user, tenant_id in utils.iterate_per_tenants(
-                self.context.get("users", [])):
-            net_wrapper = network_wrapper.wrap(
-                osclients.Clients(user["credential"]), self,
-                config=self.config)
-            self.context["tenants"][tenant_id]["networks"] = (
-                net_wrapper.list_networks())
-
-    @logging.log_task_wrapper(LOG.info, _("Exit context: `existing_network`"))
-    def cleanup(self):
-        """Networks were not created by Rally, so there is nothing to do."""
diff --git a/rally/plugins/openstack/context/network/networks.py b/rally/plugins/openstack/context/network/networks.py
deleted file mode 100644
index 03f99573..00000000
--- a/rally/plugins/openstack/context/network/networks.py
+++ /dev/null
@@ -1,114 +0,0 @@
-# Copyright 2014: Mirantis Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from rally.common.i18n import _
-from rally.common import logging
-from rally.common import utils
-from rally.common import validation
-from rally import consts
-from rally import osclients
-from rally.plugins.openstack.wrappers import network as network_wrapper
-from rally.task import context
-
-
-LOG = logging.getLogger(__name__)
-
-
-# NOTE(andreykurilin): admin is used only by cleanup
-@validation.add("required_platform", platform="openstack", admin=True,
-                users=True)
-@context.configure(name="network", platform="openstack", order=350)
-class Network(context.Context):
-    """Create networking resources.
-
-    This creates networks for all tenants, and optionally creates
-    other resources, such as subnets and routers.
- """ - - CONFIG_SCHEMA = { - "type": "object", - "$schema": consts.JSON_SCHEMA, - "properties": { - "start_cidr": { - "type": "string" - }, - "networks_per_tenant": { - "type": "integer", - "minimum": 1 - }, - "subnets_per_network": { - "type": "integer", - "minimum": 1 - }, - "network_create_args": { - "type": "object", - "additionalProperties": True - }, - "dns_nameservers": { - "type": "array", - "items": {"type": "string"}, - "uniqueItems": True - } - }, - "additionalProperties": False - } - - DEFAULT_CONFIG = { - "start_cidr": "10.2.0.0/24", - "networks_per_tenant": 1, - "subnets_per_network": 1, - "network_create_args": {}, - "dns_nameservers": None - } - - @logging.log_task_wrapper(LOG.info, _("Enter context: `network`")) - def setup(self): - # NOTE(rkiran): Some clients are not thread-safe. Thus during - # multithreading/multiprocessing, it is likely the - # sockets are left open. This problem is eliminated by - # creating a connection in setup and cleanup separately. - net_wrapper = network_wrapper.wrap( - osclients.Clients(self.context["admin"]["credential"]), - self, config=self.config) - kwargs = {} - if self.config["dns_nameservers"] is not None: - kwargs["dns_nameservers"] = self.config["dns_nameservers"] - for user, tenant_id in (utils.iterate_per_tenants( - self.context.get("users", []))): - self.context["tenants"][tenant_id]["networks"] = [] - for i in range(self.config["networks_per_tenant"]): - # NOTE(amaretskiy): add_router and subnets_num take effect - # for Neutron only. - network_create_args = self.config["network_create_args"].copy() - network = net_wrapper.create_network( - tenant_id, - add_router=True, - subnets_num=self.config["subnets_per_network"], - network_create_args=network_create_args, - **kwargs) - self.context["tenants"][tenant_id]["networks"].append(network) - - @logging.log_task_wrapper(LOG.info, _("Exit context: `network`")) - def cleanup(self): - net_wrapper = network_wrapper.wrap( - osclients.Clients(self.context["admin"]["credential"]), - self, config=self.config) - for tenant_id, tenant_ctx in self.context["tenants"].items(): - for network in tenant_ctx.get("networks", []): - with logging.ExceptionLogger( - LOG, - _("Failed to delete network for tenant %s") - % tenant_id): - net_wrapper.delete_network(network) diff --git a/rally/plugins/openstack/context/network/routers.py b/rally/plugins/openstack/context/network/routers.py deleted file mode 100644 index 625fe8cb..00000000 --- a/rally/plugins/openstack/context/network/routers.py +++ /dev/null @@ -1,119 +0,0 @@ -# Copyright 2017: Orange -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-
-from rally.common.i18n import _
-from rally.common import logging
-from rally.common import utils
-from rally.common import validation
-from rally import consts
-from rally.plugins.openstack.cleanup import manager as resource_manager
-from rally.plugins.openstack.scenarios.neutron import utils as neutron_utils
-from rally.task import context
-
-
-LOG = logging.getLogger(__name__)
-
-
-@validation.add("required_platform", platform="openstack", admin=True,
-                users=True)
-@context.configure(name="router", platform="openstack", order=351)
-class Router(context.Context):
-    """Create networking resources.
-
-    This creates routers for all tenants.
-    """
-
-    CONFIG_SCHEMA = {
-        "type": "object",
-        "$schema": consts.JSON_SCHEMA,
-        "properties": {
-            "routers_per_tenant": {
-                "type": "integer",
-                "minimum": 1
-            },
-            "admin_state_up": {
-                "description": "The administrative state of the router, "
-                               "which is up (true) or down (false).",
-                "type": "boolean",
-            },
-            "external_gateway_info": {
-                "description": "The external gateway information.",
-                "type": "object",
-                "properties": {
-                    "network_id": {"type": "string"},
-                    "enable_snat": {"type": "boolean"}
-                }
-            },
-            "network_id": {
-                "description": "Network ID",
-                "type": "string"
-            },
-            "external_fixed_ips": {
-                "description": "IP(s) of the external gateway interface.",
-                "type": "array",
-                "items": {
-                    "type": "object",
-                    "properties": {
-                        "ip_address": {"type": "string"},
-                        "subnet_id": {"type": "string"}
                    }
-                }
-            },
-            "distributed": {
-                "description": "Distributed router. Requires the dvr "
-                               "extension.",
-                "type": "boolean"
-            },
-            "ha": {
-                "description": "Highly-available router. Requires the l3-ha "
-                               "extension.",
-                "type": "boolean"
-            },
-            "availability_zone_hints": {
-                "description": "Requires the router_availability_zone "
-                               "extension.",
-                "type": "boolean"
-            }
-        },
-        "additionalProperties": False
-    }
-
-    DEFAULT_CONFIG = {
-        "routers_per_tenant": 1,
-    }
-
-    @logging.log_task_wrapper(LOG.info, _("Enter context: `router`"))
-    def setup(self):
-        kwargs = {}
-        parameters = ("admin_state_up", "external_gateway_info", "network_id",
-                      "external_fixed_ips", "distributed", "ha",
-                      "availability_zone_hints")
-        for parameter in parameters:
-            if parameter in self.config:
-                kwargs[parameter] = self.config[parameter]
-        for user, tenant_id in (utils.iterate_per_tenants(
-                self.context.get("users", []))):
-            self.context["tenants"][tenant_id]["routers"] = []
-            scenario = neutron_utils.NeutronScenario(
-                context={"user": user, "task": self.context["task"],
-                         "owner_id": self.context["owner_id"]}
-            )
-            for i in range(self.config["routers_per_tenant"]):
-                router = scenario._create_router(kwargs)
-                self.context["tenants"][tenant_id]["routers"].append(router)
-
-    @logging.log_task_wrapper(LOG.info, _("Exit context: `router`"))
-    def cleanup(self):
-        resource_manager.cleanup(
-            names=["neutron.router"],
-            users=self.context.get("users", []),
-            superclass=neutron_utils.NeutronScenario,
-            task_id=self.get_owner_id())
diff --git a/rally/plugins/openstack/context/neutron/__init__.py b/rally/plugins/openstack/context/neutron/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/rally/plugins/openstack/context/neutron/lbaas.py b/rally/plugins/openstack/context/neutron/lbaas.py
deleted file mode 100644
index f0eb6a71..00000000
--- a/rally/plugins/openstack/context/neutron/lbaas.py
+++ /dev/null
@@ -1,95 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License.
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally.common.i18n import _ -from rally.common import logging -from rally.common import utils -from rally.common import validation -from rally import consts -from rally import osclients -from rally.plugins.openstack.wrappers import network as network_wrapper -from rally.task import context - - -LOG = logging.getLogger(__name__) - - -@validation.add("required_platform", platform="openstack", admin=True, - users=True) -@context.configure(name="lbaas", platform="openstack", order=360) -class Lbaas(context.Context): - """Creates a lb-pool for every subnet created in network context.""" - CONFIG_SCHEMA = { - "type": "object", - "$schema": consts.JSON_SCHEMA, - "properties": { - "pool": { - "type": "object" - }, - "lbaas_version": { - "type": "integer", - "minimum": 1 - } - }, - "additionalProperties": False - } - - DEFAULT_CONFIG = { - "pool": { - "lb_method": "ROUND_ROBIN", - "protocol": "HTTP" - }, - "lbaas_version": 1 - } - - @logging.log_task_wrapper(LOG.info, _("Enter context: `lbaas`")) - def setup(self): - net_wrapper = network_wrapper.wrap( - osclients.Clients(self.context["admin"]["credential"]), - self, config=self.config) - - use_lb, msg = net_wrapper.supports_extension("lbaas") - if not use_lb: - LOG.info(msg) - return - - # Creates a lb-pool for every subnet created in network context. - for user, tenant_id in (utils.iterate_per_tenants( - self.context.get("users", []))): - for network in self.context["tenants"][tenant_id]["networks"]: - for subnet in network.get("subnets", []): - if self.config["lbaas_version"] == 1: - network.setdefault("lb_pools", []).append( - net_wrapper.create_v1_pool( - tenant_id, - subnet, - **self.config["pool"])) - else: - raise NotImplementedError( - "Context for LBaaS version %s not implemented." - % self.config["lbaas_version"]) - - @logging.log_task_wrapper(LOG.info, _("Exit context: `lbaas`")) - def cleanup(self): - net_wrapper = network_wrapper.wrap( - osclients.Clients(self.context["admin"]["credential"]), - self, config=self.config) - for tenant_id, tenant_ctx in self.context["tenants"].items(): - for network in tenant_ctx.get("networks", []): - for pool in network.get("lb_pools", []): - with logging.ExceptionLogger( - LOG, - _("Failed to delete pool %(pool)s for tenant " - "%(tenant)s") % {"pool": pool["pool"]["id"], - "tenant": tenant_id}): - if self.config["lbaas_version"] == 1: - net_wrapper.delete_v1_pool(pool["pool"]["id"]) diff --git a/rally/plugins/openstack/context/nova/__init__.py b/rally/plugins/openstack/context/nova/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/rally/plugins/openstack/context/nova/flavors.py b/rally/plugins/openstack/context/nova/flavors.py deleted file mode 100644 index 7de90367..00000000 --- a/rally/plugins/openstack/context/nova/flavors.py +++ /dev/null @@ -1,135 +0,0 @@ -# Copyright 2014: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from novaclient import exceptions as nova_exceptions
-
-from rally.common.i18n import _
-from rally.common import logging
-from rally.common import utils as rutils
-from rally.common import validation
-from rally import consts
-from rally import osclients
-from rally.plugins.openstack.cleanup import manager as resource_manager
-from rally.task import context
-
-LOG = logging.getLogger(__name__)
-
-
-@validation.add("required_platform", platform="openstack", admin=True)
-@context.configure(name="flavors", platform="openstack", order=340)
-class FlavorsGenerator(context.Context):
-    """Context creates a list of flavors."""
-
-    CONFIG_SCHEMA = {
-        "type": "array",
-        "$schema": consts.JSON_SCHEMA,
-        "items": {
-            "type": "object",
-            "properties": {
-                "name": {
-                    "type": "string",
-                },
-                "ram": {
-                    "type": "integer",
-                    "minimum": 1
-                },
-                "vcpus": {
-                    "type": "integer",
-                    "minimum": 1
-                },
-                "disk": {
-                    "type": "integer",
-                    "minimum": 0
-                },
-                "swap": {
-                    "type": "integer",
-                    "minimum": 0
-                },
-                "ephemeral": {
-                    "type": "integer",
-                    "minimum": 0
-                },
-                "extra_specs": {
-                    "type": "object",
-                    "additionalProperties": {
-                        "type": "string"
-                    }
-                }
-            },
-            "additionalProperties": False,
-            "required": ["name", "ram"]
-        }
-    }
-
-    @logging.log_task_wrapper(LOG.info, _("Enter context: `flavors`"))
-    def setup(self):
-        """Create a list of flavors."""
-        self.context["flavors"] = {}
-
-        clients = osclients.Clients(self.context["admin"]["credential"])
-        for flavor_config in self.config:
-
-            extra_specs = flavor_config.get("extra_specs")
-
-            flavor_config = FlavorConfig(**flavor_config)
-            try:
-                flavor = clients.nova().flavors.create(**flavor_config)
-            except nova_exceptions.Conflict as e:
-                LOG.warning("Using the already existing flavor %s" %
-                            flavor_config["name"])
-                if logging.is_debug():
-                    LOG.exception(e)
-                continue
-
-            if extra_specs:
-                flavor.set_keys(extra_specs)
-
-            self.context["flavors"][flavor_config["name"]] = flavor.to_dict()
-            LOG.debug("Created flavor with id '%s'" % flavor.id)
-
-    @logging.log_task_wrapper(LOG.info, _("Exit context: `flavors`"))
-    def cleanup(self):
-        """Delete the created flavors."""
-        matcher = rutils.make_name_matcher(*[f["name"] for f in self.config])
-        resource_manager.cleanup(
-            names=["nova.flavors"],
-            admin=self.context["admin"],
-            api_versions=self.context["config"].get("api_versions"),
-            superclass=matcher,
-            task_id=self.get_owner_id())
-
-
-class FlavorConfig(dict):
-    def __init__(self, name, ram, vcpus=1, disk=0, swap=0, ephemeral=0,
-                 extra_specs=None):
-        """Flavor configuration for the context and flavor & image validation.
-
-        Context code uses this class to provide default values for flavor
-        creation. Validation code uses it as a Flavor instance to
-        check image validity against a flavor that is to be created by
-        the context.
- - :param name: name of the newly created flavor - :param ram: RAM amount for the flavor (MBs) - :param vcpus: VCPUs amount for the flavor - :param disk: disk amount for the flavor (GBs) - :param swap: swap amount for the flavor (MBs) - :param ephemeral: ephemeral disk amount for the flavor (GBs) - :param extra_specs: is ignored - """ - super(FlavorConfig, self).__init__( - name=name, ram=ram, vcpus=vcpus, disk=disk, - swap=swap, ephemeral=ephemeral) - self.__dict__.update(self) diff --git a/rally/plugins/openstack/context/nova/keypairs.py b/rally/plugins/openstack/context/nova/keypairs.py deleted file mode 100644 index 79ade801..00000000 --- a/rally/plugins/openstack/context/nova/keypairs.py +++ /dev/null @@ -1,65 +0,0 @@ -# Copyright 2014: Rackspace UK -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally.common.i18n import _ -from rally.common import logging -from rally.common import validation -from rally import osclients -from rally.plugins.openstack.cleanup import manager as resource_manager -from rally.task import context - - -LOG = logging.getLogger(__name__) - - -@validation.add("required_platform", platform="openstack", users=True) -@context.configure(name="keypair", platform="openstack", order=310) -class Keypair(context.Context): - """Create Nova KeyPair for each user.""" - - # NOTE(andreykurilin): "type" != "null", since we need to support backward - # compatibility(previously empty dict was valid) and I hope in near - # future, we will extend this context to accept keys. - CONFIG_SCHEMA = {"type": "object", - "additionalProperties": False} - - def _generate_keypair(self, credential): - nova_client = osclients.Clients(credential).nova() - # NOTE(hughsaunders): If keypair exists, it should re-generate name. - - keypairs = nova_client.keypairs.list() - keypair_names = [keypair.name for keypair in keypairs] - while True: - keypair_name = self.generate_random_name() - if keypair_name not in keypair_names: - break - - keypair = nova_client.keypairs.create(keypair_name) - return {"private": keypair.private_key, - "public": keypair.public_key, - "name": keypair_name, - "id": keypair.id} - - @logging.log_task_wrapper(LOG.info, _("Enter context: `keypair`")) - def setup(self): - for user in self.context["users"]: - user["keypair"] = self._generate_keypair(user["credential"]) - - @logging.log_task_wrapper(LOG.info, _("Exit context: `keypair`")) - def cleanup(self): - resource_manager.cleanup(names=["nova.keypairs"], - users=self.context.get("users", []), - superclass=self.__class__, - task_id=self.get_owner_id()) diff --git a/rally/plugins/openstack/context/nova/servers.py b/rally/plugins/openstack/context/nova/servers.py deleted file mode 100755 index 234367e0..00000000 --- a/rally/plugins/openstack/context/nova/servers.py +++ /dev/null @@ -1,141 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally.common.i18n import _ -from rally.common import logging -from rally.common import utils as rutils -from rally.common import validation -from rally import osclients -from rally.plugins.openstack.cleanup import manager as resource_manager -from rally.plugins.openstack.scenarios.nova import utils as nova_utils -from rally.plugins.openstack import types -from rally.task import context - - -LOG = logging.getLogger(__name__) - - -@validation.add("required_platform", platform="openstack", users=True) -@context.configure(name="servers", platform="openstack", order=430) -class ServerGenerator(context.Context): - """Context class for adding temporary servers for benchmarks. - - Servers are added for each tenant. - """ - - CONFIG_SCHEMA = { - "type": "object", - "properties": { - "image": { - "description": "Name of image to boot server(s) from.", - "type": "object", - "properties": { - "name": {"type": "string"} - } - }, - "flavor": { - "description": "Name of flavor to boot server(s) with.", - "type": "object", - "properties": { - "name": {"type": "string"} - } - }, - "servers_per_tenant": { - "description": "Number of servers to boot in each Tenant.", - "type": "integer", - "minimum": 1 - }, - "auto_assign_nic": { - "description": "True if NICs should be assigned.", - "type": "boolean", - }, - "nics": { - "type": "array", - "description": "List of networks to attach to server.", - "items": {"oneOf": [ - {"type": "object", - "properties": {"net-id": {"type": "string"}}, - "description": "Network ID in a format like OpenStack API" - " expects to see."}, - {"type": "string", "description": "Network ID."}]}, - "minItems": 1 - } - }, - "required": ["image", "flavor"], - "additionalProperties": False - } - - DEFAULT_CONFIG = { - "servers_per_tenant": 5, - "auto_assign_nic": False - } - - @logging.log_task_wrapper(LOG.info, _("Enter context: `Servers`")) - def setup(self): - image = self.config["image"] - flavor = self.config["flavor"] - auto_nic = self.config["auto_assign_nic"] - servers_per_tenant = self.config["servers_per_tenant"] - kwargs = {} - if self.config.get("nics"): - if isinstance(self.config["nics"][0], dict): - # it is a format that Nova API expects - kwargs["nics"] = list(self.config["nics"]) - else: - kwargs["nics"] = [{"net-id": nic} - for nic in self.config["nics"]] - - clients = osclients.Clients(self.context["users"][0]["credential"]) - image_id = types.GlanceImage.transform(clients=clients, - resource_config=image) - flavor_id = types.Flavor.transform(clients=clients, - resource_config=flavor) - - for iter_, (user, tenant_id) in enumerate(rutils.iterate_per_tenants( - self.context["users"])): - LOG.debug("Booting servers for user tenant %s " - % (user["tenant_id"])) - tmp_context = {"user": user, - "tenant": self.context["tenants"][tenant_id], - "task": self.context["task"], - "owner_id": self.context["owner_id"], - "iteration": iter_} - nova_scenario = nova_utils.NovaScenario(tmp_context) - - LOG.debug("Calling _boot_servers with image_id=%(image_id)s " - "flavor_id=%(flavor_id)s " - "servers_per_tenant=%(servers_per_tenant)s" - % {"image_id": image_id, - 
"flavor_id": flavor_id, - "servers_per_tenant": servers_per_tenant}) - - servers = nova_scenario._boot_servers(image_id, flavor_id, - requests=servers_per_tenant, - auto_assign_nic=auto_nic, - **kwargs) - - current_servers = [server.id for server in servers] - - LOG.debug("Adding booted servers %s to context" - % current_servers) - - self.context["tenants"][tenant_id][ - "servers"] = current_servers - - @logging.log_task_wrapper(LOG.info, _("Exit context: `Servers`")) - def cleanup(self): - resource_manager.cleanup(names=["nova.servers"], - users=self.context.get("users", []), - superclass=nova_utils.NovaScenario, - task_id=self.get_owner_id()) diff --git a/rally/plugins/openstack/context/quotas/__init__.py b/rally/plugins/openstack/context/quotas/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/rally/plugins/openstack/context/quotas/cinder_quotas.py b/rally/plugins/openstack/context/quotas/cinder_quotas.py deleted file mode 100644 index 50156833..00000000 --- a/rally/plugins/openstack/context/quotas/cinder_quotas.py +++ /dev/null @@ -1,58 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -class CinderQuotas(object): - """Management of Cinder quotas.""" - - QUOTAS_SCHEMA = { - "type": "object", - "additionalProperties": False, - "properties": { - "gigabytes": { - "type": "integer", - "minimum": -1 - }, - "snapshots": { - "type": "integer", - "minimum": -1 - }, - "volumes": { - "type": "integer", - "minimum": -1 - }, - "backups": { - "type": "integer", - "minimum": -1 - }, - "backup_gigabytes": { - "type": "integer", - "minimum": -1 - } - } - } - - def __init__(self, clients): - self.clients = clients - - def update(self, tenant_id, **kwargs): - self.clients.cinder().quotas.update(tenant_id, **kwargs) - - def delete(self, tenant_id): - self.clients.cinder().quotas.delete(tenant_id) - - def get(self, tenant_id): - response = self.clients.cinder().quotas.get(tenant_id) - return dict([(k, getattr(response, k)) - for k in self.QUOTAS_SCHEMA["properties"]]) diff --git a/rally/plugins/openstack/context/quotas/designate_quotas.py b/rally/plugins/openstack/context/quotas/designate_quotas.py deleted file mode 100644 index b2c647d9..00000000 --- a/rally/plugins/openstack/context/quotas/designate_quotas.py +++ /dev/null @@ -1,56 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- - -class DesignateQuotas(object): - """Management of Designate quotas.""" - - QUOTAS_SCHEMA = { - "type": "object", - "additionalProperties": False, - "properties": { - "domains": { - "type": "integer", - "minimum": 1 - }, - "domain_recordsets": { - "type": "integer", - "minimum": 1 - }, - "domain_records": { - "type": "integer", - "minimum": 1 - }, - "recordset_records": { - "type": "integer", - "minimum": 1 - }, - } - } - - def __init__(self, clients): - self.clients = clients - - def update(self, tenant_id, **kwargs): - self.clients.designate().quotas.update(tenant_id, kwargs) - - def delete(self, tenant_id): - self.clients.designate().quotas.reset(tenant_id) - - def get(self, tenant_id): - # NOTE(andreykurilin): we have broken designate jobs, so I can't check - # that this method is right :( - response = self.clients.designate().quotas.get(tenant_id) - return dict([(k, response.get(k)) - for k in self.QUOTAS_SCHEMA["properties"]]) diff --git a/rally/plugins/openstack/context/quotas/manila_quotas.py b/rally/plugins/openstack/context/quotas/manila_quotas.py deleted file mode 100644 index 0c36c45e..00000000 --- a/rally/plugins/openstack/context/quotas/manila_quotas.py +++ /dev/null @@ -1,59 +0,0 @@ -# Copyright 2015 Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -class ManilaQuotas(object): - """Management of Manila quotas.""" - - QUOTAS_SCHEMA = { - "type": "object", - "additionalProperties": False, - "properties": { - "shares": { - "type": "integer", - "minimum": -1 - }, - "gigabytes": { - "type": "integer", - "minimum": -1 - }, - "snapshots": { - "type": "integer", - "minimum": -1 - }, - "snapshot_gigabytes": { - "type": "integer", - "minimum": -1 - }, - "share_networks": { - "type": "integer", - "minimum": -1 - } - } - } - - def __init__(self, clients): - self.clients = clients - - def update(self, tenant_id, **kwargs): - self.clients.manila().quotas.update(tenant_id, **kwargs) - - def delete(self, tenant_id): - self.clients.manila().quotas.delete(tenant_id) - - def get(self, tenant_id): - response = self.clients.manila().quotas.get(tenant_id) - return dict([(k, getattr(response, k)) - for k in self.QUOTAS_SCHEMA["properties"]]) diff --git a/rally/plugins/openstack/context/quotas/neutron_quotas.py b/rally/plugins/openstack/context/quotas/neutron_quotas.py deleted file mode 100644 index f24d3a2b..00000000 --- a/rally/plugins/openstack/context/quotas/neutron_quotas.py +++ /dev/null @@ -1,78 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - - -class NeutronQuotas(object): - """Management of Neutron quotas.""" - - QUOTAS_SCHEMA = { - "type": "object", - "additionalProperties": False, - "properties": { - "network": { - "type": "integer", - "minimum": -1 - }, - "subnet": { - "type": "integer", - "minimum": -1 - }, - "port": { - "type": "integer", - "minimum": -1 - }, - "router": { - "type": "integer", - "minimum": -1 - }, - "floatingip": { - "type": "integer", - "minimum": -1 - }, - "security_group": { - "type": "integer", - "minimum": -1 - }, - "security_group_rule": { - "type": "integer", - "minimum": -1 - }, - "pool": { - "type": "integer", - "minimum": -1 - }, - "vip": { - "type": "integer", - "minimum": -1 - }, - "health_monitor": { - "type": "integer", - "minimum": -1 - } - } - } - - def __init__(self, clients): - self.clients = clients - - def update(self, tenant_id, **kwargs): - body = {"quota": kwargs} - self.clients.neutron().update_quota(tenant_id, body=body) - - def delete(self, tenant_id): - # Reset quotas to defaults and tag database objects as deleted - self.clients.neutron().delete_quota(tenant_id) - - def get(self, tenant_id): - return self.clients.neutron().show_quota(tenant_id)["quota"] diff --git a/rally/plugins/openstack/context/quotas/nova_quotas.py b/rally/plugins/openstack/context/quotas/nova_quotas.py deleted file mode 100644 index 065a3a74..00000000 --- a/rally/plugins/openstack/context/quotas/nova_quotas.py +++ /dev/null @@ -1,95 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
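The per-service quota classes in this directory all follow the same small manager protocol: update/get/delete keyed by tenant ID. As a rough sketch of how such a manager could be driven directly, assuming `clients` is an osclients.Clients instance and NeutronQuotas is importable from the module above:

# Sketch only: drive NeutronQuotas by hand, mirroring what the "quotas"
# context does. "clients" is assumed to be an osclients.Clients instance.
def tighten_neutron_quotas(clients, tenant_id):
    manager = NeutronQuotas(clients)
    original = manager.get(tenant_id)  # snapshot, e.g. for a later restore
    manager.update(tenant_id, network=20, port=100, security_group=-1)
    return original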
- - -class NovaQuotas(object): - """Management of Nova quotas.""" - - QUOTAS_SCHEMA = { - "type": "object", - "additionalProperties": False, - "properties": { - "instances": { - "type": "integer", - "minimum": -1 - }, - "cores": { - "type": "integer", - "minimum": -1 - }, - "ram": { - "type": "integer", - "minimum": -1 - }, - "floating_ips": { - "type": "integer", - "minimum": -1 - }, - "fixed_ips": { - "type": "integer", - "minimum": -1 - }, - "metadata_items": { - "type": "integer", - "minimum": -1 - }, - "injected_files": { - "type": "integer", - "minimum": -1 - }, - "injected_file_content_bytes": { - "type": "integer", - "minimum": -1 - }, - "injected_file_path_bytes": { - "type": "integer", - "minimum": -1 - }, - "key_pairs": { - "type": "integer", - "minimum": -1 - }, - "security_groups": { - "type": "integer", - "minimum": -1 - }, - "security_group_rules": { - "type": "integer", - "minimum": -1 - }, - "server_groups": { - "type": "integer", - "minimum": -1 - }, - "server_group_members": { - "type": "integer", - "minimum": -1 - } - } - } - - def __init__(self, clients): - self.clients = clients - - def update(self, tenant_id, **kwargs): - self.clients.nova().quotas.update(tenant_id, **kwargs) - - def delete(self, tenant_id): - # Reset quotas to defaults and tag database objects as deleted - self.clients.nova().quotas.delete(tenant_id) - - def get(self, tenant_id): - response = self.clients.nova().quotas.get(tenant_id) - return dict([(k, getattr(response, k)) - for k in self.QUOTAS_SCHEMA["properties"]]) diff --git a/rally/plugins/openstack/context/quotas/quotas.py b/rally/plugins/openstack/context/quotas/quotas.py deleted file mode 100644 index bcde82d6..00000000 --- a/rally/plugins/openstack/context/quotas/quotas.py +++ /dev/null @@ -1,112 +0,0 @@ -# Copyright 2014: Dassault Systemes -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from rally.common.i18n import _ -from rally.common import logging -from rally.common import validation -from rally import consts -from rally import osclients -from rally.plugins.openstack.context.quotas import cinder_quotas -from rally.plugins.openstack.context.quotas import designate_quotas -from rally.plugins.openstack.context.quotas import manila_quotas -from rally.plugins.openstack.context.quotas import neutron_quotas -from rally.plugins.openstack.context.quotas import nova_quotas -from rally.task import context - - -LOG = logging.getLogger(__name__) - - -@validation.add("required_platform", platform="openstack", admin=True) -@context.configure(name="quotas", platform="openstack", order=300) -class Quotas(context.Context): - """Context class for updating benchmarks' tenants quotas.""" - - CONFIG_SCHEMA = { - "type": "object", - "$schema": consts.JSON_SCHEMA, - "additionalProperties": False, - "properties": { - "nova": nova_quotas.NovaQuotas.QUOTAS_SCHEMA, - "cinder": cinder_quotas.CinderQuotas.QUOTAS_SCHEMA, - "manila": manila_quotas.ManilaQuotas.QUOTAS_SCHEMA, - "designate": designate_quotas.DesignateQuotas.QUOTAS_SCHEMA, - "neutron": neutron_quotas.NeutronQuotas.QUOTAS_SCHEMA - } - } - - def __init__(self, ctx): - super(Quotas, self).__init__(ctx) - self.clients = osclients.Clients( - self.context["admin"]["credential"], - api_info=self.context["config"].get("api_versions")) - - self.manager = { - "nova": nova_quotas.NovaQuotas(self.clients), - "cinder": cinder_quotas.CinderQuotas(self.clients), - "manila": manila_quotas.ManilaQuotas(self.clients), - "designate": designate_quotas.DesignateQuotas(self.clients), - "neutron": neutron_quotas.NeutronQuotas(self.clients) - } - self.original_quotas = [] - - def _service_has_quotas(self, service): - return len(self.config.get(service, {})) > 0 - - @logging.log_task_wrapper(LOG.info, _("Enter context: `quotas`")) - def setup(self): - for tenant_id in self.context["tenants"]: - for service in self.manager: - if self._service_has_quotas(service): - # NOTE(andreykurilin): in case of existing users it is - # required to restore original quotas instead of reset - # to default ones. 
- if "existing_users" in self.context: - self.original_quotas.append( - (service, tenant_id, - self.manager[service].get(tenant_id))) - self.manager[service].update(tenant_id, - **self.config[service]) - - def _restore_quotas(self): - for service, tenant_id, quotas in self.original_quotas: - try: - self.manager[service].update(tenant_id, **quotas) - except Exception as e: - LOG.warning("Failed to restore quotas for tenant %(tenant_id)s" - " in service %(service)s \n reason: %(exc)s" % - {"tenant_id": tenant_id, "service": service, - "exc": e}) - - def _delete_quotas(self): - for service in self.manager: - if self._service_has_quotas(service): - for tenant_id in self.context["tenants"]: - try: - self.manager[service].delete(tenant_id) - except Exception as e: - LOG.warning("Failed to remove quotas for tenant " - "%(tenant_id)s in service %(service)s " - "\n reason: %(exc)s" - % {"tenant_id": tenant_id, - "service": service, "exc": e}) - - @logging.log_task_wrapper(LOG.info, _("Exit context: `quotas`")) - def cleanup(self): - if self.original_quotas: - # existing users - self._restore_quotas() - else: - self._delete_quotas() diff --git a/rally/plugins/openstack/context/sahara/__init__.py b/rally/plugins/openstack/context/sahara/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/rally/plugins/openstack/context/sahara/sahara_cluster.py b/rally/plugins/openstack/context/sahara/sahara_cluster.py deleted file mode 100644 index b6e74aee..00000000 --- a/rally/plugins/openstack/context/sahara/sahara_cluster.py +++ /dev/null @@ -1,189 +0,0 @@ -# Copyright 2014: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-
-from oslo_config import cfg
-
-from rally.common.i18n import _
-from rally.common import logging
-from rally.common import utils as rutils
-from rally.common import validation
-from rally import consts
-from rally import exceptions
-from rally.plugins.openstack.cleanup import manager as resource_manager
-from rally.plugins.openstack.scenarios.sahara import utils
-from rally.task import context
-from rally.task import utils as bench_utils
-
-
-CONF = cfg.CONF
-LOG = logging.getLogger(__name__)
-
-
-@validation.add("required_platform", platform="openstack", users=True)
-@context.configure(name="sahara_cluster", platform="openstack", order=441)
-class SaharaCluster(context.Context):
-    """Context class for setting up a Cluster for an EDP job."""
-
-    CONFIG_SCHEMA = {
-        "type": "object",
-        "$schema": consts.JSON_SCHEMA,
-        "properties": {
-            "plugin_name": {
-                "type": "string"
-            },
-            "hadoop_version": {
-                "type": "string",
-            },
-            "workers_count": {
-                "type": "integer",
-                "minimum": 1
-            },
-            "flavor_id": {
-                "type": "string",
-            },
-            "master_flavor_id": {
-                "type": "string",
-            },
-            "worker_flavor_id": {
-                "type": "string",
-            },
-            "floating_ip_pool": {
-                "type": "string",
-            },
-            "volumes_per_node": {
-                "type": "integer",
-                "minimum": 1
-            },
-            "volumes_size": {
-                "type": "integer",
-                "minimum": 1
-            },
-            "auto_security_group": {
-                "type": "boolean",
-            },
-            "security_groups": {
-                "type": "array",
-                "items": {
-                    "type": "string"
-                }
-            },
-            "node_configs": {
-                "type": "object"
-            },
-            "cluster_configs": {
-                "type": "object"
-            },
-            "enable_anti_affinity": {
-                "type": "boolean"
-            },
-            "enable_proxy": {
-                "type": "boolean"
-            },
-            "use_autoconfig": {
-                "type": "boolean"
-            },
-        },
-        "additionalProperties": False,
-        "required": ["plugin_name", "hadoop_version", "workers_count",
-                     "master_flavor_id", "worker_flavor_id"]
-    }
-
-    @logging.log_task_wrapper(LOG.info, _("Enter context: `Sahara Cluster`"))
-    def setup(self):
-        utils.init_sahara_context(self)
-        self.context["sahara"]["clusters"] = {}
-
-        wait_dict = {}
-
-        for user, tenant_id in rutils.iterate_per_tenants(
-                self.context["users"]):
-
-            image_id = self.context["tenants"][tenant_id]["sahara"]["image"]
-
-            floating_ip_pool = self.config.get("floating_ip_pool")
-
-            temporary_context = {
-                "user": user,
-                "tenant": self.context["tenants"][tenant_id],
-                "task": self.context["task"],
-                "owner_id": self.context["owner_id"]
-            }
-            scenario = utils.SaharaScenario(context=temporary_context)
-
-            cluster = scenario._launch_cluster(
-                plugin_name=self.config["plugin_name"],
-                hadoop_version=self.config["hadoop_version"],
-                flavor_id=self.config.get("flavor_id"),
-                master_flavor_id=self.config["master_flavor_id"],
-                worker_flavor_id=self.config["worker_flavor_id"],
-                workers_count=self.config["workers_count"],
-                image_id=image_id,
-                floating_ip_pool=floating_ip_pool,
-                volumes_per_node=self.config.get("volumes_per_node"),
-                volumes_size=self.config.get("volumes_size", 1),
-                auto_security_group=self.config.get("auto_security_group",
-                                                    True),
-                security_groups=self.config.get("security_groups"),
-                node_configs=self.config.get("node_configs"),
-                cluster_configs=self.config.get("cluster_configs"),
-                enable_anti_affinity=self.config.get("enable_anti_affinity",
-                                                     False),
-                enable_proxy=self.config.get("enable_proxy", False),
-                wait_active=False,
-                use_autoconfig=self.config.get("use_autoconfig", True)
-            )
-
-            self.context["tenants"][tenant_id]["sahara"]["cluster"] = (
-                cluster.id)
-
-            # Need to save the client instance to poll for active status
-            wait_dict[cluster] =
scenario.clients("sahara") - - bench_utils.wait_for( - resource=wait_dict, - update_resource=self.update_clusters_dict, - is_ready=self.all_clusters_active, - timeout=CONF.benchmark.sahara_cluster_create_timeout, - check_interval=CONF.benchmark.sahara_cluster_check_interval) - - def update_clusters_dict(self, dct): - new_dct = {} - for cluster, client in dct.items(): - new_cl = client.clusters.get(cluster.id) - new_dct[new_cl] = client - - return new_dct - - def all_clusters_active(self, dct): - for cluster, client in dct.items(): - cluster_status = cluster.status.lower() - if cluster_status == "error": - msg = _("Sahara cluster %(name)s has failed to" - " %(action)s. Reason: '%(reason)s'") % { - "name": cluster.name, "action": "start", - "reason": cluster.status_description} - raise exceptions.ContextSetupFailure( - ctx_name=self.get_name(), - msg=msg) - elif cluster_status != "active": - return False - return True - - @logging.log_task_wrapper(LOG.info, _("Exit context: `Sahara Cluster`")) - def cleanup(self): - resource_manager.cleanup(names=["sahara.clusters"], - users=self.context.get("users", []), - superclass=utils.SaharaScenario, - task_id=self.get_owner_id()) diff --git a/rally/plugins/openstack/context/sahara/sahara_image.py b/rally/plugins/openstack/context/sahara/sahara_image.py deleted file mode 100644 index 89a8e53f..00000000 --- a/rally/plugins/openstack/context/sahara/sahara_image.py +++ /dev/null @@ -1,138 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
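The `setup` above hands polling off to `bench_utils.wait_for`. A simplified sketch of that contract, inferred from how `update_clusters_dict` and `all_clusters_active` are passed in (an assumption, not the real implementation):

import time

def wait_for(resource, update_resource, is_ready, timeout, check_interval):
    # Poll: refresh the tracked resources, test readiness, and give up
    # once the deadline has passed.
    deadline = time.time() + timeout
    while not is_ready(resource):
        if time.time() > deadline:
            raise RuntimeError("resources did not become ready in time")
        time.sleep(check_interval)
        resource = update_resource(resource)
    return resource

Passing the whole cluster-to-client dict as `resource` lets a single call track every tenant's cluster at once, which is why both callbacks take and return dicts.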
- -from rally.common.i18n import _ -from rally.common import logging -from rally.common import utils as rutils -from rally.common import validation -from rally import consts -from rally import exceptions -from rally import osclients -from rally.plugins.openstack.cleanup import manager as resource_manager -from rally.plugins.openstack.scenarios.sahara import utils -from rally.plugins.openstack.services.image import image as image_services -from rally.task import context - - -LOG = logging.getLogger(__name__) - - -@validation.add("required_platform", platform="openstack", users=True) -@context.configure(name="sahara_image", platform="openstack", order=440) -class SaharaImage(context.Context): - """Context class for adding and tagging Sahara images.""" - - CONFIG_SCHEMA = { - "type": "object", - "$schema": consts.JSON_SCHEMA, - "properties": { - "image_uuid": { - "type": "string" - }, - "image_url": { - "type": "string", - }, - "username": { - "type": "string" - }, - "plugin_name": { - "type": "string", - }, - "hadoop_version": { - "type": "string", - } - }, - "oneOf": [ - {"description": "Create an image.", - "required": ["image_url", "username", "plugin_name", - "hadoop_version"]}, - {"description": "Use an existing image.", - "required": ["image_uuid"]} - ], - "additionalProperties": False - } - - def _create_image(self, hadoop_version, image_url, plugin_name, user, - user_name): - clients = osclients.Clients( - user["credential"], - api_info=self.context["config"].get("api_versions")) - image_service = image_services.Image( - clients, name_generator=self.generate_random_name) - image = image_service.create_image(container_format="bare", - image_location=image_url, - disk_format="qcow2") - clients.sahara().images.update_image( - image_id=image.id, user_name=user_name, desc="") - clients.sahara().images.update_tags( - image_id=image.id, new_tags=[plugin_name, hadoop_version]) - return image.id - - @logging.log_task_wrapper(LOG.info, _("Enter context: `Sahara Image`")) - def setup(self): - utils.init_sahara_context(self) - self.context["sahara"]["images"] = {} - - # The user may want to use the existing image. In this case he should - # make sure that the image is public and has all required metadata. - image_uuid = self.config.get("image_uuid") - - self.context["sahara"]["need_image_cleanup"] = not image_uuid - - if image_uuid: - # Using the first user to check the existing image. 
- user = self.context["users"][0] - clients = osclients.Clients(user["credential"]) - - image = clients.glance().images.get(image_uuid) - - visibility = None - if hasattr(image, "is_public"): - visibility = "public" if image.is_public else "private" - else: - visibility = image["visibility"] - - if visibility != "public": - raise exceptions.ContextSetupFailure( - ctx_name=self.get_name(), - msg=_("Image provided in the Sahara context" - " should be public.") - ) - image_id = image_uuid - - for user, tenant_id in rutils.iterate_per_tenants( - self.context["users"]): - self.context["tenants"][tenant_id]["sahara"]["image"] = ( - image_id) - else: - for user, tenant_id in rutils.iterate_per_tenants( - self.context["users"]): - - image_id = self._create_image( - hadoop_version=self.config["hadoop_version"], - image_url=self.config["image_url"], - plugin_name=self.config["plugin_name"], - user=user, - user_name=self.config["username"]) - - self.context["tenants"][tenant_id]["sahara"]["image"] = ( - image_id) - - @logging.log_task_wrapper(LOG.info, _("Exit context: `Sahara Image`")) - def cleanup(self): - if self.context["sahara"]["need_image_cleanup"]: - resource_manager.cleanup(names=["glance.images"], - users=self.context.get("users", []), - superclass=self.__class__, - task_id=self.get_owner_id()) diff --git a/rally/plugins/openstack/context/sahara/sahara_input_data_sources.py b/rally/plugins/openstack/context/sahara/sahara_input_data_sources.py deleted file mode 100644 index c330b01d..00000000 --- a/rally/plugins/openstack/context/sahara/sahara_input_data_sources.py +++ /dev/null @@ -1,136 +0,0 @@ -# Copyright 2015: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
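The `oneOf` schema above admits exactly two shapes for the `sahara_image` context: build a fresh image from a URL, or reuse an existing public one. Two hypothetical fragments (the URL, names and UUID are placeholders):

sahara_image_create = {
    "sahara_image": {
        "image_url": "http://example.com/sahara-vanilla.qcow2",
        "username": "ubuntu",
        "plugin_name": "vanilla",
        "hadoop_version": "2.7.1"
    }
}

sahara_image_existing = {
    "sahara_image": {
        "image_uuid": "00000000-0000-0000-0000-000000000000"
    }
}

In the second form the image must already be public, which is exactly what the visibility check in `setup` enforces.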
-
-import requests
-from six.moves.urllib import parse
-
-from rally.common.i18n import _
-from rally.common import logging
-from rally.common import utils as rutils
-from rally.common import validation
-from rally import consts
-from rally import osclients
-from rally.plugins.openstack.cleanup import manager as resource_manager
-from rally.plugins.openstack.scenarios.sahara import utils
-from rally.plugins.openstack.scenarios.swift import utils as swift_utils
-from rally.task import context
-
-
-LOG = logging.getLogger(__name__)
-
-
-@validation.add("required_platform", platform="openstack", users=True)
-@context.configure(name="sahara_input_data_sources", platform="openstack",
-                   order=443)
-class SaharaInputDataSources(context.Context):
-    """Context class for setting up Input Data Sources for an EDP job."""
-
-    CONFIG_SCHEMA = {
-        "type": "object",
-        "$schema": consts.JSON_SCHEMA,
-        "properties": {
-            "input_type": {
-                "enum": ["swift", "hdfs"],
-            },
-            "input_url": {
-                "type": "string",
-            },
-            "swift_files": {
-                "type": "array",
-                "items": {
-                    "type": "object",
-                    "properties": {
-                        "name": {
-                            "type": "string"
-                        },
-                        "download_url": {
-                            "type": "string"
-                        }
-                    },
-                    "additionalProperties": False,
-                    "required": ["name", "download_url"]
-                }
-            }
-        },
-        "additionalProperties": False,
-        "required": ["input_type", "input_url"]
-    }
-
-    @logging.log_task_wrapper(LOG.info,
-                              _("Enter context: `Sahara Input Data Sources`"))
-    def setup(self):
-        utils.init_sahara_context(self)
-        self.context["sahara"]["swift_objects"] = []
-        self.context["sahara"]["container_name"] = None
-
-        for user, tenant_id in rutils.iterate_per_tenants(
-                self.context["users"]):
-            clients = osclients.Clients(user["credential"])
-            if self.config["input_type"] == "swift":
-                self.setup_inputs_swift(clients, tenant_id,
-                                        self.config["input_url"],
-                                        self.config["swift_files"],
-                                        user["credential"].username,
-                                        user["credential"].password)
-            else:
-                self.setup_inputs(clients, tenant_id,
-                                  self.config["input_type"],
-                                  self.config["input_url"])
-
-    def setup_inputs(self, clients, tenant_id, input_type, input_url):
-        input_ds = clients.sahara().data_sources.create(
-            name=self.generate_random_name(),
-            description="",
-            data_source_type=input_type,
-            url=input_url)
-
-        self.context["tenants"][tenant_id]["sahara"]["input"] = input_ds.id
-
-    def setup_inputs_swift(self, clients, tenant_id, input_url,
-                           swift_files, username, password):
-        swift_scenario = swift_utils.SwiftScenario(clients=clients,
-                                                   context=self.context)
-        # TODO(astudenov): use self.generate_random_name()
-        container_name = "rally_" + parse.urlparse(input_url).netloc.rstrip(
-            ".sahara")
-        self.context["sahara"]["container_name"] = (
-            swift_scenario._create_container(container_name=container_name))
-        for swift_file in swift_files:
-            content = requests.get(swift_file["download_url"]).content
-            self.context["sahara"]["swift_objects"].append(
-                swift_scenario._upload_object(
-                    self.context["sahara"]["container_name"], content,
-                    object_name=swift_file["name"]))
-        input_ds_swift = clients.sahara().data_sources.create(
-            name=self.generate_random_name(), description="",
-            data_source_type="swift", url=input_url,
-            credential_user=username, credential_pass=password)
-
-        self.context["tenants"][tenant_id]["sahara"]["input"] = (
-            input_ds_swift.id)
-
-    @logging.log_task_wrapper(LOG.info, _("Exit context: `Sahara Input Data "
-                                          "Sources`"))
-    def cleanup(self):
-        resource_manager.cleanup(
-            names=["swift.object", "swift.container"],
-            users=self.context.get("users", []),
-
superclass=swift_utils.SwiftScenario, - task_id=self.get_owner_id()) - resource_manager.cleanup( - names=["sahara.data_sources"], - users=self.context.get("users", []), - superclass=self.__class__, - task_id=self.get_owner_id()) diff --git a/rally/plugins/openstack/context/sahara/sahara_job_binaries.py b/rally/plugins/openstack/context/sahara/sahara_job_binaries.py deleted file mode 100644 index ee0536d8..00000000 --- a/rally/plugins/openstack/context/sahara/sahara_job_binaries.py +++ /dev/null @@ -1,154 +0,0 @@ -# Copyright 2014: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import requests - -from rally.common.i18n import _ -from rally.common import logging -from rally.common import utils as rutils -from rally.common import validation -from rally import consts -from rally import exceptions -from rally import osclients -from rally.plugins.openstack.cleanup import manager as resource_manager -from rally.plugins.openstack.scenarios.sahara import utils -from rally.task import context - - -LOG = logging.getLogger(__name__) - - -@validation.add("required_platform", platform="openstack", users=True) -@context.configure(name="sahara_job_binaries", platform="openstack", order=442) -class SaharaJobBinaries(context.Context): - """Context class for setting up Job Binaries for an EDP job.""" - - CONFIG_SCHEMA = { - "type": "object", - "$schema": consts.JSON_SCHEMA, - "properties": { - "mains": { - "type": "array", - "items": { - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "download_url": { - "type": "string" - } - }, - "additionalProperties": False, - "required": ["name", "download_url"] - } - }, - "libs": { - "type": "array", - "items": { - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "download_url": { - "type": "string" - } - }, - "additionalProperties": False, - "required": ["name", "download_url"] - } - } - }, - "additionalProperties": False - } - - # This cache will hold the downloaded libs content to prevent repeated - # downloads for each tenant - lib_cache = {} - - @logging.log_task_wrapper(LOG.info, - _("Enter context: `Sahara Job Binaries`")) - def setup(self): - utils.init_sahara_context(self) - for user, tenant_id in rutils.iterate_per_tenants( - self.context["users"]): - - clients = osclients.Clients(user["credential"]) - sahara = clients.sahara() - - self.context["tenants"][tenant_id]["sahara"]["mains"] = [] - self.context["tenants"][tenant_id]["sahara"]["libs"] = [] - - for main in self.config.get("mains", []): - self.download_and_save_lib( - sahara=sahara, - lib_type="mains", - name=main["name"], - download_url=main["download_url"], - tenant_id=tenant_id) - - for lib in self.config.get("libs", []): - self.download_and_save_lib( - sahara=sahara, - lib_type="libs", - name=lib["name"], - download_url=lib["download_url"], - tenant_id=tenant_id) - - def setup_inputs(self, sahara, tenant_id, input_type, input_url): - if input_type == "swift": - raise exceptions.RallyException( - _("Swift Data 
Sources are not implemented yet")) - # Todo(nkonovalov): Add swift credentials parameters and data upload - input_ds = sahara.data_sources.create( - name=self.generate_random_name(), - description="", - data_source_type=input_type, - url=input_url) - - self.context["tenants"][tenant_id]["sahara"]["input"] = input_ds.id - - def download_and_save_lib(self, sahara, lib_type, name, download_url, - tenant_id): - if download_url not in self.lib_cache: - lib_data = requests.get(download_url).content - self.lib_cache[download_url] = lib_data - else: - lib_data = self.lib_cache[download_url] - - job_binary_internal = sahara.job_binary_internals.create( - name=name, - data=lib_data) - - url = "internal-db://%s" % job_binary_internal.id - job_binary = sahara.job_binaries.create(name=name, - url=url, - description="", - extra={}) - - self.context["tenants"][tenant_id]["sahara"][lib_type].append( - job_binary.id) - - @logging.log_task_wrapper(LOG.info, - _("Exit context: `Sahara Job Binaries`")) - def cleanup(self): - resources = ["job_binary_internals", "job_binaries"] - - resource_manager.cleanup( - names=["sahara.%s" % res for res in resources], - users=self.context.get("users", []), - superclass=utils.SaharaScenario, - task_id=self.context["task"]["uuid"]) diff --git a/rally/plugins/openstack/context/sahara/sahara_output_data_sources.py b/rally/plugins/openstack/context/sahara/sahara_output_data_sources.py deleted file mode 100644 index dc126060..00000000 --- a/rally/plugins/openstack/context/sahara/sahara_output_data_sources.py +++ /dev/null @@ -1,114 +0,0 @@ -# Copyright 2015: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
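A hypothetical configuration for the `sahara_job_binaries` context above (names and URLs are placeholders). Identical `download_url` values are fetched only once per process thanks to the class-level `lib_cache`:

sahara_job_binaries_example = {
    "sahara_job_binaries": {
        "mains": [{"name": "wordcount.jar",
                   "download_url": "http://example.com/wordcount.jar"}],
        "libs": [{"name": "helpers.jar",
                  "download_url": "http://example.com/helpers.jar"}]
    }
}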
- -from rally.common.i18n import _ -from rally.common import logging -from rally.common import utils as rutils -from rally.common import validation -from rally import consts -from rally import osclients -from rally.plugins.openstack.cleanup import manager as resource_manager -from rally.plugins.openstack.scenarios.sahara import utils -from rally.plugins.openstack.scenarios.swift import utils as swift_utils -from rally.task import context - - -LOG = logging.getLogger(__name__) - - -@validation.add("required_platform", platform="openstack", users=True) -@context.configure(name="sahara_output_data_sources", platform="openstack", - order=444) -class SaharaOutputDataSources(context.Context): - """Context class for setting up Output Data Sources for an EDP job.""" - - CONFIG_SCHEMA = { - "type": "object", - "$schema": consts.JSON_SCHEMA, - "properties": { - "output_type": { - "enum": ["swift", "hdfs"], - }, - "output_url_prefix": { - "type": "string", - } - }, - "additionalProperties": False, - "required": ["output_type", "output_url_prefix"] - } - - @logging.log_task_wrapper(LOG.info, - _("Enter context: `Sahara Output Data Sources`")) - def setup(self): - utils.init_sahara_context(self) - for user, tenant_id in rutils.iterate_per_tenants( - self.context["users"]): - - clients = osclients.Clients(user["credential"]) - sahara = clients.sahara() - - if self.config["output_type"] == "swift": - swift = swift_utils.SwiftScenario(clients=clients, - context=self.context) - container_name = self.generate_random_name() - self.context["tenants"][tenant_id]["sahara"]["container"] = { - "name": swift._create_container( - container_name=container_name), - "output_swift_objects": [] - } - self.setup_outputs_swift(swift, sahara, tenant_id, - container_name, - user["credential"].username, - user["credential"].password) - else: - self.setup_outputs_hdfs(sahara, tenant_id, - self.config["output_url_prefix"]) - - def setup_outputs_hdfs(self, sahara, tenant_id, output_url): - output_ds = sahara.data_sources.create( - name=self.generate_random_name(), - description="", - data_source_type="hdfs", - url=output_url) - - self.context["tenants"][tenant_id]["sahara"]["output"] = output_ds.id - - def setup_outputs_swift(self, swift, sahara, tenant_id, container_name, - username, password): - output_ds_swift = sahara.data_sources.create( - name=self.generate_random_name(), - description="", - data_source_type="swift", - url="swift://" + container_name + ".sahara/", - credential_user=username, - credential_pass=password) - - self.context["tenants"][tenant_id]["sahara"]["output"] = ( - output_ds_swift.id - ) - - @logging.log_task_wrapper(LOG.info, - _("Exit context: `Sahara Output Data Sources`")) - def cleanup(self): - resource_manager.cleanup( - names=["swift.object", "swift.container"], - users=self.context.get("users", []), - superclass=self.__class__, - task_id=self.get_owner_id()) - resource_manager.cleanup( - names=["sahara.data_sources"], - users=self.context.get("users", []), - superclass=self.__class__, - task_id=self.get_owner_id()) diff --git a/rally/plugins/openstack/context/senlin/__init__.py b/rally/plugins/openstack/context/senlin/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/rally/plugins/openstack/context/senlin/profiles.py b/rally/plugins/openstack/context/senlin/profiles.py deleted file mode 100644 index b93c417c..00000000 --- a/rally/plugins/openstack/context/senlin/profiles.py +++ /dev/null @@ -1,78 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); 
you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally.common.i18n import _ -from rally.common import logging -from rally.common import utils as rutils -from rally.common import validation -from rally import consts -from rally.plugins.openstack.scenarios.senlin import utils as senlin_utils -from rally.task import context - -LOG = logging.getLogger(__name__) - - -@validation.add("required_platform", platform="openstack", users=True) -@context.configure(name="profiles", platform="openstack", order=190) -class ProfilesGenerator(context.Context): - """Context creates a temporary profile for Senlin test.""" - - CONFIG_SCHEMA = { - "type": "object", - "$schema": consts.JSON_SCHEMA, - "properties": { - "type": { - "type": "string", - }, - "version": { - "type": "string", - }, - "properties": { - "type": "object", - } - }, - "additionalProperties": False, - "required": ["type", "version", "properties"] - } - - @logging.log_task_wrapper(LOG.info, _("Enter context: `Senlin profiles`")) - def setup(self): - """Create test profiles.""" - for user, tenant_id in rutils.iterate_per_tenants( - self.context["users"]): - - senlin_scenario = senlin_utils.SenlinScenario({ - "user": user, - "task": self.context["task"], - "config": { - "api_versions": self.context["config"].get( - "api_versions", [])} - }) - profile = senlin_scenario._create_profile(self.config) - - self.context["tenants"][tenant_id]["profile"] = profile.id - - @logging.log_task_wrapper(LOG.info, _("Exit context: `Senlin profiles`")) - def cleanup(self): - """Delete created test profiles.""" - for user, tenant_id in rutils.iterate_per_tenants( - self.context["users"]): - - senlin_scenario = senlin_utils.SenlinScenario({ - "user": user, - "task": self.context["task"], - "config": { - "api_versions": self.context["config"].get( - "api_versions", [])} - }) - senlin_scenario._delete_profile( - self.context["tenants"][tenant_id]["profile"]) diff --git a/rally/plugins/openstack/context/swift/__init__.py b/rally/plugins/openstack/context/swift/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/rally/plugins/openstack/context/swift/objects.py b/rally/plugins/openstack/context/swift/objects.py deleted file mode 100644 index ac42b099..00000000 --- a/rally/plugins/openstack/context/swift/objects.py +++ /dev/null @@ -1,102 +0,0 @@ -# Copyright 2015: Cisco Systems, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
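An illustrative configuration for the Senlin `profiles` context above. The `os.nova.server` profile type and the property values are assumptions; any spec accepted by Senlin's profile API would do:

profiles_context_example = {
    "profiles": {
        "type": "os.nova.server",
        "version": "1.0",
        "properties": {
            "flavor": 1,
            "name": "cirros_server"
        }
    }
}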
- -from rally.common.i18n import _ -from rally.common import logging -from rally.common import validation -from rally import consts -from rally import exceptions -from rally.plugins.openstack.context.swift import utils as swift_utils -from rally.task import context - -LOG = logging.getLogger(__name__) - - -@validation.add("required_platform", platform="openstack", users=True) -@context.configure(name="swift_objects", platform="openstack", order=360) -class SwiftObjectGenerator(swift_utils.SwiftObjectMixin, context.Context): - """Create containers and objects in each tenant.""" - CONFIG_SCHEMA = { - "type": "object", - "$schema": consts.JSON_SCHEMA, - "properties": { - "containers_per_tenant": { - "type": "integer", - "minimum": 1 - }, - "objects_per_container": { - "type": "integer", - "minimum": 1 - }, - "object_size": { - "type": "integer", - "minimum": 1 - }, - "resource_management_workers": { - "type": "integer", - "minimum": 1 - } - }, - "additionalProperties": False - } - - DEFAULT_CONFIG = { - "containers_per_tenant": 1, - "objects_per_container": 1, - "object_size": 1024, - "resource_management_workers": 30 - } - - @logging.log_task_wrapper(LOG.info, _("Enter context: `swift_objects`")) - def setup(self): - """Create containers and objects, using the broker pattern.""" - threads = self.config["resource_management_workers"] - - containers_per_tenant = self.config["containers_per_tenant"] - containers_num = len(self.context["tenants"]) * containers_per_tenant - LOG.debug("Creating %d containers using %d threads." % (containers_num, - threads)) - containers_count = len(self._create_containers(self.context, - containers_per_tenant, - threads)) - if containers_count != containers_num: - raise exceptions.ContextSetupFailure( - ctx_name=self.get_name(), - msg=_("Failed to create the requested number of containers, " - "expected %(expected)s but got %(actual)s.") - % {"expected": containers_num, "actual": containers_count}) - - objects_per_container = self.config["objects_per_container"] - objects_num = containers_num * objects_per_container - LOG.debug("Creating %d objects using %d threads." % (objects_num, - threads)) - objects_count = len(self._create_objects(self.context, - objects_per_container, - self.config["object_size"], - threads)) - if objects_count != objects_num: - raise exceptions.ContextSetupFailure( - ctx_name=self.get_name(), - msg=_("Failed to create the requested number of objects, " - "expected %(expected)s but got %(actual)s.") - % {"expected": objects_num, "actual": objects_count}) - - @logging.log_task_wrapper(LOG.info, _("Exit context: `swift_objects`")) - def cleanup(self): - """Delete containers and objects, using the broker pattern.""" - threads = self.config["resource_management_workers"] - - self._delete_objects(self.context, threads) - self._delete_containers(self.context, threads) diff --git a/rally/plugins/openstack/context/swift/utils.py b/rally/plugins/openstack/context/swift/utils.py deleted file mode 100644 index 127c0a4a..00000000 --- a/rally/plugins/openstack/context/swift/utils.py +++ /dev/null @@ -1,151 +0,0 @@ -# Copyright 2015: Cisco Systems, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import tempfile
-
-from rally.common import broker
-from rally.common import utils as rutils
-from rally.plugins.openstack.scenarios.swift import utils as swift_utils
-
-
-class SwiftObjectMixin(object):
-    """Mix-in methods for Swift object contexts."""
-
-    def _create_containers(self, context, containers_per_tenant, threads):
-        """Create containers and store results in Rally context.
-
-        :param context: dict, Rally context environment
-        :param containers_per_tenant: int, number of containers to create
-                                      per tenant
-        :param threads: int, number of threads to use for broker pattern
-
-        :returns: list of tuples containing (account, container)
-        """
-        containers = []
-
-        def publish(queue):
-            for user, tenant_id in (rutils.iterate_per_tenants(
-                    context.get("users", []))):
-                context["tenants"][tenant_id]["containers"] = []
-                for i in range(containers_per_tenant):
-                    args = (user, context["tenants"][tenant_id]["containers"])
-                    queue.append(args)
-
-        def consume(cache, args):
-            user, tenant_containers = args
-            if user["id"] not in cache:
-                cache[user["id"]] = swift_utils.SwiftScenario(
-                    {"user": user, "task": context.get("task", {})})
-            container_name = cache[user["id"]]._create_container()
-            tenant_containers.append({"user": user,
-                                      "container": container_name,
-                                      "objects": []})
-            containers.append((user["tenant_id"], container_name))
-
-        broker.run(publish, consume, threads)
-
-        return containers
-
-    def _create_objects(self, context, objects_per_container, object_size,
-                        threads):
-        """Create objects and store results in Rally context.
-
-        :param context: dict, Rally context environment
-        :param objects_per_container: int, number of objects to create
-                                      per container
-        :param object_size: int, size of created swift objects in bytes
-        :param threads: int, number of threads to use for broker pattern
-
-        :returns: list of tuples containing (account, container, object)
-        """
-        objects = []
-
-        with tempfile.TemporaryFile() as dummy_file:
-            # set dummy file to specified object size
-            dummy_file.truncate(object_size)
-
-            def publish(queue):
-                for tenant_id in context["tenants"]:
-                    containers = context["tenants"][tenant_id]["containers"]
-                    for container in containers:
-                        for i in range(objects_per_container):
-                            queue.append(container)
-
-            def consume(cache, container):
-                user = container["user"]
-                if user["id"] not in cache:
-                    cache[user["id"]] = swift_utils.SwiftScenario(
-                        {"user": user, "task": context.get("task", {})})
-                dummy_file.seek(0)
-                object_name = cache[user["id"]]._upload_object(
-                    container["container"],
-                    dummy_file)[1]
-                container["objects"].append(object_name)
-                objects.append((user["tenant_id"], container["container"],
-                                object_name))
-
-            broker.run(publish, consume, threads)
-
-        return objects
-
-    def _delete_containers(self, context, threads):
-        """Delete containers created by Swift context and update Rally context.
-
-        :param context: dict, Rally context environment
-        :param threads: int, number of threads to use for broker pattern
-        """
-        def publish(queue):
-            for tenant_id in context["tenants"]:
-                containers = context["tenants"][tenant_id]["containers"]
-                for container in containers[:]:
-                    args = container, containers
-                    queue.append(args)
-
-        def consume(cache, args):
-            container, tenant_containers = args
-            user = container["user"]
-            if user["id"] not in cache:
-                cache[user["id"]] = swift_utils.SwiftScenario(
-                    {"user": user, "task": context.get("task", {})})
-            cache[user["id"]]._delete_container(container["container"])
-            tenant_containers.remove(container)
-
-        broker.run(publish, consume, threads)
-
-    def _delete_objects(self, context, threads):
-        """Delete objects created by Swift context and update Rally context.
-
-        :param context: dict, Rally context environment
-        :param threads: int, number of threads to use for broker pattern
-        """
-        def publish(queue):
-            for tenant_id in context["tenants"]:
-                containers = context["tenants"][tenant_id]["containers"]
-                for container in containers:
-                    for object_name in container["objects"][:]:
-                        args = object_name, container
-                        queue.append(args)
-
-        def consume(cache, args):
-            object_name, container = args
-            user = container["user"]
-            if user["id"] not in cache:
-                cache[user["id"]] = swift_utils.SwiftScenario(
-                    {"user": user, "task": context.get("task", {})})
-            cache[user["id"]]._delete_object(container["container"],
-                                             object_name)
-            container["objects"].remove(object_name)
-
-        broker.run(publish, consume, threads)
diff --git a/rally/plugins/openstack/context/vm/__init__.py b/rally/plugins/openstack/context/vm/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/rally/plugins/openstack/context/vm/custom_image.py b/rally/plugins/openstack/context/vm/custom_image.py
deleted file mode 100644
index 003df50f..00000000
--- a/rally/plugins/openstack/context/vm/custom_image.py
+++ /dev/null
@@ -1,235 +0,0 @@
-# Copyright 2015: Mirantis Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import abc
-
-import six
-
-from rally.common import broker
-from rally.common.i18n import _
-from rally.common import logging
-from rally.common import utils
-from rally import consts
-from rally import osclients
-from rally.plugins.openstack.scenarios.vm import vmtasks
-from rally.plugins.openstack.services.image import image
-from rally.plugins.openstack import types
-from rally.task import context
-
-LOG = logging.getLogger(__name__)
-
-
-@six.add_metaclass(abc.ABCMeta)
-class BaseCustomImageGenerator(context.Context):
-    """Base class for contexts that provide a customized image.
-
-    Every context class for a specific customization must implement
-    the method `_customize_image`, which is able to connect to the server
-    using SSH and e.g. install applications inside it.
-
-    This is used e.g. to install the benchmark application using SSH
-    access.
- - This base context class provides a way to prepare an image with - custom preinstalled applications. Basically, this code boots a VM, calls - the `_customize_image` and then snapshots the VM disk, removing the VM - afterwards. The image UUID is stored in the user["custom_image"]["id"] - and can be used afterwards by scenario. - """ - - CONFIG_SCHEMA = { - "type": "object", - "$schema": consts.JSON_SCHEMA, - "properties": { - "image": { - "type": "object", - "properties": { - "name": { - "type": "string" - } - } - }, - "flavor": { - "type": "object", - "properties": { - "name": { - "type": "string" - } - } - }, - "username": { - "type": "string" - }, - "password": { - "type": "string" - }, - "floating_network": { - "type": "string" - }, - "internal_network": { - "type": "string" - }, - "port": { - "type": "integer", - "minimum": 1, - "maximum": 65535 - }, - "userdata": { - "type": "string" - }, - "workers": { - "type": "integer", - "minimum": 1, - } - }, - "required": ["image", "flavor"], - "additionalProperties": False - } - - DEFAULT_CONFIG = { - "username": "root", - "port": 22, - "workers": 1 - } - - @logging.log_task_wrapper(LOG.info, _("Enter context: `custom_image`")) - def setup(self): - """Creates custom image(s) with preinstalled applications. - - When admin is present creates one public image that is usable - from all the tenants and users. Otherwise create one image - per user and tenant. - """ - - if "admin" in self.context: - if self.context["users"]: - # NOTE(pboldin): Create by first user and make it public by - # the admin - user = self.context["users"][0] - else: - user = self.context["admin"] - tenant = self.context["tenants"][user["tenant_id"]] - - nics = None - if "networks" in tenant: - nics = [{"net-id": tenant["networks"][0]["id"]}] - - custom_image = self.create_one_image(user, nics=nics) - glance_service = image.Image( - self.context["admin"]["credential"].clients()) - glance_service.set_visibility(custom_image.id) - - for tenant in self.context["tenants"].values(): - tenant["custom_image"] = custom_image - else: - def publish(queue): - users = self.context.get("users", []) - for user, tenant_id in utils.iterate_per_tenants(users): - queue.append((user, tenant_id)) - - def consume(cache, args): - user, tenant_id = args - tenant = self.context["tenants"][tenant_id] - tenant["custom_image"] = self.create_one_image(user) - - broker.run(publish, consume, self.config["workers"]) - - def create_one_image(self, user, **kwargs): - """Create one image for the user.""" - - clients = osclients.Clients(user["credential"]) - - image_id = types.GlanceImage.transform( - clients=clients, resource_config=self.config["image"]) - flavor_id = types.Flavor.transform( - clients=clients, resource_config=self.config["flavor"]) - - vm_scenario = vmtasks.BootRuncommandDelete(self.context, - clients=clients) - - server, fip = vm_scenario._boot_server_with_fip( - image=image_id, flavor=flavor_id, - floating_network=self.config.get("floating_network"), - userdata=self.config.get("userdata"), - key_name=user["keypair"]["name"], - security_groups=[user["secgroup"]["name"]], - **kwargs) - - try: - LOG.debug("Installing benchmark on %r %s", server, fip["ip"]) - self.customize_image(server, fip, user) - - LOG.debug("Stopping server %r", server) - vm_scenario._stop_server(server) - - LOG.debug("Creating snapshot for %r", server) - custom_image = vm_scenario._create_image(server) - finally: - vm_scenario._delete_server_with_fip(server, fip) - - return custom_image - - 
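Concrete contexts hook into the flow above by overriding `_customize_image` (the abstract method is declared further below). A minimal hypothetical subclass, shown only to illustrate the hook's shape; a real one would run installation commands over SSH:

class NoopCustomImageGenerator(BaseCustomImageGenerator):
    """Boots, snapshots and deletes a VM without customizing it."""

    def _customize_image(self, server, fip, user):
        # Nothing to install: report a zero exit code with empty
        # stdout/stderr, mimicking a successful remote command.
        return 0, "", ""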
@logging.log_task_wrapper(LOG.info, _("Exit context: `custom_image`")) - def cleanup(self): - """Delete created custom image(s).""" - - if "admin" in self.context: - user = self.context["users"][0] - tenant = self.context["tenants"][user["tenant_id"]] - if "custom_image" in tenant: - self.delete_one_image(user, tenant["custom_image"]) - tenant.pop("custom_image") - else: - def publish(queue): - users = self.context.get("users", []) - for user, tenant_id in utils.iterate_per_tenants(users): - queue.append((user, tenant_id)) - - def consume(cache, args): - user, tenant_id = args - tenant = self.context["tenants"][tenant_id] - if "custom_image" in tenant: - self.delete_one_image(user, tenant["custom_image"]) - tenant.pop("custom_image") - - broker.run(publish, consume, self.config["workers"]) - - def delete_one_image(self, user, custom_image): - """Delete the image created for the user and tenant.""" - - with logging.ExceptionLogger( - LOG, _("Unable to delete image %s") % custom_image.id): - - glance_service = image.Image(user["credential"].clients()) - glance_service.delete_image(custom_image.id) - - @logging.log_task_wrapper(LOG.info, - _("Custom image context: customizing")) - def customize_image(self, server, ip, user): - return self._customize_image(server, ip, user) - - @abc.abstractmethod - def _customize_image(self, server, ip, user): - """Override this method with one that customizes image. - - Basically, code can simply call `VMScenario._run_command` function - specifying an installation script and interpreter. This script will - be then executed using SSH. - - :param server: nova.Server instance - :param ip: dict with server IP details - :param user: user who started a VM instance. Used to extract keypair - """ - pass diff --git a/rally/plugins/openstack/context/vm/image_command_customizer.py b/rally/plugins/openstack/context/vm/image_command_customizer.py deleted file mode 100644 index 1d007a80..00000000 --- a/rally/plugins/openstack/context/vm/image_command_customizer.py +++ /dev/null @@ -1,109 +0,0 @@ -# Copyright 2015: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import copy - -from rally.common import validation -from rally import exceptions -from rally.plugins.openstack.context.vm import custom_image -from rally.plugins.openstack.scenarios.vm import utils as vm_utils -import rally.task.context as context - - -@validation.add("required_platform", platform="openstack", users=True) -@context.configure(name="image_command_customizer", platform="openstack", - order=501) -class ImageCommandCustomizerContext(custom_image.BaseCustomImageGenerator): - """Context class for generating image customized by a command execution. - - Run a command specified by configuration to prepare image. - - Use this script e.g. to download and install something. 
- """ - - CONFIG_SCHEMA = copy.deepcopy( - custom_image.BaseCustomImageGenerator.CONFIG_SCHEMA) - CONFIG_SCHEMA["definitions"] = { - "stringOrStringList": { - "anyOf": [ - {"type": "string", "description": "just a string"}, - { - "type": "array", "description": "just a list of strings", - "items": {"type": "string"} - } - ] - }, - "scriptFile": { - "type": "object", - "properties": { - "script_file": {"$ref": "#/definitions/stringOrStringList"}, - "interpreter": {"$ref": "#/definitions/stringOrStringList"}, - "command_args": {"$ref": "#/definitions/stringOrStringList"} - }, - "required": ["script_file", "interpreter"], - "additionalProperties": False, - }, - "scriptInline": { - "type": "object", - "properties": { - "script_inline": {"type": "string"}, - "interpreter": {"$ref": "#/definitions/stringOrStringList"}, - "command_args": {"$ref": "#/definitions/stringOrStringList"} - }, - "required": ["script_inline", "interpreter"], - "additionalProperties": False, - }, - "commandPath": { - "type": "object", - "properties": { - "remote_path": {"$ref": "#/definitions/stringOrStringList"}, - "local_path": {"type": "string"}, - "command_args": {"$ref": "#/definitions/stringOrStringList"} - }, - "required": ["remote_path"], - "additionalProperties": False, - }, - "commandDict": { - "oneOf": [ - {"$ref": "#/definitions/scriptFile"}, - {"$ref": "#/definitions/scriptInline"}, - {"$ref": "#/definitions/commandPath"}, - ], - } - } - CONFIG_SCHEMA["properties"]["command"] = { - "$ref": "#/definitions/commandDict" - } - - def _customize_image(self, server, fip, user): - code, out, err = vm_utils.VMScenario(self.context)._run_command( - fip["ip"], self.config["port"], - self.config["username"], self.config.get("password"), - command=self.config["command"], - pkey=user["keypair"]["private"]) - - if code: - raise exceptions.ScriptError( - message="Command `%(command)s' execution failed," - " code %(code)d:\n" - "STDOUT:\n============================\n" - "%(out)s\n" - "STDERR:\n============================\n" - "%(err)s\n" - "============================\n" - % {"command": self.config["command"], "code": code, - "out": out, "err": err}) - - return code, out, err diff --git a/rally/plugins/openstack/context/watcher/__init__.py b/rally/plugins/openstack/context/watcher/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/rally/plugins/openstack/context/watcher/audit_templates.py b/rally/plugins/openstack/context/watcher/audit_templates.py deleted file mode 100644 index b208399b..00000000 --- a/rally/plugins/openstack/context/watcher/audit_templates.py +++ /dev/null @@ -1,117 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
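Illustrative `command` values, one per alternative admitted by the `commandDict` schema above (script contents and paths are placeholders):

command_inline = {"script_inline": "apt-get -y install iperf",
                  "interpreter": "/bin/sh"}
command_file = {"script_file": "install_benchmark.sh",
                "interpreter": "/bin/sh"}
command_path = {"remote_path": "/usr/local/bin/install_benchmark"}

The `oneOf` keyword makes these mutually exclusive, so a config that mixes, say, `script_inline` and `script_file` is rejected at validation time.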
- -import random - -import six - -from rally.common.i18n import _ -from rally.common import logging -from rally.common import validation -from rally import consts -from rally import osclients -from rally.plugins.openstack.cleanup import manager as resource_manager -from rally.plugins.openstack.scenarios.watcher import utils as watcher_utils -from rally.plugins.openstack import types -from rally.task import context - - -LOG = logging.getLogger(__name__) - - -@validation.add("required_platform", platform="openstack", admin=True) -@context.configure(name="audit_templates", platform="openstack", order=550) -class AuditTemplateGenerator(context.Context): - """Context class for adding temporary audit template for benchmarks.""" - - CONFIG_SCHEMA = { - "type": "object", - "$schema": consts.JSON_SCHEMA, - "properties": { - "audit_templates_per_admin": {"type": "integer", "minimum": 1}, - "fill_strategy": {"enum": ["round_robin", "random", None]}, - "params": { - "type": "array", - "minItems": 1, - "uniqueItems": True, - "items": { - "type": "object", - "properties": { - "goal": { - "type": "object", - "properties": { - "name": { - "type": "string" - } - } - }, - "strategy": { - "type": "object", - "properties": { - "name": { - "type": "string" - } - } - }, - }, - }, - } - }, - "additionalProperties": False, - "required": ["params"] - } - - DEFAULT_CONFIG = { - "audit_templates_per_admin": 1, - "fill_strategy": "round_robin" - } - - @logging.log_task_wrapper(LOG.info, _("Enter context: `Audit Templates`")) - def setup(self): - watcher_scenario = watcher_utils.WatcherScenario( - {"admin": self.context["admin"], "task": self.context["task"], - "owner_id": self.context["owner_id"], - "config": { - "api_versions": self.context["config"].get( - "api_versions", [])} - }) - - clients = osclients.Clients(self.context["admin"]["credential"]) - - self.context["audit_templates"] = [] - for i in six.moves.range(self.config["audit_templates_per_admin"]): - cfg_size = len(self.config["params"]) - if self.config["fill_strategy"] == "round_robin": - audit_params = self.config["params"][i % cfg_size] - elif self.config["fill_strategy"] == "random": - audit_params = random.choice(self.config["params"]) - - goal_id = types.WatcherGoal.transform( - clients=clients, - resource_config=audit_params["goal"]) - strategy_id = types.WatcherStrategy.transform( - clients=clients, - resource_config=audit_params["strategy"]) - - audit_template = watcher_scenario._create_audit_template( - goal_id, strategy_id) - self.context["audit_templates"].append(audit_template.uuid) - - @logging.log_task_wrapper(LOG.info, _("Exit context: `Audit Templates`")) - def cleanup(self): - resource_manager.cleanup(names=["watcher.action_plan", - "watcher.audit_template"], - admin=self.context.get("admin", []), - superclass=watcher_utils.WatcherScenario, - task_id=self.get_owner_id()) diff --git a/rally/plugins/openstack/credential.py b/rally/plugins/openstack/credential.py deleted file mode 100644 index 4c6e461c..00000000 --- a/rally/plugins/openstack/credential.py +++ /dev/null @@ -1,196 +0,0 @@ -# Copyright 2017: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally.common import logging -from rally import consts -from rally.deployment import credential -from rally import osclients - -LOG = logging.getLogger(__file__) - - -@credential.configure("openstack") -class OpenStackCredential(credential.Credential): - """Credential for OpenStack.""" - - def __init__(self, auth_url, username, password, tenant_name=None, - project_name=None, - permission=consts.EndpointPermission.USER, - region_name=None, endpoint_type=None, - domain_name=None, endpoint=None, user_domain_name=None, - project_domain_name=None, - https_insecure=False, https_cacert=None, - profiler_hmac_key=None): - self.auth_url = auth_url - self.username = username - self.password = password - self.tenant_name = tenant_name or project_name - self.permission = permission - self.region_name = region_name - self.endpoint_type = endpoint_type - self.domain_name = domain_name - self.user_domain_name = user_domain_name - self.project_domain_name = project_domain_name - self.endpoint = endpoint - self.https_insecure = https_insecure - self.https_cacert = https_cacert - self.profiler_hmac_key = profiler_hmac_key - - self._clients_cache = {} - - # backward compatibility - @property - def insecure(self): - LOG.warning("Property 'insecure' is deprecated since Rally 0.10.0. " - "Use 'https_insecure' instead.") - return self.https_insecure - - # backward compatibility - @property - def cacert(self): - LOG.warning("Property 'cacert' is deprecated since Rally 0.10.0. 
" - "Use 'https_cacert' instead.") - return self.https_cacert - - def to_dict(self): - return {"auth_url": self.auth_url, - "username": self.username, - "password": self.password, - "tenant_name": self.tenant_name, - "region_name": self.region_name, - "endpoint_type": self.endpoint_type, - "domain_name": self.domain_name, - "endpoint": self.endpoint, - "https_insecure": self.https_insecure, - "https_cacert": self.https_cacert, - "user_domain_name": self.user_domain_name, - "project_domain_name": self.project_domain_name, - "permission": self.permission, - "profiler_hmac_key": self.profiler_hmac_key} - - def verify_connection(self): - if self.permission == consts.EndpointPermission.ADMIN: - self.clients().verified_keystone() - else: - self.clients().keystone() - - def list_services(self): - return sorted([{"type": stype, "name": sname} - for stype, sname in self.clients().services().items()], - key=lambda s: s["name"]) - - def clients(self, api_info=None): - return osclients.Clients(self, api_info=api_info, - cache=self._clients_cache) - - -@credential.configure_builder("openstack") -class OpenStackCredentialBuilder(credential.CredentialBuilder): - """Builds credentials provided by ExistingCloud config.""" - - USER_SCHEMA = { - "type": "object", - "oneOf": [ - { - "description": "Keystone V2.0", - "properties": { - "username": {"type": "string"}, - "password": {"type": "string"}, - "tenant_name": {"type": "string"}, - }, - "required": ["username", "password", "tenant_name"], - "additionalProperties": False - }, - { - "description": "Keystone V3.0", - "properties": { - "username": {"type": "string"}, - "password": {"type": "string"}, - "domain_name": {"type": "string"}, - "user_domain_name": {"type": "string"}, - "project_name": {"type": "string"}, - "project_domain_name": {"type": "string"}, - }, - "required": ["username", "password", "project_name"], - "additionalProperties": False - } - ], - } - - CONFIG_SCHEMA = { - "type": "object", - "properties": { - "admin": USER_SCHEMA, - "users": {"type": "array", "items": USER_SCHEMA, "minItems": 1}, - "auth_url": {"type": "string"}, - "region_name": {"type": "string"}, - # NOTE(andreykurilin): it looks like we do not use endpoint - # var at all - "endpoint": {"type": ["string", "null"]}, - "endpoint_type": { - "enum": [consts.EndpointType.ADMIN, - consts.EndpointType.INTERNAL, - consts.EndpointType.PUBLIC, - None]}, - "https_insecure": {"type": "boolean"}, - "https_cacert": {"type": "string"}, - "profiler_hmac_key": {"type": ["string", "null"]} - }, - "anyOf": [ - {"description": "The case when the admin is specified and the " - "users can be created via 'users' context or " - "'existing_users' will be used.", - "required": ["auth_url", "admin"]}, - {"description": "The case when the only existing users are " - "specified.", - "required": ["auth_url", "users"]} - ], - "additionalProperties": False - } - - def _create_credential(self, common, user, permission): - cred = OpenStackCredential( - auth_url=common["auth_url"], - username=user["username"], - password=user["password"], - tenant_name=user.get("project_name", user.get("tenant_name")), - permission=permission, - region_name=common.get("region_name"), - endpoint_type=common.get("endpoint_type"), - endpoint=common.get("endpoint"), - domain_name=user.get("domain_name"), - user_domain_name=user.get("user_domain_name", None), - project_domain_name=user.get("project_domain_name", None), - https_insecure=common.get("https_insecure", False), - https_cacert=common.get("https_cacert"), - 
profiler_hmac_key=common.get("profiler_hmac_key")) - return cred.to_dict() - - def build_credentials(self): - permissions = consts.EndpointPermission - - users = [self._create_credential(self.config, user, permissions.USER) - for user in self.config.get("users", [])] - - admin = None - if self.config.get("admin"): - admin = self._create_credential(self.config, - self.config.get("admin"), - permissions.ADMIN) - - return {"admin": admin, "users": users} - - -# NOTE(astudenov): Let's consider moving rally.osclients here diff --git a/rally/plugins/openstack/hook/__init__.py b/rally/plugins/openstack/hook/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/rally/plugins/openstack/hook/fault_injection.py b/rally/plugins/openstack/hook/fault_injection.py deleted file mode 100644 index c4c3c0ad..00000000 --- a/rally/plugins/openstack/hook/fault_injection.py +++ /dev/null @@ -1,79 +0,0 @@ -# Copyright 2016: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally.common import logging -from rally.common import objects -from rally import consts -from rally.task import hook - -LOG = logging.getLogger(__name__) - - -@hook.configure(name="fault_injection", platform="openstack") -class FaultInjectionHook(hook.Hook): - """Performs fault injection using os-faults library. - - Configuration: - action - string that represents an action (more info in [1]) - verify - whether to verify connection to cloud nodes or not - - This plugin discovers extra config of ExistingCloud - and looks for "cloud_config" field. If cloud_config is present then - it will be used to connect to the cloud by os-faults. - - Another option is to provide os-faults config file through - OS_FAULTS_CONFIG env variable. Format of the config can - be found in [1]. - - [1] http://os-faults.readthedocs.io/en/latest/usage.html - """ - - CONFIG_SCHEMA = { - "type": "object", - "$schema": consts.JSON_SCHEMA, - "properties": { - "action": {"type": "string"}, - "verify": {"type": "boolean"}, - }, - "required": [ - "action", - ], - "additionalProperties": False, - } - - def get_cloud_config(self): - deployment = objects.Deployment.get(self.task["deployment_uuid"]) - deployment_config = deployment["config"] - if deployment_config["type"] != "ExistingCloud": - return None - - extra_config = deployment_config.get("extra", {}) - return extra_config.get("cloud_config") - - def run(self): - import os_faults - - # get cloud configuration - cloud_config = self.get_cloud_config() - - # connect to the cloud - injector = os_faults.connect(cloud_config) - - # verify that all nodes are available - if self.config.get("verify"): - injector.verify() - - LOG.debug("Injecting fault: %s", self.config["action"]) - os_faults.human_api(injector, self.config["action"]) diff --git a/rally/plugins/openstack/scenario.py b/rally/plugins/openstack/scenario.py deleted file mode 100644 index 80e12a21..00000000 --- a/rally/plugins/openstack/scenario.py +++ /dev/null @@ -1,136 +0,0 @@ -# Copyright 2015: Mirantis Inc. 
-# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import functools -import random - -from oslo_config import cfg -from osprofiler import profiler -from rally import osclients -from rally.task import scenario - -configure = functools.partial(scenario.configure, platform="openstack") - -CONF = cfg.CONF - - -class OpenStackScenario(scenario.Scenario): - """Base class for all OpenStack scenarios.""" - - def __init__(self, context=None, admin_clients=None, clients=None): - super(OpenStackScenario, self).__init__(context) - if context: - api_info = {} - if "api_versions" in context.get("config", {}): - api_versions = context["config"]["api_versions"] - for service in api_versions: - api_info[service] = { - "version": api_versions[service].get("version"), - "service_type": api_versions[service].get( - "service_type")} - - if admin_clients is None and "admin" in context: - self._admin_clients = osclients.Clients( - context["admin"]["credential"], api_info) - if clients is None: - if "users" in context and "user" not in context: - self._choose_user(context) - - if "user" in context: - self._clients = osclients.Clients( - context["user"]["credential"], api_info) - - if admin_clients: - self._admin_clients = admin_clients - - if clients: - self._clients = clients - - self._init_profiler(context) - - def _choose_user(self, context): - """Choose one user from the users context. - - On each iteration we choose one user. - - """ - if context["user_choice_method"] == "random": - user = random.choice(context["users"]) - tenant = context["tenants"][user["tenant_id"]] - else: - # Second and last case - 'round_robin'. - tenants_amount = len(context["tenants"]) - # NOTE(amaretskiy): 1 is subtracted from iteration because - # iterations start from 1 while indexing counts from 0 - iteration = context["iteration"] - 1 - tenant_index = int(iteration % tenants_amount) - tenant_id = sorted(context["tenants"].keys())[tenant_index] - tenant = context["tenants"][tenant_id] - users = context["tenants"][tenant_id]["users"] - user_index = int((iteration / tenants_amount) % len(users)) - user = users[user_index] - - context["user"], context["tenant"] = user, tenant - - def clients(self, client_type, version=None): - """Returns a Python OpenStack client of the requested type. - - The client will be that for one of the temporary non-administrator - users created before the benchmark launch. - - :param client_type: Client type ("nova"/"glance" etc.) - :param version: client version ("1"/"2" etc.) - - :returns: Standard Python OpenStack client instance - """ - client = getattr(self._clients, client_type) - - return client(version) if version is not None else client() - - def admin_clients(self, client_type, version=None): - """Returns a Python OpenStack admin client of the requested type. - - :param client_type: Client type ("nova"/"glance" etc.) - :param version: client version ("1"/"2" etc.)
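# NOTE: a self-contained sketch (not Rally API) of the 'round_robin'
# arithmetic in _choose_user above, assuming two tenants with two users
# each; all names are illustrative.
tenants = {"t0": {"users": ["u0", "u1"]}, "t1": {"users": ["u2", "u3"]}}
for iteration in range(1, 5):          # Rally iterations start at 1
    i = iteration - 1                  # shift to 0-based counting
    tenant_id = sorted(tenants)[i % len(tenants)]
    users = tenants[tenant_id]["users"]
    user = users[(i // len(tenants)) % len(users)]
    print(iteration, tenant_id, user)  # 1 t0 u0 / 2 t1 u2 / 3 t0 u1 / 4 t1 u3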
- - :returns: Python OpenStack client object - """ - client = getattr(self._admin_clients, client_type) - - return client(version) if version is not None else client() - - def _init_profiler(self, context): - """Initializes the profiler.""" - if not CONF.benchmark.enable_profiler: - return - if context is not None: - cred = None - profiler_hmac_key = None - if context.get("admin"): - cred = context["admin"]["credential"] - if cred.profiler_hmac_key is not None: - profiler_hmac_key = cred.profiler_hmac_key - if context.get("user"): - cred = context["user"]["credential"] - if cred.profiler_hmac_key is not None: - profiler_hmac_key = cred.profiler_hmac_key - if profiler_hmac_key is None: - return - profiler.init(profiler_hmac_key) - trace_id = profiler.get().get_base_id() - self.add_output(complete={ - "title": "OSProfiler Trace-ID", - "chart_plugin": "TextArea", - "data": [trace_id]}) diff --git a/rally/plugins/openstack/scenarios/__init__.py b/rally/plugins/openstack/scenarios/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/rally/plugins/openstack/scenarios/authenticate/__init__.py b/rally/plugins/openstack/scenarios/authenticate/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/rally/plugins/openstack/scenarios/authenticate/authenticate.py b/rally/plugins/openstack/scenarios/authenticate/authenticate.py deleted file mode 100644 index 2e99c309..00000000 --- a/rally/plugins/openstack/scenarios/authenticate/authenticate.py +++ /dev/null @@ -1,183 +0,0 @@ -# Copyright 2014 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally import consts -from rally.plugins.openstack import scenario -from rally.task import atomic -from rally.task import validation - - -"""Scenarios for Authentication mechanism.""" - - -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(name="Authenticate.keystone", platform="openstack") -class Keystone(scenario.OpenStackScenario): - - @atomic.action_timer("authenticate.keystone") - def run(self): - """Check Keystone Client.""" - self.clients("keystone") - - -@validation.add("number", param_name="repetitions", minval=1) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(name="Authenticate.validate_glance", platform="openstack") -class ValidateGlance(scenario.OpenStackScenario): - - def run(self, repetitions): - """Check Glance Client to ensure validation of token. - - Creation of the client does not ensure validation of the token. - We have to do some minimal operation to make sure token gets validated. - In the following we check for a non-existent image.
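# NOTE: an illustrative subclass (not from the deleted code) showing how
# the clients()/admin_clients() helpers above are typically used inside a
# scenario's run(); "nova", "glance", "keystone" and the version string
# are examples only.
class ShowClientHelpers(OpenStackScenario):
    def run(self):
        self.clients("nova").flavors.list()   # user-scoped client
        self.clients("glance", version="2")   # explicit client version
        self.admin_clients("keystone")        # admin-scoped client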
- - :param repetitions: number of times to validate - """ - glance_client = self.clients("glance") - image_name = "__intentionally_non_existent_image___" - with atomic.ActionTimer( - self, - "authenticate.validate_glance_%s_times" % repetitions): - for i in range(repetitions): - list(glance_client.images.list(name=image_name)) - - -@validation.add("number", param_name="repetitions", minval=1) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(name="Authenticate.validate_nova", platform="openstack") -class ValidateNova(scenario.OpenStackScenario): - - def run(self, repetitions): - """Check Nova Client to ensure validation of token. - - Creation of the client does not ensure validation of the token. - We have to do some minimal operation to make sure token gets validated. - - :param repetitions: number of times to validate - """ - nova_client = self.clients("nova") - with atomic.ActionTimer( - self, - "authenticate.validate_nova_%s_times" % repetitions): - for i in range(repetitions): - nova_client.flavors.list() - - -@validation.add("number", param_name="repetitions", minval=1) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(name="Authenticate.validate_ceilometer", - platform="openstack") -class ValidateCeilometer(scenario.OpenStackScenario): - - def run(self, repetitions): - """Check Ceilometer Client to ensure validation of token. - - Creation of the client does not ensure validation of the token. - We have to do some minimal operation to make sure token gets validated. - - :param repetitions: number of times to validate - """ - ceilometer_client = self.clients("ceilometer") - with atomic.ActionTimer( - self, - "authenticate.validate_ceilometer_%s_times" % repetitions): - for i in range(repetitions): - ceilometer_client.meters.list() - - -@validation.add("number", param_name="repetitions", minval=1) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(name="Authenticate.validate_cinder", platform="openstack") -class ValidateCinder(scenario.OpenStackScenario): - - def run(self, repetitions): - """Check Cinder Client to ensure validation of token. - - Creation of the client does not ensure validation of the token. - We have to do some minimal operation to make sure token gets validated. - - :param repetitions: number of times to validate - """ - cinder_client = self.clients("cinder") - with atomic.ActionTimer( - self, - "authenticate.validate_cinder_%s_times" % repetitions): - for i in range(repetitions): - cinder_client.volume_types.list() - - -@validation.add("number", param_name="repetitions", minval=1) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(name="Authenticate.validate_neutron", platform="openstack") -class ValidateNeutron(scenario.OpenStackScenario): - - def run(self, repetitions): - """Check Neutron Client to ensure validation of token. - - Creation of the client does not ensure validation of the token. - We have to do some minimal operation to make sure token gets validated. 
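# NOTE: a hedged sketch of a task entry that would drive the validate_*
# scenarios above; the runner/context layout is the standard Rally task
# format, and the concrete numbers are arbitrary examples.
example_task = {
    "Authenticate.validate_nova": [{
        "args": {"repetitions": 2},
        "runner": {"type": "constant", "times": 10, "concurrency": 2},
        "context": {"users": {"tenants": 3, "users_per_tenant": 2}},
    }]
}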
- - :param repetitions: number of times to validate - """ - neutron_client = self.clients("neutron") - with atomic.ActionTimer( - self, - "authenticate.validate_neutron_%s_times" % repetitions): - for i in range(repetitions): - neutron_client.list_networks() - - -@validation.add("number", param_name="repetitions", minval=1) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(name="Authenticate.validate_heat", platform="openstack") -class ValidateHeat(scenario.OpenStackScenario): - - def run(self, repetitions): - """Check Heat Client to ensure validation of token. - - Creation of the client does not ensure validation of the token. - We have to do some minimal operation to make sure token gets validated. - - :param repetitions: number of times to validate - """ - heat_client = self.clients("heat") - with atomic.ActionTimer( - self, - "authenticate.validate_heat_%s_times" % repetitions): - for i in range(repetitions): - list(heat_client.stacks.list(limit=0)) - - -@validation.add("number", param_name="repetitions", minval=1) -@validation.add("required_platform", platform="openstack", users=True) -@validation.add("required_services", - services=[consts.Service.MONASCA]) -@scenario.configure(name="Authenticate.validate_monasca", platform="openstack") -class ValidateMonasca(scenario.OpenStackScenario): - - def run(self, repetitions): - """Check Monasca Client to ensure validation of token. - - Creation of the client does not ensure validation of the token. - We have to do some minimal operation to make sure token gets validated. - - :param repetitions: number of times to validate - """ - monasca_client = self.clients("monasca") - with atomic.ActionTimer( - self, - "authenticate.validate_monasca_%s_times" % repetitions): - for i in range(repetitions): - list(monasca_client.metrics.list(limit=0)) diff --git a/rally/plugins/openstack/scenarios/ceilometer/__init__.py b/rally/plugins/openstack/scenarios/ceilometer/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/rally/plugins/openstack/scenarios/ceilometer/alarms.py b/rally/plugins/openstack/scenarios/ceilometer/alarms.py deleted file mode 100644 index b439adaa..00000000 --- a/rally/plugins/openstack/scenarios/ceilometer/alarms.py +++ /dev/null @@ -1,196 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally import consts -from rally.plugins.openstack import scenario -from rally.plugins.openstack.scenarios.ceilometer import utils as ceiloutils -from rally.task import validation - -"""Benchmark scenarios for Ceilometer Alarms API.""" - - -@validation.add("required_services", - services=[consts.Service.CEILOMETER]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup": ["ceilometer"]}, - name="CeilometerAlarms.create_alarm", - platform="openstack") -class CreateAlarm(ceiloutils.CeilometerScenario): - - def run(self, meter_name, threshold, **kwargs): - """Create an alarm. 
- - This scenario tests POST /v2/alarms. - meter_name and threshold are required parameters for alarm creation. - kwargs stores other optional parameters like 'ok_actions', - 'project_id', etc. that may be passed while creating an alarm. - - :param meter_name: specifies meter name of the alarm - :param threshold: specifies alarm threshold - :param kwargs: specifies optional arguments for alarm creation. - """ - - self._create_alarm(meter_name, threshold, kwargs) - - -@validation.add("required_services", - services=[consts.Service.CEILOMETER]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(name="CeilometerAlarms.list_alarms", platform="openstack") -class ListAlarms(ceiloutils.CeilometerScenario): - - def run(self): - """Fetch all alarms. - - This scenario fetches the list of all alarms using GET /v2/alarms. - """ - self._list_alarms() - - -@validation.add("required_services", - services=[consts.Service.CEILOMETER]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup": ["ceilometer"]}, - name="CeilometerAlarms.create_and_list_alarm", - platform="openstack") -class CreateAndListAlarm(ceiloutils.CeilometerScenario): - - def run(self, meter_name, threshold, **kwargs): - """Create and list the newly created alarm. - - This scenario tests GET /v2/alarms/(alarm_id). - Initially an alarm is created and then the created alarm is fetched - using its alarm_id. meter_name and threshold are required parameters - for alarm creation. kwargs stores other optional parameters like - 'ok_actions', 'project_id' etc. that may be passed while creating - an alarm. - - :param meter_name: specifies meter name of the alarm - :param threshold: specifies alarm threshold - :param kwargs: specifies optional arguments for alarm creation. - """ - alarm = self._create_alarm(meter_name, threshold, kwargs) - self._list_alarms(alarm.alarm_id) - - -@validation.add("required_services", - services=[consts.Service.CEILOMETER]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup": ["ceilometer"]}, - name="CeilometerAlarms.create_and_get_alarm", - platform="openstack") -class CreateAndGetAlarm(ceiloutils.CeilometerScenario): - - def run(self, meter_name, threshold, **kwargs): - """Create and get the newly created alarm. - - This scenario tests GET /v2/alarms/(alarm_id). - Initially an alarm is created and then its detailed information is - fetched using its alarm_id. meter_name and threshold are required - parameters for alarm creation. kwargs stores other optional parameters - like 'ok_actions', 'project_id' etc. that may be passed while creating - an alarm. - - :param meter_name: specifies meter name of the alarm - :param threshold: specifies alarm threshold - :param kwargs: specifies optional arguments for alarm creation. - """ - alarm = self._create_alarm(meter_name, threshold, kwargs) - self._get_alarm(alarm.alarm_id) - - -@validation.add("required_services", - services=[consts.Service.CEILOMETER]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup": ["ceilometer"]}, - name="CeilometerAlarms.create_and_update_alarm", - platform="openstack") -class CreateAndUpdateAlarm(ceiloutils.CeilometerScenario): - - def run(self, meter_name, threshold, **kwargs): - """Create and update the newly created alarm.
- - This scenario tests PUT /v2/alarms/(alarm_id). - Initially an alarm is created and then the created alarm is updated - using its alarm_id. meter_name and threshold are required parameters - for alarm creation. kwargs stores other optional parameters like - 'ok_actions', 'project_id', etc. that may be passed at alarm creation. - - :param meter_name: specifies meter name of the alarm - :param threshold: specifies alarm threshold - :param kwargs: specifies optional arguments for alarm creation. - """ - alarm = self._create_alarm(meter_name, threshold, kwargs) - alarm_dict_diff = {"description": "Changed Test Description"} - self._update_alarm(alarm.alarm_id, alarm_dict_diff) - - -@validation.add("required_services", - services=[consts.Service.CEILOMETER]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup": ["ceilometer"]}, - name="CeilometerAlarms.create_and_delete_alarm", - platform="openstack") -class CreateAndDeleteAlarm(ceiloutils.CeilometerScenario): - - def run(self, meter_name, threshold, **kwargs): - """Create and delete the newly created alarm. - - This scenario tests DELETE /v2/alarms/(alarm_id). - Initially an alarm is created and then the created alarm is deleted - using its alarm_id. meter_name and threshold are required parameters - for alarm creation. kwargs stores other optional parameters like - 'ok_actions', 'project_id', etc. that may be passed at alarm creation. - - :param meter_name: specifies meter name of the alarm - :param threshold: specifies alarm threshold - :param kwargs: specifies optional arguments for alarm creation. - """ - alarm = self._create_alarm(meter_name, threshold, kwargs) - self._delete_alarm(alarm.alarm_id) - - -@validation.add("required_services", - services=[consts.Service.CEILOMETER]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup": ["ceilometer"]}, - name="CeilometerAlarms.create_alarm_and_get_history", - platform="openstack") -class CreateAlarmAndGetHistory(ceiloutils.CeilometerScenario): - - def run(self, meter_name, threshold, state, timeout=60, **kwargs): - """Create an alarm, get and set the state and get the alarm history. - - This scenario makes the following queries: - GET /v2/alarms/{alarm_id}/history - GET /v2/alarms/{alarm_id}/state - PUT /v2/alarms/{alarm_id}/state - Initially an alarm is created, then the state of the created alarm is - fetched using its alarm_id, then the history of the alarm is fetched, - and finally the state of the alarm is updated to the given state. - meter_name and threshold are required parameters for alarm creation. - kwargs stores other optional parameters like 'ok_actions', - 'project_id', etc. that may be passed at alarm creation. - - :param meter_name: specifies meter name of the alarm - :param threshold: specifies alarm threshold - :param state: an alarm state to be set - :param timeout: The number of seconds for which to attempt a - successful check of the alarm state - :param kwargs: specifies optional arguments for alarm creation.
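# NOTE: illustrative task args for the alarm scenarios above; the
# meter/threshold values and the extra kwargs ("type", "statistic") are
# examples of what may be forwarded to alarm creation, not requirements.
example_task = {
    "CeilometerAlarms.create_and_delete_alarm": [{
        "args": {"meter_name": "ram_util", "threshold": 10.0,
                 "type": "threshold", "statistic": "avg"},
        "runner": {"type": "constant", "times": 10, "concurrency": 2},
        "context": {"users": {"tenants": 2, "users_per_tenant": 2}},
    }]
}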
- """ - alarm = self._create_alarm(meter_name, threshold, kwargs) - self._get_alarm_state(alarm.alarm_id) - self._get_alarm_history(alarm.alarm_id) - self._set_alarm_state(alarm, state, timeout) diff --git a/rally/plugins/openstack/scenarios/ceilometer/events.py b/rally/plugins/openstack/scenarios/ceilometer/events.py deleted file mode 100644 index 9d90d67f..00000000 --- a/rally/plugins/openstack/scenarios/ceilometer/events.py +++ /dev/null @@ -1,97 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Scenarios for Ceilometer Events API. -""" - -from rally import consts -from rally.plugins.openstack import scenario -from rally.plugins.openstack.scenarios.ceilometer import utils as cutils -from rally.plugins.openstack.scenarios.keystone import basic as kbasic -from rally.task import validation - - -# NOTE(idegtiarov): to work with event we need to create it, there are -# no other way except emit suitable notification from one of services, -# for example create new user in keystone. - -@validation.add("required_services", services=[consts.Service.CEILOMETER, - consts.Service.KEYSTONE]) -@validation.add("required_platform", platform="openstack", admin=True) -@scenario.configure(context={"admin_cleanup": ["keystone"], - "cleanup": ["ceilometer"]}, - name="CeilometerEvents.create_user_and_list_events", - platform="openstack") -class CeilometerEventsCreateUserAndListEvents(cutils.CeilometerScenario, - kbasic.KeystoneBasic): - - def run(self): - """Create user and fetch all events. - - This scenario creates user to store new event and - fetches list of all events using GET /v2/events. - """ - self.admin_keystone.create_user() - events = self._list_events() - msg = ("Events list is empty, but it should include at least one " - "event about user creation") - self.assertTrue(events, msg) - - -@validation.add("required_services", services=[consts.Service.CEILOMETER, - consts.Service.KEYSTONE]) -@validation.add("required_platform", platform="openstack", admin=True) -@scenario.configure(context={"admin_cleanup": ["keystone"], - "cleanup": ["ceilometer"]}, - name="CeilometerEvents.create_user_and_list_event_types", - platform="openstack") -class CeilometerEventsCreateUserAndListEventTypes(cutils.CeilometerScenario, - kbasic.KeystoneBasic): - - def run(self): - """Create user and fetch all event types. - - This scenario creates user to store new event and - fetches list of all events types using GET /v2/event_types. 
- """ - self.admin_keystone.create_user() - event_types = self._list_event_types() - msg = ("Event types list is empty, but it should include at least one" - " type about user creation") - self.assertTrue(event_types, msg) - - -@validation.add("required_services", services=[consts.Service.CEILOMETER, - consts.Service.KEYSTONE]) -@validation.add("required_platform", platform="openstack", admin=True) -@scenario.configure(context={"admin_cleanup": ["keystone"], - "cleanup": ["ceilometer"]}, - name="CeilometerEvents.create_user_and_get_event", - platform="openstack") -class CeilometerEventsCreateUserAndGetEvent(cutils.CeilometerScenario, - kbasic.KeystoneBasic): - - def run(self): - """Create user and gets event. - - This scenario creates user to store new event and - fetches one event using GET /v2/events/. - """ - self.admin_keystone.create_user() - events = self._list_events() - msg = ("Events list is empty, but it should include at least one " - "event about user creation") - self.assertTrue(events, msg) - self._get_event(event_id=events[0].message_id) diff --git a/rally/plugins/openstack/scenarios/ceilometer/meters.py b/rally/plugins/openstack/scenarios/ceilometer/meters.py deleted file mode 100644 index 0552faf6..00000000 --- a/rally/plugins/openstack/scenarios/ceilometer/meters.py +++ /dev/null @@ -1,71 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally import consts -from rally.plugins.openstack import scenario -from rally.plugins.openstack.scenarios.ceilometer import utils as ceiloutils -from rally.task import validation - - -"""Scenarios for Ceilometer Meters API.""" - - -@validation.add("required_services", - services=[consts.Service.CEILOMETER]) -@validation.add("required_contexts", contexts=("ceilometer")) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(name="CeilometerMeters.list_meters", platform="openstack") -class ListMeters(ceiloutils.CeilometerScenario): - - def run(self, metadata_query=None, limit=None): - """Check all available queries for list resource request. 
- - :param metadata_query: dict with metadata fields and values - :param limit: limit of meters in response - """ - - scenario = ListMatchedMeters(self.context) - scenario.run(filter_by_project_id=True) - scenario.run(filter_by_user_id=True) - scenario.run(filter_by_resource_id=True) - if metadata_query: - scenario.run(metadata_query=metadata_query) - if limit: - scenario.run(limit=limit) - - -@validation.add("required_services", - services=[consts.Service.CEILOMETER]) -@validation.add("required_contexts", contexts=("ceilometer")) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(name="CeilometerMeters.list_matched_meters", - platform="openstack") -class ListMatchedMeters(ceiloutils.CeilometerScenario): - - def run(self, filter_by_user_id=False, filter_by_project_id=False, - filter_by_resource_id=False, metadata_query=None, limit=None): - """Get meters that matched fields from context and args. - - :param filter_by_user_id: flag for query by user_id - :param filter_by_project_id: flag for query by project_id - :param filter_by_resource_id: flag for query by resource_id - :param metadata_query: dict with metadata fields and values for query - :param limit: count of resources in response - """ - - query = self._make_general_query(filter_by_project_id, - filter_by_user_id, - filter_by_resource_id, - metadata_query) - self._list_meters(query, limit) diff --git a/rally/plugins/openstack/scenarios/ceilometer/queries.py b/rally/plugins/openstack/scenarios/ceilometer/queries.py deleted file mode 100644 index d089cf1b..00000000 --- a/rally/plugins/openstack/scenarios/ceilometer/queries.py +++ /dev/null @@ -1,112 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import json - -from rally import consts -from rally.plugins.openstack import scenario -from rally.plugins.openstack.scenarios.ceilometer import utils as ceiloutils -from rally.task import validation - - -"""Scenarios for Ceilometer Queries API.""" - - -@validation.add("required_services", - services=[consts.Service.CEILOMETER]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup": ["ceilometer"]}, - name="CeilometerQueries.create_and_query_alarms", - platform="openstack") -class CeilometerQueriesCreateAndQueryAlarms(ceiloutils.CeilometerScenario): - - def run(self, meter_name, threshold, filter=None, orderby=None, - limit=None, **kwargs): - """Create an alarm and then query it with specific parameters. - - This scenario tests POST /v2/query/alarms - An alarm is first created and then fetched using the input query. 
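# NOTE: a sketch of the arguments the query scenarios above pass down to
# _query_alarms/_query_samples; the filter is JSON-encoded exactly as in
# the run() methods, while the orderby layout is an assumption based on
# the Ceilometer v2 complex-query format.
import json

filter_ = json.dumps({"and": [{"=": {"counter_name": "cpu_util"}},
                              {">": {"counter_volume": 0.5}}]})
orderby = [{"timestamp": "desc"}]
limit = 10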
- - :param meter_name: specifies meter name of alarm - :param threshold: specifies alarm threshold - :param filter: optional filter query dictionary - :param orderby: optional param for specifying ordering of results - :param limit: optional param for maximum number of results returned - :param kwargs: optional parameters for alarm creation - """ - if filter: - filter = json.dumps(filter) - - self._create_alarm(meter_name, threshold, kwargs) - self._query_alarms(filter, orderby, limit) - - -@validation.add("required_services", - services=[consts.Service.CEILOMETER]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup": ["ceilometer"]}, - name="CeilometerQueries.create_and_query_alarm_history", - platform="openstack") -class CeilometerQueriesCreateAndQueryAlarmHistory(ceiloutils - .CeilometerScenario): - - def run(self, meter_name, threshold, orderby=None, limit=None, **kwargs): - """Create an alarm and then query for its history. - - This scenario tests POST /v2/query/alarms/history - An alarm is first created and then its alarm_id is used to fetch the - history of that specific alarm. - - :param meter_name: specifies meter name of alarm - :param threshold: specifies alarm threshold - :param orderby: optional param for specifying ordering of results - :param limit: optional param for maximum number of results returned - :param kwargs: optional parameters for alarm creation - """ - alarm = self._create_alarm(meter_name, threshold, kwargs) - alarm_filter = json.dumps({"=": {"alarm_id": alarm.alarm_id}}) - self._query_alarm_history(alarm_filter, orderby, limit) - - -@validation.add("required_services", - services=[consts.Service.CEILOMETER]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup": ["ceilometer"]}, - name="CeilometerQueries.create_and_query_samples", - platform="openstack") -class CeilometerQueriesCreateAndQuerySamples(ceiloutils.CeilometerScenario): - - def run(self, counter_name, counter_type, counter_unit, counter_volume, - resource_id, filter=None, orderby=None, limit=None, **kwargs): - """Create a sample and then query it with specific parameters. - - This scenario tests POST /v2/query/samples - A sample is first created and then fetched using the input query. - - :param counter_name: specifies name of the counter - :param counter_type: specifies type of the counter - :param counter_unit: specifies unit of the counter - :param counter_volume: specifies volume of the counter - :param resource_id: specifies resource id for the sample created - :param filter: optional filter query dictionary - :param orderby: optional param for specifying ordering of results - :param limit: optional param for maximum number of results returned - :param kwargs: parameters for sample creation - """ - self._create_sample(counter_name, counter_type, counter_unit, - counter_volume, resource_id, **kwargs) - - if filter: - filter = json.dumps(filter) - self._query_samples(filter, orderby, limit) diff --git a/rally/plugins/openstack/scenarios/ceilometer/resources.py b/rally/plugins/openstack/scenarios/ceilometer/resources.py deleted file mode 100644 index b6d9cde5..00000000 --- a/rally/plugins/openstack/scenarios/ceilometer/resources.py +++ /dev/null @@ -1,107 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally import consts -from rally.plugins.openstack import scenario -from rally.plugins.openstack.scenarios.ceilometer import utils as ceiloutils -from rally.task import validation - - -"""Scenarios for Ceilometer Resource API.""" - - -@validation.add("required_services", - services=[consts.Service.CEILOMETER]) -@validation.add("required_contexts", contexts=("ceilometer")) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(name="CeilometerResource.list_resources", - platform="openstack") -class ListResources(ceiloutils.CeilometerScenario): - - def run(self, metadata_query=None, start_time=None, - end_time=None, limit=None): - """Check all available queries for list resource request. - - This scenario fetches list of all resources using GET /v2/resources. - - :param metadata_query: dict with metadata fields and values for query - :param start_time: lower bound of resource timestamp in isoformat - :param end_time: upper bound of resource timestamp in isoformat - :param limit: count of resources in response - """ - scenario = ListMatchedResources(self.context) - scenario.run(filter_by_project_id=True) - scenario.run(filter_by_user_id=True) - scenario.run(filter_by_resource_id=True) - if metadata_query: - scenario.run(metadata_query=metadata_query) - if start_time: - scenario.run(start_time=start_time) - if end_time: - scenario.run(end_time=end_time) - if start_time and end_time: - scenario.run(start_time=start_time, end_time=end_time) - if limit: - scenario.run(limit=limit) - - -@validation.add("required_services", - services=[consts.Service.CEILOMETER]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(name="CeilometerResource.get_tenant_resources", - platform="openstack") -class GetTenantResources(ceiloutils.CeilometerScenario): - - def run(self): - """Get all tenant resources. - - This scenario retrieves information about tenant resources using - GET /v2/resources/(resource_id) - """ - resources = self.context["tenant"].get("resources", []) - msg = ("No resources found for tenant: %s" - % self.context["tenant"].get("name")) - self.assertTrue(resources, msg) - for res_id in resources: - self._get_resource(res_id) - - -@validation.add("required_services", - services=[consts.Service.CEILOMETER]) -@validation.add("required_contexts", contexts=("ceilometer")) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(name="CeilometerResource.list_matched_resources", - platform="openstack") -class ListMatchedResources(ceiloutils.CeilometerScenario): - - def run(self, filter_by_user_id=False, filter_by_project_id=False, - filter_by_resource_id=False, metadata_query=None, start_time=None, - end_time=None, limit=None): - """Get resources that matched fields from context and args. 
- - :param filter_by_user_id: flag for query by user_id - :param filter_by_project_id: flag for query by project_id - :param filter_by_resource_id: flag for query by resource_id - :param metadata_query: dict with metadata fields and values for query - :param start_time: lower bound of resource timestamp in isoformat - :param end_time: upper bound of resource timestamp in isoformat - :param limit: count of resources in response - """ - - query = self._make_general_query(filter_by_project_id, - filter_by_user_id, - filter_by_resource_id, - metadata_query) - query += self._make_timestamp_query(start_time, end_time) - self._list_resources(query, limit) diff --git a/rally/plugins/openstack/scenarios/ceilometer/samples.py b/rally/plugins/openstack/scenarios/ceilometer/samples.py deleted file mode 100644 index 2f1f19d7..00000000 --- a/rally/plugins/openstack/scenarios/ceilometer/samples.py +++ /dev/null @@ -1,71 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally import consts -from rally.plugins.openstack import scenario -from rally.plugins.openstack.scenarios.ceilometer import utils as ceiloutils -from rally.task import validation - - -"""Scenarios for Ceilometer Samples API.""" - - -@validation.add("required_services", - services=[consts.Service.CEILOMETER]) -@validation.add("required_contexts", contexts=("ceilometer")) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(name="CeilometerSamples.list_matched_samples", - platform="openstack") -class ListMatchedSamples(ceiloutils.CeilometerScenario): - - def run(self, filter_by_resource_id=False, filter_by_project_id=False, - filter_by_user_id=False, metadata_query=None, limit=None): - """Get list of samples that matched fields from context and args. - - :param filter_by_user_id: flag for query by user_id - :param filter_by_project_id: flag for query by project_id - :param filter_by_resource_id: flag for query by resource_id - :param metadata_query: dict with metadata fields and values for query - :param limit: count of samples in response - """ - query = self._make_general_query(filter_by_project_id, - filter_by_user_id, - filter_by_resource_id, - metadata_query) - self._list_samples(query, limit) - - -@validation.add("required_services", - services=[consts.Service.CEILOMETER]) -@validation.add("required_contexts", contexts=("ceilometer")) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(name="CeilometerSamples.list_samples", - platform="openstack") -class ListSamples(ceiloutils.CeilometerScenario): - - def run(self, metadata_query=None, limit=None): - """Fetch all available queries for list sample request. 
- - :param metadata_query: dict with metadata fields and values for query - :param limit: count of samples in response - """ - - scenario = ListMatchedSamples(self.context) - scenario.run(filter_by_project_id=True) - scenario.run(filter_by_user_id=True) - scenario.run(filter_by_resource_id=True) - if metadata_query: - scenario.run(metadata_query=metadata_query) - if limit: - scenario.run(limit=limit) diff --git a/rally/plugins/openstack/scenarios/ceilometer/stats.py b/rally/plugins/openstack/scenarios/ceilometer/stats.py deleted file mode 100644 index ff8aedeb..00000000 --- a/rally/plugins/openstack/scenarios/ceilometer/stats.py +++ /dev/null @@ -1,76 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally.common import logging -from rally import consts -from rally.plugins.openstack import scenario -from rally.plugins.openstack.scenarios.ceilometer import utils -from rally.task import validation - - -"""Scenarios for Ceilometer Stats API.""" - - -@validation.add("required_services", - services=[consts.Service.CEILOMETER]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(name="CeilometerStats.create_meter_and_get_stats", - platform="openstack") -class CreateMeterAndGetStats(utils.CeilometerScenario): - - @logging.log_deprecated("Use 'get_stats' method, now samples are created " - "in context", "0.1.2") - def run(self, **kwargs): - """Create a meter and fetch its statistics. - - A meter is first created and then its statistics are fetched - using GET /v2/meters/(meter_name)/statistics. - - :param kwargs: contains optional arguments to create a meter - """ - meter = self._create_meter(**kwargs) - self._get_stats(meter.counter_name) - - -@validation.add("required_services", - services=[consts.Service.CEILOMETER]) -@validation.add("required_contexts", contexts=("ceilometer")) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(name="CeilometerStats.get_stats", platform="openstack") -class GetStats(utils.CeilometerScenario): - - def run(self, meter_name, filter_by_user_id=False, - filter_by_project_id=False, filter_by_resource_id=False, - metadata_query=None, period=None, groupby=None, aggregates=None): - """Fetch statistics for a certain meter. - - Statistics are fetched for the given meter using - GET /v2/meters/(meter_name)/statistics.
- - :param meter_name: meter to take statistics for - :param filter_by_user_id: flag for query by user_id - :param filter_by_project_id: flag for query by project_id - :param filter_by_resource_id: flag for query by resource_id - :param metadata_query: dict with metadata fields and values for query - :param period: the length of the time range covered by these stats - :param groupby: the fields used to group the samples - :param aggregates: name of function for samples aggregation - - :returns: list of statistics data - """ - query = self._make_general_query(filter_by_project_id, - filter_by_user_id, - filter_by_resource_id, - metadata_query) - self._get_stats(meter_name, query, period, groupby, aggregates) diff --git a/rally/plugins/openstack/scenarios/ceilometer/traits.py b/rally/plugins/openstack/scenarios/ceilometer/traits.py deleted file mode 100644 index 256eddc0..00000000 --- a/rally/plugins/openstack/scenarios/ceilometer/traits.py +++ /dev/null @@ -1,73 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally import consts -from rally.plugins.openstack import scenario -from rally.plugins.openstack.scenarios.ceilometer import utils as cutils -from rally.plugins.openstack.scenarios.keystone import basic as kbasic -from rally.task import validation - - -"""Scenarios for Ceilometer Traits API.""" - - -# NOTE(idegtiarov): to work with traits we first need to create an event; -# there is no other way except emitting a suitable notification from one of -# the services, for example by creating a new user in keystone. - -@validation.add("required_services", services=[consts.Service.CEILOMETER, - consts.Service.KEYSTONE]) -@validation.add("required_platform", platform="openstack", admin=True) -@scenario.configure(context={"admin_cleanup": ["keystone"], - "cleanup": ["ceilometer"]}, - name="CeilometerTraits.create_user_and_list_traits", - platform="openstack") -class CreateUserAndListTraits(cutils.CeilometerScenario, - kbasic.KeystoneBasic): - - def run(self): - """Create a user and fetch all event traits. - - This scenario creates a user to emit a new event and - fetches the list of all traits for a certain event type and - trait name using GET /v2/event_types/(event_type)/traits/(trait_name). - """ - self.admin_keystone.create_user() - event = self._list_events()[0] - trait_name = event.traits[0]["name"] - self._list_event_traits(event_type=event.event_type, - trait_name=trait_name) - - -@validation.add("required_services", services=[consts.Service.CEILOMETER, - consts.Service.KEYSTONE]) -@validation.add("required_platform", platform="openstack", admin=True) -@scenario.configure(context={"admin_cleanup": ["keystone"], - "cleanup": ["ceilometer"]}, - name="CeilometerTraits.create_user_and" - "_list_trait_descriptions", - platform="openstack") -class CreateUserAndListTraitDescriptions( - cutils.CeilometerScenario, kbasic.KeystoneBasic): - - def run(self): - """Create a user and fetch all trait descriptions.
- - This scenario creates a user to emit a new event and - fetches the list of all traits for a certain event type using - GET /v2/event_types/(event_type)/traits. - """ - self.admin_keystone.create_user() - event = self._list_events()[0] - self._list_event_trait_descriptions(event_type=event.event_type) diff --git a/rally/plugins/openstack/scenarios/ceilometer/utils.py b/rally/plugins/openstack/scenarios/ceilometer/utils.py deleted file mode 100644 index 74501749..00000000 --- a/rally/plugins/openstack/scenarios/ceilometer/utils.py +++ /dev/null @@ -1,466 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import datetime as dt - -import six - -from rally import exceptions -from rally.plugins.openstack import scenario -from rally.task import atomic -from rally.task import utils as bench_utils - - -class CeilometerScenario(scenario.OpenStackScenario): - """Base class for Ceilometer scenarios with basic atomic actions.""" - - def _make_samples(self, count=1, interval=0, counter_name="cpu_util", - counter_type="gauge", counter_unit="%", counter_volume=1, - project_id=None, user_id=None, source=None, - timestamp=None, metadata_list=None, batch_size=None): - """Prepare and return a list of samples. - - :param count: specifies number of samples in array - :param interval: specifies interval between timestamps of near-by - samples - :param counter_name: specifies name of the counter - :param counter_type: specifies type of the counter - :param counter_unit: specifies unit of the counter - :param counter_volume: specifies volume of the counter - :param project_id: specifies project id for samples - :param user_id: specifies user id for samples - :param source: specifies source for samples - :param timestamp: specifies timestamp for samples - :param metadata_list: specifies list of resource metadata - :param batch_size: specifies number of samples to store in one query - :returns: generator that produces lists of samples - """ - batch_size = batch_size or count - sample = { - "counter_name": counter_name, - "counter_type": counter_type, - "counter_unit": counter_unit, - "counter_volume": counter_volume, - "resource_id": self.generate_random_name() - } - opt_fields = { - "project_id": project_id, - "user_id": user_id, - "source": source, - "timestamp": timestamp, - } - for k, v in opt_fields.items(): - if v: - sample.update({k: v}) - len_meta = len(metadata_list) if metadata_list else 0 - now = timestamp or dt.datetime.utcnow() - samples = [] - for i in six.moves.xrange(count): - if i and not (i % batch_size): - yield samples - samples = [] - sample_item = dict(sample) - sample_item["timestamp"] = ( - now - dt.timedelta(seconds=(interval * i)) - ).isoformat() - if metadata_list: - # NOTE(idegtiarov): Adding more than one metadata template - # requires distributing them proportionally among all samples.
- sample_item["resource_metadata"] = metadata_list[ - i * len_meta // count - ] - samples.append(sample_item) - yield samples - - def _make_query_item(self, field, op="eq", value=None): - """Create a SimpleQuery item for requests. - - :param field: filtered field - :param op: operator for filtering - :param value: matched value - - :returns: dict with field, op and value keys for query - """ - return {"field": field, "op": op, "value": value} - - def _make_general_query(self, filter_by_project_id=None, - filter_by_user_id=None, - filter_by_resource_id=None, - metadata_query=None): - """Create a SimpleQuery for the list benchmarks. - - :param filter_by_project_id: add a project id to query - :param filter_by_user_id: add a user id to query - :param filter_by_resource_id: add a resource id to query - :param metadata_query: metadata dict that will add to query - - :returns: SimpleQuery with specified items - - """ - query = [] - metadata_query = metadata_query or {} - - if filter_by_user_id: - query.append(self._make_query_item("user_id", "eq", - self.context["user"]["id"])) - if filter_by_project_id: - query.append(self._make_query_item( - "project_id", "eq", self.context["tenant"]["id"])) - if filter_by_resource_id: - query.append(self._make_query_item( - "resource_id", "eq", self.context["tenant"]["resources"][0])) - - for key, value in metadata_query.items(): - query.append(self._make_query_item("metadata.%s" % key, - value=value)) - return query - - def _make_timestamp_query(self, start_time=None, end_time=None): - """Create ceilometer query for timestamp range. - - :param start_time: start datetime in isoformat - :param end_time: end datetime in isoformat - :returns: query with timestamp range - """ - query = [] - if end_time and start_time and end_time < start_time: - msg = "End time should be great or equal than start time" - raise exceptions.InvalidArgumentsException(msg) - if start_time: - query.append(self._make_query_item("timestamp", ">=", start_time)) - if end_time: - query.append(self._make_query_item("timestamp", "<=", end_time)) - return query - - def _make_profiler_key(self, method, query=None, limit=None): - """Create key for profiling method with query. - - :param method: Original profiler tag for method - :param query: ceilometer query which fields will be added to key - :param limit: if it exists `limit` will be added to key - :returns: profiler key that includes method and queried fields - """ - query = query or [] - limit_line = limit and "limit" or "" - fields_line = "&".join("%s" % a["field"] for a in query) - key_identifiers = "&".join(x for x in (limit_line, fields_line) if x) - key = ":".join(x for x in (method, key_identifiers) if x) - return key - - def _get_alarm_dict(self, **kwargs): - """Prepare and return an alarm dict for creating an alarm. - - :param kwargs: optional parameters to create alarm - :returns: alarm dictionary used to create an alarm - """ - alarm_id = self.generate_random_name() - alarm = {"alarm_id": alarm_id, - "name": alarm_id, - "description": "Test Alarm"} - - alarm.update(kwargs) - return alarm - - @atomic.action_timer("ceilometer.list_alarms") - def _list_alarms(self, alarm_id=None): - """List alarms. - - List alarm matching alarm_id. It fetches all alarms - if alarm_id is None. 
- - @atomic.action_timer("ceilometer.list_alarms") - def _list_alarms(self, alarm_id=None): - """List alarms. - - List the alarm matching alarm_id. It fetches all alarms - if alarm_id is None. - - :param alarm_id: specifies id of the alarm - :returns: list of alarms - """ - if alarm_id: - return self.clients("ceilometer").alarms.get(alarm_id) - else: - return self.clients("ceilometer").alarms.list() - - @atomic.action_timer("ceilometer.get_alarm") - def _get_alarm(self, alarm_id): - """Get detailed information of an alarm. - - :param alarm_id: Specifies id of the alarm - :returns: If alarm_id exists and is correct, returns - detailed information about the alarm, else returns None - """ - return self.clients("ceilometer").alarms.get(alarm_id) - - @atomic.action_timer("ceilometer.create_alarm") - def _create_alarm(self, meter_name, threshold, kwargs): - """Create an alarm. - - :param meter_name: specifies meter name of the alarm - :param threshold: specifies alarm threshold - :param kwargs: contains optional features of alarm to be created - :returns: alarm - """ - alarm_dict = self._get_alarm_dict(**kwargs) - alarm_dict.update({"meter_name": meter_name, - "threshold": threshold}) - alarm = self.clients("ceilometer").alarms.create(**alarm_dict) - return alarm - - @atomic.action_timer("ceilometer.delete_alarm") - def _delete_alarm(self, alarm_id): - """Delete an alarm. - - :param alarm_id: specifies id of the alarm - """ - self.clients("ceilometer").alarms.delete(alarm_id) - - @atomic.action_timer("ceilometer.update_alarm") - def _update_alarm(self, alarm_id, alarm_dict_delta): - """Update an alarm. - - :param alarm_id: specifies id of the alarm - :param alarm_dict_delta: features of alarm to be updated - """ - self.clients("ceilometer").alarms.update(alarm_id, **alarm_dict_delta) - - @atomic.action_timer("ceilometer.get_alarm_history") - def _get_alarm_history(self, alarm_id): - """Assemble the alarm history requested. - - :param alarm_id: specifies id of the alarm - :returns: list of alarm changes - """ - return self.clients("ceilometer").alarms.get_history(alarm_id) - - @atomic.action_timer("ceilometer.get_alarm_state") - def _get_alarm_state(self, alarm_id): - """Get the state of the alarm. - - :param alarm_id: specifies id of the alarm - :returns: state of the alarm - """ - return self.clients("ceilometer").alarms.get_state(alarm_id) - - @atomic.action_timer("ceilometer.set_alarm_state") - def _set_alarm_state(self, alarm, state, timeout): - """Set the state of the alarm. - - :param alarm: alarm instance - :param state: an alarm state to be set - :param timeout: The number of seconds for which to attempt a - successful check of the alarm state. - :returns: alarm in the set state - """ - self.clients("ceilometer").alarms.set_state(alarm.alarm_id, state) - return bench_utils.wait_for(alarm, - ready_statuses=[state], - update_resource=bench_utils - .get_from_manager(), - timeout=timeout, check_interval=1) - - @atomic.action_timer("ceilometer.list_events") - def _list_events(self): - """Get list of user's events. - - It fetches all events. - :returns: list of events - """ - return self.admin_clients("ceilometer").events.list() - - @atomic.action_timer("ceilometer.get_event") - def _get_event(self, event_id): - """Get event with specific id. - - Get event matching event_id. - - :param event_id: specifies id of the event - :returns: event - """ - return self.admin_clients("ceilometer").events.get(event_id) - - @atomic.action_timer("ceilometer.list_event_types") - def _list_event_types(self): - """Get list of all event types.
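# NOTE: a hedged sketch (inside some CeilometerScenario subclass) chaining
# the atomic helpers above the way the alarm scenarios do; the extra alarm
# kwargs are illustrative.
def run(self):
    alarm = self._create_alarm("cpu_util", 75.0,
                               {"type": "threshold", "statistic": "avg"})
    self._update_alarm(alarm.alarm_id, {"description": "updated"})
    self._set_alarm_state(alarm, "alarm", timeout=60)  # waits via wait_for
    self._delete_alarm(alarm.alarm_id)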
- - :returns: list of event types - """ - return self.admin_clients("ceilometer").event_types.list() - - @atomic.action_timer("ceilometer.list_event_traits") - def _list_event_traits(self, event_type, trait_name): - """Get list of event traits. - - :param event_type: specifies the type of event - :param trait_name: specifies trait name - :returns: list of event traits - """ - return self.admin_clients("ceilometer").traits.list(event_type, - trait_name) - - @atomic.action_timer("ceilometer.list_event_trait_descriptions") - def _list_event_trait_descriptions(self, event_type): - """Get list of event trait descriptions. - - :param event_type: specifies the type of event - :returns: list of event trait descriptions - """ - return self.admin_clients("ceilometer").trait_descriptions.list( - event_type) - - def _list_samples(self, query=None, limit=None): - """List all Samples. - - :param query: optional param that specify query - :param limit: optional param for maximum number of samples returned - :returns: list of samples - """ - key = self._make_profiler_key("ceilometer.list_samples", query, - limit) - with atomic.ActionTimer(self, key): - return self.clients("ceilometer").new_samples.list(q=query, - limit=limit) - - @atomic.action_timer("ceilometer.get_resource") - def _get_resource(self, resource_id): - """Retrieve details about one resource.""" - return self.clients("ceilometer").resources.get(resource_id) - - @atomic.action_timer("ceilometer.get_stats") - def _get_stats(self, meter_name, query=None, period=None, groupby=None, - aggregates=None): - """Get stats for a specific meter. - - :param meter_name: Name of ceilometer meter - :param query: list of queries - :param period: the length of the time range covered by these stats - :param groupby: the fields used to group the samples - :param aggregates: function for samples aggregation - - :returns: list of statistics data - """ - return self.clients("ceilometer").statistics.list(meter_name, q=query, - period=period, - groupby=groupby, - aggregates=aggregates - ) - - @atomic.action_timer("ceilometer.create_meter") - def _create_meter(self, **kwargs): - """Create a new meter. - - :param kwargs: Contains the optional attributes for meter creation - :returns: Newly created meter - """ - name = self.generate_random_name() - samples = self.clients("ceilometer").samples.create( - counter_name=name, **kwargs) - return samples[0] - - @atomic.action_timer("ceilometer.query_alarms") - def _query_alarms(self, filter, orderby, limit): - """Query alarms with specific parameters. - - If no input params are provided, it returns all the results - in the database. - - :param limit: optional param for maximum number of results returned - :param orderby: optional param for specifying ordering of results - :param filter: optional filter query - :returns: queried alarms - """ - return self.clients("ceilometer").query_alarms.query( - filter, orderby, limit) - - @atomic.action_timer("ceilometer.query_alarm_history") - def _query_alarm_history(self, filter, orderby, limit): - """Query history of an alarm. - - If no input params are provided, it returns all the results - in the database. 
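# NOTE: a sketch of consuming the _make_samples() generator defined
# earlier in this class together with the _create_samples() helper just
# below to bulk-load samples in batches, roughly the way a samples-
# producing context would; the counts are arbitrary examples.
def run(self):
    for batch in self._make_samples(count=100, interval=60,
                                    batch_size=25):
        self._create_samples(batch)  # ceilometer samples.create_list()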
- - :param limit: optional param for maximum number of results returned - :param orderby: optional param for specifying ordering of results - :param filter: optional filter query - :returns: alarm history - """ - return self.clients("ceilometer").query_alarm_history.query( - filter, orderby, limit) - - @atomic.action_timer("ceilometer.create_sample") - def _create_sample(self, counter_name, counter_type, counter_unit, - counter_volume, resource_id=None, **kwargs): - """Create a Sample with specified parameters. - - :param counter_name: specifies name of the counter - :param counter_type: specifies type of the counter - :param counter_unit: specifies unit of the counter - :param counter_volume: specifies volume of the counter - :param resource_id: specifies resource id for the sample created - :param kwargs: contains optional parameters for creating a sample - :returns: created sample - """ - kwargs.update({"counter_name": counter_name, - "counter_type": counter_type, - "counter_unit": counter_unit, - "counter_volume": counter_volume, - "resource_id": resource_id if resource_id - else self.generate_random_name()}) - return self.clients("ceilometer").samples.create(**kwargs) - - @atomic.action_timer("ceilometer.create_samples") - def _create_samples(self, samples): - """Create Samples with specified parameters. - - :param samples: a list of samples to create - :returns: created list samples - """ - return self.clients("ceilometer").samples.create_list(samples) - - @atomic.action_timer("ceilometer.query_samples") - def _query_samples(self, filter, orderby, limit): - """Query samples with specified parameters. - - If no input params are provided, it returns all the results - in the database. - - :param limit: optional param for maximum number of results returned - :param orderby: optional param for specifying ordering of results - :param filter: optional filter query - :returns: queried samples - """ - return self.clients("ceilometer").query_samples.query( - filter, orderby, limit) - - def _list_resources(self, query=None, limit=None): - """List all resources. - - :param query: query list for Ceilometer api - :param limit: count of returned resources - :returns: list of all resources - """ - - key = self._make_profiler_key("ceilometer.list_resources", query, - limit) - with atomic.ActionTimer(self, key): - return self.clients("ceilometer").resources.list(q=query, - limit=limit) - - def _list_meters(self, query=None, limit=None): - """Get list of user's meters. - - :param query: query list for Ceilometer api - :param limit: count of returned meters - :returns: list of all meters - """ - - key = self._make_profiler_key("ceilometer.list_meters", query, - limit) - with atomic.ActionTimer(self, key): - return self.clients("ceilometer").meters.list(q=query, - limit=limit) diff --git a/rally/plugins/openstack/scenarios/cinder/__init__.py b/rally/plugins/openstack/scenarios/cinder/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/rally/plugins/openstack/scenarios/cinder/qos_specs.py b/rally/plugins/openstack/scenarios/cinder/qos_specs.py deleted file mode 100644 index 67a3e065..00000000 --- a/rally/plugins/openstack/scenarios/cinder/qos_specs.py +++ /dev/null @@ -1,101 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally import consts -from rally.plugins.openstack import scenario -from rally.plugins.openstack.scenarios.cinder import utils as cinder_utils -from rally.task import validation - - -"""Scenarios for Cinder QoS.""" - - -@validation.add("required_services", services=[consts.Service.CINDER]) -@validation.add("required_platform", platform="openstack", admin=True) -@scenario.configure(context={"admin_cleanup": ["cinder"]}, - name="CinderQos.create_and_list_qos", platform="openstack") -class CreateAndListQos(cinder_utils.CinderBasic): - def run(self, consumer, write_iops_sec, read_iops_sec): - """Create a qos, then list all qos. - - :param consumer: Consumer behavior - :param write_iops_sec: random write limitation - :param read_iops_sec: random read limitation - """ - specs = { - "consumer": consumer, - "write_iops_sec": write_iops_sec, - "read_iops_sec": read_iops_sec - } - - qos = self.admin_cinder.create_qos(specs) - - pool_list = self.admin_cinder.list_qos() - msg = ("Qos not included into list of available qos\n" - "created qos:{}\n" - "Pool of qos:{}").format(qos, pool_list) - self.assertIn(qos, pool_list, err_msg=msg) - - -@validation.add("required_services", services=[consts.Service.CINDER]) -@validation.add("required_platform", platform="openstack", admin=True) -@scenario.configure(context={"admin_cleanup": ["cinder"]}, - name="CinderQos.create_and_get_qos", platform="openstack") -class CreateAndGetQos(cinder_utils.CinderBasic): - def run(self, consumer, write_iops_sec, read_iops_sec): - """Create a qos, then get details of the qos. - - :param consumer: Consumer behavior - :param write_iops_sec: random write limitation - :param read_iops_sec: random read limitation - """ - specs = { - "consumer": consumer, - "write_iops_sec": write_iops_sec, - "read_iops_sec": read_iops_sec - } - - qos = self.admin_cinder.create_qos(specs) - self.admin_cinder.get_qos(qos.id) - - -@validation.add("required_services", services=[consts.Service.CINDER]) -@validation.add("required_platform", platform="openstack", admin=True) -@scenario.configure(context={"admin_cleanup": ["cinder"]}, - name="CinderQos.create_and_set_qos", platform="openstack") -class CreateAndSetQos(cinder_utils.CinderBasic): - def run(self, consumer, write_iops_sec, read_iops_sec, - set_consumer, set_write_iops_sec, set_read_iops_sec): - """Create a qos, then Add/Update keys in qos specs. 
- - :param consumer: Consumer behavior - :param write_iops_sec: random write limitation - :param read_iops_sec: random read limitation - :param set_consumer: update Consumer behavior - :param set_write_iops_sec: update random write limitation - :param set_read_iops_sec: update random read limitation - """ - create_specs = { - "consumer": consumer, - "write_iops_sec": write_iops_sec, - "read_iops_sec": read_iops_sec - } - set_specs = { - "consumer": set_consumer, - "write_iops_sec": set_write_iops_sec, - "read_iops_sec": set_read_iops_sec - } - - qos = self.admin_cinder.create_qos(create_specs) - self.admin_cinder.set_qos(qos=qos, set_specs_args=set_specs) diff --git a/rally/plugins/openstack/scenarios/cinder/utils.py b/rally/plugins/openstack/scenarios/cinder/utils.py deleted file mode 100644 index 2101dfb3..00000000 --- a/rally/plugins/openstack/scenarios/cinder/utils.py +++ /dev/null @@ -1,508 +0,0 @@ -# Copyright 2013 Huawei Technologies Co.,LTD. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import random - -from oslo_config import cfg - -from rally.common.i18n import _, _LW -from rally.common import logging -from rally import exceptions -from rally.plugins.openstack import scenario -from rally.plugins.openstack.services.storage import block -from rally.plugins.openstack.wrappers import cinder as cinder_wrapper -from rally.plugins.openstack.wrappers import glance as glance_wrapper -from rally.task import atomic -from rally.task import utils as bench_utils - -CONF = cfg.CONF -LOG = logging.getLogger(__name__) - - -class CinderBasic(scenario.OpenStackScenario): - def __init__(self, context=None, admin_clients=None, clients=None): - super(CinderBasic, self).__init__(context, admin_clients, clients) - if hasattr(self, "_admin_clients"): - self.admin_cinder = block.BlockStorage( - self._admin_clients, name_generator=self.generate_random_name, - atomic_inst=self.atomic_actions()) - if hasattr(self, "_clients"): - self.cinder = block.BlockStorage( - self._clients, name_generator=self.generate_random_name, - atomic_inst=self.atomic_actions()) - - def get_random_server(self): - server_id = random.choice(self.context["tenant"]["servers"]) - return self.clients("nova").servers.get(server_id) - - -class CinderScenario(scenario.OpenStackScenario): - """Base class for Cinder scenarios with basic atomic actions.""" - - def __init__(self, context=None, admin_clients=None, clients=None): - super(CinderScenario, self).__init__(context, admin_clients, clients) - LOG.warning(_LW( - "Class %s is deprecated since Rally 0.10.0 and will be removed " - "soon. Use " - "rally.plugins.openstack.services.storage.block.BlockStorage " - "instead.") % self.__class__) - - @atomic.action_timer("cinder.list_volumes") - def _list_volumes(self, detailed=True): - """Returns user volumes list.""" - - return self.clients("cinder").volumes.list(detailed) - - @atomic.action_timer("cinder.get_volume") - def _get_volume(self, volume_id): - """get volume detailed information. 
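# CinderBasic (defined above) wires the unified block-storage service to
# self.cinder / self.admin_cinder, so new-style scenarios call those
# instead of the deprecated private helpers of CinderScenario below. A
# hypothetical minimal scenario built on it (illustrative, not from the
# original tree):

class CreateAndListVolumesSketch(CinderBasic):
    def run(self, size, detailed=True):
        # create one volume, then time the listing via the service layer
        self.cinder.create_volume(size)
        self.cinder.list_volumes(detailed)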
- - :param volume_id: id of volume - :returns: class:`Volume` - """ - return self.clients("cinder").volumes.get(volume_id) - - @atomic.action_timer("cinder.list_snapshots") - def _list_snapshots(self, detailed=True): - """Returns user snapshots list.""" - - return self.clients("cinder").volume_snapshots.list(detailed) - - @atomic.action_timer("cinder.list_types") - def _list_types(self, search_opts=None, is_public=None): - """Lists all volume types. - - :param search_opts: Options used when search for volume types - :param is_public: If query public volume type - :returns: A list of volume types - """ - return self.clients("cinder").volume_types.list(search_opts, - is_public) - - def _set_metadata(self, volume, sets=10, set_size=3): - """Set volume metadata. - - :param volume: The volume to set metadata on - :param sets: how many operations to perform - :param set_size: number of metadata keys to set in each operation - :returns: A list of keys that were set - """ - key = "cinder.set_%s_metadatas_%s_times" % (set_size, sets) - with atomic.ActionTimer(self, key): - keys = [] - for i in range(sets): - metadata = {} - for j in range(set_size): - key = self.generate_random_name() - keys.append(key) - metadata[key] = self.generate_random_name() - - self.clients("cinder").volumes.set_metadata(volume, metadata) - return keys - - def _delete_metadata(self, volume, keys, deletes=10, delete_size=3): - """Delete volume metadata keys. - - Note that ``len(keys)`` must be greater than or equal to - ``deletes * delete_size``. - - :param volume: The volume to delete metadata from - :param deletes: how many operations to perform - :param delete_size: number of metadata keys to delete in each operation - :param keys: a list of keys to choose deletion candidates from - """ - if len(keys) < deletes * delete_size: - raise exceptions.InvalidArgumentsException( - "Not enough metadata keys to delete: " - "%(num_keys)s keys, but asked to delete %(num_deletes)s" % - {"num_keys": len(keys), - "num_deletes": deletes * delete_size}) - # make a shallow copy of the list of keys so that, when we pop - # from it later, we don't modify the original list. - keys = list(keys) - random.shuffle(keys) - action_name = "cinder.delete_%s_metadatas_%s_times" % (delete_size, - deletes) - with atomic.ActionTimer(self, action_name): - for i in range(deletes): - to_del = keys[i * delete_size:(i + 1) * delete_size] - self.clients("cinder").volumes.delete_metadata(volume, to_del) - - @atomic.action_timer("cinder.create_volume") - def _create_volume(self, size, **kwargs): - """Create one volume. - - Returns when the volume is actually created and is in the "Available" - state. - - :param size: int be size of volume in GB, or - dictionary, must contain two values: - min - minimum size volumes will be created as; - max - maximum size volumes will be created as. - :param kwargs: Other optional parameters to initialize the volume - :returns: Created volume object - """ - if isinstance(size, dict): - size = random.randint(size["min"], size["max"]) - - client = cinder_wrapper.wrap(self._clients.cinder, self) - volume = client.create_volume(size, **kwargs) - - # NOTE(msdubov): It is reasonable to wait 5 secs before starting to - # check whether the volume is ready => less API calls. 
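# The helpers here delegate polling to bench_utils.wait_for(), which
# repeatedly refreshes the resource via update_resource until its status
# is in ready_statuses. A simplified, self-contained model of that
# contract (Rally's real implementation adds deletion checks and richer
# error reporting):

import time

def wait_for_sketch(resource, ready_statuses, update_resource,
                    timeout=600, check_interval=1):
    deadline = time.time() + timeout
    while time.time() < deadline:
        resource = update_resource(resource)
        if resource.status.lower() in ready_statuses:
            return resource
        time.sleep(check_interval)
    raise RuntimeError("resource did not reach %s within %s seconds"
                       % (ready_statuses, timeout))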
- self.sleep_between(CONF.benchmark.cinder_volume_create_prepoll_delay) - - volume = bench_utils.wait_for( - volume, - ready_statuses=["available"], - update_resource=bench_utils.get_from_manager(), - timeout=CONF.benchmark.cinder_volume_create_timeout, - check_interval=CONF.benchmark.cinder_volume_create_poll_interval - ) - return volume - - @atomic.action_timer("cinder.update_volume") - def _update_volume(self, volume, **update_volume_args): - """Update name and description for this volume - - This atomic function updates volume information. The volume - display name is always changed, and additional update - arguments may also be specified. - - :param volume: volume object - :param update_volume_args: dict, contains values to be updated. - """ - client = cinder_wrapper.wrap(self._clients.cinder, self) - client.update_volume(volume, **update_volume_args) - - @atomic.action_timer("cinder.update_readonly_flag") - def _update_readonly_flag(self, volume, read_only): - """Update the read-only access mode flag of the specified volume. - - :param volume: The UUID of the volume to update. - :param read_only: The value to indicate whether to update volume to - read-only access mode. - :returns: A tuple of http Response and body - """ - return self.clients("cinder").volumes.update_readonly_flag( - volume, read_only) - - @atomic.action_timer("cinder.delete_volume") - def _delete_volume(self, volume): - """Delete the given volume. - - Returns when the volume is actually deleted. - - :param volume: volume object - """ - volume.delete() - bench_utils.wait_for_status( - volume, - ready_statuses=["deleted"], - check_deletion=True, - update_resource=bench_utils.get_from_manager(), - timeout=CONF.benchmark.cinder_volume_delete_timeout, - check_interval=CONF.benchmark.cinder_volume_delete_poll_interval - ) - - @atomic.action_timer("cinder.extend_volume") - def _extend_volume(self, volume, new_size): - """Extend the given volume. - - Returns when the volume is actually extended. - - :param volume: volume object - :param new_size: new volume size in GB, or - dictionary, must contain two values: - min - minimum size volumes will be created as; - max - maximum size volumes will be created as. - Notice: should be bigger volume size - """ - - if isinstance(new_size, dict): - new_size = random.randint(new_size["min"], new_size["max"]) - - volume.extend(volume, new_size) - volume = bench_utils.wait_for( - volume, - ready_statuses=["available"], - update_resource=bench_utils.get_from_manager(), - timeout=CONF.benchmark.cinder_volume_create_timeout, - check_interval=CONF.benchmark.cinder_volume_create_poll_interval - ) - - @atomic.action_timer("cinder.upload_volume_to_image") - def _upload_volume_to_image(self, volume, force=False, - container_format="bare", disk_format="raw"): - """Upload the given volume to image. - - Returns created image. - - :param volume: volume object - :param force: flag to indicate whether to snapshot a volume even if - it's attached to an instance - :param container_format: container format of image. Acceptable - formats: ami, ari, aki, bare, and ovf - :param disk_format: disk format of image. Acceptable formats: - ami, ari, aki, vhd, vmdk, raw, qcow2, vdi and iso - :returns: Returns created image object - """ - resp, img = volume.upload_to_image(force, self.generate_random_name(), - container_format, disk_format) - # NOTE (e0ne): upload_to_image changes volume status to uploading so - # we need to wait until it will be available. 
- volume = bench_utils.wait_for( - volume, - ready_statuses=["available"], - update_resource=bench_utils.get_from_manager(), - timeout=CONF.benchmark.cinder_volume_create_timeout, - check_interval=CONF.benchmark.cinder_volume_create_poll_interval - ) - image_id = img["os-volume_upload_image"]["image_id"] - image = self.clients("glance").images.get(image_id) - wrapper = glance_wrapper.wrap(self._clients.glance, self) - image = bench_utils.wait_for( - image, - ready_statuses=["active"], - update_resource=wrapper.get_image, - timeout=CONF.benchmark.glance_image_create_timeout, - check_interval=CONF.benchmark.glance_image_create_poll_interval - ) - - return image - - @atomic.action_timer("cinder.create_snapshot") - def _create_snapshot(self, volume_id, force=False, **kwargs): - """Create one snapshot. - - Returns when the snapshot is actually created and is in the "Available" - state. - - :param volume_id: volume uuid for creating snapshot - :param force: flag to indicate whether to snapshot a volume even if - it's attached to an instance - :param kwargs: Other optional parameters to initialize the volume - :returns: Created snapshot object - """ - kwargs["force"] = force - - client = cinder_wrapper.wrap(self._clients.cinder, self) - snapshot = client.create_snapshot(volume_id, **kwargs) - - self.sleep_between(CONF.benchmark.cinder_volume_create_prepoll_delay) - snapshot = bench_utils.wait_for( - snapshot, - ready_statuses=["available"], - update_resource=bench_utils.get_from_manager(), - timeout=CONF.benchmark.cinder_volume_create_timeout, - check_interval=CONF.benchmark.cinder_volume_create_poll_interval - ) - return snapshot - - @atomic.action_timer("cinder.delete_snapshot") - def _delete_snapshot(self, snapshot): - """Delete the given snapshot. - - Returns when the snapshot is actually deleted. - - :param snapshot: snapshot object - """ - snapshot.delete() - bench_utils.wait_for_status( - snapshot, - ready_statuses=["deleted"], - check_deletion=True, - update_resource=bench_utils.get_from_manager(), - timeout=CONF.benchmark.cinder_volume_delete_timeout, - check_interval=CONF.benchmark.cinder_volume_delete_poll_interval - ) - - @atomic.action_timer("cinder.create_backup") - def _create_backup(self, volume_id, **kwargs): - """Create a volume backup of the given volume. - - :param volume_id: The ID of the volume to backup. - :param kwargs: Other optional parameters - """ - backup = self.clients("cinder").backups.create(volume_id, **kwargs) - return bench_utils.wait_for( - backup, - ready_statuses=["available"], - update_resource=bench_utils.get_from_manager(), - timeout=CONF.benchmark.cinder_volume_create_timeout, - check_interval=CONF.benchmark.cinder_volume_create_poll_interval - ) - - @atomic.action_timer("cinder.delete_backup") - def _delete_backup(self, backup): - """Delete the given backup. - - Returns when the backup is actually deleted. - - :param backup: backup instance - """ - backup.delete() - bench_utils.wait_for_status( - backup, - ready_statuses=["deleted"], - check_deletion=True, - update_resource=bench_utils.get_from_manager(), - timeout=CONF.benchmark.cinder_volume_delete_timeout, - check_interval=CONF.benchmark.cinder_volume_delete_poll_interval - ) - - @atomic.action_timer("cinder.restore_backup") - def _restore_backup(self, backup_id, volume_id=None): - """Restore the given backup. - - :param backup_id: The ID of the backup to restore. - :param volume_id: The ID of the volume to restore the backup to. 
- """ - restore = self.clients("cinder").restores.restore(backup_id, volume_id) - restored_volume = self.clients("cinder").volumes.get(restore.volume_id) - backup_for_restore = self.clients("cinder").backups.get(backup_id) - bench_utils.wait_for( - backup_for_restore, - ready_statuses=["available"], - update_resource=bench_utils.get_from_manager(), - timeout=CONF.benchmark.cinder_backup_restore_timeout, - check_interval=CONF.benchmark.cinder_backup_restore_poll_interval - ) - return bench_utils.wait_for( - restored_volume, - ready_statuses=["available"], - update_resource=bench_utils.get_from_manager(), - timeout=CONF.benchmark.cinder_volume_create_timeout, - check_interval=CONF.benchmark.cinder_volume_create_poll_interval - ) - - @atomic.action_timer("cinder.list_backups") - def _list_backups(self, detailed=True): - """Return user volume backups list. - - :param detailed: True if detailed information about backup - should be listed - """ - return self.clients("cinder").backups.list(detailed) - - def get_random_server(self): - server_id = random.choice(self.context["tenant"]["servers"]) - return self.clients("nova").servers.get(server_id) - - @atomic.action_timer("cinder.list_transfers") - def _list_transfers(self, detailed=True, search_opts=None): - """Get a list of all volume transfers. - - :param detailed: If True, detailed information about transfer - should be listed - :param search_opts: Search options to filter out volume transfers - :returns: list of :class:`VolumeTransfer` - """ - return self.clients("cinder").transfers.list(detailed, search_opts) - - @atomic.action_timer("cinder.create_volume_type") - def _create_volume_type(self, **kwargs): - """create volume type. - - :param kwargs: Optional additional arguments for volume type creation - :returns: VolumeType object - """ - kwargs["name"] = self.generate_random_name() - return self.admin_clients("cinder").volume_types.create(**kwargs) - - @atomic.action_timer("cinder.delete_volume_type") - def _delete_volume_type(self, volume_type): - """delete a volume type. - - :param volume_type: Name or Id of the volume type - :returns: base on client response return True if the request - has been accepted or not - """ - tuple_res = self.admin_clients("cinder").volume_types.delete( - volume_type) - return (tuple_res[0].status_code == 202) - - @atomic.action_timer("cinder.set_volume_type_keys") - def _set_volume_type_keys(self, volume_type, metadata): - """Set extra specs on a volume type. - - :param volume_type: The :class:`VolumeType` to set extra spec on - :param metadata: A dict of key/value pairs to be set - :returns: extra_specs if the request has been accepted - """ - return volume_type.set_keys(metadata) - - @atomic.action_timer("cinder.get_volume_type") - def _get_volume_type(self, volume_type): - """get details of volume_type. - - :param volume_type: The ID of the :class:`VolumeType` to get - :rtype: :class:`VolumeType` - """ - return self.admin_clients("cinder").volume_types.get(volume_type) - - @atomic.action_timer("cinder.transfer_create") - def _transfer_create(self, volume_id): - """Create a volume transfer. - - :param volume_id: The ID of the volume to transfer - :rtype: VolumeTransfer - """ - name = self.generate_random_name() - return self.clients("cinder").transfers.create(volume_id, name) - - @atomic.action_timer("cinder.transfer_accept") - def _transfer_accept(self, transfer_id, auth_key): - """Accept a volume transfer. - - :param transfer_id: The ID of the transfer to accept. 
- 
-        :param auth_key: The auth_key of the transfer.
-        :rtype: VolumeTransfer
-        """
-        return self.clients("cinder").transfers.accept(transfer_id, auth_key)
- 
-    @atomic.action_timer("cinder.create_encryption_type")
-    def _create_encryption_type(self, volume_type, specs):
-        """Create encryption type for a volume type. Default: admin only.
- 
-        :param volume_type: the volume type on which to add an encryption type
-        :param specs: the encryption type specifications to add
-        :return: an instance of :class: VolumeEncryptionType
-        """
-        return self.admin_clients("cinder").volume_encryption_types.create(
-            volume_type, specs)
- 
-    @atomic.action_timer("cinder.list_encryption_type")
-    def _list_encryption_type(self, search_opts=None):
-        """List all volume encryption types.
- 
-        :param search_opts: Options used when searching for encryption types
-        :return: a list of :class: VolumeEncryptionType instances
-        """
-        return self.admin_clients("cinder").volume_encryption_types.list(
-            search_opts)
- 
-    @atomic.action_timer("cinder.delete_encryption_type")
-    def _delete_encryption_type(self, volume_type):
-        """Delete the encryption type information for the specified volume type.
- 
-        :param volume_type: the volume type whose encryption type information
-                            must be deleted
-        """
-        resp = self.admin_clients("cinder").volume_encryption_types.delete(
-            volume_type)
-        if (resp[0].status_code != 202):
-            raise exceptions.RallyException(
-                _("EncryptionType Deletion Failed"))
diff --git a/rally/plugins/openstack/scenarios/cinder/volume_backups.py b/rally/plugins/openstack/scenarios/cinder/volume_backups.py
deleted file mode 100644
index 9c99d4ea..00000000
--- a/rally/plugins/openstack/scenarios/cinder/volume_backups.py
+++ /dev/null
@@ -1,60 +0,0 @@
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from rally import consts
-from rally.plugins.openstack import scenario
-from rally.plugins.openstack.scenarios.cinder import utils as cinder_utils
-from rally.task import validation
-
-
-"""Scenarios for Cinder Volume Backup."""
-
-
-@validation.add("number", param_name="size", minval=1, integer_only=True)
-@validation.add("restricted_parameters", param_names=["name", "display_name"],
-                subdict="create_volume_kwargs")
-@validation.add("restricted_parameters", param_names="name",
-                subdict="create_backup_kwargs")
-@validation.add("required_services", services=[consts.Service.CINDER])
-@validation.add("required_cinder_services", services="cinder-backup")
-@validation.add("required_platform", platform="openstack", users=True)
-@scenario.configure(context={"cleanup": ["cinder"]},
-                    name="CinderVolumeBackups."
-                         "create_incremental_volume_backup",
-                    platform="openstack")
-class CreateIncrementalVolumeBackup(cinder_utils.CinderBasic):
-    def run(self, size, do_delete=True, create_volume_kwargs=None,
-            create_backup_kwargs=None):
-        """Create an incremental volume backup.
-
-        The scenario first creates a volume, then creates a full backup
-        of it (an incremental backup must be based on a full backup),
-        and finally creates an incremental backup.
-
-        :param size: volume size in GB
-        :param do_delete: deletes backup and volume after creating if True
-        :param create_volume_kwargs: optional args to create a volume
-        :param create_backup_kwargs: optional args to create a volume backup
-        """
-        create_volume_kwargs = create_volume_kwargs or {}
-        create_backup_kwargs = create_backup_kwargs or {}
-
-        volume = self.cinder.create_volume(size, **create_volume_kwargs)
-        backup1 = self.cinder.create_backup(volume.id, **create_backup_kwargs)
-
-        backup2 = self.cinder.create_backup(volume.id, incremental=True)
-
-        if do_delete:
-            self.cinder.delete_backup(backup2)
-            self.cinder.delete_backup(backup1)
-            self.cinder.delete_volume(volume)
diff --git a/rally/plugins/openstack/scenarios/cinder/volume_types.py b/rally/plugins/openstack/scenarios/cinder/volume_types.py
deleted file mode 100644
index 20565448..00000000
--- a/rally/plugins/openstack/scenarios/cinder/volume_types.py
+++ /dev/null
@@ -1,406 +0,0 @@
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from rally.common import logging
-from rally import consts
-from rally.plugins.openstack import scenario
-from rally.plugins.openstack.scenarios.cinder import utils as cinder_utils
-from rally.plugins.openstack.services.storage import cinder_v2
-from rally.task import validation
-
-
-LOG = logging.getLogger(__name__)
-
-
-"""Scenarios for Cinder Volume Type."""
-
-
-@validation.add("required_services", services=[consts.Service.CINDER])
-@validation.add("required_platform", platform="openstack", admin=True)
-@scenario.configure(context={"admin_cleanup": ["cinder"]},
-                    name="CinderVolumeTypes.create_and_delete_volume_type",
-                    platform="openstack")
-class CreateAndDeleteVolumeType(cinder_utils.CinderBasic):
-
-    def run(self, description=None, is_public=True):
-        """Create and delete a volume type.
-
-        :param description: Description of the volume type
-        :param is_public: Volume type visibility
-        """
-        volume_type = self.admin_cinder.create_volume_type(
-            description=description,
-            is_public=is_public)
-        self.admin_cinder.delete_volume_type(volume_type)
-
-
-@validation.add("required_services", services=[consts.Service.CINDER])
-@validation.add("required_platform", platform="openstack", admin=True)
-@scenario.configure(context={"admin_cleanup": ["cinder"]},
-                    name="CinderVolumeTypes.create_and_get_volume_type",
-                    platform="openstack")
-class CreateAndGetVolumeType(cinder_utils.CinderBasic):
-
-    def run(self, description=None, is_public=True):
-        """Create a volume type, then get the details of the type.
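# A hypothetical task-file entry driving the
# CinderVolumeBackups.create_incremental_volume_backup scenario above;
# argument names mirror run()'s signature, runner values are arbitrary
# illustrations.

task_snippet = {
    "CinderVolumeBackups.create_incremental_volume_backup": [{
        "args": {"size": 1, "do_delete": True},
        "runner": {"type": "constant", "times": 2, "concurrency": 2}
    }]
}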
- - :param description: Description of the volume type - :param is_public: Volume type visibility - """ - volume_type = self.admin_cinder.create_volume_type( - description=description, - is_public=is_public) - self.admin_cinder.get_volume_type(volume_type) - - -@validation.add("required_services", services=[consts.Service.CINDER]) -@validation.add("required_api_versions", component="cinder", versions=["2"]) -@validation.add("required_platform", platform="openstack", admin=True) -@scenario.configure(context={"admin_cleanup": ["cinder"]}, - name="CinderVolumeTypes.create_and_update_volume_type", - platform="openstack") -class CreateAndUpdateVolumeType(scenario.OpenStackScenario): - - def run(self, description=None, is_public=True, update_name=False, - update_description=None, update_is_public=None): - """create a volume type, then update the type. - - :param description: Description of the volume type - :param is_public: Volume type visibility - :param update_name: if True, can update name by generating random name. - if False, don't update name. - :param update_description: update Description of the volume type - :param update_is_public: update Volume type visibility - """ - service = cinder_v2.CinderV2Service(self._admin_clients, - self.generate_random_name, - atomic_inst=self.atomic_actions()) - - volume_type = service.create_volume_type( - description=description, - is_public=is_public) - - service.update_volume_type( - volume_type, - update_name=update_name, - description=update_description, - is_public=update_is_public) - - -@validation.add("required_services", services=[consts.Service.CINDER]) -@validation.add("required_platform", platform="openstack", admin=True) -@scenario.configure(context={"admin_cleanup": ["cinder"]}, - name="CinderVolumeTypes.create_and_list_volume_types", - platform="openstack") -class CreateAndListVolumeTypes(cinder_utils.CinderBasic): - - def run(self, description=None, is_public=True): - """Create a volume Type, then list all types. - - :param description: Description of the volume type - :param is_public: Volume type visibility - """ - volume_type = self.admin_cinder.create_volume_type( - description=description, - is_public=is_public) - - pool_list = self.admin_cinder.list_types() - msg = ("type not included into list of available types" - "created type: {}\n" - "pool of types: {}\n").format(volume_type, pool_list) - self.assertIn(volume_type.id, - [vtype.id for vtype in pool_list], - err_msg=msg) - - -@validation.add("required_params", params=[("create_specs", "provider")]) -@validation.add("required_services", services=[consts.Service.CINDER]) -@validation.add("required_platform", platform="openstack", admin=True) -@scenario.configure(context={"admin_cleanup": ["cinder"]}, - name="CinderVolumeTypes.create_volume_type" - "_and_encryption_type", - platform="openstack") -class CreateVolumeTypeAndEncryptionType(cinder_utils.CinderBasic): - - def run(self, create_specs=None, provider=None, cipher=None, - key_size=None, control_location="front-end", description=None, - is_public=True): - """Create encryption type - - This scenario first creates a volume type, then creates an encryption - type for the volume type. - - :param create_specs: The encryption type specifications to add. - DEPRECATED, specify arguments explicitly. - :param provider: The class that provides encryption support. For - example, LuksEncryptor. - :param cipher: The encryption algorithm or mode. - :param key_size: Size of encryption key, in bits. 
- :param control_location: Notional service where encryption is - performed. Valid values are "front-end" - or "back-end." - :param description: Description of the volume type - :param is_public: Volume type visibility - """ - volume_type = self.admin_cinder.create_volume_type( - description=description, - is_public=is_public) - if create_specs is None: - specs = { - "provider": provider, - "cipher": cipher, - "key_size": key_size, - "control_location": control_location - } - else: - LOG.warning("The argument `create_spec` is deprecated since" - " Rally 0.10.0. Specify all arguments from it" - " explicitly.") - specs = create_specs - self.admin_cinder.create_encryption_type(volume_type, - specs=specs) - - -@validation.add("required_params", params=[("create_specs", "provider")]) -@validation.add("required_services", services=[consts.Service.CINDER]) -@validation.add("required_platform", platform="openstack", admin=True) -@scenario.configure(context={"admin_cleanup": ["cinder"]}, - name="CinderVolumeTypes.create_and_list_" - "encryption_type", - platform="openstack") -class CreateAndListEncryptionType(cinder_utils.CinderBasic): - - def run(self, create_specs=None, provider=None, cipher=None, - key_size=None, control_location="front-end", search_opts=None): - """Create and list encryption type - - This scenario firstly creates a volume type, secondly creates an - encryption type for the volume type, thirdly lists all encryption - types. - - :param create_specs: The encryption type specifications to add. - DEPRECATED, specify arguments explicitly. - :param provider: The class that provides encryption support. For - example, LuksEncryptor. - :param cipher: The encryption algorithm or mode. - :param key_size: Size of encryption key, in bits. - :param control_location: Notional service where encryption is - performed. Valid values are "front-end" - or "back-end." - :param search_opts: Options used when search for encryption types - """ - vt_idx = self.context["iteration"] % len(self.context["volume_types"]) - volume_type = self.context["volume_types"][vt_idx] - if create_specs is None: - specs = { - "provider": provider, - "cipher": cipher, - "key_size": key_size, - "control_location": control_location - } - else: - LOG.warning("The argument `create_spec` is deprecated since" - " Rally 0.10.0. Specify all arguments from it" - " explicitly.") - specs = create_specs - self.admin_cinder.create_encryption_type(volume_type["id"], - specs=specs) - self.admin_cinder.list_encryption_type(search_opts) - - -@validation.add("required_services", services=[consts.Service.CINDER]) -@validation.add("required_platform", platform="openstack", admin=True) -@scenario.configure(context={"admin_cleanup": ["cinder"]}, - name="CinderVolumeTypes.create_and_set_volume_type_keys", - platform="openstack") -class CreateAndSetVolumeTypeKeys(cinder_utils.CinderBasic): - - def run(self, volume_type_key, description=None, is_public=True): - """Create and set a volume type's extra specs. 
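# The `specs` dict consumed by the encryption-type scenarios around here
# typically carries values like these; the provider/cipher choices are
# common examples from Cinder deployments, not requirements of this code.

specs = {
    "provider": "LuksEncryptor",
    "cipher": "aes-xts-plain64",
    "key_size": 256,
    "control_location": "front-end"
}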
- 
-        :param volume_type_key: A dict of key/value pairs to be set
-        :param description: Description of the volume type
-        :param is_public: Volume type visibility
-        """
-        volume_type = self.admin_cinder.create_volume_type(
-            description=description,
-            is_public=is_public)
-        self.admin_cinder.set_volume_type_keys(volume_type,
-                                               metadata=volume_type_key)
-
-
-@validation.add("required_services", services=[consts.Service.CINDER])
-@validation.add("required_contexts", contexts="volume_types")
-@validation.add("required_platform", platform="openstack", admin=True)
-@scenario.configure(context={"admin_cleanup": ["cinder"]},
-                    name="CinderVolumeTypes.create_get_and_delete_"
-                         "encryption_type",
-                    platform="openstack")
-class CreateGetAndDeleteEncryptionType(cinder_utils.CinderBasic):
-
-    def run(self, provider=None, cipher=None,
-            key_size=None, control_location="front-end"):
-        """Create, get and delete an encryption type
-
-        This scenario firstly creates an encryption type for a volume
-        type created in the context, then gets detailed information of
-        the created encryption type, finally deletes the created
-        encryption type.
-
-        :param provider: The class that provides encryption support. For
-                         example, LuksEncryptor.
-        :param cipher: The encryption algorithm or mode.
-        :param key_size: Size of encryption key, in bits.
-        :param control_location: Notional service where encryption is
-                                 performed. Valid values are "front-end"
-                                 or "back-end."
-        """
-        vt_idx = self.context["iteration"] % len(self.context["volume_types"])
-        volume_type = self.context["volume_types"][vt_idx]
-        specs = {
-            "provider": provider,
-            "cipher": cipher,
-            "key_size": key_size,
-            "control_location": control_location
-        }
-        self.admin_cinder.create_encryption_type(volume_type["id"],
                                                 specs=specs)
-        self.admin_cinder.get_encryption_type(volume_type["id"])
-        self.admin_cinder.delete_encryption_type(volume_type["id"])
-
-
-@validation.add("required_services", services=[consts.Service.CINDER])
-@validation.add("required_contexts", contexts="volume_types")
-@validation.add("required_params", params=[("create_specs", "provider")])
-@validation.add("required_platform", platform="openstack", admin=True)
-@scenario.configure(context={"admin_cleanup": ["cinder"]},
-                    name="CinderVolumeTypes.create_and_delete_"
-                         "encryption_type",
-                    platform="openstack")
-class CreateAndDeleteEncryptionType(cinder_utils.CinderBasic):
-
-    def run(self, create_specs=None, provider=None, cipher=None,
-            key_size=None, control_location="front-end"):
-        """Create and delete encryption type
-
-        This scenario firstly creates an encryption type for a given
-        volume type, then deletes the created encryption type.
-
-        :param create_specs: the encryption type specifications to add
-        :param provider: The class that provides encryption support. For
-                         example, LuksEncryptor.
-        :param cipher: The encryption algorithm or mode.
-        :param key_size: Size of encryption key, in bits.
-        :param control_location: Notional service where encryption is
-                                 performed. Valid values are "front-end"
-                                 or "back-end."
-        """
-        vt_idx = self.context["iteration"] % len(self.context["volume_types"])
-        volume_type = self.context["volume_types"][vt_idx]
-        if create_specs is None:
-            specs = {
-                "provider": provider,
-                "cipher": cipher,
-                "key_size": key_size,
-                "control_location": control_location
-            }
-        else:
-            LOG.warning("The argument `create_spec` is deprecated since"
-                        " Rally 0.10.0. 
Specify all arguments from it" - " explicitly.") - specs = create_specs - self.admin_cinder.create_encryption_type(volume_type["id"], - specs=specs) - self.admin_cinder.delete_encryption_type(volume_type["id"]) - - -@validation.add("required_services", services=consts.Service.CINDER) -@validation.add("required_contexts", contexts="volume_types") -@validation.add("required_platform", platform="openstack", admin=True) -@scenario.configure(context={"admin_cleanup": ["cinder"]}, - name="CinderVolumeTypes.create_and_update_encryption_type", - platform="openstack") -class CreateAndUpdateEncryptionType(cinder_utils.CinderBasic): - - def run(self, create_provider=None, create_cipher=None, - create_key_size=None, create_control_location="front-end", - update_provider=None, update_cipher=None, - update_key_size=None, update_control_location=None): - """Create and update encryption type - - This scenario firstly creates a volume type, secondly creates an - encryption type for the volume type, thirdly updates the encryption - type. - - :param create_provider: The class that provides encryption support. For - example, LuksEncryptor. - :param create_cipher: The encryption algorithm or mode. - :param create_key_size: Size of encryption key, in bits. - :param create_control_location: Notional service where encryption is - performed. Valid values are "front-end" - or "back-end." - :param update_provider: The class that provides encryption support. For - example, LuksEncryptor. - :param update_cipher: The encryption algorithm or mode. - :param update_key_size: Size of encryption key, in bits. - :param update_control_location: Notional service where encryption is - performed. Valid values are "front-end" - or "back-end." - """ - vt_idx = self.context["iteration"] % len(self.context["volume_types"]) - volume_type = self.context["volume_types"][vt_idx] - create_specs = { - "provider": create_provider, - "cipher": create_cipher, - "key_size": create_key_size, - "control_location": create_control_location - } - update_specs = { - "provider": update_provider, - "cipher": update_cipher, - "key_size": update_key_size, - "control_location": update_control_location - } - self.admin_cinder.create_encryption_type(volume_type["id"], - specs=create_specs) - self.admin_cinder.update_encryption_type(volume_type["id"], - specs=update_specs) - - -@validation.add("required_platform", platform="openstack", admin=True) -@validation.add("required_api_versions", component="cinder", versions=["2"]) -@validation.add("required_services", services=consts.Service.CINDER) -@scenario.configure(context={"admin_cleanup": ["cinder"]}, - name="CinderVolumeTypes.create_volume_type_" - "add_and_list_type_access", - platform="openstack") -class CreateVolumeTypeAddAndListTypeAccess(scenario.OpenStackScenario): - - def run(self, description=None, is_public=False): - """Add and list volume type access for the given project. - - This scenario first creates a private volume type, then add project - access and list project access to it. 
- 
-        :param description: Description of the volume type
-        :param is_public: Volume type visibility
-        """
-        service = cinder_v2.CinderV2Service(self._admin_clients,
-                                            self.generate_random_name,
-                                            atomic_inst=self.atomic_actions())
-        volume_type = service.create_volume_type(description=description,
-                                                 is_public=is_public)
-        service.add_type_access(volume_type,
-                                project=self.context["tenant"]["id"])
-        service.list_type_access(volume_type)
diff --git a/rally/plugins/openstack/scenarios/cinder/volumes.py b/rally/plugins/openstack/scenarios/cinder/volumes.py
deleted file mode 100644
index fd85d580..00000000
--- a/rally/plugins/openstack/scenarios/cinder/volumes.py
+++ /dev/null
@@ -1,851 +0,0 @@
-# Copyright 2013 Huawei Technologies Co.,LTD.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import random
-
-from rally.common import logging
-from rally import consts
-from rally import exceptions
-from rally.plugins.openstack import scenario
-from rally.plugins.openstack.scenarios.cinder import utils as cinder_utils
-from rally.plugins.openstack.scenarios.glance import images
-from rally.plugins.openstack.scenarios.nova import utils as nova_utils
-from rally.task import atomic
-from rally.task import types
-from rally.task import validation
-
-LOG = logging.getLogger(__name__)
-
-"""Scenarios for Cinder Volumes."""
-
-
-@types.convert(image={"type": "glance_image"})
-@validation.add("restricted_parameters", param_names=["name", "display_name"])
-@validation.add("image_exists", param_name="image", nullable=True)
-@validation.add("required_services", services=[consts.Service.CINDER])
-@validation.add("required_platform", platform="openstack", users=True)
-@scenario.configure(context={"cleanup": ["cinder"]},
-                    name="CinderVolumes.create_and_list_volume",
-                    platform="openstack")
-class CreateAndListVolume(cinder_utils.CinderBasic):
-
-    def run(self, size, detailed=True, image=None, **kwargs):
-        """Create a volume and list all volumes.
-
-        Measure the "cinder volume-list" command performance.
-
-        If you have only 1 user in your context, you will
-        add 1 volume on every iteration. So you will have more
-        and more volumes and will be able to measure the
-        performance of the "cinder volume-list" command depending on
-        the number of volumes owned by users.
-
-        :param size: volume size (integer, in GB) or
-                     dictionary, must contain two values:
-                     min - minimum size volumes will be created as;
-                     max - maximum size volumes will be created as.
- :param detailed: determines whether the volume listing should contain - detailed information about all of them - :param image: image to be used to create volume - :param kwargs: optional args to create a volume - """ - if image: - kwargs["imageRef"] = image - - self.cinder.create_volume(size, **kwargs) - self.cinder.list_volumes(detailed) - - -@types.convert(image={"type": "glance_image"}) -@validation.add("restricted_parameters", param_names=["name", "display_name"]) -@validation.add("image_exists", param_name="image", nullable=True) -@validation.add("required_services", services=[consts.Service.CINDER]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup": ["cinder"]}, - name="CinderVolumes.create_and_get_volume", - platform="openstack") -class CreateAndGetVolume(cinder_utils.CinderBasic): - - def run(self, size, image=None, **kwargs): - """Create a volume and get the volume. - - Measure the "cinder show" command performance. - - :param size: volume size (integer, in GB) or - dictionary, must contain two values: - min - minimum size volumes will be created as; - max - maximum size volumes will be created as. - :param image: image to be used to create volume - :param kwargs: optional args to create a volume - """ - if image: - kwargs["imageRef"] = image - - volume = self.cinder.create_volume(size, **kwargs) - self.cinder.get_volume(volume.id) - - -@validation.add("required_services", services=[consts.Service.CINDER]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup": ["cinder"]}, - name="CinderVolumes.list_volumes", - platform="openstack") -class ListVolumes(cinder_utils.CinderBasic): - - def run(self, detailed=True): - """List all volumes. - - This simple scenario tests the cinder list command by listing - all the volumes. - - :param detailed: True if detailed information about volumes - should be listed - """ - - self.cinder.list_volumes(detailed) - - -@validation.add("required_services", services=[consts.Service.CINDER]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(name="CinderVolumes.list_types", platform="openstack") -class ListTypes(cinder_utils.CinderBasic): - - def run(self, search_opts=None, is_public=None): - """List all volume types. - - This simple scenario tests the cinder type-list command by listing - all the volume types. - - :param search_opts: Options used when search for volume types - :param is_public: If query public volume type - """ - - self.cinder.list_types(search_opts, is_public=is_public) - - -@validation.add("required_services", services=[consts.Service.CINDER]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(name="CinderVolumes.list_transfers", platform="openstack") -class ListTransfers(cinder_utils.CinderBasic): - - def run(self, detailed=True, search_opts=None): - """List all transfers. - - This simple scenario tests the "cinder transfer-list" command by - listing all the volume transfers. - - :param detailed: If True, detailed information about volume transfer - should be listed - :param search_opts: Search options to filter out volume transfers. 
- """ - - self.cinder.list_transfers(detailed, search_opts=search_opts) - - -@types.convert(image={"type": "glance_image"}) -@validation.add("restricted_parameters", param_names=["name", "display_name"], - subdict="create_volume_kwargs") -@validation.add("restricted_parameters", param_names=["name", "display_name"], - subdict="update_volume_kwargs") -@validation.add("image_exists", param_name="image", nullable=True) -@validation.add("required_services", services=[consts.Service.CINDER]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup": ["cinder"]}, - name="CinderVolumes.create_and_update_volume", - platform="openstack") -class CreateAndUpdateVolume(cinder_utils.CinderBasic): - - def run(self, size, image=None, create_volume_kwargs=None, - update_volume_kwargs=None): - """Create a volume and update its name and description. - - :param size: volume size (integer, in GB) - :param image: image to be used to create volume - :param create_volume_kwargs: dict, to be used to create volume - :param update_volume_kwargs: dict, to be used to update volume - update_volume_kwargs["update_name"]=True, if updating the - name of volume. - update_volume_kwargs["description"]="desp", if updating the - description of volume. - """ - create_volume_kwargs = create_volume_kwargs or {} - update_volume_kwargs = update_volume_kwargs or {} - if image: - create_volume_kwargs["imageRef"] = image - - if update_volume_kwargs.pop("update_name", False): - update_volume_kwargs["name"] = self.generate_random_name() - - volume = self.cinder.create_volume(size, **create_volume_kwargs) - self.cinder.update_volume(volume, **update_volume_kwargs) - - -@types.convert(image={"type": "glance_image"}) -@validation.add("restricted_parameters", param_names=["name", "display_name"]) -@validation.add("image_exists", param_name="image", nullable=True) -@validation.add("required_services", services=[consts.Service.CINDER]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup": ["cinder"]}, - name="CinderVolumes.create_and_delete_volume", - platform="openstack") -class CreateAndDeleteVolume(cinder_utils.CinderBasic): - - def run(self, size, image=None, min_sleep=0, max_sleep=0, **kwargs): - """Create and then delete a volume. - - Good for testing a maximal bandwidth of cloud. Optional 'min_sleep' - and 'max_sleep' parameters allow the scenario to simulate a pause - between volume creation and deletion (of random duration from - [min_sleep, max_sleep]). - - :param size: volume size (integer, in GB) or - dictionary, must contain two values: - min - minimum size volumes will be created as; - max - maximum size volumes will be created as. 
- 
-        :param image: image to be used to create volume
-        :param min_sleep: minimum sleep time between volume creation and
-                          deletion (in seconds)
-        :param max_sleep: maximum sleep time between volume creation and
-                          deletion (in seconds)
-        :param kwargs: optional args to create a volume
-        """
-        if image:
-            kwargs["imageRef"] = image
-
-        volume = self.cinder.create_volume(size, **kwargs)
-        self.sleep_between(min_sleep, max_sleep)
-        self.cinder.delete_volume(volume)
-
-
-@types.convert(image={"type": "glance_image"})
-@validation.add("restricted_parameters", param_names=["name", "display_name"])
-@validation.add("image_exists", param_name="image", nullable=True)
-@validation.add("required_services", services=[consts.Service.CINDER])
-@validation.add("required_platform", platform="openstack", users=True)
-@scenario.configure(context={"cleanup": ["cinder"]},
-                    name="CinderVolumes.create_volume",
-                    platform="openstack")
-class CreateVolume(cinder_utils.CinderBasic):
-
-    def run(self, size, image=None, **kwargs):
-        """Create a volume.
-
-        Good test to check how the number of active volumes influences
-        the performance of creating a new one.
-
-        :param size: volume size (integer, in GB) or
-                     dictionary, must contain two values:
-                     min - minimum size volumes will be created as;
-                     max - maximum size volumes will be created as.
-        :param image: image to be used to create volume
-        :param kwargs: optional args to create a volume
-        """
-        if image:
-            kwargs["imageRef"] = image
-
-        self.cinder.create_volume(size, **kwargs)
-
-
-@validation.add("required_services", services=[consts.Service.CINDER])
-@validation.add("required_platform", platform="openstack", users=True)
-@validation.add("required_contexts", contexts=("volumes"))
-@scenario.configure(context={"cleanup": ["cinder"]},
-                    name="CinderVolumes.modify_volume_metadata",
-                    platform="openstack")
-class ModifyVolumeMetadata(cinder_utils.CinderBasic):
-
-    def run(self, sets=10, set_size=3, deletes=5, delete_size=3):
-        """Modify a volume's metadata.
-
-        This requires a volume to be created with the volumes
-        context. Additionally, ``sets * set_size`` must be greater
-        than or equal to ``deletes * delete_size``.
-
-        :param sets: how many set_metadata operations to perform
-        :param set_size: number of metadata keys to set in each
-                         set_metadata operation
-        :param deletes: how many delete_metadata operations to perform
-        :param delete_size: number of metadata keys to delete in each
-                            delete_metadata operation
-        """
-        if sets * set_size < deletes * delete_size:
-            raise exceptions.InvalidArgumentsException(
-                "Not enough metadata keys will be created: "
-                "Setting %(num_keys)s keys, but deleting %(num_deletes)s" %
-                {"num_keys": sets * set_size,
-                 "num_deletes": deletes * delete_size})
-
-        volume = random.choice(self.context["tenant"]["volumes"])
-        keys = self.cinder.set_metadata(volume["id"], sets=sets,
-                                        set_size=set_size)
-        self.cinder.delete_metadata(volume["id"], keys=keys,
-                                    deletes=deletes,
-                                    delete_size=delete_size)
-
-
-@validation.add("required_services", services=[consts.Service.CINDER])
-@validation.add("restricted_parameters", param_names=["name", "display_name"])
-@validation.add("required_platform", platform="openstack", users=True)
-@scenario.configure(context={"cleanup": ["cinder"]},
-                    name="CinderVolumes.create_and_extend_volume",
-                    platform="openstack")
-class CreateAndExtendVolume(cinder_utils.CinderBasic):
-
-    def run(self, size, new_size, min_sleep=0, max_sleep=0, **kwargs):
-        """Create and extend a volume and then delete it.
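# The `size`/`new_size` arguments of these scenarios are either an int
# (GB) or a {"min": ..., "max": ...} dict; in the dict form a concrete
# size is drawn at random per iteration, as the service layer does. A
# self-contained sketch of that contract:

import random

def resolve_size(size):
    # accept a plain integer, or draw uniformly from a min/max dict
    if isinstance(size, dict):
        return random.randint(size["min"], size["max"])
    return size

assert resolve_size(3) == 3
assert 1 <= resolve_size({"min": 1, "max": 5}) <= 5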
-
-
-        :param size: volume size (in GB) or
-                     dictionary, must contain two values:
-                     min - minimum size volumes will be created as;
-                     max - maximum size volumes will be created as.
-        :param new_size: volume new size (in GB) to extend to, or
-                         dictionary, must contain two values:
-                         min - minimum size volumes will be created as;
-                         max - maximum size volumes will be created as.
-                         Note: must be larger than the current volume size
-        :param min_sleep: minimum sleep time between volume extension and
-                          deletion (in seconds)
-        :param max_sleep: maximum sleep time between volume extension and
-                          deletion (in seconds)
-        :param kwargs: optional args to extend the volume
-        """
-        volume = self.cinder.create_volume(size, **kwargs)
-        self.cinder.extend_volume(volume, new_size=new_size)
-        self.sleep_between(min_sleep, max_sleep)
-        self.cinder.delete_volume(volume)
-
-
-@validation.add("required_services", services=[consts.Service.CINDER])
-@validation.add("restricted_parameters", param_names=["name", "display_name"])
-@validation.add("required_contexts", contexts=("volumes"))
-@validation.add("required_platform", platform="openstack", users=True)
-@scenario.configure(context={"cleanup": ["cinder"]},
-                    name="CinderVolumes.create_from_volume_and_delete_volume",
-                    platform="openstack")
-class CreateFromVolumeAndDeleteVolume(cinder_utils.CinderBasic):
-
-    def run(self, size, min_sleep=0, max_sleep=0, **kwargs):
-        """Create volume from volume and then delete it.
-
-        Scenario for testing volume clone. Optional 'min_sleep' and
-        'max_sleep' parameters allow the scenario to simulate a pause
-        between volume creation and deletion (of random duration from
-        [min_sleep, max_sleep]).
-
-        :param size: volume size (in GB), or
-                     dictionary, must contain two values:
-                     min - minimum size volumes will be created as;
-                     max - maximum size volumes will be created as.
-                     Must be equal to or larger than the source volume size.
-
-        :param min_sleep: minimum sleep time between volume creation and
-                          deletion (in seconds)
-        :param max_sleep: maximum sleep time between volume creation and
-                          deletion (in seconds)
-        :param kwargs: optional args to create a volume
-        """
-        source_vol = random.choice(self.context["tenant"]["volumes"])
-        volume = self.cinder.create_volume(size, source_volid=source_vol["id"],
-                                           **kwargs)
-        self.sleep_between(min_sleep, max_sleep)
-        self.cinder.delete_volume(volume)
-
-
-@validation.add("required_services", services=[consts.Service.CINDER])
-@validation.add("restricted_parameters", param_names=["name", "display_name"])
-@validation.add("required_contexts", contexts=("volumes"))
-@validation.add("required_platform", platform="openstack", users=True)
-@scenario.configure(context={"cleanup": ["cinder"]},
-                    name="CinderVolumes.create_and_delete_snapshot",
-                    platform="openstack")
-class CreateAndDeleteSnapshot(cinder_utils.CinderBasic):
-
-    def run(self, force=False, min_sleep=0, max_sleep=0, **kwargs):
-        """Create and then delete a volume-snapshot.
-
-        Optional 'min_sleep' and 'max_sleep' parameters allow the scenario
-        to simulate a pause between snapshot creation and deletion
-        (of random duration from [min_sleep, max_sleep]).
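# min_sleep/max_sleep feed self.sleep_between(), which pauses for a random
# duration within [min_sleep, max_sleep] and accounts the pause as idle
# time rather than as an atomic action. A minimal self-contained model of
# that behavior:

import random
import time

def sleep_between_sketch(min_sleep, max_sleep):
    # pick a uniformly random pause in the configured window
    time.sleep(random.uniform(min_sleep, max_sleep))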
-
-
-@validation.add("required_services", services=[consts.Service.CINDER])
-@validation.add("restricted_parameters", param_names=["name", "display_name"])
-@validation.add("required_contexts", contexts=("volumes"))
-@validation.add("required_platform", platform="openstack", users=True)
-@scenario.configure(context={"cleanup": ["cinder"]},
-                    name="CinderVolumes.create_and_delete_snapshot",
-                    platform="openstack")
-class CreateAndDeleteSnapshot(cinder_utils.CinderBasic):
-
-    def run(self, force=False, min_sleep=0, max_sleep=0, **kwargs):
-        """Create and then delete a volume-snapshot.
-
-        Optional 'min_sleep' and 'max_sleep' parameters allow the scenario
-        to simulate a pause between snapshot creation and deletion
-        (of random duration from [min_sleep, max_sleep]).
-
-        :param force: when set to True, allows snapshot of a volume when
-                      the volume is attached to an instance
-        :param min_sleep: minimum sleep time between snapshot creation and
-                          deletion (in seconds)
-        :param max_sleep: maximum sleep time between snapshot creation and
-                          deletion (in seconds)
-        :param kwargs: optional args to create a snapshot
-        """
-        volume = random.choice(self.context["tenant"]["volumes"])
-        snapshot = self.cinder.create_snapshot(volume["id"], force=force,
-                                               **kwargs)
-        self.sleep_between(min_sleep, max_sleep)
-        self.cinder.delete_snapshot(snapshot)
-
-
-@types.convert(image={"type": "glance_image"},
-               flavor={"type": "nova_flavor"})
-@validation.add("restricted_parameters", param_names=["name", "display_name"],
-                subdict="create_volume_params")
-@validation.add("image_valid_on_flavor", flavor_param="flavor",
-                image_param="image")
-@validation.add("required_services", services=[consts.Service.NOVA,
-                                               consts.Service.CINDER])
-@validation.add("required_platform", platform="openstack", users=True)
-@scenario.configure(context={"cleanup": ["cinder", "nova"]},
-                    name="CinderVolumes.create_and_attach_volume",
-                    platform="openstack")
-class CreateAndAttachVolume(cinder_utils.CinderBasic,
-                            nova_utils.NovaScenario):
-
-    @logging.log_deprecated_args(
-        "Use 'create_vm_params' for additional instance parameters.",
-        "0.2.0", ["kwargs"], once=True)
-    def run(self, size, image, flavor, create_volume_params=None,
-            create_vm_params=None, **kwargs):
-        """Create a VM and attach a volume to it.
-
-        Simple test to create a VM and attach a volume, then
-        detach the volume and delete volume/VM.
-
-        :param size: volume size (integer, in GB) or
-                     dictionary, must contain two values:
-                         min - minimum size volumes will be created as;
-                         max - maximum size volumes will be created as.
-        :param image: Glance image name to use for the VM
-        :param flavor: VM flavor name
-        :param create_volume_params: optional arguments for volume creation
-        :param create_vm_params: optional arguments for VM creation
-        :param kwargs: (deprecated) optional arguments for VM creation
-        """
-
-        create_volume_params = create_volume_params or {}
-
-        if kwargs and create_vm_params:
-            raise ValueError("You cannot set both 'kwargs' "
                             "and 'create_vm_params' attributes. "
                             "Please use 'create_vm_params'.")
-
-        create_vm_params = create_vm_params or kwargs or {}
-
-        server = self._boot_server(image, flavor, **create_vm_params)
-        volume = self.cinder.create_volume(size, **create_volume_params)
-
-        attachment = self._attach_volume(server, volume)
-        self._detach_volume(server, volume, attachment)
-
-        self.cinder.delete_volume(volume)
-        self._delete_server(server)
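
The kwargs/create_vm_params handling in CreateAndAttachVolume.run() above is
a common deprecation pattern: accept the old catch-all argument, forbid
mixing it with its replacement, and prefer the replacement. The same rule in
isolation (illustrative names, not Rally code):

    def merge_vm_params(create_vm_params=None, **kwargs):
        # Reject ambiguous input: deprecated kwargs and the new explicit
        # parameter must not be combined in one call.
        if kwargs and create_vm_params:
            raise ValueError("You cannot set both 'kwargs' and "
                             "'create_vm_params' attributes. "
                             "Please use 'create_vm_params'.")
        return create_vm_params or kwargs or {}

    assert merge_vm_params(availability_zone="nova") == {
        "availability_zone": "nova"}  # deprecated style still accepted
    assert merge_vm_params({"key_name": "demo"}) == {"key_name": "demo"}
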
-
-
-@validation.add("restricted_parameters", param_names=["name", "display_name"])
-@validation.add("required_services", services=[consts.Service.NOVA,
-                                               consts.Service.CINDER])
-@validation.add("volume_type_exists", param_name="volume_type", nullable=True)
-@validation.add("required_platform", platform="openstack", users=True)
-@scenario.configure(context={"cleanup": ["cinder", "nova"]},
-                    name="CinderVolumes.create_snapshot_and_attach_volume",
-                    platform="openstack")
-class CreateSnapshotAndAttachVolume(cinder_utils.CinderBasic,
-                                    nova_utils.NovaScenario):
-
-    def run(self, volume_type=None, size=None, **kwargs):
-        """Create volume, snapshot and attach/detach volume.
-
-        :param volume_type: Name of volume type to use
-        :param size: Volume size - dictionary, contains two values:
-                         min - minimum size volumes will be created as;
-                         max - maximum size volumes will be created as.
-                     default values: {"min": 1, "max": 5}
-        :param kwargs: Optional parameters used during volume
-                       snapshot creation.
-        """
-        if size is None:
-            size = {"min": 1, "max": 5}
-
-        volume = self.cinder.create_volume(size, volume_type=volume_type)
-        snapshot = self.cinder.create_snapshot(volume.id, force=False,
-                                               **kwargs)
-
-        server = self.get_random_server()
-
-        attachment = self._attach_volume(server, volume)
-        self._detach_volume(server, volume, attachment)
-
-        self.cinder.delete_snapshot(snapshot)
-        self.cinder.delete_volume(volume)
-
-
-@validation.add("required_services", services=[consts.Service.NOVA,
-                                               consts.Service.CINDER])
-@validation.add("restricted_parameters", param_names=["name", "display_name"],
-                subdict="create_volume_kwargs")
-@validation.add("restricted_parameters", param_names=["name", "display_name"],
-                subdict="create_snapshot_kwargs")
-@validation.add("required_platform", platform="openstack", users=True)
-@scenario.configure(context={"cleanup": ["cinder", "nova"]},
-                    name="CinderVolumes.create_nested_snapshots"
-                         "_and_attach_volume",
-                    platform="openstack")
-class CreateNestedSnapshotsAndAttachVolume(cinder_utils.CinderBasic,
-                                           nova_utils.NovaScenario):
-
-    @logging.log_deprecated_args(
-        "Use 'create_snapshot_kwargs' for additional snapshot kwargs.",
-        "0.4.1", ["kwargs"], once=True)
-    def run(self, size=None, nested_level=1, create_volume_kwargs=None,
-            create_snapshot_kwargs=None, **kwargs):
-        """Create a volume from a snapshot and attach/detach the volume.
-
-        This scenario creates a volume, takes a snapshot of it, attaches
-        the volume, then creates a new volume from the existing snapshot,
-        and so on, down to the requested nesting level; finally, all
-        volumes are detached and deleted.
-        volume->snapshot->volume->snapshot->volume ...
-
-        :param size: Volume size - dictionary, contains two values:
-                         min - minimum size volumes will be created as;
-                         max - maximum size volumes will be created as.
-                     default values: {"min": 1, "max": 5}
-        :param nested_level: amount of nested levels
-        :param create_volume_kwargs: optional args to create a volume
-        :param create_snapshot_kwargs: optional args to create a snapshot
-        :param kwargs: Optional parameters used during volume
-                       snapshot creation.
-        """
-        if size is None:
-            size = {"min": 1, "max": 5}
-
-        # NOTE: Volume size cannot be smaller than the snapshot size, so
-        #       a volume of the specified size should be created up front
-        #       to avoid a size mismatch between volume and snapshot due
-        #       to the random size picked in the _create_volume method.
- size = random.randint(size["min"], size["max"]) - - create_volume_kwargs = create_volume_kwargs or {} - create_snapshot_kwargs = create_snapshot_kwargs or kwargs or {} - server = self.get_random_server() - - source_vol = self.cinder.create_volume(size, **create_volume_kwargs) - snapshot = self.cinder.create_snapshot(source_vol.id, force=False, - **create_snapshot_kwargs) - attachment = self._attach_volume(server, source_vol) - - nes_objs = [(server, source_vol, snapshot, attachment)] - for i in range(nested_level - 1): - volume = self.cinder.create_volume(size, snapshot_id=snapshot.id) - snapshot = self.cinder.create_snapshot(volume.id, force=False, - **create_snapshot_kwargs) - server = self.get_random_server() - attachment = self._attach_volume(server, volume) - - nes_objs.append((server, volume, snapshot, attachment)) - - nes_objs.reverse() - for server, volume, snapshot, attachment in nes_objs: - self._detach_volume(server, volume, attachment) - self.cinder.delete_snapshot(snapshot) - self.cinder.delete_volume(volume) - - -@validation.add("required_services", services=[consts.Service.CINDER]) -@validation.add("restricted_parameters", param_names=["name", "display_name"]) -@validation.add("required_contexts", contexts=("volumes")) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup": ["cinder"]}, - name="CinderVolumes.create_and_list_snapshots", - platform="openstack") -class CreateAndListSnapshots(cinder_utils.CinderBasic, - nova_utils.NovaScenario): - - def run(self, force=False, detailed=True, **kwargs): - """Create and then list a volume-snapshot. - - :param force: when set to True, allows snapshot of a volume when - the volume is attached to an instance - :param detailed: True if detailed information about snapshots - should be listed - :param kwargs: optional args to create a snapshot - """ - volume = random.choice(self.context["tenant"]["volumes"]) - self.cinder.create_snapshot(volume["id"], force=force, **kwargs) - self.cinder.list_snapshots(detailed) - - -@types.convert(image={"type": "glance_image"}) -@validation.add("required_services", services=[consts.Service.CINDER, - consts.Service.GLANCE]) -@validation.add("restricted_parameters", param_names=["name", "display_name"]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup": ["cinder", "glance"]}, - name="CinderVolumes.create_and_upload_volume_to_image", - platform="openstack") -class CreateAndUploadVolumeToImage(cinder_utils.CinderBasic, - images.GlanceBasic): - - def run(self, size, image=None, force=False, container_format="bare", - disk_format="raw", do_delete=True, **kwargs): - """Create and upload a volume to image. - - :param size: volume size (integers, in GB), or - dictionary, must contain two values: - min - minimum size volumes will be created as; - max - maximum size volumes will be created as. - :param image: image to be used to create volume. 
- :param force: when set to True volume that is attached to an instance - could be uploaded to image - :param container_format: image container format - :param disk_format: disk format for image - :param do_delete: deletes image and volume after uploading if True - :param kwargs: optional args to create a volume - """ - if image: - kwargs["imageRef"] = image - volume = self.cinder.create_volume(size, **kwargs) - image = self.cinder.upload_volume_to_image( - volume, force=force, container_format=container_format, - disk_format=disk_format) - - if do_delete: - self.cinder.delete_volume(volume) - self.glance.delete_image(image.id) - - -@validation.add("restricted_parameters", param_names=["name", "display_name"], - subdict="create_volume_kwargs") -@validation.add("restricted_parameters", param_names="name", - subdict="create_backup_kwargs") -@validation.add("required_services", services=[consts.Service.CINDER]) -@validation.add("required_cinder_services", services="cinder-backup") -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup": ["cinder"]}, - name="CinderVolumes.create_volume_backup", - platform="openstack") -class CreateVolumeBackup(cinder_utils.CinderBasic): - - def run(self, size, do_delete=True, create_volume_kwargs=None, - create_backup_kwargs=None): - """Create a volume backup. - - :param size: volume size in GB - :param do_delete: if True, a volume and a volume backup will - be deleted after creation. - :param create_volume_kwargs: optional args to create a volume - :param create_backup_kwargs: optional args to create a volume backup - """ - create_volume_kwargs = create_volume_kwargs or {} - create_backup_kwargs = create_backup_kwargs or {} - - volume = self.cinder.create_volume(size, **create_volume_kwargs) - backup = self.cinder.create_backup(volume.id, **create_backup_kwargs) - - if do_delete: - self.cinder.delete_volume(volume) - self.cinder.delete_backup(backup) - - -@validation.add("restricted_parameters", param_names=["name", "display_name"], - subdict="create_volume_kwargs") -@validation.add("restricted_parameters", param_names="name", - subdict="create_backup_kwargs") -@validation.add("required_services", services=[consts.Service.CINDER]) -@validation.add("required_cinder_services", services="cinder-backup") -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup": ["cinder"]}, - name="CinderVolumes.create_and_restore_volume_backup", - platform="openstack") -class CreateAndRestoreVolumeBackup(cinder_utils.CinderBasic): - - def run(self, size, do_delete=True, create_volume_kwargs=None, - create_backup_kwargs=None): - """Restore volume backup. - - :param size: volume size in GB - :param do_delete: if True, the volume and the volume backup will - be deleted after creation. 
-        :param create_volume_kwargs: optional args to create a volume
-        :param create_backup_kwargs: optional args to create a volume backup
-        """
-        create_volume_kwargs = create_volume_kwargs or {}
-        create_backup_kwargs = create_backup_kwargs or {}
-
-        volume = self.cinder.create_volume(size, **create_volume_kwargs)
-        backup = self.cinder.create_backup(volume.id, **create_backup_kwargs)
-        self.cinder.restore_backup(backup.id)
-
-        if do_delete:
-            self.cinder.delete_volume(volume)
-            self.cinder.delete_backup(backup)
-
-
-@validation.add("restricted_parameters", param_names=["name", "display_name"],
-                subdict="create_volume_kwargs")
-@validation.add("restricted_parameters", param_names="name",
-                subdict="create_backup_kwargs")
-@validation.add("required_services", services=[consts.Service.CINDER])
-@validation.add("required_cinder_services", services="cinder-backup")
-@validation.add("required_platform", platform="openstack", users=True)
-@scenario.configure(context={"cleanup": ["cinder"]},
-                    name="CinderVolumes.create_and_list_volume_backups",
-                    platform="openstack")
-class CreateAndListVolumeBackups(cinder_utils.CinderBasic):
-
-    def run(self, size, detailed=True, do_delete=True,
-            create_volume_kwargs=None, create_backup_kwargs=None):
-        """Create and then list a volume backup.
-
-        :param size: volume size in GB
-        :param detailed: True if detailed information about backup
-                         should be listed
-        :param do_delete: if True, a volume backup will be deleted
-        :param create_volume_kwargs: optional args to create a volume
-        :param create_backup_kwargs: optional args to create a volume backup
-        """
-        create_volume_kwargs = create_volume_kwargs or {}
-        create_backup_kwargs = create_backup_kwargs or {}
-
-        volume = self.cinder.create_volume(size, **create_volume_kwargs)
-        backup = self.cinder.create_backup(volume.id, **create_backup_kwargs)
-        self.cinder.list_backups(detailed)
-
-        if do_delete:
-            self.cinder.delete_volume(volume)
-            self.cinder.delete_backup(backup)
-
-
-@types.convert(image={"type": "glance_image"})
-@validation.add("restricted_parameters", param_names=["name", "display_name"])
-@validation.add("image_exists", param_name="image", nullable=True)
-@validation.add("required_services", services=[consts.Service.CINDER])
-@validation.add("required_platform", platform="openstack", users=True)
-@scenario.configure(context={"cleanup": ["cinder"]},
-                    name="CinderVolumes.create_volume_and_clone",
-                    platform="openstack")
-class CreateVolumeAndClone(cinder_utils.CinderBasic):
-
-    def run(self, size, image=None, nested_level=1, **kwargs):
-        """Create a volume, then clone it to another volume.
-
-        This creates a volume, clones it to another volume, and then
-        clones the new volume to the next volume, and so on:
-        1. create source volume (from image)
-        2. clone source volume to volume1
-        3. clone volume1 to volume2
-        4. clone volume2 to volume3
-        5. ...
-
-        :param size: volume size (integer, in GB) or
-                     dictionary, must contain two values:
-                         min - minimum size volumes will be created as;
-                         max - maximum size volumes will be created as.
- :param image: image to be used to create initial volume - :param nested_level: amount of nested levels - :param kwargs: optional args to create volumes - """ - if image: - kwargs["imageRef"] = image - - source_vol = self.cinder.create_volume(size, **kwargs) - - kwargs.pop("imageRef", None) - for i in range(nested_level): - with atomic.ActionTimer(self, "cinder.clone_volume"): - source_vol = self.cinder.create_volume( - source_vol.size, source_volid=source_vol.id, - **kwargs) - - -@validation.add("required_services", services=[consts.Service.CINDER]) -@validation.add("restricted_parameters", param_names=["name", "display_name"]) -@validation.add("restricted_parameters", param_names=["name", "display_name"], - subdict="create_snapshot_kwargs") -@validation.add("required_contexts", contexts=("volumes")) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup": ["cinder"]}, - name="CinderVolumes.create_volume_from_snapshot", - platform="openstack") -class CreateVolumeFromSnapshot(cinder_utils.CinderBasic): - - def run(self, do_delete=True, create_snapshot_kwargs=None, **kwargs): - """Create a volume-snapshot, then create a volume from this snapshot. - - :param do_delete: if True, a snapshot and a volume will - be deleted after creation. - :param create_snapshot_kwargs: optional args to create a snapshot - :param kwargs: optional args to create a volume - """ - create_snapshot_kwargs = create_snapshot_kwargs or {} - src_volume = random.choice(self.context["tenant"]["volumes"]) - - snapshot = self.cinder.create_snapshot(src_volume["id"], - **create_snapshot_kwargs) - volume = self.cinder.create_volume(src_volume["size"], - snapshot_id=snapshot.id, - **kwargs) - - if do_delete: - self.cinder.delete_snapshot(snapshot) - self.cinder.delete_volume(volume) - - -@types.convert(image={"type": "glance_image"}) -@validation.add("restricted_parameters", param_names=["name", "display_name"]) -@validation.add("image_exists", param_name="image", nullable=True) -@validation.add("required_services", services=[consts.Service.CINDER]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup": ["cinder"]}, - name="CinderVolumes.create_volume_" - "and_update_readonly_flag", - platform="openstack") -class CreateVolumeAndUpdateReadonlyFlag(cinder_utils.CinderBasic): - - def run(self, size, image=None, read_only=True, **kwargs): - """Create a volume and then update its readonly flag. 
-
-        :param size: volume size (integer, in GB)
-        :param image: image to be used to create volume
-        :param read_only: The value to indicate whether to update volume to
-                          read-only access mode
-        :param kwargs: optional args to create a volume
-        """
-        if image:
-            kwargs["imageRef"] = image
-        volume = self.cinder.create_volume(size, **kwargs)
-        self.cinder.update_readonly_flag(volume.id, read_only=read_only)
-
-
-@types.convert(image={"type": "glance_image"})
-@validation.add("restricted_parameters", param_names=["name", "display_name"])
-@validation.add("image_exists", param_name="image", nullable=True)
-@validation.add("required_services", services=[consts.Service.CINDER])
-@validation.add("required_platform", platform="openstack", users=True)
-@scenario.configure(context={"cleanup": ["cinder"]},
-                    name="CinderVolumes.create_and_accept_transfer",
-                    platform="openstack")
-class CreateAndAcceptTransfer(cinder_utils.CinderBasic):
-
-    def run(self, size, image=None, **kwargs):
-        """Create a volume transfer, then accept it.
-
-        Measure the "cinder transfer-create" and "cinder transfer-accept"
-        commands performance.
-
-        :param size: volume size (integer, in GB)
-        :param image: image to be used to create initial volume
-        :param kwargs: optional args to create a volume
-        """
-        if image:
-            kwargs["imageRef"] = image
-        volume = self.cinder.create_volume(size, **kwargs)
-        transfer = self.cinder.transfer_create(volume.id)
-        self.cinder.transfer_accept(transfer.id, auth_key=transfer.auth_key)
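
Throughout the Cinder scenarios above, `size` may be either a plain integer
(GB) or a {"min": ..., "max": ...} dictionary. Judging from the docstrings
and the random.randint call in CreateNestedSnapshotsAndAttachVolume, the
resolution rule looks like the following sketch (an assumption about the
helper's behavior, not its actual source):

    import random

    def resolve_size(size):
        # A dict requests a random whole size in [min, max]; an int is
        # used as-is.
        if isinstance(size, dict):
            return random.randint(size["min"], size["max"])
        return size

    assert resolve_size(4) == 4
    assert 1 <= resolve_size({"min": 1, "max": 5}) <= 5
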
- """ - domain = self._create_domain() - msg = "Domain isn't created" - self.assertTrue(domain, msg) - list_domains = self._list_domains() - self.assertIn(domain, list_domains) - - -@validation.add("required_services", - services=[consts.Service.DESIGNATE]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup": ["designate"]}, - name="DesignateBasic.list_domains", - platform="openstack") -class ListDomains(utils.DesignateScenario): - - def run(self): - """List Designate domains. - - This simple scenario tests the designate domain-list command by listing - all the domains. - - Suppose if we have 2 users in context and each has 2 domains - uploaded for them we will be able to test the performance of - designate domain-list command in this case. - """ - self._list_domains() - - -@validation.add("required_services", - services=[consts.Service.DESIGNATE]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup": ["designate"]}, - name="DesignateBasic.create_and_delete_domain", - platform="openstack") -class CreateAndDeleteDomain(utils.DesignateScenario): - - def run(self): - """Create and then delete a domain. - - Measure the performance of creating and deleting domains - with different level of load. - """ - domain = self._create_domain() - self._delete_domain(domain["id"]) - - -@validation.add("required_services", - services=[consts.Service.DESIGNATE]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup": ["designate"]}, - name="DesignateBasic.create_and_update_domain", - platform="openstack") -class CreateAndUpdateDomain(utils.DesignateScenario): - - def run(self): - """Create and then update a domain. - - Measure the performance of creating and updating domains - with different level of load. - """ - domain = self._create_domain() - self._update_domain(domain) - - -@validation.add("required_services", - services=[consts.Service.DESIGNATE]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup": ["designate"]}, - name="DesignateBasic.create_and_delete_records", - platform="openstack") -class CreateAndDeleteRecords(utils.DesignateScenario): - - def run(self, records_per_domain=5): - """Create and then delete records. - - Measure the performance of creating and deleting records - with different level of load. - - :param records_per_domain: Records to create pr domain. - """ - domain = self._create_domain() - - records = [] - - for i in range(records_per_domain): - record = self._create_record(domain) - records.append(record) - - for record in records: - self._delete_record( - domain["id"], record["id"]) - - -@validation.add("required_services", - services=[consts.Service.DESIGNATE]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup": ["designate"]}, - name="DesignateBasic.list_records", - platform="openstack") -class ListRecords(utils.DesignateScenario): - - def run(self, domain_id): - """List Designate records. - - This simple scenario tests the designate record-list command by listing - all the records in a domain. - - Suppose if we have 2 users in context and each has 2 domains - uploaded for them we will be able to test the performance of - designate record-list command in this case. 
-
-
-@validation.add("required_services",
-                services=[consts.Service.DESIGNATE])
-@validation.add("required_platform", platform="openstack", users=True)
-@scenario.configure(context={"cleanup": ["designate"]},
-                    name="DesignateBasic.list_records",
-                    platform="openstack")
-class ListRecords(utils.DesignateScenario):
-
-    def run(self, domain_id):
-        """List Designate records.
-
-        This simple scenario tests the designate record-list command by
-        listing all the records in a domain.
-
-        For example, if we have 2 users in the context and each has
-        2 domains uploaded, we can test the performance of the
-        designate record-list command under that load.
-
-        :param domain_id: Domain ID
-        """
-
-        self._list_records(domain_id)
-
-
-@validation.add("required_services",
-                services=[consts.Service.DESIGNATE])
-@validation.add("required_platform", platform="openstack", users=True)
-@scenario.configure(context={"cleanup": ["designate"]},
-                    name="DesignateBasic.create_and_list_records",
-                    platform="openstack")
-class CreateAndListRecords(utils.DesignateScenario):
-
-    def run(self, records_per_domain=5):
-        """Create and then list records.
-
-        If you have only 1 user in your context, you will
-        add 1 record on every iteration. So you will have more
-        and more records and will be able to measure the
-        performance of the "designate record-list" command depending on
-        the number of domains/records owned by users.
-
-        :param records_per_domain: Records to create per domain.
-        """
-        domain = self._create_domain()
-        for i in range(records_per_domain):
-            self._create_record(domain)
-
-        self._list_records(domain["id"])
-
-
-@validation.add("required_services",
-                services=[consts.Service.DESIGNATE])
-@validation.add("required_platform", platform="openstack", admin=True)
-@scenario.configure(context={"admin_cleanup": ["designate"]},
-                    name="DesignateBasic.create_and_list_servers",
-                    platform="openstack")
-class CreateAndListServers(utils.DesignateScenario):
-
-    def run(self):
-        """Create a Designate server and list all servers.
-
-        If you have only 1 user in your context, you will
-        add 1 server on every iteration. So you will have more
-        and more servers and will be able to measure the
-        performance of the "designate server-list" command depending on
-        the number of servers owned by users.
-        """
-        server = self._create_server()
-        self.assertTrue(server)
-        list_servers = self._list_servers()
-        self.assertIn(server, list_servers)
-
-
-@validation.add("required_services",
-                services=[consts.Service.DESIGNATE])
-@validation.add("required_platform", platform="openstack", admin=True)
-@scenario.configure(context={"admin_cleanup": ["designate"]},
-                    name="DesignateBasic.create_and_delete_server",
-                    platform="openstack")
-class CreateAndDeleteServer(utils.DesignateScenario):
-
-    def run(self):
-        """Create and then delete a server.
-
-        Measure the performance of creating and deleting servers
-        with different levels of load.
-        """
-        server = self._create_server()
-        self._delete_server(server["id"])
-
-
-@validation.add("required_services",
-                services=[consts.Service.DESIGNATE])
-@validation.add("required_platform", platform="openstack", admin=True)
-@scenario.configure(name="DesignateBasic.list_servers", platform="openstack")
-class ListServers(utils.DesignateScenario):
-
-    def run(self):
-        """List Designate servers.
-
-        This simple scenario tests the designate server-list command by
-        listing all the servers.
-        """
-        self._list_servers()
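
Every helper these scenarios call is wrapped in @atomic.action_timer(...),
which is what turns each API call into a separately reported duration. A
simplified, hypothetical rendering of the idea (the real machinery in
rally.task.atomic also handles nesting and failure accounting):

    import functools
    import time

    def action_timer(name, results):
        # Record the wall-clock duration of every call under `name`.
        def decorator(fn):
            @functools.wraps(fn)
            def wrapper(*args, **kwargs):
                start = time.monotonic()
                try:
                    return fn(*args, **kwargs)
                finally:
                    results.setdefault(name, []).append(
                        time.monotonic() - start)
            return wrapper
        return decorator

    timings = {}

    @action_timer("designate.list_servers", timings)
    def list_servers():
        return []

    list_servers()
    assert len(timings["designate.list_servers"]) == 1
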
- """ - zone = self._create_zone() - self.assertTrue(zone) - list_zones = self._list_zones() - self.assertIn(zone, list_zones) - - -@validation.add("required_services", - services=[consts.Service.DESIGNATE]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup": ["designate"]}, - name="DesignateBasic.list_zones", platform="openstack") -class ListZones(utils.DesignateScenario): - - def run(self): - """List Designate zones. - - This simple scenario tests the openstack zone list command by listing - all the zones. - """ - - self._list_zones() - - -@validation.add("required_services", - services=[consts.Service.DESIGNATE]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup": ["designate"]}, - name="DesignateBasic.create_and_delete_zone", - platform="openstack") -class CreateAndDeleteZone(utils.DesignateScenario): - - def run(self): - """Create and then delete a zone. - - Measure the performance of creating and deleting zones - with different level of load. - """ - zone = self._create_zone() - self._delete_zone(zone["id"]) - - -@validation.add("required_services", - services=[consts.Service.DESIGNATE]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup": ["designate"]}, - name="DesignateBasic.list_recordsets", - platform="openstack") -class ListRecordsets(utils.DesignateScenario): - - def run(self, zone_id): - """List Designate recordsets. - - This simple scenario tests the openstack recordset list command by - listing all the recordsets in a zone. - - :param zone_id: Zone ID - """ - - self._list_recordsets(zone_id) - - -@validation.add("required_services", - services=[consts.Service.DESIGNATE]) -@validation.add("required_platform", platform="openstack", users=True) -@validation.add("required_contexts", contexts=("zones")) -@scenario.configure(context={"cleanup": ["designate"]}, - name="DesignateBasic.create_and_delete_recordsets", - platform="openstack") -class CreateAndDeleteRecordsets(utils.DesignateScenario): - - def run(self, recordsets_per_zone=5): - """Create and then delete recordsets. - - Measure the performance of creating and deleting recordsets - with different level of load. - - :param recordsets_per_zone: recordsets to create pr zone. - """ - zone = random.choice(self.context["tenant"]["zones"]) - - recordsets = [] - - for i in range(recordsets_per_zone): - recordset = self._create_recordset(zone) - recordsets.append(recordset) - - for recordset in recordsets: - self._delete_recordset( - zone["id"], recordset["id"]) - - -@validation.add("required_services", - services=[consts.Service.DESIGNATE]) -@validation.add("required_platform", platform="openstack", users=True) -@validation.add("required_contexts", contexts=("zones")) -@scenario.configure(context={"cleanup": ["designate"]}, - name="DesignateBasic.create_and_list_recordsets", - platform="openstack") -class CreateAndListRecordsets(utils.DesignateScenario): - - def run(self, recordsets_per_zone=5): - """Create and then list recordsets. - - If you have only 1 user in your context, you will - add 1 recordset on every iteration. So you will have more - and more recordsets and will be able to measure the - performance of the "openstack recordset list" command depending on - the number of zones/recordsets owned by users. - - :param recordsets_per_zone: recordsets to create pr zone. 
- """ - zone = random.choice(self.context["tenant"]["zones"]) - - for i in range(recordsets_per_zone): - self._create_recordset(zone) - - self._list_recordsets(zone["id"]) diff --git a/rally/plugins/openstack/scenarios/designate/utils.py b/rally/plugins/openstack/scenarios/designate/utils.py deleted file mode 100644 index 80380300..00000000 --- a/rally/plugins/openstack/scenarios/designate/utils.py +++ /dev/null @@ -1,209 +0,0 @@ -# Copyright 2014 Hewlett-Packard Development Company, L.P. -# -# Author: Endre Karlson -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally.plugins.openstack import scenario -from rally.task import atomic - - -class DesignateScenario(scenario.OpenStackScenario): - """Base class for Designate scenarios with basic atomic actions.""" - - @atomic.action_timer("designate.create_domain") - def _create_domain(self, domain=None): - """Create domain. - - :param domain: dict, POST /v1/domains request options - :returns: designate domain dict - """ - domain = domain or {} - - domain.setdefault("email", "root@random.name") - domain["name"] = "%s.name." % self.generate_random_name() - return self.clients("designate").domains.create(domain) - - @atomic.action_timer("designate.list_domains") - def _list_domains(self): - """Return user domain list.""" - return self.clients("designate").domains.list() - - @atomic.action_timer("designate.delete_domain") - def _delete_domain(self, domain_id): - """Delete designate zone. - - :param domain_id: domain ID - """ - self.clients("designate").domains.delete(domain_id) - - @atomic.action_timer("designate.update_domain") - def _update_domain(self, domain): - """Update designate domain. - - :param domain: designate domain - :returns: designate updated domain dict - """ - domain["description"] = "updated domain" - domain["email"] = "updated@random.name" - return self.clients("designate").domains.update(domain) - - @atomic.action_timer("designate.create_record") - def _create_record(self, domain, record=None): - """Create a record in a domain. - - :param domain: domain dict - :param record: record dict - :returns: Designate record dict - """ - record = record or {} - record.setdefault("type", "A") - record["name"] = "%s.%s" % (self.generate_random_name(), - domain["name"]) - record.setdefault("data", "10.0.0.1") - - return self.clients("designate").records.create(domain["id"], record) - - @atomic.action_timer("designate.list_records") - def _list_records(self, domain_id): - """List domain records. - - :param domain_id: domain ID - :returns: domain records list - """ - return self.clients("designate").records.list(domain_id) - - @atomic.action_timer("designate.delete_record") - def _delete_record(self, domain_id, record_id): - """Delete a domain record. - - :param domain_id: domain ID - :param record_id: record ID - """ - self.clients("designate").records.delete(domain_id, record_id) - - @atomic.action_timer("designate.create_server") - def _create_server(self, server=None): - """Create server. 
- - :param server: dict, POST /v1/servers request options - :returns: designate server dict - """ - server = server or {} - - server["name"] = "%s.name." % self.generate_random_name() - return self.admin_clients("designate").servers.create(server) - - @atomic.action_timer("designate.list_servers") - def _list_servers(self): - """Return user server list.""" - return self.admin_clients("designate").servers.list() - - @atomic.action_timer("designate.delete_server") - def _delete_server(self, server_id): - """Delete Server. - - :param server_id: unicode server ID - """ - self.admin_clients("designate").servers.delete(server_id) - - # NOTE: API V2 - @atomic.action_timer("designate.create_zone") - def _create_zone(self, name=None, type_=None, email=None, description=None, - ttl=None): - """Create zone. - - :param name: Zone name - :param type_: Zone type, PRIMARY or SECONDARY - :param email: Zone owner email - :param description: Zone description - :param ttl: Zone ttl - Time to live in seconds - :returns: designate zone dict - """ - type_ = type_ or "PRIMARY" - - if type_ == "PRIMARY": - email = email or "root@random.name" - # Name is only useful to be random for PRIMARY - name = name or "%s.name." % self.generate_random_name() - - return self.clients("designate", version="2").zones.create( - name=name, - type_=type_, - email=email, - description=description, - ttl=ttl - ) - - @atomic.action_timer("designate.list_zones") - def _list_zones(self, criterion=None, marker=None, limit=None): - """Return user zone list. - - :param criterion: API Criterion to filter by - :param marker: UUID marker of the item to start the page from - :param limit: How many items to return in the page. - :returns: list of designate zones - """ - return self.clients("designate", version="2").zones.list() - - @atomic.action_timer("designate.delete_zone") - def _delete_zone(self, zone_id): - """Delete designate zone. - - :param zone_id: Zone ID - """ - self.clients("designate", version="2").zones.delete(zone_id) - - @atomic.action_timer("designate.list_recordsets") - def _list_recordsets(self, zone_id, criterion=None, marker=None, - limit=None): - """List zone recordsets. - - :param zone_id: Zone ID - :param criterion: API Criterion to filter by - :param marker: UUID marker of the item to start the page from - :param limit: How many items to return in the page. - :returns: zone recordsets list - """ - return self.clients("designate", version="2").recordsets.list( - zone_id, criterion=criterion, marker=marker, limit=limit) - - @atomic.action_timer("designate.create_recordset") - def _create_recordset(self, zone, recordset=None): - """Create a recordset in a zone. - - :param zone: zone dict - :param recordset: recordset dict - :returns: Designate recordset dict - """ - recordset = recordset or {} - recordset.setdefault("type_", recordset.pop("type", "A")) - if "name" not in recordset: - recordset["name"] = "%s.%s" % (self.generate_random_name(), - zone["name"]) - if "records" not in recordset: - recordset["records"] = ["10.0.0.1"] - - return self.clients("designate", version="2").recordsets.create( - zone["id"], **recordset) - - @atomic.action_timer("designate.delete_recordset") - def _delete_recordset(self, zone_id, recordset_id): - """Delete a zone recordset. 
- - :param zone_id: Zone ID - :param recordset_id: Recordset ID - """ - - self.clients("designate", version="2").recordsets.delete( - zone_id, recordset_id) diff --git a/rally/plugins/openstack/scenarios/dummy.py b/rally/plugins/openstack/scenarios/dummy.py deleted file mode 100644 index 9a321238..00000000 --- a/rally/plugins/openstack/scenarios/dummy.py +++ /dev/null @@ -1,22 +0,0 @@ -# Copyright 2017: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally.plugins.common.scenarios.dummy import dummy -from rally.task import scenario - - -@scenario.configure(name="Dummy.openstack", platform="openstack") -class DummyOpenStack(dummy.Dummy): - """Clone of Dummy.dummy for OpenStack""" diff --git a/rally/plugins/openstack/scenarios/ec2/__init__.py b/rally/plugins/openstack/scenarios/ec2/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/rally/plugins/openstack/scenarios/ec2/servers.py b/rally/plugins/openstack/scenarios/ec2/servers.py deleted file mode 100644 index 9a15aa3c..00000000 --- a/rally/plugins/openstack/scenarios/ec2/servers.py +++ /dev/null @@ -1,59 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally import consts -from rally.plugins.openstack import scenario -from rally.plugins.openstack.scenarios.ec2 import utils -from rally.task import types -from rally.task import validation - - -"""Scenarios for servers using EC2.""" - - -@validation.add("required_services", services=[consts.Service.EC2]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup": ["ec2"]}, - name="EC2Servers.list_servers", platform="openstack") -class ListServers(utils.EC2Scenario): - - def run(self): - """List all servers. - - This simple scenario tests the EC2 API list function by listing - all the servers. - """ - self._list_servers() - - -@types.convert(image={"type": "ec2_image"}, - flavor={"type": "ec2_flavor"}) -@validation.add("image_valid_on_flavor", flavor_param="flavor", - image_param="image") -@validation.add("required_services", services=[consts.Service.EC2]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup": ["ec2"]}, - name="EC2Servers.boot_server", platform="openstack") -class BootServer(utils.EC2Scenario): - - def run(self, image, flavor, **kwargs): - """Boot a server. - - Assumes that cleanup is done elsewhere. 
- - :param image: image to be used to boot an instance - :param flavor: flavor to be used to boot an instance - :param kwargs: optional additional arguments for server creation - """ - self._boot_servers(image, flavor, **kwargs) diff --git a/rally/plugins/openstack/scenarios/ec2/utils.py b/rally/plugins/openstack/scenarios/ec2/utils.py deleted file mode 100644 index 1c7d3d75..00000000 --- a/rally/plugins/openstack/scenarios/ec2/utils.py +++ /dev/null @@ -1,69 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -from oslo_config import cfg - -from rally.plugins.openstack import scenario -from rally.task import atomic -from rally.task import utils - - -CONF = cfg.CONF - - -class EC2Scenario(scenario.OpenStackScenario): - """Base class for EC2 scenarios with basic atomic actions.""" - - @atomic.action_timer("ec2.list_servers") - def _list_servers(self): - """Returns user servers list.""" - return self.clients("ec2").get_only_instances() - - @atomic.action_timer("ec2.boot_servers") - def _boot_servers(self, image_id, flavor_name, - instance_num=1, **kwargs): - """Boot multiple servers. - - Returns when all the servers are actually booted and are in the - "Running" state. - - :param image_id: ID of the image to be used for server creation - :param flavor_name: Name of the flavor to be used for server creation - :param instance_num: Number of instances to boot - :param kwargs: Other optional parameters to boot servers - - :returns: List of created server objects - """ - reservation = self.clients("ec2").run_instances( - image_id=image_id, - instance_type=flavor_name, - min_count=instance_num, - max_count=instance_num, - **kwargs) - servers = [instance for instance in reservation.instances] - - self.sleep_between(CONF.benchmark.ec2_server_boot_prepoll_delay) - servers = [utils.wait_for( - server, - ready_statuses=["RUNNING"], - update_resource=self._update_resource, - timeout=CONF.benchmark.ec2_server_boot_timeout, - check_interval=CONF.benchmark.ec2_server_boot_poll_interval - ) for server in servers] - return servers - - def _update_resource(self, resource): - resource.update() - return resource diff --git a/rally/plugins/openstack/scenarios/glance/__init__.py b/rally/plugins/openstack/scenarios/glance/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/rally/plugins/openstack/scenarios/glance/images.py b/rally/plugins/openstack/scenarios/glance/images.py deleted file mode 100644 index 6de13c6c..00000000 --- a/rally/plugins/openstack/scenarios/glance/images.py +++ /dev/null @@ -1,287 +0,0 @@ -# Copyright 2014: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally.common import logging -from rally import consts -from rally.plugins.openstack import scenario -from rally.plugins.openstack.scenarios.nova import utils as nova_utils -from rally.plugins.openstack.services.image import image -from rally.task import types -from rally.task import validation - -LOG = logging.getLogger(__name__) - -"""Scenarios for Glance images.""" - - -class GlanceBasic(scenario.OpenStackScenario): - def __init__(self, context=None, admin_clients=None, clients=None): - super(GlanceBasic, self).__init__(context, admin_clients, clients) - if hasattr(self, "_admin_clients"): - self.admin_glance = image.Image( - self._admin_clients, name_generator=self.generate_random_name, - atomic_inst=self.atomic_actions()) - if hasattr(self, "_clients"): - self.glance = image.Image( - self._clients, name_generator=self.generate_random_name, - atomic_inst=self.atomic_actions()) - - -@validation.add("enum", param_name="container_format", - values=["ami", "ari", "aki", "bare", "ovf"]) -@validation.add("enum", param_name="disk_format", - values=["ami", "ari", "aki", "vhd", "vmdk", "raw", - "qcow2", "vdi", "iso"]) -@types.convert(image_location={"type": "path_or_url"}, - kwargs={"type": "glance_image_args"}) -@validation.add("required_services", services=[consts.Service.GLANCE]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup": ["glance"]}, - name="GlanceImages.create_and_list_image", - platform="openstack") -class CreateAndListImage(GlanceBasic): - - def run(self, container_format, image_location, disk_format, - visibility="private", min_disk=0, min_ram=0): - """Create an image and then list all images. - - Measure the "glance image-list" command performance. - - If you have only 1 user in your context, you will - add 1 image on every iteration. So you will have more - and more images and will be able to measure the - performance of the "glance image-list" command depending on - the number of images owned by users. - - :param container_format: container format of image. Acceptable - formats: ami, ari, aki, bare, and ovf - :param image_location: image file location - :param disk_format: disk format of image. 
Acceptable formats: - ami, ari, aki, vhd, vmdk, raw, qcow2, vdi, and iso - :param visibility: The access permission for the created image - :param min_disk: The min disk of created images - :param min_ram: The min ram of created images - """ - image = self.glance.create_image( - container_format=container_format, - image_location=image_location, - disk_format=disk_format, - visibility=visibility, - min_disk=min_disk, - min_ram=min_ram) - self.assertTrue(image) - image_list = self.glance.list_images() - self.assertIn(image.id, [i.id for i in image_list]) - - -@validation.add("enum", param_name="container_format", - values=["ami", "ari", "aki", "bare", "ovf"]) -@validation.add("enum", param_name="disk_format", - values=["ami", "ari", "aki", "vhd", "vmdk", "raw", - "qcow2", "vdi", "iso"]) -@types.convert(image_location={"type": "path_or_url"}, - kwargs={"type": "glance_image_args"}) -@validation.add("required_services", services=[consts.Service.GLANCE]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup": ["glance"]}, - name="GlanceImages.create_and_get_image", - platform="openstack") -class CreateAndGetImage(GlanceBasic): - - def run(self, container_format, image_location, disk_format, - visibility="private", min_disk=0, min_ram=0): - """Create and get detailed information of an image. - - :param container_format: container format of image. Acceptable - formats: ami, ari, aki, bare, and ovf - :param image_location: image file location - :param disk_format: disk format of image. Acceptable formats: - ami, ari, aki, vhd, vmdk, raw, qcow2, vdi, and iso - :param visibility: The access permission for the created image - :param min_disk: The min disk of created images - :param min_ram: The min ram of created images - """ - image = self.glance.create_image( - container_format=container_format, - image_location=image_location, - disk_format=disk_format, - visibility=visibility, - min_disk=min_disk, - min_ram=min_ram) - self.assertTrue(image) - image_info = self.glance.get_image(image) - self.assertEqual(image.id, image_info.id) - - -@validation.add("required_services", services=[consts.Service.GLANCE]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup": ["glance"]}, - name="GlanceImages.list_images", - platform="openstack") -class ListImages(GlanceBasic): - - def run(self): - """List all images. - - This simple scenario tests the glance image-list command by listing - all the images. - - Suppose if we have 2 users in context and each has 2 images - uploaded for them we will be able to test the performance of - glance image-list command in this case. - """ - self.glance.list_images() - - -@validation.add("enum", param_name="container_format", - values=["ami", "ari", "aki", "bare", "ovf"]) -@validation.add("enum", param_name="disk_format", - values=["ami", "ari", "aki", "vhd", "vmdk", "raw", - "qcow2", "vdi", "iso"]) -@types.convert(image_location={"type": "path_or_url"}, - kwargs={"type": "glance_image_args"}) -@validation.add("required_services", services=[consts.Service.GLANCE]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup": ["glance"]}, - name="GlanceImages.create_and_delete_image", - platform="openstack") -class CreateAndDeleteImage(GlanceBasic): - - def run(self, container_format, image_location, disk_format, - visibility="private", min_disk=0, min_ram=0): - """Create and then delete an image. 
- - :param container_format: container format of image. Acceptable - formats: ami, ari, aki, bare, and ovf - :param image_location: image file location - :param disk_format: disk format of image. Acceptable formats: - ami, ari, aki, vhd, vmdk, raw, qcow2, vdi, and iso - :param visibility: The access permission for the created image - :param min_disk: The min disk of created images - :param min_ram: The min ram of created images - """ - image = self.glance.create_image( - container_format=container_format, - image_location=image_location, - disk_format=disk_format, - visibility=visibility, - min_disk=min_disk, - min_ram=min_ram) - self.glance.delete_image(image.id) - - -@types.convert(flavor={"type": "nova_flavor"}, - image_location={"type": "path_or_url"}, - kwargs={"type": "glance_image_args"}) -@validation.add("enum", param_name="container_format", - values=["ami", "ari", "aki", "bare", "ovf"]) -@validation.add("enum", param_name="disk_format", - values=["ami", "ari", "aki", "vhd", "vmdk", "raw", - "qcow2", "vdi", "iso"]) -@validation.add("restricted_parameters", param_names=["image_name", "name"]) -@validation.flavor_exists("flavor") -@validation.add("required_services", services=[consts.Service.GLANCE, - consts.Service.NOVA]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup": ["glance", "nova"]}, - name="GlanceImages.create_image_and_boot_instances", - platform="openstack") -class CreateImageAndBootInstances(GlanceBasic, nova_utils.NovaScenario): - - def run(self, container_format, image_location, disk_format, - flavor, number_instances, visibility="private", min_disk=0, - min_ram=0, boot_server_kwargs=None, **kwargs): - """Create an image and boot several instances from it. - - :param container_format: container format of image. Acceptable - formats: ami, ari, aki, bare, and ovf - :param image_location: image file location - :param disk_format: disk format of image. 
Acceptable formats: - ami, ari, aki, vhd, vmdk, raw, qcow2, vdi, and iso - :param visibility: The access permission for the created image - :param min_disk: The min disk of created images - :param min_ram: The min ram of created images - :param flavor: Nova flavor to be used to launch an instance - :param number_instances: number of Nova servers to boot - :param create_image_kwargs: optional parameters to create image - :param boot_server_kwargs: optional parameters to boot server - :param kwargs: optional parameters to create server (deprecated) - """ - boot_server_kwargs = boot_server_kwargs or kwargs or {} - - if kwargs: - LOG.warning("'kwargs' is deprecated in Rally v0.8.0: Use " - "'boot_server_kwargs' for additional parameters when " - "booting servers.") - - image = self.glance.create_image( - container_format=container_format, - image_location=image_location, - disk_format=disk_format, - visibility=visibility, - min_disk=min_disk, - min_ram=min_ram) - - self._boot_servers(image.id, flavor, number_instances, - **boot_server_kwargs) - - -@validation.add("enum", param_name="container_format", - values=["ami", "ari", "aki", "bare", "ovf"]) -@validation.add("enum", param_name="disk_format", - values=["ami", "ari", "aki", "vhd", "vmdk", "raw", - "qcow2", "vdi", "iso"]) -@types.convert(image_location={"type": "path_or_url"}, - kwargs={"type": "glance_image_args"}) -@validation.add("required_services", services=[consts.Service.GLANCE]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup": ["glance"]}, - name="GlanceImages.create_and_update_image", - platform="openstack") -class CreateAndUpdateImage(GlanceBasic): - - def run(self, container_format, image_location, disk_format, - remove_props=None, visibility="private", create_min_disk=0, - create_min_ram=0, update_min_disk=0, update_min_ram=0): - """Create an image then update it. - - Measure the "glance image-create" and "glance image-update" commands - performance. - - :param container_format: container format of image. Acceptable - formats: ami, ari, aki, bare, and ovf - :param image_location: image file location - :param disk_format: disk format of image. Acceptable formats: - ami, ari, aki, vhd, vmdk, raw, qcow2, vdi, and iso - :param remove_props: List of property names to remove. - (It is only supported by Glance v2.) - :param visibility: The access permission for the created image - :param create_min_disk: The min disk of created images - :param create_min_ram: The min ram of created images - :param update_min_disk: The min disk of updated images - :param update_min_ram: The min ram of updated images - """ - image = self.glance.create_image( - container_format=container_format, - image_location=image_location, - disk_format=disk_format, - visibility=visibility, - min_disk=create_min_disk, - min_ram=create_min_ram) - - self.glance.update_image(image.id, - min_disk=update_min_disk, - min_ram=update_min_ram, - remove_props=remove_props) diff --git a/rally/plugins/openstack/scenarios/glance/utils.py b/rally/plugins/openstack/scenarios/glance/utils.py deleted file mode 100644 index 6be988e8..00000000 --- a/rally/plugins/openstack/scenarios/glance/utils.py +++ /dev/null @@ -1,81 +0,0 @@ -# Copyright 2014: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_config import cfg - -from rally.common.i18n import _ -from rally.common import logging -from rally.plugins.openstack import scenario -from rally.plugins.openstack.wrappers import glance as glance_wrapper -from rally.task import atomic -from rally.task import utils - - -CONF = cfg.CONF -LOG = logging.getLogger(__name__) - - -class GlanceScenario(scenario.OpenStackScenario): - """Base class for Glance scenarios with basic atomic actions.""" - - def __init__(self, context=None, admin_clients=None, clients=None): - super(GlanceScenario, self).__init__(context, admin_clients, clients) - LOG.warning(_( - "Class %s is deprecated since Rally 0.10.0 and will be removed " - "soon. Use " - "rally.plugins.openstack.services.image.image.Image " - "instead.") % self.__class__) - - @atomic.action_timer("glance.list_images") - def _list_images(self): - """Returns user images list.""" - return list(self.clients("glance").images.list()) - - @atomic.action_timer("glance.create_image") - def _create_image(self, container_format, image_location, disk_format, - **kwargs): - """Create a new image. - - :param container_format: container format of image. Acceptable - formats: ami, ari, aki, bare, and ovf - :param image_location: image file location - :param disk_format: disk format of image. Acceptable formats: - ami, ari, aki, vhd, vmdk, raw, qcow2, vdi, and iso - :param kwargs: optional parameters to create image - - :returns: image object - """ - if not kwargs.get("name"): - kwargs["name"] = self.generate_random_name() - client = glance_wrapper.wrap(self._clients.glance, self) - return client.create_image(container_format, image_location, - disk_format, **kwargs) - - @atomic.action_timer("glance.delete_image") - def _delete_image(self, image): - """Deletes given image. - - Returns when the image is actually deleted. - - :param image: Image object - """ - self.clients("glance").images.delete(image.id) - wrapper = glance_wrapper.wrap(self._clients.glance, self) - utils.wait_for_status( - image, ["deleted", "pending_delete"], - check_deletion=True, - update_resource=wrapper.get_image, - timeout=CONF.benchmark.glance_image_delete_timeout, - check_interval=CONF.benchmark.glance_image_delete_poll_interval) diff --git a/rally/plugins/openstack/scenarios/heat/__init__.py b/rally/plugins/openstack/scenarios/heat/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/rally/plugins/openstack/scenarios/heat/stacks.py b/rally/plugins/openstack/scenarios/heat/stacks.py deleted file mode 100644 index 3c7b55ce..00000000 --- a/rally/plugins/openstack/scenarios/heat/stacks.py +++ /dev/null @@ -1,396 +0,0 @@ -# Copyright 2014: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally import consts -from rally.plugins.openstack import scenario -from rally.plugins.openstack.scenarios.heat import utils -from rally.task import atomic -from rally.task import types -from rally.task import validation - - -"""Scenarios for Heat stacks.""" - - -@types.convert(template_path={"type": "file"}, files={"type": "file_dict"}) -@validation.add("required_services", services=[consts.Service.HEAT]) -@validation.add("validate_heat_template", params="template_path") -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup": ["heat"]}, - name="HeatStacks.create_and_list_stack", - platform="openstack") -class CreateAndListStack(utils.HeatScenario): - - def run(self, template_path, parameters=None, - files=None, environment=None): - """Create a stack and then list all stacks. - - Measure the "heat stack-create" and "heat stack-list" commands - performance. - - :param template_path: path to stack template file - :param parameters: parameters to use in heat template - :param files: files used in template - :param environment: stack environment definition - """ - stack = self._create_stack(template_path, parameters, - files, environment) - self.assertTrue(stack) - list_stacks = self._list_stacks() - self.assertIn(stack.id, [i.id for i in list_stacks]) - - -@validation.add("required_services", services=[consts.Service.HEAT]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(name="HeatStacks.list_stacks_and_resources", - platform="openstack") -class ListStacksAndResources(utils.HeatScenario): - - def run(self): - """List all resources from tenant stacks.""" - stacks = self._list_stacks() - with atomic.ActionTimer( - self, "heat.list_resources_of_%s_stacks" % len(stacks)): - for stack in stacks: - self.clients("heat").resources.list(stack.id) - - -@types.convert(template_path={"type": "file"}, files={"type": "file_dict"}) -@validation.add("required_services", services=[consts.Service.HEAT]) -@validation.add("validate_heat_template", params="template_path") -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup": ["heat"]}, - name="HeatStacks.create_and_delete_stack", - platform="openstack") -class CreateAndDeleteStack(utils.HeatScenario): - - def run(self, template_path, parameters=None, - files=None, environment=None): - """Create and then delete a stack. - - Measure the "heat stack-create" and "heat stack-delete" commands - performance. 
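For context, a scenario such as HeatStacks.create_and_list_stack above is driven by a Rally task file. A minimal illustrative sketch, shown as the Python dict Rally loads from a JSON/YAML task (the template path and runner numbers are placeholders, not part of this patch):

    task = {
        "HeatStacks.create_and_list_stack": [{
            "args": {"template_path": "templates/my_stack.yaml.template"},
            "runner": {"type": "constant", "times": 10, "concurrency": 2},
            "context": {"users": {"tenants": 2, "users_per_tenant": 2}}
        }]
    }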
- - :param template_path: path to stack template file - :param parameters: parameters to use in heat template - :param files: files used in template - :param environment: stack environment definition - """ - - stack = self._create_stack(template_path, parameters, - files, environment) - self._delete_stack(stack) - - -@types.convert(template_path={"type": "file"}, files={"type": "file_dict"}) -@validation.add("required_services", services=[consts.Service.HEAT]) -@validation.add("validate_heat_template", params="template_path") -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup": ["heat"]}, - name="HeatStacks.create_check_delete_stack", - platform="openstack") -class CreateCheckDeleteStack(utils.HeatScenario): - - def run(self, template_path, parameters=None, - files=None, environment=None): - """Create, check and delete a stack. - - Measure the performance of the following commands: - - heat stack-create - - heat action-check - - heat stack-delete - - :param template_path: path to stack template file - :param parameters: parameters to use in heat template - :param files: files used in template - :param environment: stack environment definition - """ - - stack = self._create_stack(template_path, parameters, - files, environment) - self._check_stack(stack) - self._delete_stack(stack) - - -@types.convert(template_path={"type": "file"}, - updated_template_path={"type": "file"}, - files={"type": "file_dict"}, - updated_files={"type": "file_dict"}) -@validation.add("required_services", services=[consts.Service.HEAT]) -@validation.add("validate_heat_template", params="template_path") -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup": ["heat"]}, - name="HeatStacks.create_update_delete_stack", - platform="openstack") -class CreateUpdateDeleteStack(utils.HeatScenario): - - def run(self, template_path, updated_template_path, - parameters=None, updated_parameters=None, - files=None, updated_files=None, - environment=None, updated_environment=None): - """Create, update and then delete a stack. - - Measure the "heat stack-create", "heat stack-update" - and "heat stack-delete" commands performance. - - :param template_path: path to stack template file - :param updated_template_path: path to updated stack template file - :param parameters: parameters to use in heat template - :param updated_parameters: parameters to use in updated heat template - If not specified then parameters will be - used instead - :param files: files used in template - :param updated_files: files used in updated template. 
If not specified - files value will be used instead - :param environment: stack environment definition - :param updated_environment: environment definition for updated stack - """ - - stack = self._create_stack(template_path, parameters, - files, environment) - self._update_stack(stack, updated_template_path, - updated_parameters or parameters, - updated_files or files, - updated_environment or environment) - self._delete_stack(stack) - - -@types.convert(template_path={"type": "file"}, files={"type": "file_dict"}) -@validation.add("required_services", services=[consts.Service.HEAT]) -@validation.add("validate_heat_template", params="template_path") -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup": ["heat"]}, - name="HeatStacks.create_stack_and_scale", - platform="openstack") -class CreateStackAndScale(utils.HeatScenario): - - def run(self, template_path, output_key, delta, - parameters=None, files=None, - environment=None): - """Create an autoscaling stack and invoke a scaling policy. - - Measure the performance of autoscaling webhooks. - - :param template_path: path to template file that includes an - OS::Heat::AutoScalingGroup resource - :param output_key: the stack output key that corresponds to - the scaling webhook - :param delta: the number of instances the stack is expected to - change by. - :param parameters: parameters to use in heat template - :param files: files used in template (dict of file name to - file path) - :param environment: stack environment definition (dict) - """ - # TODO(stpierre): Kilo Heat is *much* better than Juno for the - # requirements of this scenario, so once Juno goes out of - # support we should update this scenario to suck less. Namely: - # - # * Kilo Heat can supply alarm_url attributes without needing - # an output key, so instead of getting the output key from - # the user, just get the name of the ScalingPolicy to apply. - # * Kilo Heat changes the status of a stack while scaling it, - # so _scale_stack() can check for the stack to have changed - # size and for it to be in UPDATE_COMPLETE state, so the - # user no longer needs to specify the expected delta. - stack = self._create_stack(template_path, parameters, files, - environment) - self._scale_stack(stack, output_key, delta) - - -@types.convert(template_path={"type": "file"}, files={"type": "file_dict"}) -@validation.add("required_services", services=[consts.Service.HEAT]) -@validation.add("validate_heat_template", params="template_path") -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup": ["heat"]}, - name="HeatStacks.create_suspend_resume_delete_stack", - platform="openstack") -class CreateSuspendResumeDeleteStack(utils.HeatScenario): - - def run(self, template_path, parameters=None, - files=None, environment=None): - """Create, suspend-resume and then delete a stack. 
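The scaling path used by HeatStacks.create_stack_and_scale above reduces to: read the webhook URL from the stack outputs, POST to it, then poll until the server count has changed by delta. A condensed, self-contained sketch of that flow (function names here are hypothetical; the real implementation is HeatScenario._scale_stack in heat/utils.py further below):

    import time

    import requests


    def count_servers(heat, stack):
        # Count OS::Nova::Server resources, including nested stacks.
        return len([r for r in heat.resources.list(stack.id, nested_depth=1)
                    if r.resource_type == "OS::Nova::Server"])


    def scale_stack(heat, stack, output_key, delta, timeout=3600, interval=15):
        # Find the webhook URL among the stack outputs and trigger it.
        url = next(o["output_value"] for o in stack.outputs
                   if o["output_key"] == output_key)
        expected = count_servers(heat, stack) + delta
        requests.post(url).raise_for_status()
        # Poll until the stack reaches the expected number of servers.
        deadline = time.time() + timeout
        while count_servers(heat, stack) != expected:
            if time.time() > deadline:
                raise RuntimeError("stack %s did not reach %d servers"
                                   % (stack.id, expected))
            time.sleep(interval)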
- - Measure performance of the following commands: - heat stack-create - heat action-suspend - heat action-resume - heat stack-delete - - :param template_path: path to stack template file - :param parameters: parameters to use in heat template - :param files: files used in template - :param environment: stack environment definition - """ - - s = self._create_stack(template_path, parameters, files, environment) - self._suspend_stack(s) - self._resume_stack(s) - self._delete_stack(s) - - -@validation.add("required_services", services=[consts.Service.HEAT]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(name="HeatStacks.list_stacks_and_events", - platform="openstack") -class ListStacksAndEvents(utils.HeatScenario): - - def run(self): - """List events from tenant stacks.""" - stacks = self._list_stacks() - with atomic.ActionTimer( - self, "heat.list_events_of_%s_stacks" % len(stacks)): - for stack in stacks: - self.clients("heat").events.list(stack.id) - - -@types.convert(template_path={"type": "file"}, files={"type": "file_dict"}) -@validation.add("required_services", services=[consts.Service.HEAT]) -@validation.add("validate_heat_template", params="template_path") -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup": ["heat"]}, - name="HeatStacks.create_snapshot_restore_delete_stack", - platform="openstack") -class CreateSnapshotRestoreDeleteStack(utils.HeatScenario): - - def run(self, template_path, parameters=None, - files=None, environment=None): - """Create, snapshot-restore and then delete a stack. - - Measure performance of the following commands: - heat stack-create - heat stack-snapshot - heat stack-restore - heat stack-delete - - :param template_path: path to stack template file - :param parameters: parameters to use in heat template - :param files: files used in template - :param environment: stack environment definition - """ - - stack = self._create_stack( - template_path, parameters, files, environment) - snapshot = self._snapshot_stack(stack) - self._restore_stack(stack, snapshot["id"]) - self._delete_stack(stack) - - -@types.convert(template_path={"type": "file"}, files={"type": "file_dict"}) -@validation.add("required_services", services=[consts.Service.HEAT]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup": ["heat"]}, - name="HeatStacks.create_stack_and_show_output_via_API", - platform="openstack") -class CreateStackAndShowOutputViaAPI(utils.HeatScenario): - - def run(self, template_path, output_key, - parameters=None, files=None, environment=None): - """Create stack and show output by using old algorithm. 
- - Measure performance of the following commands: - heat stack-create - heat output-show - - :param template_path: path to stack template file - :param output_key: the stack output key that corresponds to - the scaling webhook - :param parameters: parameters to use in heat template - :param files: files used in template - :param environment: stack environment definition - """ - stack = self._create_stack( - template_path, parameters, files, environment) - self._stack_show_output_via_API(stack, output_key) - - -@types.convert(template_path={"type": "file"}, files={"type": "file_dict"}) -@validation.add("required_services", services=[consts.Service.HEAT]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup": ["heat"]}, - name="HeatStacks.create_stack_and_show_output", - platform="openstack") -class CreateStackAndShowOutput(utils.HeatScenario): - - def run(self, template_path, output_key, - parameters=None, files=None, environment=None): - """Create stack and show output by using new algorithm. - - Measure performance of the following commands: - heat stack-create - heat output-show - - :param template_path: path to stack template file - :param output_key: the stack output key that corresponds to - the scaling webhook - :param parameters: parameters to use in heat template - :param files: files used in template - :param environment: stack environment definition - """ - stack = self._create_stack( - template_path, parameters, files, environment) - self._stack_show_output(stack, output_key) - - -@types.convert(template_path={"type": "file"}, files={"type": "file_dict"}) -@validation.add("required_services", services=[consts.Service.HEAT]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup": ["heat"]}, - name="HeatStacks.create_stack_and_list_output_via_API", - platform="openstack") -class CreateStackAndListOutputViaAPI(utils.HeatScenario): - - def run(self, template_path, parameters=None, - files=None, environment=None): - """Create stack and list outputs by using old algorithm. - - Measure performance of the following commands: - heat stack-create - heat output-list - - :param template_path: path to stack template file - :param parameters: parameters to use in heat template - :param files: files used in template - :param environment: stack environment definition - """ - stack = self._create_stack( - template_path, parameters, files, environment) - self._stack_list_output_via_API(stack) - - -@types.convert(template_path={"type": "file"}, files={"type": "file_dict"}) -@validation.add("required_services", services=[consts.Service.HEAT]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup": ["heat"]}, - name="HeatStacks.create_stack_and_list_output", - platform="openstack") -class CreateStackAndListOutput(utils.HeatScenario): - - def run(self, template_path, parameters=None, - files=None, environment=None): - """Create stack and list outputs by using new algorithm. 
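The "via_API" (old) and plain (new) output scenarios above differ only in the client call used to fetch the data; roughly, assuming heat is a heatclient handle and stack and key are in scope:

    # Old style: fetch the whole stack and filter its outputs client-side.
    outputs = heat.stacks.get(stack_id=stack.id).to_dict().get("outputs", [])
    value = [o for o in outputs if o["output_key"] == key]

    # New style: ask Heat for the single output server-side.
    value = heat.stacks.output_show(stack.id, key)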
- - Measure performance of the following commands: - heat stack-create - heat output-list - - :param template_path: path to stack template file - :param parameters: parameters to use in heat template - :param files: files used in template - :param environment: stack environment definition - """ - stack = self._create_stack( - template_path, parameters, files, environment) - self._stack_list_output(stack) diff --git a/rally/plugins/openstack/scenarios/heat/utils.py b/rally/plugins/openstack/scenarios/heat/utils.py deleted file mode 100644 index c6c4ce75..00000000 --- a/rally/plugins/openstack/scenarios/heat/utils.py +++ /dev/null @@ -1,328 +0,0 @@ -# Copyright 2014: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_config import cfg -import requests - -from rally.common import logging -from rally import exceptions -from rally.plugins.openstack import scenario -from rally.task import atomic -from rally.task import utils - - -LOG = logging.getLogger(__name__) - - -CONF = cfg.CONF - - -class HeatScenario(scenario.OpenStackScenario): - """Base class for Heat scenarios with basic atomic actions.""" - - @atomic.action_timer("heat.list_stacks") - def _list_stacks(self): - """Return user stack list.""" - - return list(self.clients("heat").stacks.list()) - - @atomic.action_timer("heat.create_stack") - def _create_stack(self, template, parameters=None, - files=None, environment=None): - """Create a new stack. - - :param template: template with stack description. 
- :param parameters: template parameters used during stack creation - :param files: additional files used in template - :param environment: stack environment definition - - :returns: stack object - """ - stack_name = self.generate_random_name() - kw = { - "stack_name": stack_name, - "disable_rollback": True, - "parameters": parameters or {}, - "template": template, - "files": files or {}, - "environment": environment or {} - } - - # heat client returns a body instead of a manager object, so we - # should get the manager object using stack_id - stack_id = self.clients("heat").stacks.create(**kw)["stack"]["id"] - stack = self.clients("heat").stacks.get(stack_id) - - self.sleep_between(CONF.benchmark.heat_stack_create_prepoll_delay) - - stack = utils.wait_for( - stack, - ready_statuses=["CREATE_COMPLETE"], - failure_statuses=["CREATE_FAILED"], - update_resource=utils.get_from_manager(), - timeout=CONF.benchmark.heat_stack_create_timeout, - check_interval=CONF.benchmark.heat_stack_create_poll_interval) - - return stack - - @atomic.action_timer("heat.update_stack") - def _update_stack(self, stack, template, parameters=None, - files=None, environment=None): - """Update an existing stack. - - :param stack: stack that needs to be updated - :param template: updated template - :param parameters: template parameters for stack update - :param files: additional files used in template - :param environment: stack environment definition - - :returns: updated stack object - """ - - kw = { - "stack_name": stack.stack_name, - "disable_rollback": True, - "parameters": parameters or {}, - "template": template, - "files": files or {}, - "environment": environment or {} - } - self.clients("heat").stacks.update(stack.id, **kw) - - self.sleep_between(CONF.benchmark.heat_stack_update_prepoll_delay) - - stack = utils.wait_for( - stack, - ready_statuses=["UPDATE_COMPLETE"], - failure_statuses=["UPDATE_FAILED"], - update_resource=utils.get_from_manager(), - timeout=CONF.benchmark.heat_stack_update_timeout, - check_interval=CONF.benchmark.heat_stack_update_poll_interval) - return stack - - @atomic.action_timer("heat.check_stack") - def _check_stack(self, stack): - """Check given stack. - - Check the stack and stack resources. - - :param stack: stack that needs to be checked - """ - self.clients("heat").actions.check(stack.id) - utils.wait_for( - stack, - ready_statuses=["CHECK_COMPLETE"], - failure_statuses=["CHECK_FAILED"], - update_resource=utils.get_from_manager(["CHECK_FAILED"]), - timeout=CONF.benchmark.heat_stack_check_timeout, - check_interval=CONF.benchmark.heat_stack_check_poll_interval) - - @atomic.action_timer("heat.delete_stack") - def _delete_stack(self, stack): - """Delete given stack. - - Returns when the stack is actually deleted. - - :param stack: stack object - """ - stack.delete() - utils.wait_for_status( - stack, - ready_statuses=["DELETE_COMPLETE"], - failure_statuses=["DELETE_FAILED"], - check_deletion=True, - update_resource=utils.get_from_manager(), - timeout=CONF.benchmark.heat_stack_delete_timeout, - check_interval=CONF.benchmark.heat_stack_delete_poll_interval) - - @atomic.action_timer("heat.suspend_stack") - def _suspend_stack(self, stack): - """Suspend given stack.
- - :param stack: stack that needs to be suspended - """ - - self.clients("heat").actions.suspend(stack.id) - utils.wait_for( - stack, - ready_statuses=["SUSPEND_COMPLETE"], - failure_statuses=["SUSPEND_FAILED"], - update_resource=utils.get_from_manager(), - timeout=CONF.benchmark.heat_stack_suspend_timeout, - check_interval=CONF.benchmark.heat_stack_suspend_poll_interval) - - @atomic.action_timer("heat.resume_stack") - def _resume_stack(self, stack): - """Resume given stack. - - :param stack: stack that needs to be resumed - """ - - self.clients("heat").actions.resume(stack.id) - utils.wait_for( - stack, - ready_statuses=["RESUME_COMPLETE"], - failure_statuses=["RESUME_FAILED"], - update_resource=utils.get_from_manager(), - timeout=CONF.benchmark.heat_stack_resume_timeout, - check_interval=CONF.benchmark.heat_stack_resume_poll_interval) - - @atomic.action_timer("heat.snapshot_stack") - def _snapshot_stack(self, stack): - """Creates a snapshot for given stack. - - :param stack: stack that will be used as base for snapshot - :returns: snapshot created for given stack - """ - snapshot = self.clients("heat").stacks.snapshot( - stack.id) - utils.wait_for( - stack, - ready_statuses=["SNAPSHOT_COMPLETE"], - failure_statuses=["SNAPSHOT_FAILED"], - update_resource=utils.get_from_manager(), - timeout=CONF.benchmark.heat_stack_snapshot_timeout, - check_interval=CONF.benchmark.heat_stack_snapshot_poll_interval) - return snapshot - - @atomic.action_timer("heat.restore_stack") - def _restore_stack(self, stack, snapshot_id): - """Restores stack from given snapshot. - - :param stack: stack that will be restored from snapshot - :param snapshot_id: id of given snapshot - """ - self.clients("heat").stacks.restore(stack.id, snapshot_id) - utils.wait_for( - stack, - ready_statuses=["RESTORE_COMPLETE"], - failure_statuses=["RESTORE_FAILED"], - update_resource=utils.get_from_manager(), - timeout=CONF.benchmark.heat_stack_restore_timeout, - check_interval=CONF.benchmark.heat_stack_restore_poll_interval - ) - - @atomic.action_timer("heat.show_output") - def _stack_show_output(self, stack, output_key): - """Execute output_show for specified "output_key". - - This method uses new output API call. - :param stack: stack with output_key output. - :param output_key: The name of the output. - """ - output = self.clients("heat").stacks.output_show(stack.id, output_key) - return output - - @atomic.action_timer("heat.show_output_via_API") - def _stack_show_output_via_API(self, stack, output_key): - """Execute output_show for specified "output_key". - - This method uses old way for getting output value. - It gets whole stack object and then finds necessary "output_key". - :param stack: stack with output_key output. - :param output_key: The name of the output. - """ - # this code copy-pasted and adopted for rally from old client version - # https://github.com/openstack/python-heatclient/blob/0.8.0/heatclient/ - # v1/shell.py#L682-L699 - stack = self.clients("heat").stacks.get(stack_id=stack.id) - for output in stack.to_dict().get("outputs", []): - if output["output_key"] == output_key: - return output - - @atomic.action_timer("heat.list_output") - def _stack_list_output(self, stack): - """Execute output_list for specified "stack". - - This method uses new output API call. - :param stack: stack to call output-list. 
- """ - output_list = self.clients("heat").stacks.output_list(stack.id) - return output_list - - @atomic.action_timer("heat.list_output_via_API") - def _stack_list_output_via_API(self, stack): - """Execute output_list for specified "stack". - - This method uses old way for getting output value. - It gets whole stack object and then prints all outputs - belongs this stack. - :param stack: stack to call output-list. - """ - # this code copy-pasted and adopted for rally from old client version - # https://github.com/openstack/python-heatclient/blob/0.8.0/heatclient/ - # v1/shell.py#L649-L663 - stack = self.clients("heat").stacks.get(stack_id=stack.id) - output_list = stack.to_dict()["outputs"] - return output_list - - def _count_instances(self, stack): - """Count instances in a Heat stack. - - :param stack: stack to count instances in. - """ - return len([ - r for r in self.clients("heat").resources.list(stack.id, - nested_depth=1) - if r.resource_type == "OS::Nova::Server"]) - - def _scale_stack(self, stack, output_key, delta): - """Scale a stack up or down. - - Calls the webhook given in the output value identified by - 'output_key', and waits for the stack size to change by - 'delta'. - - :param stack: stack to scale up or down - :param output_key: The name of the output to get the URL from - :param delta: The expected change in number of instances in - the stack (signed int) - """ - num_instances = self._count_instances(stack) - expected_instances = num_instances + delta - LOG.debug("Scaling stack %s from %s to %s instances with %s" % - (stack.id, num_instances, expected_instances, output_key)) - with atomic.ActionTimer(self, "heat.scale_with_%s" % output_key): - self._stack_webhook(stack, output_key) - utils.wait_for( - stack, - is_ready=lambda s: ( - self._count_instances(s) == expected_instances), - failure_statuses=["UPDATE_FAILED"], - update_resource=utils.get_from_manager(), - timeout=CONF.benchmark.heat_stack_scale_timeout, - check_interval=CONF.benchmark.heat_stack_scale_poll_interval) - - def _stack_webhook(self, stack, output_key): - """POST to the URL given in the output value identified by output_key. - - This can be used to scale stacks up and down, for instance. - - :param stack: stack to call a webhook on - :param output_key: The name of the output to get the URL from - :raises InvalidConfigException: if the output key is not found - """ - url = None - for output in stack.outputs: - if output["output_key"] == output_key: - url = output["output_value"] - break - else: - raise exceptions.InvalidConfigException( - "No output key %(key)s found in stack %(id)s" % - {"key": output_key, "id": stack.id}) - - with atomic.ActionTimer(self, "heat.%s_webhook" % output_key): - requests.post(url).raise_for_status() diff --git a/rally/plugins/openstack/scenarios/ironic/__init__.py b/rally/plugins/openstack/scenarios/ironic/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/rally/plugins/openstack/scenarios/ironic/nodes.py b/rally/plugins/openstack/scenarios/ironic/nodes.py deleted file mode 100644 index f43804e5..00000000 --- a/rally/plugins/openstack/scenarios/ironic/nodes.py +++ /dev/null @@ -1,86 +0,0 @@ -# Copyright 2015: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally.common import logging -from rally import consts -from rally.plugins.openstack import scenario -from rally.plugins.openstack.scenarios.ironic import utils -from rally.task import validation - - -"""Scenarios for ironic nodes.""" - - -@logging.log_deprecated_args("Useless arguments detected", "0.10.0", - ("marker", "limit", "sort_key"), once=True) -@validation.add("required_services", services=[consts.Service.IRONIC]) -@validation.add("restricted_parameters", param_names="name") -@validation.add("required_platform", platform="openstack", admin=True) -@scenario.configure(context={"admin_cleanup": ["ironic"]}, - name="IronicNodes.create_and_list_node", - platform="openstack") -class CreateAndListNode(utils.IronicScenario): - - def run(self, driver, properties=None, associated=None, maintenance=None, - detail=False, sort_dir=None, marker=None, limit=None, - sort_key=None, **kwargs): - """Create and list nodes. - - :param driver: The name of the driver used to manage this Node. - :param properties: Key/value pair describing the physical - characteristics of the node. - :param associated: Optional argument of list request. Either a Boolean - or a string representation of a Boolean that indicates whether to - return a list of associated (True or "True") or unassociated - (False or "False") nodes. - :param maintenance: Optional argument of list request. Either a Boolean - or a string representation of a Boolean that indicates whether - to return nodes in maintenance mode (True or "True"), or not in - maintenance mode (False or "False"). - :param detail: Optional, boolean whether to return detailed - information about nodes. - :param sort_dir: Optional, direction of sorting, either 'asc' (the - default) or 'desc'. - :param marker: DEPRECATED since Rally 0.10.0 - :param limit: DEPRECATED since Rally 0.10.0 - :param sort_key: DEPRECATED since Rally 0.10.0 - :param kwargs: Optional additional arguments for node creation - """ - - node = self._create_node(driver, properties, **kwargs) - list_nodes = self._list_nodes( - associated=associated, maintenance=maintenance, detail=detail, - sort_dir=sort_dir) - self.assertIn(node.name, [n.name for n in list_nodes]) - - -@validation.add("required_services", services=[consts.Service.IRONIC]) -@validation.add("restricted_parameters", param_names="name") -@validation.add("required_platform", platform="openstack", admin=True) -@scenario.configure(context={"admin_cleanup": ["ironic"]}, - name="IronicNodes.create_and_delete_node", - platform="openstack") -class CreateAndDeleteNode(utils.IronicScenario): - - def run(self, driver, properties=None, **kwargs): - """Create and delete node. - - :param driver: The name of the driver used to manage this Node. - :param properties: Key/value pair describing the physical - characteristics of the node. 
- :param kwargs: Optional additional arguments for node creation - """ - node = self._create_node(driver, properties, **kwargs) - self._delete_node(node) diff --git a/rally/plugins/openstack/scenarios/ironic/utils.py b/rally/plugins/openstack/scenarios/ironic/utils.py deleted file mode 100644 index 0a834256..00000000 --- a/rally/plugins/openstack/scenarios/ironic/utils.py +++ /dev/null @@ -1,112 +0,0 @@ -# Copyright 2015: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import string - -from oslo_config import cfg - -from rally.plugins.openstack import scenario -from rally.task import atomic -from rally.task import utils - - -CONF = cfg.CONF - - -class IronicScenario(scenario.OpenStackScenario): - """Base class for Ironic scenarios with basic atomic actions.""" - - # NOTE(stpierre): Ironic has two name checkers. The new-style - # checker, in API v1.10+, is quite relaxed and will Just Work with - # the default random name pattern. (See - # https://bugs.launchpad.net/ironic/+bug/1434376.) The old-style - # checker *claims* to implement RFCs 952 and 1123, but it doesn't - # actually. (See https://bugs.launchpad.net/ironic/+bug/1468508 - # for details.) The default RESOURCE_NAME_FORMAT works fine for - # the new-style checker, but the old-style checker only allows - # underscores after the first dot, for reasons that I'm sure are - # entirely obvious, so we have to supply a bespoke format for - # Ironic names. - RESOURCE_NAME_FORMAT = "s-rally-XXXXXXXX-XXXXXXXX" - RESOURCE_NAME_ALLOWED_CHARACTERS = string.ascii_lowercase + string.digits - - @atomic.action_timer("ironic.create_node") - def _create_node(self, driver, properties, **kwargs): - """Create node immediately. - - :param driver: The name of the driver used to manage this Node. - :param properties: Key/value pair describing the physical - characteristics of the node. - :param kwargs: optional parameters to create image - :returns: node object - """ - kwargs["name"] = self.generate_random_name() - node = self.admin_clients("ironic").node.create(driver=driver, - properties=properties, - **kwargs) - - self.sleep_between(CONF.benchmark.ironic_node_create_poll_interval) - node = utils.wait_for_status( - node, - ready_statuses=["AVAILABLE"], - update_resource=utils.get_from_manager(), - timeout=CONF.benchmark.ironic_node_create_timeout, - check_interval=CONF.benchmark.ironic_node_poll_interval, - id_attr="uuid", status_attr="provision_state" - ) - - return node - - @atomic.action_timer("ironic.list_nodes") - def _list_nodes(self, associated=None, maintenance=None, detail=False, - sort_dir=None): - """Return list of nodes. - - :param associated: Optional. Either a Boolean or a string - representation of a Boolean that indicates whether - to return a list of associated (True or "True") or - unassociated (False or "False") nodes. - :param maintenance: Optional. 
Either a Boolean or a string - representation of a Boolean that indicates whether - to return nodes in maintenance mode (True or - "True"), or not in maintenance mode (False or - "False"). - :param detail: Optional, boolean whether to return detailed information - about nodes. - :param sort_dir: Optional, direction of sorting, either 'asc' (the - default) or 'desc'. - :returns: A list of nodes. - """ - return self.admin_clients("ironic").node.list( - associated=associated, maintenance=maintenance, detail=detail, - sort_dir=sort_dir) - - @atomic.action_timer("ironic.delete_node") - def _delete_node(self, node): - """Delete the node with specific id. - - :param node: Ironic node object - """ - self.admin_clients("ironic").node.delete(node.uuid) - - utils.wait_for_status( - node, - ready_statuses=["deleted"], - check_deletion=True, - update_resource=utils.get_from_manager(), - timeout=CONF.benchmark.ironic_node_delete_timeout, - check_interval=CONF.benchmark.ironic_node_poll_interval, - id_attr="uuid", status_attr="provision_state" - ) diff --git a/rally/plugins/openstack/scenarios/keystone/__init__.py b/rally/plugins/openstack/scenarios/keystone/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/rally/plugins/openstack/scenarios/keystone/basic.py b/rally/plugins/openstack/scenarios/keystone/basic.py deleted file mode 100755 index 14ffd348..00000000 --- a/rally/plugins/openstack/scenarios/keystone/basic.py +++ /dev/null @@ -1,438 +0,0 @@ -# Copyright 2013: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Benchmark scenarios for Keystone. -""" - -from rally.common import logging -from rally.plugins.openstack import scenario -from rally.plugins.openstack.services.identity import identity -from rally.task import validation - - -class KeystoneBasic(scenario.OpenStackScenario): - """Base class for Keystone scenarios with initialized service object.""" - - def __init__(self, context=None, admin_clients=None, clients=None): - super(KeystoneBasic, self).__init__(context, admin_clients, clients) - if hasattr(self, "_admin_clients"): - self.admin_keystone = identity.Identity( - self._admin_clients, name_generator=self.generate_random_name, - atomic_inst=self.atomic_actions()) - if hasattr(self, "_clients"): - self.keystone = identity.Identity( - self._clients, name_generator=self.generate_random_name, - atomic_inst=self.atomic_actions()) - - -@validation.add("required_platform", platform="openstack", admin=True) -@scenario.configure(context={"admin_cleanup": ["keystone"]}, - name="KeystoneBasic.create_user", - platform="openstack") -class CreateUser(KeystoneBasic): - - @logging.log_deprecated_args( - "The 'name_length' argument to create_user is ignored", - "0.1.2", ["name_length"], once=True) - def run(self, name_length=10, **kwargs): - """Create a keystone user with random name. - - :param kwargs: Other optional parameters to create users like - "tenant_id", "enabled". 
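KeystoneBasic delegates all API work to the identity.Identity facade constructed in its __init__ above. A minimal sketch of the same pattern used directly (admin_clients, generate_random_name and atomic_actions are assumed to already exist):

    from rally.plugins.openstack.services.identity import identity

    keystone = identity.Identity(admin_clients,
                                 name_generator=generate_random_name,
                                 atomic_inst=atomic_actions)
    user = keystone.create_user(enabled=True)
    keystone.update_user(user.id, enabled=False)
    keystone.delete_user(user.id)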
- """ - self.admin_keystone.create_user(**kwargs) - - -@validation.add("required_platform", platform="openstack", admin=True) -@scenario.configure(context={"admin_cleanup": ["keystone"]}, - name="KeystoneBasic.create_delete_user", - platform="openstack") -class CreateDeleteUser(KeystoneBasic): - - @logging.log_deprecated_args( - "The 'name_length' argument to create_delete_user is ignored", - "0.1.2", ["name_length"], once=True) - def run(self, name_length=10, **kwargs): - """Create a keystone user with random name and then delete it. - - :param kwargs: Other optional parameters to create users like - "tenant_id", "enabled". - """ - user = self.admin_keystone.create_user(**kwargs) - self.admin_keystone.delete_user(user.id) - - -@validation.add("required_platform", platform="openstack", admin=True) -@scenario.configure(context={"admin_cleanup": ["keystone"]}, - name="KeystoneBasic.create_user_set_enabled_and_delete", - platform="openstack") -class CreateUserSetEnabledAndDelete(KeystoneBasic): - - def run(self, enabled=True, **kwargs): - """Create a keystone user, enable or disable it, and delete it. - - :param enabled: Initial state of user 'enabled' flag. The user - will be created with 'enabled' set to this - value, and then it will be toggled. - :param kwargs: Other optional parameters to create user. - """ - user = self.admin_keystone.create_user(enabled=enabled, **kwargs) - self.admin_keystone.update_user(user.id, enabled=(not enabled)) - self.admin_keystone.delete_user(user.id) - - -@validation.add("required_platform", platform="openstack", admin=True) -@scenario.configure(context={"admin_cleanup": ["keystone"]}, - name="KeystoneBasic.create_tenant", - platform="openstack") -class CreateTenant(KeystoneBasic): - - @logging.log_deprecated_args( - "The 'name_length' argument to create_tenant is ignored", - "0.1.2", ["name_length"], once=True) - def run(self, name_length=10, **kwargs): - """Create a keystone tenant with random name. - - :param kwargs: Other optional parameters - """ - self.admin_keystone.create_project(**kwargs) - - -@validation.add("required_platform", platform="openstack", admin=True) -@scenario.configure(context={"admin_cleanup": ["keystone"]}, - name="KeystoneBasic.authenticate_user_and_validate_token", - platform="openstack") -class AuthenticateUserAndValidateToken(KeystoneBasic): - - def run(self): - """Authenticate and validate a keystone token.""" - token = self.admin_keystone.fetch_token() - self.admin_keystone.validate_token(token) - - -@validation.add("number", param_name="users_per_tenant", minval=1) -@validation.add("required_platform", platform="openstack", admin=True) -@scenario.configure(context={"admin_cleanup": ["keystone"]}, - name="KeystoneBasic.create_tenant_with_users", - platform="openstack") -class CreateTenantWithUsers(KeystoneBasic): - - @logging.log_deprecated_args( - "The 'name_length' argument to create_tenant_with_users is ignored", - "0.1.2", ["name_length"], once=True) - def run(self, users_per_tenant, name_length=10, **kwargs): - """Create a keystone tenant and several users belonging to it. 
- - :param users_per_tenant: number of users to create for the tenant - :param kwargs: Other optional parameters for tenant creation - :returns: keystone tenant instance - """ - tenant = self.admin_keystone.create_project(**kwargs) - self.admin_keystone.create_users(tenant.id, - number_of_users=users_per_tenant) - - -@validation.add("required_platform", platform="openstack", admin=True) -@scenario.configure(context={"admin_cleanup": ["keystone"]}, - name="KeystoneBasic.create_and_list_users", - platform="openstack") -class CreateAndListUsers(KeystoneBasic): - - @logging.log_deprecated_args( - "The 'name_length' argument to create_and_list_users is ignored", - "0.1.2", ["name_length"], once=True) - def run(self, name_length=10, **kwargs): - """Create a keystone user with random name and list all users. - - :param kwargs: Other optional parameters to create users like - "tenant_id", "enabled". - """ - - kwargs.pop("name", None) - self.admin_keystone.create_user(**kwargs) - self.admin_keystone.list_users() - - -@validation.add("required_platform", platform="openstack", admin=True) -@scenario.configure(context={"admin_cleanup": ["keystone"]}, - name="KeystoneBasic.create_and_list_tenants", - platform="openstack") -class CreateAndListTenants(KeystoneBasic): - - @logging.log_deprecated_args( - "The 'name_length' argument to create_and_list_tenants is ignored", - "0.1.2", ["name_length"], once=True) - def run(self, name_length=10, **kwargs): - """Create a keystone tenant with random name and list all tenants. - - :param kwargs: Other optional parameters - """ - self.admin_keystone.create_project(**kwargs) - self.admin_keystone.list_projects() - - -@validation.add("required_platform", platform="openstack", - admin=True, users=True) -@scenario.configure(context={"admin_cleanup": ["keystone"]}, - name="KeystoneBasic.add_and_remove_user_role", - platform="openstack") -class AddAndRemoveUserRole(KeystoneBasic): - - def run(self): - """Create a user role, add it to a user, and then disassociate them.""" - tenant_id = self.context["tenant"]["id"] - user_id = self.context["user"]["id"] - role = self.admin_keystone.create_role() - self.admin_keystone.add_role(role_id=role.id, user_id=user_id, - project_id=tenant_id) - self.admin_keystone.revoke_role(role.id, user_id=user_id, - project_id=tenant_id) - - -@validation.add("required_platform", platform="openstack", admin=True) -@scenario.configure(context={"admin_cleanup": ["keystone"]}, - name="KeystoneBasic.create_and_delete_role", - platform="openstack") -class CreateAndDeleteRole(KeystoneBasic): - - def run(self): - """Create a user role and delete it.""" - role = self.admin_keystone.create_role() - self.admin_keystone.delete_role(role.id) - - -@validation.add("required_platform", platform="openstack", - admin=True, users=True) -@scenario.configure(context={"admin_cleanup": ["keystone"]}, - name="KeystoneBasic.create_add_and_list_user_roles", - platform="openstack") -class CreateAddAndListUserRoles(KeystoneBasic): - - def run(self): - """Create a user role, add it, and list user roles for the given user.""" - tenant_id = self.context["tenant"]["id"] - user_id = self.context["user"]["id"] - role = self.admin_keystone.create_role() - self.admin_keystone.add_role(user_id=user_id, role_id=role.id, - project_id=tenant_id) - self.admin_keystone.list_roles(user_id=user_id, project_id=tenant_id) - - -@validation.add("required_platform", platform="openstack", admin=True) -@scenario.configure(context={"admin_cleanup": ["keystone"]}, - name="KeystoneBasic.get_entities", -
platform="openstack") -class GetEntities(KeystoneBasic): - - def run(self, service_name="keystone"): - """Get instance of a tenant, user, role and service by id's. - - An ephemeral tenant, user, and role are each created. By - default, fetches the 'keystone' service. This can be - overridden (for instance, to get the 'Identity Service' - service on older OpenStack), or None can be passed explicitly - to service_name to create a new service and then query it by - ID. - - :param service_name: The name of the service to get by ID; or - None, to create an ephemeral service and - get it by ID. - """ - project = self.admin_keystone.create_project() - user = self.admin_keystone.create_user(project_id=project.id) - role = self.admin_keystone.create_role() - self.admin_keystone.get_project(project.id) - self.admin_keystone.get_user(user.id) - self.admin_keystone.get_role(role.id) - if service_name is None: - service = self.admin_keystone.create_service() - else: - service = self.admin_keystone.get_service_by_name(service_name) - self.admin_keystone.get_service(service.id) - - -@validation.add("required_platform", platform="openstack", admin=True) -@scenario.configure(context={"admin_cleanup": ["keystone"]}, - name="KeystoneBasic.create_and_delete_service", - platform="openstack") -class CreateAndDeleteService(KeystoneBasic): - - @logging.log_deprecated_args( - "The 'name' argument to create_and_delete_service will be ignored", - "0.0.5", ["name"]) - def run(self, name=None, service_type=None, description=None): - """Create and delete service. - - :param service_type: type of the service - :param description: description of the service - """ - service = self.admin_keystone.create_service(service_type=service_type, - description=description) - self.admin_keystone.delete_service(service.id) - - -@validation.add("required_platform", platform="openstack", admin=True) -@scenario.configure(context={"admin_cleanup": ["keystone"]}, - name="KeystoneBasic.create_update_and_delete_tenant", - platform="openstack") -class CreateUpdateAndDeleteTenant(KeystoneBasic): - - @logging.log_deprecated_args( - "The 'name_length' argument to create_update_and_delete_tenant is " - "ignored", "0.1.2", ["name_length"], once=True) - def run(self, name_length=None, **kwargs): - """Create, update and delete tenant. 
- - :param kwargs: Other optional parameters for tenant creation - """ - project = self.admin_keystone.create_project(**kwargs) - new_name = self.generate_random_name() - new_description = self.generate_random_name() - self.admin_keystone.update_project(project.id, name=new_name, - description=new_description) - self.admin_keystone.delete_project(project.id) - - -@validation.add("required_platform", platform="openstack", admin=True) -@scenario.configure(context={"admin_cleanup": ["keystone"]}, - name="KeystoneBasic.create_user_update_password", - platform="openstack") -class CreateUserUpdatePassword(KeystoneBasic): - - @logging.log_deprecated_args( - "The 'name_length' and 'password_length' arguments to " - "create_user_update_password are ignored", - "0.1.2", ["name_length", "password_length"], once=True) - def run(self, name_length=None, password_length=None): - """Create user and update password for that user.""" - user = self.admin_keystone.create_user() - password = self.generate_random_name() - self.admin_keystone.update_user(user.id, password=password) - - -@validation.add("required_platform", platform="openstack", admin=True) -@scenario.configure(context={"admin_cleanup": ["keystone"]}, - name="KeystoneBasic.create_and_list_services", - platform="openstack") -class CreateAndListServices(KeystoneBasic): - - @logging.log_deprecated_args( - "The 'name' argument to create_and_list_services will be ignored", - "0.0.5", ["name"]) - def run(self, name=None, service_type=None, description=None): - """Create and list services. - - :param service_type: type of the service - :param description: description of the service - """ - self.admin_keystone.create_service(service_type=service_type, - description=description) - self.admin_keystone.list_services() - - -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup": ["keystone"]}, - name="KeystoneBasic.create_and_list_ec2credentials", - platform="openstack") -class CreateAndListEc2Credentials(KeystoneBasic): - - def run(self): - """Create and List all keystone ec2-credentials.""" - self.keystone.create_ec2credentials( - self.context["user"]["id"], - project_id=self.context["tenant"]["id"]) - self.keystone.list_ec2credentials(self.context["user"]["id"]) - - -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup": ["keystone"]}, - name="KeystoneBasic.create_and_delete_ec2credential", - platform="openstack") -class CreateAndDeleteEc2Credential(KeystoneBasic): - - def run(self): - """Create and delete keystone ec2-credential.""" - creds = self.keystone.create_ec2credentials( - self.context["user"]["id"], - project_id=self.context["tenant"]["id"]) - self.keystone.delete_ec2credential( - self.context["user"]["id"], access=creds.access) - - -@validation.add("required_platform", platform="openstack", admin=True) -@scenario.configure(context={"admin_cleanup": ["keystone"]}, - name="KeystoneBasic.create_and_get_role", - platform="openstack") -class CreateAndGetRole(KeystoneBasic): - - def run(self, **kwargs): - """Create a user role and get it detailed information. 
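Both EC2-credential scenarios above reduce to three keystoneclient calls, shown here stripped of the service-layer plumbing (user_id and tenant_id come from the scenario context):

    creds = clients("keystone").ec2.create(user_id, tenant_id)
    clients("keystone").ec2.list(user_id)
    clients("keystone").ec2.delete(user_id, creds.access)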
- - :param kwargs: Optional additional arguments for roles creation - """ - role = self.admin_keystone.create_role(**kwargs) - self.admin_keystone.get_role(role.id) - - -@validation.add("required_platform", platform="openstack", admin=True) -@scenario.configure(context={"admin_cleanup": ["keystone"]}, - name="KeystoneBasic.create_and_list_roles", - platform="openstack") -class CreateAddListRoles(KeystoneBasic): - - def run(self, create_role_kwargs=None, list_role_kwargs=None): - """Create a role, then list all roles. - - :param create_role_kwargs: Optional additional arguments for - roles create - :param list_role_kwargs: Optional additional arguments for roles list - """ - create_role_kwargs = create_role_kwargs or {} - list_role_kwargs = list_role_kwargs or {} - - role = self.admin_keystone.create_role(**create_role_kwargs) - msg = "Role isn't created" - self.assertTrue(role, err_msg=msg) - all_roles = self.admin_keystone.list_roles(**list_role_kwargs) - msg = ("Created role is not in the" - " list of all available roles") - self.assertIn(role, all_roles, err_msg=msg) - - -@validation.add("required_platform", platform="openstack", admin=True) -@scenario.configure(context={"admin_cleanup": ["keystone"]}, - name="KeystoneBasic.create_and_update_user", - platform="openstack") -class CreateAndUpdateUser(KeystoneBasic): - - def run(self, create_user_kwargs=None, update_user_kwargs=None): - """Create user and update the user. - - :param create_user_kwargs: Optional additional arguments for user - creation - :param update_user_kwargs: Optional additional arguments for user - updation - """ - create_user_kwargs = create_user_kwargs or {} - - user = self.admin_keystone.create_user(**create_user_kwargs) - self.admin_keystone.update_user(user.id, **update_user_kwargs) - user_data = self.admin_clients("keystone").users.get(user.id) - - for args in update_user_kwargs: - msg = ("%s isn't updated" % args) - self.assertEqual(getattr(user_data, str(args)), - update_user_kwargs[args], err_msg=msg) diff --git a/rally/plugins/openstack/scenarios/keystone/utils.py b/rally/plugins/openstack/scenarios/keystone/utils.py deleted file mode 100644 index 995a4fb4..00000000 --- a/rally/plugins/openstack/scenarios/keystone/utils.py +++ /dev/null @@ -1,300 +0,0 @@ -# Copyright 2013: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import uuid - -from rally.common.i18n import _LW -from rally.common import logging -from rally.plugins.openstack import scenario -from rally.plugins.openstack.wrappers import keystone as keystone_wrapper -from rally.task import atomic - - -LOG = logging.getLogger(__name__) - - -class KeystoneScenario(scenario.OpenStackScenario): - """Base class for Keystone scenarios with basic atomic actions.""" - - def __init__(self, context=None, admin_clients=None, clients=None): - super(KeystoneScenario, self).__init__(context, admin_clients, clients) - LOG.warning(_LW( - "Class %s is deprecated since Rally 0.8.0 and will be removed " - "soon. 
Use " - "rally.plugins.openstack.services.identity.identity.Identity " - "instead.") % self.__class__) - - @atomic.action_timer("keystone.create_user") - def _user_create(self, email=None, **kwargs): - """Creates keystone user with random name. - - :param kwargs: Other optional parameters to create users like - "tenant_id", "enabled". - :returns: keystone user instance - """ - name = self.generate_random_name() - # NOTE(boris-42): password and email parameters are required by - # keystone client v2.0. This should be cleanuped - # when we switch to v3. - password = kwargs.pop("password", str(uuid.uuid4())) - email = email or (name + "@rally.me") - return self.admin_clients("keystone").users.create( - name, password=password, email=email, **kwargs) - - @atomic.action_timer("keystone.update_user_enabled") - def _update_user_enabled(self, user, enabled): - """Enable or disable a user. - - :param user: The user to enable or disable - :param enabled: Boolean indicating if the user should be - enabled (True) or disabled (False) - """ - self.admin_clients("keystone").users.update_enabled(user, enabled) - - @atomic.action_timer("keystone.validate_token") - def _token_validate(self, token): - """Validate a token for a user. - - :param token: The token to validate - """ - self.admin_clients("keystone").tokens.validate(token) - - @atomic.action_timer("keystone.token_authenticate") - def _authenticate_token(self, name, password, tenant_id, tenant): - """Authenticate user token. - - :param name: The user username - :param password: User password for authentication - :param tenant_id: Tenant id for authentication - :param tenant: Tenant on which authentication will take place - """ - return self.admin_clients("keystone").tokens.authenticate(name, - tenant_id, - tenant, - password) - - def _resource_delete(self, resource): - """"Delete keystone resource.""" - r = "keystone.delete_%s" % resource.__class__.__name__.lower() - with atomic.ActionTimer(self, r): - resource.delete() - - @atomic.action_timer("keystone.create_tenant") - def _tenant_create(self, **kwargs): - """Creates keystone tenant with random name. - - :param kwargs: Other optional parameters - :returns: keystone tenant instance - """ - name = self.generate_random_name() - return self.admin_clients("keystone").tenants.create(name, **kwargs) - - @atomic.action_timer("keystone.create_service") - def _service_create(self, service_type=None, - description=None): - """Creates keystone service with random name. - - :param service_type: type of the service - :param description: description of the service - :returns: keystone service instance - """ - service_type = service_type or "rally_test_type" - description = description or self.generate_random_name() - return self.admin_clients("keystone").services.create( - self.generate_random_name(), - service_type, description=description) - - @atomic.action_timer("keystone.create_users") - def _users_create(self, tenant, users_per_tenant): - """Adds users to a tenant. - - :param tenant: tenant object - :param users_per_tenant: number of users in per tenant - """ - for i in range(users_per_tenant): - name = self.generate_random_name() - password = name - email = name + "@rally.me" - self.admin_clients("keystone").users.create( - name, password=password, email=email, tenant_id=tenant.id) - - @atomic.action_timer("keystone.create_role") - def _role_create(self, **kwargs): - """Creates keystone user role with random name. 
- - :param **kwargs: Optional additional arguments for roles creation - :returns: keystone user role - """ - admin_clients = keystone_wrapper.wrap(self.admin_clients("keystone")) - - role = admin_clients.create_role( - self.generate_random_name(), **kwargs) - return role - - @atomic.action_timer("keystone.role_delete") - def _role_delete(self, role_id): - """Delete keystone user role. - - :param role_id: id of the role - """ - admin_clients = keystone_wrapper.wrap(self.admin_clients("keystone")) - - admin_clients.delete_role(role_id) - - @atomic.action_timer("keystone.list_users") - def _list_users(self): - """List users.""" - return self.admin_clients("keystone").users.list() - - @atomic.action_timer("keystone.list_tenants") - def _list_tenants(self): - """List tenants.""" - return self.admin_clients("keystone").tenants.list() - - @atomic.action_timer("keystone.service_list") - def _list_services(self): - """List services.""" - return self.admin_clients("keystone").services.list() - - @atomic.action_timer("keystone.list_roles") - def _list_roles_for_user(self, user, tenant): - """List user roles. - - :param user: user for whom roles will be listed - :param tenant: tenant on which the user has roles - """ - return self.admin_clients("keystone").roles.roles_for_user( - user, tenant) - - @atomic.action_timer("keystone.add_role") - def _role_add(self, user, role, tenant): - """Add role to a given user on a tenant. - - :param user: user to be assigned the role to - :param role: user role to assign - :param tenant: tenant on which the assignment will take place - """ - self.admin_clients("keystone").roles.add_user_role(user, role, tenant) - - @atomic.action_timer("keystone.remove_role") - def _role_remove(self, user, role, tenant): - """Dissociate user from role. - - :param user: user to be stripped of the role - :param role: role to be dissociated from the user - :param tenant: tenant on which the assignment took place - """ - self.admin_clients("keystone").roles.remove_user_role(user, - role, tenant) - - @atomic.action_timer("keystone.get_tenant") - def _get_tenant(self, tenant_id): - """Get given tenant. - - :param tenant_id: id of the tenant - """ - return self.admin_clients("keystone").tenants.get(tenant_id) - - @atomic.action_timer("keystone.get_user") - def _get_user(self, user_id): - """Get given user. - - :param user_id: id of the user - """ - return self.admin_clients("keystone").users.get(user_id) - - @atomic.action_timer("keystone.get_role") - def _get_role(self, role_id): - """Get given user role. - - :param role_id: id of the user role - """ - return self.admin_clients("keystone").roles.get(role_id) - - @atomic.action_timer("keystone.get_service") - def _get_service(self, service_id): - """Get service with given service id. - - :param service_id: id of the service - """ - return self.admin_clients("keystone").services.get(service_id) - - def _get_service_by_name(self, name): - for i in self._list_services(): - if i.name == name: - return i - - @atomic.action_timer("keystone.delete_service") - def _delete_service(self, service_id): - """Delete service. - - :param service_id: service to be deleted - """ - self.admin_clients("keystone").services.delete(service_id) - - @atomic.action_timer("keystone.update_tenant") - def _update_tenant(self, tenant, description=None): - """Update tenant name and description.
- - :param tenant: tenant to be updated - :param description: tenant description to be set - """ - name = self.generate_random_name() - description = description or self.generate_random_name() - self.admin_clients("keystone").tenants.update(tenant.id, - name, description) - - @atomic.action_timer("keystone.update_user_password") - def _update_user_password(self, user_id, password): - """Update user password. - - :param user_id: id of the user - :param password: new password - """ - admin_clients = self.admin_clients("keystone") - if admin_clients.version in ["v3"]: - admin_clients.users.update(user_id, password=password) - else: - admin_clients.users.update_password(user_id, password) - - @atomic.action_timer("keystone.create_ec2creds") - def _create_ec2credentials(self, user_id, tenant_id): - """Create ec2credentials. - - :param user_id: User ID for which to create credentials - :param tenant_id: Tenant ID for which to create credentials - - :returns: Created ec2-credentials object - """ - return self.clients("keystone").ec2.create(user_id, tenant_id) - - @atomic.action_timer("keystone.list_ec2creds") - def _list_ec2credentials(self, user_id): - """List of access/secret pairs for a user_id. - - :param user_id: List all ec2-credentials for User ID - - :returns: Return ec2-credentials list - """ - return self.clients("keystone").ec2.list(user_id) - - @atomic.action_timer("keystone.delete_ec2creds") - def _delete_ec2credential(self, user_id, access): - """Delete ec2credential. - - :param user_id: User ID for which to delete credential - :param access: access key for ec2credential to delete - """ - self.clients("keystone").ec2.delete(user_id, access) diff --git a/rally/plugins/openstack/scenarios/magnum/__init__.py b/rally/plugins/openstack/scenarios/magnum/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/rally/plugins/openstack/scenarios/magnum/cluster_templates.py b/rally/plugins/openstack/scenarios/magnum/cluster_templates.py deleted file mode 100644 index 6c86d8bb..00000000 --- a/rally/plugins/openstack/scenarios/magnum/cluster_templates.py +++ /dev/null @@ -1,46 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally import consts -from rally.plugins.openstack import scenario -from rally.plugins.openstack.scenarios.magnum import utils -from rally.task import validation - - -"""Scenarios for Magnum cluster_templates.""" - - -@validation.add("required_services", services=[consts.Service.MAGNUM]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup": ["magnum"]}, - name="MagnumClusterTemplates.list_cluster_templates", - platform="openstack") -class ListClusterTemplates(utils.MagnumScenario): - - def run(self, **kwargs): - """List all cluster_templates. - - Measure the "magnum cluster_template-list" command performance. - - :param limit: (Optional) The maximum number of results to return - per request, if: - - 1) limit > 0, the maximum number of cluster_templates to return. 
-                  2) limit param is NOT specified (None), the number of items
-                  returned respects the maximum imposed by the Magnum API
-                  (see Magnum's api.max_limit option).
-        :param kwargs: optional additional arguments for cluster_templates
-                       listing
-        """
-        self._list_cluster_templates(**kwargs)
diff --git a/rally/plugins/openstack/scenarios/magnum/clusters.py b/rally/plugins/openstack/scenarios/magnum/clusters.py
deleted file mode 100644
index 0394dc9f..00000000
--- a/rally/plugins/openstack/scenarios/magnum/clusters.py
+++ /dev/null
@@ -1,82 +0,0 @@
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from rally import consts
-from rally.plugins.openstack import scenario
-from rally.plugins.openstack.scenarios.magnum import utils
-from rally.plugins.openstack.scenarios.nova import utils as nova_utils
-from rally.task import validation
-
-"""Scenarios for Magnum clusters."""
-
-
-@validation.add("required_services", services=[consts.Service.MAGNUM])
-@validation.add("required_platform", platform="openstack", users=True)
-@scenario.configure(context={"cleanup": ["magnum.clusters"]},
-                    name="MagnumClusters.list_clusters",
-                    platform="openstack")
-class ListClusters(utils.MagnumScenario):
-
-    def run(self, **kwargs):
-        """List all clusters.
-
-        Measure the "magnum clusters-list" command performance.
-
-        :param limit: (Optional) The maximum number of results to return
-                      per request, if:
-
-                  1) limit > 0, the maximum number of clusters to return.
-                  2) limit param is NOT specified (None), the number of items
-                  returned respects the maximum imposed by the Magnum API
-                  (see Magnum's api.max_limit option).
-
-        :param kwargs: optional additional arguments for clusters listing
-        """
-        self._list_clusters(**kwargs)
-
-
-@validation.add("required_services", services=[consts.Service.MAGNUM])
-@validation.add("required_platform", platform="openstack", users=True)
-@scenario.configure(context={"cleanup": ["magnum.clusters", "nova.keypairs"]},
-                    name="MagnumClusters.create_and_list_clusters",
-                    platform="openstack")
-class CreateAndListClusters(utils.MagnumScenario):
-
-    def run(self, node_count, **kwargs):
-        """Create a cluster and then list all clusters.
-
-        :param node_count: the cluster node count.
-        :param cluster_template_uuid: optional, if the user wants to use an
-                                      existing cluster_template
-        :param kwargs: optional additional arguments for cluster creation
-        """
-        cluster_template_uuid = kwargs.get("cluster_template_uuid", None)
-        if cluster_template_uuid is None:
-            cluster_template_uuid = self.context["tenant"]["cluster_template"]
-        else:
-            del kwargs["cluster_template_uuid"]
-
-        nova_scenario = nova_utils.NovaScenario({
-            "user": self.context["user"],
-            "task": self.context["task"],
-            "config": {"api_versions": self.context["config"].get(
-                "api_versions", [])}
-        })
-        keypair = nova_scenario._create_keypair()
-
-        new_cluster = self._create_cluster(cluster_template_uuid, node_count,
-                                           keypair=keypair, **kwargs)
-        self.assertTrue(new_cluster, "Failed to create new cluster")
-        clusters = self._list_clusters(**kwargs)
-        self.assertIn(new_cluster.uuid, [cluster.uuid for cluster in clusters],
-                      "New cluster not found in a list of clusters")
diff --git a/rally/plugins/openstack/scenarios/magnum/k8s_pods.py b/rally/plugins/openstack/scenarios/magnum/k8s_pods.py
deleted file mode 100644
index d3a9dbf8..00000000
--- a/rally/plugins/openstack/scenarios/magnum/k8s_pods.py
+++ /dev/null
@@ -1,73 +0,0 @@
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import yaml
-
-from rally import consts
-from rally.plugins.openstack import scenario
-from rally.plugins.openstack.scenarios.magnum import utils
-from rally.task import validation
-
-
-"""Scenarios for Kubernetes pods and rcs."""
-
-
-@validation.add("required_services", services=consts.Service.MAGNUM)
-@validation.add("required_platform", platform="openstack", users=True)
-@scenario.configure(name="K8sPods.list_pods", platform="openstack")
-class ListPods(utils.MagnumScenario):
-
-    def run(self):
-        """List all pods."""
-        self._list_v1pods()
-
-
-@validation.add("required_services", services=consts.Service.MAGNUM)
-@validation.add("required_platform", platform="openstack", users=True)
-@scenario.configure(name="K8sPods.create_pods", platform="openstack")
-class CreatePods(utils.MagnumScenario):
-
-    def run(self, manifests):
-        """Create pods and wait for them to be ready.
-
-        :param manifests: manifest files used to create the pods
-        """
-        for manifest in manifests:
-            with open(manifest, "r") as f:
-                manifest_str = f.read()
-                manifest = yaml.safe_load(manifest_str)
-            pod = self._create_v1pod(manifest)
-            msg = "Pod isn't created"
-            self.assertTrue(pod, err_msg=msg)
-
-
-@validation.add("required_services", services=consts.Service.MAGNUM)
-@validation.add("required_platform", platform="openstack", users=True)
-@scenario.configure(name="K8sPods.create_rcs", platform="openstack")
-class CreateRcs(utils.MagnumScenario):
-
-    def run(self, manifests):
-        """Create rcs and wait for them to be ready.
-
-        :param manifests: manifest files used to create the rcs
-        """
-        for manifest in manifests:
-            with open(manifest, "r") as f:
-                manifest_str = f.read()
-                manifest = yaml.safe_load(manifest_str)
-            rc = self._create_v1rc(manifest)
-            msg = "RC isn't created"
-            self.assertTrue(rc, err_msg=msg)
diff --git a/rally/plugins/openstack/scenarios/magnum/utils.py b/rally/plugins/openstack/scenarios/magnum/utils.py
deleted file mode 100644
index 88675ad2..00000000
--- a/rally/plugins/openstack/scenarios/magnum/utils.py
+++ /dev/null
@@ -1,265 +0,0 @@
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import os
-import random
-import string
-import time
-
-from oslo_config import cfg
-
-from kubernetes import client as k8s_config
-from kubernetes.client import api_client
-from kubernetes.client.apis import core_v1_api
-from kubernetes.client.rest import ApiException
-from rally.common import utils as common_utils
-from rally import exceptions
-from rally.plugins.openstack import scenario
-from rally.task import atomic
-from rally.task import utils
-
-CONF = cfg.CONF
-
-
-class MagnumScenario(scenario.OpenStackScenario):
-    """Base class for Magnum scenarios with basic atomic actions."""
-
-    @atomic.action_timer("magnum.list_cluster_templates")
-    def _list_cluster_templates(self, **kwargs):
-        """Return list of cluster_templates.
-
-        :param limit: (Optional) The maximum number of results to return
-                      per request, if:
-
-                  1) limit > 0, the maximum number of cluster_templates
-                  to return.
-                  2) limit param is NOT specified (None), the number of items
-                  returned respects the maximum imposed by the Magnum API
-                  (see Magnum's api.max_limit option).
-        :param kwargs: Optional additional arguments for cluster_templates
-                       listing
-
-        :returns: cluster_templates list
-        """
-
-        return self.clients("magnum").cluster_templates.list(**kwargs)
-
-    @atomic.action_timer("magnum.create_cluster_template")
-    def _create_cluster_template(self, **kwargs):
-        """Create a cluster_template.
-
-        :param kwargs: optional additional arguments for cluster_template
-                       creation
-        :returns: magnum cluster_template
-        """
-
-        kwargs["name"] = self.generate_random_name()
-
-        return self.clients("magnum").cluster_templates.create(**kwargs)
-
-    @atomic.action_timer("magnum.get_cluster_template")
-    def _get_cluster_template(self, cluster_template):
-        """Return details of the specified cluster template.
-
-        :param cluster_template: ID or name of the cluster template to show
-        :returns: clustertemplate detail
-        """
-        return self.clients("magnum").cluster_templates.get(cluster_template)
-
-    @atomic.action_timer("magnum.list_clusters")
-    def _list_clusters(self, limit=None, **kwargs):
-        """Return list of clusters.
-
-        :param limit: Optional, the maximum number of results to return
-                      per request, if:
-
-                  1) limit > 0, the maximum number of clusters to return.
-                  2) limit param is NOT specified (None), the number of items
-                  returned respects the maximum imposed by the Magnum API
-                  (see Magnum's api.max_limit option).
-        :param kwargs: Optional additional arguments for clusters listing
-
-        :returns: clusters list
-        """
-        return self.clients("magnum").clusters.list(limit=limit, **kwargs)
-
-    @atomic.action_timer("magnum.create_cluster")
-    def _create_cluster(self, cluster_template, node_count, **kwargs):
-        """Create a cluster.
-
-        :param cluster_template: cluster_template for the cluster
-        :param node_count: the cluster node count
-        :param kwargs: optional additional arguments for cluster creation
-        :returns: magnum cluster
-        """
-
-        name = self.generate_random_name()
-        cluster = self.clients("magnum").clusters.create(
-            name=name, cluster_template_id=cluster_template,
-            node_count=node_count, **kwargs)
-
-        common_utils.interruptable_sleep(
-            CONF.benchmark.magnum_cluster_create_prepoll_delay)
-        cluster = utils.wait_for_status(
-            cluster,
-            ready_statuses=["CREATE_COMPLETE"],
-            update_resource=utils.get_from_manager(),
-            timeout=CONF.benchmark.magnum_cluster_create_timeout,
-            check_interval=CONF.benchmark.magnum_cluster_create_poll_interval,
-            id_attr="uuid"
-        )
-        return cluster
-
-    @atomic.action_timer("magnum.get_cluster")
-    def _get_cluster(self, cluster):
-        """Return details of the specified cluster.
-
-        :param cluster: ID or name of the cluster to show
-        :returns: cluster detail
-        """
-        return self.clients("magnum").clusters.get(cluster)
-
-    @atomic.action_timer("magnum.get_ca_certificate")
-    def _get_ca_certificate(self, cluster_uuid):
-        """Get the CA certificate for this cluster.
-
-        :param cluster_uuid: uuid of the cluster
-        """
-        return self.clients("magnum").certificates.get(cluster_uuid)
-
-    @atomic.action_timer("magnum.create_ca_certificate")
-    def _create_ca_certificate(self, csr_req):
-        """Send csr to Magnum to have it signed.
-
-        :param csr_req: {"cluster_uuid": , "csr": }
-        """
-        return self.clients("magnum").certificates.create(**csr_req)
-
-    def _get_k8s_api_client(self):
-        cluster_uuid = self.context["tenant"]["cluster"]
-        cluster = self._get_cluster(cluster_uuid)
-        cluster_template = self._get_cluster_template(
-            cluster.cluster_template_id)
-        key_file = None
-        cert_file = None
-        ca_certs = None
-        if not cluster_template.tls_disabled:
-            dir = self.context["ca_certs_directory"]
-            key_file = cluster_uuid + ".key"
-            key_file = os.path.join(dir, key_file)
-            cert_file = cluster_uuid + ".crt"
-            cert_file = os.path.join(dir, cert_file)
-            ca_certs = cluster_uuid + "_ca.crt"
-            ca_certs = os.path.join(dir, ca_certs)
-        config = k8s_config.ConfigurationObject()
-        config.host = cluster.api_address
-        config.ssl_ca_cert = ca_certs
-        config.cert_file = cert_file
-        config.key_file = key_file
-        client = api_client.ApiClient(config=config)
-        return core_v1_api.CoreV1Api(client)
-
-    @atomic.action_timer("magnum.k8s_list_v1pods")
-    def _list_v1pods(self):
-        """List all pods."""
-        k8s_api = self._get_k8s_api_client()
-        return k8s_api.list_namespaced_pod(namespace="default")
-
-    @atomic.action_timer("magnum.k8s_create_v1pod")
-    def _create_v1pod(self, manifest):
-        """Create a pod on the specified cluster.
-
-        :param manifest: manifest used to create the pod
-        """
-        k8s_api = self._get_k8s_api_client()
-        podname = manifest["metadata"]["name"] + "-"
-        for i in range(5):
-            podname = podname + random.choice(string.ascii_lowercase)
-        manifest["metadata"]["name"] = podname
-
-        # The API can transiently reject creation with HTTP 403;
-        # retry up to 150 times before giving up.
-        for i in range(150):
-            try:
-                k8s_api.create_namespaced_pod(body=manifest,
-                                              namespace="default")
-                break
-            except ApiException as e:
-                if e.status != 403:
-                    raise
-                time.sleep(2)
-
-        start = time.time()
-        while True:
-            resp = k8s_api.read_namespaced_pod(
-                name=podname, namespace="default")
-
-            if resp.status.conditions:
-                for condition in resp.status.conditions:
-                    if condition.type.lower() == "ready" and \
-                            condition.status.lower() == "true":
-                        return resp
-
-            if (time.time() - start > CONF.benchmark.k8s_pod_create_timeout):
-                raise exceptions.TimeoutException(
-                    desired_status="Ready",
-                    resource_name=podname,
-                    resource_type="Pod",
-                    resource_id=resp.metadata.uid,
-                    resource_status=resp.status)
-            common_utils.interruptable_sleep(
-                CONF.benchmark.k8s_pod_create_poll_interval)
-
-    @atomic.action_timer("magnum.k8s_list_v1rcs")
-    def _list_v1rcs(self):
-        """List all rcs."""
-        k8s_api = self._get_k8s_api_client()
-        return k8s_api.list_namespaced_replication_controller(
-            namespace="default")
-
-    @atomic.action_timer("magnum.k8s_create_v1rc")
-    def _create_v1rc(self, manifest):
-        """Create an rc on the specified cluster.
-
-        :param manifest: manifest used to create the replication controller
-        """
-        k8s_api = self._get_k8s_api_client()
-        suffix = "-"
-        for i in range(5):
-            suffix = suffix + random.choice(string.ascii_lowercase)
-        rcname = manifest["metadata"]["name"] + suffix
-        manifest["metadata"]["name"] = rcname
-        resp = k8s_api.create_namespaced_replication_controller(
-            body=manifest,
-            namespace="default")
-        expected_status = resp.spec.replicas
-        start = time.time()
-        while True:
-            resp = k8s_api.read_namespaced_replication_controller(
-                name=rcname,
-                namespace="default")
-            status = resp.status.replicas
-            if status == expected_status:
-                return resp
-            else:
-                if time.time() - start > CONF.benchmark.k8s_rc_create_timeout:
-                    raise exceptions.TimeoutException(
-                        desired_status=expected_status,
-                        resource_name=rcname,
-                        resource_type="ReplicationController",
-                        resource_id=resp.metadata.uid,
-                        resource_status=status)
-                common_utils.interruptable_sleep(
-                    CONF.benchmark.k8s_rc_create_poll_interval)
diff --git a/rally/plugins/openstack/scenarios/manila/__init__.py b/rally/plugins/openstack/scenarios/manila/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/rally/plugins/openstack/scenarios/manila/shares.py b/rally/plugins/openstack/scenarios/manila/shares.py
deleted file mode 100644
index af8f1b46..00000000
--- a/rally/plugins/openstack/scenarios/manila/shares.py
+++ /dev/null
@@ -1,378 +0,0 @@
-# Copyright 2015 Mirantis Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
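The Magnum Kubernetes helpers above (_create_v1pod, _create_v1rc) share one submit-then-poll pattern: create the object, then repeatedly read its status until a readiness condition holds or a configured timeout expires. A minimal standalone sketch of that loop, assuming only the Python standard library (wait_until_ready, read_status and is_ready are illustrative names, not Rally or Kubernetes APIs):

    import time

    def wait_until_ready(read_status, is_ready, timeout=600.0, interval=2.0):
        # Poll read_status() until is_ready(status) is true, or fail
        # once the deadline passes, mirroring the loops above.
        deadline = time.time() + timeout
        while True:
            status = read_status()
            if is_ready(status):
                return status
            if time.time() > deadline:
                raise TimeoutError("resource not ready after %ss" % timeout)
            time.sleep(interval)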
- -from rally.common import logging -from rally import consts -from rally.plugins.openstack.context.manila import consts as manila_consts -from rally.plugins.openstack import scenario -from rally.plugins.openstack.scenarios.manila import utils -from rally.task import validation - - -"""Scenarios for Manila shares.""" - - -@validation.validate_share_proto() -@validation.add("required_services", services=[consts.Service.MANILA]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup": ["manila"]}, - name="ManilaShares.create_and_delete_share", - platform="openstack") -class CreateAndDeleteShare(utils.ManilaScenario): - - def run(self, share_proto, size=1, min_sleep=0, max_sleep=0, **kwargs): - """Create and delete a share. - - Optional 'min_sleep' and 'max_sleep' parameters allow the scenario - to simulate a pause between share creation and deletion - (of random duration from [min_sleep, max_sleep]). - - :param share_proto: share protocol, valid values are NFS, CIFS, - GlusterFS and HDFS - :param size: share size in GB, should be greater than 0 - :param min_sleep: minimum sleep time in seconds (non-negative) - :param max_sleep: maximum sleep time in seconds (non-negative) - :param kwargs: optional args to create a share - """ - share = self._create_share( - share_proto=share_proto, - size=size, - **kwargs) - self.sleep_between(min_sleep, max_sleep) - self._delete_share(share) - - -@validation.add("required_services", services=[consts.Service.MANILA]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(name="ManilaShares.list_shares", platform="openstack") -class ListShares(utils.ManilaScenario): - - def run(self, detailed=True, search_opts=None): - """Basic scenario for 'share list' operation. - - :param detailed: defines either to return detailed list of - objects or not. - :param search_opts: container of search opts such as - "name", "host", "share_type", etc. - """ - self._list_shares(detailed=detailed, search_opts=search_opts) - - -@validation.add("enum", param_name="share_proto", values=["nfs", "cephfs", - "cifs", "glusterfs", "hdfs"], missed=False, - case_insensitive=True) -@validation.add("required_services", services=[consts.Service.MANILA]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup": ["manila"]}, - name="ManilaShares.create_and_extend_share", - platform="openstack") -class CreateAndExtendShare(utils.ManilaScenario): - def run(self, share_proto, size=1, new_size=2, snapshot_id=None, - description=None, metadata=None, share_network=None, - share_type=None, is_public=False, availability_zone=None, - share_group_id=None): - """Create and extend a share - - :param share_proto: share protocol for new share - available values are NFS, CIFS, CephFS, GlusterFS and HDFS. - :param size: size in GiB - :param new_size: new size of the share in GiB - :param snapshot_id: ID of the snapshot - :param description: description of a share - :param metadata: optional metadata to set on share creation - :param share_network: either instance of ShareNetwork or text with ID - :param share_type: either instance of ShareType or text with ID - :param is_public: whether to set share as public or not. 
- :param availability_zone: availability zone of the share - :param share_group_id: ID of the share group to which the share - should belong - """ - share = self._create_share( - share_proto=share_proto, - size=size, - snapshot_id=snapshot_id, - description=description, - metadata=metadata, - share_network=share_network, - share_type=share_type, - is_public=is_public, - availability_zone=availability_zone, - share_group_id=share_group_id - ) - self._extend_share(share, new_size) - - -@validation.add("enum", param_name="share_proto", values=["nfs", "cephfs", - "cifs", "glusterfs", "hdfs"], missed=False, - case_insensitive=True) -@validation.add("required_services", services=[consts.Service.MANILA]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup": ["manila"]}, - name="ManilaShares.create_and_shrink_share", - platform="openstack") -class CreateAndShrinkShare(utils.ManilaScenario): - def run(self, share_proto, size=2, new_size=1, snapshot_id=None, - description=None, metadata=None, share_network=None, - share_type=None, is_public=False, availability_zone=None, - share_group_id=None): - """Create and shrink a share - - :param share_proto: share protocol for new share - available values are NFS, CIFS, CephFS, GlusterFS and HDFS. - :param size: size in GiB - :param new_size: new size of the share in GiB - :param snapshot_id: ID of the snapshot - :param description: description of a share - :param metadata: optional metadata to set on share creation - :param share_network: either instance of ShareNetwork or text with ID - :param share_type: either instance of ShareType or text with ID - :param is_public: whether to set share as public or not. - :param availability_zone: availability zone of the share - :param share_group_id: ID of the share group to which the share - should belong - """ - share = self._create_share( - share_proto=share_proto, - size=size, - snapshot_id=snapshot_id, - description=description, - metadata=metadata, - share_network=share_network, - share_type=share_type, - is_public=is_public, - availability_zone=availability_zone, - share_group_id=share_group_id - ) - self._shrink_share(share, new_size) - - -@validation.add("required_services", services=[consts.Service.MANILA]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup": ["manila"]}, - name="ManilaShares.create_share_network_and_delete", - platform="openstack") -class CreateShareNetworkAndDelete(utils.ManilaScenario): - - @logging.log_deprecated_args( - "The 'name' argument to create_and_delete_service will be ignored", - "1.1.2", ["name"], once=True) - def run(self, neutron_net_id=None, neutron_subnet_id=None, - nova_net_id=None, name=None, description=None): - """Creates share network and then deletes. 
- - :param neutron_net_id: ID of Neutron network - :param neutron_subnet_id: ID of Neutron subnet - :param nova_net_id: ID of Nova network - :param description: share network description - """ - share_network = self._create_share_network( - neutron_net_id=neutron_net_id, - neutron_subnet_id=neutron_subnet_id, - nova_net_id=nova_net_id, - description=description, - ) - self._delete_share_network(share_network) - - -@validation.add("required_services", services=[consts.Service.MANILA]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup": ["manila"]}, - name="ManilaShares.create_share_network_and_list", - platform="openstack") -class CreateShareNetworkAndList(utils.ManilaScenario): - - @logging.log_deprecated_args( - "The 'name' argument to create_and_delete_service will be ignored", - "1.1.2", ["name"], once=True) - def run(self, neutron_net_id=None, neutron_subnet_id=None, - nova_net_id=None, name=None, description=None, - detailed=True, search_opts=None): - """Creates share network and then lists it. - - :param neutron_net_id: ID of Neutron network - :param neutron_subnet_id: ID of Neutron subnet - :param nova_net_id: ID of Nova network - :param description: share network description - :param detailed: defines either to return detailed list of - objects or not. - :param search_opts: container of search opts such as - "name", "nova_net_id", "neutron_net_id", etc. - """ - self._create_share_network( - neutron_net_id=neutron_net_id, - neutron_subnet_id=neutron_subnet_id, - nova_net_id=nova_net_id, - description=description, - ) - self._list_share_networks( - detailed=detailed, - search_opts=search_opts, - ) - - -@validation.add("required_services", services=[consts.Service.MANILA]) -@validation.add("required_platform", platform="openstack", admin=True) -@scenario.configure(name="ManilaShares.list_share_servers", - platform="openstack") -class ListShareServers(utils.ManilaScenario): - - def run(self, search_opts=None): - """Lists share servers. - - Requires admin creds. - - :param search_opts: container of following search opts: - "host", "status", "share_network" and "project_id". - """ - self._list_share_servers(search_opts=search_opts) - - -@validation.add("required_services", services=[consts.Service.MANILA]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup": ["manila"]}, - name="ManilaShares.create_security_service_and_delete", - platform="openstack") -class CreateSecurityServiceAndDelete(utils.ManilaScenario): - - @logging.log_deprecated_args( - "The 'name' argument to create_and_delete_service will be ignored", - "1.1.2", ["name"], once=True) - def run(self, security_service_type, dns_ip=None, server=None, - domain=None, user=None, password=None, - name=None, description=None): - """Creates security service and then deletes. - - :param security_service_type: security service type, permitted values - are 'ldap', 'kerberos' or 'active_directory'. 
- :param dns_ip: dns ip address used inside tenant's network - :param server: security service server ip address or hostname - :param domain: security service domain - :param user: security identifier used by tenant - :param password: password used by user - :param description: security service description - """ - security_service = self._create_security_service( - security_service_type=security_service_type, - dns_ip=dns_ip, - server=server, - domain=domain, - user=user, - password=password, - description=description, - ) - self._delete_security_service(security_service) - - -@validation.add("required_services", services=[consts.Service.MANILA]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup": ["manila"]}, - name="ManilaShares." - "attach_security_service_to_share_network", - platform="openstack") -class AttachSecurityServiceToShareNetwork(utils.ManilaScenario): - - def run(self, security_service_type="ldap"): - """Attaches security service to share network. - - :param security_service_type: type of security service to use. - Should be one of following: 'ldap', 'kerberos' or - 'active_directory'. - """ - sn = self._create_share_network() - ss = self._create_security_service( - security_service_type=security_service_type) - self._add_security_service_to_share_network(sn, ss) - - -@validation.validate_share_proto() -@validation.add("required_services", services=[consts.Service.MANILA]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup": ["manila"]}, - name="ManilaShares.create_and_list_share", - platform="openstack") -class CreateAndListShare(utils.ManilaScenario): - - def run(self, share_proto, size=1, min_sleep=0, max_sleep=0, detailed=True, - **kwargs): - """Create a share and list all shares. - - Optional 'min_sleep' and 'max_sleep' parameters allow the scenario - to simulate a pause between share creation and list - (of random duration from [min_sleep, max_sleep]). 
- - :param share_proto: share protocol, valid values are NFS, CIFS, - GlusterFS and HDFS - :param size: share size in GB, should be greater than 0 - :param min_sleep: minimum sleep time in seconds (non-negative) - :param max_sleep: maximum sleep time in seconds (non-negative) - :param detailed: defines whether to get detailed list of shares or not - :param kwargs: optional args to create a share - """ - self._create_share(share_proto=share_proto, size=size, **kwargs) - self.sleep_between(min_sleep, max_sleep) - self._list_shares(detailed=detailed) - - -@validation.add("number", param_name="sets", minval=1, integer_only=True) -@validation.add("number", param_name="set_size", minval=1, integer_only=True) -@validation.add("number", param_name="key_min_length", minval=1, maxval=256, - integer_only=True) -@validation.add("number", param_name="key_max_length", minval=1, maxval=256, - integer_only=True) -@validation.add("number", param_name="value_min_length", minval=1, maxval=1024, - integer_only=True) -@validation.add("number", param_name="value_max_length", minval=1, maxval=1024, - integer_only=True) -@validation.add("required_services", services=[consts.Service.MANILA]) -@validation.add("required_platform", platform="openstack", users=True) -@validation.add("required_contexts", - contexts=manila_consts.SHARES_CONTEXT_NAME) -@scenario.configure(context={"cleanup": ["manila"]}, - name="ManilaShares.set_and_delete_metadata", - platform="openstack") -class SetAndDeleteMetadata(utils.ManilaScenario): - - def run(self, sets=10, set_size=3, delete_size=3, - key_min_length=1, key_max_length=256, - value_min_length=1, value_max_length=1024): - """Sets and deletes share metadata. - - This requires a share to be created with the shares - context. Additionally, ``sets * set_size`` must be greater - than or equal to ``deletes * delete_size``. - - :param sets: how many set_metadata operations to perform - :param set_size: number of metadata keys to set in each - set_metadata operation - :param delete_size: number of metadata keys to delete in each - delete_metadata operation - :param key_min_length: minimal size of metadata key to set - :param key_max_length: maximum size of metadata key to set - :param value_min_length: minimal size of metadata value to set - :param value_max_length: maximum size of metadata value to set - """ - shares = self.context.get("tenant", {}).get("shares", []) - share = shares[self.context["iteration"] % len(shares)] - - keys = self._set_metadata( - share=share, - sets=sets, - set_size=set_size, - key_min_length=key_min_length, - key_max_length=key_max_length, - value_min_length=value_min_length, - value_max_length=value_max_length) - - self._delete_metadata(share=share, keys=keys, delete_size=delete_size) diff --git a/rally/plugins/openstack/scenarios/manila/utils.py b/rally/plugins/openstack/scenarios/manila/utils.py deleted file mode 100644 index 407948d1..00000000 --- a/rally/plugins/openstack/scenarios/manila/utils.py +++ /dev/null @@ -1,312 +0,0 @@ -# Copyright 2015 Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -import random - -from oslo_config import cfg - -from rally import exceptions -from rally.plugins.openstack.context.manila import consts -from rally.plugins.openstack import scenario -from rally.task import atomic -from rally.task import utils - - -CONF = cfg.CONF - - -class ManilaScenario(scenario.OpenStackScenario): - """Base class for Manila scenarios with basic atomic actions.""" - - @atomic.action_timer("manila.create_share") - def _create_share(self, share_proto, size=1, **kwargs): - """Create a share. - - :param share_proto: share protocol for new share, - available values are NFS, CIFS, GlusterFS, HDFS and CEPHFS. - :param size: size of a share in GB - :param snapshot_id: ID of the snapshot - :param name: name of new share - :param description: description of a share - :param metadata: optional metadata to set on share creation - :param share_network: either instance of ShareNetwork or str with ID - :param share_type: either instance of ShareType or str with ID - :param is_public: defines whether to set share as public or not. - :returns: instance of :class:`Share` - """ - if self.context: - share_networks = self.context.get("tenant", {}).get( - consts.SHARE_NETWORKS_CONTEXT_NAME, {}).get( - "share_networks", []) - if share_networks and not kwargs.get("share_network"): - kwargs["share_network"] = share_networks[ - self.context["iteration"] % len(share_networks)]["id"] - - if not kwargs.get("name"): - kwargs["name"] = self.generate_random_name() - - share = self.clients("manila").shares.create( - share_proto, size, **kwargs) - - self.sleep_between(CONF.benchmark.manila_share_create_prepoll_delay) - share = utils.wait_for( - share, - ready_statuses=["available"], - update_resource=utils.get_from_manager(), - timeout=CONF.benchmark.manila_share_create_timeout, - check_interval=CONF.benchmark.manila_share_create_poll_interval, - ) - return share - - @atomic.action_timer("manila.delete_share") - def _delete_share(self, share): - """Delete the given share. - - :param share: :class:`Share` - """ - share.delete() - error_statuses = ("error_deleting", ) - utils.wait_for_status( - share, - ready_statuses=["deleted"], - check_deletion=True, - update_resource=utils.get_from_manager(error_statuses), - timeout=CONF.benchmark.manila_share_delete_timeout, - check_interval=CONF.benchmark.manila_share_delete_poll_interval) - - @atomic.action_timer("manila.list_shares") - def _list_shares(self, detailed=True, search_opts=None): - """Returns user shares list. - - :param detailed: defines either to return detailed list of - objects or not. - :param search_opts: container of search opts such as - "name", "host", "share_type", etc. 
- """ - return self.clients("manila").shares.list( - detailed=detailed, search_opts=search_opts) - - @atomic.action_timer("manila.extend_share") - def _extend_share(self, share, new_size): - """Extend the given share - - :param share: :class:`Share` - :param new_size: new size of the share - """ - share.extend(new_size) - utils.wait_for_status( - share, - ready_statuses=["available"], - update_resource=utils.get_from_manager(), - timeout=CONF.benchmark.manila_share_create_timeout, - check_interval=CONF.benchmark.manila_share_create_poll_interval) - - @atomic.action_timer("manila.shrink_share") - def _shrink_share(self, share, new_size): - """Shrink the given share - - :param share: :class:`Share` - :param new_size: new size of the share - """ - share.shrink(new_size) - utils.wait_for_status( - share, - ready_statuses=["available"], - update_resource=utils.get_from_manager(), - timeout=CONF.benchmark.manila_share_create_timeout, - check_interval=CONF.benchmark.manila_share_create_poll_interval) - - @atomic.action_timer("manila.create_share_network") - def _create_share_network(self, neutron_net_id=None, - neutron_subnet_id=None, - nova_net_id=None, description=None): - """Create share network. - - :param neutron_net_id: ID of Neutron network - :param neutron_subnet_id: ID of Neutron subnet - :param nova_net_id: ID of Nova network - :param description: share network description - :returns: instance of :class:`ShareNetwork` - """ - share_network = self.clients("manila").share_networks.create( - neutron_net_id=neutron_net_id, - neutron_subnet_id=neutron_subnet_id, - nova_net_id=nova_net_id, - name=self.generate_random_name(), - description=description) - return share_network - - @atomic.action_timer("manila.delete_share_network") - def _delete_share_network(self, share_network): - """Delete share network. - - :param share_network: instance of :class:`ShareNetwork`. - """ - share_network.delete() - utils.wait_for_status( - share_network, - ready_statuses=["deleted"], - check_deletion=True, - update_resource=utils.get_from_manager(), - timeout=CONF.benchmark.manila_share_delete_timeout, - check_interval=CONF.benchmark.manila_share_delete_poll_interval) - - @atomic.action_timer("manila.list_share_networks") - def _list_share_networks(self, detailed=True, search_opts=None): - """List share networks. - - :param detailed: defines either to return detailed list of - objects or not. - :param search_opts: container of search opts such as - "project_id" and "name". - :returns: list of instances of :class:`ShareNetwork` - """ - share_networks = self.clients("manila").share_networks.list( - detailed=detailed, search_opts=search_opts) - return share_networks - - @atomic.action_timer("manila.list_share_servers") - def _list_share_servers(self, search_opts=None): - """List share servers. Admin only. - - :param search_opts: set of key-value pairs to filter share servers by. - Example: {"share_network": "share_network_name_or_id"} - :returns: list of instances of :class:`ShareServer` - """ - share_servers = self.admin_clients("manila").share_servers.list( - search_opts=search_opts) - return share_servers - - @atomic.action_timer("manila.create_security_service") - def _create_security_service(self, security_service_type, dns_ip=None, - server=None, domain=None, user=None, - password=None, description=None): - """Create security service. 
- - 'Security service' is data container in Manila that stores info - about auth services 'Active Directory', 'Kerberos' and catalog - service 'LDAP' that should be used for shares. - - :param security_service_type: security service type, permitted values - are 'ldap', 'kerberos' or 'active_directory'. - :param dns_ip: dns ip address used inside tenant's network - :param server: security service server ip address or hostname - :param domain: security service domain - :param user: security identifier used by tenant - :param password: password used by user - :param description: security service description - :returns: instance of :class:`SecurityService` - """ - security_service = self.clients("manila").security_services.create( - type=security_service_type, - dns_ip=dns_ip, - server=server, - domain=domain, - user=user, - password=password, - name=self.generate_random_name(), - description=description) - return security_service - - @atomic.action_timer("manila.delete_security_service") - def _delete_security_service(self, security_service): - """Delete security service. - - :param security_service: instance of :class:`SecurityService`. - """ - security_service.delete() - utils.wait_for_status( - security_service, - ready_statuses=["deleted"], - check_deletion=True, - update_resource=utils.get_from_manager(), - timeout=CONF.benchmark.manila_share_delete_timeout, - check_interval=CONF.benchmark.manila_share_delete_poll_interval) - - @atomic.action_timer("manila.add_security_service_to_share_network") - def _add_security_service_to_share_network(self, share_network, - security_service): - """Associate given security service with a share network. - - :param share_network: ID or instance of :class:`ShareNetwork`. - :param security_service: ID or instance of :class:`SecurityService`. - :returns: instance of :class:`ShareNetwork`. - """ - share_network = self.clients( - "manila").share_networks.add_security_service( - share_network, security_service) - return share_network - - @atomic.action_timer("manila.set_metadata") - def _set_metadata(self, share, sets=1, set_size=1, - key_min_length=1, key_max_length=256, - value_min_length=1, value_max_length=1024): - """Sets share metadata. - - :param share: the share to set metadata on - :param sets: how many operations to perform - :param set_size: number of metadata keys to set in each operation - :param key_min_length: minimal size of metadata key to set - :param key_max_length: maximum size of metadata key to set - :param value_min_length: minimal size of metadata value to set - :param value_max_length: maximum size of metadata value to set - :returns: A list of keys that were set - :raises exceptions.InvalidArgumentsException: if invalid arguments - were provided. 
- """ - if not (key_min_length <= key_max_length and - value_min_length <= value_max_length): - raise exceptions.InvalidArgumentsException( - "Min length for keys and values of metadata can not be bigger " - "than maximum length.") - - keys = [] - for i in range(sets): - metadata = {} - for j in range(set_size): - if key_min_length == key_max_length: - key_length = key_min_length - else: - key_length = random.choice( - range(key_min_length, key_max_length)) - if value_min_length == value_max_length: - value_length = value_min_length - else: - value_length = random.choice( - range(value_min_length, value_max_length)) - key = self._generate_random_part(length=key_length) - keys.append(key) - metadata[key] = self._generate_random_part(length=value_length) - self.clients("manila").shares.set_metadata(share["id"], metadata) - - return keys - - @atomic.action_timer("manila.delete_metadata") - def _delete_metadata(self, share, keys, delete_size=3): - """Deletes share metadata. - - :param share: The share to delete metadata from. - :param delete_size: number of metadata keys to delete using one single - call. - :param keys: a list or tuple of keys to choose deletion candidates from - :raises exceptions.InvalidArgumentsException: if invalid arguments - were provided. - """ - if not (isinstance(keys, list) and keys): - raise exceptions.InvalidArgumentsException( - "Param 'keys' should be non-empty 'list'. keys = '%s'" % keys) - for i in range(0, len(keys), delete_size): - self.clients("manila").shares.delete_metadata( - share["id"], keys[i:i + delete_size]) diff --git a/rally/plugins/openstack/scenarios/mistral/__init__.py b/rally/plugins/openstack/scenarios/mistral/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/rally/plugins/openstack/scenarios/mistral/executions.py b/rally/plugins/openstack/scenarios/mistral/executions.py deleted file mode 100644 index 8c58b1a6..00000000 --- a/rally/plugins/openstack/scenarios/mistral/executions.py +++ /dev/null @@ -1,106 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import json - -import six -import yaml - -from rally import consts -from rally.plugins.openstack import scenario -from rally.plugins.openstack.scenarios.mistral import utils -from rally.task import types -from rally.task import validation - - -"""Scenarios for Mistral execution.""" - - -@validation.add("required_platform", platform="openstack", users=True) -@validation.add("required_services", - services=[consts.Service.MISTRAL]) -@scenario.configure(name="MistralExecutions.list_executions", - context={"cleanup": ["mistral"]}, - platform="openstack") -class ListExecutions(utils.MistralScenario): - - def run(self, marker="", limit=None, sort_keys="", sort_dirs=""): - """Scenario test mistral execution-list command. - - This simple scenario tests the Mistral execution-list - command by listing all the executions. - :param marker: The last execution uuid of the previous page, displays - list of executions after "marker". 
-        :param limit: Maximum number of executions to return in a single
-                      result.
-        :param sort_keys: Comma-separated list of sort keys. Example:
-                          "id,description".
-        :param sort_dirs: Comma-separated list of sort directions.
-                          Default: asc.
-        """
-        self._list_executions(marker=marker, limit=limit,
-                              sort_keys=sort_keys, sort_dirs=sort_dirs)
-
-
-@validation.add("file_exists", param_name="definition")
-@types.convert(definition={"type": "file"})
-@types.convert(params={"type": "file"})
-@types.convert(wf_input={"type": "file"})
-@validation.add("required_platform", platform="openstack", users=True)
-@validation.add("required_services",
-                services=[consts.Service.MISTRAL])
-@validation.workbook_contains_workflow("definition", "workflow_name")
-@scenario.configure(name="MistralExecutions.create_execution_from_workbook",
-                    context={"cleanup": ["mistral"]},
-                    platform="openstack")
-class CreateExecutionFromWorkbook(utils.MistralScenario):
-
-    def run(self, definition, workflow_name=None, wf_input=None, params=None,
-            do_delete=False):
-        """Scenario tests execution creation and deletion.
-
-        This scenario is a very useful tool to measure the performance of
-        the "mistral execution-create" and "mistral execution-delete"
-        commands.
-
-        :param definition: string (yaml string) representation of given file
-                           content (Mistral workbook definition)
-        :param workflow_name: the name of the workflow to execute. Should be
-                              one of the workflows in the definition. If no
-                              workflow_name is passed, one of the workflows in
-                              the definition will be taken.
-        :param wf_input: file containing a json string of mistral workflow
-                         input
-        :param params: file containing a json string of mistral params
-                       (the string is the place to pass the environment)
-        :param do_delete: if False, only execution creation is measured
-                          ("create only" mode).
-        """
-
-        wb = self._create_workbook(definition)
-        wb_def = yaml.safe_load(wb.definition)
-
-        if not workflow_name:
-            workflow_name = six.next(six.iterkeys(wb_def["workflows"]))
-
-        workflow_identifier = ".".join([wb.name, workflow_name])
-
-        if not params:
-            params = {}
-        else:
-            params = json.loads(params)
-
-        ex = self._create_execution(workflow_identifier, wf_input, **params)
-
-        if do_delete:
-            self._delete_workbook(wb.name)
-            self._delete_execution(ex)
diff --git a/rally/plugins/openstack/scenarios/mistral/utils.py b/rally/plugins/openstack/scenarios/mistral/utils.py
deleted file mode 100644
index a192fcdb..00000000
--- a/rally/plugins/openstack/scenarios/mistral/utils.py
+++ /dev/null
@@ -1,100 +0,0 @@
-# Copyright 2015: Mirantis Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-from oslo_config import cfg
-import yaml
-
-from rally.plugins.openstack import scenario
-from rally.task import atomic
-from rally.task import utils
-
-
-CONF = cfg.CONF
-
-
-class MistralScenario(scenario.OpenStackScenario):
-    """Base class for Mistral scenarios with basic atomic actions."""
-
-    @atomic.action_timer("mistral.list_workbooks")
-    def _list_workbooks(self):
-        """Gets list of existing workbooks.
-
-        :returns: workbook list
-        """
-        return self.clients("mistral").workbooks.list()
-
-    @atomic.action_timer("mistral.create_workbook")
-    def _create_workbook(self, definition):
-        """Create a new workbook.
-
-        :param definition: workbook description in string
-                           (yaml string) format
-        :returns: workbook object
-        """
-        definition = yaml.safe_load(definition)
-        definition["name"] = self.generate_random_name()
-        definition = yaml.safe_dump(definition)
-
-        return self.clients("mistral").workbooks.create(definition)
-
-    @atomic.action_timer("mistral.delete_workbook")
-    def _delete_workbook(self, wb_name):
-        """Delete the given workbook.
-
-        :param wb_name: the name of the workbook to delete.
-        """
-        self.clients("mistral").workbooks.delete(wb_name)
-
-    @atomic.action_timer("mistral.list_executions")
-    def _list_executions(self, marker="", limit=None, sort_keys="",
-                         sort_dirs=""):
-        """Gets list of existing executions.
-
-        :returns: execution list
-        """
-
-        return self.clients("mistral").executions.list(
-            marker=marker, limit=limit, sort_keys=sort_keys,
-            sort_dirs=sort_dirs)
-
-    @atomic.action_timer("mistral.create_execution")
-    def _create_execution(self, workflow_identifier, wf_input=None, **params):
-        """Create a new execution.
-
-        :param workflow_identifier: name or id of the workflow to execute
-        :param wf_input: json string of mistral workflow input
-        :param params: optional mistral params (this is the place to pass
-                       environment).
-        :returns: execution object
-        """
-
-        execution = self.clients("mistral").executions.create(
-            workflow_identifier, workflow_input=wf_input, **params)
-
-        execution = utils.wait_for_status(
-            execution, ready_statuses=["SUCCESS"], failure_statuses=["ERROR"],
-            update_resource=utils.get_from_manager(),
-            timeout=CONF.benchmark.mistral_execution_timeout)
-
-        return execution
-
-    @atomic.action_timer("mistral.delete_execution")
-    def _delete_execution(self, execution):
-        """Delete the given execution.
-
-        :param execution: the execution to delete.
-        """
-        self.clients("mistral").executions.delete(execution.id)
diff --git a/rally/plugins/openstack/scenarios/mistral/workbooks.py b/rally/plugins/openstack/scenarios/mistral/workbooks.py
deleted file mode 100644
index 71478c81..00000000
--- a/rally/plugins/openstack/scenarios/mistral/workbooks.py
+++ /dev/null
@@ -1,66 +0,0 @@
-# Copyright 2015: Mirantis Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
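Note that _create_workbook above never uploads the given definition verbatim: it parses the YAML, replaces its "name" field with a generated one, and serializes the result back to a string before calling the Mistral client. That rename step in isolation, as a minimal sketch (only PyYAML is assumed; the new name is passed explicitly here instead of Rally's generate_random_name):

    import yaml

    def rename_workbook(definition_yaml, new_name):
        # Round-trip the definition through YAML so that only the
        # "name" field changes and the rest stays intact.
        definition = yaml.safe_load(definition_yaml)
        definition["name"] = new_name
        return yaml.safe_dump(definition)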
-
-from rally import consts
-from rally.plugins.openstack import scenario
-from rally.plugins.openstack.scenarios.mistral import utils
-from rally.task import types
-from rally.task import validation
-
-
-"""Scenarios for Mistral workbook."""
-
-
-@validation.add("required_platform", platform="openstack", users=True)
-@validation.add("required_services",
-                services=[consts.Service.MISTRAL])
-@scenario.configure(name="MistralWorkbooks.list_workbooks",
-                    platform="openstack")
-class ListWorkbooks(utils.MistralScenario):
-
-    def run(self):
-        """Scenario that tests the mistral workbook-list command.
-
-        This simple scenario tests the Mistral workbook-list
-        command by listing all the workbooks.
-        """
-        self._list_workbooks()
-
-
-@validation.add("file_exists", param_name="definition")
-@types.convert(definition={"type": "file"})
-@validation.add("required_platform", platform="openstack", users=True)
-@validation.add("required_services",
-                services=[consts.Service.MISTRAL])
-@scenario.configure(context={"cleanup": ["mistral"]},
-                    name="MistralWorkbooks.create_workbook",
-                    platform="openstack")
-class CreateWorkbook(utils.MistralScenario):
-
-    def run(self, definition, do_delete=False):
-        """Scenario tests workbook creation and deletion.
-
-        This scenario is a very useful tool to measure the performance of
-        the "mistral workbook-create" and "mistral workbook-delete"
-        commands.
-
-        :param definition: string (yaml string) representation of given
-                           file content (Mistral workbook definition)
-        :param do_delete: if False, only workbook creation is measured
-                          ("create only" mode).
-        """
-        wb = self._create_workbook(definition)
-
-        if do_delete:
-            self._delete_workbook(wb.name)
diff --git a/rally/plugins/openstack/scenarios/monasca/__init__.py b/rally/plugins/openstack/scenarios/monasca/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/rally/plugins/openstack/scenarios/monasca/metrics.py b/rally/plugins/openstack/scenarios/monasca/metrics.py
deleted file mode 100644
index 2430c5d5..00000000
--- a/rally/plugins/openstack/scenarios/monasca/metrics.py
+++ /dev/null
@@ -1,36 +0,0 @@
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from rally import consts
-from rally.plugins.openstack import scenario
-from rally.plugins.openstack.scenarios.monasca import utils as monascautils
-from rally.task import validation
-
-
-"""Scenarios for monasca Metrics API."""
-
-
-@validation.add("required_services",
-                services=[consts.Service.MONASCA])
-@validation.add("required_platform", platform="openstack", users=True)
-@scenario.configure(name="MonascaMetrics.list_metrics", platform="openstack")
-class ListMetrics(monascautils.MonascaScenario):
-
-    def run(self, **kwargs):
-        """Fetch user's metrics.
- - :param kwargs: optional arguments for list query: - name, dimensions, start_time, etc - """ - self._list_metrics(**kwargs) diff --git a/rally/plugins/openstack/scenarios/monasca/utils.py b/rally/plugins/openstack/scenarios/monasca/utils.py deleted file mode 100644 index 6d5e1930..00000000 --- a/rally/plugins/openstack/scenarios/monasca/utils.py +++ /dev/null @@ -1,54 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import random -import time -import uuid - -from oslo_config import cfg - -from rally.plugins.openstack import scenario -from rally.task import atomic - - -CONF = cfg.CONF - - -class MonascaScenario(scenario.OpenStackScenario): - """Base class for Monasca scenarios with basic atomic actions.""" - - @atomic.action_timer("monasca.list_metrics") - def _list_metrics(self, **kwargs): - """Get list of user's metrics. - - :param kwargs: optional arguments for list query: - name, dimensions, start_time, etc - :returns list of monasca metrics - """ - return self.clients("monasca").metrics.list(**kwargs) - - @atomic.action_timer("monasca.create_metrics") - def _create_metrics(self, **kwargs): - """Create user metrics. - - :param kwargs: attributes for metric creation: - name, dimension, timestamp, value, etc - """ - timestamp = int(time.time() * 1000) - kwargs.update({"name": self.generate_random_name(), - "timestamp": timestamp, - "value": random.random(), - "value_meta": { - "key": str(uuid.uuid4())[:10]}}) - self.clients("monasca").metrics.create(**kwargs) diff --git a/rally/plugins/openstack/scenarios/murano/__init__.py b/rally/plugins/openstack/scenarios/murano/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/rally/plugins/openstack/scenarios/murano/environments.py b/rally/plugins/openstack/scenarios/murano/environments.py deleted file mode 100644 index ccde5fd5..00000000 --- a/rally/plugins/openstack/scenarios/murano/environments.py +++ /dev/null @@ -1,76 +0,0 @@ -# Copyright 2015: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
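The _create_metrics helper above shows the shape of a Monasca measurement: a name, a millisecond epoch timestamp, a numeric value and an optional value_meta mapping. A minimal sketch of building that payload outside Rally (the field names follow the monascaclient call above; the helper itself is illustrative):

    import random
    import time
    import uuid

    def build_metric_payload(name):
        # Monasca expects the timestamp in milliseconds since the epoch.
        return {
            "name": name,
            "timestamp": int(time.time() * 1000),
            "value": random.random(),
            "value_meta": {"key": str(uuid.uuid4())[:10]},
        }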
diff --git a/rally/plugins/openstack/scenarios/murano/__init__.py b/rally/plugins/openstack/scenarios/murano/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/rally/plugins/openstack/scenarios/murano/environments.py b/rally/plugins/openstack/scenarios/murano/environments.py
deleted file mode 100644
index ccde5fd5..00000000
--- a/rally/plugins/openstack/scenarios/murano/environments.py
+++ /dev/null
@@ -1,76 +0,0 @@
-# Copyright 2015: Mirantis Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from rally import consts
-from rally.plugins.openstack import scenario
-from rally.plugins.openstack.scenarios.murano import utils
-from rally.task import validation
-
-
-"""Scenarios for Murano environments."""
-
-
-@validation.add("required_services", services=[consts.Service.MURANO])
-@scenario.configure(context={"cleanup": ["murano.environments"]},
-                    name="MuranoEnvironments.list_environments",
-                    platform="openstack")
-class ListEnvironments(utils.MuranoScenario):
-
-    def run(self):
-        """List the Murano environments.
-
-        Run "murano environment-list" to list all environments.
-        """
-        self._list_environments()
-
-
-@validation.add("required_services", services=[consts.Service.MURANO])
-@scenario.configure(context={"cleanup": ["murano.environments"]},
-                    name="MuranoEnvironments.create_and_delete_environment",
-                    platform="openstack")
-class CreateAndDeleteEnvironment(utils.MuranoScenario):
-
-    def run(self):
-        """Create environment, session and delete environment."""
-        environment = self._create_environment()
-
-        self._create_session(environment.id)
-        self._delete_environment(environment)
-
-
-@validation.add("required_services", services=[consts.Service.MURANO])
-@validation.add("required_contexts", contexts=("murano_packages",))
-@scenario.configure(context={"cleanup": ["murano"], "roles": ["admin"]},
-                    name="MuranoEnvironments.create_and_deploy_environment",
-                    platform="openstack")
-class CreateAndDeployEnvironment(utils.MuranoScenario):
-
-    def run(self, packages_per_env=1):
-        """Create environment, session and deploy environment.
-
-        Create environment, create session, add app to environment
-        packages_per_env times, send environment to deploy.
-
-        :param packages_per_env: number of packages per environment
-        """
-        environment = self._create_environment()
-        session = self._create_session(environment.id)
-        package = self.context["tenant"]["packages"][0]
-
-        for i in range(packages_per_env):
-            self._create_service(environment, session,
-                                 package.fully_qualified_name)
-
-        self._deploy_environment(environment, session)
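As an illustration, a sketch of a task entry for the deploy scenario above, written as a Python dict (task files are usually JSON or YAML); the package path in the murano_packages context is hypothetical:

# sketch: exercising MuranoEnvironments.create_and_deploy_environment
task = {
    "MuranoEnvironments.create_and_deploy_environment": [{
        "args": {"packages_per_env": 2},
        "runner": {"type": "constant", "times": 10, "concurrency": 2},
        "context": {
            "users": {"tenants": 2, "users_per_tenant": 2},
            # hypothetical path to a Murano application package
            "murano_packages": {"app_package": "~/apps/HelloReporter.zip"},
        },
    }]
}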
diff --git a/rally/plugins/openstack/scenarios/murano/packages.py b/rally/plugins/openstack/scenarios/murano/packages.py
deleted file mode 100644
index 4d3d8b2e..00000000
--- a/rally/plugins/openstack/scenarios/murano/packages.py
+++ /dev/null
@@ -1,158 +0,0 @@
-# Copyright 2015: Mirantis Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-import os
-
-from rally import consts
-from rally.plugins.openstack import scenario
-from rally.plugins.openstack.scenarios.murano import utils
-from rally.task import types
-from rally.task import validation
-
-
-"""Scenarios for Murano packages."""
-
-
-@types.convert(package={"type": "expand_user_path"})
-@validation.add("file_exists", param_name="package", mode=os.F_OK)
-@validation.add("required_services", services=[consts.Service.MURANO])
-@validation.add("required_platform", platform="openstack", users=True)
-@scenario.configure(context={"cleanup": ["murano.packages"]},
-                    name="MuranoPackages.import_and_list_packages",
-                    platform="openstack")
-class ImportAndListPackages(utils.MuranoScenario):
-
-    def run(self, package, include_disabled=False):
-        """Import Murano package and get list of packages.
-
-        Measure the "murano import-package" and "murano package-list"
-        commands performance.
-        It imports a Murano package from "package" (if it is not a zip
-        archive, a zip archive is prepared first) and gets the list of
-        imported packages.
-
-        :param package: path to zip archive that represents Murano
-                        application package or absolute path to folder with
-                        package components
-        :param include_disabled: specifies whether disabled packages will
-                                 be included in the result or not.
-                                 Default value is False.
-        """
-        package_path = self._zip_package(package)
-        try:
-            self._import_package(package_path)
-            self._list_packages(include_disabled=include_disabled)
-        finally:
-            os.remove(package_path)
-
-
-@types.convert(package={"type": "expand_user_path"})
-@validation.add("file_exists", param_name="package", mode=os.F_OK)
-@validation.add("required_services", services=[consts.Service.MURANO])
-@validation.add("required_platform", platform="openstack", users=True)
-@scenario.configure(context={"cleanup": ["murano.packages"]},
-                    name="MuranoPackages.import_and_delete_package",
-                    platform="openstack")
-class ImportAndDeletePackage(utils.MuranoScenario):
-
-    def run(self, package):
-        """Import Murano package and then delete it.
-
-        Measure the "murano import-package" and "murano package-delete"
-        commands performance.
-        It imports a Murano package from "package" (if it is not a zip
-        archive, a zip archive is prepared first) and deletes it.
-
-        :param package: path to zip archive that represents Murano
-                        application package or absolute path to folder with
-                        package components
-        """
-        package_path = self._zip_package(package)
-        try:
-            package = self._import_package(package_path)
-            self._delete_package(package)
-        finally:
-            os.remove(package_path)
-
-
-@types.convert(package={"type": "expand_user_path"})
-@validation.add("file_exists", param_name="package", mode=os.F_OK)
-@validation.add("required_services", services=[consts.Service.MURANO])
-@validation.add("required_platform", platform="openstack", users=True)
-@scenario.configure(context={"cleanup": ["murano.packages"]},
-                    name="MuranoPackages.package_lifecycle",
-                    platform="openstack")
-class PackageLifecycle(utils.MuranoScenario):
-
-    def run(self, package, body, operation="replace"):
-        """Import Murano package, modify it and then delete it.
-
-        Measure the Murano import, update and delete package
-        commands performance.
-        It imports a Murano package from "package" (if it is not a zip
-        archive, a zip archive is prepared first), modifies it (using data
-        from "body") and deletes it.
-
-        :param package: path to zip archive that represents Murano
-                        application package or absolute path to folder with
-                        package components
-        :param body: dict object that defines what package property will be
-                     updated, e.g. {"tags": ["tag"]} or {"enabled": "true"}
-        :param operation: string object that defines the way of how package
-                          property will be updated, allowed operations are
-                          "add", "replace" or "delete".
-                          Default value is "replace".
-
-        """
-        package_path = self._zip_package(package)
-        try:
-            package = self._import_package(package_path)
-            self._update_package(package, body, operation)
-            self._delete_package(package)
-        finally:
-            os.remove(package_path)
-
-
-@types.convert(package={"type": "expand_user_path"})
-@validation.add("file_exists", param_name="package", mode=os.F_OK)
-@validation.add("required_services", services=[consts.Service.MURANO])
-@validation.add("required_platform", platform="openstack", users=True)
-@scenario.configure(context={"cleanup": ["murano.packages"]},
-                    name="MuranoPackages.import_and_filter_applications",
-                    platform="openstack")
-class ImportAndFilterApplications(utils.MuranoScenario):
-
-    def run(self, package, filter_query):
-        """Import Murano package and then filter packages by some criteria.
-
-        Measure the performance of package import and package
-        filtering commands.
-        It imports a Murano package from "package" (if it is not a zip
-        archive, a zip archive is prepared first) and filters packages by
-        some criteria.
-
-        :param package: path to zip archive that represents Murano
-                        application package or absolute path to folder with
-                        package components
-        :param filter_query: dict that contains filter criteria; it is
-                             later passed as **kwargs to the filter method,
-                             e.g. {"category": "Web"}
-        """
-        package_path = self._zip_package(package)
-        try:
-            self._import_package(package_path)
-            self._filter_applications(filter_query)
-        finally:
-            os.remove(package_path)
diff --git a/rally/plugins/openstack/scenarios/murano/utils.py b/rally/plugins/openstack/scenarios/murano/utils.py
deleted file mode 100644
index bf61a6d9..00000000
--- a/rally/plugins/openstack/scenarios/murano/utils.py
+++ /dev/null
@@ -1,261 +0,0 @@
-# Copyright 2015: Mirantis Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import os
-import shutil
-import tempfile
-import uuid
-import zipfile
-
-from oslo_config import cfg
-import yaml
-
-from rally.common import fileutils
-from rally.common import utils as common_utils
-from rally.plugins.openstack import scenario
-from rally.task import atomic
-from rally.task import utils
-
-
-CONF = cfg.CONF
-
-
-class MuranoScenario(scenario.OpenStackScenario):
-    """Base class for Murano scenarios with basic atomic actions."""
-
-    @atomic.action_timer("murano.list_environments")
-    def _list_environments(self):
-        """Return environments list."""
-        return self.clients("murano").environments.list()
-
-    @atomic.action_timer("murano.create_environment")
-    def _create_environment(self):
-        """Create environment.
-
-        The environment name is generated automatically.
-
-        :returns: Environment instance
-        """
-        env_name = self.generate_random_name()
-        return self.clients("murano").environments.create({"name": env_name})
-
-    @atomic.action_timer("murano.delete_environment")
-    def _delete_environment(self, environment):
-        """Delete given environment.
-
-        Return when the environment is actually deleted.
-
-        :param environment: Environment instance
-        """
-        self.clients("murano").environments.delete(environment.id)
-
-    @atomic.action_timer("murano.create_session")
-    def _create_session(self, environment_id):
-        """Create session for environment with specific id.
-
-        :param environment_id: Environment id
-        :returns: Session instance
-        """
-        return self.clients("murano").sessions.configure(environment_id)
-
-    @atomic.action_timer("murano.create_service")
-    def _create_service(self, environment, session, full_package_name,
-                        image_name=None, flavor_name=None):
-        """Create Murano service.
-
-        :param environment: Environment instance
-        :param session: Session instance
-        :param full_package_name: full name of the Murano package
-        :param image_name: Image name
-        :param flavor_name: Flavor name
-        :returns: Service instance
-        """
-        app_id = str(uuid.uuid4())
-        data = {"?": {"id": app_id,
-                      "type": full_package_name},
-                "name": self.generate_random_name()}
-
-        return self.clients("murano").services.post(
-            environment_id=environment.id, path="/", data=data,
-            session_id=session.id)
-
-    @atomic.action_timer("murano.deploy_environment")
-    def _deploy_environment(self, environment, session):
-        """Deploy environment.
-
-        :param environment: Environment instance
-        :param session: Session instance
-        """
-        self.clients("murano").sessions.deploy(environment.id,
-                                               session.id)
-
-        config = CONF.benchmark
-        utils.wait_for(
-            environment,
-            ready_statuses=["READY"],
-            update_resource=utils.get_from_manager(["DEPLOY FAILURE"]),
-            timeout=config.murano_deploy_environment_timeout,
-            check_interval=config.murano_deploy_environment_check_interval
-        )
-
-    @atomic.action_timer("murano.list_packages")
-    def _list_packages(self, include_disabled=False):
-        """Returns packages list.
-
-        :param include_disabled: if "True" then disabled packages will be
-                                 included in the result.
-                                 Default value is False.
-        :returns: list of imported packages
-        """
-        return self.clients("murano").packages.list(
-            include_disabled=include_disabled)
-
-    @atomic.action_timer("murano.import_package")
-    def _import_package(self, package):
-        """Import package to Murano.
-
-        :param package: path to zip archive with Murano application
-        :returns: imported package
-        """
-
-        package = self.clients("murano").packages.create(
-            {}, {"file": open(package)}
-        )
-
-        return package
-
-    @atomic.action_timer("murano.delete_package")
-    def _delete_package(self, package):
-        """Delete specified package.
-
-        :param package: package that will be deleted
-        """
-
-        self.clients("murano").packages.delete(package.id)
-
-    @atomic.action_timer("murano.update_package")
-    def _update_package(self, package, body, operation="replace"):
-        """Update specified package.
-
-        :param package: package that will be updated
-        :param body: dict object that defines what package property will be
-                     updated, e.g. {"tags": ["tag"]} or {"enabled": "true"}
-        :param operation: string object that defines the way of how package
-                          property will be updated, allowed operations are
-                          "add", "replace" or "delete".
-                          Default value is "replace".
-        :returns: updated package
-        """
-
-        return self.clients("murano").packages.update(
-            package.id, body, operation)
-
-    @atomic.action_timer("murano.filter_applications")
-    def _filter_applications(self, filter_query):
-        """Filter list of uploaded applications by specified criteria.
-
-        :param filter_query: dict that contains filter criteria, it
-                             will be passed as **kwargs to filter method
-                             e.g. {"category": "Web"}
-        :returns: filtered list of packages
-        """
-
-        return self.clients("murano").packages.filter(**filter_query)
-
-    def _zip_package(self, package_path):
-        """Call _prepare_package method that returns path to zip archive."""
-        return MuranoPackageManager(self.task)._prepare_package(package_path)
-
-
-class MuranoPackageManager(common_utils.RandomNameGeneratorMixin):
-    RESOURCE_NAME_FORMAT = "app.rally_XXXXXXXX_XXXXXXXX"
-
-    def __init__(self, task):
-        self.task = task
-
-    @staticmethod
-    def _read_from_file(filename):
-        with open(filename, "r") as f:
-            read_data = f.read()
-        return yaml.safe_load(read_data)
-
-    @staticmethod
-    def _write_to_file(data, filename):
-        with open(filename, "w") as f:
-            yaml.safe_dump(data, f)
-
-    def _change_app_fullname(self, app_dir):
-        """Change application full name.
-
-        To avoid a name conflict error during package import (when a user
-        tries to import a few packages into the same tenant), the
-        application name needs to be changed. To do this, replace the
-        following parts in manifest.yaml
-        from
-         ...
-         FullName: app.name
-         ...
-         Classes:
-         app.name: app_class.yaml
-        to:
-         ...
-         FullName: <new_name>
-         ...
-         Classes:
-         <new_name>: app_class.yaml
-
-        :param app_dir: path to directory with Murano application context
-        """
-
-        new_fullname = self.generate_random_name()
-
-        manifest_file = os.path.join(app_dir, "manifest.yaml")
-        manifest = self._read_from_file(manifest_file)
-
-        class_file_name = manifest["Classes"][manifest["FullName"]]
-
-        # update manifest.yaml file
-        del manifest["Classes"][manifest["FullName"]]
-        manifest["FullName"] = new_fullname
-        manifest["Classes"][new_fullname] = class_file_name
-        self._write_to_file(manifest, manifest_file)
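For clarity, a self-contained sketch of the manifest rewrite that _change_app_fullname performs, with the file I/O replaced by a plain dict (the generated name shown is hypothetical but follows RESOURCE_NAME_FORMAT):

# sketch: the manifest rename done by _change_app_fullname
manifest = {"FullName": "app.name",
            "Classes": {"app.name": "app_class.yaml"}}
new_fullname = "app.rally_12345678_abcdefgh"  # hypothetical generated name

# move the class mapping from the old full name to the new one
class_file_name = manifest["Classes"].pop(manifest["FullName"])
manifest["FullName"] = new_fullname
manifest["Classes"][new_fullname] = class_file_name

assert manifest == {"FullName": "app.rally_12345678_abcdefgh",
                    "Classes": {"app.rally_12345678_abcdefgh":
                                "app_class.yaml"}}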
-
-    def _prepare_package(self, package_path):
-        """Check whether the package path is path to zip archive or not.
-
-        If package_path is not a path to a zip archive but a path to a
-        Murano application folder, then the method prepares a zip archive
-        with the Murano application. It copies the directory with Murano
-        app files to a temporary folder, changes manifest.yaml and the
-        class file (to avoid '409 Conflict' errors in Murano) and prepares
-        the zip package.
-
-        :param package_path: path to zip archive or directory with package
-                             components
-        :returns: path to zip archive with Murano application
-        """
-
-        if not zipfile.is_zipfile(package_path):
-            tmp_dir = tempfile.mkdtemp()
-            pkg_dir = os.path.join(tmp_dir, "package/")
-            try:
-                shutil.copytree(os.path.expanduser(package_path), pkg_dir)
-
-                self._change_app_fullname(pkg_dir)
-                package_path = fileutils.pack_dir(pkg_dir)
-
-            finally:
-                shutil.rmtree(tmp_dir)
-
-        return package_path
diff --git a/rally/plugins/openstack/scenarios/neutron/__init__.py b/rally/plugins/openstack/scenarios/neutron/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/rally/plugins/openstack/scenarios/neutron/bgpvpn.py b/rally/plugins/openstack/scenarios/neutron/bgpvpn.py
deleted file mode 100644
index 3f8cb2df..00000000
--- a/rally/plugins/openstack/scenarios/neutron/bgpvpn.py
+++ /dev/null
@@ -1,328 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-from rally import consts
-from rally.plugins.openstack import scenario
-from rally.plugins.openstack.scenarios.neutron import utils
-from rally.task import validation
-
-
-"""Scenarios for Neutron Networking-Bgpvpn."""
-
-
-@validation.add("enum", param_name="bgpvpn_type", values=["l2", "l3"],
-                missed=True)
-@validation.add("required_neutron_extensions", extensions=["bgpvpn"])
-@validation.add("required_platform", platform="openstack", admin=True)
-@validation.add("required_services",
-                services=[consts.Service.NEUTRON])
-@scenario.configure(context={"admin_cleanup": ["neutron"]},
-                    name="NeutronBGPVPN.create_and_delete_bgpvpns",
-                    platform="openstack")
-class CreateAndDeleteBgpvpns(utils.NeutronScenario):
-
-    def run(self, route_targets=None, import_targets=None,
-            export_targets=None, route_distinguishers=None, bgpvpn_type="l3"):
-        """Create a bgpvpn and delete it.
-
-        Measure the "neutron bgpvpn-create" and "neutron bgpvpn-delete"
-        command performance.
-
-        :param route_targets: Route Targets that will be both imported and
-                              used for export
-        :param import_targets: Additional Route Targets that will be imported
-        :param export_targets: Additional Route Targets that will be used
-                               for export.
-        :param route_distinguishers: List of route distinguisher strings
-        :param bgpvpn_type: type of VPN and the technology behind it.
-                            Acceptable formats: l2 and l3
-        """
-        bgpvpn = self._create_bgpvpn(route_targets=route_targets,
-                                     import_targets=import_targets,
-                                     export_targets=export_targets,
-                                     route_distinguishers=route_distinguishers,
-                                     type=bgpvpn_type)
-        self._delete_bgpvpn(bgpvpn)
-
-
-@validation.add("enum", param_name="bgpvpn_type", values=["l2", "l3"],
-                missed=True)
-@validation.add("required_neutron_extensions", extensions=["bgpvpn"])
-@validation.add("required_services", services=[consts.Service.NEUTRON])
-@validation.add("required_platform", platform="openstack", admin=True)
-@scenario.configure(context={"admin_cleanup": ["neutron"]},
-                    name="NeutronBGPVPN.create_and_list_bgpvpns",
-                    platform="openstack")
-class CreateAndListBgpvpns(utils.NeutronScenario):
-
-    def run(self, route_targets=None, import_targets=None,
-            export_targets=None, route_distinguishers=None, bgpvpn_type="l3"):
-        """Create a bgpvpn and then list all bgpvpns.
-
-        Measure the "neutron bgpvpn-list" command performance.
-
-        :param route_targets: Route Targets that will be both imported and
-                              used for export
-        :param import_targets: Additional Route Targets that will be imported
-        :param export_targets: Additional Route Targets that will be used
-                               for export.
-        :param route_distinguishers: List of route distinguisher strings
-        :param bgpvpn_type: type of VPN and the technology behind it.
-                            Acceptable formats: l2 and l3
-        """
-        bgpvpn = self._create_bgpvpn(route_targets=route_targets,
-                                     import_targets=import_targets,
-                                     export_targets=export_targets,
-                                     route_distinguishers=route_distinguishers,
-                                     type=bgpvpn_type)
-        bgpvpns = self._list_bgpvpns()
-        self.assertIn(bgpvpn["bgpvpn"]["id"], [b["id"] for b in bgpvpns])
-
-
-@validation.add("enum", param_name="bgpvpn_type", values=["l2", "l3"],
-                missed=True)
-@validation.add("required_neutron_extensions", extensions=["bgpvpn"])
-@validation.add("required_services", services=[consts.Service.NEUTRON])
-@validation.add("required_platform", platform="openstack", admin=True)
-@scenario.configure(context={"admin_cleanup": ["neutron"]},
-                    name="NeutronBGPVPN.create_and_update_bgpvpns",
-                    platform="openstack")
-class CreateAndUpdateBgpvpns(utils.NeutronScenario):
-
-    def run(self, update_name=False, route_targets=None,
-            import_targets=None, export_targets=None,
-            route_distinguishers=None, updated_route_targets=None,
-            updated_import_targets=None, updated_export_targets=None,
-            updated_route_distinguishers=None, bgpvpn_type="l3"):
-        """Create and update bgpvpns.
-
-        Measure the "neutron bgpvpn-update" command performance.
-
-        :param update_name: bool, whether or not to modify BGP VPN name
-        :param route_targets: Route Targets that will be both imported
-                              and used for export
-        :param updated_route_targets: Updated Route Targets that will be both
-                                      imported and used for export
-        :param import_targets: Additional Route Targets that will be imported
-        :param updated_import_targets: Updated additional Route Targets that
-                                       will be imported
-        :param export_targets: additional Route Targets that will be used
-                               for export.
-        :param updated_export_targets: Updated additional Route Targets that
-                                       will be used for export.
-        :param route_distinguishers: list of route distinguisher strings
-        :param updated_route_distinguishers: Updated list of route
-                                             distinguisher strings
-        :param bgpvpn_type: type of VPN and the technology behind it.
-                            Acceptable formats: l2 and l3
-        """
-        create_bgpvpn_args = {
-            "route_targets": route_targets,
-            "import_targets": import_targets,
-            "export_targets": export_targets,
-            "route_distinguishers": route_distinguishers,
-            "type": bgpvpn_type
-        }
-        bgpvpn = self._create_bgpvpn(**create_bgpvpn_args)
-        update_bgpvpn_args = {
-            "update_name": update_name,
-            "route_targets": updated_route_targets,
-            "import_targets": updated_import_targets,
-            "export_targets": updated_export_targets,
-            "route_distinguishers": updated_route_distinguishers,
-        }
-        self._update_bgpvpn(bgpvpn, **update_bgpvpn_args)
-
-
-@validation.add("enum", param_name="bgpvpn_type", values=["l2", "l3"],
-                missed=True)
-@validation.add("required_neutron_extensions", extensions=["bgpvpn"])
-@validation.add("required_services", services=[consts.Service.NEUTRON])
-@validation.add("required_platform", platform="openstack",
-                admin=True, users=True)
-@validation.add("required_contexts", contexts=("network",))
-@scenario.configure(context={"admin_cleanup": ["neutron"],
-                             "cleanup": ["neutron"]},
-                    name="NeutronBGPVPN.create_bgpvpn_assoc_disassoc_networks",
-                    platform="openstack")
-class CreateAndAssociateDissassociateNetworks(utils.NeutronScenario):
-
-    def run(self, route_targets=None, import_targets=None,
-            export_targets=None, route_distinguishers=None, bgpvpn_type="l3"):
-        """Associate a network and disassociate it from a BGP VPN.
-
-        Measure the "neutron bgpvpn-create", "neutron bgpvpn-net-assoc-create"
-        and "neutron bgpvpn-net-assoc-delete" command performance.
-
-        :param route_targets: Route Targets that will be both imported and
-                              used for export
-        :param import_targets: Additional Route Targets that will be imported
-        :param export_targets: Additional Route Targets that will be used
-                               for export.
-        :param route_distinguishers: List of route distinguisher strings
-        :param bgpvpn_type: type of VPN and the technology behind it.
-                            Acceptable formats: l2 and l3
-        """
-
-        networks = self.context.get("tenant", {}).get("networks", [])
-        network = networks[0]
-        bgpvpn = self._create_bgpvpn(route_targets=route_targets,
-                                     import_targets=import_targets,
-                                     export_targets=export_targets,
-                                     route_distinguishers=route_distinguishers,
-                                     type=bgpvpn_type,
-                                     tenant_id=network["tenant_id"])
-        net_asso = self._create_bgpvpn_network_assoc(bgpvpn, network)
-        self._delete_bgpvpn_network_assoc(bgpvpn, net_asso)
-
-
-@validation.add("enum", param_name="bgpvpn_type", values=["l2", "l3"],
-                missed=True)
-@validation.add("required_neutron_extensions", extensions=["bgpvpn"])
-@validation.add("required_services", services=[consts.Service.NEUTRON])
-@validation.add("required_platform", platform="openstack",
-                admin=True, users=True)
-@validation.add("required_contexts", contexts=("router",))
-@scenario.configure(context={"admin_cleanup": ["neutron"],
-                             "cleanup": ["neutron"]},
-                    name="NeutronBGPVPN.create_bgpvpn_assoc_disassoc_routers",
-                    platform="openstack")
-class CreateAndAssociateDissassociateRouters(utils.NeutronScenario):
-
-    def run(self, route_targets=None, import_targets=None,
-            export_targets=None, route_distinguishers=None, bgpvpn_type="l3"):
-        """Associate a router and disassociate it from a BGP VPN.
-
-        Measure the "neutron bgpvpn-create",
-        "neutron bgpvpn-router-assoc-create" and
-        "neutron bgpvpn-router-assoc-delete" command performance.
-
-        :param route_targets: Route Targets that will be both imported and
-                              used for export
-        :param import_targets: Additional Route Targets that will be imported
-        :param export_targets: Additional Route Targets that will be used
-                               for export.
-        :param route_distinguishers: List of route distinguisher strings
-        :param bgpvpn_type: type of VPN and the technology behind it.
-                            Acceptable formats: l2 and l3
-        """
-
-        routers = self.context.get("tenant", {}).get("routers", [])
-        router = routers[0]["router"]
-        bgpvpn = self._create_bgpvpn(route_targets=route_targets,
-                                     import_targets=import_targets,
-                                     export_targets=export_targets,
-                                     route_distinguishers=route_distinguishers,
-                                     type=bgpvpn_type,
-                                     tenant_id=router["tenant_id"])
-        router_asso = self._create_bgpvpn_router_assoc(bgpvpn, router)
-        self._delete_bgpvpn_router_assoc(bgpvpn, router_asso)
-
-
-@validation.add("enum", param_name="bgpvpn_type", values=["l2", "l3"],
-                missed=True)
-@validation.add("required_neutron_extensions", extensions=["bgpvpn"])
-@validation.add("required_services", services=[consts.Service.NEUTRON])
-@validation.add("required_platform", platform="openstack",
-                admin=True, users=True)
-@validation.add("required_contexts", contexts=("network",))
-@scenario.configure(context={"admin_cleanup": ["neutron"]},
-                    name="NeutronBGPVPN.create_and_list_networks_associations",
-                    platform="openstack")
-class CreateAndListNetworksAssocs(utils.NeutronScenario):
-
-    def run(self, route_targets=None, import_targets=None,
-            export_targets=None, route_distinguishers=None, bgpvpn_type="l3"):
-        """Associate a network and list networks associations.
-
-        Measure the "neutron bgpvpn-create",
-        "neutron bgpvpn-net-assoc-create" and
-        "neutron bgpvpn-net-assoc-list" command performance.
-
-        :param route_targets: Route Targets that will be both imported and
-                              used for export
-        :param import_targets: Additional Route Targets that will be imported
-        :param export_targets: Additional Route Targets that will be used
-                               for export.
-        :param route_distinguishers: List of route distinguisher strings
-        :param bgpvpn_type: type of VPN and the technology behind it.
-                            Acceptable formats: l2 and l3
-        """
-
-        networks = self.context.get("tenant", {}).get("networks", [])
-        network = networks[0]
-        bgpvpn = self._create_bgpvpn(route_targets=route_targets,
-                                     import_targets=import_targets,
-                                     export_targets=export_targets,
-                                     route_distinguishers=route_distinguishers,
-                                     type=bgpvpn_type,
-                                     tenant_id=network["tenant_id"])
-        self._create_bgpvpn_network_assoc(bgpvpn, network)
-        net_assocs = self._list_bgpvpn_network_assocs(
-            bgpvpn)["network_associations"]
-
-        network_id = network["id"]
-        msg = ("Network not included in the list of associated networks\n"
-               "Network created: {}\n"
-               "List of associations: {}").format(network, net_assocs)
-        list_networks = [net_assoc["network_id"] for net_assoc in net_assocs]
-        self.assertIn(network_id, list_networks, err_msg=msg)
-
-
-@validation.add("enum", param_name="bgpvpn_type", values=["l2", "l3"],
-                missed=True)
-@validation.add("required_neutron_extensions", extensions=["bgpvpn"])
-@validation.add("required_services", services=[consts.Service.NEUTRON])
-@validation.add("required_platform", platform="openstack",
-                admin=True, users=True)
-@validation.add("required_contexts", contexts=("router",))
-@scenario.configure(context={"admin_cleanup": ["neutron"]},
-                    name="NeutronBGPVPN.create_and_list_routers_associations",
-                    platform="openstack")
-class CreateAndListRoutersAssocs(utils.NeutronScenario):
-
-    def run(self, route_targets=None, import_targets=None,
-            export_targets=None, route_distinguishers=None, bgpvpn_type="l3"):
-        """Associate a router and list routers associations.
-
-        Measure the "neutron bgpvpn-create",
-        "neutron bgpvpn-router-assoc-create" and
-        "neutron bgpvpn-router-assoc-list" command performance.
-
-        :param route_targets: Route Targets that will be both imported and
-                              used for export
-        :param import_targets: Additional Route Targets that will be imported
-        :param export_targets: Additional Route Targets that will be used
-                               for export.
-        :param route_distinguishers: List of route distinguisher strings
-        :param bgpvpn_type: type of VPN and the technology behind it.
-                            Acceptable formats: l2 and l3
-        """
-
-        routers = self.context.get("tenant", {}).get("routers", [])
-        router = routers[0]["router"]
-        bgpvpn = self._create_bgpvpn(route_targets=route_targets,
-                                     import_targets=import_targets,
-                                     export_targets=export_targets,
-                                     route_distinguishers=route_distinguishers,
-                                     type=bgpvpn_type,
-                                     tenant_id=router["tenant_id"])
-        self._create_bgpvpn_router_assoc(bgpvpn, router)
-        router_assocs = self._list_bgpvpn_router_assocs(
-            bgpvpn)["router_associations"]
-
-        router_id = router["id"]
-        msg = ("Router not included in the list of associated routers\n"
-               "Router created: {}\n"
-               "List of associations: {}").format(router, router_assocs)
-
-        list_routers = [r_assoc["router_id"] for r_assoc in router_assocs]
-        self.assertIn(router_id, list_routers, err_msg=msg)
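As an illustration, a sketch of a task entry for one of the BGP VPN scenarios above, written as a Python dict (task files are usually JSON or YAML); the route target value is hypothetical and the scenario requires an admin deployment:

# sketch: exercising NeutronBGPVPN.create_and_delete_bgpvpns
task = {
    "NeutronBGPVPN.create_and_delete_bgpvpns": [{
        "args": {"bgpvpn_type": "l3",
                 "route_targets": ["64512:1"]},  # hypothetical RT
        "runner": {"type": "constant", "times": 10, "concurrency": 2},
    }]
}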
diff --git a/rally/plugins/openstack/scenarios/neutron/loadbalancer_v1.py b/rally/plugins/openstack/scenarios/neutron/loadbalancer_v1.py
deleted file mode 100644
index 4012589a..00000000
--- a/rally/plugins/openstack/scenarios/neutron/loadbalancer_v1.py
+++ /dev/null
@@ -1,291 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import random
-
-from rally import consts
-from rally.plugins.openstack import scenario
-from rally.plugins.openstack.scenarios.neutron import utils
-from rally.task import atomic
-from rally.task import validation
-
-
-"""Scenarios for Neutron Loadbalancer v1."""
-
-
-@validation.add("restricted_parameters", param_names="subnet_id",
-                subdict="pool_create_args")
-@validation.add("required_neutron_extensions", extensions=["lbaas"])
-@validation.add("required_services",
-                services=[consts.Service.NEUTRON])
-@validation.add("required_platform", platform="openstack", users=True)
-@validation.add("required_contexts", contexts=("network",))
-@scenario.configure(context={"cleanup": ["neutron"]},
-                    name="NeutronLoadbalancerV1.create_and_list_pools",
-                    platform="openstack")
-class CreateAndListPools(utils.NeutronScenario):
-
-    def run(self, pool_create_args=None):
-        """Create a pool(v1) and then list pools(v1).
-
-        Measure the "neutron lb-pool-list" command performance.
-        The scenario creates a pool for every subnet and then lists pools.
-
-        :param pool_create_args: dict, POST /lb/pools request options
-        """
-        pool_create_args = pool_create_args or {}
-        networks = self.context.get("tenant", {}).get("networks", [])
-        self._create_v1_pools(networks, **pool_create_args)
-        self._list_v1_pools()
-
-
-@validation.add("restricted_parameters", param_names="subnet_id",
-                subdict="pool_create_args")
-@validation.add("required_neutron_extensions", extensions=["lbaas"])
-@validation.add("required_services",
-                services=[consts.Service.NEUTRON])
-@validation.add("required_platform", platform="openstack", users=True)
-@validation.add("required_contexts", contexts=("network",))
-@scenario.configure(context={"cleanup": ["neutron"]},
-                    name="NeutronLoadbalancerV1.create_and_delete_pools",
-                    platform="openstack")
-class CreateAndDeletePools(utils.NeutronScenario):
-
-    def run(self, pool_create_args=None):
-        """Create pools(v1) and delete pools(v1).
-
-        Measure the "neutron lb-pool-create" and "neutron lb-pool-delete"
-        command performance. The scenario creates a pool for every subnet
-        and then deletes those pools.
-
-        :param pool_create_args: dict, POST /lb/pools request options
-        """
-        pool_create_args = pool_create_args or {}
-        networks = self.context.get("tenant", {}).get("networks", [])
-        pools = self._create_v1_pools(networks, **pool_create_args)
-        for pool in pools:
-            self._delete_v1_pool(pool["pool"])
-
-
-@validation.add("restricted_parameters", param_names="subnet_id",
-                subdict="pool_create_args")
-@validation.add("required_neutron_extensions", extensions=["lbaas"])
-@validation.add("required_services",
-                services=[consts.Service.NEUTRON])
-@validation.add("required_platform", platform="openstack", users=True)
-@validation.add("required_contexts", contexts=("network",))
-@scenario.configure(context={"cleanup": ["neutron"]},
-                    name="NeutronLoadbalancerV1.create_and_update_pools",
-                    platform="openstack")
-class CreateAndUpdatePools(utils.NeutronScenario):
-
-    def run(self, pool_update_args=None, pool_create_args=None):
-        """Create pools(v1) and update pools(v1).
-
-        Measure the "neutron lb-pool-create" and "neutron lb-pool-update"
-        command performance. The scenario creates a pool for every subnet
-        and then updates those pools.
-
-        :param pool_create_args: dict, POST /lb/pools request options
-        :param pool_update_args: dict, POST /lb/pools update options
-        """
-        pool_create_args = pool_create_args or {}
-        pool_update_args = pool_update_args or {}
-        networks = self.context.get("tenant", {}).get("networks", [])
-        pools = self._create_v1_pools(networks, **pool_create_args)
-        for pool in pools:
-            self._update_v1_pool(pool, **pool_update_args)
-
-
-@validation.add("restricted_parameters", param_names=["pool_id", "subnet_id"],
-                subdict="vip_create_args")
-@validation.add("required_neutron_extensions", extensions=["lbaas"])
-@validation.add("required_services",
-                services=[consts.Service.NEUTRON])
-@validation.add("required_platform", platform="openstack", users=True)
-@validation.add("required_contexts", contexts=("network",))
-@scenario.configure(context={"cleanup": ["neutron"]},
-                    name="NeutronLoadbalancerV1.create_and_list_vips",
-                    platform="openstack")
-class CreateAndListVips(utils.NeutronScenario):
-
-    def run(self, pool_create_args=None, vip_create_args=None):
-        """Create a vip(v1) and then list vips(v1).
-
-        Measure the "neutron lb-vip-create" and "neutron lb-vip-list" command
-        performance. The scenario creates a vip for every pool created and
-        then lists vips.
-
-        :param vip_create_args: dict, POST /lb/vips request options
-        :param pool_create_args: dict, POST /lb/pools request options
-        """
-        vip_create_args = vip_create_args or {}
-        pool_create_args = pool_create_args or {}
-        networks = self.context.get("tenant", {}).get("networks", [])
-        pools = self._create_v1_pools(networks, **pool_create_args)
-        with atomic.ActionTimer(self, "neutron.create_%s_vips" % len(pools)):
-            for pool in pools:
-                self._create_v1_vip(pool, **vip_create_args)
-        self._list_v1_vips()
-
-
-@validation.add("restricted_parameters", param_names=["pool_id", "subnet_id"],
-                subdict="vip_create_args")
-@validation.add("required_neutron_extensions", extensions=["lbaas"])
-@validation.add("required_services",
-                services=[consts.Service.NEUTRON])
-@validation.add("required_platform", platform="openstack", users=True)
-@validation.add("required_contexts", contexts=("network",))
-@scenario.configure(context={"cleanup": ["neutron"]},
-                    name="NeutronLoadbalancerV1.create_and_delete_vips",
-                    platform="openstack")
-class CreateAndDeleteVips(utils.NeutronScenario):
-
-    def run(self, pool_create_args=None, vip_create_args=None):
-        """Create a vip(v1) and then delete vips(v1).
-
-        Measure the "neutron lb-vip-create" and "neutron lb-vip-delete"
-        command performance. The scenario creates a vip for every pool and
-        then deletes those vips.
-
-        :param pool_create_args: dict, POST /lb/pools request options
-        :param vip_create_args: dict, POST /lb/vips request options
-        """
-        vips = []
-        pool_create_args = pool_create_args or {}
-        vip_create_args = vip_create_args or {}
-        networks = self.context.get("tenant", {}).get("networks", [])
-        pools = self._create_v1_pools(networks, **pool_create_args)
-        with atomic.ActionTimer(self, "neutron.create_%s_vips" % len(pools)):
-            for pool in pools:
-                vips.append(self._create_v1_vip(pool, **vip_create_args))
-        for vip in vips:
-            self._delete_v1_vip(vip["vip"])
-
-
-@validation.add("restricted_parameters", param_names=["pool_id", "subnet_id"],
-                subdict="vip_create_args")
-@validation.add("required_neutron_extensions", extensions=["lbaas"])
-@validation.add("required_services",
-                services=[consts.Service.NEUTRON])
-@validation.add("required_platform", platform="openstack", users=True)
-@validation.add("required_contexts", contexts=("network",))
-@scenario.configure(context={"cleanup": ["neutron"]},
-                    name="NeutronLoadbalancerV1.create_and_update_vips",
-                    platform="openstack")
-class CreateAndUpdateVips(utils.NeutronScenario):
-
-    def run(self, pool_create_args=None,
-            vip_update_args=None, vip_create_args=None):
-        """Create vips(v1) and update vips(v1).
-
-        Measure the "neutron lb-vip-create" and "neutron lb-vip-update"
-        command performance. The scenario creates a vip for every pool
-        created and then updates those vips.
-
-        :param pool_create_args: dict, POST /lb/pools request options
-        :param vip_create_args: dict, POST /lb/vips request options
-        :param vip_update_args: dict, POST /lb/vips update options
-        """
-        vips = []
-        pool_create_args = pool_create_args or {}
-        vip_create_args = vip_create_args or {}
-        vip_update_args = vip_update_args or {}
-        networks = self.context.get("tenant", {}).get("networks", [])
-        pools = self._create_v1_pools(networks, **pool_create_args)
-        with atomic.ActionTimer(self, "neutron.create_%s_vips" % len(pools)):
-            for pool in pools:
-                vips.append(self._create_v1_vip(pool, **vip_create_args))
-        for vip in vips:
-            self._update_v1_vip(vip, **vip_update_args)
-
-
-@validation.add("required_neutron_extensions", extensions=["lbaas"])
-@validation.add("required_services",
-                services=[consts.Service.NEUTRON])
-@validation.add("required_platform", platform="openstack", users=True)
-@scenario.configure(
-    context={"cleanup": ["neutron"]},
-    name="NeutronLoadbalancerV1.create_and_list_healthmonitors",
-    platform="openstack")
-class CreateAndListHealthmonitors(utils.NeutronScenario):
-
-    def run(self, healthmonitor_create_args=None):
-        """Create healthmonitors(v1) and list healthmonitors(v1).
-
-        Measure the "neutron lb-healthmonitor-list" command performance. This
-        scenario creates healthmonitors and lists them.
-
-        :param healthmonitor_create_args: dict, POST /lb/healthmonitors
-                                          request options
-        """
-        healthmonitor_create_args = healthmonitor_create_args or {}
-        self._create_v1_healthmonitor(**healthmonitor_create_args)
-        self._list_v1_healthmonitors()
-
-
-@validation.add("required_neutron_extensions", extensions=["lbaas"])
-@validation.add("required_services",
-                services=[consts.Service.NEUTRON])
-@validation.add("required_platform", platform="openstack", users=True)
-@scenario.configure(
-    context={"cleanup": ["neutron"]},
-    name="NeutronLoadbalancerV1.create_and_delete_healthmonitors",
-    platform="openstack")
-class CreateAndDeleteHealthmonitors(utils.NeutronScenario):
-
-    def run(self, healthmonitor_create_args=None):
-        """Create a healthmonitor(v1) and delete it.
-
-        Measure the "neutron lb-healthmonitor-create" and "neutron
-        lb-healthmonitor-delete" command performance. The scenario creates
-        a healthmonitor and deletes it.
-
-        :param healthmonitor_create_args: dict, POST /lb/healthmonitors
-                                          request options
-        """
-        healthmonitor_create_args = healthmonitor_create_args or {}
-        healthmonitor = self._create_v1_healthmonitor(
-            **healthmonitor_create_args)
-        self._delete_v1_healthmonitor(healthmonitor["health_monitor"])
-
-
-@validation.add("required_neutron_extensions", extensions=["lbaas"])
-@validation.add("required_services",
-                services=[consts.Service.NEUTRON])
-@validation.add("required_platform", platform="openstack", users=True)
-@scenario.configure(
-    context={"cleanup": ["neutron"]},
-    name="NeutronLoadbalancerV1.create_and_update_healthmonitors",
-    platform="openstack")
-class CreateAndUpdateHealthmonitors(utils.NeutronScenario):
-
-    def run(self, healthmonitor_create_args=None,
-            healthmonitor_update_args=None):
-        """Create a healthmonitor(v1) and update healthmonitors(v1).
-
-        Measure the "neutron lb-healthmonitor-create" and "neutron
-        lb-healthmonitor-update" command performance. The scenario creates
-        healthmonitors and then updates them.
-
-        :param healthmonitor_create_args: dict, POST /lb/healthmonitors
-                                          request options
-        :param healthmonitor_update_args: dict, POST /lb/healthmonitors
-                                          update options
-        """
-        healthmonitor_create_args = healthmonitor_create_args or {}
-        healthmonitor_update_args = healthmonitor_update_args or {
-            "max_retries": random.choice(range(1, 10))}
-        healthmonitor = self._create_v1_healthmonitor(
-            **healthmonitor_create_args)
-        self._update_v1_healthmonitor(healthmonitor,
-                                      **healthmonitor_update_args)
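To make the healthmonitor scenarios above concrete, a sketch of task arguments expressed as a Python dict (task files are usually JSON or YAML); the create fields follow the LBaaS v1 healthmonitor API, but the exact values shown are assumptions:

# sketch: exercising NeutronLoadbalancerV1.create_and_update_healthmonitors
task = {
    "NeutronLoadbalancerV1.create_and_update_healthmonitors": [{
        "args": {
            "healthmonitor_create_args": {"type": "PING", "delay": 20,
                                          "timeout": 10, "max_retries": 3},
            "healthmonitor_update_args": {"max_retries": 5},
        },
        "runner": {"type": "constant", "times": 10, "concurrency": 2},
        "context": {"users": {"tenants": 1, "users_per_tenant": 1}},
    }]
}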
diff --git a/rally/plugins/openstack/scenarios/neutron/loadbalancer_v2.py b/rally/plugins/openstack/scenarios/neutron/loadbalancer_v2.py
deleted file mode 100755
index 328a33c0..00000000
--- a/rally/plugins/openstack/scenarios/neutron/loadbalancer_v2.py
+++ /dev/null
@@ -1,49 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from rally import consts
-from rally.plugins.openstack import scenario
-from rally.plugins.openstack.scenarios.neutron import utils
-from rally.task import validation
-
-
-"""Scenarios for Neutron Loadbalancer v2."""
-
-
-@validation.add("required_neutron_extensions", extensions=["lbaasv2"])
-@validation.add("required_services",
-                services=[consts.Service.NEUTRON])
-@validation.add("required_platform", platform="openstack", users=True)
-@validation.add("required_contexts", contexts=("network",))
-@scenario.configure(context={"cleanup": ["neutron"]},
-                    name="NeutronLoadbalancerV2.create_and_list_loadbalancers",
-                    platform="openstack")
-class CreateAndListLoadbalancers(utils.NeutronScenario):
-
-    def run(self, lb_create_args=None):
-        """Create a loadbalancer(v2) and then list loadbalancers(v2).
-
-        Measure the "neutron lbaas-loadbalancer-list" command performance.
-        The scenario creates a loadbalancer for every subnet and then lists
-        loadbalancers.
-
-        :param lb_create_args: dict, POST /lbaas/loadbalancers
-                               request options
-        """
-        lb_create_args = lb_create_args or {}
-        subnets = []
-        networks = self.context.get("tenant", {}).get("networks", [])
-        for network in networks:
-            subnets.extend(network.get("subnets", []))
-        for subnet_id in subnets:
-            self._create_lbaasv2_loadbalancer(subnet_id, **lb_create_args)
-        self._list_lbaasv2_loadbalancers()
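A similar sketch for the v2 scenario above; the "network" context options shown (networks and subnets per tenant) are assumptions about that context plugin's configuration:

# sketch: exercising NeutronLoadbalancerV2.create_and_list_loadbalancers
task = {
    "NeutronLoadbalancerV2.create_and_list_loadbalancers": [{
        "runner": {"type": "constant", "times": 5, "concurrency": 1},
        "context": {
            "users": {"tenants": 1, "users_per_tenant": 1},
            "network": {"networks_per_tenant": 1, "subnets_per_network": 2},
        },
    }]
}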
diff --git a/rally/plugins/openstack/scenarios/neutron/network.py b/rally/plugins/openstack/scenarios/neutron/network.py
deleted file mode 100644
index fd349c41..00000000
--- a/rally/plugins/openstack/scenarios/neutron/network.py
+++ /dev/null
@@ -1,577 +0,0 @@
-# Copyright 2014: Intel Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from rally import consts
-from rally.plugins.openstack import scenario
-from rally.plugins.openstack.scenarios.neutron import utils
-from rally.task import atomic
-from rally.task import validation
-
-
-"""Scenarios for Neutron."""
-
-
-@validation.add("required_services",
-                services=[consts.Service.NEUTRON])
-@validation.add("required_platform", platform="openstack", users=True)
-@scenario.configure(context={"cleanup": ["neutron"]},
-                    name="NeutronNetworks.create_and_list_networks",
-                    platform="openstack")
-class CreateAndListNetworks(utils.NeutronScenario):
-
-    def run(self, network_create_args=None):
-        """Create a network and then list all networks.
-
-        Measure the "neutron net-list" command performance.
-
-        If you have only 1 user in your context, you will
-        add 1 network on every iteration. So you will have more
-        and more networks and will be able to measure the
-        performance of the "neutron net-list" command depending on
-        the number of networks owned by users.
-
-        :param network_create_args: dict, POST /v2.0/networks request options
-        """
-        self._create_network(network_create_args or {})
-        self._list_networks()
-
-
-@validation.add("required_services",
-                services=[consts.Service.NEUTRON])
-@validation.add("required_platform", platform="openstack", users=True)
-@scenario.configure(context={"cleanup": ["neutron"]},
-                    name="NeutronNetworks.create_and_show_network",
-                    platform="openstack")
-class CreateAndShowNetwork(utils.NeutronScenario):
-
-    def run(self, network_create_args=None):
-        """Create a network and show network details.
-
-        Measure the "neutron net-show" command performance.
-
-        :param network_create_args: dict, POST /v2.0/networks request options
-        """
-        network = self._create_network(network_create_args or {})
-        self._show_network(network)
-
-
-@validation.add("required_services",
-                services=[consts.Service.NEUTRON])
-@validation.add("required_platform", platform="openstack", users=True)
-@scenario.configure(context={"cleanup": ["neutron"]},
-                    name="NeutronNetworks.create_and_update_networks",
-                    platform="openstack")
-class CreateAndUpdateNetworks(utils.NeutronScenario):
-
-    def run(self, network_update_args, network_create_args=None):
-        """Create and update a network.
-
-        Measure the "neutron net-create" and "neutron net-update" command
-        performance.
-
-        :param network_update_args: dict, PUT /v2.0/networks update request
-        :param network_create_args: dict, POST /v2.0/networks request options
-        """
-        network = self._create_network(network_create_args or {})
-        self._update_network(network, network_update_args)
-
-
-@validation.add("required_services",
-                services=[consts.Service.NEUTRON])
-@scenario.configure(context={"cleanup": ["neutron"]},
-                    name="NeutronNetworks.create_and_delete_networks",
-                    platform="openstack")
-class CreateAndDeleteNetworks(utils.NeutronScenario):
-
-    def run(self, network_create_args=None):
-        """Create and delete a network.
-
-        Measure the "neutron net-create" and "net-delete" command performance.
-
-        :param network_create_args: dict, POST /v2.0/networks request options
-        """
-        network = self._create_network(network_create_args or {})
-        self._delete_network(network["network"])
-
-
-@validation.add("number", param_name="subnets_per_network", minval=1,
-                integer_only=True)
-@validation.add("required_services",
-                services=[consts.Service.NEUTRON])
-@validation.add("required_platform", platform="openstack", users=True)
-@scenario.configure(context={"cleanup": ["neutron"]},
-                    name="NeutronNetworks.create_and_list_subnets",
-                    platform="openstack")
-class CreateAndListSubnets(utils.NeutronScenario):
-
-    def run(self, network_create_args=None, subnet_create_args=None,
-            subnet_cidr_start=None, subnets_per_network=1):
-        """Create a given number of subnets and list all subnets.
-
-        The scenario creates a network, a given number of subnets and then
-        lists subnets.
-
-        :param network_create_args: dict, POST /v2.0/networks request
-                                    options. Deprecated
-        :param subnet_create_args: dict, POST /v2.0/subnets request options
-        :param subnet_cidr_start: str, start value for subnets CIDR
-        :param subnets_per_network: int, number of subnets for one network
-        """
-        network = self._create_network(network_create_args or {})
-        self._create_subnets(network, subnet_create_args, subnet_cidr_start,
-                             subnets_per_network)
-        self._list_subnets()
-
-
-@validation.add("number", param_name="subnets_per_network", minval=1,
-                integer_only=True)
-@validation.add("required_services",
-                services=[consts.Service.NEUTRON])
-@validation.add("required_platform", platform="openstack", users=True)
-@scenario.configure(context={"cleanup": ["neutron"]},
-                    name="NeutronNetworks.create_and_update_subnets",
-                    platform="openstack")
-class CreateAndUpdateSubnets(utils.NeutronScenario):
-
-    def run(self, subnet_update_args, network_create_args=None,
-            subnet_create_args=None, subnet_cidr_start=None,
-            subnets_per_network=1):
-        """Create and update a subnet.
-
-        The scenario creates a network, a given number of subnets
-        and then updates the subnets. This scenario measures the
-        "neutron subnet-update" command performance.
-
-        :param subnet_update_args: dict, PUT /v2.0/subnets update options
-        :param network_create_args: dict, POST /v2.0/networks request
-                                    options. Deprecated.
-        :param subnet_create_args: dict, POST /v2.0/subnets request options
-        :param subnet_cidr_start: str, start value for subnets CIDR
-        :param subnets_per_network: int, number of subnets for one network
-        """
-        network = self._create_network(network_create_args or {})
-        subnets = self._create_subnets(network, subnet_create_args,
-                                       subnet_cidr_start, subnets_per_network)
-
-        for subnet in subnets:
-            self._update_subnet(subnet, subnet_update_args)
-
-
-@validation.add("number", param_name="subnets_per_network", minval=1,
-                integer_only=True)
-@validation.add("required_services",
-                services=[consts.Service.NEUTRON])
-@validation.add("required_platform", platform="openstack", users=True)
-@scenario.configure(context={"cleanup": ["neutron"]},
-                    name="NeutronNetworks.create_and_show_subnets",
-                    platform="openstack")
-class CreateAndShowSubnets(utils.NeutronScenario):
-
-    def run(self, network_create_args=None,
-            subnet_create_args=None, subnet_cidr_start=None,
-            subnets_per_network=1):
-        """Create and show subnet details.
-
-        The scenario creates a network, a given number of subnets
-        and shows the subnet details. This scenario measures the
-        "neutron subnet-show" command performance.
-
-        :param network_create_args: dict, POST /v2.0/networks request
-                                    options.
-        :param subnet_create_args: dict, POST /v2.0/subnets request options
-        :param subnet_cidr_start: str, start value for subnets CIDR
-        :param subnets_per_network: int, number of subnets for one network
-        """
-        network = self._get_or_create_network(network_create_args)
-        subnets = self._create_subnets(network, subnet_create_args,
-                                       subnet_cidr_start, subnets_per_network)
-
-        for subnet in subnets:
-            self._show_subnet(subnet)
-
-
-@validation.add("number", param_name="subnets_per_network", minval=1,
-                integer_only=True)
-@validation.add("required_services",
-                services=[consts.Service.NEUTRON])
-@scenario.configure(context={"cleanup": ["neutron"]},
-                    name="NeutronNetworks.create_and_delete_subnets",
-                    platform="openstack")
-class CreateAndDeleteSubnets(utils.NeutronScenario):
-
-    def run(self, network_create_args=None, subnet_create_args=None,
-            subnet_cidr_start=None, subnets_per_network=1):
-        """Create and delete a given number of subnets.
-
-        The scenario creates a network, a given number of subnets and then
-        deletes subnets.
-
-        :param network_create_args: dict, POST /v2.0/networks request
-                                    options. Deprecated.
-        :param subnet_create_args: dict, POST /v2.0/subnets request options
-        :param subnet_cidr_start: str, start value for subnets CIDR
-        :param subnets_per_network: int, number of subnets for one network
-        """
-        network = self._get_or_create_network(network_create_args)
-        subnets = self._create_subnets(network, subnet_create_args,
-                                       subnet_cidr_start, subnets_per_network)
-
-        for subnet in subnets:
-            self._delete_subnet(subnet)
-
-
-@validation.add("number", param_name="subnets_per_network", minval=1,
-                integer_only=True)
-@validation.add("required_services",
-                services=[consts.Service.NEUTRON])
-@validation.add("required_platform", platform="openstack", users=True)
-@scenario.configure(context={"cleanup": ["neutron"]},
-                    name="NeutronNetworks.create_and_list_routers",
-                    platform="openstack")
-class CreateAndListRouters(utils.NeutronScenario):
-
-    def run(self, network_create_args=None, subnet_create_args=None,
-            subnet_cidr_start=None, subnets_per_network=1,
-            router_create_args=None):
-        """Create a given number of routers and list all routers.
-
-        Create a network, a given number of subnets and routers
-        and then list all routers.
-
-        :param network_create_args: dict, POST /v2.0/networks request
-                                    options. Deprecated.
-        :param subnet_create_args: dict, POST /v2.0/subnets request options
-        :param subnet_cidr_start: str, start value for subnets CIDR
-        :param subnets_per_network: int, number of subnets for one network
-        :param router_create_args: dict, POST /v2.0/routers request options
-        """
-        self._create_network_structure(network_create_args, subnet_create_args,
-                                       subnet_cidr_start, subnets_per_network,
-                                       router_create_args)
-        self._list_routers()
-
-
-@validation.add("number", param_name="subnets_per_network", minval=1,
-                integer_only=True)
-@validation.add("required_services", services=[consts.Service.NEUTRON])
-@validation.add("required_platform", platform="openstack", users=True)
-@scenario.configure(context={"cleanup": ["neutron"]},
-                    name="NeutronNetworks.create_and_show_routers",
-                    platform="openstack")
-class CreateAndShowRouters(utils.NeutronScenario):
-
-    def run(self, network_create_args=None, subnet_create_args=None,
-            subnet_cidr_start=None, subnets_per_network=1,
-            router_create_args=None):
-        """Create and show a given number of routers.
-
-        Create a network, a given number of subnets and routers
-        and then show all routers.
-
-        :param network_create_args: dict, POST /v2.0/networks request
-                                    options
-        :param subnet_create_args: dict, POST /v2.0/subnets request options
-        :param subnet_cidr_start: str, start value for subnets CIDR
-        :param subnets_per_network: int, number of subnets for each network
-        :param router_create_args: dict, POST /v2.0/routers request options
-        """
-        network, subnets, routers = self._create_network_structure(
-            network_create_args, subnet_create_args, subnet_cidr_start,
-            subnets_per_network, router_create_args)
-
-        for router in routers:
-            self._show_router(router)
-
-
-@validation.add("number", param_name="subnets_per_network", minval=1,
-                integer_only=True)
-@validation.add("required_services",
-                services=[consts.Service.NEUTRON])
-@scenario.configure(context={"cleanup": ["neutron"]},
-                    name="NeutronNetworks.create_and_update_routers",
-                    platform="openstack")
-class CreateAndUpdateRouters(utils.NeutronScenario):
-
-    def run(self, router_update_args, network_create_args=None,
-            subnet_create_args=None, subnet_cidr_start=None,
-            subnets_per_network=1, router_create_args=None):
-        """Create and update a given number of routers.
-
-        Create a network, a given number of subnets and routers
-        and then update all routers.
-
-        :param router_update_args: dict, PUT /v2.0/routers update options
-        :param network_create_args: dict, POST /v2.0/networks request
-                                    options. Deprecated.
-        :param subnet_create_args: dict, POST /v2.0/subnets request options
-        :param subnet_cidr_start: str, start value for subnets CIDR
-        :param subnets_per_network: int, number of subnets for one network
-        :param router_create_args: dict, POST /v2.0/routers request options
-        """
-        network, subnets, routers = self._create_network_structure(
-            network_create_args, subnet_create_args, subnet_cidr_start,
-            subnets_per_network, router_create_args)
-
-        for router in routers:
-            self._update_router(router, router_update_args)
-
-
-@validation.add("number", param_name="subnets_per_network", minval=1,
-                integer_only=True)
-@validation.add("required_services",
-                services=[consts.Service.NEUTRON])
-@scenario.configure(context={"cleanup": ["neutron"]},
-                    name="NeutronNetworks.create_and_delete_routers",
-                    platform="openstack")
-class CreateAndDeleteRouters(utils.NeutronScenario):
-
-    def run(self, network_create_args=None, subnet_create_args=None,
-            subnet_cidr_start=None, subnets_per_network=1,
-            router_create_args=None):
-        """Create and delete a given number of routers.
-
-        Create a network, a given number of subnets and routers
-        and then delete all routers.
-
-        :param network_create_args: dict, POST /v2.0/networks request
-                                    options. Deprecated.
-
-
-@validation.add("number", param_name="subnets_per_network", minval=1,
-                integer_only=True)
-@validation.add("required_services",
-                services=[consts.Service.NEUTRON])
-@scenario.configure(context={"cleanup": ["neutron"]},
-                    name="NeutronNetworks.create_and_delete_routers",
-                    platform="openstack")
-class CreateAndDeleteRouters(utils.NeutronScenario):
-
-    def run(self, network_create_args=None, subnet_create_args=None,
-            subnet_cidr_start=None, subnets_per_network=1,
-            router_create_args=None):
-        """Create and delete a given number of routers.
-
-        Create a network, a given number of subnets and routers
-        and then delete all routers.
-
-        :param network_create_args: dict, POST /v2.0/networks request
-                                    options. Deprecated.
-        :param subnet_create_args: dict, POST /v2.0/subnets request options
-        :param subnet_cidr_start: str, start value for subnets CIDR
-        :param subnets_per_network: int, number of subnets for one network
-        :param router_create_args: dict, POST /v2.0/routers request options
-        """
-        network, subnets, routers = self._create_network_structure(
-            network_create_args, subnet_create_args, subnet_cidr_start,
-            subnets_per_network, router_create_args)
-
-        for e in range(subnets_per_network):
-            router = routers[e]
-            subnet = subnets[e]
-            self._remove_interface_router(subnet["subnet"], router["router"])
-            self._delete_router(router)
-
-
-@validation.add("required_services",
-                services=[consts.Service.NEUTRON])
-@validation.add("required_platform", platform="openstack", users=True)
-@scenario.configure(context={"cleanup": ["neutron"]},
-                    name="NeutronNetworks.set_and_clear_router_gateway",
-                    platform="openstack")
-class SetAndClearRouterGateway(utils.NeutronScenario):
-
-    def run(self, enable_snat=True, network_create_args=None,
-            router_create_args=None):
-        """Set and remove the external network gateway from a router.
-
-        Create an external network and a router, set the external network
-        gateway for the router, then remove the external network gateway
-        from the router.
-
-        :param enable_snat: True to enable SNAT
-        :param network_create_args: dict, POST /v2.0/networks request
-                                    options
-        :param router_create_args: dict, POST /v2.0/routers request options
-        """
-        network_create_args = network_create_args or {}
-        router_create_args = router_create_args or {}
-        ext_net = self._create_network(network_create_args)
-        router = self._create_router(router_create_args)
-        self._add_gateway_router(router, ext_net, enable_snat)
-        self._remove_gateway_router(router)
-
-
-@validation.add("number", param_name="ports_per_network", minval=1,
-                integer_only=True)
-@validation.add("required_services",
-                services=[consts.Service.NEUTRON])
-@validation.add("required_platform", platform="openstack", users=True)
-@scenario.configure(context={"cleanup": ["neutron"]},
-                    name="NeutronNetworks.create_and_list_ports",
-                    platform="openstack")
-class CreateAndListPorts(utils.NeutronScenario):
-
-    def run(self, network_create_args=None,
-            port_create_args=None, ports_per_network=1):
-        """Create a given number of ports and list all ports.
-
-        :param network_create_args: dict, POST /v2.0/networks request
-                                    options. Deprecated.
-        :param port_create_args: dict, POST /v2.0/ports request options
-        :param ports_per_network: int, number of ports for one network
-        """
-        network = self._get_or_create_network(network_create_args)
-        for i in range(ports_per_network):
-            self._create_port(network, port_create_args or {})
-
-        self._list_ports()
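-
-
-# A minimal sketch of the port calls the scenario above wraps; ``neutron``
-# is assumed to be an authenticated neutronclient Client and the names are
-# illustrative.
-def example_create_and_list_ports(neutron, network_id, ports_per_network=2):
-    for _ in range(ports_per_network):
-        # POST /v2.0/ports; Neutron picks an address from the subnet.
-        neutron.create_port({"port": {"network_id": network_id}})
-    return neutron.list_ports()["ports"]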
-
-
-@validation.add("number", param_name="ports_per_network", minval=1,
-                integer_only=True)
-@validation.add("required_services",
-                services=[consts.Service.NEUTRON])
-@validation.add("required_platform", platform="openstack", users=True)
-@scenario.configure(context={"cleanup": ["neutron"]},
-                    name="NeutronNetworks.create_and_update_ports",
-                    platform="openstack")
-class CreateAndUpdatePorts(utils.NeutronScenario):
-
-    def run(self, port_update_args, network_create_args=None,
-            port_create_args=None, ports_per_network=1):
-        """Create and update a given number of ports.
-
-        Measure the "neutron port-create" and "neutron port-update" commands
-        performance.
-
-        :param port_update_args: dict, PUT /v2.0/ports update request options
-        :param network_create_args: dict, POST /v2.0/networks request
-                                    options. Deprecated.
-        :param port_create_args: dict, POST /v2.0/ports request options
-        :param ports_per_network: int, number of ports for one network
-        """
-        network = self._get_or_create_network(network_create_args)
-        for i in range(ports_per_network):
-            port = self._create_port(network, port_create_args)
-            self._update_port(port, port_update_args)
-
-
-@validation.add("number", param_name="ports_per_network", minval=1,
-                integer_only=True)
-@validation.add("required_services",
-                services=[consts.Service.NEUTRON])
-@validation.add("required_platform", platform="openstack", users=True)
-@scenario.configure(context={"cleanup": ["neutron"]},
-                    name="NeutronNetworks.create_and_show_ports",
-                    platform="openstack")
-class CreateAndShowPorts(utils.NeutronScenario):
-
-    def run(self, network_create_args=None,
-            port_create_args=None, ports_per_network=1):
-        """Create a given number of ports and show created ports in turn.
-
-        Measure the "neutron port-create" and "neutron port-show" commands
-        performance.
-
-        :param network_create_args: dict, POST /v2.0/networks request
-                                    options.
-        :param port_create_args: dict, POST /v2.0/ports request options
-        :param ports_per_network: int, number of ports for one network
-        """
-        network_create_args = network_create_args or {}
-        port_create_args = port_create_args or {}
-
-        network = self._get_or_create_network(network_create_args)
-        with atomic.ActionTimer(self, "neutron.create_and_show_%i_ports"
-                                      % ports_per_network):
-            for i in range(ports_per_network):
-                port = self._create_port(network, port_create_args)
-                msg = "Port is not created"
-                self.assertTrue(port, err_msg=msg)
-
-                port_info = self._show_port(port)
-                msg = "Created port and shown port do not match"
-                self.assertEqual(port["port"]["id"], port_info["port"]["id"],
-                                 err_msg=msg)
-
-
-@validation.add("number", param_name="ports_per_network", minval=1,
-                integer_only=True)
-@validation.add("required_services",
-                services=[consts.Service.NEUTRON])
-@scenario.configure(context={"cleanup": ["neutron"]},
-                    name="NeutronNetworks.create_and_delete_ports",
-                    platform="openstack")
-class CreateAndDeletePorts(utils.NeutronScenario):
-
-    def run(self, network_create_args=None,
-            port_create_args=None, ports_per_network=1):
-        """Create and delete a given number of ports.
-
-        Measure the "neutron port-create" and "neutron port-delete"
-        commands performance.
-
-        :param network_create_args: dict, POST /v2.0/networks request
-                                    options. Deprecated.
-        :param port_create_args: dict, POST /v2.0/ports request options
-        :param ports_per_network: int, number of ports for one network
-        """
-        network = self._get_or_create_network(network_create_args)
-        for i in range(ports_per_network):
-            port = self._create_port(network, port_create_args)
-            self._delete_port(port)
-
-
-@validation.add("required_services",
-                services=[consts.Service.NEUTRON])
-@validation.add("required_platform", platform="openstack", users=True)
-@validation.add("external_network_exists", param_name="floating_network")
-@scenario.configure(context={"cleanup": ["neutron"]},
-                    name="NeutronNetworks.create_and_list_floating_ips",
-                    platform="openstack")
-class CreateAndListFloatingIps(utils.NeutronScenario):
-
-    def run(self, floating_network=None, floating_ip_args=None):
-        """Create and list floating IPs.
-
-        Measure the "neutron floating-ip-create" and "neutron floating-ip-list"
-        commands performance.
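-
-        For reference, the underlying calls reduce to (``public_net_id`` is
-        assumed to be the ID of an existing external network; illustrative
-        only)::
-
-            fip = self.clients("neutron").create_floatingip(
-                {"floatingip": {"floating_network_id": public_net_id}})
-            self.clients("neutron").list_floatingips()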
- - :param floating_network: str, external network for floating IP creation - :param floating_ip_args: dict, POST /floatingips request options - """ - floating_ip_args = floating_ip_args or {} - self._create_floatingip(floating_network, **floating_ip_args) - self._list_floating_ips() - - -@validation.add("required_services", - services=[consts.Service.NEUTRON]) -@validation.add("required_platform", platform="openstack", users=True) -@validation.add("external_network_exists", param_name="floating_network") -@scenario.configure(context={"cleanup": ["neutron"]}, - name="NeutronNetworks.create_and_delete_floating_ips", - platform="openstack") -class CreateAndDeleteFloatingIps(utils.NeutronScenario): - - def run(self, floating_network=None, floating_ip_args=None): - """Create and delete floating IPs. - - Measure the "neutron floating-ip-create" and "neutron - floating-ip-delete" commands performance. - - :param floating_network: str, external network for floating IP creation - :param floating_ip_args: dict, POST /floatingips request options - """ - floating_ip_args = floating_ip_args or {} - floating_ip = self._create_floatingip(floating_network, - **floating_ip_args) - self._delete_floating_ip(floating_ip["floatingip"]) - - -@validation.add("required_services", - services=[consts.Service.NEUTRON]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(name="NeutronNetworks.list_agents", platform="openstack") -class ListAgents(utils.NeutronScenario): - - def run(self, agent_args=None): - """List all neutron agents. - - This simple scenario tests the "neutron agent-list" command by - listing all the neutron agents. - - :param agent_args: dict, POST /v2.0/agents request options - """ - agent_args = agent_args or {} - self._list_agents(**agent_args) diff --git a/rally/plugins/openstack/scenarios/neutron/security_groups.py b/rally/plugins/openstack/scenarios/neutron/security_groups.py deleted file mode 100644 index 0084e837..00000000 --- a/rally/plugins/openstack/scenarios/neutron/security_groups.py +++ /dev/null @@ -1,198 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally import consts -from rally.plugins.openstack import scenario -from rally.plugins.openstack.scenarios.neutron import utils -from rally.task import validation - - -"""Scenarios for Neutron Security Groups.""" - - -@validation.add("required_services", - services=[consts.Service.NEUTRON]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure( - context={"cleanup": ["neutron"]}, - name="NeutronSecurityGroup.create_and_list_security_groups", - platform="openstack") -class CreateAndListSecurityGroups(utils.NeutronScenario): - - def run(self, security_group_create_args=None): - """Create and list Neutron security-groups. - - Measure the "neutron security-group-create" and "neutron - security-group-list" command performance. 
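-
-        For reference, the equivalent raw python-neutronclient calls
-        (illustrative only)::
-
-            sg = self.clients("neutron").create_security_group(
-                {"security_group": {"name": "rally-demo-sg"}})
-            self.clients("neutron").list_security_groups()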
- - :param security_group_create_args: dict, POST /v2.0/security-groups - request options - """ - security_group_create_args = security_group_create_args or {} - self._create_security_group(**security_group_create_args) - self._list_security_groups() - - -@validation.add("required_services", - services=[consts.Service.NEUTRON]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure( - context={"cleanup": ["neutron"]}, - name="NeutronSecurityGroup.create_and_show_security_group", - platform="openstack") -class CreateAndShowSecurityGroup(utils.NeutronScenario): - - def run(self, security_group_create_args=None): - """Create and show Neutron security-group. - - Measure the "neutron security-group-create" and "neutron - security-group-show" command performance. - - :param security_group_create_args: dict, POST /v2.0/security-groups - request options - """ - security_group_create_args = security_group_create_args or {} - security_group = self._create_security_group( - **security_group_create_args) - msg = "security_group isn't created" - self.assertTrue(security_group, err_msg=msg) - - self._show_security_group(security_group) - - -@validation.add("required_services", - services=[consts.Service.NEUTRON]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure( - context={"cleanup": ["neutron"]}, - name="NeutronSecurityGroup.create_and_delete_security_groups", - platform="openstack") -class CreateAndDeleteSecurityGroups(utils.NeutronScenario): - - def run(self, security_group_create_args=None): - """Create and delete Neutron security-groups. - - Measure the "neutron security-group-create" and "neutron - security-group-delete" command performance. - - :param security_group_create_args: dict, POST /v2.0/security-groups - request options - """ - security_group_create_args = security_group_create_args or {} - security_group = self._create_security_group( - **security_group_create_args) - self._delete_security_group(security_group) - - -@validation.add("required_services", - services=[consts.Service.NEUTRON]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure( - context={"cleanup": ["neutron"]}, - name="NeutronSecurityGroup.create_and_update_security_groups", - platform="openstack") -class CreateAndUpdateSecurityGroups(utils.NeutronScenario): - - def run(self, security_group_create_args=None, - security_group_update_args=None): - """Create and update Neutron security-groups. - - Measure the "neutron security-group-create" and "neutron - security-group-update" command performance. 
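-
-        For reference, the update reduces to a call like (field shown is
-        illustrative)::
-
-            self.clients("neutron").update_security_group(
-                sg["security_group"]["id"],
-                {"security_group": {"description": "updated"}})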
- - :param security_group_create_args: dict, POST /v2.0/security-groups - request options - :param security_group_update_args: dict, PUT /v2.0/security-groups - update options - """ - security_group_create_args = security_group_create_args or {} - security_group_update_args = security_group_update_args or {} - security_group = self._create_security_group( - **security_group_create_args) - self._update_security_group(security_group, - **security_group_update_args) - - -@validation.add("required_services", - services=[consts.Service.NEUTRON]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure( - context={"cleanup": ["neutron"]}, - name="NeutronSecurityGroup.create_and_list_security_group_rules", - platform="openstack") -class CreateAndListSecurityGroupRules(utils.NeutronScenario): - - def run(self, security_group_args=None, - security_group_rule_args=None): - """Create and list Neutron security-group-rules. - - Measure the "neutron security-group-rule-create" and "neutron - security-group-rule-list" command performance. - - :param security_group_args: dict, POST /v2.0/security-groups - request options - :param security_group_rule_args: dict, - POST /v2.0/security-group-rules request options - """ - security_group_args = security_group_args or {} - security_group_rule_args = security_group_rule_args or {} - - security_group = self._create_security_group(**security_group_args) - msg = "security_group isn't created" - self.assertTrue(security_group, err_msg=msg) - - security_group_rule = self._create_security_group_rule( - security_group["security_group"]["id"], **security_group_rule_args) - msg = "security_group_rule isn't created" - self.assertTrue(security_group_rule, err_msg=msg) - - security_group_rules = self._list_security_group_rules() - self.assertIn(security_group_rule["security_group_rule"]["id"], - [sgr["id"] for sgr - in security_group_rules["security_group_rules"]]) - - -@validation.add("required_services", - services=[consts.Service.NEUTRON]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure( - context={"cleanup": ["neutron"]}, - name="NeutronSecurityGroup.create_and_show_security_group_rule", - platform="openstack") -class CreateAndShowSecurityGroupRule(utils.NeutronScenario): - - def run(self, security_group_args=None, - security_group_rule_args=None): - """Create and show Neutron security-group-rule. - - Measure the "neutron security-group-rule-create" and "neutron - security-group-rule-show" command performance. 
- - :param security_group_args: dict, POST /v2.0/security-groups - request options - :param security_group_rule_args: dict, - POST /v2.0/security-group-rules request options - """ - security_group_args = security_group_args or {} - security_group_rule_args = security_group_rule_args or {} - - security_group = self._create_security_group(**security_group_args) - msg = "security_group isn't created" - self.assertTrue(security_group, err_msg=msg) - - security_group_rule = self._create_security_group_rule( - security_group["security_group"]["id"], **security_group_rule_args) - msg = "security_group_rule isn't created" - self.assertTrue(security_group_rule, err_msg=msg) - - self._show_security_group_rule( - security_group_rule["security_group_rule"]["id"]) diff --git a/rally/plugins/openstack/scenarios/neutron/utils.py b/rally/plugins/openstack/scenarios/neutron/utils.py deleted file mode 100644 index 05bb292c..00000000 --- a/rally/plugins/openstack/scenarios/neutron/utils.py +++ /dev/null @@ -1,845 +0,0 @@ -# Copyright 2014: Intel Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import random - -from oslo_config import cfg - -from rally.common.i18n import _ -from rally.common import logging -from rally import exceptions -from rally.plugins.openstack import scenario -from rally.plugins.openstack.wrappers import network as network_wrapper -from rally.task import atomic -from rally.task import utils - - -CONF = cfg.CONF - - -LOG = logging.getLogger(__name__) - - -class NeutronScenario(scenario.OpenStackScenario): - """Base class for Neutron scenarios with basic atomic actions.""" - - SUBNET_IP_VERSION = 4 - # TODO(rkiran): modify in case LBaaS-v2 requires - LB_METHOD = "ROUND_ROBIN" - LB_PROTOCOL = "HTTP" - LB_PROTOCOL_PORT = 80 - HM_TYPE = "PING" - HM_MAX_RETRIES = 3 - HM_DELAY = 20 - HM_TIMEOUT = 10 - - def _get_network_id(self, network, **kwargs): - """Get Neutron network ID for the network name. - - param network: str, network name/id - param kwargs: dict, network options - returns: str, Neutron network-id - """ - networks = self._list_networks() - for net in networks: - if (net["name"] == network) or (net["id"] == network): - return net["id"] - msg = (_("Network %s not found.") % network) - raise exceptions.NotFoundException(message=msg) - - @atomic.action_timer("neutron.create_network") - def _create_network(self, network_create_args): - """Create neutron network. - - :param network_create_args: dict, POST /v2.0/networks request options - :returns: neutron network dict - """ - network_create_args["name"] = self.generate_random_name() - return self.clients("neutron").create_network( - {"network": network_create_args}) - - @atomic.action_timer("neutron.list_networks") - def _list_networks(self, **kwargs): - """Return user networks list. - - :param kwargs: network list options - """ - return self.clients("neutron").list_networks(**kwargs)["networks"] - - @atomic.action_timer("neutron.list_agents") - def _list_agents(self, **kwargs): - """Fetches agents. 
- - :param kwargs: neutron agent list options - :returns: user agents list - """ - return self.clients("neutron").list_agents(**kwargs)["agents"] - - @atomic.action_timer("neutron.update_network") - def _update_network(self, network, network_update_args): - """Update the network. - - This atomic function updates the network with network_update_args. - - :param network: Network object - :param network_update_args: dict, POST /v2.0/networks update options - :returns: updated neutron network dict - """ - network_update_args["name"] = self.generate_random_name() - body = {"network": network_update_args} - return self.clients("neutron").update_network( - network["network"]["id"], body) - - @atomic.action_timer("neutron.show_network") - def _show_network(self, network, **kwargs): - """show network details. - - :param network: Network object - :param kwargs: dict, POST /v2.0/networks show options - :returns: details of the network - """ - return self.clients("neutron").show_network( - network["network"]["id"], **kwargs) - - @atomic.action_timer("neutron.delete_network") - def _delete_network(self, network): - """Delete neutron network. - - :param network: Network object - """ - self.clients("neutron").delete_network(network["id"]) - - @atomic.action_timer("neutron.create_subnet") - def _create_subnet(self, network, subnet_create_args, start_cidr=None): - """Create neutron subnet. - - :param network: neutron network dict - :param subnet_create_args: POST /v2.0/subnets request options - :returns: neutron subnet dict - """ - network_id = network["network"]["id"] - - if not subnet_create_args.get("cidr"): - start_cidr = start_cidr or "10.2.0.0/24" - subnet_create_args["cidr"] = ( - network_wrapper.generate_cidr(start_cidr=start_cidr)) - - subnet_create_args["network_id"] = network_id - subnet_create_args["name"] = self.generate_random_name() - subnet_create_args.setdefault("ip_version", self.SUBNET_IP_VERSION) - - return self.clients("neutron").create_subnet( - {"subnet": subnet_create_args}) - - @atomic.action_timer("neutron.list_subnets") - def _list_subnets(self): - """Returns user subnetworks list.""" - return self.clients("neutron").list_subnets()["subnets"] - - @atomic.action_timer("neutron.show_subnet") - def _show_subnet(self, subnet, **kwargs): - """show subnet details. - - :param: subnet: Subnet object - :param: kwargs: Optional additional arguments for subnet show - :returns: details of the subnet - """ - return self.clients("neutron").show_subnet(subnet["subnet"]["id"], - **kwargs) - - @atomic.action_timer("neutron.update_subnet") - def _update_subnet(self, subnet, subnet_update_args): - """Update the neutron subnet. - - This atomic function updates the subnet with subnet_update_args. - - :param subnet: Subnet object - :param subnet_update_args: dict, PUT /v2.0/subnets update options - :returns: updated neutron subnet dict - """ - subnet_update_args["name"] = self.generate_random_name() - body = {"subnet": subnet_update_args} - return self.clients("neutron").update_subnet( - subnet["subnet"]["id"], body) - - @atomic.action_timer("neutron.delete_subnet") - def _delete_subnet(self, subnet): - """Delete neutron subnet - - :param subnet: Subnet object - """ - self.clients("neutron").delete_subnet(subnet["subnet"]["id"]) - - @atomic.action_timer("neutron.create_router") - def _create_router(self, router_create_args, external_gw=False): - """Create neutron router. 
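-
-        When ``external_gw`` is True, the request body additionally gets an
-        ``external_gateway_info`` entry shaped like (values illustrative)::
-
-            {"network_id": "<external-network-id>", "enable_snat": True}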
- - :param router_create_args: POST /v2.0/routers request options - :returns: neutron router dict - """ - router_create_args["name"] = self.generate_random_name() - - if external_gw: - for network in self._list_networks(): - if network.get("router:external"): - external_network = network - gw_info = {"network_id": external_network["id"], - "enable_snat": True} - router_create_args.setdefault("external_gateway_info", - gw_info) - - return self.clients("neutron").create_router( - {"router": router_create_args}) - - @atomic.action_timer("neutron.list_routers") - def _list_routers(self): - """Returns user routers list.""" - return self.clients("neutron").list_routers()["routers"] - - @atomic.action_timer("neutron.show_router") - def _show_router(self, router, **kwargs): - """Show information of a given router. - - :param router: ID or name of router to look up - :kwargs: dict, POST /v2.0/routers show options - :return: details of the router - """ - return self.clients("neutron").show_router( - router["router"]["id"], **kwargs) - - @atomic.action_timer("neutron.delete_router") - def _delete_router(self, router): - """Delete neutron router - - :param router: Router object - """ - self.clients("neutron").delete_router(router["router"]["id"]) - - @atomic.action_timer("neutron.update_router") - def _update_router(self, router, router_update_args): - """Update the neutron router. - - This atomic function updates the router with router_update_args. - - :param router: dict, neutron router - :param router_update_args: dict, PUT /v2.0/routers update options - :returns: updated neutron router dict - """ - router_update_args["name"] = self.generate_random_name() - body = {"router": router_update_args} - return self.clients("neutron").update_router( - router["router"]["id"], body) - - @atomic.action_timer("neutron.create_port") - def _create_port(self, network, port_create_args): - """Create neutron port. - - :param network: neutron network dict - :param port_create_args: POST /v2.0/ports request options - :returns: neutron port dict - """ - port_create_args["network_id"] = network["network"]["id"] - port_create_args["name"] = self.generate_random_name() - return self.clients("neutron").create_port({"port": port_create_args}) - - @atomic.action_timer("neutron.list_ports") - def _list_ports(self): - """Return user ports list.""" - return self.clients("neutron").list_ports()["ports"] - - @atomic.action_timer("neutron.show_port") - def _show_port(self, port, **params): - """Return user port details. - - :param port: dict, neutron port - :param params: neutron port show options - :returns: neutron port dict - """ - return self.clients("neutron").show_port(port["port"]["id"], **params) - - @atomic.action_timer("neutron.update_port") - def _update_port(self, port, port_update_args): - """Update the neutron port. - - This atomic function updates port with port_update_args. - - :param port: dict, neutron port - :param port_update_args: dict, PUT /v2.0/ports update options - :returns: updated neutron port dict - """ - port_update_args["name"] = self.generate_random_name() - body = {"port": port_update_args} - return self.clients("neutron").update_port(port["port"]["id"], body) - - @atomic.action_timer("neutron.delete_port") - def _delete_port(self, port): - """Delete neutron port. 
- - :param port: Port object - """ - self.clients("neutron").delete_port(port["port"]["id"]) - - @logging.log_deprecated_args(_("network_create_args is deprecated; " - "use the network context instead"), - "0.1.0", "network_create_args") - def _get_or_create_network(self, network_create_args=None): - """Get a network from context, or create a new one. - - This lets users either create networks with the 'network' - context, provide existing networks with the 'existing_network' - context, or let the scenario create a default network for - them. Running this without one of the network contexts is - deprecated. - - :param network_create_args: Deprecated way to provide network - creation args; use the network - context instead. - :returns: Network dict - """ - if "networks" in self.context["tenant"]: - return {"network": - random.choice(self.context["tenant"]["networks"])} - else: - LOG.warning(_("Running this scenario without either the 'network' " - "or 'existing_network' context is deprecated")) - return self._create_network(network_create_args or {}) - - def _create_subnets(self, network, - subnet_create_args=None, - subnet_cidr_start=None, - subnets_per_network=1): - """Create new subnets in the given network. - - :param network: network to create subnets in - :param subnet_create_args: dict, POST /v2.0/subnets request options - :param subnet_cidr_start: str, start value for subnets CIDR - :param subnets_per_network: int, number of subnets for one network - :returns: List of subnet dicts - """ - return [self._create_subnet(network, subnet_create_args or {}, - subnet_cidr_start) - for i in range(subnets_per_network)] - - def _create_network_and_subnets(self, - network_create_args=None, - subnet_create_args=None, - subnets_per_network=1, - subnet_cidr_start="1.0.0.0/24"): - """Create network and subnets. - - :parm network_create_args: dict, POST /v2.0/networks request options - :parm subnet_create_args: dict, POST /v2.0/subnets request options - :parm subnets_per_network: int, number of subnets for one network - :parm subnet_cidr_start: str, start value for subnets CIDR - :returns: tuple of result network and subnets list - """ - network = self._create_network(network_create_args or {}) - subnets = self._create_subnets(network, subnet_create_args, - subnet_cidr_start, subnets_per_network) - return network, subnets - - def _create_network_structure(self, network_create_args=None, - subnet_create_args=None, - subnet_cidr_start=None, - subnets_per_network=None, - router_create_args=None): - """Create a network and a given number of subnets and routers. - - :param network_create_args: dict, POST /v2.0/networks request options - :param subnet_create_args: dict, POST /v2.0/subnets request options - :param subnet_cidr_start: str, start value for subnets CIDR - :param subnets_per_network: int, number of subnets for one network - :param router_create_args: dict, POST /v2.0/routers request options - :returns: tuple of (network, subnets, routers) - """ - network = self._create_network(network_create_args or {}) - subnets = self._create_subnets(network, subnet_create_args, - subnet_cidr_start, - subnets_per_network) - - routers = [] - for subnet in subnets: - router = self._create_router(router_create_args or {}) - self._add_interface_router(subnet["subnet"], - router["router"]) - routers.append(router) - - return (network, subnets, routers) - - @atomic.action_timer("neutron.add_interface_router") - def _add_interface_router(self, subnet, router): - """Connect subnet to router. 
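-
-        The call reduces to (body shape per the implementation below)::
-
-            self.clients("neutron").add_interface_router(
-                router["id"], {"subnet_id": subnet["id"]})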
- - :param subnet: dict, neutron subnet - :param router: dict, neutron router - """ - self.clients("neutron").add_interface_router( - router["id"], {"subnet_id": subnet["id"]}) - - @atomic.action_timer("neutron.remove_interface_router") - def _remove_interface_router(self, subnet, router): - """Remove subnet from router - - :param subnet: dict, neutron subnet - :param router: dict, neutron router - """ - self.clients("neutron").remove_interface_router( - router["id"], {"subnet_id": subnet["id"]}) - - @atomic.action_timer("neutron.add_gateway_router") - def _add_gateway_router(self, router, ext_net, enable_snat): - """Set the external network gateway for a router. - - :param router: dict, neutron router - :param ext_net: external network for the gateway - :param enable_snat: True if enable snat - """ - gw_info = {"network_id": ext_net["network"]["id"], - "enable_snat": enable_snat} - self.clients("neutron").add_gateway_router( - router["router"]["id"], gw_info) - - @atomic.action_timer("neutron.remove_gateway_router") - def _remove_gateway_router(self, router): - """Removes an external network gateway from the specified router. - - :param router: dict, neutron router - """ - self.clients("neutron").remove_gateway_router( - router["router"]["id"]) - - @atomic.action_timer("neutron.create_pool") - def _create_lb_pool(self, subnet_id, **pool_create_args): - """Create LB pool(v1) - - :param subnet_id: str, neutron subnet-id - :param pool_create_args: dict, POST /lb/pools request options - :returns: dict, neutron lb pool - """ - args = {"lb_method": self.LB_METHOD, - "protocol": self.LB_PROTOCOL, - "name": self.generate_random_name(), - "subnet_id": subnet_id} - args.update(pool_create_args) - return self.clients("neutron").create_pool({"pool": args}) - - def _create_v1_pools(self, networks, **pool_create_args): - """Create LB pools(v1) - - :param networks: list, neutron networks - :param pool_create_args: dict, POST /lb/pools request options - :returns: list, neutron lb pools - """ - subnets = [] - pools = [] - for net in networks: - subnets.extend(net.get("subnets", [])) - for subnet_id in subnets: - pools.append(self._create_lb_pool( - subnet_id, **pool_create_args)) - return pools - - @atomic.action_timer("neutron.list_pools") - def _list_v1_pools(self, **kwargs): - """Return user lb pool list(v1).""" - return self.clients("neutron").list_pools(**kwargs) - - @atomic.action_timer("neutron.delete_pool") - def _delete_v1_pool(self, pool): - """Delete neutron pool. - - :param pool: Pool object - """ - self.clients("neutron").delete_pool(pool["id"]) - - @atomic.action_timer("neutron.update_pool") - def _update_v1_pool(self, pool, **pool_update_args): - """Update pool. - - This atomic function updates the pool with pool_update_args. 
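-
-        For example (the name is regenerated automatically; other keyword
-        arguments are passed through to the request body)::
-
-            scenario._update_v1_pool(pool, admin_state_up=False)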
-
-        :param pool: Pool object
-        :param pool_update_args: dict, POST /lb/pools update options
-        :returns: updated neutron pool dict
-        """
-        pool_update_args["name"] = self.generate_random_name()
-        body = {"pool": pool_update_args}
-        return self.clients("neutron").update_pool(pool["pool"]["id"], body)
-
-    def _create_v1_vip(self, pool, **vip_create_args):
-        """Create VIP(v1)
-
-        :param pool: dict, neutron lb-pool
-        :param vip_create_args: dict, POST /lb/vips request options
-        :returns: dict, neutron lb vip
-        """
-        args = {"protocol": self.LB_PROTOCOL,
-                "protocol_port": self.LB_PROTOCOL_PORT,
-                "name": self.generate_random_name(),
-                "pool_id": pool["pool"]["id"],
-                "subnet_id": pool["pool"]["subnet_id"]}
-        args.update(vip_create_args)
-        return self.clients("neutron").create_vip({"vip": args})
-
-    @atomic.action_timer("neutron.list_vips")
-    def _list_v1_vips(self, **kwargs):
-        """Return user lb vip list(v1)."""
-        return self.clients("neutron").list_vips(**kwargs)
-
-    @atomic.action_timer("neutron.delete_vip")
-    def _delete_v1_vip(self, vip):
-        """Delete neutron vip.
-
-        :param vip: neutron Virtual IP object
-        """
-        self.clients("neutron").delete_vip(vip["id"])
-
-    @atomic.action_timer("neutron.update_vip")
-    def _update_v1_vip(self, vip, **vip_update_args):
-        """Update vip.
-
-        This atomic function updates vip name and admin state.
-
-        :param vip: Vip object
-        :param vip_update_args: dict, POST /lb/vips update options
-        :returns: updated neutron vip dict
-        """
-        vip_update_args["name"] = self.generate_random_name()
-        body = {"vip": vip_update_args}
-        return self.clients("neutron").update_vip(vip["vip"]["id"], body)
-
-    @atomic.action_timer("neutron.create_floating_ip")
-    def _create_floatingip(self, floating_network, **floating_ip_args):
-        """Create floating IP with floating_network.
-
-        :param floating_network: str, external network to create floating IP
-        :param floating_ip_args: dict, POST /floatingips create options
-        :returns: dict, neutron floating IP
-        """
-        floating_network_id = self._get_network_id(
-            floating_network)
-        args = {"floating_network_id": floating_network_id}
-        args.update(floating_ip_args)
-        return self.clients("neutron").create_floatingip({"floatingip": args})
-
-    @atomic.action_timer("neutron.list_floating_ips")
-    def _list_floating_ips(self, **kwargs):
-        """Return floating IPs list."""
-        return self.clients("neutron").list_floatingips(**kwargs)
-
-    @atomic.action_timer("neutron.delete_floating_ip")
-    def _delete_floating_ip(self, floating_ip):
-        """Delete floating IP.
-
-        :param floating_ip: dict, floating IP object
-        """
-        return self.clients("neutron").delete_floatingip(floating_ip["id"])
-
-    @atomic.action_timer("neutron.create_healthmonitor")
-    def _create_v1_healthmonitor(self, **healthmonitor_create_args):
-        """Create LB healthmonitor.
-
-        This atomic function creates healthmonitor with the provided
-        healthmonitor_create_args.
-
-        :param healthmonitor_create_args: dict, POST /lb/healthmonitors
-        :returns: neutron healthmonitor dict
-        """
-        args = {"type": self.HM_TYPE,
-                "delay": self.HM_DELAY,
-                "max_retries": self.HM_MAX_RETRIES,
-                "timeout": self.HM_TIMEOUT}
-        args.update(healthmonitor_create_args)
-        return self.clients("neutron").create_health_monitor(
-            {"health_monitor": args})
-
-    @atomic.action_timer("neutron.list_healthmonitors")
-    def _list_v1_healthmonitors(self, **kwargs):
-        """List LB healthmonitors.
-
-        This atomic function lists all healthmonitors.
- - :param kwargs: optional parameters - :returns: neutron lb healthmonitor list - """ - return self.clients("neutron").list_health_monitors(**kwargs) - - @atomic.action_timer("neutron.delete_healthmonitor") - def _delete_v1_healthmonitor(self, healthmonitor): - """Delete neutron healthmonitor. - - :param healthmonitor: neutron healthmonitor dict - """ - self.clients("neutron").delete_health_monitor(healthmonitor["id"]) - - @atomic.action_timer("neutron.update_healthmonitor") - def _update_v1_healthmonitor(self, healthmonitor, - **healthmonitor_update_args): - """Update neutron healthmonitor. - - :param healthmonitor: neutron lb healthmonitor dict - :param healthmonitor_update_args: POST /lb/healthmonitors - update options - :returns: updated neutron lb healthmonitor dict - """ - body = {"health_monitor": healthmonitor_update_args} - return self.clients("neutron").update_health_monitor( - healthmonitor["health_monitor"]["id"], body) - - @atomic.action_timer("neutron.create_security_group") - def _create_security_group(self, **security_group_create_args): - """Create Neutron security-group. - - param: security_group_create_args: dict, POST /v2.0/security-groups - request options - return: dict, neutron security-group - """ - security_group_create_args["name"] = self.generate_random_name() - return self.clients("neutron").create_security_group( - {"security_group": security_group_create_args}) - - @atomic.action_timer("neutron.delete_security_group") - def _delete_security_group(self, security_group): - """Delete Neutron security group. - - param: security_group: dict, neutron security_group - """ - return self.clients("neutron").delete_security_group( - security_group["security_group"]["id"]) - - @atomic.action_timer("neutron.list_security_groups") - def _list_security_groups(self, **kwargs): - """Return list of Neutron security groups.""" - return self.clients("neutron").list_security_groups(**kwargs) - - @atomic.action_timer("neutron.show_security_group") - def _show_security_group(self, security_group, **kwargs): - """Show security group details. - - :param: security_group: dict, neutron security_group - :param: kwargs: Optional additional arguments for security_group show - :returns: security_group details - """ - return self.clients("neutron").show_security_group( - security_group["security_group"]["id"], **kwargs) - - @atomic.action_timer("neutron.update_security_group") - def _update_security_group(self, security_group, - **security_group_update_args): - """Update Neutron security-group. 
- - param: security_group: dict, neutron security_group - param: security_group_update_args: dict, POST /v2.0/security-groups - update options - return: dict, updated neutron security-group - """ - security_group_update_args["name"] = self.generate_random_name() - body = {"security_group": security_group_update_args} - return self.clients("neutron").update_security_group( - security_group["security_group"]["id"], body) - - def update_loadbalancer_resource(self, lb): - try: - new_lb = self.clients("neutron").show_loadbalancer(lb["id"]) - except Exception as e: - if getattr(e, "status_code", 400) == 404: - raise exceptions.GetResourceNotFound(resource=lb) - raise exceptions.GetResourceFailure(resource=lb, err=e) - return new_lb["loadbalancer"] - - @atomic.action_timer("neutron.create_lbaasv2_loadbalancer") - def _create_lbaasv2_loadbalancer(self, subnet_id, **lb_create_args): - """Create LB loadbalancer(v2) - - :param subnet_id: str, neutron subnet-id - :param lb_create_args: dict, POST /lbaas/loadbalancers request options - :returns: dict, neutron lb - """ - args = {"name": self.generate_random_name(), - "vip_subnet_id": subnet_id} - args.update(lb_create_args) - neutronclient = self.clients("neutron") - lb = neutronclient.create_loadbalancer({"loadbalancer": args}) - lb = lb["loadbalancer"] - lb = utils.wait_for_status( - lb, - ready_statuses=["ACTIVE"], - status_attr="provisioning_status", - update_resource=self.update_loadbalancer_resource, - timeout=CONF.benchmark.neutron_create_loadbalancer_timeout, - check_interval=( - CONF.benchmark.neutron_create_loadbalancer_poll_interval) - ) - return lb - - @atomic.action_timer("neutron.list_lbaasv2_loadbalancers") - def _list_lbaasv2_loadbalancers(self, retrieve_all=True, **lb_list_args): - """List LB loadbalancers(v2) - - :param lb_list_args: dict, POST /lbaas/loadbalancers request options - :returns: dict, neutron lb loadbalancers(v2) - """ - return self.clients("neutron").list_loadbalancers(retrieve_all, - **lb_list_args) - - @atomic.action_timer("neutron.create_bgpvpn") - def _create_bgpvpn(self, **kwargs): - """Create Bgpvpn resource (POST /bgpvpn/bgpvpn) - - :param kwargs: optional parameters to create BGP VPN - :returns dict, bgpvpn resource details - """ - kwargs["name"] = self.generate_random_name() - return self.admin_clients("neutron").create_bgpvpn({"bgpvpn": kwargs}) - - @atomic.action_timer("neutron.delete_bgpvpn") - def _delete_bgpvpn(self, bgpvpn): - """Delete Bgpvpn resource.(DELETE /bgpvpn/bgpvpns/{id}) - - :param bgpvpn: dict, bgpvpn - :return dict, bgpvpn - """ - return self.admin_clients("neutron").delete_bgpvpn( - bgpvpn["bgpvpn"]["id"]) - - @atomic.action_timer("neutron.list_bgpvpns") - def _list_bgpvpns(self, **kwargs): - """Return bgpvpns list. - - :param kwargs: dict, GET /bgpvpn/bgpvpns request options - :returns: bgpvpns list - """ - return self.admin_clients("neutron").list_bgpvpns( - True, **kwargs)["bgpvpns"] - - @atomic.action_timer("neutron.update_bgpvpn") - def _update_bgpvpn(self, bgpvpn, update_name=False, **kwargs): - """Update a bgpvpn. 
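-
-        For example, to force a rename while updating other fields (field
-        names illustrative)::
-
-            scenario._update_bgpvpn(bgpvpn, update_name=True,
-                                    route_targets=["64512:1"])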
- - :param bgpvpn: dict, bgpvpn - :param update_name: update_name: bool, whether or not to modify - BGP VPN name - :param **kwargs: dict, PUT /bgpvpn/bgpvpns update options - :return dict, updated bgpvpn - """ - if update_name or "name" in kwargs: - kwargs["name"] = self.generate_random_name() - return self.admin_clients("neutron").update_bgpvpn( - bgpvpn["bgpvpn"]["id"], {"bgpvpn": kwargs}) - - @atomic.action_timer("neutron.create_bgpvpn_network_assoc") - def _create_bgpvpn_network_assoc(self, bgpvpn, network): - """Creates a new BGP VPN network association. - - :param bgpvpn: dict, bgpvpn - :param network: dict, network - :return dict: network_association - """ - netassoc = {"network_id": network["id"]} - return self.clients("neutron").create_bgpvpn_network_assoc( - bgpvpn["bgpvpn"]["id"], {"network_association": netassoc}) - - @atomic.action_timer("neutron.delete_bgpvpn_network_assoc") - def _delete_bgpvpn_network_assoc(self, bgpvpn, net_assoc): - """Delete the specified BGP VPN network association - - :param bgpvpn: dict, bgpvpn - :param net_assoc: dict, network - :return dict: network_association - """ - return self.clients("neutron").delete_bgpvpn_network_assoc( - bgpvpn["bgpvpn"]["id"], net_assoc["network_association"]["id"]) - - @atomic.action_timer("neutron.create_bgpvpn_router_assoc") - def _create_bgpvpn_router_assoc(self, bgpvpn, router): - """Creates a new BGP VPN router association. - - :param bgpvpn: dict, bgpvpn - :param router: dict, router - :return dict: network_association - """ - router_assoc = {"router_id": router["id"]} - return self.clients("neutron").create_bgpvpn_router_assoc( - bgpvpn["bgpvpn"]["id"], {"router_association": router_assoc}) - - @atomic.action_timer("neutron.delete_bgpvpn_router_assoc") - def _delete_bgpvpn_router_assoc(self, bgpvpn, router_assoc): - """Delete the specified BGP VPN router association - - :param bgpvpn: dict, bgpvpn - :param router_assoc: dict, router - :return dict: router_association - """ - return self.clients("neutron").delete_bgpvpn_router_assoc( - bgpvpn["bgpvpn"]["id"], router_assoc["router_association"]["id"]) - - @atomic.action_timer("neutron.list_bgpvpn_network_assocs") - def _list_bgpvpn_network_assocs(self, bgpvpn, **kwargs): - """List network association of bgpvpn - - :param bgpvpn: dict, bgpvpn - :param **kwargs: dict, optional parameters - :return dict: network_association - """ - return self.clients("neutron").list_bgpvpn_network_assocs( - bgpvpn["bgpvpn"]["id"], **kwargs) - - @atomic.action_timer("neutron.list_bgpvpn_router_assocs") - def _list_bgpvpn_router_assocs(self, bgpvpn, **kwargs): - """List router association of bgpvpn - - :param bgpvpn: dict, bgpvpn - :param **kwargs: dict, optional parameters - :return dict: router_association - """ - return self.clients("neutron").list_bgpvpn_router_assocs( - bgpvpn["bgpvpn"]["id"], **kwargs) - - @atomic.action_timer("neutron.create_security_group_rule") - def _create_security_group_rule(self, security_group_id, - **security_group_rule_args): - """Create Neutron security-group-rule. 
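-
-        Typical ``security_group_rule_args`` look like (values
-        illustrative)::
-
-            {"direction": "ingress", "protocol": "tcp",
-             "port_range_min": 22, "port_range_max": 22}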
- - param: security_group_id: id of neutron security_group - param: security_group_rule_args: dict, POST - /v2.0/security-group-rules request options - return: dict, neutron security-group-rule - """ - security_group_rule_args["security_group_id"] = security_group_id - if "direction" not in security_group_rule_args: - security_group_rule_args["direction"] = "ingress" - - return self.clients("neutron").create_security_group_rule( - {"security_group_rule": security_group_rule_args}) - - @atomic.action_timer("neutron.list_security_group_rules") - def _list_security_group_rules(self, **kwargs): - """List all security group rules. - - :param kwargs: Optional additional arguments for roles list - :return: list of security group rules - """ - return self.clients("neutron").list_security_group_rules(**kwargs) - - @atomic.action_timer("neutron.show_security_group_rule") - def _show_security_group_rule(self, security_group_rule, **kwargs): - """Show information of a given security group rule. - - :param security_group_rule: id of security group rule - :param kwargs: Optional additional arguments for roles list - :return: details of security group rule - """ - return self.clients("neutron").show_security_group_rule( - security_group_rule, **kwargs) diff --git a/rally/plugins/openstack/scenarios/nova/__init__.py b/rally/plugins/openstack/scenarios/nova/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/rally/plugins/openstack/scenarios/nova/agents.py b/rally/plugins/openstack/scenarios/nova/agents.py deleted file mode 100644 index 25a9c71d..00000000 --- a/rally/plugins/openstack/scenarios/nova/agents.py +++ /dev/null @@ -1,38 +0,0 @@ -# Copyright 2016 IBM Corp. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally import consts -from rally.plugins.openstack import scenario -from rally.plugins.openstack.scenarios.nova import utils -from rally.task import validation - - -"""Scenarios for Nova agents.""" - - -@validation.add("required_services", services=[consts.Service.NOVA]) -@validation.add("required_platform", platform="openstack", admin=True) -@scenario.configure(name="NovaAgents.list_agents", platform="openstack") -class ListAgents(utils.NovaScenario): - def run(self, hypervisor=None): - """List all builds. - - Measure the "nova agent-list" command performance. - - :param hypervisor: List agent builds on a specific hypervisor. - None (default value) means list for all - hypervisors - """ - self._list_agents(hypervisor) diff --git a/rally/plugins/openstack/scenarios/nova/aggregates.py b/rally/plugins/openstack/scenarios/nova/aggregates.py deleted file mode 100644 index 4f86303f..00000000 --- a/rally/plugins/openstack/scenarios/nova/aggregates.py +++ /dev/null @@ -1,190 +0,0 @@ -# Copyright 2016 IBM Corp. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from rally import consts
-from rally import exceptions
-from rally.plugins.openstack import scenario
-from rally.plugins.openstack.scenarios.nova import utils
-from rally.task import types
-from rally.task import validation
-
-
-"""Scenarios for Nova aggregates."""
-
-
-@validation.add("required_services", services=[consts.Service.NOVA])
-@validation.add("required_platform", platform="openstack", admin=True)
-@scenario.configure(name="NovaAggregates.list_aggregates",
-                    platform="openstack")
-class ListAggregates(utils.NovaScenario):
-
-    def run(self):
-        """List all nova aggregates.
-
-        Measure the "nova aggregate-list" command performance.
-        """
-        self._list_aggregates()
-
-
-@validation.add("required_services", services=[consts.Service.NOVA])
-@validation.add("required_platform", platform="openstack", admin=True)
-@scenario.configure(context={"admin_cleanup": ["nova"]},
-                    name="NovaAggregates.create_and_list_aggregates",
-                    platform="openstack")
-class CreateAndListAggregates(utils.NovaScenario):
-    """Scenario for create and list aggregate."""
-
-    def run(self, availability_zone):
-        """Create an aggregate and then list all aggregates.
-
-        This scenario creates an aggregate and then lists all aggregates.
-
-        :param availability_zone: The availability zone of the aggregate
-        """
-        aggregate = self._create_aggregate(availability_zone)
-        msg = "Aggregate isn't created"
-        self.assertTrue(aggregate, err_msg=msg)
-        all_aggregates = self._list_aggregates()
-        msg = ("Created aggregate is not in the"
-               " list of all available aggregates")
-        self.assertIn(aggregate, all_aggregates, err_msg=msg)
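-
-
-# A minimal sketch of the novaclient calls the scenario above wraps;
-# ``nova`` is assumed to be an authenticated novaclient Client and the
-# names are illustrative, not part of the Rally plugin API.
-def example_create_and_list_aggregates(nova, availability_zone):
-    # POST /os-aggregates, then GET /os-aggregates
-    aggregate = nova.aggregates.create("rally-demo-aggr", availability_zone)
-    assert aggregate in nova.aggregates.list()
-    return aggregate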
-
-
-@validation.add("required_services", services=[consts.Service.NOVA])
-@validation.add("required_platform", platform="openstack", admin=True)
-@scenario.configure(context={"admin_cleanup": ["nova"]},
-                    name="NovaAggregates.create_and_delete_aggregate",
-                    platform="openstack")
-class CreateAndDeleteAggregate(utils.NovaScenario):
-    """Scenario for create and delete aggregate."""
-
-    def run(self, availability_zone):
-        """Create an aggregate and then delete it.
-
-        This scenario first creates an aggregate and then deletes it.
-
-        :param availability_zone: The availability zone of the aggregate
-        """
-        aggregate = self._create_aggregate(availability_zone)
-        self._delete_aggregate(aggregate)
-
-
-@validation.add("required_services", services=[consts.Service.NOVA])
-@validation.add("required_platform", platform="openstack", admin=True)
-@scenario.configure(context={"admin_cleanup": ["nova"]},
-                    name="NovaAggregates.create_and_update_aggregate",
-                    platform="openstack")
-class CreateAndUpdateAggregate(utils.NovaScenario):
-    """Scenario for create and update aggregate."""
-
-    def run(self, availability_zone):
-        """Create an aggregate and then update its name and availability zone.
-
-        This scenario first creates an aggregate and then updates its name
-        and availability zone.
-
-        :param availability_zone: The availability zone of the aggregate
-        """
-        aggregate = self._create_aggregate(availability_zone)
-        self._update_aggregate(aggregate)
-
-
-@validation.add("required_services", services=[consts.Service.NOVA])
-@validation.add("required_platform", platform="openstack", admin=True)
-@scenario.configure(context={"admin_cleanup": ["nova"]},
-                    name="NovaAggregates.create_aggregate_add_and_remove_host",
-                    platform="openstack")
-class CreateAggregateAddAndRemoveHost(utils.NovaScenario):
-    """Scenario for add a host to and remove the host from an aggregate."""
-
-    def run(self, availability_zone):
-        """Create an aggregate, add a host to it and remove it again.
-
-        Measure "nova aggregate-add-host" and "nova aggregate-remove-host"
-        command performance.
-
-        :param availability_zone: The availability zone of the aggregate
-        """
-        aggregate = self._create_aggregate(availability_zone)
-        hosts = self._list_hypervisors()
-        host_name = hosts[0].service["host"]
-        self._aggregate_add_host(aggregate, host_name)
-        self._aggregate_remove_host(aggregate, host_name)
-
-
-@validation.add("required_services", services=[consts.Service.NOVA])
-@validation.add("required_platform", platform="openstack", admin=True)
-@scenario.configure(context={"admin_cleanup": ["nova"]},
-                    name="NovaAggregates.create_and_get_aggregate_details",
-                    platform="openstack")
-class CreateAndGetAggregateDetails(utils.NovaScenario):
-    """Scenario for create and get aggregate details."""
-
-    def run(self, availability_zone):
-        """Create an aggregate and then get its details.
-
-        This scenario first creates an aggregate and then gets its details.
-
-        :param availability_zone: The availability zone of the aggregate
-        """
-        aggregate = self._create_aggregate(availability_zone)
-        self._get_aggregate_details(aggregate)
-
-
-@types.convert(image={"type": "glance_image"})
-@validation.add("required_services", services=[consts.Service.NOVA])
-@validation.add("required_platform", platform="openstack",
-                admin=True, users=True)
-@scenario.configure(
-    context={"admin_cleanup": ["nova"], "cleanup": ["nova"]},
-    name="NovaAggregates.create_aggregate_add_host_and_boot_server",
-    platform="openstack")
-class CreateAggregateAddHostAndBootServer(utils.NovaScenario):
-    """Scenario to verify an aggregate."""
-
-    def run(self, image, metadata, availability_zone=None, ram=512, vcpus=1,
-            disk=1, boot_server_kwargs=None):
-        """Scenario to create and verify an aggregate.
-
-        This scenario creates an aggregate, adds a compute host and metadata
-        to the aggregate, adds the same metadata to the flavor and creates an
-        instance. Verifies that instance host is one of the hosts in the
-        aggregate.
- - :param image: The image ID to boot from - :param metadata: The metadata to be set as flavor extra specs - :param availability_zone: The availability zone of the aggregate - :param ram: Memory in MB for the flavor - :param vcpus: Number of VCPUs for the flavor - :param disk: Size of local disk in GB - :param boot_server_kwargs: Optional additional arguments to verify host - aggregates - :raises RallyException: if instance and aggregate hosts do not match - """ - - boot_server_kwargs = boot_server_kwargs or {} - - aggregate = self._create_aggregate(availability_zone) - hosts = self._list_hypervisors() - host_name = hosts[0].service["host"] - self._aggregate_set_metadata(aggregate, metadata) - self._aggregate_add_host(aggregate, host_name) - flavor = self._create_flavor(ram, vcpus, disk) - flavor.set_keys(metadata) - - server = self._boot_server(image, flavor.id, **boot_server_kwargs) - # NOTE: we need to get server object by admin user to obtain - # "hypervisor_hostname" attribute - server = self.admin_clients("nova").servers.get(server.id) - instance_hostname = getattr(server, - "OS-EXT-SRV-ATTR:hypervisor_hostname") - if instance_hostname != host_name: - raise exceptions.RallyException("Instance host and aggregate " - "host are different") diff --git a/rally/plugins/openstack/scenarios/nova/availability_zones.py b/rally/plugins/openstack/scenarios/nova/availability_zones.py deleted file mode 100644 index 92397da1..00000000 --- a/rally/plugins/openstack/scenarios/nova/availability_zones.py +++ /dev/null @@ -1,39 +0,0 @@ -# Copyright 2016 IBM Corp. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally import consts -from rally.plugins.openstack import scenario -from rally.plugins.openstack.scenarios.nova import utils -from rally.task import validation - - -"""Scenarios for Nova availability-zones.""" - - -@validation.add("required_services", services=[consts.Service.NOVA]) -@validation.add("required_platform", platform="openstack", admin=True) -@scenario.configure(name="NovaAvailabilityZones.list_availability_zones", - platform="openstack") -class ListAvailabilityZones(utils.NovaScenario): - - def run(self, detailed=True): - """List all availability zones. - - Measure the "nova availability-zone-list" command performance. - - :param detailed: True if the availability-zone listing should contain - detailed information about all of them - """ - self._list_availability_zones(detailed) diff --git a/rally/plugins/openstack/scenarios/nova/flavors.py b/rally/plugins/openstack/scenarios/nova/flavors.py deleted file mode 100644 index d764b6c5..00000000 --- a/rally/plugins/openstack/scenarios/nova/flavors.py +++ /dev/null @@ -1,240 +0,0 @@ -# Copyright 2015: Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally.common.i18n import _LW -from rally.common import logging -from rally import consts -from rally.plugins.openstack import scenario -from rally.plugins.openstack.scenarios.nova import utils -from rally.task import validation - - -"""Scenarios for Nova flavors.""" - - -LOG = logging.getLogger(__name__) - - -@validation.add("required_services", services=[consts.Service.NOVA]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(name="NovaFlavors.list_flavors", platform="openstack") -class ListFlavors(utils.NovaScenario): - - def run(self, detailed=True, is_public=True, marker=None, min_disk=None, - min_ram=None, limit=None, sort_key=None, sort_dir=None): - """List all flavors. - - Measure the "nova flavor-list" command performance. - - :param detailed: Whether flavor needs to be return with details - (optional). - :param is_public: Filter flavors with provided access type (optional). - None means give all flavors and only admin has query - access to all flavor types. - :param marker: Begin returning flavors that appear later in the flavor - list than that represented by this flavor id (optional). - :param min_disk: Filters the flavors by a minimum disk space, in GiB. - :param min_ram: Filters the flavors by a minimum RAM, in MB. - :param limit: maximum number of flavors to return (optional). - :param sort_key: Flavors list sort key (optional). - :param sort_dir: Flavors list sort direction (optional). - """ - self._list_flavors(detailed=detailed, is_public=is_public, - marker=marker, min_disk=min_disk, min_ram=min_ram, - limit=limit, sort_key=sort_key, sort_dir=sort_dir) - - -@validation.add("required_services", services=[consts.Service.NOVA]) -@validation.add("required_platform", platform="openstack", admin=True) -@scenario.configure(context={"admin_cleanup": ["nova"]}, - name="NovaFlavors.create_and_list_flavor_access", - platform="openstack") -class CreateAndListFlavorAccess(utils.NovaScenario): - - def run(self, ram, vcpus, disk, flavorid="auto", - ephemeral=0, swap=0, rxtx_factor=1.0, is_public=True): - """Create a non-public flavor and list its access rules - - :param ram: Memory in MB for the flavor - :param vcpus: Number of VCPUs for the flavor - :param disk: Size of local disk in GB - :param flavorid: ID for the flavor (optional). You can use the reserved - value ``"auto"`` to have Nova generate a UUID for the - flavor in cases where you cannot simply pass ``None``. - :param ephemeral: Ephemeral space size in GB (default 0). - :param swap: Swap space in MB - :param rxtx_factor: RX/TX factor - :param is_public: Make flavor accessible to the public (default true). - """ - # NOTE(pirsriva): access rules can be listed - # only for non-public flavors - if is_public: - LOG.warning(_LW("is_public cannot be set to True for listing " - "flavor access rules. 
Setting is_public to False")) - is_public = False - flavor = self._create_flavor(ram, vcpus, disk, flavorid=flavorid, - ephemeral=ephemeral, swap=swap, - rxtx_factor=rxtx_factor, - is_public=is_public) - self.assertTrue(flavor) - - self._list_flavor_access(flavor.id) - - -@validation.add("required_services", services=[consts.Service.NOVA]) -@validation.add("required_platform", platform="openstack", admin=True) -@scenario.configure(context={"admin_cleanup": ["nova"]}, - name="NovaFlavors.create_flavor_and_add_tenant_access", - platform="openstack") -class CreateFlavorAndAddTenantAccess(utils.NovaScenario): - - def run(self, ram, vcpus, disk, flavorid="auto", - ephemeral=0, swap=0, rxtx_factor=1.0, is_public=True): - """Create a flavor and Add flavor access for the given tenant. - - :param ram: Memory in MB for the flavor - :param vcpus: Number of VCPUs for the flavor - :param disk: Size of local disk in GB - :param flavorid: ID for the flavor (optional). You can use the reserved - value ``"auto"`` to have Nova generate a UUID for the - flavor in cases where you cannot simply pass ``None``. - :param ephemeral: Ephemeral space size in GB (default 0). - :param swap: Swap space in MB - :param rxtx_factor: RX/TX factor - :param is_public: Make flavor accessible to the public (default true). - """ - flavor = self._create_flavor(ram, vcpus, disk, flavorid=flavorid, - ephemeral=ephemeral, swap=swap, - rxtx_factor=rxtx_factor, - is_public=is_public) - self.assertTrue(flavor) - self._add_tenant_access(flavor.id, self.context["tenant"]["id"]) - - -@validation.add("required_services", services=[consts.Service.NOVA]) -@validation.add("required_platform", platform="openstack", admin=True) -@scenario.configure(context={"admin_cleanup": ["nova"]}, - name="NovaFlavors.create_flavor", platform="openstack") -class CreateFlavor(utils.NovaScenario): - - def run(self, ram, vcpus, disk, flavorid="auto", - ephemeral=0, swap=0, rxtx_factor=1.0, is_public=True): - """Create a flavor. - - :param ram: Memory in MB for the flavor - :param vcpus: Number of VCPUs for the flavor - :param disk: Size of local disk in GB - :param flavorid: ID for the flavor (optional). You can use the reserved - value ``"auto"`` to have Nova generate a UUID for the - flavor in cases where you cannot simply pass ``None``. - :param ephemeral: Ephemeral space size in GB (default 0). - :param swap: Swap space in MB - :param rxtx_factor: RX/TX factor - :param is_public: Make flavor accessible to the public (default true). - """ - self._create_flavor(ram, vcpus, disk, flavorid=flavorid, - ephemeral=ephemeral, swap=swap, - rxtx_factor=rxtx_factor, - is_public=is_public) - - -@validation.add("required_services", services=[consts.Service.NOVA]) -@validation.add("required_platform", platform="openstack", admin=True) -@scenario.configure(context={"admin_cleanup": ["nova"]}, - name="NovaFlavors.create_and_get_flavor", - platform="openstack") -class CreateAndGetFlavor(utils.NovaScenario): - """Scenario for create and get flavor.""" - - def run(self, ram, vcpus, disk, flavorid="auto", - ephemeral=0, swap=0, rxtx_factor=1.0, is_public=True): - """Create flavor and get detailed information of the flavor. - - :param ram: Memory in MB for the flavor - :param vcpus: Number of VCPUs for the flavor - :param disk: Size of local disk in GB - :param flavorid: ID for the flavor (optional). You can use the reserved - value ``"auto"`` to have Nova generate a UUID for the - flavor in cases where you cannot simply pass ``None``. 
-        :param ephemeral: Ephemeral space size in GB (default 0).
-        :param swap: Swap space in MB
-        :param rxtx_factor: RX/TX factor
-        :param is_public: Make flavor accessible to the public (default true).
-        """
-        flavor = self._create_flavor(ram, vcpus, disk, flavorid=flavorid,
-                                     ephemeral=ephemeral, swap=swap,
-                                     rxtx_factor=rxtx_factor,
-                                     is_public=is_public)
-        self._get_flavor(flavor.id)
-
-
-@validation.add("required_services", services=[consts.Service.NOVA])
-@validation.add("required_platform", platform="openstack", admin=True)
-@scenario.configure(context={"admin_cleanup": ["nova"]},
-                    name="NovaFlavors.create_and_delete_flavor",
-                    platform="openstack")
-class CreateAndDeleteFlavor(utils.NovaScenario):
-    def run(self, ram, vcpus, disk, flavorid="auto",
-            ephemeral=0, swap=0, rxtx_factor=1.0, is_public=True):
-        """Create a flavor and then delete it.
-
-        :param ram: Memory in MB for the flavor
-        :param vcpus: Number of VCPUs for the flavor
-        :param disk: Size of local disk in GB
-        :param flavorid: ID for the flavor (optional). You can use the reserved
-                         value ``"auto"`` to have Nova generate a UUID for the
-                         flavor in cases where you cannot simply pass ``None``.
-        :param ephemeral: Ephemeral space size in GB (default 0).
-        :param swap: Swap space in MB
-        :param rxtx_factor: RX/TX factor
-        :param is_public: Make flavor accessible to the public (default true).
-        """
-        flavor = self._create_flavor(ram, vcpus, disk, flavorid=flavorid,
-                                     ephemeral=ephemeral, swap=swap,
-                                     rxtx_factor=rxtx_factor,
-                                     is_public=is_public)
-        self._delete_flavor(flavor.id)
-
-
-@validation.add("required_services", services=[consts.Service.NOVA])
-@validation.add("required_platform", platform="openstack", admin=True)
-@scenario.configure(context={"admin_cleanup": ["nova"]},
-                    name="NovaFlavors.create_flavor_and_set_keys",
-                    platform="openstack")
-class CreateFlavorAndSetKeys(utils.NovaScenario):
-    def run(self, ram, vcpus, disk, extra_specs, flavorid="auto",
-            ephemeral=0, swap=0, rxtx_factor=1.0, is_public=True):
-        """Create a flavor and set keys on it.
-
-        Measure the "nova flavor-key" command performance.
-        The scenario first creates a flavor, then adds the extra specs to it.
-
-        :param ram: Memory in MB for the flavor
-        :param vcpus: Number of VCPUs for the flavor
-        :param disk: Size of local disk in GB
-        :param extra_specs: additional arguments for flavor set keys
-        :param flavorid: ID for the flavor (optional). You can use the reserved
-                         value ``"auto"`` to have Nova generate a UUID for the
-                         flavor in cases where you cannot simply pass ``None``.
-        :param ephemeral: Ephemeral space size in GB (default 0).
-        :param swap: Swap space in MB
-        :param rxtx_factor: RX/TX factor
-        :param is_public: Make flavor accessible to the public (default true).
-        """
-        flavor = self._create_flavor(ram, vcpus, disk, flavorid=flavorid,
-                                     ephemeral=ephemeral, swap=swap,
-                                     rxtx_factor=rxtx_factor,
-                                     is_public=is_public)
-        self._set_flavor_keys(flavor, extra_specs)
diff --git a/rally/plugins/openstack/scenarios/nova/hosts.py b/rally/plugins/openstack/scenarios/nova/hosts.py
deleted file mode 100644
index c414da49..00000000
--- a/rally/plugins/openstack/scenarios/nova/hosts.py
+++ /dev/null
@@ -1,59 +0,0 @@
-# Copyright 2016 IBM Corp
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License.
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally import consts -from rally.plugins.openstack import scenario -from rally.plugins.openstack.scenarios.nova import utils -from rally.task import validation - - -"""Scenarios for Nova hosts.""" - - -@validation.add("required_services", services=[consts.Service.NOVA]) -@validation.add("required_platform", platform="openstack", admin=True) -@scenario.configure(name="NovaHosts.list_hosts", platform="openstack") -class ListHosts(utils.NovaScenario): - - def run(self, zone=None): - """List all nova hosts. - - Measure the "nova host-list" command performance. - - :param zone: List nova hosts in an availability-zone. - None (default value) means list hosts in all - availability-zones - """ - self._list_hosts(zone) - - -@validation.add("required_services", services=[consts.Service.NOVA]) -@validation.add("required_platform", platform="openstack", admin=True) -@scenario.configure(name="NovaHosts.list_and_get_hosts", platform="openstack") -class ListAndGetHosts(utils.NovaScenario): - - def run(self, zone=None): - """List all nova hosts, and get detailed information for compute hosts. - - Measure the "nova host-describe" command performance. - - :param zone: List nova hosts in an availability-zone. - None (default value) means list hosts in all - availability-zones - """ - hosts = self._list_hosts(zone, service="compute") - - for host in hosts: - self._get_host(host.host_name) diff --git a/rally/plugins/openstack/scenarios/nova/hypervisors.py b/rally/plugins/openstack/scenarios/nova/hypervisors.py deleted file mode 100644 index 9008e9b4..00000000 --- a/rally/plugins/openstack/scenarios/nova/hypervisors.py +++ /dev/null @@ -1,120 +0,0 @@ -# Copyright 2015 Cisco Systems Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally import consts -from rally.plugins.openstack import scenario -from rally.plugins.openstack.scenarios.nova import utils -from rally.task import validation - - -"""Scenarios for Nova hypervisors.""" - - -@validation.add("required_services", services=[consts.Service.NOVA]) -@validation.add("required_platform", platform="openstack", admin=True) -@scenario.configure(name="NovaHypervisors.list_hypervisors", - platform="openstack") -class ListHypervisors(utils.NovaScenario): - - def run(self, detailed=True): - """List hypervisors. - - Measure the "nova hypervisor-list" command performance. 
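For illustration, a task entry that could exercise the hypervisor-listing scenario above; the runner numbers are assumptions:

sample_hypervisors_task = {
    "NovaHypervisors.list_hypervisors": [{
        # detailed maps onto the run() parameter of ListHypervisors
        "args": {"detailed": True},
        "runner": {"type": "constant", "times": 5, "concurrency": 1}
    }]
}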
-
-        :param detailed: True if the hypervisor listing should contain
-                         detailed information about all of them
-        """
-        self._list_hypervisors(detailed)
-
-
-@validation.add("required_services", services=[consts.Service.NOVA])
-@validation.add("required_platform", platform="openstack", admin=True)
-@scenario.configure(name="NovaHypervisors.list_and_get_hypervisors",
-                    platform="openstack")
-class ListAndGetHypervisors(utils.NovaScenario):
-    """Benchmark scenario for Nova hypervisors."""
-    def run(self, detailed=True):
-        """List and get hypervisors.
-
-        The scenario first lists all hypervisors, then gets detailed
-        information of the listed hypervisors in turn.
-
-        Measure the "nova hypervisor-show" command performance.
-
-        :param detailed: True if the hypervisor listing should contain
-                         detailed information about all of them
-        """
-        hypervisors = self._list_hypervisors(detailed)
-
-        for hypervisor in hypervisors:
-            self._get_hypervisor(hypervisor)
-
-
-@validation.add("required_services", services=[consts.Service.NOVA])
-@validation.add("required_platform", platform="openstack", admin=True)
-@scenario.configure(name="NovaHypervisors.statistics_hypervisors",
-                    platform="openstack")
-class StatisticsHypervisors(utils.NovaScenario):
-
-    def run(self):
-        """Get hypervisor statistics over all compute nodes.
-
-        Measure the "nova hypervisor-stats" command performance.
-        """
-        self._statistics_hypervisors()
-
-
-@validation.add("required_services", services=[consts.Service.NOVA])
-@validation.add("required_platform", platform="openstack", admin=True)
-@scenario.configure(name="NovaHypervisors.list_and_get_uptime_hypervisors",
-                    platform="openstack")
-class ListAndGetUptimeHypervisors(utils.NovaScenario):
-    def run(self, detailed=True):
-        """List hypervisors, then display the uptime of each.
-
-        The scenario first lists all hypervisors, then displays
-        the uptime of the listed hypervisors in turn.
-
-        Measure the "nova hypervisor-uptime" command performance.
-
-        :param detailed: True if the hypervisor listing should contain
-                         detailed information about all of them
-        """
-        hypervisors = self._list_hypervisors(detailed)
-
-        for hypervisor in hypervisors:
-            self._uptime_hypervisor(hypervisor)
-
-
-@validation.add("required_services", services=[consts.Service.NOVA])
-@validation.add("required_platform", platform="openstack", admin=True)
-@scenario.configure(name="NovaHypervisors.list_and_search_hypervisors",
-                    platform="openstack")
-class ListAndSearchHypervisors(utils.NovaScenario):
-    def run(self, detailed=True):
-        """List all servers belonging to a specific hypervisor.
-
-        The scenario first lists all hypervisors, then finds each one's
-        hostname, then lists all servers belonging to that hypervisor.
-
-        Measure the "nova hypervisor-servers" command performance.
-
-        :param detailed: True if the hypervisor listing should contain
-                         detailed information about all of them
-        """
-        hypervisors = self._list_hypervisors(detailed)
-
-        for hypervisor in hypervisors:
-            self._search_hypervisors(hypervisor.hypervisor_hostname)
diff --git a/rally/plugins/openstack/scenarios/nova/images.py b/rally/plugins/openstack/scenarios/nova/images.py
deleted file mode 100644
index c7ec2a93..00000000
--- a/rally/plugins/openstack/scenarios/nova/images.py
+++ /dev/null
@@ -1,41 +0,0 @@
-# Copyright 2015: Workday, Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License.
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally import consts -from rally.plugins.openstack import scenario -from rally.plugins.openstack.scenarios.nova import utils -from rally.task import validation - - -"""Scenarios for Nova images.""" - - -@validation.add("required_services", services=[consts.Service.NOVA]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup": ["nova"]}, - name="NovaImages.list_images", - platform="openstack") -class ListImages(utils.NovaScenario): - - def run(self, detailed=True, **kwargs): - """List all images. - - Measure the "nova image-list" command performance. - - :param detailed: True if the image listing - should contain detailed information - :param kwargs: Optional additional arguments for image listing - """ - self._list_images(detailed, **kwargs) diff --git a/rally/plugins/openstack/scenarios/nova/keypairs.py b/rally/plugins/openstack/scenarios/nova/keypairs.py deleted file mode 100644 index eb7fa8ec..00000000 --- a/rally/plugins/openstack/scenarios/nova/keypairs.py +++ /dev/null @@ -1,125 +0,0 @@ -# Copyright 2015: Hewlett-Packard Development Company, L.P. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally.common import logging -from rally import consts -from rally.plugins.openstack import scenario -from rally.plugins.openstack.scenarios.nova import utils -from rally.task import types -from rally.task import validation - - -"""Scenarios for Nova keypairs.""" - - -@validation.add("required_services", services=[consts.Service.NOVA]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup": ["nova"]}, - name="NovaKeypair.create_and_list_keypairs", - platform="openstack") -class CreateAndListKeypairs(utils.NovaScenario): - - def run(self, **kwargs): - """Create a keypair with random name and list keypairs. - - This scenario creates a keypair and then lists all keypairs. 
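For illustration, a task entry for the keypair scenario above; the "users" context values and runner numbers are assumptions:

sample_keypair_task = {
    "NovaKeypair.create_and_list_keypairs": [{
        "args": {},
        "runner": {"type": "constant", "times": 10, "concurrency": 2},
        # keypairs are per-user resources, so a users context is needed
        "context": {"users": {"tenants": 2, "users_per_tenant": 2}}
    }]
}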
- - :param kwargs: Optional additional arguments for keypair creation - """ - - keypair_name = self._create_keypair(**kwargs) - self.assertTrue(keypair_name, "Keypair isn't created") - - list_keypairs = self._list_keypairs() - self.assertIn(keypair_name, [i.id for i in list_keypairs]) - - -@validation.add("required_services", services=[consts.Service.NOVA]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup": ["nova"]}, - name="NovaKeypair.create_and_delete_keypair", - platform="openstack") -class CreateAndDeleteKeypair(utils.NovaScenario): - - def run(self, **kwargs): - """Create a keypair with random name and delete keypair. - - This scenario creates a keypair and then delete that keypair. - - :param kwargs: Optional additional arguments for keypair creation - """ - - keypair = self._create_keypair(**kwargs) - self._delete_keypair(keypair) - - -@types.convert(image={"type": "glance_image"}, - flavor={"type": "nova_flavor"}) -@validation.add("image_valid_on_flavor", flavor_param="flavor", - image_param="image") -@validation.add("required_services", services=[consts.Service.NOVA]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup": ["nova"]}, - name="NovaKeypair.boot_and_delete_server_with_keypair", - platform="openstack") -class BootAndDeleteServerWithKeypair(utils.NovaScenario): - - @logging.log_deprecated_args( - "'server_kwargs' has been renamed 'boot_server_kwargs'", - "0.3.2", ["server_kwargs"], once=True) - def run(self, image, flavor, boot_server_kwargs=None, - server_kwargs=None, **kwargs): - """Boot and delete server with keypair. - - Plan of this scenario: - - create a keypair - - boot a VM with created keypair - - delete server - - delete keypair - - :param image: ID of the image to be used for server creation - :param flavor: ID of the flavor to be used for server creation - :param boot_server_kwargs: Optional additional arguments for VM - creation - :param server_kwargs: Deprecated alias for boot_server_kwargs - :param kwargs: Optional additional arguments for keypair creation - """ - - boot_server_kwargs = boot_server_kwargs or server_kwargs or {} - - keypair = self._create_keypair(**kwargs) - server = self._boot_server(image, flavor, - key_name=keypair, - **boot_server_kwargs) - self._delete_server(server) - self._delete_keypair(keypair) - - -@validation.add("required_services", services=[consts.Service.NOVA]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup": ["nova"]}, - name="NovaKeypair.create_and_get_keypair", - platform="openstack") -class CreateAndGetKeypair(utils.NovaScenario): - - def run(self, **kwargs): - """Create a keypair and get the keypair details. - - :param kwargs: Optional additional arguments for keypair creation - """ - - keypair = self._create_keypair(**kwargs) - - self._get_keypair(keypair) diff --git a/rally/plugins/openstack/scenarios/nova/server_groups.py b/rally/plugins/openstack/scenarios/nova/server_groups.py deleted file mode 100755 index 19174741..00000000 --- a/rally/plugins/openstack/scenarios/nova/server_groups.py +++ /dev/null @@ -1,76 +0,0 @@ -# Copyright 2017: Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from rally import consts
-from rally.plugins.openstack import scenario
-from rally.plugins.openstack.scenarios.nova import utils
-from rally.task import validation
-
-
-"""Scenarios for Nova server groups."""
-
-
-@validation.add("required_services", services=[consts.Service.NOVA])
-@validation.add("required_platform", platform="openstack", users=True)
-@scenario.configure(context={"cleanup": ["nova"]},
-                    name="NovaServerGroups.create_and_list_server_groups",
-                    platform="openstack")
-class CreateAndListServerGroups(utils.NovaScenario):
-
-    def run(self, all_projects=False, kwargs=None):
-        """Create a server group, then list all server groups.
-
-        Measure the "nova server-group-create" and "nova server-group-list"
-        command performance.
-
-        :param all_projects: If True, display server groups from all
-                             projects (admin only)
-        :param kwargs: Server group name and policy
-        """
-        kwargs = kwargs or {}
-        kwargs["name"] = self.generate_random_name()
-        server_group = self._create_server_group(**kwargs)
-        msg = "Server group isn't created"
-        self.assertTrue(server_group, err_msg=msg)
-
-        server_groups_list = self._list_server_groups(all_projects)
-        msg = ("Server group not included in the list of server groups\n"
-               "Created server group: {}\n"
-               "List of server groups: {}").format(server_group,
-                                                   server_groups_list)
-        self.assertIn(server_group, server_groups_list, err_msg=msg)
-
-
-@validation.add("required_services", services=[consts.Service.NOVA])
-@validation.add("required_platform", platform="openstack", users=True)
-@scenario.configure(context={"cleanup": ["nova"]},
-                    name="NovaServerGroups.create_and_get_server_group",
-                    platform="openstack")
-class CreateAndGetServerGroup(utils.NovaScenario):
-
-    def run(self, kwargs=None):
-        """Create a server group, then get its detailed information.
-
-        Measure the "nova server-group-create" and "nova server-group-get"
-        command performance.
-
-        :param kwargs: Server group name and policy
-        """
-        kwargs = kwargs or {}
-        kwargs["name"] = self.generate_random_name()
-        server_group = self._create_server_group(**kwargs)
-        msg = "Server group isn't created"
-        self.assertTrue(server_group, err_msg=msg)
-
-        server_group_info = self._get_server_group(server_group.id)
-        self.assertEqual(server_group.id, server_group_info.id)
diff --git a/rally/plugins/openstack/scenarios/nova/servers.py b/rally/plugins/openstack/scenarios/nova/servers.py
deleted file mode 100755
index 4c4ccecb..00000000
--- a/rally/plugins/openstack/scenarios/nova/servers.py
+++ /dev/null
@@ -1,1174 +0,0 @@
-# Copyright 2013: Mirantis Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
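Before the long servers.py listing, a hedged sketch of the common pattern every scenario class in these files follows; the class and scenario name here are hypothetical, and the sketch assumes a working Rally/OpenStack installation:

from rally.plugins.openstack import scenario
from rally.plugins.openstack.scenarios.nova import utils


@scenario.configure(context={"cleanup": ["nova"]},
                    name="NovaServers.example_boot_and_delete",  # hypothetical
                    platform="openstack")
class ExampleBootAndDelete(utils.NovaScenario):

    def run(self, image, flavor):
        # each underscore-prefixed helper is a timed atomic action
        # that shows up in Rally's report
        server = self._boot_server(image, flavor)
        self._delete_server(server)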
-
-import jsonschema
-
-from rally.common import logging
-from rally import consts
-from rally import exceptions as rally_exceptions
-from rally.plugins.openstack import scenario
-from rally.plugins.openstack.scenarios.cinder import utils as cinder_utils
-from rally.plugins.openstack.scenarios.neutron import utils as neutron_utils
-from rally.plugins.openstack.scenarios.nova import utils
-from rally.plugins.openstack.wrappers import network as network_wrapper
-from rally.task import types
-from rally.task import validation
-
-
-"""Scenarios for Nova servers."""
-
-
-LOG = logging.getLogger(__name__)
-
-
-@types.convert(image={"type": "glance_image"},
-               flavor={"type": "nova_flavor"})
-@validation.add("image_valid_on_flavor", flavor_param="flavor",
-                image_param="image")
-@validation.add("required_services", services=[consts.Service.NOVA])
-@validation.add("required_platform", platform="openstack", users=True)
-@scenario.configure(context={"cleanup": ["nova"]},
-                    name="NovaServers.boot_and_list_server",
-                    platform="openstack")
-class BootAndListServer(utils.NovaScenario):
-
-    def run(self, image, flavor, detailed=True, **kwargs):
-        """Boot a server from an image and then list all servers.
-
-        Measure the "nova list" command performance.
-
-        If you have only 1 user in your context, you will
-        add 1 server on every iteration. So you will have more
-        and more servers and will be able to measure the
-        performance of the "nova list" command depending on
-        the number of servers owned by users.
-
-        :param image: image to be used to boot an instance
-        :param flavor: flavor to be used to boot an instance
-        :param detailed: True if the server listing should contain
-                         detailed information about all of them
-        :param kwargs: Optional additional arguments for server creation
-        """
-        server = self._boot_server(image, flavor, **kwargs)
-        msg = "Server isn't created"
-        self.assertTrue(server, err_msg=msg)
-
-        pool_list = self._list_servers(detailed)
-        msg = ("Server not included in the list of available servers\n"
-               "Booted server: {}\n"
-               "Pool of servers: {}").format(server, pool_list)
-        self.assertIn(server, pool_list, err_msg=msg)
-
-
-@validation.add("required_services", services=[consts.Service.NOVA])
-@validation.add("required_platform", platform="openstack", users=True)
-@scenario.configure(context={"cleanup": ["nova"]},
-                    name="NovaServers.list_servers", platform="openstack")
-class ListServers(utils.NovaScenario):
-
-    def run(self, detailed=True):
-        """List all servers.
-
-        This simple scenario tests the nova list command by listing
-        all the servers.
-
-        :param detailed: True if detailed information about servers
-                         should be listed
-        """
-        self._list_servers(detailed)
-
-
-@types.convert(image={"type": "glance_image"},
-               flavor={"type": "nova_flavor"})
-@validation.add("image_valid_on_flavor", flavor_param="flavor",
-                image_param="image")
-@validation.add("required_services", services=[consts.Service.NOVA])
-@validation.add("required_platform", platform="openstack", users=True)
-@scenario.configure(context={"cleanup": ["nova"]},
-                    name="NovaServers.boot_and_delete_server",
-                    platform="openstack")
-class BootAndDeleteServer(utils.NovaScenario):
-
-    def run(self, image, flavor, min_sleep=0, max_sleep=0,
-            force_delete=False, **kwargs):
-        """Boot and delete a server.
-
-        Optional 'min_sleep' and 'max_sleep' parameters allow the scenario
-        to simulate a pause between server creation and deletion
-        (of random duration from [min_sleep, max_sleep]).
- - :param image: image to be used to boot an instance - :param flavor: flavor to be used to boot an instance - :param min_sleep: Minimum sleep time in seconds (non-negative) - :param max_sleep: Maximum sleep time in seconds (non-negative) - :param force_delete: True if force_delete should be used - :param kwargs: Optional additional arguments for server creation - """ - server = self._boot_server(image, flavor, **kwargs) - self.sleep_between(min_sleep, max_sleep) - self._delete_server(server, force=force_delete) - - -@types.convert(image={"type": "glance_image"}, - flavor={"type": "nova_flavor"}) -@validation.add("image_valid_on_flavor", flavor_param="flavor", - image_param="image") -@validation.add("required_services", services=[consts.Service.NOVA]) -@validation.add("required_platform", platform="openstack", - admin=True, users=True) -@scenario.configure(context={"cleanup": ["nova"]}, - name="NovaServers.boot_and_delete_multiple_servers", - platform="openstack") -class BootAndDeleteMultipleServers(utils.NovaScenario): - - def run(self, image, flavor, count=2, min_sleep=0, - max_sleep=0, force_delete=False, **kwargs): - """Boot multiple servers in a single request and delete them. - - Deletion is done in parallel with one request per server, not - with a single request for all servers. - - :param image: The image to boot from - :param flavor: Flavor used to boot instance - :param count: Number of instances to boot - :param min_sleep: Minimum sleep time in seconds (non-negative) - :param max_sleep: Maximum sleep time in seconds (non-negative) - :param force_delete: True if force_delete should be used - :param kwargs: Optional additional arguments for instance creation - """ - servers = self._boot_servers(image, flavor, 1, instances_amount=count, - **kwargs) - self.sleep_between(min_sleep, max_sleep) - self._delete_servers(servers, force=force_delete) - - -@types.convert(image={"type": "glance_image"}, - flavor={"type": "nova_flavor"}) -@validation.add("image_valid_on_flavor", flavor_param="flavor", - image_param="image", validate_disk=False) -@validation.add("required_services", services=[consts.Service.NOVA, - consts.Service.CINDER]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup": ["nova", "cinder"]}, - name="NovaServers.boot_server_from_volume_and_delete", - platform="openstack") -class BootServerFromVolumeAndDelete(utils.NovaScenario, - cinder_utils.CinderBasic): - - def run(self, image, flavor, volume_size, volume_type=None, - min_sleep=0, max_sleep=0, force_delete=False, **kwargs): - """Boot a server from volume and then delete it. - - The scenario first creates a volume and then a server. - Optional 'min_sleep' and 'max_sleep' parameters allow the scenario - to simulate a pause between volume creation and deletion - (of random duration from [min_sleep, max_sleep]). 
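For illustration, a task entry for the boot-from-volume scenario above; the image and flavor name patterns are assumptions about the target cloud:

sample_boot_from_volume_task = {
    "NovaServers.boot_server_from_volume_and_delete": [{
        "args": {"image": {"name": "^cirros.*$"},    # assumed image name
                 "flavor": {"name": "m1.tiny"},      # assumed flavor name
                 "volume_size": 1},
        "runner": {"type": "serial", "times": 3}
    }]
}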
-
-        :param image: image to be used to boot an instance
-        :param flavor: flavor to be used to boot an instance
-        :param volume_size: volume size (in GB)
-        :param volume_type: specifies volume type when there are
-                            multiple backends
-        :param min_sleep: Minimum sleep time in seconds (non-negative)
-        :param max_sleep: Maximum sleep time in seconds (non-negative)
-        :param force_delete: True if force_delete should be used
-        :param kwargs: Optional additional arguments for server creation
-        """
-        volume = self.cinder.create_volume(volume_size, imageRef=image,
-                                           volume_type=volume_type)
-        block_device_mapping = {"vda": "%s:::1" % volume.id}
-        server = self._boot_server(None, flavor,
-                                   block_device_mapping=block_device_mapping,
-                                   **kwargs)
-        self.sleep_between(min_sleep, max_sleep)
-        self._delete_server(server, force=force_delete)
-
-
-@types.convert(image={"type": "glance_image"},
-               flavor={"type": "nova_flavor"})
-@validation.add("image_valid_on_flavor", flavor_param="flavor",
-                image_param="image")
-@validation.add("required_services", services=[consts.Service.NOVA])
-@validation.add("required_platform", platform="openstack", users=True)
-@scenario.configure(context={"cleanup": ["nova"]},
-                    name="NovaServers.boot_and_bounce_server",
-                    platform="openstack")
-class BootAndBounceServer(utils.NovaScenario):
-
-    def run(self, image, flavor, force_delete=False, actions=None, **kwargs):
-        """Boot a server and run specified actions against it.
-
-        Actions should be passed into the actions parameter. Available actions
-        are 'hard_reboot', 'soft_reboot', 'stop_start', 'rescue_unrescue',
-        'pause_unpause', 'suspend_resume', 'lock_unlock' and 'shelve_unshelve'.
-        Delete server after all actions were completed.
-
-        :param image: image to be used to boot an instance
-        :param flavor: flavor to be used to boot an instance
-        :param force_delete: True if force_delete should be used
-        :param actions: list of action dictionaries, where each action
-                        dictionary specifies an action to be performed
-                        in the following format:
-                        {"action_name": }
-        :param kwargs: Optional additional arguments for server creation
-        """
-        action_builder = self._bind_actions()
-        actions = actions or []
-        try:
-            action_builder.validate(actions)
-        except jsonschema.exceptions.ValidationError as error:
-            raise rally_exceptions.InvalidConfigException(
-                "Invalid server actions configuration \'%(actions)s\' due to: "
-                "%(error)s" % {"actions": str(actions), "error": str(error)})
-        server = self._boot_server(image, flavor, **kwargs)
-        for action in action_builder.build_actions(actions, server):
-            action()
-        self._delete_server(server, force=force_delete)
-
-
-@types.convert(image={"type": "glance_image"},
-               flavor={"type": "nova_flavor"})
-@validation.add("image_valid_on_flavor", flavor_param="flavor",
-                image_param="image")
-@validation.add("required_services", services=[consts.Service.NOVA])
-@validation.add("required_platform", platform="openstack", users=True)
-@scenario.configure(context={"cleanup": ["nova"]},
-                    name="NovaServers.boot_lock_unlock_and_delete",
-                    platform="openstack")
-class BootLockUnlockAndDelete(utils.NovaScenario):
-
-    def run(self, image, flavor, min_sleep=0,
-            max_sleep=0, force_delete=False, **kwargs):
-        """Boot a server, lock it, then unlock and delete it.
-
-        Optional 'min_sleep' and 'max_sleep' parameters allow the
-        scenario to simulate a pause between locking and unlocking the
-        server (of random duration from min_sleep to max_sleep).
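For illustration, a plausible value for the 'actions' argument of NovaServers.boot_and_bounce_server above; the action names come from its docstring, while the repeat counts are assumptions:

sample_actions = [
    {"hard_reboot": 1},   # hard-reboot the server once
    {"stop_start": 2},    # stop and start it twice
    {"pause_unpause": 1}  # pause and unpause it once
]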
- - :param image: image to be used to boot an instance - :param flavor: flavor to be used to boot an instance - :param min_sleep: Minimum sleep time between locking and unlocking - in seconds - :param max_sleep: Maximum sleep time between locking and unlocking - in seconds - :param force_delete: True if force_delete should be used - :param kwargs: Optional additional arguments for server creation - """ - server = self._boot_server(image, flavor, **kwargs) - self._lock_server(server) - self.sleep_between(min_sleep, max_sleep) - self._unlock_server(server) - self._delete_server(server, force=force_delete) - - -@types.convert(image={"type": "glance_image"}, - flavor={"type": "nova_flavor"}) -@validation.add("image_valid_on_flavor", flavor_param="flavor", - image_param="image") -@validation.add("required_services", services=[consts.Service.NOVA, - consts.Service.GLANCE]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup": ["nova", "glance"]}, - name="NovaServers.snapshot_server", - platform="openstack") -class SnapshotServer(utils.NovaScenario): - - def run(self, image, flavor, force_delete=False, **kwargs): - """Boot a server, make its snapshot and delete both. - - :param image: image to be used to boot an instance - :param flavor: flavor to be used to boot an instance - :param force_delete: True if force_delete should be used - :param kwargs: Optional additional arguments for server creation - """ - - server = self._boot_server(image, flavor, **kwargs) - image = self._create_image(server) - self._delete_server(server, force=force_delete) - - server = self._boot_server(image.id, flavor, **kwargs) - self._delete_server(server, force=force_delete) - self._delete_image(image) - - -@types.convert(image={"type": "glance_image"}, - flavor={"type": "nova_flavor"}) -@validation.add("image_valid_on_flavor", flavor_param="flavor", - image_param="image") -@validation.add("required_services", services=[consts.Service.NOVA]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup": ["nova"]}, - name="NovaServers.boot_server", - platform="openstack") -class BootServer(utils.NovaScenario): - - def run(self, image, flavor, auto_assign_nic=False, **kwargs): - """Boot a server. - - Assumes that cleanup is done elsewhere. - - :param image: image to be used to boot an instance - :param flavor: flavor to be used to boot an instance - :param auto_assign_nic: True if NICs should be assigned - :param kwargs: Optional additional arguments for server creation - """ - self._boot_server(image, flavor, - auto_assign_nic=auto_assign_nic, **kwargs) - - -@types.convert(image={"type": "glance_image"}, - flavor={"type": "nova_flavor"}) -@validation.add("image_valid_on_flavor", flavor_param="flavor", - image_param="image", validate_disk=False) -@validation.add("required_services", services=[consts.Service.NOVA, - consts.Service.CINDER]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup": ["nova", "cinder"]}, - name="NovaServers.boot_server_from_volume", - platform="openstack") -class BootServerFromVolume(utils.NovaScenario, cinder_utils.CinderBasic): - - def run(self, image, flavor, volume_size, - volume_type=None, auto_assign_nic=False, **kwargs): - """Boot a server from volume. - - The scenario first creates a volume and then a server. - Assumes that cleanup is done elsewhere. 
- - :param image: image to be used to boot an instance - :param flavor: flavor to be used to boot an instance - :param volume_size: volume size (in GB) - :param volume_type: specifies volume type when there are - multiple backends - :param auto_assign_nic: True if NICs should be assigned - :param kwargs: Optional additional arguments for server creation - """ - volume = self.cinder.create_volume(volume_size, imageRef=image, - volume_type=volume_type) - block_device_mapping = {"vda": "%s:::1" % volume.id} - self._boot_server(None, flavor, auto_assign_nic=auto_assign_nic, - block_device_mapping=block_device_mapping, - **kwargs) - - -@types.convert(image={"type": "glance_image"}, - flavor={"type": "nova_flavor"}, - to_flavor={"type": "nova_flavor"}) -@validation.add("image_valid_on_flavor", flavor_param="flavor", - image_param="image") -@validation.add("required_services", services=(consts.Service.NOVA)) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup": ["nova"]}, - name="NovaServers.resize_server", platform="openstack") -class ResizeServer(utils.NovaScenario): - - def run(self, image, flavor, to_flavor, force_delete=False, **kwargs): - """Boot a server, then resize and delete it. - - This test will confirm the resize by default, - or revert the resize if confirm is set to false. - - :param image: image to be used to boot an instance - :param flavor: flavor to be used to boot an instance - :param to_flavor: flavor to be used to resize the booted instance - :param force_delete: True if force_delete should be used - :param kwargs: Optional additional arguments for server creation - """ - server = self._boot_server(image, flavor, **kwargs) - self._resize(server, to_flavor) - # by default we confirm - confirm = kwargs.get("confirm", True) - if confirm: - self._resize_confirm(server) - else: - self._resize_revert(server) - self._delete_server(server, force=force_delete) - - -@types.convert(image={"type": "glance_image"}, - flavor={"type": "nova_flavor"}, - to_flavor={"type": "nova_flavor"}) -@validation.add("image_valid_on_flavor", flavor_param="flavor", - image_param="image") -@validation.add("required_services", services=[consts.Service.NOVA]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup": ["nova"]}, - name="NovaServers.resize_shutoff_server", - platform="openstack") -class ResizeShutoffServer(utils.NovaScenario): - - def run(self, image, flavor, to_flavor, confirm=True, - force_delete=False, **kwargs): - """Boot a server and stop it, then resize and delete it. - - This test will confirm the resize by default, - or revert the resize if confirm is set to false. 
- - :param image: image to be used to boot an instance - :param flavor: flavor to be used to boot an instance - :param to_flavor: flavor to be used to resize the booted instance - :param confirm: True if need to confirm resize else revert resize - :param force_delete: True if force_delete should be used - :param kwargs: Optional additional arguments for server creation - """ - server = self._boot_server(image, flavor, **kwargs) - self._stop_server(server) - self._resize(server, to_flavor) - - if confirm: - self._resize_confirm(server, "SHUTOFF") - else: - self._resize_revert(server, "SHUTOFF") - self._delete_server(server, force=force_delete) - - -@types.convert(image={"type": "glance_image"}, - flavor={"type": "nova_flavor"}, - to_flavor={"type": "nova_flavor"}) -@validation.add("image_valid_on_flavor", flavor_param="flavor", - image_param="image") -@validation.add("required_services", services=[consts.Service.NOVA, - consts.Service.CINDER]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure( - context={"cleanup": ["cinder", "nova"]}, - name="NovaServers.boot_server_attach_created_volume_and_resize", - platform="openstack") -class BootServerAttachCreatedVolumeAndResize(utils.NovaScenario, - cinder_utils.CinderBasic): - - def run(self, image, flavor, to_flavor, volume_size, min_sleep=0, - max_sleep=0, force_delete=False, confirm=True, do_delete=True, - boot_server_kwargs=None, create_volume_kwargs=None): - """Create a VM from image, attach a volume to it and resize. - - Simple test to create a VM and attach a volume, then resize the VM, - detach the volume then delete volume and VM. - Optional 'min_sleep' and 'max_sleep' parameters allow the scenario - to simulate a pause between attaching a volume and running resize - (of random duration from range [min_sleep, max_sleep]). 
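For illustration, a task entry for the attach-volume-and-resize scenario above; the image and flavor names are assumptions:

sample_resize_task = {
    "NovaServers.boot_server_attach_created_volume_and_resize": [{
        "args": {"image": {"name": "^cirros.*$"},   # assumed image name
                 "flavor": {"name": "m1.tiny"},     # assumed flavor names
                 "to_flavor": {"name": "m1.small"},
                 "volume_size": 1,
                 "confirm": True,
                 "do_delete": True},
        "runner": {"type": "constant", "times": 2, "concurrency": 1}
    }]
}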
-        :param image: Glance image name to use for the VM
-        :param flavor: VM flavor name
-        :param to_flavor: flavor to be used to resize the booted instance
-        :param volume_size: volume size (in GB)
-        :param min_sleep: Minimum sleep time in seconds (non-negative)
-        :param max_sleep: Maximum sleep time in seconds (non-negative)
-        :param force_delete: True if force_delete should be used
-        :param confirm: True if need to confirm resize else revert resize
-        :param do_delete: True if resources need to be deleted explicitly,
-                          else use rally cleanup to remove resources
-        :param boot_server_kwargs: optional arguments for VM creation
-        :param create_volume_kwargs: optional arguments for volume creation
-        """
-        boot_server_kwargs = boot_server_kwargs or {}
-        create_volume_kwargs = create_volume_kwargs or {}
-
-        server = self._boot_server(image, flavor, **boot_server_kwargs)
-        volume = self.cinder.create_volume(volume_size, **create_volume_kwargs)
-
-        attachment = self._attach_volume(server, volume)
-        self.sleep_between(min_sleep, max_sleep)
-        self._resize(server, to_flavor)
-
-        if confirm:
-            self._resize_confirm(server)
-        else:
-            self._resize_revert(server)
-
-        if do_delete:
-            self._detach_volume(server, volume, attachment)
-            self.cinder.delete_volume(volume)
-            self._delete_server(server, force=force_delete)
-
-
-@validation.add("number", param_name="volume_num", minval=1,
-                integer_only=True)
-@validation.add("number", param_name="volume_size", minval=1,
-                integer_only=True)
-@types.convert(image={"type": "glance_image"},
-               flavor={"type": "nova_flavor"})
-@validation.add("image_valid_on_flavor", flavor_param="flavor",
-                image_param="image", validate_disk=False)
-@validation.add("required_services", services=[consts.Service.NOVA,
-                                               consts.Service.CINDER])
-@validation.add("required_platform", platform="openstack", users=True)
-@scenario.configure(
-    context={"cleanup": ["cinder", "nova"]},
-    name="NovaServers.boot_server_attach_volume_and_list_attachments",
-    platform="openstack")
-class BootServerAttachVolumeAndListAttachments(utils.NovaScenario,
-                                               cinder_utils.CinderBasic):
-
-    def run(self, image, flavor, volume_size=1, volume_num=2,
-            boot_server_kwargs=None, create_volume_kwargs=None):
-        """Create a VM, attach N volumes to it and list server's attachments.
-
-        Measure the "nova volume-attachments" command performance.
- - :param image: Glance image name to use for the VM - :param flavor: VM flavor name - :param volume_size: volume size (in GB), default 1G - :param volume_num: the num of attached volume - :param boot_server_kwargs: optional arguments for VM creation - :param create_volume_kwargs: optional arguments for volume creation - """ - boot_server_kwargs = boot_server_kwargs or {} - create_volume_kwargs = create_volume_kwargs or {} - - server = self._boot_server(image, flavor, **boot_server_kwargs) - attachments = [] - for i in range(volume_num): - volume = self.cinder.create_volume(volume_size, - **create_volume_kwargs) - attachments.append(self._attach_volume(server, volume)) - - list_attachments = self._list_attachments(server.id) - - for attachment in attachments: - msg = ("attachment not included into list of available" - "attachments\n attachment: {}\n" - "list attachments: {}").format(attachment, list_attachments) - self.assertIn(attachment, list_attachments, err_msg=msg) - - -@types.convert(image={"type": "glance_image"}, - flavor={"type": "nova_flavor"}, - to_flavor={"type": "nova_flavor"}) -@validation.add("image_valid_on_flavor", flavor_param="flavor", - image_param="image", validate_disk=False) -@validation.add("required_services", services=[consts.Service.NOVA, - consts.Service.CINDER]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup": ["nova", "cinder"]}, - name="NovaServers.boot_server_from_volume_and_resize", - platform="openstack") -class BootServerFromVolumeAndResize(utils.NovaScenario, - cinder_utils.CinderBasic): - - def run(self, image, flavor, to_flavor, volume_size, min_sleep=0, - max_sleep=0, force_delete=False, confirm=True, do_delete=True, - boot_server_kwargs=None, create_volume_kwargs=None): - """Boot a server from volume, then resize and delete it. - - The scenario first creates a volume and then a server. - Optional 'min_sleep' and 'max_sleep' parameters allow the scenario - to simulate a pause between volume creation and deletion - (of random duration from [min_sleep, max_sleep]). - - This test will confirm the resize by default, - or revert the resize if confirm is set to false. 
- - :param image: image to be used to boot an instance - :param flavor: flavor to be used to boot an instance - :param to_flavor: flavor to be used to resize the booted instance - :param volume_size: volume size (in GB) - :param min_sleep: Minimum sleep time in seconds (non-negative) - :param max_sleep: Maximum sleep time in seconds (non-negative) - :param force_delete: True if force_delete should be used - :param confirm: True if need to confirm resize else revert resize - :param do_delete: True if resources needs to be deleted explicitly - else use rally cleanup to remove resources - :param boot_server_kwargs: optional arguments for VM creation - :param create_volume_kwargs: optional arguments for volume creation - """ - boot_server_kwargs = boot_server_kwargs or {} - create_volume_kwargs = create_volume_kwargs or {} - - if boot_server_kwargs.get("block_device_mapping"): - LOG.warning("Using already existing volume is not permitted.") - - volume = self.cinder.create_volume(volume_size, imageRef=image, - **create_volume_kwargs) - boot_server_kwargs["block_device_mapping"] = { - "vda": "%s:::1" % volume.id} - - server = self._boot_server(None, flavor, **boot_server_kwargs) - self.sleep_between(min_sleep, max_sleep) - self._resize(server, to_flavor) - - if confirm: - self._resize_confirm(server) - else: - self._resize_revert(server) - - if do_delete: - self._delete_server(server, force=force_delete) - - -@types.convert(image={"type": "glance_image"}, - flavor={"type": "nova_flavor"}) -@validation.add("image_valid_on_flavor", flavor_param="flavor", - image_param="image") -@validation.add("required_services", services=[consts.Service.NOVA]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup": ["nova"]}, - name="NovaServers.suspend_and_resume_server", - platform="openstack") -class SuspendAndResumeServer(utils.NovaScenario): - - def run(self, image, flavor, force_delete=False, **kwargs): - """Create a server, suspend, resume and then delete it - - :param image: image to be used to boot an instance - :param flavor: flavor to be used to boot an instance - :param force_delete: True if force_delete should be used - :param kwargs: Optional additional arguments for server creation - """ - server = self._boot_server(image, flavor, **kwargs) - self._suspend_server(server) - self._resume_server(server) - self._delete_server(server, force=force_delete) - - -@types.convert(image={"type": "glance_image"}, - flavor={"type": "nova_flavor"}) -@validation.add("image_valid_on_flavor", flavor_param="flavor", - image_param="image") -@validation.add("required_services", services=[consts.Service.NOVA]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup": ["nova"]}, - name="NovaServers.pause_and_unpause_server", - platform="openstack") -class PauseAndUnpauseServer(utils.NovaScenario): - - def run(self, image, flavor, force_delete=False, **kwargs): - """Create a server, pause, unpause and then delete it - - :param image: image to be used to boot an instance - :param flavor: flavor to be used to boot an instance - :param force_delete: True if force_delete should be used - :param kwargs: Optional additional arguments for server creation - """ - server = self._boot_server(image, flavor, **kwargs) - self._pause_server(server) - self._unpause_server(server) - self._delete_server(server, force=force_delete) - - -@types.convert(image={"type": "glance_image"}, - flavor={"type": "nova_flavor"}) 
-@validation.add("image_valid_on_flavor", flavor_param="flavor", - image_param="image") -@validation.add("required_services", services=[consts.Service.NOVA]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup": ["nova"]}, - name="NovaServers.shelve_and_unshelve_server", - platform="openstack") -class ShelveAndUnshelveServer(utils.NovaScenario): - - def run(self, image, flavor, force_delete=False, **kwargs): - """Create a server, shelve, unshelve and then delete it - - :param image: image to be used to boot an instance - :param flavor: flavor to be used to boot an instance - :param force_delete: True if force_delete should be used - :param kwargs: Optional additional arguments for server creation - """ - server = self._boot_server(image, flavor, **kwargs) - self._shelve_server(server) - self._unshelve_server(server) - self._delete_server(server, force=force_delete) - - -@types.convert(image={"type": "glance_image"}, - flavor={"type": "nova_flavor"}) -@validation.add("image_valid_on_flavor", flavor_param="flavor", - image_param="image") -@validation.add("required_services", services=[consts.Service.NOVA]) -@validation.add("required_platform", platform="openstack", - admin=True, users=True) -@scenario.configure(context={"cleanup": ["nova"]}, - name="NovaServers.boot_and_live_migrate_server", - platform="openstack") -class BootAndLiveMigrateServer(utils.NovaScenario): - - def run(self, image, flavor, block_migration=False, disk_over_commit=False, - min_sleep=0, max_sleep=0, **kwargs): - """Live Migrate a server. - - This scenario launches a VM on a compute node available in - the availability zone and then migrates the VM to another - compute node on the same availability zone. - - Optional 'min_sleep' and 'max_sleep' parameters allow the scenario - to simulate a pause between VM booting and running live migration - (of random duration from range [min_sleep, max_sleep]). - - :param image: image to be used to boot an instance - :param flavor: flavor to be used to boot an instance - :param block_migration: Specifies the migration type - :param disk_over_commit: Specifies whether to allow overcommit - on migrated instance or not - :param min_sleep: Minimum sleep time in seconds (non-negative) - :param max_sleep: Maximum sleep time in seconds (non-negative) - :param kwargs: Optional additional arguments for server creation - """ - server = self._boot_server(image, flavor, **kwargs) - self.sleep_between(min_sleep, max_sleep) - - new_host = self._find_host_to_migrate(server) - self._live_migrate(server, new_host, - block_migration, disk_over_commit) - - self._delete_server(server) - - -@types.convert(image={"type": "glance_image"}, - flavor={"type": "nova_flavor"}) -@validation.add("image_valid_on_flavor", flavor_param="flavor", - image_param="image", validate_disk=False) -@validation.add("required_services", services=[consts.Service.NOVA, - consts.Service.CINDER]) -@validation.add("required_platform", platform="openstack", - admin=True, users=True) -@scenario.configure( - context={"cleanup": ["nova", "cinder"]}, - name="NovaServers.boot_server_from_volume_and_live_migrate", - platform="openstack") -class BootServerFromVolumeAndLiveMigrate(utils.NovaScenario, - cinder_utils.CinderBasic): - - def run(self, image, flavor, volume_size, volume_type=None, - block_migration=False, disk_over_commit=False, force_delete=False, - min_sleep=0, max_sleep=0, **kwargs): - """Boot a server from volume and then migrate it. 
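For illustration, a task entry for NovaServers.boot_and_live_migrate_server defined above; per its required_platform validator this needs admin credentials, and the image and flavor names are assumptions:

sample_live_migrate_task = {
    "NovaServers.boot_and_live_migrate_server": [{
        "args": {"image": {"name": "^cirros.*$"},  # assumed image name
                 "flavor": {"name": "m1.tiny"},    # assumed flavor name
                 "block_migration": False},
        "runner": {"type": "constant", "times": 2, "concurrency": 1}
    }]
}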
- - The scenario first creates a volume and a server booted from - the volume on a compute node available in the availability zone and - then migrates the VM to another compute node on the same availability - zone. - - Optional 'min_sleep' and 'max_sleep' parameters allow the scenario - to simulate a pause between VM booting and running live migration - (of random duration from range [min_sleep, max_sleep]). - - :param image: image to be used to boot an instance - :param flavor: flavor to be used to boot an instance - :param volume_size: volume size (in GB) - :param volume_type: specifies volume type when there are - multiple backends - :param block_migration: Specifies the migration type - :param disk_over_commit: Specifies whether to allow overcommit - on migrated instance or not - :param force_delete: True if force_delete should be used - :param min_sleep: Minimum sleep time in seconds (non-negative) - :param max_sleep: Maximum sleep time in seconds (non-negative) - :param kwargs: Optional additional arguments for server creation - """ - volume = self.cinder.create_volume(volume_size, imageRef=image, - volume_type=volume_type) - block_device_mapping = {"vda": "%s:::1" % volume.id} - server = self._boot_server(None, flavor, - block_device_mapping=block_device_mapping, - **kwargs) - self.sleep_between(min_sleep, max_sleep) - - new_host = self._find_host_to_migrate(server) - self._live_migrate(server, new_host, - block_migration, disk_over_commit) - - self._delete_server(server, force=force_delete) - - -@types.convert(image={"type": "glance_image"}, - flavor={"type": "nova_flavor"}) -@validation.add("image_valid_on_flavor", flavor_param="flavor", - image_param="image") -@validation.add("required_services", services=[consts.Service.NOVA, - consts.Service.CINDER]) -@validation.add("required_platform", platform="openstack", - admin=True, users=True) -@scenario.configure( - context={"cleanup": ["cinder", "nova"]}, - name="NovaServers.boot_server_attach_created_volume_and_live_migrate", - platform="openstack") -class BootServerAttachCreatedVolumeAndLiveMigrate(utils.NovaScenario, - cinder_utils.CinderBasic): - - def run(self, image, flavor, size, block_migration=False, - disk_over_commit=False, boot_server_kwargs=None, - create_volume_kwargs=None, min_sleep=0, max_sleep=0): - """Create a VM, attach a volume to it and live migrate. - - Simple test to create a VM and attach a volume, then migrate the VM, - detach the volume and delete volume/VM. - - Optional 'min_sleep' and 'max_sleep' parameters allow the scenario - to simulate a pause between attaching a volume and running live - migration (of random duration from range [min_sleep, max_sleep]). 
- - :param image: Glance image name to use for the VM - :param flavor: VM flavor name - :param size: volume size (in GB) - :param block_migration: Specifies the migration type - :param disk_over_commit: Specifies whether to allow overcommit - on migrated instance or not - :param boot_server_kwargs: optional arguments for VM creation - :param create_volume_kwargs: optional arguments for volume creation - :param min_sleep: Minimum sleep time in seconds (non-negative) - :param max_sleep: Maximum sleep time in seconds (non-negative) - """ - - if boot_server_kwargs is None: - boot_server_kwargs = {} - if create_volume_kwargs is None: - create_volume_kwargs = {} - - server = self._boot_server(image, flavor, **boot_server_kwargs) - volume = self.cinder.create_volume(size, **create_volume_kwargs) - - attachment = self._attach_volume(server, volume) - - self.sleep_between(min_sleep, max_sleep) - - new_host = self._find_host_to_migrate(server) - self._live_migrate(server, new_host, - block_migration, disk_over_commit) - - self._detach_volume(server, volume, attachment) - - self.cinder.delete_volume(volume) - self._delete_server(server) - - -@types.convert(image={"type": "glance_image"}, - flavor={"type": "nova_flavor"}) -@validation.add("image_valid_on_flavor", flavor_param="flavor", - image_param="image") -@validation.add("required_services", services=[consts.Service.NOVA]) -@validation.add("required_platform", platform="openstack", - admin=True, users=True) -@scenario.configure(context={"cleanup": ["nova"]}, - name="NovaServers.boot_and_migrate_server", - platform="openstack") -class BootAndMigrateServer(utils.NovaScenario): - - def run(self, image, flavor, **kwargs): - """Migrate a server. - - This scenario launches a VM on a compute node available in - the availability zone, and then migrates the VM - to another compute node on the same availability zone. - - :param image: image to be used to boot an instance - :param flavor: flavor to be used to boot an instance - :param kwargs: Optional additional arguments for server creation - """ - server = self._boot_server(image, flavor, **kwargs) - self._migrate(server) - # NOTE(wtakase): This is required because cold migration and resize - # share same code path. - confirm = kwargs.get("confirm", True) - if confirm: - self._resize_confirm(server, status="ACTIVE") - else: - self._resize_revert(server, status="ACTIVE") - self._delete_server(server) - - -@types.convert(from_image={"type": "glance_image"}, - to_image={"type": "glance_image"}, - flavor={"type": "nova_flavor"}) -@validation.add("image_valid_on_flavor", flavor_param="flavor", - image_param="from_image") -@validation.add("image_valid_on_flavor", flavor_param="flavor", - image_param="to_image") -@validation.add("required_services", services=[consts.Service.NOVA]) -@validation.add("required_platform", platform="openstack", - admin=True, users=True) -@scenario.configure(context={"cleanup": ["nova"]}, - name="NovaServers.boot_and_rebuild_server", - platform="openstack") -class BootAndRebuildServer(utils.NovaScenario): - - def run(self, from_image, to_image, flavor, **kwargs): - """Rebuild a server. - - This scenario launches a VM, then rebuilds that VM with a - different image. 
-
-        :param from_image: image to be used to boot an instance
-        :param to_image: image to be used to rebuild the instance
-        :param flavor: flavor to be used to boot an instance
-        :param kwargs: Optional additional arguments for server creation
-        """
-        server = self._boot_server(from_image, flavor, **kwargs)
-        self._rebuild_server(server, to_image)
-        self._delete_server(server)
-
-
-@types.convert(image={"type": "glance_image"},
-               flavor={"type": "nova_flavor"})
-@validation.add("image_valid_on_flavor", flavor_param="flavor",
-                image_param="image")
-@validation.add("required_services", services=[consts.Service.NOVA])
-@validation.add("required_platform", platform="openstack", users=True)
-@validation.add("required_contexts", contexts=("network",))
-@scenario.configure(context={"cleanup": ["nova", "neutron.floatingip"]},
-                    name="NovaServers.boot_and_associate_floating_ip",
-                    platform="openstack")
-class BootAndAssociateFloatingIp(utils.NovaScenario):
-
-    def run(self, image, flavor, **kwargs):
-        """Boot a server and associate a floating IP to it.
-
-        :param image: image to be used to boot an instance
-        :param flavor: flavor to be used to boot an instance
-        :param kwargs: Optional additional arguments for server creation
-        """
-        server = self._boot_server(image, flavor, **kwargs)
-        address = network_wrapper.wrap(self.clients, self).create_floating_ip(
-            tenant_id=server.tenant_id)
-        self._associate_floating_ip(server, address["ip"])
-
-
-@types.convert(image={"type": "glance_image"},
-               flavor={"type": "nova_flavor"})
-@validation.add("image_valid_on_flavor", flavor_param="flavor",
-                image_param="image")
-@validation.add("required_services", services=[consts.Service.NOVA,
-                                               consts.Service.NEUTRON])
-@validation.add("required_platform", platform="openstack", users=True)
-@scenario.configure(context={"cleanup": ["nova", "neutron"]},
-                    name="NovaServers.boot_server_and_attach_interface",
-                    platform="openstack")
-class BootServerAndAttachInterface(utils.NovaScenario,
-                                   neutron_utils.NeutronScenario):
-    def run(self, image, flavor, network_create_args=None,
-            subnet_create_args=None, subnet_cidr_start=None,
-            boot_server_args=None):
-        """Create a server and a subnet, then attach an interface to it.
-
-        This scenario measures the "nova interface-attach" command
-        performance.
-
-        :param image: image to be used to boot an instance
-        :param flavor: flavor to be used to boot an instance
-        :param network_create_args: dict, POST /v2.0/networks request
-                                    options.
-        :param subnet_create_args: dict, POST /v2.0/subnets request options
-        :param subnet_cidr_start: str, start value for subnets CIDR
-        :param boot_server_args: Optional additional arguments for
-                                 server creation
-        """
-        network = self._get_or_create_network(network_create_args)
-        self._create_subnet(network, subnet_create_args, subnet_cidr_start)
-
-        server = self._boot_server(image, flavor,
-                                   **(boot_server_args or {}))
-        self._attach_interface(server, net_id=network["network"]["id"])
-
-
-@types.convert(image={"type": "glance_image"},
-               flavor={"type": "nova_flavor"})
-@validation.add("image_valid_on_flavor", flavor_param="flavor",
-                image_param="image")
-@validation.add("required_services", services=[consts.Service.NOVA])
-@validation.add("required_platform", platform="openstack", users=True)
-@scenario.configure(context={"cleanup": ["nova"]},
-                    name="NovaServers.boot_and_show_server",
-                    platform="openstack")
-class BootAndShowServer(utils.NovaScenario):
-
-    def run(self, image, flavor, **kwargs):
-        """Show server details.
-
-        This simple scenario tests the nova show command by retrieving
-        the server details.
-
-        :param image: image to be used to boot an instance
-        :param flavor: flavor to be used to boot an instance
-        :param kwargs: Optional additional arguments for server creation
-
-        :returns: Server details
-        """
-        server = self._boot_server(image, flavor, **kwargs)
-        self._show_server(server)
-
-
-@types.convert(image={"type": "glance_image"},
-               flavor={"type": "nova_flavor"})
-@validation.add("image_valid_on_flavor", flavor_param="flavor",
-                image_param="image")
-@validation.add("required_services", services=[consts.Service.NOVA])
-@validation.add("required_platform", platform="openstack", users=True)
-@scenario.configure(context={"cleanup": ["nova"]},
-                    name="NovaServers.boot_and_get_console_output",
-                    platform="openstack")
-class BootAndGetConsoleOutput(utils.NovaScenario):
-
-    def run(self, image, flavor, length=None, **kwargs):
-        """Get text console output from server.
-
-        This simple scenario tests the nova console-log command by retrieving
-        the text console log output.
-
-        :param image: image to be used to boot an instance
-        :param flavor: flavor to be used to boot an instance
-        :param length: The number of tail log lines you would like to
-                       retrieve. None (default value) or -1 means unlimited
-                       length.
-        :param kwargs: Optional additional arguments for server creation
-
-        :returns: Text console log output for server
-        """
-        server = self._boot_server(image, flavor, **kwargs)
-        self._get_server_console_output(server, length)
-
-
-@types.convert(image={"type": "glance_image"},
-               flavor={"type": "nova_flavor"})
-@validation.add("image_valid_on_flavor", flavor_param="flavor",
-                image_param="image")
-@validation.add("required_services", services=[consts.Service.NOVA])
-@validation.add("required_platform", platform="openstack", users=True)
-@scenario.configure(context={"cleanup": ["nova"]},
-                    name="NovaServers.boot_and_update_server",
-                    platform="openstack")
-class BootAndUpdateServer(utils.NovaScenario):
-
-    def run(self, image, flavor, description=None, **kwargs):
-        """Boot a server, then update its name and description.
-
-        The scenario first creates a server, then updates it.
-        Assumes that cleanup is done elsewhere.
-
-        :param image: image to be used to boot an instance
-        :param flavor: flavor to be used to boot an instance
-        :param description: update the server description
-        :param kwargs: Optional additional arguments for server creation
-        """
-        server = self._boot_server(image, flavor, **kwargs)
-        self._update_server(server, description)
-
-
-@types.convert(image={"type": "glance_image"},
-               flavor={"type": "nova_flavor"})
-@validation.add("image_valid_on_flavor", flavor_param="flavor",
-                image_param="image")
-@validation.add("required_services", services=[consts.Service.NOVA,
-                                               consts.Service.CINDER])
-@validation.add("required_platform", platform="openstack", users=True)
-@scenario.configure(context={"cleanup": ["nova", "cinder"]},
-                    name="NovaServers.boot_server_from_volume_snapshot",
-                    platform="openstack")
-class BootServerFromVolumeSnapshot(utils.NovaScenario,
-                                   cinder_utils.CinderBasic):
-
-    def run(self, image, flavor, volume_size, volume_type=None,
-            auto_assign_nic=False, **kwargs):
-        """Boot a server from a snapshot.
-
-        The scenario first creates a volume and a snapshot from it, then
-        boots a server from the created snapshot.
-        Assumes that cleanup is done elsewhere.
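The length semantics documented for NovaServers.boot_and_get_console_output above, restated as a plain-Python sketch (the tail helper is illustrative only; the real truncation is performed by the Nova API):

    def tail(console_log, length=None):
        lines = console_log.splitlines()
        if length is None or length == -1:
            return lines           # unlimited
        return lines[-length:]     # last <length> lines

    assert tail("a\nb\nc", 2) == ["b", "c"]
    assert tail("a\nb\nc") == ["a", "b", "c"]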
-
-        :param image: image to be used to boot an instance
-        :param flavor: flavor to be used to boot an instance
-        :param volume_size: volume size (in GB)
-        :param volume_type: specifies volume type when there are
-                            multiple backends
-        :param auto_assign_nic: True if NICs should be assigned
-        :param kwargs: Optional additional arguments for server creation
-        """
-        volume = self.cinder.create_volume(volume_size, imageRef=image,
-                                           volume_type=volume_type)
-        snapshot = self.cinder.create_snapshot(volume.id, force=False)
-        block_device_mapping = {"vda": "%s:snap::1" % snapshot.id}
-        self._boot_server(None, flavor, auto_assign_nic=auto_assign_nic,
-                          block_device_mapping=block_device_mapping,
-                          **kwargs)
-
-
-@types.convert(image={"type": "glance_image"},
-               flavor={"type": "nova_flavor"})
-@validation.add("image_valid_on_flavor", flavor_param="flavor",
-                image_param="image")
-@validation.add("required_services", services=[consts.Service.NOVA])
-@validation.add("required_platform", platform="openstack", users=True)
-@validation.add("required_contexts", contexts=("network",))
-@scenario.configure(
-    context={"cleanup": ["nova", "neutron.floatingip"]},
-    name="NovaServers.boot_server_associate_and_dissociate_floating_ip",
-    platform="openstack")
-class BootServerAssociateAndDissociateFloatingIP(utils.NovaScenario):
-    """Benchmark scenarios for the Nova floating IP API."""
-
-    def run(self, image, flavor, **kwargs):
-        """Boot a server, then associate and dissociate a floating IP.
-
-        The scenario first boots a server and creates a floating IP, then
-        associates the floating IP with the server and finally dissociates
-        it.
-
-        :param image: image to be used to boot an instance
-        :param flavor: flavor to be used to boot an instance
-        :param kwargs: Optional additional arguments for server creation
-        """
-        server = self._boot_server(image, flavor, **kwargs)
-        address = network_wrapper.wrap(self.clients, self).create_floating_ip(
-            tenant_id=server.tenant_id)
-        self._associate_floating_ip(server, address["ip"])
-        self._dissociate_floating_ip(server, address["ip"])
-
-
-@types.convert(image={"type": "glance_image"},
-               flavor={"type": "nova_flavor"})
-@validation.add("image_valid_on_flavor", flavor_param="flavor",
-                image_param="image")
-@validation.add("required_services", services=[consts.Service.NOVA])
-@validation.add("required_platform", platform="openstack", users=True)
-@validation.add("required_contexts", contexts=("network",))
-@scenario.configure(context={"cleanup": ["nova"]},
-                    name="NovaServers.boot_server_and_list_interfaces",
-                    platform="openstack")
-class BootServerAndListInterfaces(utils.NovaScenario):
-    def run(self, image, flavor, **kwargs):
-        """Boot a server and list the interfaces attached to it.
-
-        Measure the "nova boot" and "nova interface-list" command
-        performance.
-
-        :param image: ID of the image to be used for server creation
-        :param flavor: ID of the flavor to be used for server creation
-        :param kwargs: Optional arguments for booting the instance
-        """
-        server = self._boot_server(image, flavor, **kwargs)
-        self._list_interfaces(server)
-
-
-@validation.add(
-    "enum", param_name="console_type",
-    values=["novnc", "xvpvnc", "spice-html5", "rdp-html5", "serial",
-            "webmks"])
-@types.convert(image={"type": "glance_image"},
-               flavor={"type": "nova_flavor"})
-@validation.add("image_valid_on_flavor", flavor_param="flavor",
-                image_param="image")
-@validation.add("required_services", services=[consts.Service.NOVA])
-@validation.add("required_platform", platform="openstack", users=True)
-@scenario.configure(context={"cleanup": ["nova"]},
-                    name="NovaServers.boot_and_get_console_url",
-                    platform="openstack")
-class BootAndGetConsoleUrl(utils.NovaScenario):
-
-    def run(self, image, flavor, console_type, **kwargs):
-        """Retrieve the console URL of a server.
-
-        This simple scenario tests retrieving the console URL of a server.
-
-        :param image: image to be used to boot an instance
-        :param flavor: flavor to be used to boot an instance
-        :param console_type: type can be novnc/xvpvnc for protocol vnc;
-                             spice-html5 for protocol spice; rdp-html5 for
-                             protocol rdp; serial for protocol serial;
-                             webmks for protocol mks (since version 2.8).
-        :param kwargs: Optional additional arguments for server creation
-        """
-        server = self._boot_server(image, flavor, **kwargs)
-        self._get_console_url_server(server, console_type)
diff --git a/rally/plugins/openstack/scenarios/nova/services.py b/rally/plugins/openstack/scenarios/nova/services.py
deleted file mode 100644
index 859afdee..00000000
--- a/rally/plugins/openstack/scenarios/nova/services.py
+++ /dev/null
@@ -1,38 +0,0 @@
-# Copyright 2016 IBM Corp.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from rally import consts
-from rally.plugins.openstack import scenario
-from rally.plugins.openstack.scenarios.nova import utils
-from rally.task import validation
-
-
-"""Scenarios for Nova services."""
-
-
-@validation.add("required_services", services=[consts.Service.NOVA])
-@validation.add("required_platform", platform="openstack", admin=True)
-@scenario.configure(name="NovaServices.list_services", platform="openstack")
-class ListServices(utils.NovaScenario):
-
-    def run(self, host=None, binary=None):
-        """List all nova services.
-
-        Measure the "nova service-list" command performance.
-
-        :param host: List nova services on host
-        :param binary: List nova services matching given binary
-        """
-        self._list_services(host, binary)
diff --git a/rally/plugins/openstack/scenarios/nova/utils.py b/rally/plugins/openstack/scenarios/nova/utils.py
deleted file mode 100644
index c80477a8..00000000
--- a/rally/plugins/openstack/scenarios/nova/utils.py
+++ /dev/null
@@ -1,1235 +0,0 @@
-# Copyright 2013: Mirantis Inc.
-# All Rights Reserved.
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import random - -from oslo_config import cfg - -from rally.common.i18n import _ -from rally.common import logging -from rally import exceptions -from rally.plugins.openstack import scenario -from rally.plugins.openstack.scenarios.cinder import utils as cinder_utils -from rally.plugins.openstack.services.image import image as image_service -from rally.task import atomic -from rally.task import utils - -CONF = cfg.CONF -LOG = logging.getLogger(__file__) - - -class NovaScenario(scenario.OpenStackScenario): - """Base class for Nova scenarios with basic atomic actions.""" - - @atomic.action_timer("nova.list_servers") - def _list_servers(self, detailed=True): - """Returns user servers list.""" - return self.clients("nova").servers.list(detailed) - - def _pick_random_nic(self): - """Choose one network from existing ones.""" - ctxt = self.context - nets = [net["id"] - for net in ctxt.get("tenant", {}).get("networks", [])] - if nets: - # NOTE(amaretskiy): Balance servers among networks. - net_idx = self.context["iteration"] % len(nets) - return [{"net-id": nets[net_idx]}] - - @atomic.action_timer("nova.boot_server") - def _boot_server(self, image, flavor, - auto_assign_nic=False, **kwargs): - """Boot a server. - - Returns when the server is actually booted and in "ACTIVE" state. - - If multiple networks created by Network context are present, the first - network found that isn't associated with a floating IP pool is used. 
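_pick_random_nic above balances servers across the tenant networks created by the Network context by keying on the iteration number; despite the name, the choice is deterministic per iteration. A self-contained illustration with hypothetical network IDs:

    nets = ["net-a", "net-b", "net-c"]   # hypothetical tenant network IDs
    for iteration in range(6):
        print(iteration, nets[iteration % len(nets)])
    # 0 net-a, 1 net-b, 2 net-c, 3 net-a, 4 net-b, 5 net-c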
- - :param image: image ID or instance for server creation - :param flavor: int, flavor ID or instance for server creation - :param auto_assign_nic: bool, whether or not to auto assign NICs - :param kwargs: other optional parameters to initialize the server - :returns: nova Server instance - """ - server_name = self.generate_random_name() - secgroup = self.context.get("user", {}).get("secgroup") - if secgroup: - if "security_groups" not in kwargs: - kwargs["security_groups"] = [secgroup["name"]] - elif secgroup["name"] not in kwargs["security_groups"]: - kwargs["security_groups"].append(secgroup["name"]) - - if auto_assign_nic and not kwargs.get("nics", False): - nic = self._pick_random_nic() - if nic: - kwargs["nics"] = nic - - server = self.clients("nova").servers.create( - server_name, image, flavor, **kwargs) - - self.sleep_between(CONF.benchmark.nova_server_boot_prepoll_delay) - server = utils.wait_for_status( - server, - ready_statuses=["ACTIVE"], - update_resource=utils.get_from_manager(), - timeout=CONF.benchmark.nova_server_boot_timeout, - check_interval=CONF.benchmark.nova_server_boot_poll_interval - ) - return server - - def _do_server_reboot(self, server, reboottype): - server.reboot(reboot_type=reboottype) - self.sleep_between(CONF.benchmark.nova_server_pause_prepoll_delay) - utils.wait_for_status( - server, - ready_statuses=["ACTIVE"], - update_resource=utils.get_from_manager(), - timeout=CONF.benchmark.nova_server_reboot_timeout, - check_interval=CONF.benchmark.nova_server_reboot_poll_interval - ) - - @atomic.action_timer("nova.soft_reboot_server") - def _soft_reboot_server(self, server): - """Reboot a server with soft reboot. - - A soft reboot will be issued on the given server upon which time - this method will wait for the server to become active. - - :param server: The server to reboot. - """ - self._do_server_reboot(server, "SOFT") - - @atomic.action_timer("nova.show_server") - def _show_server(self, server): - """Show server details. - - :param server: The server to get details for. - - :returns: Server details - """ - return self.clients("nova").servers.get(server) - - @atomic.action_timer("nova.get_console_output_server") - def _get_server_console_output(self, server, length=None): - """Get text of a console log output from a server. - - :param server: The server whose console output to retrieve - :param length: The number of tail log lines you would like to retrieve. - - :returns: Text console output from server - """ - return self.clients("nova").servers.get_console_output(server, - length=length) - - @atomic.action_timer("nova.get_console_url_server") - def _get_console_url_server(self, server, console_type): - """Retrieve a console url of a server. - - :param server: server to get console url for - :param console_type: type can be novnc/xvpvnc for protocol vnc; - spice-html5 for protocol spice; rdp-html5 for - protocol rdp; serial for protocol serial. - webmks for protocol mks (since version 2.8). - - :returns: An instance of novaclient.base.DictWithMeta - """ - return self.clients("nova").servers.get_console_url(server, - console_type) - - @atomic.action_timer("nova.reboot_server") - def _reboot_server(self, server): - """Reboot a server with hard reboot. - - A reboot will be issued on the given server upon which time - this method will wait for the server to become active. - - :param server: The server to reboot. 
-        """
-        self._do_server_reboot(server, "HARD")
-
-    @atomic.action_timer("nova.rebuild_server")
-    def _rebuild_server(self, server, image, **kwargs):
-        """Rebuild a server with a new image.
-
-        :param server: The server to rebuild.
-        :param image: The new image to rebuild the server with.
-        :param kwargs: Optional additional arguments to pass to the rebuild
-        """
-        server.rebuild(image, **kwargs)
-        self.sleep_between(CONF.benchmark.nova_server_rebuild_prepoll_delay)
-        utils.wait_for_status(
-            server,
-            ready_statuses=["ACTIVE"],
-            update_resource=utils.get_from_manager(),
-            timeout=CONF.benchmark.nova_server_rebuild_timeout,
-            check_interval=CONF.benchmark.nova_server_rebuild_poll_interval
-        )
-
-    @atomic.action_timer("nova.start_server")
-    def _start_server(self, server):
-        """Start the given server.
-
-        A start will be issued for the given server upon which time
-        this method will wait for it to become ACTIVE.
-
-        :param server: The server to start and wait to become ACTIVE.
-        """
-        server.start()
-        utils.wait_for_status(
-            server,
-            ready_statuses=["ACTIVE"],
-            update_resource=utils.get_from_manager(),
-            timeout=CONF.benchmark.nova_server_start_timeout,
-            check_interval=CONF.benchmark.nova_server_start_poll_interval
-        )
-
-    @atomic.action_timer("nova.stop_server")
-    def _stop_server(self, server):
-        """Stop the given server.
-
-        Issues a stop on the given server and waits for the server
-        to become SHUTOFF.
-
-        :param server: The server to stop.
-        """
-        server.stop()
-        utils.wait_for_status(
-            server,
-            ready_statuses=["SHUTOFF"],
-            update_resource=utils.get_from_manager(),
-            timeout=CONF.benchmark.nova_server_stop_timeout,
-            check_interval=CONF.benchmark.nova_server_stop_poll_interval
-        )
-
-    @atomic.action_timer("nova.rescue_server")
-    def _rescue_server(self, server):
-        """Rescue the given server.
-
-        Returns when the server is actually rescued and is in the "RESCUE"
-        state.
-
-        :param server: Server object
-        """
-        server.rescue()
-        self.sleep_between(CONF.benchmark.nova_server_rescue_prepoll_delay)
-        utils.wait_for_status(
-            server,
-            ready_statuses=["RESCUE"],
-            update_resource=utils.get_from_manager(),
-            timeout=CONF.benchmark.nova_server_rescue_timeout,
-            check_interval=CONF.benchmark.nova_server_rescue_poll_interval
-        )
-
-    @atomic.action_timer("nova.unrescue_server")
-    def _unrescue_server(self, server):
-        """Unrescue the given server.
-
-        Returns when the server is unrescued and has become ACTIVE.
-
-        :param server: Server object
-        """
-        server.unrescue()
-        self.sleep_between(CONF.benchmark.nova_server_unrescue_prepoll_delay)
-        utils.wait_for_status(
-            server,
-            ready_statuses=["ACTIVE"],
-            update_resource=utils.get_from_manager(),
-            timeout=CONF.benchmark.nova_server_unrescue_timeout,
-            check_interval=CONF.benchmark.nova_server_unrescue_poll_interval
-        )
-
-    @atomic.action_timer("nova.suspend_server")
-    def _suspend_server(self, server):
-        """Suspend the given server.
-
-        Returns when the server is actually suspended and is in the
-        "SUSPENDED" state.
-
-        :param server: Server object
-        """
-        server.suspend()
-        self.sleep_between(CONF.benchmark.nova_server_suspend_prepoll_delay)
-        utils.wait_for_status(
-            server,
-            ready_statuses=["SUSPENDED"],
-            update_resource=utils.get_from_manager(),
-            timeout=CONF.benchmark.nova_server_suspend_timeout,
-            check_interval=CONF.benchmark.nova_server_suspend_poll_interval
-        )
-
-    @atomic.action_timer("nova.resume_server")
-    def _resume_server(self, server):
-        """Resume the suspended server.
- - Returns when the server is actually resumed and is in the "ACTIVE" - state. - - :param server: Server object - """ - server.resume() - self.sleep_between(CONF.benchmark.nova_server_resume_prepoll_delay) - utils.wait_for_status( - server, - ready_statuses=["ACTIVE"], - update_resource=utils.get_from_manager(), - timeout=CONF.benchmark.nova_server_resume_timeout, - check_interval=CONF.benchmark.nova_server_resume_poll_interval - ) - - @atomic.action_timer("nova.pause_server") - def _pause_server(self, server): - """Pause the live server. - - Returns when the server is actually paused and is in the "PAUSED" - state. - - :param server: Server object - """ - server.pause() - self.sleep_between(CONF.benchmark.nova_server_pause_prepoll_delay) - utils.wait_for_status( - server, - ready_statuses=["PAUSED"], - update_resource=utils.get_from_manager(), - timeout=CONF.benchmark.nova_server_pause_timeout, - check_interval=CONF.benchmark.nova_server_pause_poll_interval - ) - - @atomic.action_timer("nova.unpause_server") - def _unpause_server(self, server): - """Unpause the paused server. - - Returns when the server is actually unpaused and is in the "ACTIVE" - state. - - :param server: Server object - """ - server.unpause() - self.sleep_between(CONF.benchmark.nova_server_pause_prepoll_delay) - utils.wait_for_status( - server, - ready_statuses=["ACTIVE"], - update_resource=utils.get_from_manager(), - timeout=CONF.benchmark.nova_server_unpause_timeout, - check_interval=CONF.benchmark.nova_server_unpause_poll_interval - ) - - @atomic.action_timer("nova.shelve_server") - def _shelve_server(self, server): - """Shelve the given server. - - Returns when the server is actually shelved and is in the - "SHELVED_OFFLOADED" state. - - :param server: Server object - """ - server.shelve() - self.sleep_between(CONF.benchmark.nova_server_pause_prepoll_delay) - utils.wait_for_status( - server, - ready_statuses=["SHELVED_OFFLOADED"], - update_resource=utils.get_from_manager(), - timeout=CONF.benchmark.nova_server_shelve_timeout, - check_interval=CONF.benchmark.nova_server_shelve_poll_interval - ) - - @atomic.action_timer("nova.unshelve_server") - def _unshelve_server(self, server): - """Unshelve the given server. - - Returns when the server is unshelved and is in the "ACTIVE" state. - - :param server: Server object - """ - server.unshelve() - - self.sleep_between(CONF.benchmark. nova_server_unshelve_prepoll_delay) - utils.wait_for_status( - server, - ready_statuses=["ACTIVE"], - update_resource=utils.get_from_manager(), - timeout=CONF.benchmark.nova_server_unshelve_timeout, - check_interval=CONF.benchmark.nova_server_unshelve_poll_interval - ) - - def _delete_server(self, server, force=False): - """Delete the given server. - - Returns when the server is actually deleted. - - :param server: Server object - :param force: If True, force_delete will be used instead of delete. - """ - atomic_name = ("nova.%sdelete_server") % (force and "force_" or "") - with atomic.ActionTimer(self, atomic_name): - if force: - server.force_delete() - else: - server.delete() - - utils.wait_for_status( - server, - ready_statuses=["deleted"], - check_deletion=True, - update_resource=utils.get_from_manager(), - timeout=CONF.benchmark.nova_server_delete_timeout, - check_interval=CONF.benchmark.nova_server_delete_poll_interval - ) - - def _delete_servers(self, servers, force=False): - """Delete multiple servers. - - :param servers: A list of servers to delete - :param force: If True, force_delete will be used instead of delete. 
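_delete_server above composes its atomic action name with the pre-ternary `and/or` idiom. A short sketch of how it evaluates (`"force_" if force else ""` is the modern equivalent):

    assert (True and "force_" or "") == "force_"
    assert (False and "force_" or "") == ""
    # so the timer is recorded as "nova.force_delete_server" when
    # force=True and as "nova.delete_server" otherwise.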
- """ - atomic_name = ("nova.%sdelete_servers") % (force and "force_" or "") - with atomic.ActionTimer(self, atomic_name): - for server in servers: - if force: - server.force_delete() - else: - server.delete() - - for server in servers: - utils.wait_for_status( - server, - ready_statuses=["deleted"], - check_deletion=True, - update_resource=utils.get_from_manager(), - timeout=CONF.benchmark.nova_server_delete_timeout, - check_interval=CONF. - benchmark.nova_server_delete_poll_interval - ) - - @atomic.action_timer("nova.create_server_group") - def _create_server_group(self, **kwargs): - """Create (allocate) a server group. - - :param kwargs: Server group name and policy - - :returns: Nova server group - """ - return self.clients("nova").server_groups.create(**kwargs) - - @atomic.action_timer("nova.get_server_group") - def _get_server_group(self, id): - """Get a specific server group. - - :param id: Unique ID of the server group to get - - :rtype: :class:`ServerGroup` - """ - return self.clients("nova").server_groups.get(id) - - @atomic.action_timer("nova.list_server_groups") - def _list_server_groups(self, all_projects=False): - """Get a list of all server groups. - - :param all_projects: If True, display server groups from all - projects(Admin only) - - :rtype: list of :class:`ServerGroup`. - """ - if all_projects: - return self.admin_clients("nova").server_groups.list(all_projects) - else: - return self.clients("nova").server_groups.list(all_projects) - - @atomic.action_timer("nova.delete_image") - def _delete_image(self, image): - """Delete the given image. - - Returns when the image is actually deleted. - - :param image: Image object - """ - LOG.warning("Method '_delete_image' of NovaScenario class is " - "deprecated since Rally 0.10.0. Use GlanceUtils instead.") - glance = image_service.Image(self._clients, - atomic_inst=self.atomic_actions()) - glance.delete_image(image.id) - check_interval = CONF.benchmark.nova_server_image_delete_poll_interval - with atomic.ActionTimer(self, "glance.wait_for_delete"): - utils.wait_for_status( - image, - ready_statuses=["deleted", "pending_delete"], - check_deletion=True, - update_resource=glance.get_image, - timeout=CONF.benchmark.nova_server_image_delete_timeout, - check_interval=check_interval - ) - - @atomic.action_timer("nova.create_image") - def _create_image(self, server): - """Create an image from the given server - - Uses the server name to name the created image. Returns when the image - is actually created and is in the "Active" state. - - :param server: Server object for which the image will be created - - :returns: Created image object - """ - image_uuid = self.clients("nova").servers.create_image(server, - server.name) - glance = image_service.Image(self._clients, - atomic_inst=self.atomic_actions()) - image = glance.get_image(image_uuid) - check_interval = CONF.benchmark.nova_server_image_create_poll_interval - with atomic.ActionTimer(self, "glance.wait_for_image"): - image = utils.wait_for_status( - image, - ready_statuses=["ACTIVE"], - update_resource=glance.get_image, - timeout=CONF.benchmark.nova_server_image_create_timeout, - check_interval=check_interval - ) - return image - - @atomic.action_timer("nova.list_images") - def _list_images(self, detailed=False, **kwargs): - """List all images. 
-
-        :param detailed: True if the image listing
-                         should contain detailed information
-        :param kwargs: Optional additional arguments for image listing
-
-        :returns: Image list
-        """
-        LOG.warning("Method '_list_images' of NovaScenario class is "
-                    "deprecated since Rally 0.10.0. Use GlanceUtils instead.")
-        glance = image_service.Image(self._clients,
-                                     atomic_inst=self.atomic_actions())
-        return glance.list_images()
-
-    @atomic.action_timer("nova.get_keypair")
-    def _get_keypair(self, keypair):
-        """Get a keypair.
-
-        :param keypair: The ID of the keypair to get.
-        :rtype: :class:`Keypair`
-        """
-        return self.clients("nova").keypairs.get(keypair)
-
-    @atomic.action_timer("nova.create_keypair")
-    def _create_keypair(self, **kwargs):
-        """Create a keypair.
-
-        :returns: Created keypair name
-        """
-        keypair_name = self.generate_random_name()
-        keypair = self.clients("nova").keypairs.create(keypair_name, **kwargs)
-        return keypair.name
-
-    @atomic.action_timer("nova.list_keypairs")
-    def _list_keypairs(self):
-        """Return user keypairs list."""
-        return self.clients("nova").keypairs.list()
-
-    @atomic.action_timer("nova.delete_keypair")
-    def _delete_keypair(self, keypair_name):
-        """Delete a keypair.
-
-        :param keypair_name: The keypair name to delete.
-        """
-        self.clients("nova").keypairs.delete(keypair_name)
-
-    @atomic.action_timer("nova.boot_servers")
-    def _boot_servers(self, image_id, flavor_id, requests, instances_amount=1,
-                      auto_assign_nic=False, **kwargs):
-        """Boot multiple servers.
-
-        Returns when all the servers are actually booted and are in the
-        "Active" state.
-
-        :param image_id: ID of the image to be used for server creation
-        :param flavor_id: ID of the flavor to be used for server creation
-        :param requests: Number of booting requests to perform
-        :param instances_amount: Number of instances to boot per each request
-        :param auto_assign_nic: bool, whether or not to auto assign NICs
-        :param kwargs: other optional parameters to initialize the servers
-
-        :returns: List of created server objects
-        """
-        if auto_assign_nic and not kwargs.get("nics", False):
-            nic = self._pick_random_nic()
-            if nic:
-                kwargs["nics"] = nic
-
-        name_prefix = self.generate_random_name()
-        for i in range(requests):
-            self.clients("nova").servers.create("%s_%d" % (name_prefix, i),
-                                                image_id, flavor_id,
-                                                min_count=instances_amount,
-                                                max_count=instances_amount,
-                                                **kwargs)
-        # NOTE(msdubov): Nova python client returns only one server even when
-        #                min_count > 1, so we have to rediscover all the
-        #                created servers manually.
-        servers = [s for s in self.clients("nova").servers.list()
-                   if s.name.startswith(name_prefix)]
-        self.sleep_between(CONF.benchmark.nova_server_boot_prepoll_delay)
-        servers = [utils.wait_for_status(
-            server,
-            ready_statuses=["ACTIVE"],
-            update_resource=utils.get_from_manager(),
-            timeout=CONF.benchmark.nova_server_boot_timeout,
-            check_interval=CONF.benchmark.nova_server_boot_poll_interval
-        ) for server in servers]
-        return servers
-
-    @atomic.action_timer("nova.associate_floating_ip")
-    def _associate_floating_ip(self, server, address, fixed_address=None):
-        """Add floating IP to an instance
-
-        :param server: The :class:`Server` to add an IP to.
- :param address: The ip address or FloatingIP to add to the instance - :param fixed_address: The fixedIP address the FloatingIP is to be - associated with (optional) - """ - server.add_floating_ip(address, fixed_address=fixed_address) - utils.wait_for(server, - is_ready=self.check_ip_address(address), - update_resource=utils.get_from_manager()) - # Update server data - server.addresses = server.manager.get(server.id).addresses - - @atomic.action_timer("nova.dissociate_floating_ip") - def _dissociate_floating_ip(self, server, address): - """Remove floating IP from an instance - - :param server: The :class:`Server` to add an IP to. - :param address: The ip address or FloatingIP to remove - """ - server.remove_floating_ip(address) - utils.wait_for( - server, - is_ready=self.check_ip_address(address, must_exist=False), - update_resource=utils.get_from_manager() - ) - # Update server data - server.addresses = server.manager.get(server.id).addresses - - @staticmethod - def check_ip_address(address, must_exist=True): - ip_to_check = getattr(address, "ip", address) - - def _check_addr(resource): - for network, addr_list in resource.addresses.items(): - for addr in addr_list: - if ip_to_check == addr["addr"]: - return must_exist - return not must_exist - return _check_addr - - @atomic.action_timer("nova.resize") - def _resize(self, server, flavor): - server.resize(flavor) - utils.wait_for_status( - server, - ready_statuses=["VERIFY_RESIZE"], - update_resource=utils.get_from_manager(), - timeout=CONF.benchmark.nova_server_resize_timeout, - check_interval=CONF.benchmark.nova_server_resize_poll_interval - ) - - @atomic.action_timer("nova.resize_confirm") - def _resize_confirm(self, server, status="ACTIVE"): - server.confirm_resize() - utils.wait_for_status( - server, - ready_statuses=[status], - update_resource=utils.get_from_manager(), - timeout=CONF.benchmark.nova_server_resize_confirm_timeout, - check_interval=( - CONF.benchmark.nova_server_resize_confirm_poll_interval) - ) - - @atomic.action_timer("nova.resize_revert") - def _resize_revert(self, server, status="ACTIVE"): - server.revert_resize() - utils.wait_for_status( - server, - ready_statuses=[status], - update_resource=utils.get_from_manager(), - timeout=CONF.benchmark.nova_server_resize_revert_timeout, - check_interval=( - CONF.benchmark.nova_server_resize_revert_poll_interval) - ) - - def _update_volume_resource(self, resource): - cinder_service = cinder_utils.CinderBasic(self.context) - return cinder_service.cinder.get_volume(resource.id) - - @atomic.action_timer("nova.attach_volume") - def _attach_volume(self, server, volume, device=None): - server_id = server.id - volume_id = volume.id - attachment = self.clients("nova").volumes.create_server_volume( - server_id, volume_id, device) - utils.wait_for_status( - volume, - ready_statuses=["in-use"], - update_resource=self._update_volume_resource, - timeout=CONF.benchmark.nova_server_resize_revert_timeout, - check_interval=( - CONF.benchmark.nova_server_resize_revert_poll_interval) - ) - return attachment - - @atomic.action_timer("nova.list_attachments") - def _list_attachments(self, server_id): - """Get a list of all the attached volumes for the given server ID. 
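check_ip_address above returns a predicate over a refreshed server resource: it scans every network's address list and reports whether the IP is present (must_exist=True) or absent (must_exist=False). A self-contained sketch with a hypothetical server object:

    class FakeServer(object):
        addresses = {"private": [{"addr": "10.0.0.5"}],
                     "public": [{"addr": "172.24.4.10"}]}

    has_ip = NovaScenario.check_ip_address("172.24.4.10")
    assert has_ip(FakeServer()) is True
    ip_gone = NovaScenario.check_ip_address("10.0.0.99", must_exist=False)
    assert ip_gone(FakeServer()) is True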
-
-        :param server_id: The ID of the server
-        :rtype: list of :class:`Volume`
-        """
-        return self.clients("nova").volumes.get_server_volumes(server_id)
-
-    @atomic.action_timer("nova.detach_volume")
-    def _detach_volume(self, server, volume, attachment=None):
-        server_id = server.id
-        # NOTE(chenhb): Recommend the use of attachment. The use of
-        #               volume.id is retained mainly for backward
-        #               compatibility.
-        attachment_id = attachment.id if attachment else volume.id
-
-        self.clients("nova").volumes.delete_server_volume(server_id,
-                                                          attachment_id)
-        utils.wait_for_status(
-            volume,
-            ready_statuses=["available"],
-            update_resource=self._update_volume_resource,
-            timeout=CONF.benchmark.nova_detach_volume_timeout,
-            check_interval=CONF.benchmark.nova_detach_volume_poll_interval
-        )
-
-    @atomic.action_timer("nova.live_migrate")
-    def _live_migrate(self, server, target_host, block_migration=False,
-                      disk_over_commit=False, skip_host_check=False):
-        """Run live migration of the given server.
-
-        :param server: Server object
-        :param target_host: Specifies the target compute node to migrate
-        :param block_migration: Specifies the migration type
-        :param disk_over_commit: Specifies whether to overcommit migrated
-                                 instance or not
-        :param skip_host_check: Specifies whether to verify the targeted host
-                                availability
-        """
-        server_admin = self.admin_clients("nova").servers.get(server.id)
-        host_pre_migrate = getattr(server_admin, "OS-EXT-SRV-ATTR:host")
-        server_admin.live_migrate(target_host,
-                                  block_migration=block_migration,
-                                  disk_over_commit=disk_over_commit)
-        utils.wait_for_status(
-            server,
-            ready_statuses=["ACTIVE"],
-            update_resource=utils.get_from_manager(),
-            timeout=CONF.benchmark.nova_server_live_migrate_timeout,
-            check_interval=(
-                CONF.benchmark.nova_server_live_migrate_poll_interval)
-        )
-        server_admin = self.admin_clients("nova").servers.get(server.id)
-        if (host_pre_migrate == getattr(server_admin, "OS-EXT-SRV-ATTR:host")
-                and not skip_host_check):
-            raise exceptions.RallyException(_(
-                "Live Migration failed: Migration complete "
-                "but instance did not change host: %s") % host_pre_migrate)
-
-    @atomic.action_timer("nova.find_host_to_migrate")
-    def _find_host_to_migrate(self, server):
-        """Find a compute node for live migration.
-
-        :param server: Server object
-        """
-        server_admin = self.admin_clients("nova").servers.get(server.id)
-        host = getattr(server_admin, "OS-EXT-SRV-ATTR:host")
-        az_name = getattr(server_admin, "OS-EXT-AZ:availability_zone")
-        az = None
-        for a in self.admin_clients("nova").availability_zones.list():
-            if az_name == a.zoneName:
-                az = a
-                break
-        try:
-            new_host = random.choice(
-                [key for key, value in az.hosts.items()
-                 if key != host and
-                    value.get("nova-compute", {}).get("available", False)])
-            return new_host
-        except IndexError:
-            raise exceptions.RallyException(
-                _("Live Migration failed: No valid host found to migrate"))
-
-    @atomic.action_timer("nova.migrate")
-    def _migrate(self, server, skip_host_check=False):
-        """Run migration of the given server.
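_find_host_to_migrate above picks the target compute node with one rule: any host in the server's availability zone, other than the current one, whose nova-compute service reports itself available. The same selection reduced to plain data (hypothetical zone layout):

    import random

    az_hosts = {
        "node-1": {"nova-compute": {"available": True}},
        "node-2": {"nova-compute": {"available": True}},
        "node-3": {"nova-compute": {"available": False}},
    }
    current = "node-1"
    candidates = [h for h, svc in az_hosts.items()
                  if h != current
                  and svc.get("nova-compute", {}).get("available", False)]
    print(random.choice(candidates))  # only "node-2" qualifies here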
-
-        :param server: Server object
-        :param skip_host_check: Specifies whether to verify the targeted host
-                                availability
-        """
-        server_admin = self.admin_clients("nova").servers.get(server.id)
-        host_pre_migrate = getattr(server_admin, "OS-EXT-SRV-ATTR:host")
-        server_admin.migrate()
-        utils.wait_for_status(
-            server,
-            ready_statuses=["VERIFY_RESIZE"],
-            update_resource=utils.get_from_manager(),
-            timeout=CONF.benchmark.nova_server_migrate_timeout,
-            check_interval=(
-                CONF.benchmark.nova_server_migrate_poll_interval)
-        )
-        if not skip_host_check:
-            server_admin = self.admin_clients("nova").servers.get(server.id)
-            host_after_migrate = getattr(server_admin, "OS-EXT-SRV-ATTR:host")
-            if host_pre_migrate == host_after_migrate:
-                raise exceptions.RallyException(
-                    _("Migration failed: Migration complete but instance"
-                      " did not change host: %s") % host_pre_migrate)
-
-    @atomic.action_timer("nova.add_server_secgroups")
-    def _add_server_secgroups(self, server, security_group,
-                              atomic_action=False):
-        """Add a security group to a server.
-
-        :param server: Server object
-        :param security_group: Security group to add to the server
-        :returns: An instance of novaclient.base.DictWithMeta
-        """
-        return self.clients("nova").servers.add_security_group(server,
-                                                               security_group)
-
-    @atomic.action_timer("nova.list_hypervisors")
-    def _list_hypervisors(self, detailed=True):
-        """List hypervisors."""
-        return self.admin_clients("nova").hypervisors.list(detailed)
-
-    @atomic.action_timer("nova.statistics_hypervisors")
-    def _statistics_hypervisors(self):
-        """Get hypervisor statistics over all compute nodes.
-
-        :returns: Hypervisor statistics
-        """
-        return self.admin_clients("nova").hypervisors.statistics()
-
-    @atomic.action_timer("nova.get_hypervisor")
-    def _get_hypervisor(self, hypervisor):
-        """Get a specific hypervisor.
-
-        :param hypervisor: Hypervisor to get.
-        :returns: Hypervisor object
-        """
-        return self.admin_clients("nova").hypervisors.get(hypervisor)
-
-    @atomic.action_timer("nova.search_hypervisors")
-    def _search_hypervisors(self, hypervisor_match, servers=False):
-        """List all servers belonging to specific hypervisor.
-
-        :param hypervisor_match: Hypervisor's host name.
-        :param servers: If True, server information is also retrieved.
-        :returns: Hypervisor object
-        """
-        return self.admin_clients("nova").hypervisors.search(hypervisor_match,
-                                                             servers=servers)
-
-    @atomic.action_timer("nova.lock_server")
-    def _lock_server(self, server):
-        """Lock the given server.
-
-        :param server: Server to lock
-        """
-        server.lock()
-
-    @atomic.action_timer("nova.uptime_hypervisor")
-    def _uptime_hypervisor(self, hypervisor):
-        """Display the uptime of the specified hypervisor.
-
-        :param hypervisor: Hypervisor to get.
-        :returns: Hypervisor object
-        """
-        return self.admin_clients("nova").hypervisors.uptime(hypervisor)
-
-    @atomic.action_timer("nova.unlock_server")
-    def _unlock_server(self, server):
-        """Unlock the given server.
-
-        :param server: Server to unlock
-        """
-        server.unlock()
-
-    @atomic.action_timer("nova.delete_network")
-    def _delete_network(self, net_id):
-        """Delete nova network.
-
-        :param net_id: The nova-network ID to delete
-        """
-        return self.admin_clients("nova").networks.delete(net_id)
-
-    @atomic.action_timer("nova.list_flavors")
-    def _list_flavors(self, detailed=True, **kwargs):
-        """List all flavors.
-
-        :param kwargs: Optional additional arguments for flavor listing
-        :param detailed: True if the flavor listing
-                         should contain detailed information
-        :returns: flavors list
-        """
-        return self.clients("nova").flavors.list(detailed, **kwargs)
-
-    @atomic.action_timer("nova.set_flavor_keys")
-    def _set_flavor_keys(self, flavor, extra_specs):
-        """Set flavor keys.
-
-        :param flavor: flavor to set keys
-        :param extra_specs: additional arguments for flavor set keys
-        """
-        return flavor.set_keys(extra_specs)
-
-    @atomic.action_timer("nova.list_agents")
-    def _list_agents(self, hypervisor=None):
-        """List all nova-agent builds.
-
-        :param hypervisor: The nova-hypervisor ID on which we need to list all
-                           the builds
-        :returns: Nova-agent build list
-        """
-        return self.admin_clients("nova").agents.list(hypervisor)
-
-    @atomic.action_timer("nova.list_aggregates")
-    def _list_aggregates(self):
-        """Returns list of all os-aggregates."""
-        return self.admin_clients("nova").aggregates.list()
-
-    @atomic.action_timer("nova.list_availability_zones")
-    def _list_availability_zones(self, detailed=True):
-        """List availability-zones.
-
-        :param detailed: True if the availability-zone listing should contain
-                         detailed information
-        :returns: Availability-zone list
-        """
-        return self.admin_clients("nova").availability_zones.list(detailed)
-
-    @atomic.action_timer("nova.list_hosts")
-    def _list_hosts(self, zone=None, service=None):
-        """List nova hosts.
-
-        :param zone: List all hosts in the given nova availability-zone ID
-        :param service: Name of service type to filter
-        :returns: Nova host list
-        """
-        hosts = self.admin_clients("nova").hosts.list(zone)
-        if service:
-            hosts = [host for host in hosts if host.service == service]
-        return hosts
-
-    @atomic.action_timer("nova.list_interfaces")
-    def _list_interfaces(self, server):
-        """List interfaces attached to a server.
-
-        :param server: Instance or ID of server.
-        :returns: Server interface list
-        """
-        return self.clients("nova").servers.interface_list(server)
-
-    @atomic.action_timer("nova.get_host")
-    def _get_host(self, host_name):
-        """Describe a specific host.
-
-        :param host_name: host name to get.
-        :returns: host object
-        """
-        return self.admin_clients("nova").hosts.get(host_name)
-
-    @atomic.action_timer("nova.list_services")
-    def _list_services(self, host=None, binary=None):
-        """Return all nova service details.
-
-        :param host: List all nova services on host
-        :param binary: List all nova services matching given binary
-        """
-        return self.admin_clients("nova").services.list(host, binary)
-
-    @atomic.action_timer("nova.create_flavor")
-    def _create_flavor(self, ram, vcpus, disk, **kwargs):
-        """Create a flavor.
-
-        :param ram: Memory in MB for the flavor
-        :param vcpus: Number of VCPUs for the flavor
-        :param disk: Size of local disk in GB
-        :param kwargs: Optional additional arguments for flavor creation
-        """
-        name = self.generate_random_name()
-        return self.admin_clients("nova").flavors.create(name, ram, vcpus,
-                                                         disk, **kwargs)
-
-    @atomic.action_timer("nova.delete_flavor")
-    def _delete_flavor(self, flavor):
-        """Delete a flavor.
-
-        :param flavor: The ID of the :class:`Flavor`
-        :returns: An instance of novaclient.base.TupleWithMeta
-        """
-        return self.admin_clients("nova").flavors.delete(flavor)
-
-    @atomic.action_timer("nova.list_flavor_access")
-    def _list_flavor_access(self, flavor):
-        """List access-rules for a non-public flavor.
- - :param flavor: List access rules for flavor instance or flavor ID - """ - return self.admin_clients("nova").flavor_access.list(flavor=flavor) - - @atomic.action_timer("nova.add_tenant_access") - def _add_tenant_access(self, flavor, tenant): - """Add a tenant to the given flavor access list. - - :param flavor: name or id of the object flavor - :param tenant: id of the object tenant - :returns: access rules for flavor instance or flavor ID - """ - return self.admin_clients("nova").flavor_access.add_tenant_access( - flavor, tenant) - - @atomic.action_timer("nova.update_server") - def _update_server(self, server, description=None): - """update the server's name and description. - - :param server: Server object - :param description: update the server description - :returns: The updated server - """ - new_name = self.generate_random_name() - if description: - return server.update(name=new_name, - description=description) - else: - return server.update(name=new_name) - - @atomic.action_timer("nova.get_flavor") - def _get_flavor(self, flavor_id): - """Show a flavor - - :param flavor_id: The flavor ID to get - """ - return self.admin_clients("nova").flavors.get(flavor_id) - - @atomic.action_timer("nova.create_aggregate") - def _create_aggregate(self, availability_zone): - """Create a new aggregate. - - :param availability_zone: The availability zone of the aggregate - :returns: The created aggregate - """ - aggregate_name = self.generate_random_name() - return self.admin_clients("nova").aggregates.create(aggregate_name, - availability_zone) - - @atomic.action_timer("nova.get_aggregate_details") - def _get_aggregate_details(self, aggregate): - """Get details of the specified aggregate. - - :param aggregate: The aggregate to get details - :returns: Detailed information of aggregate - """ - return self.admin_clients("nova").aggregates.get_details(aggregate) - - @atomic.action_timer("nova.delete_aggregate") - def _delete_aggregate(self, aggregate): - """Delete the specified aggregate. - - :param aggregate: The aggregate to delete - :returns: An instance of novaclient.base.TupleWithMeta - """ - return self.admin_clients("nova").aggregates.delete(aggregate) - - @atomic.action_timer("nova.bind_actions") - def _bind_actions(self): - actions = ["hard_reboot", "soft_reboot", "stop_start", - "rescue_unrescue", "pause_unpause", "suspend_resume", - "lock_unlock", "shelve_unshelve"] - action_builder = utils.ActionBuilder(actions) - action_builder.bind_action("hard_reboot", self._reboot_server) - action_builder.bind_action("soft_reboot", self._soft_reboot_server) - action_builder.bind_action("stop_start", - self._stop_and_start_server) - action_builder.bind_action("rescue_unrescue", - self._rescue_and_unrescue_server) - action_builder.bind_action("pause_unpause", - self._pause_and_unpause_server) - action_builder.bind_action("suspend_resume", - self._suspend_and_resume_server) - action_builder.bind_action("lock_unlock", - self._lock_and_unlock_server) - action_builder.bind_action("shelve_unshelve", - self._shelve_and_unshelve_server) - - return action_builder - - @atomic.action_timer("nova.stop_and_start_server") - def _stop_and_start_server(self, server): - """Stop and then start the given server. - - A stop will be issued on the given server upon which time - this method will wait for the server to become 'SHUTOFF'. - Once the server is SHUTOFF a start will be issued and this - method will wait for the server to become 'ACTIVE' again. - - :param server: The server to stop and then start. 
-
-        """
-        self._stop_server(server)
-        self._start_server(server)
-
-    @atomic.action_timer("nova.rescue_and_unrescue_server")
-    def _rescue_and_unrescue_server(self, server):
-        """Rescue and then unrescue the given server.
-
-        A rescue will be issued on the given server upon which time
-        this method will wait for the server to become 'RESCUE'.
-        Once the server is RESCUE an unrescue will be issued and
-        this method will wait for the server to become 'ACTIVE'
-        again.
-
-        :param server: The server to rescue and then unrescue.
-
-        """
-        self._rescue_server(server)
-        self._unrescue_server(server)
-
-    @atomic.action_timer("nova.pause_and_unpause_server")
-    def _pause_and_unpause_server(self, server):
-        """Pause and then unpause the given server.
-
-        A pause will be issued on the given server upon which time
-        this method will wait for the server to become 'PAUSED'.
-        Once the server is PAUSED an unpause will be issued and
-        this method will wait for the server to become 'ACTIVE'
-        again.
-
-        :param server: The server to pause and then unpause.
-
-        """
-        self._pause_server(server)
-        self._unpause_server(server)
-
-    @atomic.action_timer("nova.suspend_and_resume_server")
-    def _suspend_and_resume_server(self, server):
-        """Suspend and then resume the given server.
-
-        A suspend will be issued on the given server upon which time
-        this method will wait for the server to become 'SUSPENDED'.
-        Once the server is SUSPENDED a resume will be issued and
-        this method will wait for the server to become 'ACTIVE'
-        again.
-
-        :param server: The server to suspend and then resume.
-
-        """
-        self._suspend_server(server)
-        self._resume_server(server)
-
-    @atomic.action_timer("nova.lock_and_unlock_server")
-    def _lock_and_unlock_server(self, server):
-        """Lock and then unlock the given server.
-
-        A lock will be issued on the given server upon which time
-        this method will wait for the server to become locked.
-        Once the server is locked an unlock will be issued.
-
-        :param server: The server to lock and then unlock.
-
-        """
-        self._lock_server(server)
-        self._unlock_server(server)
-
-    @atomic.action_timer("nova.shelve_and_unshelve_server")
-    def _shelve_and_unshelve_server(self, server):
-        """Shelve and then unshelve the given server.
-
-        A shelve will be issued on the given server upon which time
-        this method will wait for the server to become
-        'SHELVED_OFFLOADED'. Once the server is 'SHELVED_OFFLOADED' an
-        unshelve will be issued and this method will wait for the
-        server to become 'ACTIVE' again.
-
-        :param server: The server to shelve and then unshelve.
-
-        """
-        self._shelve_server(server)
-        self._unshelve_server(server)
-
-    @atomic.action_timer("nova.update_aggregate")
-    def _update_aggregate(self, aggregate):
-        """Update the aggregate's name and availability_zone.
-
-        :param aggregate: The aggregate to update
-        :returns: The updated aggregate
-        """
-        aggregate_name = self.generate_random_name()
-        availability_zone = self.generate_random_name()
-        values = {"name": aggregate_name,
-                  "availability_zone": availability_zone}
-        return self.admin_clients("nova").aggregates.update(aggregate,
-                                                            values)
-
-    @atomic.action_timer("nova.aggregate_add_host")
-    def _aggregate_add_host(self, aggregate, host):
-        """Add a host to the given host aggregate.
-
-        :param aggregate: The aggregate to add the host to
-        :param host: The host to add to the aggregate
-        :returns: The aggregate the host has been added to
-        """
-        return self.admin_clients("nova").aggregates.add_host(aggregate,
-                                                              host)
-
-    @atomic.action_timer("nova.aggregate_remove_host")
-    def _aggregate_remove_host(self, aggregate, host):
-        """Remove a host from an aggregate.
-
-        :param aggregate: The aggregate to remove the host from
-        :param host: The host to remove
-        :returns: The aggregate the host has been removed from
-        """
-        return self.admin_clients("nova").aggregates.remove_host(aggregate,
-                                                                 host)
-
-    @atomic.action_timer("nova.aggregate_set_metadata")
-    def _aggregate_set_metadata(self, aggregate, metadata):
-        """Set metadata on an aggregate.
-
-        :param aggregate: The aggregate to set metadata on
-        :param metadata: The metadata to be set
-        :returns: The aggregate with the metadata set
-        """
-        return self.admin_clients("nova").aggregates.set_metadata(aggregate,
-                                                                  metadata)
-
-    @atomic.action_timer("nova.attach_interface")
-    def _attach_interface(self, server, port_id=None,
-                          net_id=None, fixed_ip=None):
-        """Attach a network interface to an instance.
-
-        :param server: The :class:`Server` (or its ID) to attach to.
-        :param port_id: The port to attach.
-        :param net_id: The network to attach.
-        :param fixed_ip: The fixed IP address to attach.
-        :returns: The interface that has been attached to the server
-        """
-        return self.clients("nova").servers.interface_attach(server,
-                                                             port_id, net_id,
-                                                             fixed_ip)
diff --git a/rally/plugins/openstack/scenarios/quotas/__init__.py b/rally/plugins/openstack/scenarios/quotas/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/rally/plugins/openstack/scenarios/quotas/quotas.py b/rally/plugins/openstack/scenarios/quotas/quotas.py
deleted file mode 100644
index dfe2b6e9..00000000
--- a/rally/plugins/openstack/scenarios/quotas/quotas.py
+++ /dev/null
@@ -1,141 +0,0 @@
-# Copyright 2014: Kylin Cloud
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from rally import consts
-from rally.plugins.openstack import scenario
-from rally.plugins.openstack.scenarios.quotas import utils
-from rally.task import validation
-
-"""Scenarios for quotas."""
-
-
-@validation.add("required_services", services=[consts.Service.NOVA])
-@validation.add("required_platform", platform="openstack",
-                admin=True, users=True)
-@scenario.configure(context={"admin_cleanup": ["nova.quotas"]},
-                    name="Quotas.nova_update",
-                    platform="openstack")
-class NovaUpdate(utils.QuotasScenario):
-
-    def run(self, max_quota=1024):
-        """Update quotas for Nova.
-
-        :param max_quota: Max value to be updated for quota.
- """ - - self._update_quotas("nova", self.context["tenant"]["id"], - max_quota) - - -@validation.add("required_services", services=[consts.Service.NOVA]) -@validation.add("required_platform", platform="openstack", - admin=True, users=True) -@scenario.configure(context={"admin_cleanup": ["nova.quotas"]}, - name="Quotas.nova_update_and_delete", platform="openstack") -class NovaUpdateAndDelete(utils.QuotasScenario): - - def run(self, max_quota=1024): - """Update and delete quotas for Nova. - - :param max_quota: Max value to be updated for quota. - """ - - self._update_quotas("nova", self.context["tenant"]["id"], - max_quota) - self._delete_quotas("nova", self.context["tenant"]["id"]) - - -@validation.add("required_services", services=[consts.Service.CINDER]) -@validation.add("required_platform", platform="openstack", - admin=True, users=True) -@scenario.configure(context={"admin_cleanup": ["cinder.quotas"]}, - name="Quotas.cinder_update", platform="openstack") -class CinderUpdate(utils.QuotasScenario): - - def run(self, max_quota=1024): - """Update quotas for Cinder. - - :param max_quota: Max value to be updated for quota. - """ - - self._update_quotas("cinder", self.context["tenant"]["id"], - max_quota) - - -@validation.add("required_services", services=[consts.Service.CINDER]) -@validation.add("required_platform", platform="openstack", - admin=True, users=True) -@scenario.configure(context={"admin_cleanup": ["cinder.quotas"]}, - name="Quotas.cinder_get", platform="openstack") -class CinderGet(utils.QuotasScenario): - - def run(self): - """Get quotas for Cinder. - - Measure the "cinder quota-show" command performance - - """ - self._get_quotas("cinder", self.context["tenant"]["id"]) - - -@validation.add("required_services", services=[consts.Service.CINDER]) -@validation.add("required_platform", platform="openstack", - admin=True, users=True) -@scenario.configure(context={"admin_cleanup": ["cinder.quotas"]}, - name="Quotas.cinder_update_and_delete", - platform="openstack") -class CinderUpdateAndDelete(utils.QuotasScenario): - - def run(self, max_quota=1024): - """Update and Delete quotas for Cinder. - - :param max_quota: Max value to be updated for quota. - """ - - self._update_quotas("cinder", self.context["tenant"]["id"], - max_quota) - self._delete_quotas("cinder", self.context["tenant"]["id"]) - - -@validation.add("required_services", - services=[consts.Service.NEUTRON]) -@validation.add("required_platform", platform="openstack", - admin=True, users=True) -@scenario.configure(context={"admin_cleanup": ["neutron.quota"]}, - name="Quotas.neutron_update", platform="openstack") -class NeutronUpdate(utils.QuotasScenario): - - def run(self, max_quota=1024): - """Update quotas for neutron. - - :param max_quota: Max value to be updated for quota. 
- """ - - quota_update_fn = self.admin_clients("neutron").update_quota - self._update_quotas("neutron", self.context["tenant"]["id"], - max_quota, quota_update_fn) - - -@validation.add("required_services", services=[consts.Service.NOVA]) -@validation.add("required_platform", platform="openstack", - admin=True, users=True) -@scenario.configure(context={"admin_cleanup": ["nova.quotas"]}, - name="Quotas.nova_get", platform="openstack") -class NovaGet(utils.QuotasScenario): - - def run(self): - """Get quotas for nova.""" - - self._get_quotas("nova", self.context["tenant"]["id"]) diff --git a/rally/plugins/openstack/scenarios/quotas/utils.py b/rally/plugins/openstack/scenarios/quotas/utils.py deleted file mode 100644 index 5c687bdf..00000000 --- a/rally/plugins/openstack/scenarios/quotas/utils.py +++ /dev/null @@ -1,89 +0,0 @@ -# Copyright 2014: Kylin Cloud -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import random - -from rally.plugins.openstack import scenario -from rally.task import atomic - - -class QuotasScenario(scenario.OpenStackScenario): - """Base class for quotas scenarios with basic atomic actions.""" - - @atomic.action_timer("quotas.update_quotas") - def _update_quotas(self, component, tenant_id, max_quota=1024, - quota_update_fn=None): - """Updates quotas. - - :param component: Component for the quotas. - :param tenant_id: The project_id for the quotas to be updated. - :param max_quota: Max value to be updated for quota. - :param quota_update_fn: Client quota update function. - - Standard OpenStack clients use quotas.update(). - Use `quota_update_fn` to override for non-standard clients. - - :returns: Updated quotas dictionary. - """ - quotas = self._generate_quota_values(max_quota, component) - if quota_update_fn: - return quota_update_fn(tenant_id, **quotas) - return self.admin_clients(component).quotas.update(tenant_id, **quotas) - - @atomic.action_timer("quotas.delete_quotas") - def _delete_quotas(self, component, tenant_id): - """Delete quotas. - - :param component: Component for the quotas. - :param tenant_id: The project_id for the quotas to be updated. 
- """ - self.admin_clients(component).quotas.delete(tenant_id) - - def _generate_quota_values(self, max_quota, component): - quotas = {} - if component == "nova": - quotas = { - "metadata_items": random.randint(-1, max_quota), - "key_pairs": random.randint(-1, max_quota), - "injected_file_content_bytes": random.randint(-1, max_quota), - "injected_file_path_bytes": random.randint(-1, max_quota), - "ram": random.randint(-1, max_quota), - "instances": random.randint(-1, max_quota), - "injected_files": random.randint(-1, max_quota), - "cores": random.randint(-1, max_quota) - } - elif component == "cinder": - quotas = { - "volumes": random.randint(-1, max_quota), - "snapshots": random.randint(-1, max_quota), - "gigabytes": random.randint(-1, max_quota), - } - elif component == "neutron": - quota = {} - for key in ["network", "subnet", "port", "router", "floatingip", - "security_group", "security_group_rule"]: - quota[key] = random.randint(-1, max_quota) - quotas = {"body": {"quota": quota}} - return quotas - - @atomic.action_timer("quotas.get_quotas") - def _get_quotas(self, component, tenant_id): - """Get quotas for a project. - - :param component: Openstack component for the quotas. - :param tenant_id: The project_id for the quotas to show. - :return: Get quotas for a project. - """ - return self.admin_clients(component).quotas.get(tenant_id) diff --git a/rally/plugins/openstack/scenarios/sahara/__init__.py b/rally/plugins/openstack/scenarios/sahara/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/rally/plugins/openstack/scenarios/sahara/clusters.py b/rally/plugins/openstack/scenarios/sahara/clusters.py deleted file mode 100644 index beaf9fa3..00000000 --- a/rally/plugins/openstack/scenarios/sahara/clusters.py +++ /dev/null @@ -1,231 +0,0 @@ -# Copyright 2014: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
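Editor's note on the quota helpers removed above: `_generate_quota_values` returns a flat kwargs dict for Nova and Cinder, but wraps the Neutron values in a `{"body": {"quota": {...}}}` envelope, because the Neutron client's `update_quota` takes a request body rather than per-quota keyword arguments, which is also why Neutron callers pass `quota_update_fn`. A minimal standalone sketch of the same pattern; the helper names and the `component_client` object are illustrative placeholders, not Rally code:

    import random

    def generate_cinder_quotas(max_quota=1024):
        # Same shape as QuotasScenario._generate_quota_values("cinder", ...).
        # -1 means "unlimited" for OpenStack quotas, hence the lower bound.
        return {
            "volumes": random.randint(-1, max_quota),
            "snapshots": random.randint(-1, max_quota),
            "gigabytes": random.randint(-1, max_quota),
        }

    def update_quotas(component_client, tenant_id, quotas, quota_update_fn=None):
        # Mirrors _update_quotas: most clients expose quotas.update(tenant_id, **kw);
        # Neutron supplies its own update function via quota_update_fn instead.
        if quota_update_fn:
            return quota_update_fn(tenant_id, **quotas)
        return component_client.quotas.update(tenant_id, **quotas)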
- -from rally.common import logging -from rally import consts -from rally.plugins.openstack import scenario -from rally.plugins.openstack.scenarios.sahara import utils -from rally.task import types -from rally.task import validation - -LOG = logging.getLogger(__name__) - -"""Scenarios for Sahara clusters.""" - - -@types.convert(flavor={"type": "nova_flavor"}, - master_flavor={"type": "nova_flavor"}, - worker_flavor={"type": "nova_flavor"}, - neutron_net={"type": "neutron_network"}, - floating_ip_pool={"type": "neutron_network"}) -@validation.flavor_exists("master_flavor") -@validation.flavor_exists("worker_flavor") -@validation.add("required_contexts", contexts=["users", "sahara_image"]) -@validation.add("number", param_name="workers_count", minval=1, - integer_only=True) -@validation.add("required_services", services=[consts.Service.SAHARA]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup": ["sahara"]}, - name="SaharaClusters.create_and_delete_cluster", - platform="openstack") -class CreateAndDeleteCluster(utils.SaharaScenario): - - def run(self, workers_count, plugin_name, hadoop_version, - master_flavor=None, worker_flavor=None, flavor=None, - floating_ip_pool=None, volumes_per_node=None, - volumes_size=None, auto_security_group=None, - security_groups=None, node_configs=None, - cluster_configs=None, enable_anti_affinity=False, - enable_proxy=False, use_autoconfig=True): - """Launch and delete a Sahara Cluster. - - This scenario launches a Hadoop cluster, waits until it becomes - 'Active' and deletes it. - - :param flavor: Nova flavor that will be for nodes in the - created node groups. Deprecated. - :param master_flavor: Nova flavor that will be used for the master - instance of the cluster - :param worker_flavor: Nova flavor that will be used for the workers of - the cluster - :param workers_count: number of worker instances in a cluster - :param plugin_name: name of a provisioning plugin - :param hadoop_version: version of Hadoop distribution supported by - the specified plugin. - :param floating_ip_pool: floating ip pool name from which Floating - IPs will be allocated. Sahara will determine - automatically how to treat this depending on - its own configurations. Defaults to None - because in some cases Sahara may work w/o - Floating IPs. - :param volumes_per_node: number of Cinder volumes that will be - attached to every cluster node - :param volumes_size: size of each Cinder volume in GB - :param auto_security_group: boolean value. If set to True Sahara will - create a Security Group for each Node Group - in the Cluster automatically. - :param security_groups: list of security groups that will be used - while creating VMs. If auto_security_group - is set to True, this list can be left empty. - :param node_configs: config dict that will be passed to each Node - Group - :param cluster_configs: config dict that will be passed to the - Cluster - :param enable_anti_affinity: If set to true the vms will be scheduled - one per compute node. - :param enable_proxy: Use Master Node of a Cluster as a Proxy node and - do not assign floating ips to workers. - :param use_autoconfig: If True, instances of the node group will be - automatically configured during cluster - creation. 
If False, the configuration values - should be specify manually - """ - - image_id = self.context["tenant"]["sahara"]["image"] - - LOG.debug("Using Image: %s" % image_id) - - cluster = self._launch_cluster( - flavor_id=flavor, - master_flavor_id=master_flavor, - worker_flavor_id=worker_flavor, - image_id=image_id, - workers_count=workers_count, - plugin_name=plugin_name, - hadoop_version=hadoop_version, - floating_ip_pool=floating_ip_pool, - volumes_per_node=volumes_per_node, - volumes_size=volumes_size, - auto_security_group=auto_security_group, - security_groups=security_groups, - node_configs=node_configs, - cluster_configs=cluster_configs, - enable_anti_affinity=enable_anti_affinity, - enable_proxy=enable_proxy, - use_autoconfig=use_autoconfig) - - self._delete_cluster(cluster) - - -@types.convert(flavor={"type": "nova_flavor"}, - master_flavor={"type": "nova_flavor"}, - worker_flavor={"type": "nova_flavor"}) -@validation.flavor_exists("master_flavor") -@validation.flavor_exists("worker_flavor") -@validation.add("required_services", services=[consts.Service.SAHARA]) -@validation.add("required_contexts", contexts=["users", "sahara_image"]) -@validation.add("number", param_name="workers_count", minval=1, - integer_only=True) -@scenario.configure(context={"cleanup": ["sahara"]}, - name="SaharaClusters.create_scale_delete_cluster", - platform="openstack") -class CreateScaleDeleteCluster(utils.SaharaScenario): - - def run(self, master_flavor, worker_flavor, workers_count, - plugin_name, hadoop_version, deltas, flavor=None, - floating_ip_pool=None, volumes_per_node=None, - volumes_size=None, auto_security_group=None, - security_groups=None, node_configs=None, - cluster_configs=None, enable_anti_affinity=False, - enable_proxy=False, use_autoconfig=True): - """Launch, scale and delete a Sahara Cluster. - - This scenario launches a Hadoop cluster, waits until it becomes - 'Active'. Then a series of scale operations is applied. The scaling - happens according to numbers listed in :param deltas. Ex. if - deltas is set to [2, -2] it means that the first scaling operation will - add 2 worker nodes to the cluster and the second will remove two. - - :param flavor: Nova flavor that will be for nodes in the - created node groups. Deprecated. - :param master_flavor: Nova flavor that will be used for the master - instance of the cluster - :param worker_flavor: Nova flavor that will be used for the workers of - the cluster - :param workers_count: number of worker instances in a cluster - :param plugin_name: name of a provisioning plugin - :param hadoop_version: version of Hadoop distribution supported by - the specified plugin. - :param deltas: list of integers which will be used to add or - remove worker nodes from the cluster - :param floating_ip_pool: floating ip pool name from which Floating - IPs will be allocated. Sahara will determine - automatically how to treat this depending on - its own configurations. Defaults to None - because in some cases Sahara may work w/o - Floating IPs. - :param neutron_net_id: id of a Neutron network that will be used - for fixed IPs. This parameter is ignored when - Nova Network is set up. - :param volumes_per_node: number of Cinder volumes that will be - attached to every cluster node - :param volumes_size: size of each Cinder volume in GB - :param auto_security_group: boolean value. If set to True Sahara will - create a Security Group for each Node Group - in the Cluster automatically. 
- :param security_groups: list of security groups that will be used - while creating VMs. If auto_security_group - is set to True this list can be left empty. - :param node_configs: configs dict that will be passed to each Node - Group - :param cluster_configs: configs dict that will be passed to the - Cluster - :param enable_anti_affinity: If set to true the vms will be scheduled - one per compute node. - :param enable_proxy: Use Master Node of a Cluster as a Proxy node and - do not assign floating ips to workers. - :param use_autoconfig: If True, instances of the node group will be - automatically configured during cluster - creation. If False, the configuration values - should be specify manually - """ - - image_id = self.context["tenant"]["sahara"]["image"] - - LOG.debug("Using Image: %s" % image_id) - - cluster = self._launch_cluster( - flavor_id=flavor, - master_flavor_id=master_flavor, - worker_flavor_id=worker_flavor, - image_id=image_id, - workers_count=workers_count, - plugin_name=plugin_name, - hadoop_version=hadoop_version, - floating_ip_pool=floating_ip_pool, - volumes_per_node=volumes_per_node, - volumes_size=volumes_size, - auto_security_group=auto_security_group, - security_groups=security_groups, - node_configs=node_configs, - cluster_configs=cluster_configs, - enable_anti_affinity=enable_anti_affinity, - enable_proxy=enable_proxy, - use_autoconfig=use_autoconfig) - - for delta in deltas: - # The Cluster is fetched every time so that its node groups have - # correct 'count' values. - cluster = self.clients("sahara").clusters.get(cluster.id) - - if delta == 0: - # Zero scaling makes no sense. - continue - elif delta > 0: - self._scale_cluster_up(cluster, delta) - elif delta < 0: - self._scale_cluster_down(cluster, delta) - - self._delete_cluster(cluster) diff --git a/rally/plugins/openstack/scenarios/sahara/consts.py b/rally/plugins/openstack/scenarios/sahara/consts.py deleted file mode 100644 index c05c3e86..00000000 --- a/rally/plugins/openstack/scenarios/sahara/consts.py +++ /dev/null @@ -1,249 +0,0 @@ -# Copyright 2015: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
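Editor's note: the consts module below is a set of nested mappings keyed first by plugin name, then by plugin version, then by node group role, and scenario code indexes them directly, so an unknown plugin/version pair fails fast with a KeyError. A minimal sketch of the lookup pattern used by `_launch_cluster`; the helper name is illustrative, while the import path matches the deleted module:

    from rally.plugins.openstack.scenarios.sahara import consts as sahara_consts

    def node_processes_for(plugin_name, hadoop_version):
        # Returns (master, worker, manager-or-None) process lists for a plugin.
        processes = sahara_consts.NODE_PROCESSES[plugin_name][hadoop_version]
        # Only some plugins (e.g. "hdp" 2.0.6+, "cdh") define a manager group;
        # _launch_cluster checks for it before adding a manager node group.
        return (processes["master"],
                processes["worker"],
                processes.get("manager"))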
- -NODE_PROCESSES = { - "vanilla": { - "1.2.1": { - "master": ["namenode", "jobtracker", "oozie"], - "worker": ["datanode", "tasktracker"] - }, - "2.3.0": { - "master": ["namenode", "resourcemanager", "historyserver", - "oozie"], - "worker": ["datanode", "nodemanager"] - }, - "2.4.1": { - "master": ["namenode", "resourcemanager", "historyserver", - "oozie"], - "worker": ["datanode", "nodemanager"] - }, - "2.6.0": { - "master": ["namenode", "resourcemanager", "historyserver", - "oozie"], - "worker": ["datanode", "nodemanager"] - }, - "2.7.1": { - "master": ["namenode", "resourcemanager", "historyserver", - "oozie"], - "worker": ["datanode", "nodemanager"] - } - }, - "hdp": { - "1.3.2": { - "master": ["JOBTRACKER", "NAMENODE", "SECONDARY_NAMENODE", - "GANGLIA_SERVER", "NAGIOS_SERVER", - "AMBARI_SERVER", "OOZIE_SERVER"], - "worker": ["TASKTRACKER", "DATANODE", "HDFS_CLIENT", - "MAPREDUCE_CLIENT", "OOZIE_CLIENT", "PIG"] - }, - "2.0.6": { - "manager": ["AMBARI_SERVER", "GANGLIA_SERVER", - "NAGIOS_SERVER"], - "master": ["NAMENODE", "SECONDARY_NAMENODE", - "ZOOKEEPER_SERVER", "ZOOKEEPER_CLIENT", - "HISTORYSERVER", "RESOURCEMANAGER", - "OOZIE_SERVER"], - "worker": ["DATANODE", "HDFS_CLIENT", "ZOOKEEPER_CLIENT", - "PIG", "MAPREDUCE2_CLIENT", "YARN_CLIENT", - "NODEMANAGER", "OOZIE_CLIENT"] - }, - "2.2": { - "manager": ["AMBARI_SERVER", "GANGLIA_SERVER", - "NAGIOS_SERVER"], - "master": ["NAMENODE", "SECONDARY_NAMENODE", - "ZOOKEEPER_SERVER", "ZOOKEEPER_CLIENT", - "HISTORYSERVER", "RESOURCEMANAGER", - "OOZIE_SERVER"], - "worker": ["DATANODE", "HDFS_CLIENT", "ZOOKEEPER_CLIENT", - "PIG", "MAPREDUCE2_CLIENT", "YARN_CLIENT", - "NODEMANAGER", "OOZIE_CLIENT", "TEZ_CLIENT"] - } - }, - "cdh": { - "5": { - "manager": ["CLOUDERA_MANAGER"], - "master": ["HDFS_NAMENODE", "YARN_RESOURCEMANAGER", - "OOZIE_SERVER", "YARN_JOBHISTORY", - "HDFS_SECONDARYNAMENODE", "HIVE_METASTORE", - "HIVE_SERVER2"], - "worker": ["YARN_NODEMANAGER", "HDFS_DATANODE"] - }, - "5.4.0": { - "manager": ["CLOUDERA_MANAGER"], - "master": ["HDFS_NAMENODE", "YARN_RESOURCEMANAGER", - "OOZIE_SERVER", "YARN_JOBHISTORY", - "HDFS_SECONDARYNAMENODE", "HIVE_METASTORE", - "HIVE_SERVER2"], - "worker": ["YARN_NODEMANAGER", "HDFS_DATANODE"] - }, - "5.5.0": { - "manager": ["CLOUDERA_MANAGER"], - "master": ["HDFS_NAMENODE", "YARN_RESOURCEMANAGER", - "OOZIE_SERVER", "YARN_JOBHISTORY", - "HDFS_SECONDARYNAMENODE", "HIVE_METASTORE", - "HIVE_SERVER2"], - "worker": ["YARN_NODEMANAGER", "HDFS_DATANODE"] - } - }, - "spark": { - "1.3.1": { - "master": ["namenode", "master"], - "worker": ["datanode", "slave"] - }, - "1.6.0": { - "master": ["namenode", "master"], - "worker": ["datanode", "slave"] - } - }, - "ambari": { - "2.3": { - "master-edp": ["Hive Metastore", "HiveServer", "Oozie"], - "master": ["Ambari", "MapReduce History Server", - "Spark History Server", "NameNode", "ResourceManager", - "SecondaryNameNode", "YARN Timeline Server", - "ZooKeeper"], - "worker": ["DataNode", "NodeManager"] - } - }, - "mapr": { - "5.0.0.mrv2": { - "master": ["Metrics", "Webserver", "Zookeeper", "HTTPFS", - "Oozie", "FileServer", "CLDB", "Flume", "Hue", - "NodeManager", "HistoryServer", "ResourseManager", - "HiveServer2", "HiveMetastore", "Sqoop2-Client", - "Sqoop2-Server"], - "worker": ["NodeManager", "FileServer"] - }, - "5.1.0.mrv2": { - "master": ["Metrics", "Webserver", "Zookeeper", "HTTPFS", - "Oozie", "FileServer", "CLDB", "Flume", "Hue", - "NodeManager", "HistoryServer", "ResourseManager", - "HiveServer2", "HiveMetastore", "Sqoop2-Client", - "Sqoop2-Server"], - "worker": 
["NodeManager", "FileServer"] - } - } -} - -REPLICATION_CONFIGS = { - "vanilla": { - "1.2.1": { - "target": "HDFS", - "config_name": "dfs.replication" - }, - "2.3.0": { - "target": "HDFS", - "config_name": "dfs.replication" - }, - "2.4.1": { - "target": "HDFS", - "config_name": "dfs.replication" - }, - "2.6.0": { - "target": "HDFS", - "config_name": "dfs.replication" - }, - "2.7.1": { - "target": "HDFS", - "config_name": "dfs.replication" - } - }, - "hdp": { - "1.3.2": { - "target": "HDFS", - "config_name": "dfs.replication" - }, - "2.0.6": { - "target": "HDFS", - "config_name": "dfs.replication" - }, - "2.2": { - "target": "HDFS", - "config_name": "dfs.replication" - } - }, - "cdh": { - "5": { - "target": "HDFS", - "config_name": "dfs_replication" - }, - "5.4.0": { - "target": "HDFS", - "config_name": "dfs_replication" - }, - "5.5.0": { - "target": "HDFS", - "config_name": "dfs_replication" - } - }, - "spark": { - "1.3.1": { - "target": "HDFS", - "config_name": "dfs_replication" - }, - "1.6.0": { - "target": "HDFS", - "config_name": "dfs_replication" - } - }, - "ambari": { - "2.3": { - "target": "HDFS", - "config_name": "dfs_replication" - } - }, - "mapr": { - "5.0.0.mrv2": { - "target": "HDFS", - "config_name": "dfs.replication" - }, - "5.1.0.mrv2": { - "target": "HDFS", - "config_name": "dfs.replication" - } - } - -} - -ANTI_AFFINITY_PROCESSES = { - "vanilla": { - "1.2.1": ["datanode"], - "2.3.0": ["datanode"], - "2.4.1": ["datanode"], - "2.6.0": ["datanode"], - "2.7.1": ["datanode"] - }, - "hdp": { - "1.3.2": ["DATANODE"], - "2.0.6": ["DATANODE"], - "2.2": ["DATANODE"] - }, - "cdh": { - "5": ["HDFS_DATANODE"], - "5.4.0": ["HDFS_DATANODE"], - "5.5.0": ["HDFS_DATANODE"] - }, - "spark": { - "1.3.1": ["datanode"], - "1.6.0": ["datanode"] - }, - "ambari": { - "2.3": ["DataNode"], - }, - "mapr": { - "5.0.0.mrv2": ["FileServer"], - "5.1.0.mrv2": ["FileServer"], - } -} diff --git a/rally/plugins/openstack/scenarios/sahara/jobs.py b/rally/plugins/openstack/scenarios/sahara/jobs.py deleted file mode 100644 index 9619c4d1..00000000 --- a/rally/plugins/openstack/scenarios/sahara/jobs.py +++ /dev/null @@ -1,144 +0,0 @@ -# Copyright 2014: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally.common import logging -from rally import consts -from rally.plugins.openstack import scenario -from rally.plugins.openstack.scenarios.sahara import utils -from rally.task import validation - -LOG = logging.getLogger(__name__) - - -"""Benchmark scenarios for Sahara jobs.""" - - -@validation.add("required_services", services=[consts.Service.SAHARA]) -@validation.add("required_contexts", contexts=["users", "sahara_image", - "sahara_job_binaries", - "sahara_cluster"]) -@scenario.configure(context={"cleanup": ["sahara"]}, - name="SaharaJob.create_launch_job", - platform="openstack") -class CreateLaunchJob(utils.SaharaScenario): - - def run(self, job_type, configs, job_idx=0): - """Create and execute a Sahara EDP Job. 
- - This scenario Creates a Job entity and launches an execution on a - Cluster. - - :param job_type: type of the Data Processing Job - :param configs: config dict that will be passed to a Job Execution - :param job_idx: index of a job in a sequence. This index will be - used to create different atomic actions for each job - in a sequence - """ - - mains = self.context["tenant"]["sahara"]["mains"] - libs = self.context["tenant"]["sahara"]["libs"] - - name = self.generate_random_name() - job = self.clients("sahara").jobs.create(name=name, - type=job_type, - description="", - mains=mains, - libs=libs) - - cluster_id = self.context["tenant"]["sahara"]["cluster"] - - if job_type.lower() == "java": - input_id = None - output_id = None - else: - input_id = self.context["tenant"]["sahara"]["input"] - output_id = self._create_output_ds().id - - self._run_job_execution(job_id=job.id, - cluster_id=cluster_id, - input_id=input_id, - output_id=output_id, - configs=configs, - job_idx=job_idx) - - -@validation.add("required_services", services=[consts.Service.SAHARA]) -@validation.add("required_contexts", contexts=["users", "sahara_image", - "sahara_job_binaries", - "sahara_cluster"]) -@scenario.configure(context={"cleanup": ["sahara"]}, - name="SaharaJob.create_launch_job_sequence", - platform="openstack") -class CreateLaunchJobSequence(utils.SaharaScenario): - - def run(self, jobs): - """Create and execute a sequence of the Sahara EDP Jobs. - - This scenario Creates a Job entity and launches an execution on a - Cluster for every job object provided. - - :param jobs: list of jobs that should be executed in one context - """ - - launch_job = CreateLaunchJob(self.context) - - for idx, job in enumerate(jobs): - LOG.debug("Launching Job. Sequence #%d" % idx) - launch_job.run(job["job_type"], job["configs"], idx) - - -@validation.add("required_services", services=[consts.Service.SAHARA]) -@validation.add("required_contexts", contexts=["users", "sahara_image", - "sahara_job_binaries", - "sahara_cluster"]) -@scenario.configure(context={"cleanup": ["sahara"]}, - name="SaharaJob.create_launch_job_sequence_with_scaling", - platform="openstack") -class CreateLaunchJobSequenceWithScaling(utils.SaharaScenario,): - - def run(self, jobs, deltas): - """Create and execute Sahara EDP Jobs on a scaling Cluster. - - This scenario Creates a Job entity and launches an execution on a - Cluster for every job object provided. The Cluster is scaled according - to the deltas values and the sequence is launched again. - - :param jobs: list of jobs that should be executed in one context - :param deltas: list of integers which will be used to add or - remove worker nodes from the cluster - """ - - cluster_id = self.context["tenant"]["sahara"]["cluster"] - - launch_job_sequence = CreateLaunchJobSequence(self.context) - launch_job_sequence.run(jobs) - - for delta in deltas: - # The Cluster is fetched every time so that its node groups have - # correct 'count' values. - cluster = self.clients("sahara").clusters.get(cluster_id) - - LOG.debug("Scaling cluster %s with delta %d" % - (cluster.name, delta)) - if delta == 0: - # Zero scaling makes no sense. 
- continue - elif delta > 0: - self._scale_cluster_up(cluster, delta) - elif delta < 0: - self._scale_cluster_down(cluster, delta) - - LOG.debug("Starting Job sequence") - launch_job_sequence.run(jobs) diff --git a/rally/plugins/openstack/scenarios/sahara/node_group_templates.py b/rally/plugins/openstack/scenarios/sahara/node_group_templates.py deleted file mode 100644 index 5357b1a7..00000000 --- a/rally/plugins/openstack/scenarios/sahara/node_group_templates.py +++ /dev/null @@ -1,115 +0,0 @@ -# Copyright 2014: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally import consts -from rally.plugins.openstack import scenario -from rally.plugins.openstack.scenarios.sahara import utils -from rally.task import types -from rally.task import validation - -"""Scenarios for Sahara node group templates.""" - - -@types.convert(flavor={"type": "nova_flavor"}) -@validation.flavor_exists("flavor") -@validation.add("required_services", services=[consts.Service.SAHARA]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure( - context={"cleanup": ["sahara"]}, - name="SaharaNodeGroupTemplates.create_and_list_node_group_templates", - platform="openstack") -class CreateAndListNodeGroupTemplates(utils.SaharaScenario): - - def run(self, flavor, plugin_name="vanilla", - hadoop_version="1.2.1", use_autoconfig=True): - """Create and list Sahara Node Group Templates. - - This scenario creates two Node Group Templates with different set of - node processes. The master Node Group Template contains Hadoop's - management processes. The worker Node Group Template contains - Hadoop's worker processes. - - By default the templates are created for the vanilla Hadoop - provisioning plugin using the version 1.2.1 - - After the templates are created the list operation is called. - - :param flavor: Nova flavor that will be for nodes in the - created node groups - :param plugin_name: name of a provisioning plugin - :param hadoop_version: version of Hadoop distribution supported by - the specified plugin. - :param use_autoconfig: If True, instances of the node group will be - automatically configured during cluster - creation. 
If False, the configuration values - should be specify manually - """ - - self._create_master_node_group_template(flavor_id=flavor, - plugin_name=plugin_name, - hadoop_version=hadoop_version, - use_autoconfig=use_autoconfig) - self._create_worker_node_group_template(flavor_id=flavor, - plugin_name=plugin_name, - hadoop_version=hadoop_version, - use_autoconfig=use_autoconfig) - self._list_node_group_templates() - - -@types.convert(flavor={"type": "nova_flavor"}) -@validation.flavor_exists("flavor") -@validation.add("required_services", services=[consts.Service.SAHARA]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure( - context={"cleanup": ["sahara"]}, - name="SaharaNodeGroupTemplates.create_delete_node_group_templates", - platform="openstack") -class CreateDeleteNodeGroupTemplates(utils.SaharaScenario): - - def run(self, flavor, plugin_name="vanilla", - hadoop_version="1.2.1", use_autoconfig=True): - """Create and delete Sahara Node Group Templates. - - This scenario creates and deletes two most common types of - Node Group Templates. - - By default the templates are created for the vanilla Hadoop - provisioning plugin using the version 1.2.1 - - :param flavor: Nova flavor that will be for nodes in the - created node groups - :param plugin_name: name of a provisioning plugin - :param hadoop_version: version of Hadoop distribution supported by - the specified plugin. - :param use_autoconfig: If True, instances of the node group will be - automatically configured during cluster - creation. If False, the configuration values - should be specify manually - """ - - master_ngt = self._create_master_node_group_template( - flavor_id=flavor, - plugin_name=plugin_name, - hadoop_version=hadoop_version, - use_autoconfig=use_autoconfig) - - worker_ngt = self._create_worker_node_group_template( - flavor_id=flavor, - plugin_name=plugin_name, - hadoop_version=hadoop_version, - use_autoconfig=use_autoconfig) - - self._delete_node_group_template(master_ngt) - self._delete_node_group_template(worker_ngt) diff --git a/rally/plugins/openstack/scenarios/sahara/utils.py b/rally/plugins/openstack/scenarios/sahara/utils.py deleted file mode 100644 index e3577340..00000000 --- a/rally/plugins/openstack/scenarios/sahara/utils.py +++ /dev/null @@ -1,589 +0,0 @@ -# Copyright 2014: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
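Editor's note, before the utility module below: `_setup_replication_config` clamps the HDFS replication factor to `min(workers_count, 3)` (three being Hadoop's default) and emits it under the target/config-name pair taken from REPLICATION_CONFIGS, since a cluster with fewer than three datanodes cannot hold three replicas. A minimal sketch of the resulting structure, with an illustrative helper name:

    # For plugin "vanilla" 2.7.1, REPLICATION_CONFIGS gives
    # {"target": "HDFS", "config_name": "dfs.replication"}.
    def replication_config(workers_count, conf):
        # A two-worker cluster gets replication 2, larger clusters get 3.
        replication_value = min(workers_count, 3)
        return {conf["target"]: {conf["config_name"]: replication_value}}

    # replication_config(2, {"target": "HDFS", "config_name": "dfs.replication"})
    # -> {"HDFS": {"dfs.replication": 2}}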
- -import random - -from oslo_config import cfg -from oslo_utils import uuidutils -from saharaclient.api import base as sahara_base - -from rally.common.i18n import _ -from rally.common import logging -from rally.common import utils as rutils -from rally import consts -from rally import exceptions -from rally.plugins.openstack import scenario -from rally.plugins.openstack.scenarios.sahara import consts as sahara_consts -from rally.task import atomic -from rally.task import utils - - -LOG = logging.getLogger(__name__) -CONF = cfg.CONF - - -class SaharaScenario(scenario.OpenStackScenario): - """Base class for Sahara scenarios with basic atomic actions.""" - - # NOTE(sskripnick): Some sahara resource names are validated as hostnames. - # Since underscores are not allowed in hostnames we should not use them. - RESOURCE_NAME_FORMAT = "rally-sahara-XXXXXX-XXXXXXXXXXXXXXXX" - - @atomic.action_timer("sahara.list_node_group_templates") - def _list_node_group_templates(self): - """Return user Node Group Templates list.""" - return self.clients("sahara").node_group_templates.list() - - @atomic.action_timer("sahara.create_master_node_group_template") - def _create_master_node_group_template(self, flavor_id, plugin_name, - hadoop_version, - use_autoconfig=True): - """Create a master Node Group Template with a random name. - - :param flavor_id: The required argument for the Template - :param plugin_name: Sahara provisioning plugin name - :param hadoop_version: The version of Hadoop distribution supported by - the plugin - :param use_autoconfig: If True, instances of the node group will be - automatically configured during cluster - creation. If False, the configuration values - should be specify manually - :returns: The created Template - """ - name = self.generate_random_name() - - return self.clients("sahara").node_group_templates.create( - name=name, - plugin_name=plugin_name, - hadoop_version=hadoop_version, - flavor_id=flavor_id, - node_processes=sahara_consts.NODE_PROCESSES[plugin_name] - [hadoop_version]["master"], - use_autoconfig=use_autoconfig) - - @atomic.action_timer("sahara.create_worker_node_group_template") - def _create_worker_node_group_template(self, flavor_id, plugin_name, - hadoop_version, use_autoconfig): - """Create a worker Node Group Template with a random name. - - :param flavor_id: The required argument for the Template - :param plugin_name: Sahara provisioning plugin name - :param hadoop_version: The version of Hadoop distribution supported by - the plugin - :param use_autoconfig: If True, instances of the node group will be - automatically configured during cluster - creation. If False, the configuration values - should be specify manually - :returns: The created Template - """ - name = self.generate_random_name() - - return self.clients("sahara").node_group_templates.create( - name=name, - plugin_name=plugin_name, - hadoop_version=hadoop_version, - flavor_id=flavor_id, - node_processes=sahara_consts.NODE_PROCESSES[plugin_name] - [hadoop_version]["worker"], - use_autoconfig=use_autoconfig) - - @atomic.action_timer("sahara.delete_node_group_template") - def _delete_node_group_template(self, node_group): - """Delete a Node Group Template by id. 
- - :param node_group: The Node Group Template to be deleted - """ - self.clients("sahara").node_group_templates.delete(node_group.id) - - def _wait_active(self, cluster_object): - utils.wait_for( - resource=cluster_object, ready_statuses=["active"], - failure_statuses=["error"], update_resource=self._update_cluster, - timeout=CONF.benchmark.sahara_cluster_create_timeout, - check_interval=CONF.benchmark.sahara_cluster_check_interval) - - def _setup_neutron_floating_ip_pool(self, name_or_id): - if name_or_id: - if uuidutils.is_uuid_like(name_or_id): - # Looks like an id is provided Return as is. - return name_or_id - else: - # It's a name. Changing to id. - for net in self.clients("neutron").list_networks()["networks"]: - if net["name"] == name_or_id: - return net["id"] - # If the name is not found in the list. Exit with error. - raise exceptions.ContextSetupFailure( - ctx_name=self.get_name(), - msg=_("Could not resolve Floating IP Pool" - " name %s to id") % name_or_id) - else: - # Pool is not provided. Using the one set as GW for current router. - - net = self.context["tenant"]["networks"][0] - router_id = net["router_id"] - router = self.clients("neutron").show_router(router_id)["router"] - net_id = router["external_gateway_info"]["network_id"] - - return net_id - - def _setup_nova_floating_ip_pool(self, name): - if name: - # The name is provided returning it as is. - return name - else: - # The name is not provided. Discovering - LOG.debug("No Floating Ip Pool provided. Taking random.") - pools = self.clients("nova").floating_ip_pools.list() - - if pools: - return random.choice(pools).name - else: - LOG.warning("No Floating Ip Pools found. This may cause " - "instances to be unreachable.") - return None - - def _setup_floating_ip_pool(self, node_groups, floating_ip_pool, - enable_proxy): - if consts.Service.NEUTRON in self.clients("services").values(): - LOG.debug("Neutron detected as networking backend.") - floating_ip_pool_value = self._setup_neutron_floating_ip_pool( - floating_ip_pool) - else: - LOG.debug("Nova Network detected as networking backend.") - floating_ip_pool_value = self._setup_nova_floating_ip_pool( - floating_ip_pool) - - if floating_ip_pool_value: - LOG.debug("Using floating ip pool %s." % floating_ip_pool_value) - # If the pool is set by any means assign it to all node groups. - # If the proxy node feature is enabled, Master Node Group and - # Proxy Workers should have a floating ip pool set up - - if enable_proxy: - proxy_groups = [x for x in node_groups - if x["name"] in ("master-ng", "proxy-ng")] - for ng in proxy_groups: - ng["is_proxy_gateway"] = True - ng["floating_ip_pool"] = floating_ip_pool_value - else: - for ng in node_groups: - ng["floating_ip_pool"] = floating_ip_pool_value - - return node_groups - - def _setup_volumes(self, node_groups, volumes_per_node, volumes_size): - if volumes_per_node: - LOG.debug("Adding volumes config to Node Groups") - for ng in node_groups: - ng_name = ng["name"] - if "worker" in ng_name or "proxy" in ng_name: - # NOTE: Volume storage is used only by HDFS Datanode - # process which runs on workers and proxies. - - ng["volumes_per_node"] = volumes_per_node - ng["volumes_size"] = volumes_size - - return node_groups - - def _setup_security_groups(self, node_groups, auto_security_group, - security_groups): - if auto_security_group: - LOG.debug("Auto security group enabled. 
Adding to Node Groups.") - if security_groups: - LOG.debug("Adding provided Security Groups to Node Groups.") - - for ng in node_groups: - if auto_security_group: - ng["auto_security_group"] = auto_security_group - if security_groups: - ng["security_groups"] = security_groups - - return node_groups - - def _setup_node_configs(self, node_groups, node_configs): - if node_configs: - LOG.debug("Adding Hadoop configs to Node Groups") - for ng in node_groups: - ng["node_configs"] = node_configs - - return node_groups - - def _setup_node_autoconfig(self, node_groups, node_autoconfig): - LOG.debug("Adding auto-config par to Node Groups") - for ng in node_groups: - ng["use_autoconfig"] = node_autoconfig - - return node_groups - - def _setup_replication_config(self, hadoop_version, workers_count, - plugin_name): - replication_value = min(workers_count, 3) - # 3 is a default Hadoop replication - conf = sahara_consts.REPLICATION_CONFIGS[plugin_name][hadoop_version] - LOG.debug("Using replication factor: %s" % replication_value) - replication_config = { - conf["target"]: { - conf["config_name"]: replication_value - } - } - return replication_config - - @logging.log_deprecated_args("`flavor_id` argument is deprecated. Use " - "`master_flavor_id` and `worker_flavor_id` " - "parameters.", rally_version="2.0", - deprecated_args=["flavor_id"]) - @atomic.action_timer("sahara.launch_cluster") - def _launch_cluster(self, plugin_name, hadoop_version, master_flavor_id, - worker_flavor_id, image_id, workers_count, - flavor_id=None, - floating_ip_pool=None, volumes_per_node=None, - volumes_size=None, auto_security_group=None, - security_groups=None, node_configs=None, - cluster_configs=None, enable_anti_affinity=False, - enable_proxy=False, - wait_active=True, - use_autoconfig=True): - """Create a cluster and wait until it becomes Active. - - The cluster is created with two node groups. The master Node Group is - created with one instance. The worker node group contains - node_count - 1 instances. - - :param plugin_name: provisioning plugin name - :param hadoop_version: Hadoop version supported by the plugin - :param master_flavor_id: flavor which will be used to create master - instance - :param worker_flavor_id: flavor which will be used to create workers - :param image_id: image id that will be used to boot instances - :param workers_count: number of worker instances. All plugins will - also add one Master instance and some plugins - add a Manager instance. - :param floating_ip_pool: floating ip pool name from which Floating - IPs will be allocated - :param volumes_per_node: number of Cinder volumes that will be - attached to every cluster node - :param volumes_size: size of each Cinder volume in GB - :param auto_security_group: boolean value. If set to True Sahara will - create a Security Group for each Node Group - in the Cluster automatically. - :param security_groups: list of security groups that will be used - while creating VMs. If auto_security_group is - set to True, this list can be left empty. - :param node_configs: configs dict that will be passed to each Node - Group - :param cluster_configs: configs dict that will be passed to the - Cluster - :param enable_anti_affinity: If set to true the vms will be scheduled - one per compute node. - :param enable_proxy: Use Master Node of a Cluster as a Proxy node and - do not assign floating ips to workers. 
- :param wait_active: Wait until a Cluster gets int "Active" state - :param use_autoconfig: If True, instances of the node group will be - automatically configured during cluster - creation. If False, the configuration values - should be specify manually - :returns: created cluster - """ - - if enable_proxy: - proxies_count = int( - workers_count / CONF.benchmark.sahara_workers_per_proxy) - else: - proxies_count = 0 - - if flavor_id: - # Note: the deprecated argument is used. Falling back to single - # flavor behavior. - master_flavor_id = flavor_id - worker_flavor_id = flavor_id - - node_groups = [ - { - "name": "master-ng", - "flavor_id": master_flavor_id, - "node_processes": sahara_consts.NODE_PROCESSES[plugin_name] - [hadoop_version]["master"], - "count": 1 - }, { - "name": "worker-ng", - "flavor_id": worker_flavor_id, - "node_processes": sahara_consts.NODE_PROCESSES[plugin_name] - [hadoop_version]["worker"], - "count": workers_count - proxies_count - } - ] - - if proxies_count: - node_groups.append({ - "name": "proxy-ng", - "flavor_id": worker_flavor_id, - "node_processes": sahara_consts.NODE_PROCESSES[plugin_name] - [hadoop_version]["worker"], - "count": proxies_count - }) - - if "manager" in (sahara_consts.NODE_PROCESSES[plugin_name] - [hadoop_version]): - # Adding manager group separately as it is supported only in - # specific configurations. - - node_groups.append({ - "name": "manager-ng", - "flavor_id": master_flavor_id, - "node_processes": sahara_consts.NODE_PROCESSES[plugin_name] - [hadoop_version]["manager"], - "count": 1 - }) - - node_groups = self._setup_floating_ip_pool(node_groups, - floating_ip_pool, - enable_proxy) - - neutron_net_id = self._get_neutron_net_id() - - node_groups = self._setup_volumes(node_groups, volumes_per_node, - volumes_size) - - node_groups = self._setup_security_groups(node_groups, - auto_security_group, - security_groups) - - node_groups = self._setup_node_configs(node_groups, node_configs) - - node_groups = self._setup_node_autoconfig(node_groups, use_autoconfig) - - replication_config = self._setup_replication_config(hadoop_version, - workers_count, - plugin_name) - - # The replication factor should be set for small clusters. However the - # cluster_configs parameter can override it - merged_cluster_configs = self._merge_configs(replication_config, - cluster_configs) - - aa_processes = None - if enable_anti_affinity: - aa_processes = (sahara_consts.ANTI_AFFINITY_PROCESSES[plugin_name] - [hadoop_version]) - - name = self.generate_random_name() - - cluster_object = self.clients("sahara").clusters.create( - name=name, - plugin_name=plugin_name, - hadoop_version=hadoop_version, - node_groups=node_groups, - default_image_id=image_id, - net_id=neutron_net_id, - cluster_configs=merged_cluster_configs, - anti_affinity=aa_processes, - use_autoconfig=use_autoconfig - ) - - if wait_active: - LOG.debug("Starting cluster `%s`" % name) - self._wait_active(cluster_object) - - return self.clients("sahara").clusters.get(cluster_object.id) - - def _update_cluster(self, cluster): - return self.clients("sahara").clusters.get(cluster.id) - - def _scale_cluster(self, cluster, delta): - """The scaling helper. - - This method finds the worker node group in a cluster, builds a - scale_object required by Sahara API and waits for the scaling to - complete. - - NOTE: This method is not meant to be called directly in benchmarks. - There two specific scaling methods of up and down scaling which have - different atomic timers. 
- """ - worker_node_group = [g for g in cluster.node_groups - if "worker" in g["name"]][0] - scale_object = { - "resize_node_groups": [ - { - "name": worker_node_group["name"], - "count": worker_node_group["count"] + delta - } - ] - } - self.clients("sahara").clusters.scale(cluster.id, scale_object) - - self._wait_active(cluster) - - @atomic.action_timer("sahara.scale_up") - def _scale_cluster_up(self, cluster, delta): - """Add a given number of worker nodes to the cluster. - - :param cluster: The cluster to be scaled - :param delta: The number of workers to be added. (A positive number is - expected here) - """ - self._scale_cluster(cluster, delta) - - @atomic.action_timer("sahara.scale_down") - def _scale_cluster_down(self, cluster, delta): - """Remove a given number of worker nodes from the cluster. - - :param cluster: The cluster to be scaled - :param delta: The number of workers to be removed. (A negative number - is expected here) - """ - self._scale_cluster(cluster, delta) - - @atomic.action_timer("sahara.delete_cluster") - def _delete_cluster(self, cluster): - """Delete cluster. - - :param cluster: cluster to delete - """ - - LOG.debug("Deleting cluster `%s`" % cluster.name) - self.clients("sahara").clusters.delete(cluster.id) - - utils.wait_for( - resource=cluster, - timeout=CONF.benchmark.sahara_cluster_delete_timeout, - check_interval=CONF.benchmark.sahara_cluster_check_interval, - is_ready=self._is_cluster_deleted) - - def _is_cluster_deleted(self, cluster): - LOG.debug("Checking cluster `%s` to be deleted. Status: `%s`" % - (cluster.name, cluster.status)) - try: - self.clients("sahara").clusters.get(cluster.id) - return False - except sahara_base.APIException: - return True - - def _create_output_ds(self): - """Create an output Data Source based on EDP context - - :returns: The created Data Source - """ - ds_type = self.context["sahara"]["output_conf"]["output_type"] - url_prefix = self.context["sahara"]["output_conf"]["output_url_prefix"] - - if ds_type == "swift": - raise exceptions.RallyException( - _("Swift Data Sources are not implemented yet")) - - url = url_prefix.rstrip("/") + "/%s" % self.generate_random_name() - - return self.clients("sahara").data_sources.create( - name=self.generate_random_name(), - description="", - data_source_type=ds_type, - url=url) - - def _run_job_execution(self, job_id, cluster_id, input_id, output_id, - configs, job_idx): - """Run a Job Execution and wait until it completes or fails. - - The Job Execution is accepted as successful when Oozie reports - "success" or "succeeded" status. The failure statuses are "failed" and - "killed". - - The timeout and the polling interval may be configured through - "sahara_job_execution_timeout" and "sahara_job_check_interval" - parameters under the "benchmark" section. - - :param job_id: The Job id that will be executed - :param cluster_id: The Cluster id which will execute the Job - :param input_id: The input Data Source id - :param output_id: The output Data Source id - :param configs: The config dict that will be passed as Job Execution's - parameters. 
- :param job_idx: The index of a job in a sequence - - """ - @atomic.action_timer("sahara.job_execution_%s" % job_idx) - def run(self): - job_execution = self.clients("sahara").job_executions.create( - job_id=job_id, - cluster_id=cluster_id, - input_id=input_id, - output_id=output_id, - configs=configs) - - utils.wait_for( - resource=job_execution.id, - is_ready=self._job_execution_is_finished, - timeout=CONF.benchmark.sahara_job_execution_timeout, - check_interval=CONF.benchmark.sahara_job_check_interval) - - run(self) - - def _job_execution_is_finished(self, je_id): - status = self.clients("sahara").job_executions.get(je_id).info[ - "status"].lower() - - LOG.debug("Checking for Job Execution %s to complete. Status: %s" % - (je_id, status)) - if status in ("success", "succeeded"): - return True - elif status in ("failed", "killed"): - raise exceptions.RallyException( - "Job execution %s has failed" % je_id) - return False - - def _merge_configs(self, *configs): - """Merge configs in special format. - - It supports merging of configs in the following format: - applicable_target -> config_name -> config_value - - """ - result = {} - for config_dict in configs: - if config_dict: - for a_target in config_dict: - if a_target not in result or not result[a_target]: - result[a_target] = {} - result[a_target].update(config_dict[a_target]) - - return result - - def _get_neutron_net_id(self): - """Get the Neutron Network id from context. - - If Nova Network is used as networking backend, None is returned. - - :returns: Network id for Neutron or None for Nova Networking. - """ - - if consts.Service.NEUTRON not in self.clients("services").values(): - return None - - # Taking net id from context. - net = self.context["tenant"]["networks"][0] - neutron_net_id = net["id"] - LOG.debug("Using neutron network %s." % neutron_net_id) - LOG.debug("Using neutron router %s." % net["router_id"]) - - return neutron_net_id - - -def init_sahara_context(context_instance): - context_instance.context["sahara"] = context_instance.context.get("sahara", - {}) - for user, tenant_id in rutils.iterate_per_tenants( - context_instance.context["users"]): - context_instance.context["tenants"][tenant_id]["sahara"] = ( - context_instance.context["tenants"][tenant_id].get("sahara", {})) diff --git a/rally/plugins/openstack/scenarios/senlin/__init__.py b/rally/plugins/openstack/scenarios/senlin/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/rally/plugins/openstack/scenarios/senlin/clusters.py b/rally/plugins/openstack/scenarios/senlin/clusters.py deleted file mode 100644 index ae7339f3..00000000 --- a/rally/plugins/openstack/scenarios/senlin/clusters.py +++ /dev/null @@ -1,49 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
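Editor's note: `_merge_configs` above performs a shallow two-level merge in the applicable_target -> config_name -> config_value format, with later dicts winning per config name within each target; this is how a user-supplied `cluster_configs` overrides the computed replication factor. A worked example of that contract, written as an equivalent standalone loop rather than a call into Rally:

    replication = {"HDFS": {"dfs.replication": 2}}
    user_configs = {"HDFS": {"dfs.replication": 1},
                    "YARN": {"yarn.scheduler.minimum-allocation-mb": 256}}

    # Equivalent to SaharaScenario._merge_configs(replication, user_configs):
    merged = {}
    for config_dict in (replication, user_configs):
        for target, values in (config_dict or {}).items():
            merged.setdefault(target, {}).update(values)

    # The later dict wins per key within each target:
    assert merged == {"HDFS": {"dfs.replication": 1},
                      "YARN": {"yarn.scheduler.minimum-allocation-mb": 256}}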
- -from rally import consts -from rally.plugins.openstack import scenario -from rally.plugins.openstack.scenarios.senlin import utils -from rally.task import validation - - -"""Scenarios for Senlin clusters.""" - - -@validation.add("required_platform", platform="openstack", admin=True) -@validation.add("required_services", services=[consts.Service.SENLIN]) -@validation.add("required_contexts", contexts=("profiles")) -@scenario.configure(context={"admin_cleanup": ["senlin"]}, - name="SenlinClusters.create_and_delete_cluster", - platform="openstack") -class CreateAndDeleteCluster(utils.SenlinScenario): - - def run(self, desired_capacity=0, min_size=0, - max_size=-1, timeout=3600, metadata=None): - """Create a cluster and then delete it. - - Measure the "senlin cluster-create" and "senlin cluster-delete" - commands performance. - - :param desired_capacity: The capacity or initial number of nodes - owned by the cluster - :param min_size: The minimum number of nodes owned by the cluster - :param max_size: The maximum number of nodes owned by the cluster. - -1 means no limit - :param timeout: The timeout value in seconds for cluster creation - :param metadata: A set of key value pairs to associate with the cluster - """ - - profile_id = self.context["tenant"]["profile"] - cluster = self._create_cluster(profile_id, desired_capacity, - min_size, max_size, timeout, metadata) - self._delete_cluster(cluster) diff --git a/rally/plugins/openstack/scenarios/senlin/utils.py b/rally/plugins/openstack/scenarios/senlin/utils.py deleted file mode 100644 index b1c2a039..00000000 --- a/rally/plugins/openstack/scenarios/senlin/utils.py +++ /dev/null @@ -1,146 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_config import cfg - -from rally import exceptions -from rally.plugins.openstack import scenario -from rally.task import atomic -from rally.task import utils - - -CONF = cfg.CONF - - -class SenlinScenario(scenario.OpenStackScenario): - """Base class for Senlin scenarios with basic atomic actions.""" - - @atomic.action_timer("senlin.list_clusters") - def _list_clusters(self, **queries): - """Return user cluster list. - - :param kwargs \*\*queries: Optional query parameters to be sent to - restrict the clusters to be returned. Available parameters include: - - * name: The name of a cluster. - * status: The current status of a cluster. - * sort: A list of sorting keys separated by commas. Each sorting - key can optionally be attached with a sorting direction - modifier which can be ``asc`` or ``desc``. - * limit: Requests a specified size of returned items from the - query. Returns a number of items up to the specified limit - value. - * marker: Specifies the ID of the last-seen item. Use the limit - parameter to make an initial limited request and use the ID of - the last-seen item from the response as the marker parameter - value in a subsequent limited request. - * global_project: A boolean value indicating whether clusters - from all projects will be returned. 
- - :returns: list of clusters according to query. - """ - return list(self.admin_clients("senlin").clusters(**queries)) - - @atomic.action_timer("senlin.create_cluster") - def _create_cluster(self, profile_id, desired_capacity=0, min_size=0, - max_size=-1, timeout=60, metadata=None): - """Create a new cluster from attributes. - - :param profile_id: ID of profile used to create cluster - :param desired_capacity: The capacity or initial number of nodes - owned by the cluster - :param min_size: The minimum number of nodes owned by the cluster - :param max_size: The maximum number of nodes owned by the cluster. - -1 means no limit - :param timeout: The timeout value in minutes for cluster creation - :param metadata: A set of key value pairs to associate with the cluster - - :returns: object of cluster created. - """ - attrs = { - "profile_id": profile_id, - "name": self.generate_random_name(), - "desired_capacity": desired_capacity, - "min_size": min_size, - "max_size": max_size, - "metadata": metadata, - "timeout": timeout - } - - cluster = self.admin_clients("senlin").create_cluster(**attrs) - cluster = utils.wait_for_status( - cluster, - ready_statuses=["ACTIVE"], - failure_statuses=["ERROR"], - update_resource=self._get_cluster, - timeout=CONF.benchmark.senlin_action_timeout) - - return cluster - - def _get_cluster(self, cluster): - """Get cluster details. - - :param cluster: cluster to get - - :returns: object of cluster - """ - try: - return self.admin_clients("senlin").get_cluster(cluster.id) - except Exception as e: - if getattr(e, "code", getattr(e, "http_status", 400)) == 404: - raise exceptions.GetResourceNotFound(resource=cluster.id) - raise exceptions.GetResourceFailure(resource=cluster.id, err=e) - - @atomic.action_timer("senlin.delete_cluster") - def _delete_cluster(self, cluster): - """Delete given cluster. - - Returns after the cluster is successfully deleted. - - :param cluster: cluster object to delete - """ - self.admin_clients("senlin").delete_cluster(cluster) - utils.wait_for_status( - cluster, - ready_statuses=["DELETED"], - failure_statuses=["ERROR"], - check_deletion=True, - update_resource=self._get_cluster, - timeout=CONF.benchmark.senlin_action_timeout) - - @atomic.action_timer("senlin.create_profile") - def _create_profile(self, spec, metadata=None): - """Create a new profile from attributes. - - :param spec: spec dictionary used to create profile - :param metadata: A set of key value pairs to associate with the - profile - - :returns: object of profile created - """ - attrs = {} - attrs["spec"] = spec - attrs["name"] = self.generate_random_name() - if metadata: - attrs["metadata"] = metadata - - return self.clients("senlin").create_profile(**attrs) - - @atomic.action_timer("senlin.delete_profile") - def _delete_profile(self, profile): - """Delete given profile. - - Returns after the profile is successfully deleted. - - :param profile: profile object to be deleted - """ - self.clients("senlin").delete_profile(profile) diff --git a/rally/plugins/openstack/scenarios/swift/__init__.py b/rally/plugins/openstack/scenarios/swift/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/rally/plugins/openstack/scenarios/swift/objects.py b/rally/plugins/openstack/scenarios/swift/objects.py deleted file mode 100644 index fc6731bf..00000000 --- a/rally/plugins/openstack/scenarios/swift/objects.py +++ /dev/null @@ -1,173 +0,0 @@ -# Copyright 2015: Cisco Systems, Inc. -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import tempfile - -from rally import consts -from rally.plugins.openstack import scenario -from rally.plugins.openstack.scenarios.swift import utils -from rally.task import atomic -from rally.task import validation - - -"""Scenarios for Swift Objects.""" - - -@validation.add("required_services", services=[consts.Service.SWIFT]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure( - context={"cleanup": ["swift"]}, - name="SwiftObjects.create_container_and_object_then_list_objects", - platform="openstack") -class CreateContainerAndObjectThenListObjects(utils.SwiftScenario): - - def run(self, objects_per_container=1, object_size=1024, **kwargs): - """Create container and objects then list all objects. - - :param objects_per_container: int, number of objects to upload - :param object_size: int, temporary local object size - :param kwargs: dict, optional parameters to create container - """ - - with tempfile.TemporaryFile() as dummy_file: - # set dummy file to specified object size - dummy_file.truncate(object_size) - container_name = self._create_container(**kwargs) - for i in range(objects_per_container): - dummy_file.seek(0) - self._upload_object(container_name, dummy_file) - self._list_objects(container_name) - - -@validation.add("required_services", services=[consts.Service.SWIFT]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure( - context={"cleanup": ["swift"]}, - name="SwiftObjects.create_container_and_object_then_delete_all", - platform="openstack") -class CreateContainerAndObjectThenDeleteAll(utils.SwiftScenario): - - def run(self, objects_per_container=1, object_size=1024, **kwargs): - """Create container and objects then delete everything created. - - :param objects_per_container: int, number of objects to upload - :param object_size: int, temporary local object size - :param kwargs: dict, optional parameters to create container - """ - container_name = None - objects_list = [] - with tempfile.TemporaryFile() as dummy_file: - # set dummy file to specified object size - dummy_file.truncate(object_size) - container_name = self._create_container(**kwargs) - for i in range(objects_per_container): - dummy_file.seek(0) - object_name = self._upload_object(container_name, - dummy_file)[1] - objects_list.append(object_name) - - for object_name in objects_list: - self._delete_object(container_name, object_name) - self._delete_container(container_name) - - -@validation.add("required_services", services=[consts.Service.SWIFT]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure( - context={"cleanup": ["swift"]}, - name="SwiftObjects.create_container_and_object_then_download_object", - platform="openstack") -class CreateContainerAndObjectThenDownloadObject(utils.SwiftScenario): - - def run(self, objects_per_container=1, object_size=1024, **kwargs): - """Create container and objects then download all objects. 
- - :param objects_per_container: int, number of objects to upload - :param object_size: int, temporary local object size - :param kwargs: dict, optional parameters to create container - """ - container_name = None - objects_list = [] - with tempfile.TemporaryFile() as dummy_file: - # set dummy file to specified object size - dummy_file.truncate(object_size) - container_name = self._create_container(**kwargs) - for i in range(objects_per_container): - dummy_file.seek(0) - object_name = self._upload_object(container_name, - dummy_file)[1] - objects_list.append(object_name) - - for object_name in objects_list: - self._download_object(container_name, object_name) - - -@validation.add("required_services", services=[consts.Service.SWIFT]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure( - context={"swift_objects": {}}, - name="SwiftObjects.list_objects_in_containers", - platform="openstack") -class ListObjectsInContainers(utils.SwiftScenario): - - def run(self): - """List objects in all containers.""" - - containers = self._list_containers()[1] - - key_suffix = "container" - if len(containers) > 1: - key_suffix = "%i_containers" % len(containers) - - with atomic.ActionTimer(self, "swift.list_objects_in_%s" % key_suffix): - for container in containers: - self._list_objects(container["name"]) - - -@validation.add("required_services", services=[consts.Service.SWIFT]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure( - context={"swift_objects": {}}, - name="SwiftObjects.list_and_download_objects_in_containers", - platform="openstack") -class ListAndDownloadObjectsInContainers(utils.SwiftScenario): - - def run(self): - """List and download objects in all containers.""" - - containers = self._list_containers()[1] - - list_key_suffix = "container" - if len(containers) > 1: - list_key_suffix = "%i_containers" % len(containers) - - objects_dict = {} - with atomic.ActionTimer(self, - "swift.list_objects_in_%s" % list_key_suffix): - for container in containers: - container_name = container["name"] - objects_dict[container_name] = self._list_objects( - container_name)[1] - - objects_total = sum(map(len, objects_dict.values())) - download_key_suffix = "object" - if objects_total > 1: - download_key_suffix = "%i_objects" % objects_total - - with atomic.ActionTimer(self, - "swift.download_%s" % download_key_suffix): - for container_name, objects in objects_dict.items(): - for obj in objects: - self._download_object(container_name, obj["name"]) diff --git a/rally/plugins/openstack/scenarios/swift/utils.py b/rally/plugins/openstack/scenarios/swift/utils.py deleted file mode 100644 index 536e4564..00000000 --- a/rally/plugins/openstack/scenarios/swift/utils.py +++ /dev/null @@ -1,116 +0,0 @@ -# Copyright 2015: Cisco Systems, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
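-
-# NOTE (illustrative, not part of the original file): the helpers in this
-# module back the SwiftObjects scenarios above; a task file would reference
-# them roughly like:
-#
-#     SwiftObjects.create_container_and_object_then_list_objects:
-#       -
-#         args: {objects_per_container: 2, object_size: 1024}
-#         runner: {type: "constant", times: 10, concurrency: 2}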
-
-from rally.plugins.openstack import scenario
-from rally.task import atomic
-
-
-class SwiftScenario(scenario.OpenStackScenario):
-    """Base class for Swift scenarios with basic atomic actions."""
-
-    @atomic.action_timer("swift.list_containers")
-    def _list_containers(self, full_listing=True, **kwargs):
-        """Return list of containers.
-
-        :param full_listing: bool, if True, return an unlimited number of
-                             results; otherwise the server's listing limit
-                             applies
-        :param kwargs: dict, other optional parameters to get_account
-
-        :returns: tuple, (dict of response headers, a list of containers)
-        """
-        return self.clients("swift").get_account(full_listing=full_listing,
-                                                 **kwargs)
-
-    @atomic.action_timer("swift.create_container")
-    def _create_container(self, public=False, **kwargs):
-        """Create a new container.
-
-        :param public: bool, set container as public
-        :param kwargs: dict, other optional parameters to put_container
-
-        :returns: container name
-        """
-        if public:
-            kwargs.setdefault("headers", {})
-            kwargs["headers"].setdefault("X-Container-Read",
-                                         ".r:*,.rlistings")
-
-        container_name = self.generate_random_name()
-
-        self.clients("swift").put_container(container_name, **kwargs)
-        return container_name
-
-    @atomic.action_timer("swift.delete_container")
-    def _delete_container(self, container_name, **kwargs):
-        """Delete a container with given name.
-
-        :param container_name: str, name of the container to delete
-        :param kwargs: dict, other optional parameters to delete_container
-        """
-        self.clients("swift").delete_container(container_name, **kwargs)
-
-    @atomic.action_timer("swift.list_objects")
-    def _list_objects(self, container_name, full_listing=True, **kwargs):
-        """Return objects inside container.
-
-        :param container_name: str, name of the container to make the list
-                               objects operation against
-        :param full_listing: bool, if True, return an unlimited number of
-                             results; otherwise the server's listing limit
-                             applies
-        :param kwargs: dict, other optional parameters to get_container
-
-        :returns: tuple, (dict of response headers, a list of objects)
-        """
-        return self.clients("swift").get_container(container_name,
                                                    full_listing=full_listing,
-                                                   **kwargs)
-
-    @atomic.action_timer("swift.upload_object")
-    def _upload_object(self, container_name, content, **kwargs):
-        """Upload content to a given container.
-
-        :param container_name: str, name of the container to upload object to
-        :param content: file stream, content to upload
-        :param kwargs: dict, other optional parameters to put_object
-
-        :returns: tuple, (etag, object name)
-        """
-        object_name = self.generate_random_name()
-
-        return (self.clients("swift").put_object(container_name, object_name,
-                                                 content, **kwargs),
-                object_name)
-
-    @atomic.action_timer("swift.download_object")
-    def _download_object(self, container_name, object_name, **kwargs):
-        """Download object from container.
-
-        :param container_name: str, name of the container to download object
-                               from
-        :param object_name: str, name of the object to download
-        :param kwargs: dict, other optional parameters to get_object
-
-        :returns: tuple, (dict of response headers, the object's contents)
-        """
-        return self.clients("swift").get_object(container_name, object_name,
-                                                **kwargs)
-
-    @atomic.action_timer("swift.delete_object")
-    def _delete_object(self, container_name, object_name, **kwargs):
-        """Delete object from container.
- - :param container_name: str, name of the container to delete object from - :param object_name: str, name of the object to delete - :param kwargs: dict, other optional parameters to delete_object - """ - self.clients("swift").delete_object(container_name, object_name, - **kwargs) diff --git a/rally/plugins/openstack/scenarios/vm/__init__.py b/rally/plugins/openstack/scenarios/vm/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/rally/plugins/openstack/scenarios/vm/utils.py b/rally/plugins/openstack/scenarios/vm/utils.py deleted file mode 100644 index 7895a6b8..00000000 --- a/rally/plugins/openstack/scenarios/vm/utils.py +++ /dev/null @@ -1,232 +0,0 @@ -# Copyright 2013: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import os.path -import subprocess -import sys - -import netaddr -from oslo_config import cfg -import six - -from rally.common.i18n import _ -from rally.common import logging -from rally.common import sshutils -from rally.plugins.openstack.scenarios.nova import utils as nova_utils -from rally.plugins.openstack.wrappers import network as network_wrapper -from rally.task import atomic -from rally.task import utils - -LOG = logging.getLogger(__name__) - - -CONF = cfg.CONF - - -class Host(object): - - ICMP_UP_STATUS = "ICMP UP" - ICMP_DOWN_STATUS = "ICMP DOWN" - - name = "ip" - - def __init__(self, ip): - self.ip = netaddr.IPAddress(ip) - self.status = self.ICMP_DOWN_STATUS - - @property - def id(self): - return self.ip.format() - - @classmethod - def update_status(cls, server): - """Check ip address is pingable and update status.""" - ping = "ping" if server.ip.version == 4 else "ping6" - if sys.platform.startswith("linux"): - cmd = [ping, "-c1", "-w1", server.ip.format()] - else: - cmd = [ping, "-c1", server.ip.format()] - - proc = subprocess.Popen(cmd, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE) - proc.wait() - LOG.debug("Host %s is ICMP %s" - % (server.ip.format(), proc.returncode and "down" or "up")) - if proc.returncode == 0: - server.status = cls.ICMP_UP_STATUS - else: - server.status = cls.ICMP_DOWN_STATUS - return server - - def __eq__(self, other): - if not isinstance(other, Host): - raise TypeError("%s should be an instance of %s" % ( - other, Host.__class__.__name__)) - return self.ip == other.ip and self.status == other.status - - def __ne__(self, other): - return not self.__eq__(other) - - -class VMScenario(nova_utils.NovaScenario): - """Base class for VM scenarios with basic atomic actions. - - VM scenarios are scenarios executed inside some launched VM instance. - """ - - USER_RWX_OTHERS_RX_ACCESS_MODE = 0o755 - - RESOURCE_NAME_PREFIX = "rally_vm_" - - @atomic.action_timer("vm.run_command_over_ssh") - def _run_command_over_ssh(self, ssh, command): - """Run command inside an instance. - - This is a separate function so that only script execution is timed. - - :param ssh: A SSHClient instance. - :param command: Dictionary specifying command to execute. 
- See `rally info find VMTasks.boot_runcommand_delete' parameter - `command' docstring for explanation. - - :returns: tuple (exit_status, stdout, stderr) - """ - cmd, stdin = [], None - - interpreter = command.get("interpreter") or [] - if interpreter: - if isinstance(interpreter, six.string_types): - interpreter = [interpreter] - elif type(interpreter) != list: - raise ValueError("command 'interpreter' value must be str " - "or list type") - cmd.extend(interpreter) - - remote_path = command.get("remote_path") or [] - if remote_path: - if isinstance(remote_path, six.string_types): - remote_path = [remote_path] - elif type(remote_path) != list: - raise ValueError("command 'remote_path' value must be str " - "or list type") - cmd.extend(remote_path) - if command.get("local_path"): - ssh.put_file(os.path.expanduser( - command["local_path"]), remote_path[-1], - mode=self.USER_RWX_OTHERS_RX_ACCESS_MODE) - - if command.get("script_file"): - stdin = open(os.path.expanduser(command["script_file"]), "rb") - - elif command.get("script_inline"): - stdin = six.moves.StringIO(command["script_inline"]) - - cmd.extend(command.get("command_args") or []) - - return ssh.execute(cmd, stdin=stdin) - - def _boot_server_with_fip(self, image, flavor, use_floating_ip=True, - floating_network=None, **kwargs): - """Boot server prepared for SSH actions.""" - kwargs["auto_assign_nic"] = True - server = self._boot_server(image, flavor, **kwargs) - - if not server.networks: - raise RuntimeError( - "Server `%s' is not connected to any network. " - "Use network context for auto-assigning networks " - "or provide `nics' argument with specific net-id." % - server.name) - - if use_floating_ip: - fip = self._attach_floating_ip(server, floating_network) - else: - internal_network = list(server.networks)[0] - fip = {"ip": server.addresses[internal_network][0]["addr"]} - - return server, {"ip": fip.get("ip"), - "id": fip.get("id"), - "is_floating": use_floating_ip} - - @atomic.action_timer("vm.attach_floating_ip") - def _attach_floating_ip(self, server, floating_network): - internal_network = list(server.networks)[0] - fixed_ip = server.addresses[internal_network][0]["addr"] - - fip = network_wrapper.wrap(self.clients, self).create_floating_ip( - ext_network=floating_network, - tenant_id=server.tenant_id, fixed_ip=fixed_ip) - - self._associate_floating_ip(server, fip["ip"], fixed_address=fixed_ip) - - return fip - - @atomic.action_timer("vm.delete_floating_ip") - def _delete_floating_ip(self, server, fip): - with logging.ExceptionLogger( - LOG, _("Unable to delete IP: %s") % fip["ip"]): - if self.check_ip_address(fip["ip"])(server): - self._dissociate_floating_ip(server, fip["ip"]) - network_wrapper.wrap(self.clients, self).delete_floating_ip( - fip["id"], wait=True) - - def _delete_server_with_fip(self, server, fip, force_delete=False): - if fip["is_floating"]: - self._delete_floating_ip(server, fip) - return self._delete_server(server, force=force_delete) - - @atomic.action_timer("vm.wait_for_ssh") - def _wait_for_ssh(self, ssh, timeout=120, interval=1): - ssh.wait(timeout, interval) - - @atomic.action_timer("vm.wait_for_ping") - def _wait_for_ping(self, server_ip): - server = Host(server_ip) - utils.wait_for_status( - server, - ready_statuses=[Host.ICMP_UP_STATUS], - update_resource=Host.update_status, - timeout=CONF.benchmark.vm_ping_timeout, - check_interval=CONF.benchmark.vm_ping_poll_interval - ) - - def _run_command(self, server_ip, port, username, password, command, - pkey=None, timeout=120, interval=1): - """Run 
command via SSH on server. - - Create SSH connection for server, wait for server to become available - (there is a delay between server being set to ACTIVE and sshd being - available). Then call run_command_over_ssh to actually execute the - command. - - :param server_ip: server ip address - :param port: ssh port for SSH connection - :param username: str. ssh username for server - :param password: Password for SSH authentication - :param command: Dictionary specifying command to execute. - See `rally info find VMTasks.boot_runcommand_delete' parameter - `command' docstring for explanation. - :param pkey: key for SSH authentication - :param timeout: wait for ssh timeout. Default is 120 seconds - :param interval: ssh retry interval. Default is 1 second - - :returns: tuple (exit_status, stdout, stderr) - """ - pkey = pkey if pkey else self.context["user"]["keypair"]["private"] - ssh = sshutils.SSH(username, server_ip, port=port, - pkey=pkey, password=password) - self._wait_for_ssh(ssh, timeout, interval) - return self._run_command_over_ssh(ssh, command) diff --git a/rally/plugins/openstack/scenarios/vm/vmtasks.py b/rally/plugins/openstack/scenarios/vm/vmtasks.py deleted file mode 100644 index 9c66bd09..00000000 --- a/rally/plugins/openstack/scenarios/vm/vmtasks.py +++ /dev/null @@ -1,421 +0,0 @@ -# Copyright 2014: Rackspace UK -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
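-
-# NOTE (editorial, not part of the original file): scripts executed by the
-# scenarios below are expected to print JSON to stdout; output shaped
-# exactly like
-#
-#     {"additive": [...], "complete": [...]}
-#
-# is rendered as charts, while anything else is shown as a plain-text
-# "Script Output" area (see BootRuncommandDelete.run below).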
- -import json -import pkgutil - -from rally.common import logging -from rally.common import sshutils -from rally import consts -from rally import exceptions -from rally.plugins.openstack import scenario -from rally.plugins.openstack.scenarios.cinder import utils as cinder_utils -from rally.plugins.openstack.scenarios.vm import utils as vm_utils -from rally.plugins.openstack.services import heat -from rally.task import atomic -from rally.task import types -from rally.task import validation - - -"""Scenarios that are to be run inside VM instances.""" - - -LOG = logging.getLogger(__name__) - - -@types.convert(image={"type": "glance_image"}, - flavor={"type": "nova_flavor"}) -@validation.add("image_valid_on_flavor", flavor_param="flavor", - image_param="image", fail_on_404_image=False) -@validation.valid_command("command") -@validation.add("number", param_name="port", minval=1, maxval=65535, - nullable=True, integer_only=True) -@validation.add("external_network_exists", param_name="floating_network") -@validation.add("required_services", services=[consts.Service.NOVA, - consts.Service.CINDER]) -@validation.add("required_param_or_context", - param_name="image", ctx_name="image_command_customizer") -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup": ["nova", "cinder"], - "keypair": {}, "allow_ssh": None}, - name="VMTasks.boot_runcommand_delete", - platform="openstack") -class BootRuncommandDelete(vm_utils.VMScenario, cinder_utils.CinderBasic): - - def run(self, flavor, username, password=None, - image=None, - command=None, - volume_args=None, floating_network=None, port=22, - use_floating_ip=True, force_delete=False, wait_for_ping=True, - max_log_length=None, **kwargs): - """Boot a server, run script specified in command and delete server. - - :param image: glance image name to use for the vm. Optional - in case of specified "image_command_customizer" context - :param flavor: VM flavor name - :param username: ssh username on server, str - :param password: Password on SSH authentication - :param command: Command-specifying dictionary that either specifies - remote command path via `remote_path' (can be uploaded from a - local file specified by `local_path`), an inline script via - `script_inline' or a local script file path using `script_file'. - Both `script_file' and `local_path' are checked to be accessible - by the `file_exists' validator code. - - The `script_inline' and `script_file' both require an `interpreter' - value to specify the interpreter script should be run with. - - Note that any of `interpreter' and `remote_path' can be an array - prefixed with environment variables and suffixed with args for - the `interpreter' command. `remote_path's last component must be - a path to a command to execute (also upload destination if a - `local_path' is given). Uploading an interpreter is possible - but requires that `remote_path' and `interpreter' path do match. - - Examples: - - .. 
code-block:: python - - # Run a `local_script.pl' file sending it to a remote - # Perl interpreter - command = { - "script_file": "local_script.pl", - "interpreter": "/usr/bin/perl" - } - - # Run an inline script sending it to a remote interpreter - command = { - "script_inline": "echo 'Hello, World!'", - "interpreter": "/bin/sh" - } - - # Run a remote command - command = { - "remote_path": "/bin/false" - } - - # Copy a local command and run it - command = { - "remote_path": "/usr/local/bin/fio", - "local_path": "/home/foobar/myfiodir/bin/fio" - } - - # Copy a local command and run it with environment variable - command = { - "remote_path": ["HOME=/root", "/usr/local/bin/fio"], - "local_path": "/home/foobar/myfiodir/bin/fio" - } - - # Run an inline script sending it to a remote interpreter - command = { - "script_inline": "echo \"Hello, ${NAME:-World}\"", - "interpreter": ["NAME=Earth", "/bin/sh"] - } - - # Run an inline script sending it to an uploaded remote - # interpreter - command = { - "script_inline": "echo \"Hello, ${NAME:-World}\"", - "interpreter": ["NAME=Earth", "/tmp/sh"], - "remote_path": "/tmp/sh", - "local_path": "/home/user/work/cve/sh-1.0/bin/sh" - } - - - :param volume_args: volume args for booting server from volume - :param floating_network: external network name, for floating ip - :param port: ssh port for SSH connection - :param use_floating_ip: bool, floating or fixed IP for SSH connection - :param force_delete: whether to use force_delete for servers - :param wait_for_ping: whether to check connectivity on server creation - :param **kwargs: extra arguments for booting the server - :param max_log_length: The number of tail nova console-log lines user - would like to retrieve - :returns: dictionary with keys `data' and `errors': - data: dict, JSON output from the script - errors: str, raw data from the script's stderr stream - """ - if volume_args: - volume = self.cinder.create_volume(volume_args["size"], - imageRef=None) - kwargs["block_device_mapping"] = {"vdrally": "%s:::1" % volume.id} - - if not image: - image = self.context["tenant"]["custom_image"]["id"] - - server, fip = self._boot_server_with_fip( - image, flavor, use_floating_ip=use_floating_ip, - floating_network=floating_network, - key_name=self.context["user"]["keypair"]["name"], - **kwargs) - try: - if wait_for_ping: - self._wait_for_ping(fip["ip"]) - - code, out, err = self._run_command( - fip["ip"], port, username, password, command=command) - text_area_output = ["StdErr: %s" % (err or "(none)"), - "StdOut:"] - if code: - raise exceptions.ScriptError( - "Error running command %(command)s. 
" - "Error %(code)s: %(error)s" % { - "command": command, "code": code, "error": err}) - # Let's try to load output data - try: - data = json.loads(out) - # 'echo 42' produces very json-compatible result - # - check it here - if not isinstance(data, dict): - raise ValueError - except ValueError: - # It's not a JSON, probably it's 'script_inline' result - data = [] - except (exceptions.TimeoutException, - exceptions.SSHTimeout): - console_logs = self._get_server_console_output(server, - max_log_length) - LOG.debug("VM console logs:\n%s", console_logs) - raise - - finally: - self._delete_server_with_fip(server, fip, - force_delete=force_delete) - - if isinstance(data, dict) and set(data) == {"additive", "complete"}: - for chart_type, charts in data.items(): - for chart in charts: - self.add_output(**{chart_type: chart}) - else: - # it's a dict with several unknown lines - text_area_output.extend(out.split("\n")) - self.add_output(complete={"title": "Script Output", - "chart_plugin": "TextArea", - "data": text_area_output}) - - -@scenario.configure(context={"cleanup": ["nova", "heat"], - "keypair": {}, "network": {}}, - name="VMTasks.runcommand_heat") -class RuncommandHeat(vm_utils.VMScenario): - - def run(self, workload, template, files, parameters): - """Run workload on stack deployed by heat. - - Workload can be either file or resource: - - .. code-block:: json - - {"file": "/path/to/file.sh"} - {"resource": ["package.module", "workload.py"]} - - - Also it should contain "username" key. - - Given file will be uploaded to `gate_node` and started. This script - should print `key` `value` pairs separated by colon. These pairs will - be presented in results. - - Gate node should be accessible via ssh with keypair `key_name`, so - heat template should accept parameter `key_name`. - - :param workload: workload to run - :param template: path to heat template file - :param files: additional template files - :param parameters: parameters for heat template - """ - keypair = self.context["user"]["keypair"] - parameters["key_name"] = keypair["name"] - network = self.context["tenant"]["networks"][0] - parameters["router_id"] = network["router_id"] - self.stack = heat.main.Stack(self, self.task, - template, files=files, - parameters=parameters) - self.stack.create() - for output in self.stack.stack.outputs: - if output["output_key"] == "gate_node": - ip = output["output_value"] - break - ssh = sshutils.SSH(workload["username"], ip, pkey=keypair["private"]) - ssh.wait() - script = workload.get("resource") - if script: - script = pkgutil.get_data(*script) - else: - script = open(workload["file"]).read() - ssh.execute("cat > /tmp/.rally-workload", stdin=script) - ssh.execute("chmod +x /tmp/.rally-workload") - with atomic.ActionTimer(self, "runcommand_heat.workload"): - status, out, err = ssh.execute( - "/tmp/.rally-workload", - stdin=json.dumps(self.stack.stack.outputs)) - rows = [] - for line in out.splitlines(): - row = line.split(":") - if len(row) != 2: - raise exceptions.ScriptError("Invalid data '%s'" % line) - rows.append(row) - if not rows: - raise exceptions.ScriptError("No data returned. Original error " - "message is %s" % err) - self.add_output( - complete={"title": "Workload summary", - "description": "Data generated by workload", - "chart_plugin": "Table", - "data": { - "cols": ["key", "value"], - "rows": rows}} - ) - -BASH_DD_LOAD_TEST = """ -#!/bin/sh -# Load server and output JSON results ready to be processed -# by Rally scenario - -for ex in awk top grep free tr df dc dd gzip -do - if ! 
type ${ex} >/dev/null - then - echo "Executable is required by script but not available\ - on a server: ${ex}" >&2 - return 1 - fi -done - -get_used_cpu_percent() { - echo 100\ - $(top -b -n 1 | grep -i CPU | head -n 1 | awk '{print $8}' | tr -d %)\ - - p | dc -} - -get_used_ram_percent() { - local total=$(free | grep Mem: | awk '{print $2}') - local used=$(free | grep -- -/+\ buffers | awk '{print $3}') - echo ${used} 100 \* ${total} / p | dc -} - -get_used_disk_percent() { - df -P / | grep -v Filesystem | awk '{print $5}' | tr -d % -} - -get_seconds() { - (time -p ${1}) 2>&1 | awk '/real/{print $2}' -} - -complete_load() { - local script_file=${LOAD_SCRIPT_FILE:-/tmp/load.sh} - local stop_file=${LOAD_STOP_FILE:-/tmp/load.stop} - local processes_num=${LOAD_PROCESSES_COUNT:-20} - local size=${LOAD_SIZE_MB:-5} - - cat << EOF > ${script_file} -until test -e ${stop_file} -do dd if=/dev/urandom bs=1M count=${size} 2>/dev/null | gzip >/dev/null ; done -EOF - - local sep - local cpu - local ram - local dis - rm -f ${stop_file} - for i in $(seq ${processes_num}) - do - i=$((i-1)) - sh ${script_file} & - cpu="${cpu}${sep}[${i}, $(get_used_cpu_percent)]" - ram="${ram}${sep}[${i}, $(get_used_ram_percent)]" - dis="${dis}${sep}[${i}, $(get_used_disk_percent)]" - sep=", " - done - > ${stop_file} - cat << EOF - { - "title": "Generate load by spawning processes", - "description": "Each process runs gzip for ${size}M urandom data\ - in a loop", - "chart_plugin": "Lines", - "axis_label": "Number of processes", - "label": "Usage, %", - "data": [ - ["CPU", [${cpu}]], - ["Memory", [${ram}]], - ["Disk", [${dis}]]] - } -EOF -} - -additive_dd() { - local c=${1:-50} # Megabytes - local file=/tmp/dd_test.img - local write=$(get_seconds "dd if=/dev/urandom of=${file} bs=1M count=${c}") - local read=$(get_seconds "dd if=${file} of=/dev/null bs=1M count=${c}") - local gzip=$(get_seconds "gzip ${file}") - rm ${file}.gz - cat << EOF - { - "title": "Write, read and gzip file", - "description": "Using file '${file}', size ${c}Mb.", - "chart_plugin": "StackedArea", - "data": [ - ["write_${c}M", ${write}], - ["read_${c}M", ${read}], - ["gzip_${c}M", ${gzip}]] - }, - { - "title": "Statistics for write/read/gzip", - "chart_plugin": "StatsTable", - "data": [ - ["write_${c}M", ${write}], - ["read_${c}M", ${read}], - ["gzip_${c}M", ${gzip}]] - } - -EOF -} - -cat << EOF -{ - "additive": [$(additive_dd)], - "complete": [$(complete_load)] -} -EOF -""" - - -@types.convert(image={"type": "glance_image"}, - flavor={"type": "nova_flavor"}) -@validation.add("image_valid_on_flavor", flavor_param="flavor", - image_param="image") -@validation.valid_command("command") -@validation.add("number", param_name="port", minval=1, maxval=65535, - nullable=True, integer_only=True) -@validation.add("external_network_exists", param_name="floating_network") -@validation.add("required_services", services=[consts.Service.NOVA, - consts.Service.CINDER]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup": ["nova", "cinder"], - "keypair": {}, "allow_ssh": None}, - name="VMTasks.dd_load_test", - platform="openstack") -class DDLoadTest(BootRuncommandDelete): - - def run(self, command, **kwargs): - """Boot a server from a custom image, run a command that outputs JSON. 
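-
-        A task-file sketch (argument values are illustrative, not from the
-        original file)::
-
-            VMTasks.dd_load_test:
-              -
-                args:
-                  flavor: {name: "m1.small"}
-                  image: {name: "cirros"}
-                  username: "cirros"
-                  command: {interpreter: "/bin/sh"}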
- - Example Script in rally-jobs/extra/install_benchmark.sh - :param command: default parameter from scenario - """ - command["script_inline"] = BASH_DD_LOAD_TEST - return super(DDLoadTest, self).run(command=command, **kwargs) diff --git a/rally/plugins/openstack/scenarios/watcher/__init__.py b/rally/plugins/openstack/scenarios/watcher/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/rally/plugins/openstack/scenarios/watcher/basic.py b/rally/plugins/openstack/scenarios/watcher/basic.py deleted file mode 100644 index 6e1d4e55..00000000 --- a/rally/plugins/openstack/scenarios/watcher/basic.py +++ /dev/null @@ -1,98 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally.common import logging -from rally import consts -from rally.plugins.openstack import scenario -from rally.plugins.openstack.scenarios.watcher import utils -from rally.task import types -from rally.task import validation - - -"""Scenarios for Watcher servers.""" - - -@types.convert(strategy={"type": "watcher_strategy"}, - goal={"type": "watcher_goal"}) -@validation.add("required_services", services=[consts.Service.WATCHER]) -@validation.add("required_platform", platform="openstack", admin=True) -@scenario.configure(context={"admin_cleanup": ["watcher"]}, - name="Watcher.create_audit_template_and_delete", - platform="openstack") -class CreateAuditTemplateAndDelete(utils.WatcherScenario): - - @logging.log_deprecated_args("Extra field has been removed " - "since it isn't used.", "0.8.0", ["extra"], - once=True) - def run(self, goal, strategy): - """Create audit template and delete it. - - :param goal: The goal audit template is based on - :param strategy: The strategy used to provide resource optimization - algorithm - """ - - audit_template = self._create_audit_template(goal, strategy) - self._delete_audit_template(audit_template.uuid) - - -@validation.add("required_services", services=[consts.Service.WATCHER]) -@scenario.configure(name="Watcher.list_audit_templates", platform="openstack") -class ListAuditTemplates(utils.WatcherScenario): - - def run(self, name=None, goal=None, strategy=None, - limit=None, sort_key=None, sort_dir=None, - detail=False): - """List existing audit templates. - - Audit templates are being created by Audit Template Context. - - :param name: Name of the audit template - :param goal: Name of the goal - :param strategy: Name of the strategy - :param limit: The maximum number of results to return per - request, if: - - 1) limit > 0, the maximum number of audit templates to return. - 2) limit == 0, return the entire list of audit_templates. - 3) limit param is NOT specified (None), the number of items - returned respect the maximum imposed by the Watcher API - (see Watcher's api.max_limit option). - :param sort_key: Optional, field used for sorting. - :param sort_dir: Optional, direction of sorting, either 'asc' (the - default) or 'desc'. - :param detail: Optional, boolean whether to return detailed information - about audit_templates. 
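-
-        Example of the limit semantics above (illustrative)::
-
-            self._list_audit_templates(limit=0)   # the entire list
-            self._list_audit_templates(limit=10)  # at most 10 templates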
-        """
-
-        self._list_audit_templates(name=name, goal=goal, strategy=strategy,
-                                   limit=limit, sort_key=sort_key,
-                                   sort_dir=sort_dir, detail=detail)
-
-
-@validation.add("required_services", services=[consts.Service.WATCHER])
-@validation.add("required_contexts", contexts="audit_templates")
-@scenario.configure(context={"admin_cleanup": ["watcher"]},
-                    name="Watcher.create_audit_and_delete",
-                    platform="openstack")
-class CreateAuditAndDelete(utils.WatcherScenario):
-
-    def run(self):
-        """Create and delete audit.
-
-        Create an audit, wait until it reaches either the SUCCEEDED or the
-        FAILED state, then delete it.
-        """
-
-        audit_template_uuid = self.context["audit_templates"][0]
-        audit = self._create_audit(audit_template_uuid)
-        self._delete_audit(audit)
diff --git a/rally/plugins/openstack/scenarios/watcher/utils.py b/rally/plugins/openstack/scenarios/watcher/utils.py
deleted file mode 100644
index d8e3fe6c..00000000
--- a/rally/plugins/openstack/scenarios/watcher/utils.py
+++ /dev/null
@@ -1,74 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo_config import cfg
-
-from rally.plugins.openstack import scenario
-from rally.task import atomic
-from rally.task import utils
-
-
-CONF = cfg.CONF
-
-
-class WatcherScenario(scenario.OpenStackScenario):
-    """Base class for Watcher scenarios with basic atomic actions."""
-
-    @atomic.action_timer("watcher.create_audit_template")
-    def _create_audit_template(self, goal_id, strategy_id):
-        """Create Audit Template in DB.
-
-        :param goal_id: UUID of the goal
-        :param strategy_id: UUID of the strategy
-        :return: Audit Template object
-        """
-        return self.admin_clients("watcher").audit_template.create(
-            goal=goal_id,
-            strategy=strategy_id,
-            name=self.generate_random_name())
-
-    @atomic.action_timer("watcher.delete_audit_template")
-    def _delete_audit_template(self, audit_template):
-        """Delete Audit Template from DB.
-
-        :param audit_template: Audit Template object
-        """
-        self.admin_clients("watcher").audit_template.delete(audit_template)
-
-    @atomic.action_timer("watcher.list_audit_templates")
-    def _list_audit_templates(self, name=None, goal=None, strategy=None,
-                              limit=None, sort_key=None, sort_dir=None,
-                              detail=False):
-        return self.admin_clients("watcher").audit_template.list(
-            name=name, goal=goal, strategy=strategy, limit=limit,
-            sort_key=sort_key, sort_dir=sort_dir, detail=detail)
-
-    @atomic.action_timer("watcher.create_audit")
-    def _create_audit(self, audit_template_uuid):
-        audit = self.admin_clients("watcher").audit.create(
-            audit_template_uuid=audit_template_uuid,
-            audit_type="ONESHOT")
-        utils.wait_for_status(
-            audit,
-            ready_statuses=["SUCCEEDED"],
-            failure_statuses=["FAILED"],
-            status_attr="state",
-            update_resource=utils.get_from_manager(),
-            timeout=CONF.benchmark.watcher_audit_launch_timeout,
-            check_interval=CONF.benchmark.watcher_audit_launch_poll_interval,
-            id_attr="uuid"
-        )
-        return audit
-
-    @atomic.action_timer("watcher.delete_audit")
-    def _delete_audit(self, audit):
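-        # NOTE (editorial comment, not in the original file): unlike
-        # _create_audit above, deletion is not polled for completion.
-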
self.admin_clients("watcher").audit.delete(audit.uuid) diff --git a/rally/plugins/openstack/scenarios/zaqar/__init__.py b/rally/plugins/openstack/scenarios/zaqar/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/rally/plugins/openstack/scenarios/zaqar/basic.py b/rally/plugins/openstack/scenarios/zaqar/basic.py deleted file mode 100644 index 36ecd458..00000000 --- a/rally/plugins/openstack/scenarios/zaqar/basic.py +++ /dev/null @@ -1,67 +0,0 @@ -# Copyright (c) 2014 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import random - -from rally.common import logging -from rally.plugins.openstack import scenario -from rally.plugins.openstack.scenarios.zaqar import utils as zutils - - -"""Scenarios for Zaqar.""" - - -@scenario.configure(context={"cleanup": ["zaqar"]}, - name="ZaqarBasic.create_queue", platform="openstack") -class CreateQueue(zutils.ZaqarScenario): - - @logging.log_deprecated_args( - "The 'name_length' argument to create_queue is ignored", - "0.1.2", ["name_length"], once=True) - def run(self, name_length=None, **kwargs): - """Create a Zaqar queue with a random name. - - :param kwargs: other optional parameters to create queues like - "metadata" - """ - self._queue_create(**kwargs) - - -@scenario.configure(context={"cleanup": ["zaqar"]}, - name="ZaqarBasic.producer_consumer", platform="openstack") -class ProducerConsumer(zutils.ZaqarScenario): - - @logging.log_deprecated_args( - "The 'name_length' argument to producer_consumer is ignored", - "0.1.2", ["name_length"], once=True) - def run(self, name_length=None, - min_msg_count=50, max_msg_count=200, **kwargs): - """Serial message producer/consumer. - - Creates a Zaqar queue with random name, sends a set of messages - and then retrieves an iterator containing those. - - :param min_msg_count: min number of messages to be posted - :param max_msg_count: max number of messages to be posted - :param kwargs: other optional parameters to create queues like - "metadata" - """ - - queue = self._queue_create(**kwargs) - msg_count = random.randint(min_msg_count, max_msg_count) - messages = [{"body": {"id": idx}, "ttl": 360} for idx - in range(msg_count)] - self._messages_post(queue, messages, min_msg_count, max_msg_count) - self._messages_list(queue) - self._queue_delete(queue) diff --git a/rally/plugins/openstack/scenarios/zaqar/utils.py b/rally/plugins/openstack/scenarios/zaqar/utils.py deleted file mode 100644 index eb680239..00000000 --- a/rally/plugins/openstack/scenarios/zaqar/utils.py +++ /dev/null @@ -1,62 +0,0 @@ -# Copyright (c) 2014 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally.plugins.openstack import scenario -from rally.task import atomic - - -class ZaqarScenario(scenario.OpenStackScenario): - """Base class for Zaqar scenarios with basic atomic actions.""" - - @atomic.action_timer("zaqar.create_queue") - def _queue_create(self, **kwargs): - """Create a Zaqar queue with random name. - - :param kwargs: other optional parameters to create queues like - "metadata" - :returns: Zaqar queue instance - """ - name = self.generate_random_name() - return self.clients("zaqar").queue(name, **kwargs) - - @atomic.action_timer("zaqar.delete_queue") - def _queue_delete(self, queue): - """Removes a Zaqar queue. - - :param queue: queue to remove - """ - - queue.delete() - - def _messages_post(self, queue, messages, min_msg_count, max_msg_count): - """Post a list of messages to a given Zaqar queue. - - :param queue: post the messages to queue - :param messages: messages to post - :param min_msg_count: minimum number of messages - :param max_msg_count: maximum number of messages - """ - with atomic.ActionTimer(self, "zaqar.post_between_%s_and_%s_messages" % - (min_msg_count, max_msg_count)): - queue.post(messages) - - @atomic.action_timer("zaqar.list_messages") - def _messages_list(self, queue): - """Gets messages from a given Zaqar queue. - - :param queue: get messages from queue - :returns: messages iterator - """ - - return queue.messages() diff --git a/rally/plugins/openstack/service.py b/rally/plugins/openstack/service.py deleted file mode 100644 index 634e77e1..00000000 --- a/rally/plugins/openstack/service.py +++ /dev/null @@ -1,20 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally.task import service as base_service - - -service = base_service.service -compat_layer = base_service.compat_layer -Service = base_service.Service -should_be_overridden = base_service.should_be_overridden diff --git a/rally/plugins/openstack/services/__init__.py b/rally/plugins/openstack/services/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/rally/plugins/openstack/services/heat/__init__.py b/rally/plugins/openstack/services/heat/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/rally/plugins/openstack/services/heat/main.py b/rally/plugins/openstack/services/heat/main.py deleted file mode 100644 index 6a767159..00000000 --- a/rally/plugins/openstack/services/heat/main.py +++ /dev/null @@ -1,79 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_config import cfg - -from rally.common import utils as common_utils -from rally.task import atomic -from rally.task import utils - -CONF = cfg.CONF - - -class Stack(common_utils.RandomNameGeneratorMixin): - """Represent heat stack. - - Usage: - >>> stack = Stack(scenario, task, "template.yaml", parameters={"nodes": 3}) - >>> run_benchmark(stack) - >>> stack.update(nodes=4) - >>> run_benchmark(stack) - """ - - def __init__(self, scenario, task, template, files, parameters=None): - """Init heat wrapper. - - :param Scenario scenario: scenario instance - :param Task task: task instance - :param str template: template file path - :param dict files: dict with file name and path - :param dict parameters: parameters for template - - """ - self.scenario = scenario - self.task = task - self.template = open(template).read() - self.files = {} - self.parameters = parameters - for name, path in files.items(): - self.files[name] = open(path).read() - - def _wait(self, ready_statuses, failure_statuses): - self.stack = utils.wait_for_status( - self.stack, - check_interval=CONF.benchmark.heat_stack_create_poll_interval, - timeout=CONF.benchmark.heat_stack_create_timeout, - ready_statuses=ready_statuses, - failure_statuses=failure_statuses, - update_resource=utils.get_from_manager(), - ) - - def create(self): - with atomic.ActionTimer(self.scenario, "heat.create"): - self.stack = self.scenario.clients("heat").stacks.create( - stack_name=self.scenario.generate_random_name(), - template=self.template, - files=self.files, - parameters=self.parameters) - self.stack_id = self.stack["stack"]["id"] - self.stack = self.scenario.clients( - "heat").stacks.get(self.stack_id) - self._wait(["CREATE_COMPLETE"], ["CREATE_FAILED"]) - - def update(self, data): - self.parameters.update(data) - with atomic.ActionTimer(self.scenario, "heat.update"): - self.scenario.clients("heat").stacks.update( - self.stack_id, template=self.template, - files=self.files, parameters=self.parameters) - self._wait(["UPDATE_COMPLETE"], ["UPDATE_FAILED"]) diff --git a/rally/plugins/openstack/services/identity/__init__.py b/rally/plugins/openstack/services/identity/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/rally/plugins/openstack/services/identity/identity.py b/rally/plugins/openstack/services/identity/identity.py deleted file mode 100644 index 4ce28e04..00000000 --- a/rally/plugins/openstack/services/identity/identity.py +++ /dev/null @@ -1,249 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
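-
-# NOTE (illustrative sketch, not part of the original file): a scenario
-# would typically drive this unified service roughly as follows; the
-# constructor arguments are an assumption about the UnifiedService base:
-#
-#     identity = Identity(self._clients,
-#                         name_generator=self.generate_random_name)
-#     project = identity.create_project()
-#     user = identity.create_user(project_id=project.id)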
-
-import collections
-
-from rally.task import service
-
-
-Project = collections.namedtuple("Project", ["id", "name", "domain_id"])
-User = collections.namedtuple("User",
-                              ["id", "name", "project_id", "domain_id"])
-Service = collections.namedtuple("Service", ["id", "name"])
-Role = collections.namedtuple("Role", ["id", "name"])
-
-
-class Identity(service.UnifiedService):
-    @classmethod
-    def is_applicable(cls, clients):
-        cloud_version = clients.keystone().version.split(".")[0][1:]
-        return cloud_version == cls._meta_get("impl")._meta_get("version")
-
-    @service.should_be_overridden
-    def create_project(self, project_name=None, domain_name="Default"):
-        """Create a new project/tenant and return the project object.
-
-        :param project_name: Name of project to be created.
-        :param domain_name: Name or id of domain where to create project, for
-                            those service implementations that don't support
-                            domains you should use None or 'Default' value.
-        """
-        return self._impl.create_project(project_name,
-                                         domain_name=domain_name)
-
-    @service.should_be_overridden
-    def update_project(self, project_id, name=None, enabled=None,
-                       description=None):
-        """Update project name, enabled status and description.
-
-        :param project_id: Id of project to update
-        :param name: project name to be set
-        :param enabled: enabled status of project
-        :param description: project description to be set
-        """
-        self._impl.update_project(project_id, name=name, enabled=enabled,
-                                  description=description)
-
-    @service.should_be_overridden
-    def delete_project(self, project_id):
-        """Deletes project."""
-        return self._impl.delete_project(project_id)
-
-    @service.should_be_overridden
-    def list_projects(self):
-        """List all projects."""
-        return self._impl.list_projects()
-
-    @service.should_be_overridden
-    def get_project(self, project_id):
-        """Get project."""
-        return self._impl.get_project(project_id)
-
-    @service.should_be_overridden
-    def create_user(self, username=None, password=None, project_id=None,
-                    domain_name="Default", enabled=True,
-                    default_role="member"):
-        """Create user.
-
-        :param username: name of user
-        :param password: user password
-        :param project_id: user's default project
-        :param domain_name: Name or id of domain where to create user, for
-                            those service implementations that don't support
-                            domains you should use None or 'Default' value.
-        :param enabled: whether the user is enabled.
-        :param default_role: Name of role, for implementations that don't
-                             support domains this argument must be None or
-                             'member'.
-        """
-        # NOTE (editorial fix): forward `enabled` as well; the original
-        # accepted the argument but never passed it to the implementation.
-        return self._impl.create_user(username=username,
-                                      password=password,
-                                      project_id=project_id,
-                                      domain_name=domain_name,
-                                      enabled=enabled,
-                                      default_role=default_role)
-
-    @service.should_be_overridden
-    def create_users(self, owner_id, number_of_users, user_create_args=None):
-        """Create a specified number of users.
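-
-        For example (illustrative sketch, following the module note above)::
-
-            identity.create_users(project.id, number_of_users=3)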
- - :param owner_id: Id of tenant/project - :param number_of_users: number of users to create - :param user_create_args: additional user creation arguments - """ - return self._impl.create_users(owner_id, - number_of_users=number_of_users, - user_create_args=user_create_args) - - @service.should_be_overridden - def delete_user(self, user_id): - """Deletes user by its id.""" - self._impl.delete_user(user_id) - - @service.should_be_overridden - def list_users(self): - """List all users.""" - return self._impl.list_users() - - @service.should_be_overridden - def update_user(self, user_id, enabled=None, name=None, email=None, - password=None): - return self._impl.update_user(user_id, enabled=enabled, name=name, - email=email, password=password) - - @service.should_be_overridden - def get_user(self, user_id): - """Get user.""" - return self._impl.get_user(user_id) - - @service.should_be_overridden - def create_service(self, name=None, service_type=None, description=None): - """Creates keystone service with random name. - - :param name: name of service to create - :param service_type: type of the service - :param description: description of the service - """ - return self._impl.create_service(name=name, service_type=service_type, - description=description) - - @service.should_be_overridden - def delete_service(self, service_id): - """Deletes service.""" - self._impl.delete_service(service_id) - - @service.should_be_overridden - def list_services(self): - """List all services.""" - return self._impl.list_services() - - @service.should_be_overridden - def get_service(self, service_id): - """Get service.""" - return self._impl.get_service(service_id) - - @service.should_be_overridden - def create_role(self, name=None, domain_name=None): - """Create role with specific name - - :param name: role name - :param domain_name: Name or id of domain where to create role, for - those service implementations that don't support - domains you should use None or 'Default' value. - """ - return self._impl.create_role(name=name, domain_name=domain_name) - - @service.should_be_overridden - def add_role(self, role_id, user_id, project_id): - """Add role to user.""" - return self._impl.add_role(role_id=role_id, user_id=user_id, - project_id=project_id) - - @service.should_be_overridden - def delete_role(self, role_id): - """Deletes role.""" - self._impl.delete_role(role_id) - - @service.should_be_overridden - def revoke_role(self, role_id, user_id, project_id): - """Revokes a role from a user.""" - return self._impl.revoke_role(role_id=role_id, user_id=user_id, - project_id=project_id) - - @service.should_be_overridden - def list_roles(self, user_id=None, project_id=None, domain_name=None): - """List all roles. - - :param user_id: filter in role grants for the specified user on a - resource. Domain or project must be specified. - :param project_id: filter in role grants on the specified project. - user_id should be specified - :param domain_name: filter in role grants on the specified domain. 
- user_id should be specified - """ - return self._impl.list_roles(user_id=user_id, project_id=project_id, - domain_name=domain_name) - - @service.should_be_overridden - def get_role(self, role_id): - """Get role.""" - return self._impl.get_role(role_id) - - @service.should_be_overridden - def get_service_by_name(self, name): - """List all services to find proper one.""" - return self._impl.get_service_by_name(name) - - @service.should_be_overridden - def create_ec2credentials(self, user_id, project_id): - """Create ec2credentials. - - :param user_id: User ID for which to create credentials - :param project_id: Project ID for which to create credentials - - :returns: Created ec2-credentials object - """ - return self._impl.create_ec2credentials(user_id=user_id, - project_id=project_id) - - @service.should_be_overridden - def list_ec2credentials(self, user_id): - """List of access/secret pairs for a user_id. - - :param user_id: List all ec2-credentials for User ID - - :returns: Return ec2-credentials list - """ - return self._impl.list_ec2credentials(user_id) - - @service.should_be_overridden - def delete_ec2credential(self, user_id, access): - """Delete ec2credential. - - :param user_id: User ID for which to delete credential - :param access: access key for ec2credential to delete - """ - return self._impl.delete_ec2credential(user_id=user_id, access=access) - - @service.should_be_overridden - def fetch_token(self): - """Authenticate user token.""" - return self._impl.fetch_token() - - @service.should_be_overridden - def validate_token(self, token): - """Validate user token. - - :param token: Auth token to validate - """ - return self._impl.validate_token(token) diff --git a/rally/plugins/openstack/services/identity/keystone_common.py b/rally/plugins/openstack/services/identity/keystone_common.py deleted file mode 100644 index 49e6b9c7..00000000 --- a/rally/plugins/openstack/services/identity/keystone_common.py +++ /dev/null @@ -1,192 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
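-
-# NOTE (editorial, not part of the original file): KeystoneMixin issues raw
-# python-keystoneclient calls wrapped in atomic timers, while
-# UnifiedKeystoneMixin adapts the results of a concrete implementation
-# (self._impl) to the namedtuples declared in identity.py.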
- -from rally import osclients -from rally.plugins.openstack.services.identity import identity -from rally.task import atomic - - -class UnifiedKeystoneMixin(object): - @staticmethod - def _unify_service(service): - return identity.Service(id=service.id, name=service.name) - - @staticmethod - def _unify_role(role): - return identity.Role(id=role.id, name=role.name) - - def delete_user(self, user_id): - """Deletes user by its id.""" - return self._impl.delete_user(user_id) - - def get_user(self, user_id): - """Get user.""" - return self._unify_user(self._impl.get_user(user_id)) - - def create_service(self, name=None, service_type=None, description=None): - """Creates keystone service.""" - - return self._unify_service(self._impl.create_service( - name=name, service_type=service_type, description=description)) - - def delete_service(self, service_id): - """Deletes service.""" - return self._impl.delete_service(service_id) - - def get_service(self, service_id): - """Get service.""" - return self._unify_service(self._impl.get_service(service_id)) - - def get_service_by_name(self, name): - """List all services to find proper one.""" - return self._unify_service(self._impl.get_service_by_name(name)) - - def get_role(self, role_id): - """Get role.""" - return self._unify_role(self._impl.get_role(role_id)) - - def delete_role(self, role_id): - """Deletes role.""" - return self._impl.delete_role(role_id) - - def list_ec2credentials(self, user_id): - """List of access/secret pairs for a user_id. - - :param user_id: List all ec2-credentials for User ID - - :returns: Return ec2-credentials list - """ - return self._impl.list_ec2credentials(user_id) - - def delete_ec2credential(self, user_id, access): - """Delete ec2credential. - - :param user_id: User ID for which to delete credential - :param access: access key for ec2credential to delete - """ - return self._impl.delete_ec2credential(user_id=user_id, access=access) - - def fetch_token(self): - """Authenticate user token.""" - return self._impl.fetch_token() - - def validate_token(self, token): - """Validate user token. 
- - :param token: Auth token to validate - """ - return self._impl.validate_token(token) - - -class KeystoneMixin(object): - - def list_users(self): - aname = "keystone_v%s.list_users" % self.version - with atomic.ActionTimer(self, aname): - return self._clients.keystone(self.version).users.list() - - def delete_user(self, user_id): - """Deletes user by its id.""" - aname = "keystone_v%s.delete_user" % self.version - with atomic.ActionTimer(self, aname): - self._clients.keystone(self.version).users.delete(user_id) - - def get_user(self, user_id): - """Get user by its id.""" - aname = "keystone_v%s.get_user" % self.version - with atomic.ActionTimer(self, aname): - return self._clients.keystone(self.version).users.get(user_id) - - def delete_service(self, service_id): - """Deletes service.""" - aname = "keystone_v%s.delete_service" % self.version - with atomic.ActionTimer(self, aname): - self._clients.keystone(self.version).services.delete(service_id) - - def list_services(self): - """List all services.""" - aname = "keystone_v%s.list_services" % self.version - with atomic.ActionTimer(self, aname): - return self._clients.keystone(self.version).services.list() - - def get_service(self, service_id): - """Get service.""" - aname = "keystone_v%s.get_services" % self.version - with atomic.ActionTimer(self, aname): - return self._clients.keystone(self.version).services.get( - service_id) - - def get_service_by_name(self, name): - """List all services to find proper one.""" - for s in self.list_services(): - if s.name == name: - return s - - def delete_role(self, role_id): - """Deletes role.""" - aname = "keystone_v%s.delete_role" % self.version - with atomic.ActionTimer(self, aname): - self._clients.keystone(self.version).roles.delete(role_id) - - def list_roles(self): - """List all roles.""" - aname = "keystone_v%s.list_roles" % self.version - with atomic.ActionTimer(self, aname): - return self._clients.keystone(self.version).roles.list() - - def get_role(self, role_id): - """Get role.""" - aname = "keystone_v%s.get_role" % self.version - with atomic.ActionTimer(self, aname): - return self._clients.keystone(self.version).roles.get(role_id) - - def list_ec2credentials(self, user_id): - """List of access/secret pairs for a user_id. - - :param user_id: List all ec2-credentials for User ID - - :returns: Return ec2-credentials list - """ - aname = "keystone_v%s.list_ec2creds" % self.version - with atomic.ActionTimer(self, aname): - return self._clients.keystone(self.version).ec2.list(user_id) - - def delete_ec2credential(self, user_id, access): - """Delete ec2credential. - - :param user_id: User ID for which to delete credential - :param access: access key for ec2credential to delete - """ - aname = "keystone_v%s.delete_ec2creds" % self.version - with atomic.ActionTimer(self, aname): - self._clients.keystone(self.version).ec2.delete(user_id=user_id, - access=access) - - def fetch_token(self): - """Authenticate user token.""" - cred = self._clients.credential - aname = "keystone_v%s.fetch_token" % self.version - with atomic.ActionTimer(self, aname): - clients = osclients.Clients(credential=cred, - api_info=self._clients.api_info) - return clients.keystone.auth_ref.auth_token - - def validate_token(self, token): - """Validate user token. 
- - :param token: Auth token to validate - """ - aname = "keystone_v%s.validate_token" % self.version - with atomic.ActionTimer(self, aname): - self._clients.keystone(self.version).tokens.validate(token) diff --git a/rally/plugins/openstack/services/identity/keystone_v2.py b/rally/plugins/openstack/services/identity/keystone_v2.py deleted file mode 100644 index 6212560e..00000000 --- a/rally/plugins/openstack/services/identity/keystone_v2.py +++ /dev/null @@ -1,316 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import uuid - -from rally.plugins.openstack import service -from rally.plugins.openstack.services.identity import identity -from rally.plugins.openstack.services.identity import keystone_common -from rally.task import atomic - - -@service.service("keystone", service_type="identity", version="2") -class KeystoneV2Service(service.Service, keystone_common.KeystoneMixin): - - @atomic.action_timer("keystone_v2.create_tenant") - def create_tenant(self, tenant_name=None): - tenant_name = tenant_name or self.generate_random_name() - return self._clients.keystone("2").tenants.create(tenant_name) - - @atomic.action_timer("keystone_v2.update_tenant") - def update_tenant(self, tenant_id, name=None, enabled=None, - description=None): - """Update tenant name and description. - - :param tenant_id: Id of tenant to update - :param name: tenant name to be set (if boolean True, random name will - be set) - :param enabled: enabled status of project - :param description: tenant description to be set (if boolean True, - random description will be set) - """ - if name is True: - name = self.generate_random_name() - if description is True: - description = self.generate_random_name() - self._clients.keystone("2").tenants.update( - tenant_id, name=name, description=description, enabled=enabled) - - @atomic.action_timer("keystone_v2.delete_tenant") - def delete_tenant(self, tenant_id): - return self._clients.keystone("2").tenants.delete(tenant_id) - - @atomic.action_timer("keystone_v2.list_tenants") - def list_tenants(self): - return self._clients.keystone("2").tenants.list() - - @atomic.action_timer("keystone_v2.get_tenant") - def get_tenant(self, tenant_id): - """Get tenant.""" - return self._clients.keystone("2").tenants.get(tenant_id) - - @atomic.action_timer("keystone_v2.create_user") - def create_user(self, username=None, password=None, email=None, - tenant_id=None, enabled=True): - username = username or self.generate_random_name() - password = password or str(uuid.uuid4()) - email = email or (username + "@rally.me") - return self._clients.keystone("2").users.create(name=username, - password=password, - email=email, - tenant_id=tenant_id, - enabled=enabled) - - @atomic.action_timer("keystone_v2.create_users") - def create_users(self, tenant_id, number_of_users, user_create_args=None): - """Create specified amount of users. 
- - :param tenant_id: Id of tenant - :param number_of_users: number of users to create - :param user_create_args: additional user creation arguments - """ - users = [] - for _i in range(number_of_users): - users.append(self.create_user(tenant_id=tenant_id, - **(user_create_args or {}))) - return users - - @atomic.action_timer("keystone_v2.update_user") - def update_user(self, user_id, **kwargs): - allowed_args = ("name", "email", "enabled") - restricted = set(kwargs) - set(allowed_args) - if restricted: - raise NotImplementedError( - "Failed to update '%s', since Keystone V2 allows to update " - "only '%s'." % ("', '".join(restricted), - "', '".join(allowed_args))) - self._clients.keystone("2").users.update(user_id, **kwargs) - - @atomic.action_timer("keystone_v2.update_user_password") - def update_user_password(self, user_id, password): - self._clients.keystone("2").users.update_password(user_id, - password=password) - - @atomic.action_timer("keystone_v2.create_service") - def create_service(self, name=None, service_type=None, description=None): - """Creates keystone service. - - :param name: name of service to create - :param service_type: type of the service - :param description: description of the service - :returns: keystone service instance - """ - name = name or self.generate_random_name() - service_type = service_type or "rally_test_type" - description = description or self.generate_random_name() - return self._clients.keystone("2").services.create( - name, - service_type=service_type, - description=description) - - @atomic.action_timer("keystone_v2.create_role") - def create_role(self, name=None): - name = name or self.generate_random_name() - return self._clients.keystone("2").roles.create(name) - - @atomic.action_timer("keystone_v2.add_role") - def add_role(self, role_id, user_id, tenant_id): - self._clients.keystone("2").roles.add_user_role( - user=user_id, role=role_id, tenant=tenant_id) - - @atomic.action_timer("keystone_v2.list_roles") - def list_roles(self): - """List all roles.""" - return self._clients.keystone("2").roles.list() - - @atomic.action_timer("keystone_v2.list_roles_for_user") - def list_roles_for_user(self, user_id, tenant_id=None): - return self._clients.keystone("2").roles.roles_for_user( - user_id, tenant_id) - - @atomic.action_timer("keystone_v2.revoke_role") - def revoke_role(self, role_id, user_id, tenant_id): - self._clients.keystone("2").roles.remove_user_role(user=user_id, - role=role_id, - tenant=tenant_id) - - @atomic.action_timer("keystone_v2.create_ec2creds") - def create_ec2credentials(self, user_id, tenant_id): - """Create ec2credentials. 
- - :param user_id: User ID for which to create credentials - :param tenant_id: Tenant ID for which to create credentials - - :returns: Created ec2-credentials object - """ - return self._clients.keystone("2").ec2.create(user_id, - tenant_id=tenant_id) - - -@service.compat_layer(KeystoneV2Service) -class UnifiedKeystoneV2Service(keystone_common.UnifiedKeystoneMixin, - identity.Identity): - """Compatibility layer for Keystone V2.""" - - @staticmethod - def _check_domain(domain_name): - if domain_name.lower() != "default": - raise NotImplementedError("Domain functionality not implemented " - "in Keystone v2") - - @staticmethod - def _unify_tenant(tenant): - return identity.Project(id=tenant.id, name=tenant.name, - domain_id="default") - - @staticmethod - def _unify_user(user): - return identity.User(id=user.id, name=user.name, - project_id=getattr(user, "tenantId", None), - domain_id="default") - - def create_project(self, project_name=None, domain_name="Default"): - """Creates new project/tenant and return project object. - - :param project_name: Name of project to be created. - :param domain_name: Restricted for Keystone V2. Should not be set or - "Default" is expected. - """ - self._check_domain(domain_name) - tenant = self._impl.create_tenant(project_name) - return self._unify_tenant(tenant) - - def update_project(self, project_id, name=None, enabled=None, - description=None): - """Update project name, enabled and description - - :param project_id: Id of project to update - :param name: project name to be set - :param enabled: enabled status of project - :param description: project description to be set - """ - self._impl.update_tenant(tenant_id=project_id, name=name, - enabled=enabled, description=description) - - def delete_project(self, project_id): - """Deletes project.""" - return self._impl.delete_tenant(project_id) - - def list_projects(self): - """List all projects.""" - return [self._unify_tenant(t) for t in self._impl.list_tenants()] - - def get_project(self, project_id): - """Get project.""" - return self._unify_tenant(self._impl.get_tenant(project_id)) - - def create_user(self, username=None, password=None, project_id=None, - domain_name="Default", enabled=True, - default_role="member"): - """Create user. - - :param username: name of user - :param password: user password - :param project_id: user's default project - :param domain_name: Restricted for Keystone V2. Should not be set or - "Default" is expected. - :param enabled: whether the user is enabled. - :param default_role: Restricted for Keystone V2. Should not be set or - "member" is expected. - """ - self._check_domain(domain_name) - user = self._impl.create_user(username=username, - password=password, - tenant_id=project_id, - enabled=enabled) - return self._unify_user(user) - - def create_users(self, tenant_id, number_of_users, user_create_args=None): - """Create specified amount of users. 
-
-        :param tenant_id: Id of tenant
-        :param number_of_users: number of users to create
-        :param user_create_args: additional user creation arguments
-        """
-        if user_create_args and "domain_name" in user_create_args:
-            self._check_domain(user_create_args["domain_name"])
-        return [self._unify_user(u)
-                for u in self._impl.create_users(
-                    tenant_id=tenant_id, number_of_users=number_of_users,
-                    user_create_args=user_create_args)]
-
-    def list_users(self):
-        """List all users."""
-        return [self._unify_user(u) for u in self._impl.list_users()]
-
-    def update_user(self, user_id, enabled=None, name=None, email=None,
-                    password=None):
-        """Update user properties.
-
-        Keystone v2 updates the password through a separate API call, so it
-        is handled apart from the remaining attributes.
-        """
-        if password is not None:
-            self._impl.update_user_password(user_id=user_id,
-                                            password=password)
-
-        update_args = {}
-        if enabled is not None:
-            update_args["enabled"] = enabled
-        if name is not None:
-            update_args["name"] = name
-        if email is not None:
-            update_args["email"] = email
-
-        if update_args:
-            self._impl.update_user(user_id, **update_args)
-
-    def list_services(self):
-        """List all services."""
-        return [self._unify_service(s) for s in self._impl.list_services()]
-
-    def create_role(self, name=None, domain_name=None):
-        """Create role."""
-        if domain_name is not None:
-            raise NotImplementedError("Domain functionality not implemented "
-                                      "in Keystone v2")
-
-        return self._unify_role(self._impl.create_role(name))
-
-    def add_role(self, role_id, user_id, project_id):
-        """Add role to user."""
-        self._impl.add_role(role_id=role_id, user_id=user_id,
-                            tenant_id=project_id)
-
-    def revoke_role(self, role_id, user_id, project_id):
-        """Revokes a role from a user."""
-        return self._impl.revoke_role(role_id=role_id, user_id=user_id,
-                                      tenant_id=project_id)
-
-    def list_roles(self, user_id=None, project_id=None, domain_name=None):
-        """List all roles."""
-        if domain_name:
-            raise NotImplementedError("Domain functionality not implemented "
-                                      "in Keystone v2")
-        if user_id:
-            roles = self._impl.list_roles_for_user(user_id,
-                                                   tenant_id=project_id)
-        else:
-            roles = self._impl.list_roles()
-        return [self._unify_role(role) for role in roles]
-
-    def create_ec2credentials(self, user_id, project_id):
-        """Create ec2credentials.
-
-        :param user_id: User ID for which to create credentials
-        :param project_id: Project ID for which to create credentials
-
-        :returns: Created ec2-credentials object
-        """
-        return self._impl.create_ec2credentials(user_id=user_id,
-                                                tenant_id=project_id)
diff --git a/rally/plugins/openstack/services/identity/keystone_v3.py b/rally/plugins/openstack/services/identity/keystone_v3.py
deleted file mode 100644
index e2bce658..00000000
--- a/rally/plugins/openstack/services/identity/keystone_v3.py
+++ /dev/null
@@ -1,340 +0,0 @@
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
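The v2 compatibility layer above does two jobs: it converts Keystone v2 tenants and users into the unified record types shared with v3, and it rejects any domain argument up front, since v2 predates domains. A condensed sketch of that normalization, assuming `Project` mirrors the namedtuple defined in identity.py (these helpers are simplified stand-ins, not the class above):

# Sketch under stated assumptions; not Rally's actual compat layer.
import collections

Project = collections.namedtuple("Project", ["id", "name", "domain_id"])


def check_domain(domain_name):
    # Keystone v2 only knows the implicit "default" domain; anything
    # else is rejected before any API call is made.
    if domain_name.lower() != "default":
        raise NotImplementedError("Domain functionality not implemented "
                                  "in Keystone v2")


def unify_tenant(tenant):
    # Every v2 tenant is reported as living in the "default" domain,
    # which lets v2 and v3 results share one record type.
    return Project(id=tenant.id, name=tenant.name, domain_id="default")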
- -from rally.common import logging -from rally import exceptions -from rally.plugins.openstack import service -from rally.plugins.openstack.services.identity import identity -from rally.plugins.openstack.services.identity import keystone_common -from rally.task import atomic - - -LOG = logging.getLogger(__name__) - - -@service.service("keystone", service_type="identity", version="3") -class KeystoneV3Service(service.Service, keystone_common.KeystoneMixin): - - def _get_domain_id(self, domain_name_or_id): - from keystoneclient import exceptions as kc_exceptions - - try: - # First try to find domain by ID - return self._clients.keystone("3").domains.get( - domain_name_or_id).id - except kc_exceptions.NotFound: - # Domain not found by ID, try to find it by name - domains = self._clients.keystone("3").domains.list( - name=domain_name_or_id) - if domains: - return domains[0].id - # Domain not found by name - raise exceptions.GetResourceNotFound( - resource="KeystoneDomain(%s)" % domain_name_or_id) - - @atomic.action_timer("keystone_v3.create_project") - def create_project(self, project_name=None, domain_name="Default"): - project_name = project_name or self.generate_random_name() - domain_id = self._get_domain_id(domain_name) - return self._clients.keystone("3").projects.create(name=project_name, - domain=domain_id) - - @atomic.action_timer("keystone_v3.update_project") - def update_project(self, project_id, name=None, enabled=None, - description=None): - """Update tenant name and description. - - :param project_id: Id of project to update - :param name: project name to be set (if boolean True, random name will - be set) - :param enabled: enabled status of project - :param description: project description to be set (if boolean True, - random description will be set) - """ - if name is True: - name = self.generate_random_name() - if description is True: - description = self.generate_random_name() - self._clients.keystone("3").projects.update( - project_id, name=name, description=description, enabled=enabled) - - @atomic.action_timer("keystone_v3.delete_project") - def delete_project(self, project_id): - self._clients.keystone("3").projects.delete(project_id) - - @atomic.action_timer("keystone_v3.list_projects") - def list_projects(self): - return self._clients.keystone("3").projects.list() - - @atomic.action_timer("keystone_v3.get_project") - def get_project(self, project_id): - """Get project.""" - return self._clients.keystone("3").projects.get(project_id) - - @atomic.action_timer("keystone_v3.create_user") - def create_user(self, username=None, password=None, project_id=None, - domain_name="Default", enabled=True, - default_role="member"): - """Create user. - - - :param username: name of user - :param password: user password - :param project_id: user's default project - :param domain_name: Name or id of domain where to create project. - :param enabled: whether the user is enabled. 
- :param default_role: user's default role - """ - domain_id = self._get_domain_id(domain_name) - username = username or self.generate_random_name() - user = self._clients.keystone("3").users.create( - name=username, password=password, default_project=project_id, - domain=domain_id, enabled=enabled) - - if project_id: - # we can't setup role without project_id - roles = self.list_roles() - for role in roles: - if default_role == role.name.lower(): - self.add_role(role_id=role.id, - user_id=user.id, - project_id=project_id) - return user - for role in roles: - if default_role == role.name.lower().strip("_"): - self.add_role(role_id=role.id, - user_id=user.id, - project_id=project_id) - return user - - LOG.warning("Unable to set %s role to created user." % - default_role) - return user - - @atomic.action_timer("keystone_v3.create_users") - def create_users(self, project_id, number_of_users, user_create_args=None): - """Create specified amount of users. - - :param project_id: Id of project - :param number_of_users: number of users to create - :param user_create_args: additional user creation arguments - """ - users = [] - for _i in range(number_of_users): - users.append(self.create_user(project_id=project_id, - **(user_create_args or {}))) - return users - - @atomic.action_timer("keystone_v3.update_user") - def update_user(self, user_id, name=None, domain_name=None, - project_id=None, password=None, email=None, - description=None, enabled=None, default_project=None): - domain = None - if domain_name: - domain = self._get_domain_id(domain_name) - - self._clients.keystone("3").users.update( - user_id, name=name, domain=domain, project=project_id, - password=password, email=email, description=description, - enabled=enabled, default_project=default_project) - - @atomic.action_timer("keystone_v3.create_service") - def create_service(self, name=None, service_type=None, description=None, - enabled=True): - """Creates keystone service. 
- - :param name: name of service to create - :param service_type: type of the service - :param description: description of the service - :param enabled: whether the service appears in the catalog - :returns: keystone service instance - """ - name = name or self.generate_random_name() - service_type = service_type or "rally_test_type" - description = description or self.generate_random_name() - return self._clients.keystone("3").services.create( - name, type=service_type, description=description, enabled=enabled) - - @atomic.action_timer("keystone_v3.create_role") - def create_role(self, name=None, domain_name=None): - domain_id = None - if domain_name: - domain_id = self._get_domain_id(domain_name) - name = name or self.generate_random_name() - return self._clients.keystone("3").roles.create(name, domain=domain_id) - - @atomic.action_timer("keystone_v3.add_role") - def add_role(self, role_id, user_id, project_id): - self._clients.keystone("3").roles.grant(role=role_id, - user=user_id, - project=project_id) - - @atomic.action_timer("keystone_v3.list_roles") - def list_roles(self, user_id=None, project_id=None, domain_name=None): - """List all roles.""" - domain_id = None - if domain_name: - domain_id = self._get_domain_id(domain_name) - return self._clients.keystone("3").roles.list(user=user_id, - project=project_id, - domain=domain_id) - - @atomic.action_timer("keystone_v3.revoke_role") - def revoke_role(self, role_id, user_id, project_id): - self._clients.keystone("3").roles.revoke(role=role_id, - user=user_id, - project=project_id) - - @atomic.action_timer("keystone_v3.create_domain") - def create_domain(self, name, description=None, enabled=True): - return self._clients.keystone("3").domains.create( - name, description=description, enabled=enabled) - - @atomic.action_timer("keystone_v3.create_ec2creds") - def create_ec2credentials(self, user_id, project_id): - """Create ec2credentials. - - :param user_id: User ID for which to create credentials - :param project_id: Tenant ID for which to create credentials - - :returns: Created ec2-credentials object - """ - return self._clients.keystone("3").ec2.create(user_id, - project_id=project_id) - - -@service.compat_layer(KeystoneV3Service) -class UnifiedKeystoneV3Service(keystone_common.UnifiedKeystoneMixin, - identity.Identity): - - @staticmethod - def _unify_project(project): - return identity.Project(id=project.id, name=project.name, - domain_id=project.domain_id) - - @staticmethod - def _unify_user(user): - # When user has default_project_id that is None user.default_project_id - # will raise AttributeError - project_id = getattr(user, "project_id", - getattr(user, "default_project_id", None)) - return identity.User(id=user.id, name=user.name, project_id=project_id, - domain_id=user.domain_id) - - def create_project(self, project_name=None, domain_name="Default"): - """Creates new project/tenant and return project object. - - :param project_name: Name of project to be created. 
- :param domain_name: Name or id of domain where to create project, - """ - project = self._impl.create_project(project_name, - domain_name=domain_name) - return self._unify_project(project) - - def update_project(self, project_id, name=None, enabled=None, - description=None): - """Update project name, enabled and description - - :param project_id: Id of project to update - :param name: project name to be set - :param enabled: enabled status of project - :param description: project description to be set - """ - self._impl.update_project(project_id=project_id, name=name, - enabled=enabled, description=description) - - def delete_project(self, project_id): - """Deletes project.""" - return self._impl.delete_project(project_id) - - def list_projects(self): - """List all projects.""" - return [self._unify_project(p) for p in self._impl.list_projects()] - - def get_project(self, project_id): - """Get project.""" - return self._unify_project(self._impl.get_project(project_id)) - - def create_user(self, username=None, password=None, project_id=None, - domain_name="Default", enabled=True, - default_role="member"): - """Create user. - - :param username: name of user - :param password: user password - :param project_id: user's default project - :param domain_name: Name or id of domain where to create project, - :param enabled: whether the user is enabled. - :param default_role: Name of default user's role - """ - return self._unify_user(self._impl.create_user( - username=username, password=password, project_id=project_id, - domain_name=domain_name, default_role=default_role, - enabled=enabled)) - - def create_users(self, project_id, number_of_users, user_create_args=None): - """Create specified amount of users. - - :param project_id: Id of project - :param number_of_users: number of users to create - :param user_create_args: additional user creation arguments - """ - return [self._unify_user(u) - for u in self._impl.create_users( - project_id=project_id, number_of_users=number_of_users, - user_create_args=user_create_args)] - - def list_users(self): - """List all users.""" - return [self._unify_user(u) for u in self._impl.list_users()] - - def update_user(self, user_id, enabled=None, name=None, email=None, - password=None): - return self._impl.update_user(user_id, enabled=enabled, name=name, - email=email, password=password) - - def list_services(self): - """List all services.""" - return [self._unify_service(s) for s in self._impl.list_services()] - - def create_role(self, name=None, domain_name=None): - """Add role to user.""" - return self._unify_role(self._impl.create_role( - name, domain_name=domain_name)) - - def add_role(self, role_id, user_id, project_id): - """Add role to user.""" - self._impl.add_role(role_id=role_id, user_id=user_id, - project_id=project_id) - - def revoke_role(self, role_id, user_id, project_id): - """Revokes a role from a user.""" - return self._impl.revoke_role(role_id=role_id, user_id=user_id, - project_id=project_id) - - def list_roles(self, user_id=None, project_id=None, domain_name=None): - """List all roles.""" - return [self._unify_role(role) for role in self._impl.list_roles( - user_id=user_id, project_id=project_id, domain_name=domain_name)] - - def create_ec2credentials(self, user_id, project_id): - """Create ec2credentials. 
- - :param user_id: User ID for which to create credentials - :param project_id: Project ID for which to create credentials - - :returns: Created ec2-credentials object - """ - return self._impl.create_ec2credentials(user_id=user_id, - project_id=project_id) diff --git a/rally/plugins/openstack/services/image/__init__.py b/rally/plugins/openstack/services/image/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/rally/plugins/openstack/services/image/glance_common.py b/rally/plugins/openstack/services/image/glance_common.py deleted file mode 100644 index 40373ab7..00000000 --- a/rally/plugins/openstack/services/image/glance_common.py +++ /dev/null @@ -1,71 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from glanceclient import exc as glance_exc - -from rally import exceptions -from rally.plugins.openstack.services.image import image as image_service -from rally.task import atomic - - -class GlanceMixin(object): - - def _get_client(self): - return self._clients.glance(self.version) - - def get_image(self, image): - """Get specified image. - - :param image: ID or object with ID of image to obtain. - """ - image_id = getattr(image, "id", image) - try: - aname = "glance_v%s.get_image" % self.version - with atomic.ActionTimer(self, aname): - return self._get_client().images.get(image_id) - except glance_exc.HTTPNotFound: - raise exceptions.GetResourceNotFound(resource=image) - - def delete_image(self, image_id): - """Delete image.""" - aname = "glance_v%s.delete_image" % self.version - with atomic.ActionTimer(self, aname): - self._get_client().images.delete(image_id) - - -class UnifiedGlanceMixin(object): - - @staticmethod - def _unify_image(image): - if hasattr(image, "visibility"): - return image_service.UnifiedImage(id=image.id, name=image.name, - status=image.status, - visibility=image.visibility) - else: - return image_service.UnifiedImage( - id=image.id, name=image.name, - status=image.status, - visibility=("public" if image.is_public else "private")) - - def get_image(self, image): - """Get specified image. - - :param image: ID or object with ID of image to obtain. - """ - image_obj = self._impl.get_image(image=image) - return self._unify_image(image_obj) - - def delete_image(self, image_id): - """Delete image.""" - self._impl.delete_image(image_id=image_id) diff --git a/rally/plugins/openstack/services/image/glance_v1.py b/rally/plugins/openstack/services/image/glance_v1.py deleted file mode 100644 index 03aa6a14..00000000 --- a/rally/plugins/openstack/services/image/glance_v1.py +++ /dev/null @@ -1,205 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import os - -from oslo_config import cfg - -from rally.common import utils as rutils -from rally.plugins.openstack import service -from rally.plugins.openstack.services.image import glance_common -from rally.plugins.openstack.services.image import image -from rally.task import atomic -from rally.task import utils - -CONF = cfg.CONF - - -@service.service("glance", service_type="image", version="1") -class GlanceV1Service(service.Service, glance_common.GlanceMixin): - - @atomic.action_timer("glance_v1.create_image") - def create_image(self, image_name=None, container_format=None, - image_location=None, disk_format=None, - is_public=True, min_disk=0, min_ram=0): - """Creates new image. - - :param image_name: Image name for which need to be created - :param container_format: Container format - :param image_location: The new image's location - :param disk_format: Disk format - :param is_public: The created image's public status - :param min_disk: The min disk of created images - :param min_ram: The min ram of created images - """ - image_location = os.path.expanduser(image_location) - image_name = image_name or self.generate_random_name() - kwargs = {} - - try: - if os.path.isfile(image_location): - kwargs["data"] = open(image_location) - else: - kwargs["copy_from"] = image_location - - image_obj = self._clients.glance("1").images.create( - name=image_name, - container_format=container_format, - disk_format=disk_format, - is_public=is_public, - min_disk=min_disk, - min_ram=min_ram, - **kwargs) - - rutils.interruptable_sleep(CONF.benchmark. - glance_image_create_prepoll_delay) - - image_obj = utils.wait_for_status( - image_obj, ["active"], - update_resource=self.get_image, - timeout=CONF.benchmark.glance_image_create_timeout, - check_interval=CONF.benchmark.glance_image_create_poll_interval - ) - - finally: - if "data" in kwargs: - kwargs["data"].close() - - return image_obj - - @atomic.action_timer("glance_v1.update_image") - def update_image(self, image_id, image_name=None, min_disk=0, - min_ram=0): - """Update image. - - :param image_id: ID of image to update - :param image_name: Image name to be updated to - :param min_disk: The min disk of updated image - :param min_ram: The min ram of updated image - """ - image_name = image_name or self.generate_random_name() - - return self._clients.glance("1").images.update(image_id=image_id, - name=image_name, - min_disk=min_disk, - min_ram=min_ram) - - @atomic.action_timer("glance_v1.list_images") - def list_images(self, status="active", is_public=None, owner=None): - """List images. - - :param status: Filter in images for the specified status - :param is_public: Filter in images for the specified public status - :param owner: Filter in images for tenant ID - """ - images = self._clients.glance("1").images.list(status=status, - owner=owner) - if is_public in [True, False]: - return [i for i in images if i.is_public is is_public] - return images - - @atomic.action_timer("glance_v1.set_visibility") - def set_visibility(self, image_id, is_public=True): - """Update visibility. 
-
-        :param image_id: ID of image to update
-        :param is_public: Image is public or not
-        """
-        self._clients.glance("1").images.update(image_id, is_public=is_public)
-
-
-@service.compat_layer(GlanceV1Service)
-class UnifiedGlanceV1Service(glance_common.UnifiedGlanceMixin, image.Image):
-    """Compatibility layer for Glance V1."""
-
-    @staticmethod
-    def _check_v1_visibility(visibility):
-        visibility_values = ["public", "private"]
-        if visibility and visibility not in visibility_values:
-            raise image.VisibilityException(
-                message="Improper visibility value: %s in glance_v1"
-                        % visibility)
-
-    def create_image(self, image_name=None, container_format=None,
-                     image_location=None, disk_format=None,
-                     visibility="public", min_disk=0,
-                     min_ram=0):
-        """Creates new image.
-
-        :param image_name: name of the image to create
-        :param container_format: Container format
-        :param image_location: The new image's location
-        :param disk_format: Disk format
-        :param visibility: The created image's visible status
-        :param min_disk: The min disk of created images
-        :param min_ram: The min ram of created images
-        """
-        self._check_v1_visibility(visibility)
-
-        is_public = visibility != "private"
-        image_obj = self._impl.create_image(
-            image_name=image_name,
-            container_format=container_format,
-            image_location=image_location,
-            disk_format=disk_format,
-            is_public=is_public,
-            min_disk=min_disk,
-            min_ram=min_ram)
-        return self._unify_image(image_obj)
-
-    def update_image(self, image_id, image_name=None, min_disk=0,
-                     min_ram=0, remove_props=None):
-        """Update image.
-
-        :param image_id: ID of image to update
-        :param image_name: Image name to be updated to
-        :param min_disk: The min disk of updated image
-        :param min_ram: The min ram of updated image
-        :param remove_props: List of property names to remove
-        """
-        if remove_props is not None:
-            raise image.RemovePropsException(
-                "Removing properties (%s) is not supported in glance_v1"
-                % remove_props)
-        image_obj = self._impl.update_image(
-            image_id=image_id,
-            image_name=image_name,
-            min_disk=min_disk,
-            min_ram=min_ram)
-        return self._unify_image(image_obj)
-
-    def list_images(self, status="active", visibility=None, owner=None):
-        """List images.
-
-        :param status: Filter in images for the specified status
-        :param visibility: Filter in images for the specified visibility
-        :param owner: Filter in images for tenant ID
-        """
-        self._check_v1_visibility(visibility)
-
-        is_public = visibility != "private"
-
-        images = self._impl.list_images(status=status, is_public=is_public)
-        return [self._unify_image(i) for i in images]
-
-    def set_visibility(self, image_id, visibility="public"):
-        """Update visibility.
-
-        :param image_id: ID of image to update
-        :param visibility: The visibility of specified image
-        """
-        self._check_v1_visibility(visibility)
-
-        is_public = visibility != "private"
-        self._impl.set_visibility(image_id=image_id, is_public=is_public)
diff --git a/rally/plugins/openstack/services/image/glance_v2.py b/rally/plugins/openstack/services/image/glance_v2.py
deleted file mode 100644
index afe1be8c..00000000
--- a/rally/plugins/openstack/services/image/glance_v2.py
+++ /dev/null
@@ -1,215 +0,0 @@
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License.
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import os -import time - -from oslo_config import cfg -import requests - -from rally.common import utils as rutils -from rally.plugins.openstack import service -from rally.plugins.openstack.services.image import glance_common -from rally.plugins.openstack.services.image import image -from rally.task import atomic -from rally.task import utils - -CONF = cfg.CONF - - -@service.service("glance", service_type="image", version="2") -class GlanceV2Service(service.Service, glance_common.GlanceMixin): - - @atomic.action_timer("glance_v2.create_image") - def create_image(self, image_name=None, container_format=None, - image_location=None, disk_format=None, - visibility=None, min_disk=0, - min_ram=0): - """Creates new image. - - :param image_name: Image name for which need to be created - :param container_format: Container format - :param image_location: The new image's location - :param disk_format: Disk format - :param visibility: The created image's visible status. - :param min_disk: The min disk of created images - :param min_ram: The min ram of created images - """ - image_name = image_name or self.generate_random_name() - - image_obj = self._clients.glance("2").images.create( - name=image_name, - container_format=container_format, - disk_format=disk_format, - visibility=visibility, - min_disk=min_disk, - min_ram=min_ram) - - image_location = os.path.expanduser(image_location) - rutils.interruptable_sleep(CONF.benchmark. - glance_image_create_prepoll_delay) - - start = time.time() - image_obj = utils.wait_for_status( - image_obj.id, ["queued"], - update_resource=self.get_image, - timeout=CONF.benchmark.glance_image_create_timeout, - check_interval=CONF.benchmark.glance_image_create_poll_interval) - timeout = time.time() - start - - image_data = None - response = None - try: - if os.path.isfile(image_location): - image_data = open(image_location) - else: - response = requests.get(image_location, stream=True) - image_data = response.raw - self._clients.glance("2").images.upload(image_obj.id, image_data) - finally: - if image_data is not None: - image_data.close() - if response is not None: - response.close() - - image_obj = utils.wait_for_status( - image_obj, ["active"], - update_resource=self.get_image, - timeout=timeout, - check_interval=CONF.benchmark.glance_image_create_poll_interval) - return image_obj - - @atomic.action_timer("glance_v2.update_image") - def update_image(self, image_id, image_name=None, min_disk=0, - min_ram=0, remove_props=None): - """Update image. - - :param image_id: ID of image to update - :param image_name: Image name to be updated to - :param min_disk: The min disk of updated image - :param min_ram: The min ram of updated image - :param remove_props: List of property names to remove - """ - image_name = image_name or self.generate_random_name() - - return self._clients.glance("2").images.update( - image_id=image_id, - name=image_name, - min_disk=min_disk, - min_ram=min_ram, - remove_props=remove_props) - - @atomic.action_timer("glance_v2.list_images") - def list_images(self, status="active", visibility=None, owner=None): - """List images. 
- - :param status: Filter in images for the specified status - :param visibility: Filter in images for the specified visibility - :param owner: Filter in images for tenant ID - """ - filters = {} - filters["status"] = status - if visibility: - filters["visibility"] = visibility - if owner: - filters["owner"] = owner - images = self._clients.glance("2").images.list(filters=filters) - return images - - @atomic.action_timer("glance_v2.set_visibility") - def set_visibility(self, image_id, visibility="shared"): - """Update visibility. - - :param image_id: ID of image to update - :param visibility: The visibility of specified image - """ - self._clients.glance("2").images.update(image_id, - visibility=visibility) - - -@service.compat_layer(GlanceV2Service) -class UnifiedGlanceV2Service(glance_common.UnifiedGlanceMixin, image.Image): - """Compatibility layer for Glance V2.""" - - @staticmethod - def _check_v2_visibility(visibility): - visibility_values = ["public", "private", "shared", "community"] - if visibility and visibility not in visibility_values: - raise image.VisibilityException( - message="Improper visibility value: %s in glance_v2" - % visibility) - - def create_image(self, image_name=None, container_format=None, - image_location=None, disk_format=None, - visibility=None, min_disk=0, - min_ram=0): - """Creates new image. - - :param image_name: Image name for which need to be created - :param container_format: Container format - :param image_location: The new image's location - :param disk_format: Disk format - :param visibility: The access permission for the created image. - :param min_disk: The min disk of created images - :param min_ram: The min ram of created images - """ - image_obj = self._impl.create_image( - image_name=image_name, - container_format=container_format, - image_location=image_location, - disk_format=disk_format, - visibility=visibility, - min_disk=min_disk, - min_ram=min_ram) - return self._unify_image(image_obj) - - def update_image(self, image_id, image_name=None, min_disk=0, - min_ram=0, remove_props=None): - """Update image. - - :param image_id: ID of image to update - :param image_name: Image name to be updated to - :param min_disk: The min disk of updated image - :param min_ram: The min ram of updated image - :param remove_props: List of property names to remove - """ - image_obj = self._impl.update_image( - image_id=image_id, - image_name=image_name, - min_disk=min_disk, - min_ram=min_ram, - remove_props=remove_props) - return self._unify_image(image_obj) - - def list_images(self, status="active", visibility=None, owner=None): - """List images. - - :param status: Filter in images for the specified status - :param visibility: Filter in images for the specified visibility - :param owner: Filter in images for tenant ID - """ - self._check_v2_visibility(visibility) - - images = self._impl.list_images(status=status, visibility=visibility) - return [self._unify_image(i) for i in images] - - def set_visibility(self, image_id, visibility="shared"): - """Update visibility. 
- - :param image_id: ID of image to update - :param visibility: The visibility of specified image - """ - self._check_v2_visibility(visibility) - - self._impl.set_visibility(image_id=image_id, visibility=visibility) diff --git a/rally/plugins/openstack/services/image/image.py b/rally/plugins/openstack/services/image/image.py deleted file mode 100644 index 8a30e0bf..00000000 --- a/rally/plugins/openstack/services/image/image.py +++ /dev/null @@ -1,122 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import collections - -from rally import exceptions -from rally.task import service - -from oslo_config import cfg - -CONF = cfg.CONF - -UnifiedImage = collections.namedtuple("Image", ["id", "name", "visibility", - "status"]) - - -class VisibilityException(exceptions.RallyException): - """Wrong visibility value exception. - - """ - error_code = 531 - - -class RemovePropsException(exceptions.RallyException): - """Remove Props it not supported exception. - - """ - error_code = 560 - - -class Image(service.UnifiedService): - @classmethod - def is_applicable(cls, clients): - cloud_version = str(clients.glance().version).split(".")[0] - return cloud_version == cls._meta_get("impl")._meta_get("version") - - @service.should_be_overridden - def create_image(self, image_name=None, container_format=None, - image_location=None, disk_format=None, - visibility="private", min_disk=0, - min_ram=0): - """Creates new image. - - :param image_name: Image name for which need to be created - :param container_format: Container format - :param image_location: The new image's location - :param disk_format: Disk format - :param visibility: The access permission for the created image. - :param min_disk: The min disk of created images - :param min_ram: The min ram of created images - """ - image = self._impl.create_image( - image_name=image_name, - container_format=container_format, - image_location=image_location, - disk_format=disk_format, - visibility=visibility, - min_disk=min_disk, - min_ram=min_ram) - return image - - @service.should_be_overridden - def update_image(self, image_id, image_name=None, - min_disk=0, min_ram=0, remove_props=None): - """Update image. - - :param image_id: ID of image to update - :param image_name: Image name to be updated to - :param min_disk: The min disk of updated image - :param min_ram: The min ram of updated image - :param remove_props: List of property names to remove - """ - return self._impl.update_image( - image_id, - image_name=image_name, - min_disk=min_disk, - min_ram=min_ram, - remove_props=remove_props) - - @service.should_be_overridden - def list_images(self, status="active", visibility=None, owner=None): - """List images. 
- - :param status: Filter in images for the specified status - :param visibility: Filter in images for the specified visibility - :param owner: Filter in images for tenant ID - """ - return self._impl.list_images(status=status, - visibility=visibility, - owner=owner) - - @service.should_be_overridden - def set_visibility(self, image_id, visibility="public"): - """Update visibility. - - :param image_id: ID of image to update - :param visibility: The visibility of specified image - """ - self._impl.set_visibility(image_id, visibility=visibility) - - @service.should_be_overridden - def get_image(self, image): - """Get specified image. - - :param image: ID or object with ID of image to obtain. - """ - return self._impl.get_image(image) - - @service.should_be_overridden - def delete_image(self, image_id): - """delete image.""" - self._impl.delete_image(image_id) diff --git a/rally/plugins/openstack/services/storage/__init__.py b/rally/plugins/openstack/services/storage/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/rally/plugins/openstack/services/storage/block.py b/rally/plugins/openstack/services/storage/block.py deleted file mode 100644 index 9e52f332..00000000 --- a/rally/plugins/openstack/services/storage/block.py +++ /dev/null @@ -1,424 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import collections - -from oslo_config import cfg - -from rally.task import service - - -CONF = cfg.CONF - - -Volume = collections.namedtuple("Volume", ["id", "name", "size", "status"]) -VolumeSnapshot = collections.namedtuple("VolumeSnapshot", ["id", "name", - "volume_id", - "status"]) -VolumeBackup = collections.namedtuple("VolumeBackup", ["id", "name", - "volume_id", - "status"]) -VolumeTransfer = collections.namedtuple("VolumeTransfer", ["id", "name", - "volume_id", - "auth_key"]) -VolumeEncryptionType = collections.namedtuple("VolumeEncryptionType", - ["id", "volume_type_id"]) -QoSSpecs = collections.namedtuple("QoSSpecs", ["id", "name", "specs"]) - - -class BlockStorage(service.UnifiedService): - - @service.should_be_overridden - def create_volume(self, size, consistencygroup_id=None, - group_id=None, snapshot_id=None, source_volid=None, - name=None, description=None, - volume_type=None, user_id=None, - project_id=None, availability_zone=None, - metadata=None, imageRef=None, scheduler_hints=None, - source_replica=None, multiattach=False): - """Creates a volume. 
- - :param size: Size of volume in GB - :param consistencygroup_id: ID of the consistencygroup - :param group_id: ID of the group - :param snapshot_id: ID of the snapshot - :param name: Name of the volume - :param description: Description of the volume - :param volume_type: Type of volume - :param user_id: User id derived from context - :param project_id: Project id derived from context - :param availability_zone: Availability Zone to use - :param metadata: Optional metadata to set on volume creation - :param imageRef: reference to an image stored in glance - :param source_volid: ID of source volume to clone from - :param source_replica: ID of source volume to clone replica - :param scheduler_hints: (optional extension) arbitrary key-value pairs - specified by the client to help boot an instance - :param multiattach: Allow the volume to be attached to more than - one instance - - :returns: Return a new volume. - """ - return self._impl.create_volume( - size, consistencygroup_id=consistencygroup_id, group_id=group_id, - snapshot_id=snapshot_id, source_volid=source_volid, - name=name, description=description, volume_type=volume_type, - user_id=user_id, project_id=project_id, - availability_zone=availability_zone, metadata=metadata, - imageRef=imageRef, scheduler_hints=scheduler_hints, - source_replica=source_replica, multiattach=multiattach) - - @service.should_be_overridden - def list_volumes(self, detailed=True): - """Lists all volumes. - - :param detailed: Whether to return detailed volume info. - :returns: Return volumes list. - """ - return self._impl.list_volumes(detailed=detailed) - - @service.should_be_overridden - def get_volume(self, volume_id): - """Get a volume. - - :param volume_id: The ID of the volume to get. - - :returns: Return the volume. - """ - return self._impl.get_volume(volume_id) - - @service.should_be_overridden - def update_volume(self, volume_id, - name=None, description=None): - """Update the name or description for a volume. - - :param volume_id: The updated volume id. - :param name: The volume name. - :param description: The volume description. - - :returns: The updated volume. - """ - return self._impl.update_volume( - volume_id, name=name, description=description) - - @service.should_be_overridden - def delete_volume(self, volume): - """Delete a volume.""" - self._impl.delete_volume(volume) - - @service.should_be_overridden - def extend_volume(self, volume, new_size): - """Extend the size of the specified volume.""" - return self._impl.extend_volume(volume, new_size=new_size) - - @service.should_be_overridden - def list_snapshots(self, detailed=True): - """Get a list of all snapshots.""" - return self._impl.list_snapshots(detailed=detailed) - - @service.should_be_overridden - def list_types(self, search_opts=None, is_public=None): - """Lists all volume types.""" - return self._impl.list_types(search_opts=search_opts, - is_public=is_public) - - @service.should_be_overridden - def set_metadata(self, volume, sets=10, set_size=3): - """Update/Set a volume metadata. - - :param volume: The updated/setted volume. - :param sets: how many operations to perform - :param set_size: number of metadata keys to set in each operation - :returns: A list of keys that were set - """ - return self._impl.set_metadata(volume, sets=sets, set_size=set_size) - - @service.should_be_overridden - def delete_metadata(self, volume, keys, deletes=10, delete_size=3): - """Delete volume metadata keys. - - Note that ``len(keys)`` must be greater than or equal to - ``deletes * delete_size``. 
- - :param volume: The volume to delete metadata from - :param deletes: how many operations to perform - :param delete_size: number of metadata keys to delete in each operation - :param keys: a list of keys to choose deletion candidates from - """ - self._impl.delete_metadata(volume, keys, deletes=deletes, - delete_size=delete_size) - - @service.should_be_overridden - def update_readonly_flag(self, volume, read_only): - """Update the read-only access mode flag of the specified volume. - - :param volume: The UUID of the volume to update. - :param read_only: The value to indicate whether to update volume to - read-only access mode. - :returns: A tuple of http Response and body - """ - return self._impl.update_readonly_flag(volume, read_only=read_only) - - @service.should_be_overridden - def upload_volume_to_image(self, volume, force=False, - container_format="bare", disk_format="raw"): - """Upload the given volume to image. - - Returns created image. - - :param volume: volume object - :param force: flag to indicate whether to snapshot a volume even if - it's attached to an instance - :param container_format: container format of image. Acceptable - formats: ami, ari, aki, bare, and ovf - :param disk_format: disk format of image. Acceptable formats: - ami, ari, aki, vhd, vmdk, raw, qcow2, vdi and iso - :returns: Returns created image object - """ - return self._impl.upload_volume_to_image( - volume, force=force, container_format=container_format, - disk_format=disk_format) - - @service.should_be_overridden - def create_qos(self, specs): - """Create a qos specs. - - :param specs: A dict of key/value pairs to be set - :rtype: :class:'QoSSpecs' - """ - return self._impl.create_qos(specs) - - @service.should_be_overridden - def list_qos(self, search_opts=None): - """Get a list of all qos specs. - - :param search_opts: search options - :rtype: list of :class: 'QoSpecs' - """ - return self._impl.list_qos(search_opts) - - @service.should_be_overridden - def get_qos(self, qos_id): - """Get a specific qos specs. - - :param qos_id: The ID of the :class:`QoSSpecs` to get. - :rtype: :class:`QoSSpecs` - """ - return self._impl.get_qos(qos_id) - - @service.should_be_overridden - def set_qos(self, qos, set_specs_args): - """Add/Update keys in qos specs. - - :param qos: The instance of the :class:`QoSSpecs` to set - :param set_specs_args: A dict of key/value pairs to be set - :rtype: :class:`QoSSpecs` - """ - return self._impl.set_qos(qos=qos, - set_specs_args=set_specs_args) - - @service.should_be_overridden - def create_snapshot(self, volume_id, force=False, - name=None, description=None, metadata=None): - """Create one snapshot. - - Returns when the snapshot is actually created and is in the "Available" - state. - - :param volume_id: volume uuid for creating snapshot - :param force: If force is True, create a snapshot even if the volume is - attached to an instance. Default is False. - :param name: Name of the snapshot - :param description: Description of the snapshot - :param metadata: Metadata of the snapshot - :returns: Created snapshot object - """ - return self._impl.create_snapshot( - volume_id, force=force, name=name, - description=description, metadata=metadata) - - @service.should_be_overridden - def delete_snapshot(self, snapshot): - """Delete the given snapshot. - - Returns when the snapshot is actually deleted. 
- - :param snapshot: snapshot instance - """ - self._impl.delete_snapshot(snapshot) - - @service.should_be_overridden - def create_backup(self, volume_id, container=None, - name=None, description=None, - incremental=False, force=False, - snapshot_id=None): - """Creates a volume backup. - - :param volume_id: The ID of the volume to backup. - :param container: The name of the backup service container. - :param name: The name of the backup. - :param description: The description of the backup. - :param incremental: Incremental backup. - :param force: If True, allows an in-use volume to be backed up. - :param snapshot_id: The ID of the snapshot to backup. - - :returns: The created backup object. - """ - return self._impl.create_backup(volume_id, container=container, - name=name, description=description, - incremental=incremental, force=force, - snapshot_id=snapshot_id) - - @service.should_be_overridden - def delete_backup(self, backup): - """Delete a volume backup.""" - self._impl.delete_backup(backup) - - @service.should_be_overridden - def restore_backup(self, backup_id, volume_id=None): - """Restore the given backup. - - :param backup_id: The ID of the backup to restore. - :param volume_id: The ID of the volume to restore the backup to. - - :returns: Return the restored backup. - """ - return self._impl.restore_backup(backup_id, volume_id=volume_id) - - @service.should_be_overridden - def list_backups(self, detailed=True): - """Return user volume backups list.""" - return self._impl.list_backups(detailed=detailed) - - @service.should_be_overridden - def list_transfers(self, detailed=True, search_opts=None): - """Get a list of all volume transfers. - - :param detailed: If True, detailed information about transfer - should be listed - :param search_opts: Search options to filter out volume transfers - :returns: list of :class:`VolumeTransfer` - """ - return self._impl.list_transfers(detailed=detailed, - search_opts=search_opts) - - @service.should_be_overridden - def create_volume_type(self, name=None, description=None, is_public=True): - """Creates a volume type. - - :param name: Descriptive name of the volume type - :param description: Description of the volume type - :param is_public: Volume type visibility - :returns: Return the created volume type. - """ - return self._impl.create_volume_type(name=name, - description=description, - is_public=is_public) - - @service.should_be_overridden - def get_volume_type(self, volume_type): - """get details of volume_type. - - :param volume_type: The ID of the :class:`VolumeType` to get - :returns: :class:`VolumeType` - """ - return self._impl.get_volume_type(volume_type) - - @service.should_be_overridden - def delete_volume_type(self, volume_type): - """delete a volume type. - - :param volume_type: Name or Id of the volume type - :returns: base on client response return True if the request - has been accepted or not - """ - return self._impl.delete_volume_type(volume_type) - - @service.should_be_overridden - def set_volume_type_keys(self, volume_type, metadata): - """Set extra specs on a volume type. - - :param volume_type: The :class:`VolumeType` to set extra spec on - :param metadata: A dict of key/value pairs to be set - :returns: extra_specs if the request has been accepted - """ - return self._impl.set_volume_type_keys(volume_type, metadata) - - @service.should_be_overridden - def transfer_create(self, volume_id, name=None): - """Creates a volume transfer. 
- - :param name: The name of created transfer - :param volume_id: The ID of the volume to transfer. - :returns: Return the created transfer. - """ - return self._impl.transfer_create(volume_id, name=name) - - @service.should_be_overridden - def transfer_accept(self, transfer_id, auth_key): - """Accept a volume transfer. - - :param transfer_id: The ID of the transfer to accept. - :param auth_key: The auth_key of the transfer. - :returns: VolumeTransfer - """ - return self._impl.transfer_accept(transfer_id, auth_key=auth_key) - - @service.should_be_overridden - def create_encryption_type(self, volume_type, specs): - """Create encryption type for a volume type. Default: admin only. - - :param volume_type: the volume type on which to add an encryption type - :param specs: the encryption type specifications to add - :return: an instance of :class: VolumeEncryptionType - """ - return self._impl.create_encryption_type(volume_type, specs=specs) - - @service.should_be_overridden - def get_encryption_type(self, volume_type): - """Get the volume encryption type for the specified volume type. - - :param volume_type: the volume type to query - :return: an instance of :class: VolumeEncryptionType - """ - return self._impl.get_encryption_type(volume_type) - - @service.should_be_overridden - def list_encryption_type(self, search_opts=None): - """List all volume encryption types. - - :param search_opts: Options used when search for encryption types - :return: a list of :class: VolumeEncryptionType instances - """ - return self._impl.list_encryption_type(search_opts=search_opts) - - @service.should_be_overridden - def delete_encryption_type(self, volume_type): - """Delete the encryption type information for the specified volume type. - - :param volume_type: the volume type whose encryption type information - must be deleted - """ - self._impl.delete_encryption_type(volume_type) - - @service.should_be_overridden - def update_encryption_type(self, volume_type, specs): - """Update the encryption type information for the specified volume type. - - :param volume_type: the volume type whose encryption type information - will be updated - :param specs: the encryption type specifications to update - :return: an instance of :class: VolumeEncryptionType - """ - return self._impl.update_encryption_type(volume_type, specs=specs) diff --git a/rally/plugins/openstack/services/storage/cinder_common.py b/rally/plugins/openstack/services/storage/cinder_common.py deleted file mode 100644 index a7b9e37f..00000000 --- a/rally/plugins/openstack/services/storage/cinder_common.py +++ /dev/null @@ -1,683 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
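-
-# NOTE: rough orientation sketch (illustrative wiring, not a tested recipe).
-# The mixins below back the version-specific Cinder services; scenarios
-# normally reach them through the unified ``block.BlockStorage`` facade
-# rather than using the mixins directly, e.g.:
-#
-#     service = block.BlockStorage(clients)
-#     volume = service.create_volume(1)
-#     service.extend_volume(volume, new_size=2)
-#     service.delete_volume(volume)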
- -import random - -from rally.common.i18n import _ -from rally import exceptions -from rally.plugins.openstack.services.image import image -from rally.plugins.openstack.services.storage import block -from rally.task import atomic -from rally.task import utils as bench_utils - -CONF = block.CONF - - -class CinderMixin(object): - - def _get_client(self): - return self._clients.cinder(self.version) - - def _update_resource(self, resource): - try: - manager = getattr(resource, "manager", None) - if manager: - res = manager.get(resource.id) - else: - if isinstance(resource, block.Volume): - attr = "volumes" - elif isinstance(resource, block.VolumeSnapshot): - attr = "volume_snapshots" - elif isinstance(resource, block.VolumeBackup): - attr = "backups" - res = getattr(self._get_client(), attr).get(resource.id) - except Exception as e: - if getattr(e, "code", getattr(e, "http_status", 400)) == 404: - raise exceptions.GetResourceNotFound(resource=resource) - raise exceptions.GetResourceFailure(resource=resource, err=e) - return res - - def _wait_available_volume(self, volume): - return bench_utils.wait_for_status( - volume, - ready_statuses=["available"], - update_resource=self._update_resource, - timeout=CONF.benchmark.cinder_volume_create_timeout, - check_interval=CONF.benchmark.cinder_volume_create_poll_interval - ) - - def list_volumes(self, detailed=True): - """List all volumes.""" - aname = "cinder_v%s.list_volumes" % self.version - with atomic.ActionTimer(self, aname): - return self._get_client().volumes.list(detailed) - - def get_volume(self, volume_id): - """Get target volume information.""" - aname = "cinder_v%s.get_volume" % self.version - with atomic.ActionTimer(self, aname): - return self._get_client().volumes.get(volume_id) - - def delete_volume(self, volume): - """Delete target volume.""" - aname = "cinder_v%s.delete_volume" % self.version - with atomic.ActionTimer(self, aname): - self._get_client().volumes.delete(volume) - bench_utils.wait_for_status( - volume, - ready_statuses=["deleted"], - check_deletion=True, - update_resource=self._update_resource, - timeout=CONF.benchmark.cinder_volume_delete_timeout, - check_interval=(CONF.benchmark - .cinder_volume_delete_poll_interval) - ) - - def extend_volume(self, volume, new_size): - """Extend the size of the specified volume.""" - if isinstance(new_size, dict): - new_size = random.randint(new_size["min"], new_size["max"]) - - aname = "cinder_v%s.extend_volume" % self.version - with atomic.ActionTimer(self, aname): - self._get_client().volumes.extend(volume, new_size) - return self._wait_available_volume(volume) - - def list_snapshots(self, detailed=True): - """Get a list of all snapshots.""" - aname = "cinder_v%s.list_snapshots" % self.version - with atomic.ActionTimer(self, aname): - return (self._get_client() - .volume_snapshots.list(detailed)) - - def set_metadata(self, volume, sets=10, set_size=3): - """Set volume metadata. 
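-
-        Makes ``sets`` separate client calls, each setting ``set_size``
-        randomly generated keys, so the defaults set 30 keys across ten
-        calls.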
- - :param volume: The volume to set metadata on - :param sets: how many operations to perform - :param set_size: number of metadata keys to set in each operation - :returns: A list of keys that were set - """ - key = "cinder_v%s.set_%s_metadatas_%s_times" % (self.version, - set_size, - sets) - with atomic.ActionTimer(self, key): - keys = [] - for i in range(sets): - metadata = {} - for j in range(set_size): - key = self.generate_random_name() - keys.append(key) - metadata[key] = self.generate_random_name() - - self._get_client().volumes.set_metadata(volume, metadata) - return keys - - def delete_metadata(self, volume, keys, deletes=10, delete_size=3): - """Delete volume metadata keys. - - Note that ``len(keys)`` must be greater than or equal to - ``deletes * delete_size``. - - :param volume: The volume to delete metadata from - :param deletes: how many operations to perform - :param delete_size: number of metadata keys to delete in each operation - :param keys: a list of keys to choose deletion candidates from - """ - if len(keys) < deletes * delete_size: - raise exceptions.InvalidArgumentsException( - "Not enough metadata keys to delete: " - "%(num_keys)s keys, but asked to delete %(num_deletes)s" % - {"num_keys": len(keys), - "num_deletes": deletes * delete_size}) - # make a shallow copy of the list of keys so that, when we pop - # from it later, we don't modify the original list. - keys = list(keys) - random.shuffle(keys) - action_name = ("cinder_v%s.delete_%s_metadatas_%s_times" - % (self.version, delete_size, deletes)) - with atomic.ActionTimer(self, action_name): - for i in range(deletes): - to_del = keys[i * delete_size:(i + 1) * delete_size] - self._get_client().volumes.delete_metadata(volume, to_del) - - def update_readonly_flag(self, volume, read_only): - """Update the read-only access mode flag of the specified volume. - - :param volume: The UUID of the volume to update. - :param read_only: The value to indicate whether to update volume to - read-only access mode. - :returns: A tuple of http Response and body - """ - aname = "cinder_v%s.update_readonly_flag" % self.version - with atomic.ActionTimer(self, aname): - return self._get_client().volumes.update_readonly_flag( - volume, read_only) - - def upload_volume_to_image(self, volume, force=False, - container_format="bare", disk_format="raw"): - """Upload the given volume to image. - - Returns created image. - - :param volume: volume object - :param force: flag to indicate whether to snapshot a volume even if - it's attached to an instance - :param container_format: container format of image. Acceptable - formats: ami, ari, aki, bare, and ovf - :param disk_format: disk format of image. Acceptable formats: - ami, ari, aki, vhd, vmdk, raw, qcow2, vdi and iso - :returns: Returns created image object - """ - aname = "cinder_v%s.upload_volume_to_image" % self.version - with atomic.ActionTimer(self, aname): - resp, img = self._get_client().volumes.upload_to_image( - volume, force, self.generate_random_name(), container_format, - disk_format) - # NOTE (e0ne): upload_to_image changes volume status to uploading - # so we need to wait until it will be available. 
- volume = self._wait_available_volume(volume) - - image_id = img["os-volume_upload_image"]["image_id"] - glance = image.Image(self._clients) - - image_inst = glance.get_image(image_id) - image_inst = bench_utils.wait_for_status( - image_inst, - ready_statuses=["active"], - update_resource=glance.get_image, - timeout=CONF.benchmark.glance_image_create_timeout, - check_interval=(CONF.benchmark - .glance_image_create_poll_interval) - ) - - return image_inst - - def create_qos(self, specs): - """Create a qos specs. - - :param specs: A dict of key/value pairs to be set - :rtype: :class:'QoSSpecs' - """ - aname = "cinder_v%s.create_qos" % self.version - name = self.generate_random_name() - - with atomic.ActionTimer(self, aname): - return self._get_client().qos_specs.create(name, specs) - - def list_qos(self, search_opts=None): - """Get a list of all qos specs. - - :param search_opts: search options - :rtype: list of :class: 'QoSpecs' - """ - aname = "cinder_v%s.list_qos" % self.version - with atomic.ActionTimer(self, aname): - return self._get_client().qos_specs.list(search_opts) - - def get_qos(self, qos_id): - """Get a specific qos specs. - - :param qos_id: The ID of the :class: 'QoSSpecs' to get - :rtype: :class: 'QoSSpecs' - """ - aname = "cinder_v%s.get_qos" % self.version - with atomic.ActionTimer(self, aname): - return self._get_client().qos_specs.get(qos_id) - - def set_qos(self, qos_id, set_specs_args): - """Add/Update keys in qos specs. - - :param qos_id: The ID of the :class:`QoSSpecs` to get - :param set_specs_args: A dict of key/value pairs to be set - :rtype: class 'cinderclient.apiclient.base.DictWithMeta' - {"qos_specs": set_specs_args} - """ - aname = "cinder_v%s.set_qos" % self.version - with atomic.ActionTimer(self, aname): - return self._get_client().qos_specs.set_keys(qos_id, - set_specs_args) - - def delete_snapshot(self, snapshot): - """Delete the given snapshot. - - Returns when the snapshot is actually deleted. - - :param snapshot: snapshot object - """ - aname = "cinder_v%s.delete_snapshot" % self.version - with atomic.ActionTimer(self, aname): - self._get_client().volume_snapshots.delete(snapshot) - bench_utils.wait_for_status( - snapshot, - ready_statuses=["deleted"], - check_deletion=True, - update_resource=self._update_resource, - timeout=CONF.benchmark.cinder_volume_delete_timeout, - check_interval=(CONF.benchmark - .cinder_volume_delete_poll_interval) - ) - - def delete_backup(self, backup): - """Delete the given backup. - - Returns when the backup is actually deleted. - - :param backup: backup instance - """ - aname = "cinder_v%s.delete_backup" % self.version - with atomic.ActionTimer(self, aname): - self._get_client().backups.delete(backup) - bench_utils.wait_for_status( - backup, - ready_statuses=["deleted"], - check_deletion=True, - update_resource=self._update_resource, - timeout=CONF.benchmark.cinder_volume_delete_timeout, - check_interval=(CONF.benchmark - .cinder_volume_delete_poll_interval) - ) - - def restore_backup(self, backup_id, volume_id=None): - """Restore the given backup. - - :param backup_id: The ID of the backup to restore. - :param volume_id: The ID of the volume to restore the backup to. 
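-        :returns: the restored volume, once it reaches the "available" state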
-        """
-        aname = "cinder_v%s.restore_backup" % self.version
-        with atomic.ActionTimer(self, aname):
-            restore = self._get_client().restores.restore(backup_id, volume_id)
-            restored_volume = self._get_client().volumes.get(restore.volume_id)
-            return self._wait_available_volume(restored_volume)
-
-    def list_backups(self, detailed=True):
-        """Return user volume backups list.
-
-        :param detailed: True if detailed information about backup
-            should be listed
-        """
-        aname = "cinder_v%s.list_backups" % self.version
-        with atomic.ActionTimer(self, aname):
-            return self._get_client().backups.list(detailed)
-
-    def list_transfers(self, detailed=True, search_opts=None):
-        """Get a list of all volume transfers.
-
-        :param detailed: If True, detailed information about transfer
-            should be listed
-        :param search_opts: Search options to filter out volume transfers
-        :returns: list of :class:`VolumeTransfer`
-        """
-        aname = "cinder_v%s.list_transfers" % self.version
-        with atomic.ActionTimer(self, aname):
-            return self._get_client().transfers.list(detailed, search_opts)
-
-    def get_volume_type(self, volume_type):
-        """Get details of volume_type.
-
-        :param volume_type: The ID of the :class:`VolumeType` to get
-        :returns: :class:`VolumeType`
-        """
-        aname = "cinder_v%s.get_volume_type" % self.version
-        with atomic.ActionTimer(self, aname):
-            return self._get_client().volume_types.get(volume_type)
-
-    def delete_volume_type(self, volume_type):
-        """Delete a volume type.
-
-        :param volume_type: Name or Id of the volume type
-        :returns: True if the request has been accepted (the client
-            returned HTTP 202), False otherwise
-        """
-        aname = "cinder_v%s.delete_volume_type" % self.version
-        with atomic.ActionTimer(self, aname):
-            tuple_res = self._get_client().volume_types.delete(
-                volume_type)
-            return (tuple_res[0].status_code == 202)
-
-    def set_volume_type_keys(self, volume_type, metadata):
-        """Set extra specs on a volume type.
-
-        :param volume_type: The :class:`VolumeType` to set extra spec on
-        :param metadata: A dict of key/value pairs to be set
-        :returns: extra_specs if the request has been accepted
-        """
-        aname = "cinder_v%s.set_volume_type_keys" % self.version
-        with atomic.ActionTimer(self, aname):
-            return volume_type.set_keys(metadata)
-
-    def transfer_create(self, volume_id, name=None):
-        """Create a volume transfer.
-
-        :param name: The name of created transfer
-        :param volume_id: The ID of the volume to transfer
-        :rtype: VolumeTransfer
-        """
-        name = name or self.generate_random_name()
-        aname = "cinder_v%s.transfer_create" % self.version
-        with atomic.ActionTimer(self, aname):
-            return self._get_client().transfers.create(volume_id, name=name)
-
-    def transfer_accept(self, transfer_id, auth_key):
-        """Accept a volume transfer.
-
-        :param transfer_id: The ID of the transfer to accept.
-        :param auth_key: The auth_key of the transfer.
-        :rtype: VolumeTransfer
-        """
-        aname = "cinder_v%s.transfer_accept" % self.version
-        with atomic.ActionTimer(self, aname):
-            return self._get_client().transfers.accept(transfer_id, auth_key)
-
-    def create_encryption_type(self, volume_type, specs):
-        """Create encryption type for a volume type. Default: admin only.
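-
-        The ``specs`` dict is passed to the client unchanged; an illustrative
-        payload (field values are examples, not defaults) might be::
-
-            {"provider": "luks", "cipher": "aes-xts-plain64",
-             "key_size": 256, "control_location": "front-end"}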
- - :param volume_type: the volume type on which to add an encryption type - :param specs: the encryption type specifications to add - :return: an instance of :class: VolumeEncryptionType - """ - aname = "cinder_v%s.create_encryption_type" % self.version - with atomic.ActionTimer(self, aname): - return self._get_client().volume_encryption_types.create( - volume_type, specs) - - def get_encryption_type(self, volume_type): - """Get the volume encryption type for the specified volume type. - - :param volume_type: the volume type to query - :return: an instance of :class: VolumeEncryptionType - """ - aname = "cinder_v%s.get_encryption_type" % self.version - with atomic.ActionTimer(self, aname): - return self._get_client().volume_encryption_types.get( - volume_type) - - def list_encryption_type(self, search_opts=None): - """List all volume encryption types. - - :param search_opts: Options used when search for encryption types - :return: a list of :class: VolumeEncryptionType instances - """ - aname = "cinder_v%s.list_encryption_type" % self.version - with atomic.ActionTimer(self, aname): - return self._get_client().volume_encryption_types.list( - search_opts) - - def delete_encryption_type(self, volume_type): - """Delete the encryption type information for the specified volume type. - - :param volume_type: the volume type whose encryption type information - must be deleted - """ - aname = "cinder_v%s.delete_encryption_type" % self.version - with atomic.ActionTimer(self, aname): - resp = self._get_client().volume_encryption_types.delete( - volume_type) - if (resp[0].status_code != 202): - raise exceptions.RallyException( - _("EncryptionType Deletion Failed")) - - def update_encryption_type(self, volume_type, specs): - """Update the encryption type information for the specified volume type. - - :param volume_type: the volume type whose encryption type information - must be updated - :param specs: the encryption type specifications to update - :return: an instance of :class: VolumeEncryptionType - """ - aname = "cinder_v%s.update_encryption_type" % self.version - with atomic.ActionTimer(self, aname): - return self._get_client().volume_encryption_types.update( - volume_type, specs) - - -class UnifiedCinderMixin(object): - - @staticmethod - def _unify_backup(backup): - return block.VolumeBackup(id=backup.id, name=backup.name, - volume_id=backup.volume_id, - status=backup.status) - - @staticmethod - def _unify_transfer(transfer): - auth_key = transfer.auth_key if hasattr(transfer, "auth_key") else None - return block.VolumeTransfer(id=transfer.id, name=transfer.name, - volume_id=transfer.volume_id, - auth_key=auth_key) - - @staticmethod - def _unify_qos(qos): - return block.QoSSpecs(id=qos.id, name=qos.name, specs=qos.specs) - - @staticmethod - def _unify_encryption_type(encryption_type): - return block.VolumeEncryptionType( - id=encryption_type.encryption_id, - volume_type_id=encryption_type.volume_type_id) - - def delete_volume(self, volume): - """Delete a volume.""" - self._impl.delete_volume(volume) - - def set_metadata(self, volume, sets=10, set_size=3): - """Update/Set a volume metadata. - - :param volume: The updated/setted volume. - :param sets: how many operations to perform - :param set_size: number of metadata keys to set in each operation - :returns: A list of keys that were set - """ - return self._impl.set_metadata(volume, sets=sets, set_size=set_size) - - def delete_metadata(self, volume, keys, deletes=10, delete_size=3): - """Delete volume metadata keys. 
-
-        Note that ``len(keys)`` must be greater than or equal to
-        ``deletes * delete_size``.
-
-        :param volume: The volume to delete metadata from
-        :param deletes: how many operations to perform
-        :param delete_size: number of metadata keys to delete in each operation
-        :param keys: a list of keys to choose deletion candidates from
-        """
-        self._impl.delete_metadata(volume, keys=keys, deletes=deletes,
-                                   delete_size=delete_size)
-
-    def update_readonly_flag(self, volume, read_only):
-        """Update the read-only access mode flag of the specified volume.
-
-        :param volume: The UUID of the volume to update.
-        :param read_only: The value to indicate whether to update volume to
-            read-only access mode.
-        :returns: A tuple of http Response and body
-        """
-        return self._impl.update_readonly_flag(volume, read_only=read_only)
-
-    def upload_volume_to_image(self, volume, force=False,
-                               container_format="bare", disk_format="raw"):
-        """Upload the given volume to image.
-
-        Returns created image.
-
-        :param volume: volume object
-        :param force: flag to indicate whether to snapshot a volume even if
-            it's attached to an instance
-        :param container_format: container format of image. Acceptable
-            formats: ami, ari, aki, bare, and ovf
-        :param disk_format: disk format of image. Acceptable formats:
-            ami, ari, aki, vhd, vmdk, raw, qcow2, vdi and iso
-        :returns: Returns created image object
-        """
-        return self._impl.upload_volume_to_image(
-            volume, force=force, container_format=container_format,
-            disk_format=disk_format)
-
-    def create_qos(self, specs):
-        """Create a qos specs.
-
-        :param specs: A dict of key/value pairs to be set
-        :rtype: :class:`QoSSpecs`
-        """
-        return self._unify_qos(self._impl.create_qos(specs))
-
-    def list_qos(self, search_opts=None):
-        """Get a list of all qos specs.
-
-        :param search_opts: search options
-        :rtype: list of :class:`QoSSpecs`
-        """
-        return [self._unify_qos(qos)
-                for qos in self._impl.list_qos(search_opts)]
-
-    def get_qos(self, qos_id):
-        """Get a specific qos specs.
-
-        :param qos_id: The ID of the :class:`QoSSpecs` to get
-        :rtype: :class:`QoSSpecs`
-        """
-        return self._unify_qos(self._impl.get_qos(qos_id))
-
-    def set_qos(self, qos, set_specs_args):
-        """Add/Update keys in qos specs.
-
-        :param qos: The instance of the :class:`QoSSpecs` to set
-        :param set_specs_args: A dict of key/value pairs to be set
-        :rtype: :class:`QoSSpecs`
-        """
-        self._impl.set_qos(qos.id, set_specs_args)
-        return self._unify_qos(qos)
-
-    def delete_snapshot(self, snapshot):
-        """Delete the given snapshot.
-
-        Returns when the snapshot is actually deleted.
-
-        :param snapshot: snapshot instance
-        """
-        self._impl.delete_snapshot(snapshot)
-
-    def delete_backup(self, backup):
-        """Delete a volume backup."""
-        self._impl.delete_backup(backup)
-
-    def list_backups(self, detailed=True):
-        """Return user volume backups list."""
-        return [self._unify_backup(backup)
-                for backup in self._impl.list_backups(detailed=detailed)]
-
-    def list_transfers(self, detailed=True, search_opts=None):
-        """Get a list of all volume transfers.
-
-        :param detailed: If True, detailed information about transfer
-            should be listed
-        :param search_opts: Search options to filter out volume transfers
-        :returns: list of :class:`VolumeTransfer`
-        """
-        return [self._unify_transfer(transfer)
-                for transfer in self._impl.list_transfers(
-                    detailed=detailed, search_opts=search_opts)]
-
-    def get_volume_type(self, volume_type):
-        """Get details of volume_type.
- - :param volume_type: The ID of the :class:`VolumeType` to get - :returns: :class:`VolumeType` - """ - return self._impl.get_volume_type(volume_type) - - def delete_volume_type(self, volume_type): - """delete a volume type. - - :param volume_type: Name or Id of the volume type - :returns: base on client response return True if the request - has been accepted or not - """ - return self._impl.delete_volume_type(volume_type) - - def set_volume_type_keys(self, volume_type, metadata): - """Set extra specs on a volume type. - - :param volume_type: The :class:`VolumeType` to set extra spec on - :param metadata: A dict of key/value pairs to be set - :returns: extra_specs if the request has been accepted - """ - return self._impl.set_volume_type_keys(volume_type, metadata) - - def transfer_create(self, volume_id, name=None): - """Creates a volume transfer. - - :param name: The name of created transfer - :param volume_id: The ID of the volume to transfer. - :returns: Return the created transfer. - """ - return self._unify_transfer( - self._impl.transfer_create(volume_id, name=name)) - - def transfer_accept(self, transfer_id, auth_key): - """Accept a volume transfer. - - :param transfer_id: The ID of the transfer to accept. - :param auth_key: The auth_key of the transfer. - :returns: VolumeTransfer - """ - return self._unify_transfer( - self._impl.transfer_accept(transfer_id, auth_key=auth_key)) - - def create_encryption_type(self, volume_type, specs): - """Create encryption type for a volume type. Default: admin only. - - :param volume_type: the volume type on which to add an encryption type - :param specs: the encryption type specifications to add - :return: an instance of :class: VolumeEncryptionType - """ - return self._unify_encryption_type( - self._impl.create_encryption_type(volume_type, specs=specs)) - - def get_encryption_type(self, volume_type): - """Get the volume encryption type for the specified volume type. - - :param volume_type: the volume type to query - :return: an instance of :class: VolumeEncryptionType - """ - return self._unify_encryption_type( - self._impl.get_encryption_type(volume_type)) - - def list_encryption_type(self, search_opts=None): - """List all volume encryption types. - - :param search_opts: Options used when search for encryption types - :return: a list of :class: VolumeEncryptionType instances - """ - return [self._unify_encryption_type(encryption_type) - for encryption_type in self._impl.list_encryption_type( - search_opts=search_opts)] - - def delete_encryption_type(self, volume_type): - """Delete the encryption type information for the specified volume type. - - :param volume_type: the volume type whose encryption type information - must be deleted - """ - return self._impl.delete_encryption_type(volume_type) - - def update_encryption_type(self, volume_type, specs): - """Update the encryption type information for the specified volume type. - - :param volume_type: the volume type whose encryption type information - must be updated - :param specs: the encryption type specifications to update - :return: an instance of :class: VolumeEncryptionType - """ - return self._impl.update_encryption_type(volume_type, specs=specs) diff --git a/rally/plugins/openstack/services/storage/cinder_v1.py b/rally/plugins/openstack/services/storage/cinder_v1.py deleted file mode 100644 index f2fb2a52..00000000 --- a/rally/plugins/openstack/services/storage/cinder_v1.py +++ /dev/null @@ -1,314 +0,0 @@ -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import random - -from rally.common import utils as rutils -from rally.plugins.openstack import service -from rally.plugins.openstack.services.storage import block -from rally.plugins.openstack.services.storage import cinder_common -from rally.task import atomic - -CONF = block.CONF - - -@service.service("cinder", service_type="block-storage", version="1") -class CinderV1Service(service.Service, cinder_common.CinderMixin): - - @atomic.action_timer("cinder_v1.create_volume") - def create_volume(self, size, snapshot_id=None, source_volid=None, - display_name=None, display_description=None, - volume_type=None, user_id=None, - project_id=None, availability_zone=None, - metadata=None, imageRef=None): - """Creates a volume. - - :param size: Size of volume in GB - :param snapshot_id: ID of the snapshot - :param display_name: Name of the volume - :param display_description: Description of the volume - :param volume_type: Type of volume - :param user_id: User id derived from context - :param project_id: Project id derived from context - :param availability_zone: Availability Zone to use - :param metadata: Optional metadata to set on volume creation - :param imageRef: reference to an image stored in glance - - :returns: Return a new volume. - """ - if isinstance(size, dict): - size = random.randint(size["min"], size["max"]) - - volume = self._get_client().volumes.create( - size, - display_name=(display_name or self.generate_random_name()), - display_description=display_description, - snapshot_id=snapshot_id, - source_volid=source_volid, - volume_type=volume_type, - user_id=user_id, - project_id=project_id, - availability_zone=availability_zone, - metadata=metadata, - imageRef=imageRef - ) - - # NOTE(msdubov): It is reasonable to wait 5 secs before starting to - # check whether the volume is ready => less API calls. - rutils.interruptable_sleep( - CONF.benchmark.cinder_volume_create_prepoll_delay) - - return self._wait_available_volume(volume) - - @atomic.action_timer("cinder_v1.update_volume") - def update_volume(self, volume_id, display_name=None, - display_description=None): - """Update the name or description for a volume. - - :param volume_id: The updated volume id. - :param display_name: The volume name. - :param display_description: The volume description. - - :returns: The updated volume. - """ - kwargs = {} - if display_name is not None: - kwargs["display_name"] = display_name - if display_description is not None: - kwargs["display_description"] = display_description - updated_volume = self._get_client().volumes.update( - volume_id, **kwargs) - return updated_volume["volume"] - - @atomic.action_timer("cinder_v1.list_types") - def list_types(self, search_opts=None): - """Lists all volume types.""" - return (self._get_client() - .volume_types.list(search_opts)) - - @atomic.action_timer("cinder_v1.create_snapshot") - def create_snapshot(self, volume_id, force=False, - display_name=None, display_description=None): - """Create one snapshot. 
-
-        Returns when the snapshot is actually created and is in the "Available"
-        state.
-
-        :param volume_id: volume uuid for creating snapshot
-        :param force: flag to indicate whether to snapshot a volume even if
-            it's attached to an instance
-        :param display_name: Name of the snapshot
-        :param display_description: Description of the snapshot
-        :returns: Created snapshot object
-        """
-        kwargs = {"force": force,
-                  "display_name": display_name or self.generate_random_name(),
-                  "display_description": display_description}
-
-        snapshot = self._get_client().volume_snapshots.create(volume_id,
-                                                              **kwargs)
-        rutils.interruptable_sleep(
-            CONF.benchmark.cinder_volume_create_prepoll_delay)
-        snapshot = self._wait_available_volume(snapshot)
-        return snapshot
-
-    @atomic.action_timer("cinder_v1.create_backup")
-    def create_backup(self, volume_id, container=None,
-                      name=None, description=None):
-        """Create a volume backup of the given volume.
-
-        :param volume_id: The ID of the volume to backup.
-        :param container: The name of the backup service container.
-        :param name: The name of the backup.
-        :param description: The description of the backup.
-        """
-        kwargs = {"name": name or self.generate_random_name(),
-                  "description": description,
-                  "container": container}
-        backup = self._get_client().backups.create(volume_id, **kwargs)
-        return self._wait_available_volume(backup)
-
-    @atomic.action_timer("cinder_v1.create_volume_type")
-    def create_volume_type(self, name=None):
-        """Create a volume type.
-
-        :param name: Descriptive name of the volume type
-        """
-        kwargs = {"name": name or self.generate_random_name()}
-        return self._get_client().volume_types.create(**kwargs)
-
-
-@service.compat_layer(CinderV1Service)
-class UnifiedCinderV1Service(cinder_common.UnifiedCinderMixin,
-                             block.BlockStorage):
-
-    @staticmethod
-    def _unify_volume(volume):
-        if isinstance(volume, dict):
-            return block.Volume(id=volume["id"], name=volume["display_name"],
-                                size=volume["size"], status=volume["status"])
-        else:
-            return block.Volume(id=volume.id, name=volume.display_name,
-                                size=volume.size, status=volume.status)
-
-    @staticmethod
-    def _unify_snapshot(snapshot):
-        return block.VolumeSnapshot(id=snapshot.id, name=snapshot.display_name,
-                                    volume_id=snapshot.volume_id,
-                                    status=snapshot.status)
-
-    def create_volume(self, size, consistencygroup_id=None,
-                      group_id=None, snapshot_id=None, source_volid=None,
-                      name=None, description=None,
-                      volume_type=None, user_id=None,
-                      project_id=None, availability_zone=None,
-                      metadata=None, imageRef=None, scheduler_hints=None,
-                      source_replica=None, multiattach=False):
-        """Creates a volume.
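-
-        Note that the v1 backend only understands a subset of these
-        arguments: ``consistencygroup_id``, ``group_id``, ``scheduler_hints``,
-        ``source_replica`` and ``multiattach`` are accepted for signature
-        compatibility with the v2 service and silently ignored.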
- - :param size: Size of volume in GB - :param consistencygroup_id: ID of the consistencygroup - :param group_id: ID of the group - :param snapshot_id: ID of the snapshot - :param name: Name of the volume - :param description: Description of the volume - :param volume_type: Type of volume - :param user_id: User id derived from context - :param project_id: Project id derived from context - :param availability_zone: Availability Zone to use - :param metadata: Optional metadata to set on volume creation - :param imageRef: reference to an image stored in glance - :param source_volid: ID of source volume to clone from - :param source_replica: ID of source volume to clone replica - :param scheduler_hints: (optional extension) arbitrary key-value pairs - specified by the client to help boot an instance - :param multiattach: Allow the volume to be attached to more than - one instance - - :returns: Return a new volume. - """ - return self._unify_volume(self._impl.create_volume( - size, snapshot_id=snapshot_id, source_volid=source_volid, - display_name=name, - display_description=description, - volume_type=volume_type, user_id=user_id, - project_id=project_id, availability_zone=availability_zone, - metadata=metadata, imageRef=imageRef)) - - def list_volumes(self, detailed=True): - """Lists all volumes. - - :param detailed: Whether to return detailed volume info. - :returns: Return volumes list. - """ - return [self._unify_volume(volume) - for volume in self._impl.list_volumes(detailed=detailed)] - - def get_volume(self, volume_id): - """Get a volume. - - :param volume_id: The ID of the volume to get. - - :returns: Return the volume. - """ - return self._unify_volume(self._impl.get_volume(volume_id)) - - def extend_volume(self, volume, new_size): - """Extend the size of the specified volume.""" - return self._unify_volume( - self._impl.extend_volume(volume, new_size=new_size)) - - def update_volume(self, volume_id, - name=None, description=None): - """Update the name or description for a volume. - - :param volume_id: The updated volume id. - :param name: The volume name. - :param description: The volume description. - - :returns: The updated volume. - """ - return self._unify_volume(self._impl.update_volume( - volume_id, display_name=name, - display_description=description)) - - def list_types(self, search_opts=None, is_public=None): - """Lists all volume types.""" - return self._impl.list_types(search_opts=search_opts) - - def create_snapshot(self, volume_id, force=False, - name=None, description=None, metadata=None): - """Create one snapshot. - - Returns when the snapshot is actually created and is in the "Available" - state. - - :param volume_id: volume uuid for creating snapshot - :param force: If force is True, create a snapshot even if the volume is - attached to an instance. Default is False. - :param name: Name of the snapshot - :param description: Description of the snapshot - :param metadata: Metadata of the snapshot - :returns: Created snapshot object - """ - return self._unify_snapshot(self._impl.create_snapshot( - volume_id, force=force, display_name=name, - display_description=description)) - - def list_snapshots(self, detailed=True): - """Get a list of all snapshots.""" - return [self._unify_snapshot(snapshot) - for snapshot in self._impl.list_snapshots(detailed=detailed)] - - def create_backup(self, volume_id, container=None, - name=None, description=None, - incremental=False, force=False, - snapshot_id=None): - """Creates a volume backup. 
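-
-        Note that the v1 backend ignores ``incremental``, ``force`` and
-        ``snapshot_id``; they are accepted only for signature compatibility
-        with the v2 service.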
- - :param volume_id: The ID of the volume to backup. - :param container: The name of the backup service container. - :param name: The name of the backup. - :param description: The description of the backup. - :param incremental: Incremental backup. - :param force: If True, allows an in-use volume to be backed up. - :param snapshot_id: The ID of the snapshot to backup. - - :returns: The created backup object. - """ - return self._unify_backup(self._impl.create_backup( - volume_id, container=container, name=name, - description=description)) - - def create_volume_type(self, name=None, description=None, is_public=True): - """Creates a volume type. - - :param name: Descriptive name of the volume type - :param description: Description of the volume type - :param is_public: Volume type visibility - :returns: Return the created volume type. - """ - return self._impl.create_volume_type(name=name) - - def restore_backup(self, backup_id, volume_id=None): - """Restore the given backup. - - :param backup_id: The ID of the backup to restore. - :param volume_id: The ID of the volume to restore the backup to. - - :returns: Return the restored backup. - """ - return self._unify_volume(self._impl.restore_backup( - backup_id, volume_id=volume_id)) diff --git a/rally/plugins/openstack/services/storage/cinder_v2.py b/rally/plugins/openstack/services/storage/cinder_v2.py deleted file mode 100644 index 1dcaf502..00000000 --- a/rally/plugins/openstack/services/storage/cinder_v2.py +++ /dev/null @@ -1,384 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import random - -from rally.common import utils as rutils -from rally.plugins.openstack import service -from rally.plugins.openstack.services.storage import block -from rally.plugins.openstack.services.storage import cinder_common -from rally.task import atomic - -CONF = block.CONF - - -@service.service("cinder", service_type="block-storage", version="2") -class CinderV2Service(service.Service, cinder_common.CinderMixin): - - @atomic.action_timer("cinder_v2.create_volume") - def create_volume(self, size, consistencygroup_id=None, - snapshot_id=None, source_volid=None, name=None, - description=None, volume_type=None, user_id=None, - project_id=None, availability_zone=None, - metadata=None, imageRef=None, scheduler_hints=None, - source_replica=None, multiattach=False): - """Creates a volume. 
- - :param size: Size of volume in GB - :param consistencygroup_id: ID of the consistencygroup - :param snapshot_id: ID of the snapshot - :param name: Name of the volume - :param description: Description of the volume - :param volume_type: Type of volume - :param user_id: User id derived from context - :param project_id: Project id derived from context - :param availability_zone: Availability Zone to use - :param metadata: Optional metadata to set on volume creation - :param imageRef: reference to an image stored in glance - :param source_volid: ID of source volume to clone from - :param source_replica: ID of source volume to clone replica - :param scheduler_hints: (optional extension) arbitrary key-value pairs - specified by the client to help boot an instance - :param multiattach: Allow the volume to be attached to more than - one instance - - :returns: Return a new volume. - """ - kwargs = {"name": name or self.generate_random_name(), - "description": description, - "consistencygroup_id": consistencygroup_id, - "snapshot_id": snapshot_id, - "source_volid": source_volid, - "volume_type": volume_type, - "user_id": user_id, - "project_id": project_id, - "availability_zone": availability_zone, - "metadata": metadata, - "imageRef": imageRef, - "scheduler_hints": scheduler_hints, - "source_replica": source_replica, - "multiattach": multiattach} - if isinstance(size, dict): - size = random.randint(size["min"], size["max"]) - - volume = (self._get_client() - .volumes.create(size, **kwargs)) - - # NOTE(msdubov): It is reasonable to wait 5 secs before starting to - # check whether the volume is ready => less API calls. - rutils.interruptable_sleep( - CONF.benchmark.cinder_volume_create_prepoll_delay) - - return self._wait_available_volume(volume) - - @atomic.action_timer("cinder_v2.update_volume") - def update_volume(self, volume_id, name=None, description=None): - """Update the name or description for a volume. - - :param volume_id: The updated volume id. - :param name: The volume name. - :param description: The volume description. - - :returns: The updated volume. - """ - kwargs = {} - if name is not None: - kwargs["name"] = name - if description is not None: - kwargs["description"] = description - updated_volume = self._get_client().volumes.update( - volume_id, **kwargs) - return updated_volume["volume"] - - @atomic.action_timer("cinder_v2.list_types") - def list_types(self, search_opts=None, is_public=None): - """Lists all volume types.""" - return (self._get_client() - .volume_types.list(search_opts=search_opts, - is_public=is_public)) - - @atomic.action_timer("cinder_v2.create_snapshot") - def create_snapshot(self, volume_id, force=False, - name=None, description=None, metadata=None): - """Create one snapshot. - - Returns when the snapshot is actually created and is in the "Available" - state. 
-
-        :param volume_id: volume uuid for creating snapshot
-        :param force: flag to indicate whether to snapshot a volume even if
-            it's attached to an instance
-        :param name: Name of the snapshot
-        :param description: Description of the snapshot
-        :returns: Created snapshot object
-        """
-        kwargs = {"force": force,
-                  "name": name or self.generate_random_name(),
-                  "description": description,
-                  "metadata": metadata}
-
-        snapshot = self._get_client().volume_snapshots.create(volume_id,
-                                                              **kwargs)
-        rutils.interruptable_sleep(
-            CONF.benchmark.cinder_volume_create_prepoll_delay)
-        snapshot = self._wait_available_volume(snapshot)
-        return snapshot
-
-    @atomic.action_timer("cinder_v2.create_backup")
-    def create_backup(self, volume_id, container=None,
-                      name=None, description=None,
-                      incremental=False, force=False,
-                      snapshot_id=None):
-        """Create a volume backup of the given volume.
-
-        :param volume_id: The ID of the volume to backup.
-        :param container: The name of the backup service container.
-        :param name: The name of the backup.
-        :param description: The description of the backup.
-        :param incremental: Incremental backup.
-        :param force: If True, allows an in-use volume to be backed up.
-        :param snapshot_id: The ID of the snapshot to backup.
-        """
-        kwargs = {"name": name or self.generate_random_name(),
-                  "description": description,
-                  "container": container,
-                  "incremental": incremental,
-                  "force": force,
-                  "snapshot_id": snapshot_id}
-        backup = self._get_client().backups.create(volume_id, **kwargs)
-        return self._wait_available_volume(backup)
-
-    @atomic.action_timer("cinder_v2.create_volume_type")
-    def create_volume_type(self, name=None, description=None, is_public=True):
-        """Create a volume type.
-
-        :param name: Descriptive name of the volume type
-        :param description: Description of the volume type
-        :param is_public: Volume type visibility
-        :returns: The created :class:`VolumeType` object
-        """
-        kwargs = {"name": name or self.generate_random_name(),
-                  "description": description,
-                  "is_public": is_public}
-        return self._get_client().volume_types.create(**kwargs)
-
-    @atomic.action_timer("cinder_v2.update_volume_type")
-    def update_volume_type(self, volume_type, update_name=False,
-                           description=None, is_public=None):
-        """Update the name and/or description for a volume type.
-
-        :param volume_type: The ID or an instance of the :class:`VolumeType`
-            to update.
-        :param update_name: if True, replace the name with a newly generated
-            random name; if False, leave the name unchanged.
-        :param description: Description of the volume type.
-        :rtype: :class:`VolumeType`
-        """
-        name = None
-        if update_name:
-            name = self.generate_random_name()
-
-        return self._get_client().volume_types.update(volume_type, name,
-                                                      description, is_public)
-
-    @atomic.action_timer("cinder_v2.add_type_access")
-    def add_type_access(self, volume_type, project):
-        """Add a project to the given volume type access list.
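-
-        A typical (illustrative) flow creates a private volume type and then
-        grants one project access to it; ``service`` stands for an instance
-        of this class and ``project_id`` for a project UUID::
-
-            vtype = service.create_volume_type(is_public=False)
-            service.add_type_access(vtype, project=project_id)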
-
-        :param volume_type: Volume type name or ID to add access for the given
-            project
-        :param project: Project ID to add volume type access for
-        :return: An instance of cinderclient.apiclient.base.TupleWithMeta
-        """
-        return self._get_client().volume_type_access.add_project_access(
-            volume_type, project)
-
-    @atomic.action_timer("cinder_v2.list_type_access")
-    def list_type_access(self, volume_type):
-        """List access information for the given volume type.
-
-        :param volume_type: Filter results by volume type name or ID
-        :return: VolumeTypeAccess list of the given volume type
-        """
-        return self._get_client().volume_type_access.list(volume_type)
-
-
-@service.compat_layer(CinderV2Service)
-class UnifiedCinderV2Service(cinder_common.UnifiedCinderMixin,
-                             block.BlockStorage):
-
-    @staticmethod
-    def _unify_volume(volume):
-        if isinstance(volume, dict):
-            return block.Volume(id=volume["id"], name=volume["name"],
-                                size=volume["size"], status=volume["status"])
-        else:
-            return block.Volume(id=volume.id, name=volume.name,
-                                size=volume.size, status=volume.status)
-
-    @staticmethod
-    def _unify_snapshot(snapshot):
-        return block.VolumeSnapshot(id=snapshot.id, name=snapshot.name,
-                                    volume_id=snapshot.volume_id,
-                                    status=snapshot.status)
-
-    def create_volume(self, size, consistencygroup_id=None,
-                      group_id=None, snapshot_id=None, source_volid=None,
-                      name=None, description=None,
-                      volume_type=None, user_id=None,
-                      project_id=None, availability_zone=None,
-                      metadata=None, imageRef=None, scheduler_hints=None,
-                      source_replica=None, multiattach=False):
-        """Creates a volume.
-
-        :param size: Size of volume in GB
-        :param consistencygroup_id: ID of the consistencygroup
-        :param group_id: ID of the group
-        :param snapshot_id: ID of the snapshot
-        :param name: Name of the volume
-        :param description: Description of the volume
-        :param volume_type: Type of volume
-        :param user_id: User id derived from context
-        :param project_id: Project id derived from context
-        :param availability_zone: Availability Zone to use
-        :param metadata: Optional metadata to set on volume creation
-        :param imageRef: reference to an image stored in glance
-        :param source_volid: ID of source volume to clone from
-        :param source_replica: ID of source volume to clone replica
-        :param scheduler_hints: (optional extension) arbitrary key-value pairs
-            specified by the client to help boot an instance
-        :param multiattach: Allow the volume to be attached to more than
-            one instance
-
-        :returns: Return a new volume.
-        """
-        return self._unify_volume(self._impl.create_volume(
-            size, consistencygroup_id=consistencygroup_id,
-            snapshot_id=snapshot_id,
-            source_volid=source_volid, name=name,
-            description=description, volume_type=volume_type,
-            user_id=user_id, project_id=project_id,
-            availability_zone=availability_zone, metadata=metadata,
-            imageRef=imageRef, scheduler_hints=scheduler_hints,
-            source_replica=source_replica, multiattach=multiattach))
-
-    def list_volumes(self, detailed=True):
-        """Lists all volumes.
-
-        :param detailed: Whether to return detailed volume info.
-        :returns: Return volumes list.
-        """
-        return [self._unify_volume(volume)
-                for volume in self._impl.list_volumes(detailed=detailed)]
-
-    def get_volume(self, volume_id):
-        """Get a volume.
-
-        :param volume_id: The ID of the volume to get.
-
-        :returns: Return the volume.
- """ - return self._unify_volume(self._impl.get_volume(volume_id)) - - def extend_volume(self, volume, new_size): - """Extend the size of the specified volume.""" - return self._unify_volume( - self._impl.extend_volume(volume, new_size=new_size)) - - def update_volume(self, volume_id, - name=None, description=None): - """Update the name or description for a volume. - - :param volume_id: The updated volume id. - :param name: The volume name. - :param description: The volume description. - - :returns: The updated volume. - """ - return self._unify_volume(self._impl.update_volume( - volume_id, name=name, description=description)) - - def list_types(self, search_opts=None, is_public=None): - """Lists all volume types.""" - return self._impl.list_types(search_opts=search_opts, - is_public=is_public) - - def create_snapshot(self, volume_id, force=False, - name=None, description=None, metadata=None): - """Create one snapshot. - - Returns when the snapshot is actually created and is in the "Available" - state. - - :param volume_id: volume uuid for creating snapshot - :param force: If force is True, create a snapshot even if the volume is - attached to an instance. Default is False. - :param name: Name of the snapshot - :param description: Description of the snapshot - :param metadata: Metadata of the snapshot - :returns: Created snapshot object - """ - return self._unify_snapshot(self._impl.create_snapshot( - volume_id, force=force, name=name, - description=description, metadata=metadata)) - - def list_snapshots(self, detailed=True): - """Get a list of all snapshots.""" - return [self._unify_snapshot(snapshot) - for snapshot in self._impl.list_snapshots(detailed=detailed)] - - def create_backup(self, volume_id, container=None, - name=None, description=None, - incremental=False, force=False, - snapshot_id=None): - """Creates a volume backup. - - :param volume_id: The ID of the volume to backup. - :param container: The name of the backup service container. - :param name: The name of the backup. - :param description: The description of the backup. - :param incremental: Incremental backup. - :param force: If True, allows an in-use volume to be backed up. - :param snapshot_id: The ID of the snapshot to backup. - - :returns: The created backup object. - """ - return self._unify_backup(self._impl.create_backup( - volume_id, container=container, name=name, description=description, - incremental=incremental, force=force, snapshot_id=snapshot_id)) - - def create_volume_type(self, name=None, description=None, is_public=True): - """Creates a volume type. - - :param name: Descriptive name of the volume type - :param description: Description of the volume type - :param is_public: Volume type visibility - :returns: Return the created volume type. - """ - return self._impl.create_volume_type(name=name, - description=description, - is_public=is_public) - - def restore_backup(self, backup_id, volume_id=None): - """Restore the given backup. - - :param backup_id: The ID of the backup to restore. - :param volume_id: The ID of the volume to restore the backup to. - - :returns: Return the restored backup. - """ - return self._unify_volume(self._impl.restore_backup( - backup_id, volume_id=volume_id)) diff --git a/rally/plugins/openstack/types.py b/rally/plugins/openstack/types.py deleted file mode 100644 index 79a227fa..00000000 --- a/rally/plugins/openstack/types.py +++ /dev/null @@ -1,241 +0,0 @@ -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import copy - -from rally.common.plugin import plugin -from rally import exceptions -from rally.task import types - - -@plugin.configure(name="nova_flavor") -class Flavor(types.ResourceType): - - @classmethod - def transform(cls, clients, resource_config): - """Transform the resource config to id. - - :param clients: openstack admin client handles - :param resource_config: scenario config with `id`, `name` or `regex` - - :returns: id matching resource - """ - resource_id = resource_config.get("id") - if not resource_id: - novaclient = clients.nova() - resource_id = types._id_from_name( - resource_config=resource_config, - resources=novaclient.flavors.list(), - typename="flavor") - return resource_id - - -@plugin.configure(name="ec2_flavor") -class EC2Flavor(types.ResourceType): - - @classmethod - def transform(cls, clients, resource_config): - """Transform the resource config to name. - - In the case of using EC2 API, flavor name is used for launching - servers. - - :param clients: openstack admin client handles - :param resource_config: scenario config with `id`, `name` or `regex` - - :returns: name matching resource - """ - resource_name = resource_config.get("name") - if not resource_name: - # NOTE(wtakase): gets resource name from OpenStack id - novaclient = clients.nova() - resource_name = types._name_from_id( - resource_config=resource_config, - resources=novaclient.flavors.list(), - typename="flavor") - return resource_name - - -@plugin.configure(name="glance_image") -class GlanceImage(types.ResourceType): - - @classmethod - def transform(cls, clients, resource_config): - """Transform the resource config to id. - - :param clients: openstack admin client handles - :param resource_config: scenario config with `id`, `name` or `regex` - - :returns: id matching resource - """ - resource_id = resource_config.get("id") - if not resource_id: - glanceclient = clients.glance() - resource_id = types._id_from_name( - resource_config=resource_config, - resources=list(glanceclient.images.list()), - typename="image") - return resource_id - - -@plugin.configure(name="glance_image_args") -class GlanceImageArguments(types.ResourceType): - - @classmethod - def transform(cls, clients, resource_config): - """Transform the resource config to id. - - :param clients: openstack admin client handles - :param resource_config: scenario config with `id`, `name` or `regex` - - :returns: id matching resource - """ - resource_config = copy.deepcopy(resource_config) - if "is_public" in resource_config: - if "visibility" in resource_config: - resource_config.pop("is_public") - else: - visibility = ("public" if resource_config.pop("is_public") - else "private") - resource_config["visibility"] = visibility - return resource_config - - -@plugin.configure(name="ec2_image") -class EC2Image(types.ResourceType): - - @classmethod - def transform(cls, clients, resource_config): - """Transform the resource config to EC2 id. 
- - If OpenStack resource id is given, this function gets resource name - from the id and then gets EC2 resource id from the name. - - :param clients: openstack admin client handles - :param resource_config: scenario config with `id`, `name` or `regex` - - :returns: EC2 id matching resource - """ - if "name" not in resource_config and "regex" not in resource_config: - # NOTE(wtakase): gets resource name from OpenStack id - glanceclient = clients.glance() - resource_name = types._name_from_id( - resource_config=resource_config, - resources=list(glanceclient.images.list()), - typename="image") - resource_config["name"] = resource_name - - # NOTE(wtakase): gets EC2 resource id from name or regex - ec2client = clients.ec2() - resource_ec2_id = types._id_from_name( - resource_config=resource_config, - resources=list(ec2client.get_all_images()), - typename="ec2_image") - return resource_ec2_id - - -@plugin.configure(name="cinder_volume_type") -class VolumeType(types.ResourceType): - - @classmethod - def transform(cls, clients, resource_config): - """Transform the resource config to id. - - :param clients: openstack admin client handles - :param resource_config: scenario config with `id`, `name` or `regex` - - :returns: id matching resource - """ - resource_id = resource_config.get("id") - if not resource_id: - cinderclient = clients.cinder() - resource_id = types._id_from_name(resource_config=resource_config, - resources=cinderclient. - volume_types.list(), - typename="volume_type") - return resource_id - - -@plugin.configure(name="neutron_network") -class NeutronNetwork(types.ResourceType): - - @classmethod - def transform(cls, clients, resource_config): - """Transform the resource config to id. - - :param clients: openstack admin client handles - :param resource_config: scenario config with `id`, `name` or `regex` - - :returns: id matching resource - """ - resource_id = resource_config.get("id") - if resource_id: - return resource_id - else: - neutronclient = clients.neutron() - for net in neutronclient.list_networks()["networks"]: - if net["name"] == resource_config.get("name"): - return net["id"] - - raise exceptions.InvalidScenarioArgument( - "Neutron network with name '{name}' not found".format( - name=resource_config.get("name"))) - - -@plugin.configure(name="watcher_strategy") -class WatcherStrategy(types.ResourceType): - - @classmethod - def transform(cls, clients, resource_config): - """Transform the resource config to id. - - :param clients: openstack admin client handles - :param resource_config: scenario config with `id`, `name` or `regex` - - :returns: id matching resource - """ - resource_id = resource_config.get("id") - if not resource_id: - watcherclient = clients.watcher() - resource_id = types._id_from_name( - resource_config=resource_config, - resources=[watcherclient.strategy.get( - resource_config.get("name"))], - typename="strategy", - id_attr="uuid") - return resource_id - - -@plugin.configure(name="watcher_goal") -class WatcherGoal(types.ResourceType): - - @classmethod - def transform(cls, clients, resource_config): - """Transform the resource config to id. 
- - :param clients: openstack admin client handles - :param resource_config: scenario config with `id`, `name` or `regex` - - :returns: id matching resource - """ - resource_id = resource_config.get("id") - if not resource_id: - watcherclient = clients.watcher() - resource_id = types._id_from_name( - resource_config=resource_config, - resources=[watcherclient.goal.get( - resource_config.get("name"))], - typename="goal", - id_attr="uuid") - return resource_id diff --git a/rally/plugins/openstack/validators.py b/rally/plugins/openstack/validators.py deleted file mode 100644 index 94852e30..00000000 --- a/rally/plugins/openstack/validators.py +++ /dev/null @@ -1,557 +0,0 @@ -# Copyright 2017: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import os -import re -import six - -from glanceclient import exc as glance_exc -from novaclient import exceptions as nova_exc -from rally.task import types - -from rally.common import logging -from rally.common import validation -from rally import consts -from rally import exceptions -from rally.plugins.openstack.context.nova import flavors as flavors_ctx -from rally.plugins.openstack import types as openstack_types - -LOG = logging.getLogger(__name__) -ValidationResult = validation.ValidationResult - - -@validation.add("required_platform", platform="openstack", users=True) -@validation.configure(name="image_exists", platform="openstack") -class ImageExistsValidator(validation.Validator): - - def __init__(self, param_name, nullable): - """Validator checks whether the specified image exists. - - :param param_name: defines which variable should be used - to get image id value. - :param nullable: defines whether the image id param may be empty - """ - super(ImageExistsValidator, self).__init__() - self.param_name = param_name - self.nullable = nullable - - def validate(self, config, credentials, plugin_cls, plugin_cfg): - - image_args = config.get("args", {}).get(self.param_name) - - if not image_args and self.nullable: - return - - image_context = config.get("context", {}).get("images", {}) - image_ctx_name = image_context.get("image_name") - - if not image_args: - message = ("Parameter %s is not specified.") % self.param_name - return self.fail(message) - - if "image_name" in image_context: - # NOTE(rvasilets) check that the string exactly matches the regex, - # or that the image name from the context equals the one from args - if "regex" in image_args: - match = re.match(image_args.get("regex"), image_ctx_name) - if image_ctx_name == image_args.get("name") or ( - "regex" in image_args and match): - return - try: - for user in credentials["openstack"]["users"]: - clients = user.get("credential", {}).clients() - image_id = openstack_types.GlanceImage.transform( - clients=clients, resource_config=image_args) - clients.glance().images.get(image_id) - except (glance_exc.HTTPNotFound, exceptions.InvalidScenarioArgument): - message = ("Image '%s' not found") % image_args - return self.fail(message) - - -@validation.add("required_platform", platform="openstack", users=True) -@validation.configure(name="external_network_exists", platform="openstack") -class ExternalNetworkExistsValidator(validation.Validator): - - def __init__(self, param_name): - """Validator checks that external network with given name exists. - - :param param_name: name of validated network - """ - super(ExternalNetworkExistsValidator, self).__init__() - self.param_name = param_name - - def validate(self, config, credentials, plugin_cls, plugin_cfg): - - ext_network = config.get("args", {}).get(self.param_name) - if not ext_network: - return - - users = credentials["openstack"]["users"] - result = [] - for user in users: - creds = user["credential"] - - networks = creds.clients().neutron().list_networks()["networks"] - external_networks = [net["name"] for net in networks if - net.get("router:external", False)] - if ext_network not in external_networks: - message = ("External (floating) network with name {1} " - "not found by user {0}. " - "Available networks: {2}").format(creds.username, - ext_network, - networks) - result.append(message) - if result: - return self.fail(result) - - -@validation.add("required_platform", platform="openstack", users=True) -@validation.configure(name="required_neutron_extensions", platform="openstack") -class RequiredNeutronExtensionsValidator(validation.Validator): - - def __init__(self, extensions, *args): - """Validator checks if the specified Neutron extensions are available - - :param extensions: list of Neutron extensions - """ - super(RequiredNeutronExtensionsValidator, self).__init__() - if isinstance(extensions, (list, tuple)): - # the `extensions` argument is a list, i.e. the new style of using - # this validator; positional args should not be provided - self.req_ext = extensions - if args: - LOG.warning("Positional argument is not what " - "'required_neutron_extensions' decorator expects. " - "Use `extensions` argument instead") - else: - # it is an old-style validator - self.req_ext = [extensions] - self.req_ext.extend(args) - - def validate(self, config, credentials, plugin_cls, plugin_cfg): - clients = credentials["openstack"]["users"][0]["credential"].clients() - extensions = clients.neutron().list_extensions()["extensions"] - aliases = [x["alias"] for x in extensions] - for extension in self.req_ext: - if extension not in aliases: - msg = ("Neutron extension %s " - "is not configured") % extension - return self.fail(msg) - - -@validation.add("required_platform", platform="openstack", users=True) -@validation.configure(name="image_valid_on_flavor", platform="openstack") -class ImageValidOnFlavorValidator(validation.Validator): - - def __init__(self, flavor_param, image_param, - fail_on_404_image=True, validate_disk=True): - """Validator checks that the image can be used with the given flavor - - :param flavor_param: defines which variable should be used - to get flavor id value. - :param image_param: defines which variable should be used - to get image id value. - :param validate_disk: flag to indicate whether to validate flavor's - disk. Should be True if instance is booted from - image. Should be False if instance is booted - from volume. Default value is True. - :param fail_on_404_image: flag that indicates whether to fail - if the image is not found. - """ - super(ImageValidOnFlavorValidator, self).__init__() - self.flavor_name = flavor_param - self.image_name = image_param - self.fail_on_404_image = fail_on_404_image - self.validate_disk = validate_disk - - def _get_validated_image(self, config, clients, param_name): - image_context = config.get("context", {}).get("images", {}) - image_args = config.get("args", {}).get(param_name) - image_ctx_name = image_context.get("image_name") - - if not image_args: - msg = ("Parameter %s is not specified.") % param_name - return (ValidationResult(False, msg), None) - - if "image_name" in image_context: - # NOTE(rvasilets) check that the string exactly matches the regex, - # or that the image name from the context equals the one from args - if "regex" in image_args: - match = re.match(image_args.get("regex"), image_ctx_name) - if image_ctx_name == image_args.get("name") or ("regex" - in image_args - and match): - image = { - "size": image_context.get("min_disk", 0), - "min_ram": image_context.get("min_ram", 0), - "min_disk": image_context.get("min_disk", 0) - } - return (ValidationResult(True), image) - try: - image_id = openstack_types.GlanceImage.transform( - clients=clients, resource_config=image_args) - image = clients.glance().images.get(image_id) - if hasattr(image, "to_dict"): - # NOTE(stpierre): Glance v1 images are objects that can be - # converted to dicts; Glance v2 images are already - # dict-like - image = image.to_dict() - if not image.get("size"): - image["size"] = 0 - if not image.get("min_ram"): - image["min_ram"] = 0 - if not image.get("min_disk"): - image["min_disk"] = 0 - return (ValidationResult(True), image) - except (glance_exc.HTTPNotFound, exceptions.InvalidScenarioArgument): - message = ("Image '%s' not found") % image_args - return (ValidationResult(False, message), None) - - def _get_flavor_from_context(self, config, flavor_value): - if "flavors" not in config.get("context", {}): - raise exceptions.InvalidScenarioArgument("No flavors context") - - flavors = [flavors_ctx.FlavorConfig(**f) - for f in config["context"]["flavors"]] - resource = types.obj_from_name(resource_config=flavor_value, - resources=flavors, typename="flavor") - flavor = flavors_ctx.FlavorConfig(**resource) - flavor.id = "<context flavor: %s>" % flavor.name - return (ValidationResult(True), flavor) - - def _get_validated_flavor(self, config, clients, param_name): - flavor_value = config.get("args", {}).get(param_name) - if not flavor_value: - msg = "Parameter %s is not specified." % param_name - return (ValidationResult(False, msg), None) - try: - flavor_id = openstack_types.Flavor.transform( - clients=clients, resource_config=flavor_value) - flavor = clients.nova().flavors.get(flavor=flavor_id) - return (ValidationResult(True), flavor) - except (nova_exc.NotFound, exceptions.InvalidScenarioArgument): - try: - return self._get_flavor_from_context(config, flavor_value) - except exceptions.InvalidScenarioArgument: - pass - message = ("Flavor '%s' not found") % flavor_value - return (ValidationResult(False, message), None) - - def validate(self, config, credentials, plugin_cls, plugin_cfg): - - flavor = None - for user in credentials["openstack"]["users"]: - clients = user["credential"].clients() - - if not flavor: - valid_result, flavor = self._get_validated_flavor( - config, clients, self.flavor_name) - if not valid_result.is_valid: - return valid_result - - valid_result, image = self._get_validated_image( - config, clients, self.image_name) - - if not image and not self.fail_on_404_image: - return - - if not valid_result.is_valid: - return valid_result - - if flavor.ram < image["min_ram"]: - message = ("The memory size for flavor '%s' is too small " - "for requested image '%s'") % (flavor.id, - image["id"]) - return self.fail(message) - - if flavor.disk and self.validate_disk: - if image["size"] > flavor.disk * (1024 ** 3): - message = ("The disk size for flavor '%s' is too small " - "for requested image '%s'") % (flavor.id, - image["id"]) - return self.fail(message) - - if image["min_disk"] > flavor.disk: - message = ("The minimal disk size for flavor '%s' is " - "too small for requested " - "image '%s'") % (flavor.id, image["id"]) - return self.fail(message) - - -@validation.add("required_platform", platform="openstack", users=True) -@validation.configure(name="required_clients", platform="openstack") -class RequiredClientsValidator(validation.Validator): - - def __init__(self, components, *args, **kwargs): - """Validator checks if specified OpenStack clients are available. - - :param components: list of client components names - :param **kwargs: optional parameters: - admin - bool, whether to use admin clients - """ - super(RequiredClientsValidator, self).__init__() - if isinstance(components, (list, tuple)): - # the `components` argument is a list, i.e. the new style of using - # this validator; positional args should not be provided - self.components = components - if args: - LOG.warning("Positional argument is not what " - "'required_clients' decorator expects. " - "Use `components` argument instead") - else: - # it is an old-style validator - self.components = [components] - self.components.extend(args) - self.options = kwargs - - def _check_component(self, clients): - for client_component in self.components: - try: - getattr(clients, client_component)() - except ImportError: - msg = ("Client for {0} is not installed. To install it run " - "`pip install python-{0}client`").format( - client_component) - return validation.ValidationResult(False, msg) - - def validate(self, config, credentials, plugin_cls, plugin_cfg): - LOG.warning("The validator 'required_clients' is deprecated since " - "Rally 0.10.0. If you are interested in it, please " - "contact Rally team via E-mail, IRC or Gitter (see " - "https://rally.readthedocs.io/en/latest/project_info" - "/index.html#where-can-i-discuss-and-propose-changes for " - "more details).") - if self.options.get("admin", False): - clients = credentials["openstack"]["admin"].clients() - result = self._check_component(clients) - else: - for user in credentials["openstack"]["users"]: - clients = user["credential"].clients() - result = self._check_component(clients) - if result: - return self.fail(result.msg) - - -@validation.add("required_platform", platform="openstack", users=True) -@validation.configure(name="required_services", platform="openstack") -class RequiredServicesValidator(validation.Validator): - - def __init__(self, services, *args): - """Validator checks if specified OpenStack services are available. - - :param services: list with names of required services - """ - - super(RequiredServicesValidator, self).__init__() - if isinstance(services, (list, tuple)): - # the `services` argument is a list, i.e. the new style of using - # this validator; positional args should not be provided - self.services = services - if args: - LOG.warning("Positional argument is not what " - "'required_services' decorator expects. " - "Use `services` argument instead") - else: - # it is an old-style validator - self.services = [services] - self.services.extend(args) - - def validate(self, config, credentials, plugin_cls, plugin_cfg): - creds = (credentials["openstack"].get("admin") - or credentials["openstack"]["users"][0]["credential"]) - - available_services = creds.clients().services().values() - if consts.Service.NOVA_NET in self.services: - LOG.warning("We are sorry, but Nova-network was deprecated for " - "a long time and the latest novaclient doesn't " - "support it, so we don't either.") - - for service in self.services: - # NOTE(andreykurilin): validator should ignore services configured - # via context (a proper validation should be in context) - service_config = config.get("context", {}).get( - "api_versions", {}).get(service, {}) - - if (service not in available_services and - not ("service_type" in service_config or - "service_name" in service_config)): - return self.fail( - ("'{0}' service is not available. Hint: if the '{0}' " - "service has a non-default service_type, try to" - " set it up via the 'api_versions'" - " context.").format(service)) - - -@validation.add("required_platform", platform="openstack", users=True) -@validation.configure(name="validate_heat_template", platform="openstack") -class ValidateHeatTemplateValidator(validation.Validator): - - def __init__(self, params, *args): - """Validates heat template. - - :param params: list of parameters to be validated. - """ - super(ValidateHeatTemplateValidator, self).__init__() - if isinstance(params, (list, tuple)): - # the `params` argument is a list, i.e. the new style of using - # this validator; positional args should not be provided - self.params = params - if args: - LOG.warning("Positional argument is not what " - "'validate_heat_template' decorator expects. " - "Use `params` argument instead") - else: - # it is an old-style validator - self.params = [params] - self.params.extend(args) - - def validate(self, config, credentials, plugin_cls, plugin_cfg): - - for param_name in self.params: - template_path = config.get("args", {}).get(param_name) - if not template_path: - msg = ("Path to heat template is not specified. It's needed " - "for heat template validation. 
Please check the " - "content of `{}` scenario argument.") - - return self.fail(msg.format(param_name)) - template_path = os.path.expanduser(template_path) - if not os.path.exists(template_path): - msg = "No file found by the given path {}" - return self.fail(msg.format(template_path)) - with open(template_path, "r") as f: - try: - for user in credentials["openstack"]["users"]: - clients = user["credential"].clients() - clients.heat().stacks.validate(template=f.read()) - except Exception as e: - dct = {"path": template_path, - "msg": str(e)} - msg = ("Heat template validation failed on %(path)s. " - "Original error message: %(msg)s.") % dct - return self.fail(msg) - - -@validation.add("required_platform", platform="openstack", admin=True) -@validation.configure(name="required_cinder_services", platform="openstack") -class RequiredCinderServicesValidator(validation.Validator): - - def __init__(self, services): - """Validator checks that specified Cinder service is available. - - It uses Cinder client with admin permissions to call - 'cinder service-list' call - - :param services: Cinder service name - """ - super(RequiredCinderServicesValidator, self).__init__() - self.services = services - - def validate(self, config, credentials, plugin_cls, plugin_cfg): - - clients = credentials["openstack"]["admin"].clients().cinder() - for service in clients.services.list(): - if (service.binary == six.text_type(self.services) - and service.state == six.text_type("up")): - return - - msg = ("%s service is not available") % self.services - return self.fail(msg) - - -@validation.add("required_platform", platform="openstack", users=True) -@validation.configure(name="required_api_versions", platform="openstack") -class RequiredAPIVersionsValidator(validation.Validator): - - def __init__(self, component, versions): - """Validator checks component API versions. 
- - :param component: name of required component - :param versions: version of required component - """ - super(RequiredAPIVersionsValidator, self).__init__() - self.component = component - self.versions = versions - - def validate(self, config, credentials, plugin_cls, plugin_cfg): - versions = [str(v) for v in self.versions] - versions_str = ", ".join(versions) - msg = ("Task was designed to be used with %(component)s " - "V%(version)s, but V%(found_version)s is " - "selected.") - for user in credentials["openstack"]["users"]: - clients = user["credential"].clients() - if self.component == "keystone": - if "2.0" not in versions and hasattr( - clients.keystone(), "tenants"): - return self.fail(msg % {"component": self.component, - "version": versions_str, - "found_version": "2.0"}) - if "3" not in versions and hasattr( - clients.keystone(), "projects"): - return self.fail(msg % {"component": self.component, - "version": versions_str, - "found_version": "3"}) - else: - used_version = config.get( - "context", {}).get( - "api_versions", {}).get( - self.component, {}).get( - "version", getattr( - clients, self.component).choose_version()) - if not used_version: - return self.fail("Unable to determine the API version.") - if str(used_version) not in versions: - return self.fail(msg % {"component": self.component, - "version": versions_str, - "found_version": used_version}) - - -@validation.add("required_platform", platform="openstack", users=True) -@validation.configure(name="volume_type_exists", platform="openstack") -class VolumeTypeExistsValidator(validation.Validator): - - def __init__(self, param_name, nullable=True): - """Returns validator for volume types. - - :param param_name: defines variable to be used as the flag to - determine if volume types should be checked for - existence. - :param nullable: defines volume_type param is required - """ - super(VolumeTypeExistsValidator, self).__init__() - self.param = param_name - self.nullable = nullable - - def validate(self, config, credentials, plugin_cls, plugin_cfg): - volume_type = config.get("args", {}).get(self.param, False) - - if not volume_type and self.nullable: - return - - if volume_type: - for user in credentials["openstack"]["users"]: - clients = user["credential"].clients() - vt_names = [vt.name for vt in - clients.cinder().volume_types.list()] - volume_types_ctx = config.get( - "context", {}).get("volume_types", []) - if volume_type not in vt_names + volume_types_ctx: - msg = ("Specified volume type {} not found for user {}. 
" - "List of available types: {}") - return self.fail(msg.format(volume_type, user, vt_names)) - else: - msg = ("The parameter '{}' is required and should not be empty.") - return self.fail(msg.format(self.param)) diff --git a/rally/plugins/openstack/verification/__init__.py b/rally/plugins/openstack/verification/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/rally/plugins/openstack/verification/tempest/__init__.py b/rally/plugins/openstack/verification/tempest/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/rally/plugins/openstack/verification/tempest/config.ini b/rally/plugins/openstack/verification/tempest/config.ini deleted file mode 100644 index 061c306f..00000000 --- a/rally/plugins/openstack/verification/tempest/config.ini +++ /dev/null @@ -1,57 +0,0 @@ -[DEFAULT] -debug = True -use_stderr = False -log_file = - -[auth] -use_dynamic_credentials = True - -[compute] -image_ref = -image_ref_alt = -flavor_ref = -flavor_ref_alt = -fixed_network_name = - -[compute-feature-enabled] -live_migration = False -resize = True -vnc_console = True -attach_encrypted_volume = False - -[data-processing] - -[identity] - -[image-feature-enabled] -deactivate_image = True - -[input-scenario] -ssh_user_regex = [["^.*[Cc]irros.*$", "cirros"], ["^.*[Tt]est[VvMm].*$", "cirros"], ["^.*rally_verify.*$", "cirros"]] - -[network] - -[network-feature-enabled] -ipv6_subnet_attributes = True -ipv6 = True - -[object-storage] - -[oslo_concurrency] -lock_path = - -[orchestration] -instance_type = - -[scenario] -img_dir = -img_file = - -[service_available] - -[validation] -run_validation = True -image_ssh_user = cirros - -[volume-feature-enabled] -bootable = True diff --git a/rally/plugins/openstack/verification/tempest/config.py b/rally/plugins/openstack/verification/tempest/config.py deleted file mode 100644 index 0cbb5072..00000000 --- a/rally/plugins/openstack/verification/tempest/config.py +++ /dev/null @@ -1,212 +0,0 @@ -# Copyright 2014: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import inspect -import os - -from oslo_config import cfg -import six -from six.moves import configparser -from six.moves.urllib import parse - -from rally.common import logging -from rally import exceptions -from rally.verification import utils - - -CONF = cfg.CONF -LOG = logging.getLogger(__name__) - - -class TempestConfigfileManager(object): - """Class to create a Tempest config file.""" - - def __init__(self, deployment): - self.credential = deployment.get_credentials_for("openstack")["admin"] - self.clients = self.credential.clients() - self.available_services = self.clients.services().values() - - self.conf = configparser.ConfigParser() - - def _get_service_type_by_service_name(self, service_name): - for s_type, s_name in self.clients.services().items(): - if s_name == service_name: - return s_type - - def _configure_auth(self, section_name="auth"): - self.conf.set(section_name, "admin_username", - self.credential.username) - self.conf.set(section_name, "admin_password", - self.credential.password) - self.conf.set(section_name, "admin_project_name", - self.credential.tenant_name) - # Keystone v3 related parameter - self.conf.set(section_name, "admin_domain_name", - self.credential.user_domain_name or "Default") - - # Sahara has two service types: 'data_processing' and 'data-processing'. - # 'data_processing' is deprecated, but it can be used in previous OpenStack - # releases. So we need to configure the 'catalog_type' option to support - # environments where 'data_processing' is used as the service type for - # Sahara. - def _configure_data_processing(self, section_name="data-processing"): - if "sahara" in self.available_services: - self.conf.set(section_name, "catalog_type", - self._get_service_type_by_service_name("sahara")) - - def _configure_identity(self, section_name="identity"): - self.conf.set(section_name, "region", - self.credential.region_name) - # discover keystone versions - - def get_versions(auth_url): - from keystoneauth1 import discover - from keystoneauth1 import session - - temp_session = session.Session( - verify=(self.credential.https_cacert or - not self.credential.https_insecure), - timeout=CONF.openstack_client_http_timeout) - data = discover.Discover(temp_session, auth_url).version_data() - return dict([(v["version"][0], v["url"]) for v in data]) - - # check the original auth_url without the version suffix to identify - # the default version - - versions = get_versions(self.credential.auth_url) - cropped_auth_url = self.clients.keystone._remove_url_version() - if cropped_auth_url == self.credential.auth_url: - # the given auth_url doesn't contain a version - if set(versions.keys()) == {2, 3}: - # ok, both versions of keystone are enabled, we can take the - # URLs from there - uri = versions[2] - uri_v3 = versions[3] - target_version = 3 - elif set(versions.keys()) == {2} or set(versions.keys()) == {3}: - # only one version is available while discovering, let's just - # guess the second auth_url (it should not be used) - - # get the most recent version - target_version = sorted(versions.keys())[-1] - if target_version == 2: - uri = self.credential.auth_url - uri_v3 = parse.urljoin(uri, "/v3") - else: - uri_v3 = self.credential.auth_url - uri = parse.urljoin(uri_v3, "/v2.0") - else: - # Did Keystone release a new version of the API?!
- LOG.debug("Discovered keystone versions: %s" % versions) - raise exceptions.RallyException("Failed to discover keystone " - "auth urls.") - - else: - if self.credential.auth_url.rstrip("/").endswith("v2.0"): - uri = self.credential.auth_url - uri_v3 = uri.replace("/v2.0", "/v3") - target_version = 2 - else: - uri_v3 = self.credential.auth_url - uri = uri_v3.replace("/v3", "/v2.0") - target_version = 3 - - self.conf.set(section_name, "auth_version", "v%s" % target_version) - self.conf.set(section_name, "uri", uri) - self.conf.set(section_name, "uri_v3", uri_v3) - - self.conf.set(section_name, "disable_ssl_certificate_validation", - str(self.credential.https_insecure)) - self.conf.set(section_name, "ca_certificates_file", - self.credential.https_cacert) - - # The compute section is configured in context class for Tempest resources. - # Options which are configured there: 'image_ref', 'image_ref_alt', - # 'flavor_ref', 'flavor_ref_alt'. - - def _configure_network(self, section_name="network"): - if "neutron" in self.available_services: - neutronclient = self.clients.neutron() - public_nets = [net for net - in neutronclient.list_networks()["networks"] - if net["status"] == "ACTIVE" and - net["router:external"] is True] - if public_nets: - net_id = public_nets[0]["id"] - net_name = public_nets[0]["name"] - self.conf.set(section_name, "public_network_id", net_id) - self.conf.set(section_name, "floating_network_name", net_name) - else: - novaclient = self.clients.nova() - net_name = next(net.human_id for net in novaclient.networks.list() - if net.human_id is not None) - self.conf.set("compute", "fixed_network_name", net_name) - self.conf.set("validation", "network_for_ssh", net_name) - - def _configure_network_feature_enabled( - self, section_name="network-feature-enabled"): - if "neutron" in self.available_services: - neutronclient = self.clients.neutron() - extensions = neutronclient.list_ext("extensions", "/extensions", - retrieve_all=True) - aliases = [ext["alias"] for ext in extensions["extensions"]] - aliases_str = ",".join(aliases) - self.conf.set(section_name, "api_extensions", aliases_str) - - def _configure_object_storage(self, section_name="object-storage"): - self.conf.set(section_name, "operator_role", - CONF.tempest.swift_operator_role) - self.conf.set(section_name, "reseller_admin_role", - CONF.tempest.swift_reseller_admin_role) - - def _configure_service_available(self, section_name="service_available"): - services = ["cinder", "glance", "heat", "ironic", "neutron", "nova", - "sahara", "swift"] - for service in services: - # Convert boolean to string because ConfigParser fails - # on attempt to get option with boolean value - self.conf.set(section_name, service, - str(service in self.available_services)) - - def _configure_validation(self, section_name="validation"): - if "neutron" in self.available_services: - self.conf.set(section_name, "connect_method", "floating") - else: - self.conf.set(section_name, "connect_method", "fixed") - - def _configure_orchestration(self, section_name="orchestration"): - self.conf.set(section_name, "stack_owner_role", - CONF.tempest.heat_stack_owner_role) - self.conf.set(section_name, "stack_user_role", - CONF.tempest.heat_stack_user_role) - - def create(self, conf_path, extra_options=None): - self.conf.read(os.path.join(os.path.dirname(__file__), "config.ini")) - - for name, method in inspect.getmembers(self, inspect.ismethod): - if name.startswith("_configure_"): - method() - - if extra_options: - utils.add_extra_options(extra_options, 
self.conf) - - with open(conf_path, "w") as configfile: - self.conf.write(configfile) - - raw_conf = six.StringIO() - raw_conf.write("# Some empty values of options will be replaced while " - "creating required resources (images, flavors, etc).\n") - self.conf.write(raw_conf) - - return raw_conf.getvalue() diff --git a/rally/plugins/openstack/verification/tempest/consts.py b/rally/plugins/openstack/verification/tempest/consts.py deleted file mode 100644 index fa35bc26..00000000 --- a/rally/plugins/openstack/verification/tempest/consts.py +++ /dev/null @@ -1,46 +0,0 @@ -# Copyright 2016: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally.common import utils - - -class _TempestApiTestSets(utils.ImmutableMixin, utils.EnumMixin): - BAREMETAL = "baremetal" - CLUSTERING = "clustering" - COMPUTE = "compute" - DATA_PROCESSING = "data_processing" - DATABASE = "database" - IDENTITY = "identity" - IMAGE = "image" - MESSAGING = "messaging" - NETWORK = "network" - OBJECT_STORAGE = "object_storage" - ORCHESTRATION = "orchestration" - TELEMETRY = "telemetry" - VOLUME = "volume" - - -class _TempestScenarioTestSets(utils.ImmutableMixin, utils.EnumMixin): - SCENARIO = "scenario" - - -class _TempestTestSets(utils.ImmutableMixin, utils.EnumMixin): - FULL = "full" - SMOKE = "smoke" - - -TempestApiTestSets = _TempestApiTestSets() -TempestScenarioTestSets = _TempestScenarioTestSets() -TempestTestSets = _TempestTestSets() diff --git a/rally/plugins/openstack/verification/tempest/context.py b/rally/plugins/openstack/verification/tempest/context.py deleted file mode 100644 index a6649748..00000000 --- a/rally/plugins/openstack/verification/tempest/context.py +++ /dev/null @@ -1,330 +0,0 @@ -# Copyright 2017: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
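One pattern drives most of the TempestContext class that follows: any option left blank in the config template is filled by a helper method that either discovers an existing resource or creates a new one, and the helper's result becomes the option value. Roughly (configure_option here is a simplified, free-standing stand-in for TempestContext._configure_option below):

def configure_option(conf, section, option, value=None,
                     helper_method=None, *args, **kwargs):
    # Respect anything the user already put into the config file.
    if conf.get(section, option):
        return
    if helper_method:
        res = helper_method(*args, **kwargs)
        if res:
            # networks are referenced by name, everything else by id
            value = res["name"] if "network" in option else res.id
    conf.set(section, option, value)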
- -import os -import re - -import requests -from six.moves import configparser - -from rally.common.i18n import _ -from rally.common import logging -from rally import exceptions -from rally.plugins.openstack.services.image import image -from rally.plugins.openstack.verification.tempest import config as conf -from rally.plugins.openstack.wrappers import network -from rally.task import utils as task_utils -from rally.verification import context -from rally.verification import utils - - -LOG = logging.getLogger(__name__) - - -@context.configure("tempest", order=900) -class TempestContext(context.VerifierContext): - """Context class to create/delete resources needed for Tempest.""" - - RESOURCE_NAME_FORMAT = "rally_verify_XXXXXXXX_XXXXXXXX" - - def __init__(self, ctx): - super(TempestContext, self).__init__(ctx) - - creds = self.verifier.deployment.get_credentials_for("openstack") - self.clients = creds["admin"].clients() - self.available_services = self.clients.services().values() - - self.conf = configparser.ConfigParser() - self.conf_path = self.verifier.manager.configfile - - self.data_dir = self.verifier.manager.home_dir - self.image_name = "tempest-image" - - self._created_roles = [] - self._created_images = [] - self._created_flavors = [] - self._created_networks = [] - - def setup(self): - self.conf.read(self.conf_path) - - utils.create_dir(self.data_dir) - - self._create_tempest_roles() - - self._configure_option("DEFAULT", "log_file", - os.path.join(self.data_dir, "tempest.log")) - self._configure_option("oslo_concurrency", "lock_path", - os.path.join(self.data_dir, "lock_files")) - self._configure_option("scenario", "img_dir", self.data_dir) - self._configure_option("scenario", "img_file", self.image_name, - helper_method=self._download_image) - self._configure_option("compute", "image_ref", - helper_method=self._discover_or_create_image) - self._configure_option("compute", "image_ref_alt", - helper_method=self._discover_or_create_image) - self._configure_option("compute", "flavor_ref", - helper_method=self._discover_or_create_flavor, - flv_ram=conf.CONF.tempest.flavor_ref_ram) - self._configure_option("compute", "flavor_ref_alt", - helper_method=self._discover_or_create_flavor, - flv_ram=conf.CONF.tempest.flavor_ref_alt_ram) - if "neutron" in self.available_services: - neutronclient = self.clients.neutron() - if neutronclient.list_networks(shared=True)["networks"]: - # If the OpenStack cloud has some shared networks, we will - # create our own shared network and specify its name in the - # Tempest config file. Such approach will allow us to avoid - # failures of Tempest tests with error "Multiple possible - # networks found". Otherwise the default behavior defined in - # Tempest will be used and Tempest itself will manage network - # resources. - LOG.debug("Shared networks found. " - "'fixed_network_name' option should be configured.") - self._configure_option( - "compute", "fixed_network_name", - helper_method=self._create_network_resources) - if "heat" in self.available_services: - self._configure_option( - "orchestration", "instance_type", - helper_method=self._discover_or_create_flavor, - flv_ram=conf.CONF.tempest.heat_instance_type_ram) - - with open(self.conf_path, "w") as configfile: - self.conf.write(configfile) - - def cleanup(self): - # Tempest tests may take more than 1 hour and we should remove all - # cached clients sessions to avoid tokens expiration when deleting - # Tempest resources. 
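# (Keystone tokens expire, typically after one hour, so a session cached
# during setup may hold an already-invalid token by the time cleanup runs;
# clearing the cache forces fresh authentication for the delete calls that
# follow.)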
- self.clients.clear() - - self._cleanup_tempest_roles() - self._cleanup_images() - self._cleanup_flavors() - if "neutron" in self.available_services: - self._cleanup_network_resources() - - with open(self.conf_path, "w") as configfile: - self.conf.write(configfile) - - def _create_tempest_roles(self): - keystoneclient = self.clients.verified_keystone() - roles = [conf.CONF.tempest.swift_operator_role, - conf.CONF.tempest.swift_reseller_admin_role, - conf.CONF.tempest.heat_stack_owner_role, - conf.CONF.tempest.heat_stack_user_role] - existing_roles = set(role.name for role in keystoneclient.roles.list()) - - for role in roles: - if role not in existing_roles: - LOG.debug("Creating role '%s'." % role) - self._created_roles.append(keystoneclient.roles.create(role)) - - def _configure_option(self, section, option, value=None, - helper_method=None, *args, **kwargs): - option_value = self.conf.get(section, option) - if not option_value: - LOG.debug("Option '%s' from '%s' section " - "is not configured." % (option, section)) - if helper_method: - res = helper_method(*args, **kwargs) - if res: - value = res["name"] if "network" in option else res.id - LOG.debug("Setting value '%s' to option '%s'." % (value, option)) - self.conf.set(section, option, value) - LOG.debug("Option '{opt}' is configured. " - "{opt} = {value}".format(opt=option, value=value)) - else: - LOG.debug("Option '{opt}' is already configured " - "in Tempest config file. {opt} = {opt_val}" - .format(opt=option, opt_val=option_value)) - - def _discover_image(self): - LOG.debug("Trying to discover a public image with name matching " - "regular expression '%s'. Note that case insensitive " - "matching is performed." % conf.CONF.tempest.img_name_regex) - image_service = image.Image(self.clients) - images = image_service.list_images(status="active", - visibility="public") - for image_obj in images: - if image_obj.name and re.match(conf.CONF.tempest.img_name_regex, - image_obj.name, re.IGNORECASE): - LOG.debug("The following public " - "image discovered: '%s'." % image_obj.name) - return image_obj - - LOG.debug("There is no public image with name matching regular " - "expression '%s'." % conf.CONF.tempest.img_name_regex) - - def _download_image_from_source(self, target_path, image=None): - if image: - LOG.debug("Downloading image '%s' " - "from Glance to %s." % (image.name, target_path)) - with open(target_path, "wb") as image_file: - for chunk in self.clients.glance().images.data(image.id): - image_file.write(chunk) - else: - LOG.debug("Downloading image from %s " - "to %s." % (conf.CONF.tempest.img_url, target_path)) - try: - response = requests.get(conf.CONF.tempest.img_url, stream=True) - except requests.ConnectionError as err: - msg = _("Failed to download image. " - "Possibly there is no connection to Internet. " - "Error: %s.") % (str(err) or "unknown") - raise exceptions.RallyException(msg) - - if response.status_code == 200: - with open(target_path, "wb") as image_file: - for chunk in response.iter_content(chunk_size=1024): - if chunk: # filter out keep-alive new chunks - image_file.write(chunk) - image_file.flush() - else: - if response.status_code == 404: - msg = _("Failed to download image. Image was not found.") - else: - msg = _("Failed to download image. 
" - "HTTP error code %d.") % response.status_code - raise exceptions.RallyException(msg) - - LOG.debug("The image has been successfully downloaded!") - - def _download_image(self): - image_path = os.path.join(self.data_dir, self.image_name) - if os.path.isfile(image_path): - LOG.debug("Image is already downloaded to %s." % image_path) - return - - if conf.CONF.tempest.img_name_regex: - image = self._discover_image() - if image: - return self._download_image_from_source(image_path, image) - - self._download_image_from_source(image_path) - - def _discover_or_create_image(self): - if conf.CONF.tempest.img_name_regex: - image_obj = self._discover_image() - if image_obj: - LOG.debug("Using image '%s' (ID = %s) " - "for the tests." % (image_obj.name, image_obj.id)) - return image_obj - - params = { - "image_name": self.generate_random_name(), - "disk_format": conf.CONF.tempest.img_disk_format, - "container_format": conf.CONF.tempest.img_container_format, - "image_location": os.path.join(self.data_dir, self.image_name), - "visibility": "public" - } - LOG.debug("Creating image '%s'." % params["image_name"]) - image_service = image.Image(self.clients) - image_obj = image_service.create_image(**params) - LOG.debug("Image '%s' (ID = %s) has been " - "successfully created!" % (image_obj.name, image_obj.id)) - self._created_images.append(image_obj) - - return image_obj - - def _discover_or_create_flavor(self, flv_ram): - novaclient = self.clients.nova() - - LOG.debug("Trying to discover a flavor with the following " - "properties: RAM = %dMB, VCPUs = 1, disk = 0GB." % flv_ram) - for flavor in novaclient.flavors.list(): - if (flavor.ram == flv_ram and - flavor.vcpus == 1 and flavor.disk == 0): - LOG.debug("The following flavor discovered: '{0}'. " - "Using flavor '{0}' (ID = {1}) for the tests." - .format(flavor.name, flavor.id)) - return flavor - - LOG.debug("There is no flavor with the mentioned properties.") - - params = { - "name": self.generate_random_name(), - "ram": flv_ram, - "vcpus": 1, - "disk": 0 - } - LOG.debug("Creating flavor '%s' with the following properties: RAM " - "= %dMB, VCPUs = 1, disk = 0GB." % (params["name"], flv_ram)) - flavor = novaclient.flavors.create(**params) - LOG.debug("Flavor '%s' (ID = %s) has been " - "successfully created!" % (flavor.name, flavor.id)) - self._created_flavors.append(flavor) - - return flavor - - def _create_network_resources(self): - neutron_wrapper = network.NeutronWrapper(self.clients, self) - tenant_id = self.clients.keystone.auth_ref.project_id - LOG.debug("Creating network resources: network, subnet, router.") - net = neutron_wrapper.create_network( - tenant_id, subnets_num=1, add_router=True, - network_create_args={"shared": True}) - LOG.debug("Network resources have been successfully created!") - self._created_networks.append(net) - - return net - - def _cleanup_tempest_roles(self): - keystoneclient = self.clients.keystone() - for role in self._created_roles: - LOG.debug("Deleting role '%s'." % role.name) - keystoneclient.roles.delete(role.id) - LOG.debug("Role '%s' has been deleted." % role.name) - - def _cleanup_images(self): - image_service = image.Image(self.clients) - for image_obj in self._created_images: - LOG.debug("Deleting image '%s'." 
% image_obj.name) - self.clients.glance().images.delete(image_obj.id) - task_utils.wait_for_status( - image_obj, ["deleted", "pending_delete"], - check_deletion=True, - update_resource=image_service.get_image, - timeout=conf.CONF.benchmark.glance_image_delete_timeout, - check_interval=conf.CONF.benchmark. - glance_image_delete_poll_interval) - LOG.debug("Image '%s' has been deleted." % image_obj.name) - self._remove_opt_value_from_config("compute", image_obj.id) - - def _cleanup_flavors(self): - novaclient = self.clients.nova() - for flavor in self._created_flavors: - LOG.debug("Deleting flavor '%s'." % flavor.name) - novaclient.flavors.delete(flavor.id) - LOG.debug("Flavor '%s' has been deleted." % flavor.name) - self._remove_opt_value_from_config("compute", flavor.id) - self._remove_opt_value_from_config("orchestration", flavor.id) - - def _cleanup_network_resources(self): - neutron_wrapper = network.NeutronWrapper(self.clients, self) - for net in self._created_networks: - LOG.debug("Deleting network resources: router, subnet, network.") - neutron_wrapper.delete_network(net) - self._remove_opt_value_from_config("compute", net["name"]) - LOG.debug("Network resources have been deleted.") - - def _remove_opt_value_from_config(self, section, opt_value): - for option, value in self.conf.items(section): - if opt_value == value: - LOG.debug("Removing value '%s' of option '%s' " - "from Tempest config file." % (opt_value, option)) - self.conf.set(section, option, "") - LOG.debug("Value '%s' has been removed." % opt_value) diff --git a/rally/plugins/openstack/verification/tempest/manager.py b/rally/plugins/openstack/verification/tempest/manager.py deleted file mode 100644 index b1a3844a..00000000 --- a/rally/plugins/openstack/verification/tempest/manager.py +++ /dev/null @@ -1,216 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import os -import re -import shutil -import subprocess - -from rally.common.i18n import _LE -from rally.common import yamlutils as yaml -from rally import exceptions -from rally.plugins.common.verification import testr -from rally.plugins.openstack.verification.tempest import config -from rally.plugins.openstack.verification.tempest import consts -from rally.verification import manager -from rally.verification import utils - - -AVAILABLE_SETS = (list(consts.TempestTestSets) + - list(consts.TempestApiTestSets) + - list(consts.TempestScenarioTestSets)) - - -@manager.configure(name="tempest", namespace="openstack", - default_repo="https://git.openstack.org/openstack/tempest", - context={"tempest": {}, "testr": {}}) -class TempestManager(testr.TestrLauncher): - """Tempest verifier. - - **Description**: - - Quote from official documentation: - - This is a set of integration tests to be run against a live OpenStack - cluster. Tempest has batteries of tests for OpenStack API validation, - Scenarios, and other specific tests useful in validating an OpenStack - deployment. 
- - Rally supports the features listed below: - - * *cloning Tempest*: repository and version can be specified - * *installation*: system-wide (with a check that the required packages - exist) or in a virtual environment - * *configuration*: options are discovered via the OpenStack API, but you - can override them if you need to - * *running*: pre-creating all required resources (e.g. images, tenants, - etc.), preparing arguments, launching Tempest, live-progress output - * *results*: all verifications are stored in the DB; you can build reports - and compare verifications whenever you want. - - Appeared in Rally 0.8.0 *(actually, it appeared a long time ago with the - first revision of the Verification Component, but 0.8.0 is mentioned since - it is the first release after the Verification Component redesign)* - """ - - RUN_ARGS = {"set": "Name of predefined set of tests. Known names: %s" - % ", ".join(AVAILABLE_SETS)} - - @property - def run_environ(self): - env = super(TempestManager, self).run_environ - env["TEMPEST_CONFIG_DIR"] = os.path.dirname(self.configfile) - env["TEMPEST_CONFIG"] = os.path.basename(self.configfile) - # TODO(andreykurilin): move it to the Testr base class - env["OS_TEST_PATH"] = os.path.join(self.repo_dir, - "tempest/test_discover") - return env - - @property - def configfile(self): - return os.path.join(self.home_dir, "tempest.conf") - - def validate_args(self, args): - """Validate given arguments.""" - super(TempestManager, self).validate_args(args) - - if args.get("pattern"): - pattern = args["pattern"].split("=", 1) - if len(pattern) == 1: - pass # it is just a regex - elif pattern[0] == "set": - if pattern[1] not in AVAILABLE_SETS: - raise exceptions.ValidationError( - "Test set '%s' not found in available " - "Tempest test sets. Available sets are '%s'." - % (pattern[1], "', '".join(AVAILABLE_SETS))) - else: - raise exceptions.ValidationError( - "'pattern' argument should be a regexp or set name " - "(format: 'tempest.api.identity.v3', 'set=smoke').") - - def configure(self, extra_options=None): - """Configure Tempest.""" - utils.create_dir(self.home_dir) - tcm = config.TempestConfigfileManager(self.verifier.deployment) - return tcm.create(self.configfile, extra_options) - - def is_configured(self): - """Check whether Tempest is configured or not.""" - return os.path.exists(self.configfile) - - def get_configuration(self): - """Get Tempest configuration.""" - with open(self.configfile) as f: - return f.read() - - def extend_configuration(self, extra_options): - """Extend Tempest configuration with extra options.""" - return utils.extend_configfile(extra_options, self.configfile) - - def override_configuration(self, new_configuration): - """Override Tempest configuration with the new configuration.""" - with open(self.configfile, "w") as f: - f.write(new_configuration) - - def install_extension(self, source, version=None, extra_settings=None): - """Install a Tempest plugin.""" - if extra_settings: - raise NotImplementedError( - _LE("'%s' verifiers don't support extra installation settings " - "for extensions.") % self.get_name()) - version = version or "master" - egg = re.sub(r"\.git$", "", os.path.basename(source.strip("/"))) - full_source = "git+{0}@{1}#egg={2}".format(source, version, egg) - # NOTE(ylobankov): Use 'develop mode' installation to provide an - # ability for advanced users to change tests or - # develop new ones in the verifier repo on the fly. -
- cmd = ["pip", "install", - "--src", os.path.join(self.base_dir, "extensions"), - "-e", full_source] - if self.verifier.system_wide: - cmd.insert(2, "--no-deps") - utils.check_output(cmd, cwd=self.base_dir, env=self.environ) - - # Very often Tempest plugins are inside projects and requirements - # for plugins are listed in the test-requirements.txt file. - test_reqs_path = os.path.join(self.base_dir, "extensions", - egg, "test-requirements.txt") - if os.path.exists(test_reqs_path): - if not self.verifier.system_wide: - utils.check_output(["pip", "install", "-r", test_reqs_path], - cwd=self.base_dir, env=self.environ) - else: - self.check_system_wide(reqs_file_path=test_reqs_path) - - def list_extensions(self): - """List all installed Tempest plugins.""" - # TODO(andreykurilin): find a better way to list tempest plugins - cmd = ("from tempest.test_discover import plugins; " - "plugins_manager = plugins.TempestTestPluginManager(); " - "plugins_map = plugins_manager.get_plugin_load_tests_tuple(); " - "plugins_list = [" - " {'name': p.name, " - " 'entry_point': p.entry_point_target, " - " 'location': plugins_map[p.name][1]} " - " for p in plugins_manager.ext_plugins.extensions]; " - "print(plugins_list)") - try: - output = utils.check_output(["python", "-c", cmd], - cwd=self.base_dir, env=self.environ, - debug_output=False).strip() - except subprocess.CalledProcessError: - raise exceptions.RallyException( - "Cannot list installed Tempest plugins for verifier %s." % - self.verifier) - - return yaml.safe_load(output) - - def uninstall_extension(self, name): - """Uninstall a Tempest plugin.""" - for ext in self.list_extensions(): - if ext["name"] == name and os.path.exists(ext["location"]): - shutil.rmtree(ext["location"]) - break - else: - raise exceptions.RallyException( - "There is no Tempest plugin with name '%s'. " - "Are you sure that it was installed?" % name) - - def list_tests(self, pattern=""): - """List all Tempest tests.""" - if pattern: - pattern = self._transform_pattern(pattern) - return super(TempestManager, self).list_tests(pattern) - - def prepare_run_args(self, run_args): - """Prepare 'run_args' for testr context.""" - if run_args.get("pattern"): - run_args["pattern"] = self._transform_pattern(run_args["pattern"]) - return run_args - - @staticmethod - def _transform_pattern(pattern): - """Transform pattern into Tempest-specific pattern.""" - parsed_pattern = pattern.split("=", 1) - if len(parsed_pattern) == 2: - if parsed_pattern[0] == "set": - if parsed_pattern[1] in consts.TempestTestSets: - return "smoke" if parsed_pattern[1] == "smoke" else "" - elif parsed_pattern[1] in consts.TempestApiTestSets: - return "tempest.api.%s" % parsed_pattern[1] - else: - return "tempest.%s" % parsed_pattern[1] - - return pattern # it is just a regex diff --git a/rally/plugins/openstack/wrappers/__init__.py b/rally/plugins/openstack/wrappers/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/rally/plugins/openstack/wrappers/cinder.py b/rally/plugins/openstack/wrappers/cinder.py deleted file mode 100644 index 483885f9..00000000 --- a/rally/plugins/openstack/wrappers/cinder.py +++ /dev/null @@ -1,95 +0,0 @@ -# Copyright 2014: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import abc - -from rally.common import logging -from rally import exceptions - -import six - -LOG = logging.getLogger(__name__) - - -@six.add_metaclass(abc.ABCMeta) -class CinderWrapper(object): - def __init__(self, client, owner): - self.owner = owner - self.client = client - - @abc.abstractmethod - def create_volume(self, volume): - """Creates new volume.""" - - @abc.abstractmethod - def update_volume(self, volume): - """Updates name and description for this volume.""" - - @abc.abstractmethod - def create_snapshot(self, volume_id): - """Creates a volume snapshot.""" - - -class CinderV1Wrapper(CinderWrapper): - def create_volume(self, size, **kwargs): - kwargs["display_name"] = self.owner.generate_random_name() - volume = self.client.volumes.create(size, **kwargs) - return volume - - def update_volume(self, volume, **update_args): - update_args["display_name"] = self.owner.generate_random_name() - update_args["display_description"] = ( - update_args.get("display_description")) - self.client.volumes.update(volume, **update_args) - - def create_snapshot(self, volume_id, **kwargs): - kwargs["display_name"] = self.owner.generate_random_name() - snapshot = self.client.volume_snapshots.create(volume_id, **kwargs) - return snapshot - - -class CinderV2Wrapper(CinderWrapper): - def create_volume(self, size, **kwargs): - kwargs["name"] = self.owner.generate_random_name() - - volume = self.client.volumes.create(size, **kwargs) - return volume - - def update_volume(self, volume, **update_args): - update_args["name"] = self.owner.generate_random_name() - update_args["description"] = update_args.get("description") - self.client.volumes.update(volume, **update_args) - - def create_snapshot(self, volume_id, **kwargs): - kwargs["name"] = self.owner.generate_random_name() - snapshot = self.client.volume_snapshots.create(volume_id, **kwargs) - return snapshot - - -def wrap(client, owner): - """Returns cinderclient wrapper based on cinder client version.""" - LOG.warning("Method wrap from %s and whole Cinder wrappers are " - "deprecated since Rally 0.10.0 and will be removed soon. Use " - "rally.plugins.openstack.services.storage.block.BlockStorage " - "instead." % __file__) - version = client.choose_version() - if version == "1": - return CinderV1Wrapper(client(), owner) - elif version == "2": - return CinderV2Wrapper(client(), owner) - else: - msg = "This version of API %s could not be identified." % version - LOG.warning(msg) - raise exceptions.InvalidArgumentsException(msg) diff --git a/rally/plugins/openstack/wrappers/glance.py b/rally/plugins/openstack/wrappers/glance.py deleted file mode 100644 index aac7a776..00000000 --- a/rally/plugins/openstack/wrappers/glance.py +++ /dev/null @@ -1,210 +0,0 @@ -# Copyright 2016: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import abc -import os -import time - -from rally.common import logging -from rally.common import utils as rutils -from rally import exceptions -from rally.task import utils - -from glanceclient import exc as glance_exc -from oslo_config import cfg -import requests -import six - -LOG = logging.getLogger(__name__) - - -CONF = cfg.CONF - - -@six.add_metaclass(abc.ABCMeta) -class GlanceWrapper(object): - def __init__(self, client, owner): - self.owner = owner - self.client = client - - def get_image(self, image): - """Gets image. - - This serves to fetch the latest data on the image for the - various wait_for_*() functions. - Must raise rally.exceptions.GetResourceNotFound if the - resource is not found or deleted. - """ - # NOTE(stpierre): This function actually has a single - # implementation that works for both Glance v1 and Glance v2, - # but since we need to use this function in both wrappers, it - # gets implemented here. - try: - return self.client.images.get(image.id) - except glance_exc.HTTPNotFound: - raise exceptions.GetResourceNotFound(resource=image) - - @abc.abstractmethod - def create_image(self, container_format, image_location, disk_format): - """Creates new image. - - Accepts all Glance v2 parameters. - """ - - @abc.abstractmethod - def set_visibility(self, image, visibility="public"): - """Set an existing image to public or private.""" - - @abc.abstractmethod - def list_images(self, **filters): - """List images. - - Accepts all Glance v2 filters. - """ - - -class GlanceV1Wrapper(GlanceWrapper): - def create_image(self, container_format, image_location, - disk_format, **kwargs): - kw = { - "container_format": container_format, - "disk_format": disk_format, - } - kw.update(kwargs) - if "name" not in kw: - kw["name"] = self.owner.generate_random_name() - if "visibility" in kw: - kw["is_public"] = kw.pop("visibility") == "public" - - image_location = os.path.expanduser(image_location) - - try: - if os.path.isfile(image_location): - kw["data"] = open(image_location) - else: - kw["copy_from"] = image_location - - image = self.client.images.create(**kw) - - rutils.interruptable_sleep(CONF.benchmark. - glance_image_create_prepoll_delay) - - image = utils.wait_for_status( - image, ["active"], - update_resource=self.get_image, - timeout=CONF.benchmark.glance_image_create_timeout, - check_interval=CONF.benchmark. - glance_image_create_poll_interval) - finally: - if "data" in kw: - kw["data"].close() - - return image - - def set_visibility(self, image, visibility="public"): - self.client.images.update(image.id, is_public=(visibility == "public")) - - def list_images(self, **filters): - kwargs = {"filters": filters} - if "owner" in filters: - # NOTE(stpierre): in glance v1, "owner" is not a filter, - # so we need to handle it separately. - kwargs["owner"] = kwargs["filters"].pop("owner") - visibility = kwargs["filters"].pop("visibility", None) - images = self.client.images.list(**kwargs) - # NOTE(stpierre): Glance v1 isn't smart enough to filter on - # public/private images, so we have to do it manually. 
- if visibility is not None: - is_public = visibility == "public" - return [i for i in images if i.is_public is is_public] - return images - - -class GlanceV2Wrapper(GlanceWrapper): - def create_image(self, container_format, image_location, - disk_format, **kwargs): - kw = { - "container_format": container_format, - "disk_format": disk_format, - } - kw.update(kwargs) - if "name" not in kw: - kw["name"] = self.owner.generate_random_name() - if "is_public" in kw: - LOG.warning("is_public is not supported by Glance v2, and is " - "deprecated in Rally v0.8.0") - kw["visibility"] = "public" if kw.pop("is_public") else "private" - - image_location = os.path.expanduser(image_location) - - image = self.client.images.create(**kw) - - rutils.interruptable_sleep(CONF.benchmark. - glance_image_create_prepoll_delay) - - start = time.time() - image = utils.wait_for_status( - image, ["queued"], - update_resource=self.get_image, - timeout=CONF.benchmark.glance_image_create_timeout, - check_interval=CONF.benchmark. - glance_image_create_poll_interval) - timeout = time.time() - start - - image_data = None - response = None - try: - if os.path.isfile(image_location): - image_data = open(image_location) - else: - response = requests.get(image_location, stream=True) - image_data = response.raw - self.client.images.upload(image.id, image_data) - finally: - if image_data is not None: - image_data.close() - if response is not None: - response.close() - - return utils.wait_for_status( - image, ["active"], - update_resource=self.get_image, - timeout=timeout, - check_interval=CONF.benchmark. - glance_image_create_poll_interval) - - def set_visibility(self, image, visibility="public"): - self.client.images.update(image.id, visibility=visibility) - - def list_images(self, **filters): - return self.client.images.list(filters=filters) - - -def wrap(client, owner): - """Returns glanceclient wrapper based on glance client version.""" - LOG.warning("Method wrap from %s and whole Glance wrappers are " - "deprecated since Rally 0.10.0 and will be removed soon. Use " - "rally.plugins.openstack.services.image.image.Image " - "instead." % __file__) - - version = client.choose_version() - if version == "1": - return GlanceV1Wrapper(client(), owner) - elif version == "2": - return GlanceV2Wrapper(client(), owner) - else: - msg = "Version %s of the glance API could not be identified." % version - LOG.warning(msg) - raise exceptions.InvalidArgumentsException(msg) diff --git a/rally/plugins/openstack/wrappers/keystone.py b/rally/plugins/openstack/wrappers/keystone.py deleted file mode 100644 index 0114922c..00000000 --- a/rally/plugins/openstack/wrappers/keystone.py +++ /dev/null @@ -1,273 +0,0 @@ -# Copyright 2014: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
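
# The Glance v2 flow above (create -> wait for "queued" -> upload -> wait
# for "active") leans on rally.task.utils.wait_for_status. A simplified,
# self-contained stand-in for that polling helper; get_status and the
# timings here are illustrative only.
import time


def wait_for_status(get_status, expected, timeout=30.0, interval=0.5):
    deadline = time.time() + timeout
    while time.time() < deadline:
        status = get_status()
        if status in expected:
            return status
        time.sleep(interval)
    raise RuntimeError("resource never reached %s" % (expected,))


# Simulate an image that becomes active on the third poll.
_polls = iter(["queued", "saving", "active"])
assert wait_for_status(lambda: next(_polls), ["active"], interval=0) == "active"
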
- -import abc -import collections - -from keystoneclient import exceptions -import six - -from rally.common import logging - - -LOG = logging.getLogger(__name__) - -Project = collections.namedtuple("Project", ["id", "name", "domain_id"]) -User = collections.namedtuple("User", - ["id", "name", "project_id", "domain_id"]) -Service = collections.namedtuple("Service", ["id", "name"]) -Role = collections.namedtuple("Role", ["id", "name"]) - - -@six.add_metaclass(abc.ABCMeta) -class KeystoneWrapper(object): - def __init__(self, client): - self.client = client - - LOG.warning( - "Class %s is deprecated since Rally 0.8.0 and will be removed " - "soon. Use " - "rally.plugins.openstack.services.identity.identity.Identity " - "instead." % self.__class__) - - def __getattr__(self, attr_name): - return getattr(self.client, attr_name) - - @abc.abstractmethod - def create_project(self, project_name, domain_name="Default"): - """Creates new project/tenant and return project object. - - :param project_name: Name of project to be created. - :param domain_name: Name or id of domain where to create project, for - implementations that don't support domains this - argument must be None or 'Default'. - """ - - @abc.abstractmethod - def delete_project(self, project_id): - """Deletes project.""" - - @abc.abstractmethod - def create_user(self, username, password, email=None, project_id=None, - domain_name="Default", default_role="member"): - """Create user. - - :param username: name of user - :param password: user password - :param project: user's default project - :param domain_name: Name or id of domain where to create project, for - implementations that don't support domains this - argument must be None or 'Default'. - :param default_role: user's default role - """ - - @abc.abstractmethod - def delete_user(self, user_id): - """Deletes user.""" - - @abc.abstractmethod - def list_users(self): - """List all users.""" - - @abc.abstractmethod - def list_projects(self): - """List all projects/tenants.""" - - def delete_service(self, service_id): - """Deletes service.""" - self.client.services.delete(service_id) - - def list_services(self): - """List all services.""" - return map(KeystoneWrapper._wrap_service, self.client.services.list()) - - def create_role(self, name, **kwargs): - """create a role. 
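
# The namedtuples above give callers a version-agnostic view of keystone
# objects. A tiny sketch of that normalization; FakeV2Tenant stands in
# for a real keystoneclient tenant object.
import collections

Project = collections.namedtuple("Project", ["id", "name", "domain_id"])


class FakeV2Tenant(object):
    def __init__(self, id_, name):
        self.id, self.name = id_, name


def wrap_v2_tenant(tenant):
    # Keystone v2 has no domains, so everything lands in "default".
    return Project(id=tenant.id, name=tenant.name, domain_id="default")


assert wrap_v2_tenant(FakeV2Tenant("42", "demo")) == Project("42", "demo",
                                                             "default")
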
- - :param name: name of role - :param kwargs: Optional additional arguments for roles creation - """ - - def delete_role(self, role_id): - """Deletes role.""" - self.client.roles.delete(role_id) - - def list_roles(self): - """List all roles.""" - return map(KeystoneWrapper._wrap_role, self.client.roles.list()) - - @abc.abstractmethod - def add_role(self, role_id, user_id, project_id): - """Assign role to user.""" - - @abc.abstractmethod - def remove_role(self, role_id, user_id, project_id): - """Remove role from user.""" - - @staticmethod - def _wrap_service(service): - return Service(id=service.id, name=service.name) - - @staticmethod - def _wrap_role(role): - return Role(id=role.id, name=role.name) - - -class KeystoneV2Wrapper(KeystoneWrapper): - def _check_domain(self, domain_name): - if domain_name.lower() != "default": - raise NotImplementedError("Domain functionality not implemented " - "in Keystone v2") - - @staticmethod - def _wrap_v2_tenant(tenant): - return Project(id=tenant.id, name=tenant.name, domain_id="default") - - @staticmethod - def _wrap_v2_role(role): - return Role(id=role.id, name=role.name) - - @staticmethod - def _wrap_v2_user(user): - return User(id=user.id, name=user.name, - project_id=getattr(user, "tenantId", None), - domain_id="default") - - def create_project(self, project_name, domain_name="Default"): - self._check_domain(domain_name) - tenant = self.client.tenants.create(project_name) - return KeystoneV2Wrapper._wrap_v2_tenant(tenant) - - def delete_project(self, project_id): - self.client.tenants.delete(project_id) - - def create_user(self, username, password, email=None, project_id=None, - domain_name="Default", default_role="member"): - # NOTE(liuyulong): For v2 wrapper the `default_role` here is not used. - self._check_domain(domain_name) - user = self.client.users.create(username, password, email, project_id) - return KeystoneV2Wrapper._wrap_v2_user(user) - - def delete_user(self, user_id): - self.client.users.delete(user_id) - - def list_users(self): - return map(KeystoneV2Wrapper._wrap_v2_user, self.client.users.list()) - - def list_projects(self): - return map(KeystoneV2Wrapper._wrap_v2_tenant, - self.client.tenants.list()) - - def create_role(self, name): - role = self.client.roles.create(name) - return KeystoneV2Wrapper._wrap_v2_role(role) - - def add_role(self, role_id, user_id, project_id): - self.client.roles.add_user_role(user_id, role_id, tenant=project_id) - - def remove_role(self, role_id, user_id, project_id): - self.client.roles.remove_user_role(user_id, role_id, tenant=project_id) - - -class KeystoneV3Wrapper(KeystoneWrapper): - def _get_domain_id(self, domain_name_or_id): - try: - # First try to find domain by ID - return self.client.domains.get(domain_name_or_id).id - except exceptions.NotFound: - # Domain not found by ID, try to find it by name - domains = self.client.domains.list(name=domain_name_or_id) - if domains: - return domains[0].id - # Domain not found by name, raise original NotFound exception - raise - - @staticmethod - def _wrap_v3_project(project): - return Project(id=project.id, name=project.name, - domain_id=project.domain_id) - - @staticmethod - def _wrap_v3_role(role): - return Role(id=role.id, name=role.name) - - @staticmethod - def _wrap_v3_user(user): - # When user has default_project_id that is None user.default_project_id - # will raise AttributeError - project_id = getattr(user, "default_project_id", None) - return User(id=user.id, name=user.name, project_id=project_id, - domain_id=user.domain_id) - - def 
create_project(self, project_name, domain_name="Default"): - domain_id = self._get_domain_id(domain_name) - project = self.client.projects.create( - name=project_name, domain=domain_id) - return KeystoneV3Wrapper._wrap_v3_project(project) - - def delete_project(self, project_id): - self.client.projects.delete(project_id) - - def create_user(self, username, password, email=None, project_id=None, - domain_name="Default", default_role="member"): - domain_id = self._get_domain_id(domain_name) - user = self.client.users.create(name=username, password=password, - default_project=project_id, - email=email, domain=domain_id) - for role in self.client.roles.list(): - if default_role in role.name.lower(): - self.client.roles.grant(role.id, user=user.id, - project=project_id) - break - else: - LOG.warning( - "Unable to set %s role to created user." % default_role) - return KeystoneV3Wrapper._wrap_v3_user(user) - - def delete_user(self, user_id): - self.client.users.delete(user_id) - - def list_users(self): - return map(KeystoneV3Wrapper._wrap_v3_user, self.client.users.list()) - - def list_projects(self): - return map(KeystoneV3Wrapper._wrap_v3_project, - self.client.projects.list()) - - def create_role(self, name, domain, **kwargs): - role = self.client.roles.create(name, domain=domain, **kwargs) - return KeystoneV3Wrapper._wrap_v3_role(role) - - def add_role(self, role_id, user_id, project_id): - self.client.roles.grant(role_id, user=user_id, project=project_id) - - def remove_role(self, role_id, user_id, project_id): - self.client.roles.revoke(role_id, user=user_id, project=project_id) - - -def wrap(client): - """Returns keystone wrapper based on keystone client version.""" - LOG.warning("Method wrap from %s and whole Keystone wrappers are " - "deprecated since Rally 0.8.0 and will be removed soon. Use " - "rally.plugins.openstack.services.identity.identity.Identity " - "instead." % __file__) - - if client.version == "v2.0": - return KeystoneV2Wrapper(client) - elif client.version == "v3": - return KeystoneV3Wrapper(client) - else: - raise NotImplementedError( - "Wrapper for version %s is not implemented." % client.version) diff --git a/rally/plugins/openstack/wrappers/network.py b/rally/plugins/openstack/wrappers/network.py deleted file mode 100644 index 4118b201..00000000 --- a/rally/plugins/openstack/wrappers/network.py +++ /dev/null @@ -1,380 +0,0 @@ -# Copyright 2014: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import abc - -import netaddr -import six - -from rally.common.i18n import _ -from rally.common import logging -from rally.common import utils -from rally import consts -from rally import exceptions - -from neutronclient.common import exceptions as neutron_exceptions - - -LOG = logging.getLogger(__name__) - - -cidr_incr = utils.RAMInt() - - -def generate_cidr(start_cidr="10.2.0.0/24"): - """Generate next CIDR for network or subnet, without IP overlapping. 
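
# A standalone sketch of the ID-first, then-by-name lookup performed by
# KeystoneV3Wrapper._get_domain_id above. DOMAINS and NotFound are
# illustrative stand-ins for the keystoneclient API.
class NotFound(Exception):
    pass


DOMAINS = {"d1": "Default", "d2": "demo"}  # id -> name


def get_domain_id(name_or_id):
    if name_or_id in DOMAINS:              # first try it as an ID
        return name_or_id
    for dom_id, name in DOMAINS.items():   # then fall back to a name lookup
        if name == name_or_id:
            return dom_id
    raise NotFound(name_or_id)             # neither ID nor name matched


assert get_domain_id("d2") == "d2"
assert get_domain_id("Default") == "d1"
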
- - This is process and thread safe, because `cidr_incr' points to - value stored directly in RAM. This guarantees that CIDRs will be - serial and unique even under hard multiprocessing/threading load. - - :param start_cidr: start CIDR str - :returns: next available CIDR str - """ - cidr = str(netaddr.IPNetwork(start_cidr).next(next(cidr_incr))) - LOG.debug("CIDR generated: %s" % cidr) - return cidr - - -class NetworkWrapperException(exceptions.RallyException): - error_code = 532 - msg_fmt = _("%(message)s") - - -@six.add_metaclass(abc.ABCMeta) -class NetworkWrapper(object): - """Base class for network service implementations. - - We actually have two network services implementations, with different API: - NovaNetwork and Neutron. The idea is (at least to try) to use unified - service, which hides most differences and routines behind the scenes. - This allows to significantly re-use and simplify code. - """ - START_CIDR = "10.2.0.0/24" - SERVICE_IMPL = None - - def __init__(self, clients, owner, config=None): - """Returns available network wrapper instance. - - :param clients: rally.osclients.Clients instance - :param owner: The object that owns resources created by this - wrapper instance. It will be used to generate - random names, so must implement - rally.common.utils.RandomNameGeneratorMixin - :param config: The configuration of the network - wrapper. Currently only one config option is - recognized, 'start_cidr', and only for Nova - network. - :returns: NetworkWrapper subclass instance - """ - if hasattr(clients, self.SERVICE_IMPL): - self.client = getattr(clients, self.SERVICE_IMPL)() - else: - self.client = clients(self.SERVICE_IMPL) - self.config = config or {} - self.owner = owner - self.start_cidr = self.config.get("start_cidr", self.START_CIDR) - - @abc.abstractmethod - def create_network(self): - """Create network.""" - - @abc.abstractmethod - def delete_network(self): - """Delete network.""" - - @abc.abstractmethod - def list_networks(self): - """List networks.""" - - @abc.abstractmethod - def create_floating_ip(self): - """Create floating IP.""" - - @abc.abstractmethod - def delete_floating_ip(self): - """Delete floating IP.""" - - @abc.abstractmethod - def supports_extension(self): - """Checks whether a network extension is supported.""" - - -class NeutronWrapper(NetworkWrapper): - SERVICE_IMPL = consts.Service.NEUTRON - SUBNET_IP_VERSION = 4 - LB_METHOD = "ROUND_ROBIN" - LB_PROTOCOL = "HTTP" - - @property - def external_networks(self): - return self.client.list_networks(**{ - "router:external": True})["networks"] - - def get_network(self, net_id=None, name=None): - net = None - try: - if net_id: - net = self.client.show_network(net_id)["network"] - else: - for net in self.client.list_networks(name=name)["networks"]: - break - return {"id": net["id"], - "name": net["name"], - "tenant_id": net["tenant_id"], - "status": net["status"], - "external": net["router:external"], - "subnets": net["subnets"], - "router_id": None} - except (TypeError, neutron_exceptions.NeutronClientException): - raise NetworkWrapperException( - "Network not found: %s" % (name or net_id)) - - def create_router(self, external=False, **kwargs): - """Create neutron router. 
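
# A stdlib-only restatement of generate_cidr() above: a shared counter
# hands out sequential, non-overlapping blocks. itertools.count plays the
# role of utils.RAMInt (atomic under CPython's GIL, though unlike RAMInt
# it is not shared across processes) and ipaddress replaces netaddr.
import ipaddress
import itertools

_cidr_incr = itertools.count()


def generate_cidr(start_cidr="10.2.0.0/24"):
    net = ipaddress.ip_network(start_cidr)
    offset = next(_cidr_incr)
    # Shift the network forward by `offset` blocks of its own size.
    base = int(net.network_address) + offset * net.num_addresses
    return "%s/%d" % (ipaddress.ip_address(base), net.prefixlen)


assert generate_cidr() == "10.2.0.0/24"
assert generate_cidr() == "10.2.1.0/24"
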
- - :param external: bool, whether to set setup external_gateway_info - :param **kwargs: POST /v2.0/routers request options - :returns: neutron router dict - """ - kwargs["name"] = self.owner.generate_random_name() - - if external and "external_gateway_info" not in kwargs: - for net in self.external_networks: - kwargs["external_gateway_info"] = { - "network_id": net["id"], "enable_snat": True} - return self.client.create_router({"router": kwargs})["router"] - - def create_v1_pool(self, tenant_id, subnet_id, **kwargs): - """Create LB Pool (v1). - - :param tenant_id: str, pool tenant id - :param subnet_id: str, neutron subnet-id - :param **kwargs: extra options - :returns: neutron lb-pool dict - """ - pool_args = { - "pool": { - "tenant_id": tenant_id, - "name": self.owner.generate_random_name(), - "subnet_id": subnet_id, - "lb_method": kwargs.get("lb_method", self.LB_METHOD), - "protocol": kwargs.get("protocol", self.LB_PROTOCOL) - } - } - return self.client.create_pool(pool_args) - - def _generate_cidr(self): - # TODO(amaretskiy): Generate CIDRs unique for network, not cluster - return generate_cidr(start_cidr=self.start_cidr) - - def create_network(self, tenant_id, **kwargs): - """Create network. - - The following keyword arguments are accepted: - - * add_router: Create an external router and add an interface to each - subnet created. Default: False - * subnets_num: Number of subnets to create per network. Default: 0 - * dns_nameservers: Nameservers for each subnet. Default: - 8.8.8.8, 8.8.4.4 - * network_create_args: Additional network creation arguments. - - :param tenant_id: str, tenant ID - :param kwargs: Additional options, left open-ended for compatbilitiy. - See above for recognized keyword args. - :returns: dict, network data - """ - network_args = {"network": kwargs.get("network_create_args", {})} - network_args["network"].update({ - "tenant_id": tenant_id, - "name": self.owner.generate_random_name()}) - network = self.client.create_network(network_args)["network"] - - router = None - if kwargs.get("add_router", False): - router = self.create_router(external=True, tenant_id=tenant_id) - - subnets = [] - subnets_num = kwargs.get("subnets_num", 0) - for i in range(subnets_num): - subnet_args = { - "subnet": { - "tenant_id": tenant_id, - "network_id": network["id"], - "name": self.owner.generate_random_name(), - "ip_version": self.SUBNET_IP_VERSION, - "cidr": self._generate_cidr(), - "enable_dhcp": True, - "dns_nameservers": kwargs.get("dns_nameservers", - ["8.8.8.8", "8.8.4.4"]) - } - } - subnet = self.client.create_subnet(subnet_args)["subnet"] - subnets.append(subnet["id"]) - - if router: - self.client.add_interface_router(router["id"], - {"subnet_id": subnet["id"]}) - - return {"id": network["id"], - "name": network["name"], - "status": network["status"], - "subnets": subnets, - "external": network.get("router:external", False), - "router_id": router and router["id"] or None, - "tenant_id": tenant_id} - - def delete_v1_pool(self, pool_id): - """Delete LB Pool (v1) - - :param pool_id: str, Lb-Pool-id - """ - self.client.delete_pool(pool_id) - - def delete_network(self, network): - if self.supports_extension("dhcp_agent_scheduler")[0]: - net_dhcps = self.client.list_dhcp_agent_hosting_networks( - network["id"])["agents"] - for net_dhcp in net_dhcps: - self.client.remove_network_from_dhcp_agent(net_dhcp["id"], - network["id"]) - - if network["router_id"]: - self.client.remove_gateway_router(network["router_id"]) - - for port in 
self.client.list_ports(network_id=network["id"])["ports"]: - if port["device_owner"] in ( - "network:router_interface", - "network:router_interface_distributed", - "network:ha_router_replicated_interface", - "network:router_gateway"): - self.client.remove_interface_router( - port["device_id"], {"port_id": port["id"]}) - else: - try: - self.client.delete_port(port["id"]) - except neutron_exceptions.PortNotFoundClient: - # port is auto-removed - pass - - for subnet_id in network["subnets"]: - self._delete_subnet(subnet_id) - - responce = self.client.delete_network(network["id"]) - - if network["router_id"]: - self.client.delete_router(network["router_id"]) - - return responce - - def _delete_subnet(self, subnet_id): - self.client.delete_subnet(subnet_id) - - def list_networks(self): - return self.client.list_networks()["networks"] - - def create_port(self, network_id, **kwargs): - """Create neutron port. - - :param network_id: neutron network id - :param **kwargs: POST /v2.0/ports request options - :returns: neutron port dict - """ - kwargs["network_id"] = network_id - kwargs["name"] = self.owner.generate_random_name() - return self.client.create_port({"port": kwargs})["port"] - - def create_floating_ip(self, ext_network=None, - tenant_id=None, port_id=None, **kwargs): - """Create Neutron floating IP. - - :param ext_network: floating network name or dict - :param tenant_id: str tenant id - :param port_id: str port id - :param **kwargs: for compatibility, not used here - :returns: floating IP dict - """ - if not tenant_id: - raise ValueError("Missed tenant_id") - - net_id = None - if type(ext_network) is dict: - net_id = ext_network["id"] - elif ext_network: - ext_net = self.get_network(name=ext_network) - if not ext_net["external"]: - raise NetworkWrapperException("Network is not external: %s" - % ext_network) - net_id = ext_net["id"] - else: - ext_networks = self.external_networks - if not ext_networks: - raise NetworkWrapperException( - "Failed to allocate floating IP: " - "no external networks found") - net_id = ext_networks[0]["id"] - - kwargs = {"floatingip": {"floating_network_id": net_id, - "tenant_id": tenant_id}} - if port_id: - kwargs["floatingip"]["port_id"] = port_id - - fip = self.client.create_floatingip(kwargs)["floatingip"] - return {"id": fip["id"], "ip": fip["floating_ip_address"]} - - def delete_floating_ip(self, fip_id, **kwargs): - """Delete floating IP. - - :param fip_id: int floating IP id - :param **kwargs: for compatibility, not used here - """ - self.client.delete_floatingip(fip_id) - - def supports_extension(self, extension): - """Check whether a neutron extension is supported - - :param extension: str, neutron extension - :returns: result tuple - :rtype: (bool, string) - """ - extensions = self.client.list_extensions().get("extensions", []) - if any(ext.get("alias") == extension for ext in extensions): - return True, "" - - return False, _("Neutron driver does not support %s") % (extension) - - -def wrap(clients, owner, config=None): - """Returns available network wrapper instance. - - :param clients: rally.osclients.Clients instance - :param owner: The object that owns resources created by this - wrapper instance. It will be used to generate random - names, so must implement - rally.common.utils.RandomNameGeneratorMixin - :param config: The configuration of the network wrapper. Currently - only one config option is recognized, 'start_cidr', - and only for Nova network. 
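
# Sketch of the three-way external-network resolution performed by
# create_floating_ip above: an explicit dict, a name to look up, or the
# first external network available. EXTERNAL_NETS is hard-coded sample data.
EXTERNAL_NETS = [{"id": "ext-1", "name": "public", "external": True}]


def resolve_ext_network(ext_network=None):
    if isinstance(ext_network, dict):
        return ext_network["id"]            # caller passed a network dict
    if ext_network:                         # caller passed a network name
        for net in EXTERNAL_NETS:
            if net["name"] == ext_network:
                if not net["external"]:
                    raise ValueError("Network is not external: %s"
                                     % ext_network)
                return net["id"]
        raise ValueError("Network not found: %s" % ext_network)
    if not EXTERNAL_NETS:                   # fall back to the first external
        raise ValueError("no external networks found")
    return EXTERNAL_NETS[0]["id"]


assert resolve_ext_network({"id": "x"}) == "x"
assert resolve_ext_network("public") == "ext-1"
assert resolve_ext_network() == "ext-1"
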
- :returns: NetworkWrapper subclass instance - """ - if hasattr(clients, "services"): - services = clients.services() - else: - services = clients("services") - - if consts.Service.NEUTRON in services.values(): - return NeutronWrapper(clients, owner, config=config) - LOG.warning(_("NovaNetworkWrapper is deprecated since 0.9.0")) diff --git a/rally/plugins/workload/__init__.py b/rally/plugins/workload/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/rally/plugins/workload/siege.py b/rally/plugins/workload/siege.py deleted file mode 100644 index e761bf39..00000000 --- a/rally/plugins/workload/siege.py +++ /dev/null @@ -1,57 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Run HTTP benchmark by runcommand_heat scenario.""" - -import json -import re -import subprocess -import sys -import tempfile - - -SIEGE_RE = re.compile(r"^(Throughput|Transaction rate):\s+(\d+\.\d+)\s+.*") - - -def get_instances(): - outputs = json.load(sys.stdin) - for output in outputs: - if output["output_key"] == "wp_nodes": - for node in output["output_value"].values(): - yield node["wordpress-network"][0] - - -def generate_urls_list(instances): - urls = tempfile.NamedTemporaryFile(delete=False) - with urls: - for inst in instances: - for i in range(1, 1000): - urls.write("http://%s/wordpress/index.php/%d/\n" % (inst, i)) - return urls.name - - -def run(): - instances = list(get_instances()) - urls = generate_urls_list(instances) - out = subprocess.check_output( - ["siege", "-q", "-t", "60S", "-b", "-f", urls], - stderr=subprocess.STDOUT) - for line in out.splitlines(): - m = SIEGE_RE.match(line) - if m: - sys.stdout.write("%s:%s\n" % m.groups()) - - -if __name__ == "__main__": - sys.exit(run()) diff --git a/rally/task/__init__.py b/rally/task/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/rally/task/atomic.py b/rally/task/atomic.py deleted file mode 100644 index 95035573..00000000 --- a/rally/task/atomic.py +++ /dev/null @@ -1,139 +0,0 @@ -# Copyright 2015: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
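
# The regex-scraping step of siege.py above, exercised against a canned
# sample of siege's output instead of a live benchmark run.
import re

SIEGE_RE = re.compile(r"^(Throughput|Transaction rate):\s+(\d+\.\d+)\s+.*")

SAMPLE = """\
Transactions:               4834 hits
Transaction rate:           80.54 trans/sec
Throughput:                 11.24 MB/sec
"""

parsed = ["%s:%s" % m.groups()
          for m in map(SIEGE_RE.match, SAMPLE.splitlines()) if m]
assert parsed == ["Transaction rate:80.54", "Throughput:11.24"]
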
- -import collections -import functools - -from rally.common import logging -from rally.common import utils - -LOG = logging.getLogger(__name__) - - -class ActionTimerMixin(object): - - def __init__(self): - self._atomic_actions = [] - - def atomic_actions(self): - """Returns the content of each atomic action.""" - return self._atomic_actions - - -class ActionTimer(utils.Timer): - """A class to measure the duration of atomic operations - - This would simplify the way measure atomic operation duration - in certain cases. For example, if we want to get the duration - for each operation which runs in an iteration - for i in range(repetitions): - with atomic.ActionTimer(instance_of_action_timer, "name_of_action"): - self.clients(). - """ - - def __init__(self, instance, name): - """Create a new instance of the AtomicAction. - - :param instance: instance of subclass of ActionTimerMixin - :param name: name of the ActionBuilder - """ - super(ActionTimer, self).__init__() - self.instance = instance - self.name = name - self._root = self._find_parent(self.instance._atomic_actions) - self.atomic_action = {"name": self.name, - "children": [], - "started_at": None} - self._root.append(self.atomic_action) - - def _find_parent(self, atomic_actions): - if atomic_actions and "finished_at" not in atomic_actions[-1]: - return self._find_parent(atomic_actions[-1]["children"]) - else: - return atomic_actions - - def __enter__(self): - super(ActionTimer, self).__enter__() - self.atomic_action["started_at"] = self.start - - def __exit__(self, type_, value, tb): - super(ActionTimer, self).__exit__(type_, value, tb) - self.atomic_action["finished_at"] = self.finish - - -def action_timer(name): - """Provide measure of execution time. - - Decorates methods of the Scenario class. - This provides duration in seconds of each atomic action. - """ - def wrap(func): - @functools.wraps(func) - def func_atomic_actions(self, *args, **kwargs): - with ActionTimer(self, name): - f = func(self, *args, **kwargs) - return f - return func_atomic_actions - return wrap - - -def optional_action_timer(name, argument_name="atomic_action", default=True): - """Optionally provide measure of execution time. - - Decorates methods of the Scenario class. This provides duration in - seconds of each atomic action. When the decorated function is - called, this inspects the kwarg named by ``argument_name`` and - optionally sets an ActionTimer around the function call. - - The ``atomic_action`` keyword argument does not need to be added - to the function; it will be popped from the kwargs dict by the - wrapper. - - :param name: The name of the timer - :param argument_name: The name of the kwarg to inspect to - determine if a timer should be set. - :param default: Whether or not to set a timer if ``argument_name`` - is not present. - """ - def wrap(func): - @functools.wraps(func) - def func_atomic_actions(self, *args, **kwargs): - LOG.warning("'optional_action_timer' is deprecated" - "since rally v0.10.0." 
- "Please use action_timer instead, " - "we have improved atomic actions, " - "now do not need to explicitly close " - "original action.") - if kwargs.pop(argument_name, default): - with ActionTimer(self, name): - f = func(self, *args, **kwargs) - else: - f = func(self, *args, **kwargs) - return f - return func_atomic_actions - return wrap - - -def merge_atomic(atomic_actions): - merged_atomic = collections.OrderedDict() - for action in atomic_actions: - name = action["name"] - duration = action["finished_at"] - action["started_at"] - if name not in merged_atomic: - merged_atomic[name] = {"duration": duration, "count": 1} - else: - merged_atomic[name]["duration"] += duration - merged_atomic[name]["count"] += 1 - return merged_atomic diff --git a/rally/task/context.py b/rally/task/context.py deleted file mode 100644 index 8593f06e..00000000 --- a/rally/task/context.py +++ /dev/null @@ -1,229 +0,0 @@ -# Copyright 2014: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import abc - -import six - -from rally.common import logging -from rally.common.plugin import plugin -from rally.common import utils -from rally.common import validation -from rally import exceptions -from rally.task import functional - -LOG = logging.getLogger(__name__) - - -@logging.log_deprecated_args("Use 'platform' arg instead", "0.10.0", - ["namespace"], log_function=LOG.warning) -def configure(name, order, platform="default", namespace=None, hidden=False): - """Context class wrapper. - - Each context class has to be wrapped by configure() wrapper. It - sets essential configuration of context classes. Actually this wrapper just - adds attributes to the class. - - :param name: Name of the class, used in the input task - :param platform: str plugin's platform - :param order: As far as we can use multiple context classes that sometimes - depend on each other we have to specify order of execution. - Contexts with smaller order are run first - :param hidden: If it is true you won't be able to specify context via - task config - """ - if namespace: - platform = namespace - - def wrapper(cls): - cls = plugin.configure(name=name, platform=platform, - hidden=hidden)(cls) - cls._meta_set("order", order) - return cls - - return wrapper - - -# TODO(andreykurilin): move it to some common place. -@six.add_metaclass(abc.ABCMeta) -class BaseContext(plugin.Plugin, functional.FunctionalMixin, - utils.RandomNameGeneratorMixin): - """This class is a factory for context classes. 
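
# merge_atomic() above folds raw timer records into per-name totals. The
# same fold, restated standalone and fed with hand-written action records:
import collections


def merge_atomic(atomic_actions):
    merged = collections.OrderedDict()
    for action in atomic_actions:
        duration = action["finished_at"] - action["started_at"]
        entry = merged.setdefault(action["name"],
                                  {"duration": 0.0, "count": 0})
        entry["duration"] += duration
        entry["count"] += 1
    return merged


actions = [
    {"name": "nova.boot", "started_at": 0.0, "finished_at": 2.0},
    {"name": "nova.boot", "started_at": 2.0, "finished_at": 5.0},
    {"name": "nova.delete", "started_at": 5.0, "finished_at": 6.0},
]
assert merge_atomic(actions) == collections.OrderedDict([
    ("nova.boot", {"duration": 5.0, "count": 2}),
    ("nova.delete", {"duration": 1.0, "count": 1}),
])
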
- - Every context class should be a subclass of this class and implement - 2 abstract methods: setup() and cleanup() - - It covers: - 1) proper setting up of context config - 2) Auto discovering & get by name - 3) Validation by CONFIG_SCHEMA - 4) Order of context creation - - """ - RESOURCE_NAME_FORMAT = "c_rally_XXXXXXXX_XXXXXXXX" - - CONFIG_SCHEMA = {"type": "null"} - - def __init__(self, ctx): - config = ctx.get("config", {}).get(self.get_name(), {}) - # NOTE(amaretskiy): self.config is a constant data and must be - # immutable or write-protected type to prevent - # unexpected changes in runtime - if isinstance(config, dict): - if hasattr(self, "DEFAULT_CONFIG"): - for key, value in self.DEFAULT_CONFIG.items(): - config.setdefault(key, value) - self.config = utils.LockedDict(config) - elif isinstance(config, list): - self.config = tuple(config) - else: - # NOTE(amaretskiy): It is improbable that config can be a None, - # number, boolean or even string, - # however we handle this - self.config = config - self.context = ctx - - def __lt__(self, other): - return self.get_order() < other.get_order() - - def __gt__(self, other): - return self.get_order() > other.get_order() - - def __eq__(self, other): - return self.get_order() == other.get_order() - - def __ne__(self, other): - return not self.__eq__(other) - - @classmethod - def get_order(cls): - return cls._meta_get("order") - - @abc.abstractmethod - def setup(self): - """Prepare environment for test. - - This method is executed only once before load generation. - - self.config contains input arguments of this context - self.context contains information that will be passed to scenario - - The goal of this method is to perform all operation to prepare - environment and store information to self.context that is required - by scenario. - """ - - @abc.abstractmethod - def cleanup(self): - """Clean up environment after load generation. - - This method is run once after load generation is done to cleanup - environment. 
- - self.config contains input arguments of this context - self.context contains information that was passed to scenario - """ - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_value, exc_traceback): - self.cleanup() - - -@validation.add_default("jsonschema") -@plugin.base() -class Context(BaseContext, validation.ValidatablePluginMixin): - def __init__(self, ctx): - super(Context, self).__init__(ctx) - self.task = self.context.get("task", {}) - - def get_owner_id(self): - if "owner_id" in self.context: - return self.context["owner_id"] - return super(Context, self).get_owner_id() - - -class ContextManager(object): - """Create context environment and run method inside it.""" - - def __init__(self, context_obj): - self._visited = [] - self.context_obj = context_obj - - def _get_sorted_context_lst(self): - context_list = [] - for ctx_name in self.context_obj["config"].keys(): - # TODO(andreykurilin): move this logic to some "find" method - if "@" in ctx_name: - ctx_name, ctx_namespace = ctx_name.split("@", 1) - context_list.append(Context.get(ctx_name, - platform=ctx_namespace, - fallback_to_default=False, - allow_hidden=True)) - else: - potential_result = Context.get_all(name=ctx_name, - allow_hidden=True) - if len(potential_result) == 1: - context_list.append(potential_result[0]) - continue - elif len(potential_result) > 1: - scen_namespace = self.context_obj["scenario_namespace"] - another_attempt = [c for c in potential_result - if c.get_platform() == scen_namespace] - if another_attempt: - context_list.append(another_attempt[0]) - continue - another_attempt = [c for c in potential_result - if c.get_platform() == "default"] - if another_attempt: - context_list.append(another_attempt[0]) - continue - - raise exceptions.PluginNotFound(name=ctx_name, - platform="any of") - - return sorted([ctx(self.context_obj) for ctx in context_list]) - - def setup(self): - """Creates benchmark environment from config.""" - - self._visited = [] - for ctx in self._get_sorted_context_lst(): - self._visited.append(ctx) - ctx.setup() - - return self.context_obj - - def cleanup(self): - """Destroys benchmark environment.""" - - ctxlst = self._visited or self._get_sorted_context_lst() - for ctx in ctxlst[::-1]: - try: - ctx.cleanup() - except Exception as e: - LOG.error("Context %s failed during cleanup." % ctx.get_name()) - LOG.exception(e) - - def __enter__(self): - try: - self.setup() - except Exception: - self.cleanup() - raise - - def __exit__(self, exc_type, exc_value, exc_traceback): - self.cleanup() diff --git a/rally/task/engine.py b/rally/task/engine.py deleted file mode 100644 index 133b807b..00000000 --- a/rally/task/engine.py +++ /dev/null @@ -1,799 +0,0 @@ -# Copyright 2013: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
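
# Toy reproduction of the ContextManager contract above: contexts are set
# up in ascending `order` and cleaned up in the reverse of the setup
# order, even if setup fails partway through. ToyContext is illustrative.
class ToyContext(object):
    def __init__(self, name, order, log):
        self.name, self.order, self.log = name, order, log

    def setup(self):
        self.log.append("setup:%s" % self.name)

    def cleanup(self):
        self.log.append("cleanup:%s" % self.name)


def run_in_contexts(contexts, body):
    visited = []
    try:
        for ctx in sorted(contexts, key=lambda c: c.order):
            visited.append(ctx)
            ctx.setup()
        body()
    finally:
        for ctx in reversed(visited):
            ctx.cleanup()


log = []
run_in_contexts([ToyContext("quotas", 300, log),
                 ToyContext("users", 100, log)],
                lambda: log.append("scenario"))
assert log == ["setup:users", "setup:quotas", "scenario",
               "cleanup:quotas", "cleanup:users"]
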
- -import collections -import copy -import json -import threading -import time -import traceback - -import jsonschema -from oslo_config import cfg - -from rally.common.i18n import _ -from rally.common import logging -from rally.common import objects -from rally.common import utils -from rally import consts -from rally import exceptions -# TODO(andreykurilin): remove openstack specific import after Rally 0.10.0 -from rally.plugins.openstack import scenario as os_scenario -from rally.task import context -from rally.task import hook -from rally.task import runner -from rally.task import scenario -from rally.task import sla -from rally.task import trigger - - -LOG = logging.getLogger(__name__) - -CONF = cfg.CONF - -TASK_ENGINE_OPTS = [ - cfg.IntOpt("raw_result_chunk_size", default=1000, min=1, - help="Size of raw result chunk in iterations"), -] -CONF.register_opts(TASK_ENGINE_OPTS) - - -class ResultConsumer(object): - """ResultConsumer class stores results from ScenarioRunner, checks SLA. - - Also ResultConsumer listens for runner events and notifies HookExecutor - about started iterations. - """ - - def __init__(self, key, task, subtask, workload, runner, - abort_on_sla_failure): - """ResultConsumer constructor. - - :param key: Scenario identifier - :param task: Instance of Task, task to run - :param subtask: Instance of Subtask - :param workload: Instance of Workload - :param runner: ScenarioRunner instance that produces results to be - consumed - :param abort_on_sla_failure: True if the execution should be stopped - when some SLA check fails - """ - - self.key = key - self.task = task - self.subtask = subtask - self.workload = workload - self.runner = runner - self.load_started_at = float("inf") - self.load_finished_at = 0 - self.workload_data_count = 0 - - self.sla_checker = sla.SLAChecker(key["kw"]) - self.hook_executor = hook.HookExecutor(key["kw"], self.task) - self.abort_on_sla_failure = abort_on_sla_failure - self.is_done = threading.Event() - self.unexpected_failure = {} - self.results = [] - self.thread = threading.Thread(target=self._consume_results) - self.aborting_checker = threading.Thread(target=self.wait_and_abort) - if "hooks" in self.key["kw"]: - self.event_thread = threading.Thread(target=self._consume_events) - - def __enter__(self): - self.thread.start() - self.aborting_checker.start() - if "hooks" in self.key["kw"]: - self.event_thread.start() - self.start = time.time() - return self - - def _consume_results(self): - task_aborted = False - while True: - if self.runner.result_queue: - results = self.runner.result_queue.popleft() - self.results.extend(results) - for r in results: - self.load_started_at = min(r["timestamp"], - self.load_started_at) - self.load_finished_at = max(r["duration"] + r["timestamp"], - self.load_finished_at) - success = self.sla_checker.add_iteration(r) - if (self.abort_on_sla_failure and - not success and - not task_aborted): - self.sla_checker.set_aborted_on_sla() - self.runner.abort() - self.task.update_status( - consts.TaskStatus.SOFT_ABORTING) - task_aborted = True - - # save results chunks - chunk_size = CONF.raw_result_chunk_size - while len(self.results) >= chunk_size: - results_chunk = self.results[:chunk_size] - self.results = self.results[chunk_size:] - results_chunk.sort(key=lambda x: x["timestamp"]) - self.workload.add_workload_data(self.workload_data_count, - {"raw": results_chunk}) - self.workload_data_count += 1 - - elif self.is_done.isSet(): - break - else: - time.sleep(0.1) - - def _consume_events(self): - while not 
self.is_done.isSet() or self.runner.event_queue: - if self.runner.event_queue: - event = self.runner.event_queue.popleft() - self.hook_executor.on_event( - event_type=event["type"], value=event["value"]) - else: - time.sleep(0.01) - - def __exit__(self, exc_type, exc_value, exc_traceback): - self.finish = time.time() - self.is_done.set() - self.aborting_checker.join() - self.thread.join() - - if exc_type: - self.sla_checker.set_unexpected_failure(exc_value) - - if objects.Task.get_status( - self.task["uuid"]) == consts.TaskStatus.ABORTED: - self.sla_checker.set_aborted_manually() - - load_duration = max(self.load_finished_at - self.load_started_at, 0) - - LOG.info("Load duration is: %s" % utils.format_float_to_str( - load_duration)) - LOG.info("Full runner duration is: %s" % - utils.format_float_to_str(self.runner.run_duration)) - LOG.info("Full duration is: %s" % utils.format_float_to_str( - self.finish - self.start)) - - results = {} - if "hooks" in self.key["kw"]: - self.event_thread.join() - results["hooks_results"] = self.hook_executor.results() - - if self.results: - # NOTE(boris-42): Sort in order of starting - # instead of order of ending - self.results.sort(key=lambda x: x["timestamp"]) - self.workload.add_workload_data(self.workload_data_count, - {"raw": self.results}) - start_time = (self.load_started_at - if self.load_started_at != float("inf") else None) - self.workload.set_results(load_duration=load_duration, - full_duration=(self.finish - self.start), - sla_results=self.sla_checker.results(), - start_time=start_time, **results) - - @staticmethod - def is_task_in_aborting_status(task_uuid, check_soft=True): - """Checks task is in abort stages - - :param task_uuid: UUID of task to check status - :type task_uuid: str - :param check_soft: check or not SOFT_ABORTING status - :type check_soft: bool - """ - stages = [consts.TaskStatus.ABORTING, consts.TaskStatus.ABORTED] - if check_soft: - stages.append(consts.TaskStatus.SOFT_ABORTING) - return objects.Task.get_status(task_uuid) in stages - - def wait_and_abort(self): - """Waits until abort signal is received and aborts runner in this case. - - Has to be run from different thread simultaneously with the - runner.run method. - """ - - while not self.is_done.isSet(): - if self.is_task_in_aborting_status(self.task["uuid"], - check_soft=False): - self.runner.abort() - self.task.update_status(consts.TaskStatus.ABORTED) - break - time.sleep(2.0) - - -class TaskAborted(Exception): - """Task aborted exception - - Used by TaskEngine to interrupt task run. - """ - - -class TaskEngine(object): - """The Task engine class is used to execute benchmark scenarios. - - An instance of this class is initialized by the API with the task - configuration and then is used to validate and execute all specified - in config subtasks. - - .. note:: - - Typical usage: - ... - - engine = TaskEngine(config, task, deployment) - engine.validate() # to test config - engine.run() # to run config - """ - - def __init__(self, config, task, deployment, - abort_on_sla_failure=False): - """TaskEngine constructor. 
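
# Minimal version of the drain loops in ResultConsumer._consume_results /
# _consume_events above: a worker thread polls a shared deque until the
# producer signals completion, and exits only once the queue is empty.
# Chunking, SLA checks and hooks are omitted from this sketch.
import collections
import threading
import time

queue = collections.deque()
is_done = threading.Event()
consumed = []


def consume():
    while True:
        if queue:
            consumed.extend(queue.popleft())  # drain pending batches first
        elif is_done.is_set():
            break                             # done and nothing left
        else:
            time.sleep(0.01)                  # idle poll, like the original


worker = threading.Thread(target=consume)
worker.start()
for batch in ([1, 2], [3], [4, 5]):
    queue.append(batch)
is_done.set()
worker.join()
assert consumed == [1, 2, 3, 4, 5]
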
- - :param config: Dict with configuration of specified benchmark scenarios - :param task: Instance of Task, - the current task which is being performed - :param deployment: Instance of Deployment, - :param abort_on_sla_failure: True if the execution should be stopped - when some SLA check fails - """ - try: - self.config = TaskConfig(config) - except Exception as e: - task.set_failed(type(e).__name__, - str(e), - json.dumps(traceback.format_exc())) - if logging.is_debug(): - LOG.exception(e) - raise exceptions.InvalidTaskException(str(e)) - - self.task = task - self.deployment = deployment - self.abort_on_sla_failure = abort_on_sla_failure - - def _validate_workload(self, workload, credentials=None, vtype=None): - scenario_cls = scenario.Scenario.get(workload.name) - namespace = scenario_cls.get_platform() - scenario_context = copy.deepcopy(scenario_cls.get_default_context()) - - results = [] - - results.extend(scenario.Scenario.validate( - name=workload.name, - credentials=credentials, - config=workload.to_dict(), - plugin_cfg=None, - vtype=vtype)) - - if workload.runner: - results.extend(runner.ScenarioRunner.validate( - name=workload.runner["type"], - credentials=credentials, - config=None, - plugin_cfg=workload.runner, - namespace=namespace, - vtype=vtype)) - - for context_name, context_conf in workload.context.items(): - results.extend(context.Context.validate( - name=context_name, - credentials=credentials, - config=None, - plugin_cfg=context_conf, - namespace=namespace, - vtype=vtype)) - - for context_name, context_conf in scenario_context.items(): - results.extend(context.Context.validate( - name=context_name, - credentials=credentials, - config=None, - plugin_cfg=context_conf, - namespace=namespace, - allow_hidden=True, - vtype=vtype)) - - for sla_name, sla_conf in workload.sla.items(): - results.extend(sla.SLA.validate( - name=sla_name, - credentials=credentials, - config=None, - plugin_cfg=sla_conf, - vtype=vtype)) - - for hook_conf in workload.hooks: - results.extend(hook.Hook.validate( - name=hook_conf["name"], - credentials=credentials, - config=None, - plugin_cfg=hook_conf["args"], - vtype=vtype)) - - trigger_conf = hook_conf["trigger"] - results.extend(trigger.Trigger.validate( - name=trigger_conf["name"], - credentials=credentials, - config=None, - plugin_cfg=trigger_conf["args"], - vtype=vtype)) - - if results: - msg = "\n ".join([str(r) for r in results]) - kw = workload.make_exception_args(msg) - raise exceptions.InvalidTaskConfig(**kw) - - @logging.log_task_wrapper(LOG.info, _("Task validation of syntax.")) - def _validate_config_syntax(self, config): - for subtask in config.subtasks: - for workload in subtask.workloads: - self._validate_workload(workload, vtype="syntax") - - @logging.log_task_wrapper(LOG.info, _("Task validation of required " - "platforms.")) - def _validate_config_platforms(self, config): - credentials = self.deployment.get_all_credentials() - credentials = dict((p, creds[0]) for p, creds in credentials.items()) - for subtask in config.subtasks: - for workload in subtask.workloads: - self._validate_workload(workload, vtype="platform", - credentials=credentials) - - def _validate_config_semantic_helper(self, admin, user_context, - workloads, platform): - with user_context as ctx: - ctx.setup() - users = ctx.context["users"] - for workload in workloads: - credentials = {platform: {"admin": admin, "users": users}} - self._validate_workload(workload, credentials=credentials, - vtype="semantic") - - @logging.log_task_wrapper(LOG.info, _("Task validation 
of semantic.")) - def _validate_config_semantic(self, config): - # map workloads to platforms - platforms = collections.defaultdict(list) - for subtask in config.subtasks: - for workload in subtask.workloads: - # TODO(astudenov): We need to use a platform validator - # in future to identify what kind of users workload - # requires (regular users or admin) - scenario_cls = scenario.Scenario.get(workload.name) - namespace = scenario_cls.get_platform() - - # TODO(andreykurilin): Remove check for plugin namespace after - # Rally 0.10.0 - - if (issubclass(scenario_cls, os_scenario.OpenStackScenario) - and namespace == "default"): - LOG.warning( - "Scenario '%(scen)s' is located in 'default' " - "namespace. Since it inherits from OpenStackScenario, " - "possibly it's namespace should be 'openstack'. " - "Please contact plugin maintainer to fix that issue if" - " it is true (this change is backward compatible). " - "Proper namespace is a guarantee of proper discovering" - " contexts (users context is different for different " - "platforms, i.e openstack, kubernetes). As for now, " - "we assume that your plugin is in openstack namespace," - " but after Rally 0.10.0 it will be changed since " - "default namespace doesn't need users context and so " - "on + we do not want to align to only OpenStack " - "deployments.") - namespace = "openstack" - platforms[namespace].append(workload) - - for platform, workloads in platforms.items(): - creds = self.deployment.get_credentials_for(platform) - - admin = creds["admin"] - if admin: - admin.verify_connection() - - ctx_conf = {"task": self.task, "admin": {"credential": admin}} - user_context = context.Context.get("users", platform=platform, - allow_hidden=True)(ctx_conf) - - self._validate_config_semantic_helper(admin, user_context, - workloads, platform) - - @logging.log_task_wrapper(LOG.info, _("Task validation.")) - def validate(self, only_syntax=False): - """Perform full task configuration validation. - - :param only_syntax: Check only syntax of task configuration - """ - self.task.update_status(consts.TaskStatus.VALIDATING) - try: - self._validate_config_syntax(self.config) - if only_syntax: - return - self._validate_config_platforms(self.config) - self._validate_config_semantic(self.config) - except Exception as e: - exception_info = json.dumps(traceback.format_exc(), indent=2, - separators=(",", ": ")) - self.task.set_failed(type(e).__name__, - str(e), exception_info) - if (logging.is_debug() and - not isinstance(e, exceptions.InvalidTaskConfig)): - LOG.exception(e) - raise exceptions.InvalidTaskException(str(e)) - - def _get_runner(self, config): - config = config or {"type": "serial"} - return runner.ScenarioRunner.get(config["type"])(self.task, config) - - def _prepare_context(self, ctx, name, owner_id): - scenario_cls = scenario.Scenario.get(name) - namespace = scenario_cls.get_platform() - - creds = self.deployment.get_credentials_for(namespace) - - scenario_context = copy.deepcopy(scenario_cls.get_default_context()) - if "users" not in [c.split("@", 1)[0] for c in ctx.keys()]: - scenario_context.setdefault("users", {}) - - scenario_context.update(ctx) - context_obj = { - "task": self.task, - "owner_id": owner_id, - "scenario_name": name, - "scenario_namespace": namespace, - "config": scenario_context - } - - if creds["admin"]: - context_obj["admin"] = {"credential": creds["admin"]} - - return context_obj - - @logging.log_task_wrapper(LOG.info, _("Benchmarking.")) - def run(self): - """Run the benchmark according to the test configuration. 
- - Test configuration is specified on engine initialization. - - :returns: List of dicts, each dict containing the results of all the - corresponding benchmark test launches - """ - self.task.update_status(consts.TaskStatus.RUNNING) - - try: - for subtask in self.config.subtasks: - self._run_subtask(subtask) - except TaskAborted: - LOG.info("Received aborting signal.") - self.task.update_status(consts.TaskStatus.ABORTED) - else: - if objects.Task.get_status( - self.task["uuid"]) != consts.TaskStatus.ABORTED: - self.task.update_status(consts.TaskStatus.FINISHED) - - def _run_subtask(self, subtask): - subtask_obj = self.task.add_subtask(**subtask.to_dict()) - - try: - # TODO(astudenov): add subtask context here - for workload in subtask.workloads: - self._run_workload(subtask_obj, workload) - except TaskAborted: - subtask_obj.update_status(consts.SubtaskStatus.ABORTED) - raise - except Exception as e: - subtask_obj.update_status(consts.SubtaskStatus.CRASHED) - # TODO(astudenov): save error to DB - LOG.debug(traceback.format_exc()) - LOG.exception(e) - - # NOTE(astudenov): crash task after exception in subtask - self.task.update_status(consts.TaskStatus.CRASHED) - raise - else: - subtask_obj.update_status(consts.SubtaskStatus.FINISHED) - - def _run_workload(self, subtask_obj, workload): - if ResultConsumer.is_task_in_aborting_status(self.task["uuid"]): - raise TaskAborted() - - key = workload.make_key() - workload_obj = subtask_obj.add_workload( - name=workload.name, description=workload.description, - position=workload.pos, runner=workload.runner, - hooks=workload.hooks, context=workload.context, sla=workload.sla, - args=workload.args) - LOG.info("Running benchmark with key: \n%s" - % json.dumps(key, indent=2)) - runner_obj = self._get_runner(workload.runner) - context_obj = self._prepare_context( - workload.context, workload.name, workload_obj["uuid"]) - try: - with ResultConsumer(key, self.task, subtask_obj, workload_obj, - runner_obj, self.abort_on_sla_failure): - with context.ContextManager(context_obj): - runner_obj.run(workload.name, context_obj, - workload.args) - except Exception as e: - LOG.debug(traceback.format_exc()) - LOG.exception(e) - # TODO(astudenov): save error to DB - - -class TaskConfig(object): - """Version-aware wrapper around task. 
- - """ - - HOOK_CONFIG = { - "type": "object", - "properties": { - "name": {"type": "string"}, - "description": {"type": "string"}, - "args": {}, - "trigger": { - "type": "object", - "properties": { - "name": {"type": "string"}, - "args": {}, - }, - "required": ["name", "args"], - "additionalProperties": False, - } - }, - "required": ["name", "args", "trigger"], - "additionalProperties": False, - } - - CONFIG_SCHEMA_V1 = { - "type": "object", - "$schema": consts.JSON_SCHEMA, - "patternProperties": { - ".*": { - "type": "array", - "items": { - "type": "object", - "properties": { - "args": {"type": "object"}, - "description": { - "type": "string" - }, - "runner": { - "type": "object", - "properties": {"type": {"type": "string"}}, - "required": ["type"] - }, - "context": {"type": "object"}, - "sla": {"type": "object"}, - "hooks": { - "type": "array", - "items": HOOK_CONFIG, - } - }, - "additionalProperties": False - } - } - } - } - - CONFIG_SCHEMA_V2 = { - "type": "object", - "$schema": consts.JSON_SCHEMA, - "properties": { - "version": {"type": "number"}, - "title": {"type": "string"}, - "description": {"type": "string"}, - "tags": { - "type": "array", - "items": {"type": "string"} - }, - - "subtasks": { - "type": "array", - "minItems": 1, - "items": { - "type": "object", - "properties": { - "title": {"type": "string"}, - "group": {"type": "string"}, - "description": {"type": "string"}, - "tags": { - "type": "array", - "items": {"type": "string"} - }, - - "run_in_parallel": {"type": "boolean"}, - "workloads": { - "type": "array", - "minItems": 1, - "items": { - "type": "object", - "properties": { - "name": {"type": "string"}, - "description": {"type": "string"}, - "args": {"type": "object"}, - - "runner": { - "type": "object", - "properties": { - "type": {"type": "string"} - }, - "required": ["type"] - }, - - "sla": {"type": "object"}, - "hooks": { - "type": "array", - "items": HOOK_CONFIG, - }, - "context": {"type": "object"} - }, - "additionalProperties": False, - "required": ["name", "runner"] - } - } - }, - "additionalProperties": False, - "required": ["title", "workloads"] - } - } - }, - "additionalProperties": False, - "required": ["title", "subtasks"] - } - - CONFIG_SCHEMAS = {1: CONFIG_SCHEMA_V1, 2: CONFIG_SCHEMA_V2} - - def __init__(self, config): - """TaskConfig constructor. - - :param config: Dict with configuration of specified task - """ - if config is None: - # NOTE(stpierre): This gets reraised as - # InvalidTaskException. if we raise it here as - # InvalidTaskException, then "Task config is invalid: " - # gets prepended to the message twice. - raise Exception(_("Input task is empty")) - - self.version = self._get_version(config) - self._validate_version() - self._validate_json(config) - - self.title = config.get("title", "Task") - self.tags = config.get("tags", []) - self.description = config.get("description") - - self.subtasks = self._make_subtasks(config) - - # if self.version == 1: - # TODO(ikhudoshyn): Warn user about deprecated format - - @staticmethod - def _get_version(config): - return config.get("version", 1) - - def _validate_version(self): - if self.version not in self.CONFIG_SCHEMAS: - allowed = ", ".join([str(k) for k in self.CONFIG_SCHEMAS]) - msg = (_("Task configuration version {0} is not supported. 
" - "Supported versions: {1}")).format(self.version, allowed) - raise exceptions.InvalidTaskException(msg) - - def _validate_json(self, config): - try: - jsonschema.validate(config, self.CONFIG_SCHEMAS[self.version]) - except Exception as e: - raise exceptions.InvalidTaskException(str(e)) - - def _make_subtasks(self, config): - if self.version == 2: - return [SubTask(s) for s in config["subtasks"]] - elif self.version == 1: - subtasks = [] - for name, v1_workloads in config.items(): - for v1_workload in v1_workloads: - v2_workload = copy.deepcopy(v1_workload) - v2_workload["name"] = name - subtasks.append( - SubTask({"title": name, "workloads": [v2_workload]})) - return subtasks - - -class SubTask(object): - """Subtask -- unit of execution in Task - - """ - def __init__(self, config): - """Subtask constructor. - - :param config: Dict with configuration of specified subtask - """ - self.title = config["title"] - self.tags = config.get("tags", []) - self.group = config.get("group") - self.description = config.get("description") - self.workloads = [Workload(wconf, pos) - for pos, wconf in enumerate(config["workloads"])] - self.context = config.get("context", {}) - - def to_dict(self): - return { - "title": self.title, - "description": self.description, - "context": self.context, - } - - -class Workload(object): - """Workload -- workload configuration in SubTask. - - """ - def __init__(self, config, pos): - self.name = config["name"] - self.description = config.get("description", "") - if not self.description: - try: - self.description = scenario.Scenario.get( - self.name).get_info()["title"] - except (exceptions.PluginNotFound, - exceptions.MultipleMatchesFound): - # let's fail an issue with loading plugin at a validation step - pass - self.runner = config.get("runner", {}) - self.sla = config.get("sla", {}) - self.hooks = config.get("hooks", []) - self.context = config.get("context", {}) - self.args = config.get("args", {}) - self.pos = pos - - def to_dict(self): - workload = {"runner": self.runner} - - for prop in "sla", "args", "context", "hooks": - value = getattr(self, prop) - if value: - workload[prop] = value - - return workload - - def to_task(self): - """Make task configuration for the workload. - - This method returns a dict representing full configuration - of the task containing a single subtask with this single - workload. - - :return: dict containing full task configuration - """ - # NOTE(ikhudoshyn): Result of this method will be used - # to store full task configuration in DB so that - # subtask configuration in reports would be given - # in the same format as it was provided by user. - # Temporarily it returns to_dict() in order not - # to break existing reports. It should be - # properly implemented in a patch that will update reports. - # return {self.name: [self.to_dict()]} - return self.to_dict() - - def make_key(self): - return {"name": self.name, - "description": self.description, - "pos": self.pos, - "kw": self.to_task()} - - def make_exception_args(self, reason): - return {"name": self.name, - "pos": self.pos, - "config": json.dumps(self.to_dict()), - "reason": reason} diff --git a/rally/task/exporter.py b/rally/task/exporter.py deleted file mode 100755 index db60b3f9..00000000 --- a/rally/task/exporter.py +++ /dev/null @@ -1,133 +0,0 @@ -# Copyright 2016: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-"""
-Exporter is the mechanism for exporting Rally tasks into some specified
-system, identified by a connection string.
-"""
-
-import abc
-
-import jsonschema
-import six
-
-from rally.common import logging
-from rally.common.plugin import plugin
-from rally import consts
-
-
-LOG = logging.getLogger(__name__)
-
-configure = plugin.configure
-
-REPORT_RESPONSE_SCHEMA = {
-    "type": "object",
-    "$schema": consts.JSON_SCHEMA,
-    "properties": {
-        "files": {
-            "type": "object",
-            "patternProperties": {
-                ".{1,}": {"type": "string"}
-            }
-        },
-        "open": {
-            "type": "string",
-        },
-        "print": {
-            "type": "string"
-        }
-    },
-    "additionalProperties": False
-}
-
-
-@plugin.base()
-@six.add_metaclass(abc.ABCMeta)
-class Exporter(plugin.Plugin):
-
-    def __init__(self, connection_string):
-        LOG.warning("The old Exporter plugin base is not supported since "
-                    "Rally 0.10.0, please use TaskExporter instead.")
-        self.connection_string = connection_string
-
-    @abc.abstractmethod
-    def export(self, task_uuid):
-        """Export results of the task to the task storage.
-
-        :param task_uuid: uuid of task results
-        """
-
-    @abc.abstractmethod
-    def validate(self):
-        """Used to validate connection string."""
-
-
-@plugin.base()
-@six.add_metaclass(abc.ABCMeta)
-class TaskExporter(plugin.Plugin):
-    """Base class for all exporters for Tasks."""
-
-    def __init__(self, tasks_results, output_destination, api=None):
-        """Init exporter
-
-        :param tasks_results: list of results to generate report for
-        :param output_destination: destination of export
-        :param api: an instance of rally.api.API object
-        """
-        super(TaskExporter, self).__init__()
-        self.tasks_results = tasks_results
-        self.output_destination = output_destination
-        self.api = api
-
-    @classmethod
-    @abc.abstractmethod
-    def validate(cls, output_destination):
-        """Validate destination of report.
-
-        :param output_destination: Destination of report
-        """
-
-    @abc.abstractmethod
-    def generate(self):
-        """Generate report
-
-        :returns: a dict with 3 optional elements:
-
-          - key "files" with a dictionary of files to save on disk.
-            keys are paths, values are contents;
-          - key "print" - data to print at CLI level
-          - key "open" - path to the file which should be opened when the
-            --open flag is set
-        """
-
-    @staticmethod
-    def make(exporter_cls, task_results, output_destination, api=None):
-        """Initialize exporter, generate and validate result.
-
-        It is a base method which is called from the API layer. It cannot be
-        overridden. Do not even try! :)
-
-        :param exporter_cls: class of TaskExporter to be used
-        :param task_results: list of results to generate report for
-        :param output_destination: destination of export
-        :param api: an instance of rally.api.API object
-        """
-        report = exporter_cls(task_results, output_destination,
-                              api).generate()
-
-        jsonschema.validate(report, REPORT_RESPONSE_SCHEMA)
-
-        return report
diff --git a/rally/task/functional.py b/rally/task/functional.py
deleted file mode 100644
index 84542efb..00000000
--- a/rally/task/functional.py
+++ /dev/null
@@ -1,166 +0,0 @@
-# Copyright 2015: Red Hat, Inc.
-# All Rights Reserved.
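A minimal sketch of a concrete exporter built on the TaskExporter interface above; the plugin name and file-writing behaviour are hypothetical, only the ``generate()`` contract (a dict matching REPORT_RESPONSE_SCHEMA) comes from the code itself:

.. code-block:: python

    import json

    @configure(name="json-file")
    class JSONFileExporter(TaskExporter):
        """Dump raw task results into a single JSON file."""

        @classmethod
        def validate(cls, output_destination):
            # The destination is assumed to be a filesystem path here.
            if not output_destination:
                raise ValueError("output destination is required")

        def generate(self):
            # "files" maps paths to contents; "open" names the file to
            # show when the --open flag is passed.
            body = json.dumps(self.tasks_results, indent=2, default=str)
            return {"files": {self.output_destination: body},
                    "open": self.output_destination}

``TaskExporter.make(JSONFileExporter, results, "/tmp/report.json")`` would then instantiate it, call ``generate()`` and validate the returned dict against the schema.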
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally import exceptions - - -class FunctionalMixin(object): - """Functional assertions. - - The Rally core team deliberately decided not to use an existing framework - for this such a `testtools`. - - Using 'testtools' would introduce the following problems: - - Rally production code works with testing tools code that is not designed - to be used in production. - - Rally code depends on a bunch of new libs introduced by testtools and - testtools itself, which means: more code on which Rally is dependent, - more time required to install Rally, more disk space required by Rally. - - Classes like Scenario & Context are inherited from testtools.TestCase - that makes these classes really hard to learn (for instance: - running dir(base.Scenario) you cannot see a ton of methods inside it) - - It won't be clear for end users what exceptions are raised: unittest - exception are going to be raised during production runs instead of - Rally assertion exceptions. - """ - - def _concatenate_message(self, default, extended): - if not extended: - return default - if default[-1] != ".": - default += "." - return default + " " + extended.capitalize() - - def assertEqual(self, first, second, err_msg=None): - if first != second: - msg = "%s != %s" % (repr(first), - repr(second)) - raise exceptions.RallyAssertionError( - self._concatenate_message(msg, err_msg)) - - def assertNotEqual(self, first, second, err_msg=None): - if first == second: - msg = "%s == %s" % (repr(first), - repr(second)) - raise exceptions.RallyAssertionError( - self._concatenate_message(msg, err_msg)) - - def assertTrue(self, value, err_msg=None): - if not value: - msg = "%s is not True" % repr(value) - raise exceptions.RallyAssertionError( - self._concatenate_message(msg, err_msg)) - - def assertFalse(self, value, err_msg=None): - if value: - msg = "%s is not False" % repr(value) - raise exceptions.RallyAssertionError( - self._concatenate_message(msg, err_msg)) - - def assertIs(self, first, second, err_msg=None): - if first is not second: - msg = "%s is not %s" % (repr(first), - repr(second)) - raise exceptions.RallyAssertionError( - self._concatenate_message(msg, err_msg)) - - def assertIsNot(self, first, second, err_msg=None): - if first is second: - msg = "%s is %s" % (repr(first), - repr(second)) - raise exceptions.RallyAssertionError( - self._concatenate_message(msg, err_msg)) - - def assertIsNone(self, value, err_msg=None): - if value is not None: - msg = "%s is not None" % repr(value) - raise exceptions.RallyAssertionError( - self._concatenate_message(msg, err_msg)) - - def assertIsNotNone(self, value, err_msg=None): - if value is None: - msg = "%s is None" % repr(value) - raise exceptions.RallyAssertionError( - self._concatenate_message(msg, err_msg)) - - def assertIn(self, member, container, err_msg=None): - msg = "%s not found in %s" % (repr(member), - repr(container)) - if member not in container: - raise exceptions.RallyAssertionError( - self._concatenate_message(msg, 
err_msg)) - - def assertNotIn(self, member, container, err_msg=None): - msg = "%s found in %s" % (repr(member), - repr(container)) - if member in container: - raise exceptions.RallyAssertionError( - self._concatenate_message(msg, err_msg)) - - def assertIsInstance(self, first, second, err_msg=None): - if not isinstance(first, second): - msg = "%s is not instance of %s" % (repr(first), - repr(second)) - raise exceptions.RallyAssertionError( - self._concatenate_message(msg, err_msg)) - - def assertIsSubset(self, member, container, err_msg=None): - msg = "%s not found in %s" % (repr(member), - repr(container)) - if set(member) - set(container): - raise exceptions.RallyAssertionError( - self._concatenate_message(msg, err_msg)) - - def assertIsNotSubset(self, member, container, err_msg=None): - msg = "%s found in %s" % (repr(member), - repr(container)) - if not (set(member) - set(container)): - raise exceptions.RallyAssertionError( - self._concatenate_message(msg, err_msg)) - - def assertIsNotInstance(self, first, second, err_msg=None): - if isinstance(first, second): - msg = "%s is instance of %s" % (repr(first), - repr(second)) - raise exceptions.RallyAssertionError( - self._concatenate_message(msg, err_msg)) - - def assertLessEqual(self, first, second, err_msg=None): - msg = "%s is greater than %s" % (repr(first), - repr(second)) - if first > second: - raise exceptions.RallyAssertionError( - self._concatenate_message(msg, err_msg)) - - def assertLess(self, first, second, err_msg=None): - msg = "%s is greater or equal to %s" % (repr(first), - repr(second)) - if first >= second: - raise exceptions.RallyAssertionError( - self._concatenate_message(msg, err_msg)) - - def assertGreaterEqual(self, first, second, err_msg=None): - msg = "%s is less than %s" % (repr(first), - repr(second)) - if first < second: - raise exceptions.RallyAssertionError( - self._concatenate_message(msg, err_msg)) - - def assertGreater(self, first, second, err_msg=None): - msg = "%s is less or equal to %s" % (repr(first), - repr(second)) - if first <= second: - raise exceptions.RallyAssertionError( - self._concatenate_message(msg, err_msg)) diff --git a/rally/task/hook.py b/rally/task/hook.py deleted file mode 100644 index 20baf3ff..00000000 --- a/rally/task/hook.py +++ /dev/null @@ -1,206 +0,0 @@ -# Copyright 2016: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
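A quick usage sketch for the FunctionalMixin assertions above (the checker class and response dict are hypothetical; in Rally the mixin is mixed into scenario classes):

.. code-block:: python

    from rally import exceptions
    from rally.task.functional import FunctionalMixin

    class ResponseChecks(FunctionalMixin):
        def check(self, resp):
            self.assertEqual(200, resp["status"], err_msg="unexpected status")
            self.assertIn("id", resp["body"])

    try:
        ResponseChecks().check({"status": 500, "body": {}})
    except exceptions.RallyAssertionError as e:
        print(e)  # message contains: 200 != 500. Unexpected status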
-
-import abc
-import collections
-import threading
-
-import six
-
-from rally.common.i18n import _, _LE
-from rally.common import logging
-from rally.common.plugin import plugin
-from rally.common import utils as rutils
-from rally.common import validation
-from rally import consts
-from rally import exceptions
-from rally.task.processing import charts
-from rally.task import trigger
-from rally.task import utils
-
-
-LOG = logging.getLogger(__name__)
-
-
-configure = plugin.configure
-
-
-class HookExecutor(object):
-    """Runs hooks and collects results from them."""
-
-    def __init__(self, config, task):
-        self.config = config
-        self.task = task
-
-        self.triggers = collections.defaultdict(list)
-        for hook in config.get("hooks", []):
-            hook_cls = Hook.get(hook["name"])
-            trigger_obj = trigger.Trigger.get(
-                hook["trigger"]["name"])(hook, self.task, hook_cls)
-            event_type = trigger_obj.get_listening_event()
-            self.triggers[event_type].append(trigger_obj)
-
-        if "time" in self.triggers:
-            self._timer_thread = threading.Thread(target=self._timer_method)
-            self._timer_stop_event = threading.Event()
-
-    def _timer_method(self):
-        """Timer thread method.
-
-        It generates events with type "time" to inform HookExecutor
-        about how much time has passed since the beginning of the first
-        iteration.
-        """
-        stopwatch = rutils.Stopwatch(stop_event=self._timer_stop_event)
-        stopwatch.start()
-        seconds_since_start = 0
-        while not self._timer_stop_event.isSet():
-            self.on_event(event_type="time", value=seconds_since_start)
-            seconds_since_start += 1
-            stopwatch.sleep(seconds_since_start)
-
-    def _start_timer(self):
-        self._timer_thread.start()
-
-    def _stop_timer(self):
-        self._timer_stop_event.set()
-        if self._timer_thread.ident is not None:
-            self._timer_thread.join()
-
-    def on_event(self, event_type, value):
-        """Notify about event.
-
-        This method should be called to inform HookExecutor that a
-        particular event has occurred.
-        It runs the hooks configured for that event.
-        """
-        if "time" in self.triggers:
-            # start timer on first iteration
-            if event_type == "iteration" and value == 1:
-                self._start_timer()
-
-        for trigger_obj in self.triggers[event_type]:
-            started = trigger_obj.on_event(event_type, value)
-            if started:
-                LOG.info(_("Hook %s is triggered for Task %s by %s=%s")
-                         % (trigger_obj.hook_cls.__name__, self.task["uuid"],
-                            event_type, value))
-
-    def results(self):
-        """Returns list of dicts with hook results."""
-        if "time" in self.triggers:
-            self._stop_timer()
-        results = []
-        for triggers_group in self.triggers.values():
-            for trigger_obj in triggers_group:
-                results.append(trigger_obj.get_results())
-        return results
-
-
-@validation.add_default("jsonschema")
-@plugin.base()
-@six.add_metaclass(abc.ABCMeta)
-class Hook(plugin.Plugin, validation.ValidatablePluginMixin):
-    """Factory for hook classes."""
-
-    CONFIG_SCHEMA = {"type": "null"}
-
-    def __init__(self, task, config, triggered_by):
-        self.task = task
-        self.config = config
-        self._triggered_by = triggered_by
-        self._thread = threading.Thread(target=self._thread_method)
-        self._started_at = 0.0
-        self._finished_at = 0.0
-        self._result = {
-            "status": consts.HookStatus.SUCCESS,
-            "started_at": self._started_at,
-            "finished_at": self._finished_at,
-            "triggered_by": self._triggered_by,
-        }
-
-    def _thread_method(self):
-        # Run hook synchronously
-        self.run_sync()
-
-    def set_error(self, exception_name, description, details):
-        """Set error related information to result.
- - :param exception_name: name of exception as string - :param description: short description as string - :param details: any details as string - """ - self.set_status(consts.HookStatus.FAILED) - self._result["error"] = {"etype": exception_name, - "msg": description, "details": details} - - def set_status(self, status): - """Set status to result.""" - self._result["status"] = status - - def add_output(self, additive=None, complete=None): - """Save custom output. - - :param additive: dict with additive output - :param complete: dict with complete output - :raises RallyException: if output has wrong format - """ - if "output" not in self._result: - self._result["output"] = {"additive": [], "complete": []} - for key, value in (("additive", additive), ("complete", complete)): - if value: - message = charts.validate_output(key, value) - if message: - raise exceptions.RallyException(message) - self._result["output"][key].append(value) - - def run_async(self): - """Run hook asynchronously.""" - self._thread.start() - - def run_sync(self): - """Run hook synchronously.""" - try: - with rutils.Timer() as timer: - self.run() - except Exception as exc: - LOG.error(_LE("Hook %s failed during run."), self.get_name()) - LOG.exception(exc) - self.set_error(*utils.format_exc(exc)) - - self._started_at = timer.timestamp() - self._result["started_at"] = self._started_at - self._finished_at = timer.finish_timestamp() - self._result["finished_at"] = self._finished_at - - @abc.abstractmethod - def run(self): - """Run method. - - This method should be implemented in plugin. - - Hook plugin should call following methods to set result: - set_status - to set hook execution status - Optionally the following methods should be called: - set_error - to indicate that there was an error; - automatically sets hook execution status to 'failed' - add_output - provide data for report - """ - - def result(self): - """Wait and return result of hook.""" - if self._thread.ident is not None: - # hook is still running, wait for result - self._thread.join() - return self._result diff --git a/rally/task/processing/__init__.py b/rally/task/processing/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/rally/task/processing/charts.py b/rally/task/processing/charts.py deleted file mode 100644 index 47b3c28f..00000000 --- a/rally/task/processing/charts.py +++ /dev/null @@ -1,691 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import abc -import bisect -import collections -import math - -import six - -from rally.common.plugin import plugin -from rally.common import streaming_algorithms as streaming -from rally.task.processing import utils - - -@plugin.base() -@six.add_metaclass(abc.ABCMeta) -class Chart(plugin.Plugin): - """Base class for charts. - - This is a base for all plugins that prepare data for specific charts - in HTML report. Each chart must at least declare chart widget and - prepare data that is suitable for rendering by JavaScript. 
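A minimal sketch of a hook plugin built on the Hook base class above. The plugin name is hypothetical, and it assumes ``self.config`` carries the hook's ``args`` (here, a shell command string); ``subprocess.run`` makes this a Python 3 sketch:

.. code-block:: python

    import subprocess

    @configure(name="shell")
    class ShellHook(Hook):
        """Run a shell command when the hook is triggered."""

        CONFIG_SCHEMA = {"type": "string"}

        def run(self):
            proc = subprocess.run(self.config, shell=True,
                                  capture_output=True, text=True)
            if proc.returncode:
                self.set_error("ShellCommandError",
                               "Command exited with code %d" % proc.returncode,
                               proc.stderr)
            else:
                # SUCCESS is already the default status; set explicitly
                # for clarity.
                self.set_status(consts.HookStatus.SUCCESS)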
- """ - - @abc.abstractproperty - def widget(self): - """Widget name to display this chart by JavaScript.""" - - def __init__(self, workload, zipped_size=1000): - """Setup initial values. - - :param workload: dict, detailed info about the Workload - :param zipped_size: int maximum number of points on scale - """ - self._data = collections.OrderedDict() # Container for results - self._workload = workload - self.base_size = self._workload["total_iteration_count"] - self.zipped_size = zipped_size - - def add_iteration(self, iteration): - """Add iteration data. - - This method must be called for each iteration. - If overridden, this method must use streaming data processing, - so chart instance could process unlimited number of iterations, - with low memory usage. - """ - for name, value in self._map_iteration_values(iteration): - if name not in self._data: - self._data[name] = utils.GraphZipper(self.base_size, - self.zipped_size) - self._data[name].add_point(value) - - def render(self): - """Generate chart data ready for drawing.""" - return [(name, points.get_zipped_graph()) - for name, points in self._data.items()] - - def _fix_atomic_actions(self, atomic_actions): - """Set `0' for missed atomic actions. - - Since some atomic actions can absent in some iterations - due to failures, this method must be used in all cases - related to atomic actions processing. - """ - for name in self._get_atomic_names(): - atomic_actions.setdefault(name, 0) - return atomic_actions - - def _get_atomic_names(self): - atomic_merger = utils.AtomicMerger( - self._workload["statistics"]["atomics"]) - return atomic_merger.get_merged_names() - - def _merge_atomic_actions(self, atomic_actions): - atomic_merger = utils.AtomicMerger( - self._workload["statistics"]["atomics"]) - return atomic_merger.merge_atomic_actions( - atomic_actions) - - @abc.abstractmethod - def _map_iteration_values(self, iteration): - """Get values for processing, from given iteration.""" - - -class MainStackedAreaChart(Chart): - - widget = "StackedArea" - - def _map_iteration_values(self, iteration): - if iteration["error"]: - result = [("duration", 0), ("idle_duration", 0)] - if self._workload["failed_iteration_count"]: - result.append( - ("failed_duration", - iteration["duration"] + iteration["idle_duration"])) - else: - result = [("duration", iteration["duration"]), - ("idle_duration", iteration["idle_duration"])] - if self._workload["failed_iteration_count"]: - result.append(("failed_duration", 0)) - return result - - -class AtomicStackedAreaChart(Chart): - - widget = "StackedArea" - - def _map_iteration_values(self, iteration): - atomic_actions = self._merge_atomic_actions( - iteration["atomic_actions"]) - atomic_actions = self._fix_atomic_actions(atomic_actions) - atomics = list(atomic_actions.items()) - if self._workload["failed_iteration_count"]: - if iteration["error"]: - failed_duration = ( - iteration["duration"] + iteration["idle_duration"] - - sum([(a[1] or 0) for a in atomics])) - else: - failed_duration = 0 - atomics.append(("failed_duration", failed_duration)) - return atomics - - -class AvgChart(Chart): - """Base class for charts with average results.""" - - widget = "Pie" - - def add_iteration(self, iteration): - for name, value in self._map_iteration_values(iteration): - if name not in self._data: - self._data[name] = streaming.MeanComputation() - self._data[name].add(value or 0) - - def render(self): - return [(k, v.result()) for k, v in self._data.items()] - - -class AtomicAvgChart(AvgChart): - - def 
_map_iteration_values(self, iteration): - atomic_actions = self._merge_atomic_actions( - iteration["atomic_actions"]) - atomic_actions = self._fix_atomic_actions(atomic_actions) - return list(atomic_actions.items()) - - -class LoadProfileChart(Chart): - """Chart for parallel durations.""" - - widget = "StackedArea" - - def __init__(self, workload, name="parallel iterations", - scale=100): - """Setup chart with graph name and scale. - - :param workload: dict, detailed information about Workload - :param name: str name for X axis - :param scale: int number of X points - """ - super(LoadProfileChart, self).__init__(workload) - self._name = name - # NOTE(boris-42): Add 2 points at the end of graph so at the end of - # graph there will be point with 0 running iterations. - self._duration = self._workload["load_duration"] * (1 + 2.0 / scale) - - self.step = self._duration / float(scale) - self._time_axis = [self.step * x - for x in six.moves.range(int(scale)) - if (self.step * x) < self._duration] - self._time_axis.append(self._duration) - self._running = [0] * len(self._time_axis) - # NOTE(andreykurilin): There is a "start_time" field in workload - # object, but due to transformations in database layer, the - # microseconds can be not accurate enough. - if self._workload["data"]: - self._tstamp_start = self._workload["data"][0]["timestamp"] - else: - self._tstamp_start = self._workload["start_time"] - - def _map_iteration_values(self, iteration): - return iteration["timestamp"], iteration["duration"] - - def add_iteration(self, iteration): - timestamp, duration = self._map_iteration_values(iteration) - ts_start = timestamp - self._tstamp_start - started_idx = bisect.bisect(self._time_axis, ts_start) - ended_idx = bisect.bisect(self._time_axis, ts_start + duration) - if self._time_axis[ended_idx - 1] == ts_start + duration: - ended_idx -= 1 - for idx in range(started_idx + 1, ended_idx): - self._running[idx] += 1 - if started_idx == ended_idx: - self._running[ended_idx] += duration / self.step - else: - self._running[started_idx] += ( - self._time_axis[started_idx] - ts_start) / self.step - self._running[ended_idx] += ( - ts_start + duration - - self._time_axis[ended_idx - 1]) / self.step - - def render(self): - return [(self._name, list(zip(self._time_axis, self._running)))] - - -class HistogramChart(Chart): - """Base class for chart with histograms. - - This chart is relatively complex, because actually it is a set - of histograms, that usually can be switched by dropdown select. - And each histogram has several data views. 
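The views mentioned above come from three classic bin-count rules, implemented in ``_init_views()`` just below. For instance, with 100 iterations:

.. code-block:: python

    import math

    base_size = 100  # total_iteration_count
    bins = {
        "Square Root Choice": int(math.ceil(math.sqrt(base_size))),     # 10
        "Sturges Formula": int(math.ceil(math.log(base_size, 2) + 1)),  # 8
        "Rice Rule": int(math.ceil(2 * base_size ** (1.0 / 3))),        # 10
    }

Each view then spreads the observed duration range over that many equal-width bins.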
- """ - - widget = "Histogram" - - def _init_views(self, min_value, max_value): - """Generate initial data for each histogram view.""" - if not self.base_size: - return [] - min_value, max_value = min_value or 0, max_value or 0 - views = [] - for view, bins in [ - ("Square Root Choice", - int(math.ceil(math.sqrt(self.base_size)))), - ("Sturges Formula", - int(math.ceil(math.log(self.base_size, 2) + 1))), - ("Rice Rule", - int(math.ceil(2 * self.base_size ** (1.0 / 3))))]: - bin_width = float(max_value - min_value) / bins - x_axis = [min_value + (bin_width * x) for x in range(1, bins + 1)] - views.append({"view": view, "bins": bins, - "x": x_axis, "y": [0] * len(x_axis)}) - return views - - def add_iteration(self, iteration): - for name, value in self._map_iteration_values(iteration): - if name not in self._data: - raise KeyError("Unexpected histogram name: %s" % name) - for i, view in enumerate(self._data[name]["views"]): - for bin_i, bin_v in enumerate(view["x"]): - if (value or 0) <= bin_v: - self._data[name]["views"][i]["y"][bin_i] += 1 - break - - def render(self): - data = [] - for name, hist in self._data.items(): - for idx, v in enumerate(hist["views"]): - graph = {"key": name, - "view": v["view"], - "disabled": hist["disabled"], - "values": [{"x": x, "y": y} - for x, y in zip(v["x"], v["y"])]} - try: - data[idx].append(graph) - except IndexError: - data.append([graph]) - return {"data": data, "views": [{"id": i, "name": d[0]["view"]} - for i, d in enumerate(data)]} - - -class MainHistogramChart(HistogramChart): - - def __init__(self, workload_info): - super(MainHistogramChart, self).__init__(workload_info) - views = self._init_views(self._workload["min_duration"], - self._workload["max_duration"]) - self._data["task"] = {"views": views, "disabled": None} - - def _map_iteration_values(self, iteration): - return [("task", 0 if iteration["error"] else iteration["duration"])] - - -class AtomicHistogramChart(HistogramChart): - - def __init__(self, workload_info): - super(AtomicHistogramChart, self).__init__(workload_info) - atomics = self._workload["statistics"]["atomics"] - atomic_merger = utils.AtomicMerger(atomics) - for i, name in enumerate(atomics): - value = atomics[name] - self._data[atomic_merger.get_merged_name(name)] = { - "views": self._init_views(value["min_duration"], - value["max_duration"]), - "disabled": i} - - def _map_iteration_values(self, iteration): - atomic_actions = self._merge_atomic_actions( - iteration["atomic_actions"]) - atomic_actions = self._fix_atomic_actions(atomic_actions) - return list(atomic_actions.items()) - - -@six.add_metaclass(abc.ABCMeta) -class Table(Chart): - """Base class for tables. - - Each Table subclass represents HTML table which can be easily rendered in - report. Subclasses are responsible for setting up both columns and rows: - columns are set simply by `columns' property (list of str columns names) - and rows must be initialized in _data property, with the following format: - self._data = {name: [streaming_ins, postprocess_func or None], ...} - where: - name - str name of table row parameter - streaming_ins - instance of streaming algorithm - postprocess_func - optional function that processes final result, - None means usage of default self._round() - This can be done in __init__() or even in add_iteration(). - """ - - widget = "Table" - - @abc.abstractproperty - def columns(self): - """List of columns names.""" - - def _round(self, ins, has_result): - """This is a default post-process function for table cell value. 
- - :param ins: streaming_algorithms.StreamingAlgorithm subclass instance - :param has_result: bool, whether current row is effective - :returns: rounded float - :returns: str "n/a" - """ - return round(ins.result(), 3) if has_result else "n/a" - - def _row_has_results(self, values): - """Determine whether row can be assumed as having values. - - :param values: row values list - [(StreamingAlgorithm, function or None), ...] - :returns: bool - """ - for ins, fn in values: - if isinstance(ins, (streaming.MinComputation, - streaming.MaxComputation, - streaming.MeanComputation)): - # NOTE(amaretskiy): None means this computation - # has never been called - return ins.result() is not None - return True - - def get_rows(self): - """Collect rows values finally, after all data is processed. - - :returns: [str_name, (float or str), (float or str), ...] - """ - rows = [] - for name, values in self._data.items(): - row = [name] - has_result = self._row_has_results(values) - for ins, fn in values: - fn = fn or self._round - row.append(fn(ins, has_result)) - rows.append(row) - return rows - - def render(self): - return {"cols": self.columns, "rows": self.get_rows()} - - -class MainStatsTable(Table): - - columns = ["Action", "Min (sec)", "Median (sec)", "90%ile (sec)", - "95%ile (sec)", "Max (sec)", "Avg (sec)", "Success", "Count"] - - def __init__(self, *args, **kwargs): - super(MainStatsTable, self).__init__(*args, **kwargs) - iters_num = self._workload["total_iteration_count"] - for name in (self._get_atomic_names() + ["total"]): - self._data[name] = [ - [streaming.MinComputation(), None], - [streaming.PercentileComputation(0.5, iters_num), None], - [streaming.PercentileComputation(0.9, iters_num), None], - [streaming.PercentileComputation(0.95, iters_num), None], - [streaming.MaxComputation(), None], - [streaming.MeanComputation(), None], - [streaming.MeanComputation(), - lambda st, has_result: ("%.1f%%" % (st.result() * 100) - if has_result else "n/a")], - [streaming.IncrementComputation(), - lambda st, has_result: st.result()]] - - def _map_iteration_values(self, iteration): - atomic_actions = self._merge_atomic_actions( - iteration["atomic_actions"]) - return dict(atomic_actions, total=iteration["duration"]) - - def add_iteration(self, iteration): - for name, value in self._map_iteration_values(iteration).items(): - self._data[name][-1][0].add() - if iteration["error"]: - self._data[name][-2][0].add(0) - else: - self._data[name][-2][0].add(1) - for idx, dummy in enumerate(self._data[name][:-2]): - self._data[name][idx][0].add(value) - - def to_dict(self): - stats = {"total": None, "atomics": []} - - def row_to_dict(data): - return {"name": data[0], - "min": data[1], - "median": data[2], - "90%ile": data[3], - "95%ile": data[4], - "max": data[5], - "avg": data[6], - "success": data[7], - "count": data[8]} - - for row in self.get_rows(): - if row[0] == "total": - stats["total"] = row_to_dict(row) - else: - stats["atomics"].append(row_to_dict(row)) - return stats - - -class OutputChart(Chart): - """Base class for charts related to scenario output.""" - - def __init__(self, workload_info, zipped_size=1000, - title="", description="", label="", axis_label=""): - super(OutputChart, self).__init__(workload_info, zipped_size) - self.title = title - self.description = description - self.label = label - self.axis_label = axis_label - - def _map_iteration_values(self, iteration): - return iteration - - def render(self): - return {"title": self.title, - "description": self.description, - "widget": 
self.widget, - "data": super(OutputChart, self).render(), - "label": self.label, - "axis_label": self.axis_label} - - -@plugin.configure(name="StackedArea") -class OutputStackedAreaChart(OutputChart): - """Display results as stacked area. - - This plugin processes additive data and displays it in HTML report - as stacked area with X axis bound to iteration number. - Complete output data is displayed as stacked area as well, without - any processing. - - Keys "description", "label" and "axis_label" are optional. - - Examples of using this plugin in Scenario, for saving output data: - - .. code-block:: python - - self.add_output( - additive={"title": "Additive data as stacked area", - "description": "Iterations trend for foo and bar", - "chart_plugin": "StackedArea", - "data": [["foo", 12], ["bar", 34]]}, - complete={"title": "Complete data as stacked area", - "description": "Data is shown as stacked area, as-is", - "chart_plugin": "StackedArea", - "data": [["foo", [[0, 5], [1, 42], [2, 15], [3, 7]]], - ["bar", [[0, 2], [1, 1.3], [2, 5], [3, 9]]]], - "label": "Y-axis label text", - "axis_label": "X-axis label text"}) - """ - - widget = "StackedArea" - - def render(self): - result = super(OutputStackedAreaChart, self).render() - - # NOTE(amaretskiy): transform to Table if there is a single iteration - if result["data"] and len(result["data"][0][1]) == 1: - rows = [[v[0], v[1][0][1]] for v in result["data"]] - result.update({"widget": "Table", - "data": {"cols": ["Name", self.label or "Value"], - "rows": rows}}) - return result - - -@plugin.configure(name="Lines") -class OutputLinesChart(OutputStackedAreaChart): - """Display results as generic chart with lines. - - This plugin processes additive data and displays it in HTML report - as linear chart with X axis bound to iteration number. - Complete output data is displayed as linear chart as well, without - any processing. - - Examples of using this plugin in Scenario, for saving output data: - - .. code-block:: python - - self.add_output( - additive={"title": "Additive data as stacked area", - "description": "Iterations trend for foo and bar", - "chart_plugin": "Lines", - "data": [["foo", 12], ["bar", 34]]}, - complete={"title": "Complete data as stacked area", - "description": "Data is shown as stacked area, as-is", - "chart_plugin": "Lines", - "data": [["foo", [[0, 5], [1, 42], [2, 15], [3, 7]]], - ["bar", [[0, 2], [1, 1.3], [2, 5], [3, 9]]]], - "label": "Y-axis label text", - "axis_label": "X-axis label text"}) - """ - - widget = "Lines" - - -@plugin.configure(name="Pie") -class OutputAvgChart(OutputChart, AvgChart): - """Display results as pie, calculate average values for additive data. - - This plugin processes additive data and calculate average values. - Both additive and complete data are displayed in HTML report as pie chart. - - Examples of using this plugin in Scenario, for saving output data: - - .. code-block:: python - - self.add_output( - additive={"title": "Additive output", - "description": ("Pie with average data " - "from all iterations values"), - "chart_plugin": "Pie", - "data": [["foo", 12], ["bar", 34], ["spam", 56]]}, - complete={"title": "Complete output", - "description": "Displayed as a pie, as-is", - "chart_plugin": "Pie", - "data": [["foo", 12], ["bar", 34], ["spam", 56]]}) - """ - - widget = "Pie" - - -@plugin.configure(name="Table") -class OutputTable(OutputChart, Table): - """Display complete output as table, can not be used for additive data. 
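The single-iteration fallback in ``OutputStackedAreaChart.render()`` above means additive data from a one-iteration run is shown as a table rather than a degenerate one-point area chart. A sketch with made-up values:

.. code-block:: python

    chart = OutputStackedAreaChart({"total_iteration_count": 1},
                                   title="Example", label="Duration, s")
    chart.add_iteration([["foo", 1.2], ["bar", 3.4]])
    print(chart.render()["widget"])  # -> Table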
- - Use this plugin for complete output data to display it in HTML report - as table. This plugin can not be used for additive data because it - does not contain any processing logic. - - Examples of using this plugin in Scenario, for saving output data: - - .. code-block:: python - - self.add_output( - complete={"title": "Arbitrary Table", - "description": "Just show columns and rows as-is", - "chart_plugin": "Table", - "data": {"cols": ["foo", "bar", "spam"], - "rows": [["a row", 1, 2], ["b row", 3, 4], - ["c row", 5, 6]]}}) - """ - - widget = "Table" - - -@plugin.configure(name="StatsTable") -class OutputStatsTable(OutputTable): - """Calculate statistics for additive data and display it as table. - - This plugin processes additive data and compose statistics that is - displayed as table in HTML report. - - Examples of using this plugin in Scenario, for saving output data: - - .. code-block:: python - - self.add_output( - additive={"title": "Statistics", - "description": ("Table with statistics generated " - "from all iterations values"), - "chart_plugin": "StatsTable", - "data": [["foo stat", 12], ["bar", 34], ["spam", 56]]}) - """ - - columns = ["Action", "Min (sec)", "Median (sec)", "90%ile (sec)", - "95%ile (sec)", "Max (sec)", "Avg (sec)", "Count"] - - def add_iteration(self, iteration): - for name, value in self._map_iteration_values(iteration): - if name not in self._data: - iters_num = self._workload["total_iteration_count"] - self._data[name] = [ - [streaming.MinComputation(), None], - [streaming.PercentileComputation(0.5, iters_num), None], - [streaming.PercentileComputation(0.9, iters_num), None], - [streaming.PercentileComputation(0.95, iters_num), None], - [streaming.MaxComputation(), None], - [streaming.MeanComputation(), None], - [streaming.IncrementComputation(), - lambda v, na: v.result()]] - - self._data[name][-1][0].add(None) - self._data[name][-2][0].add(1) - for idx, dummy in enumerate(self._data[name][:-1]): - self._data[name][idx][0].add(value) - - -@plugin.configure(name="TextArea") -class OutputTextArea(OutputChart): - """Arbitrary text - - This plugin processes complete data and displays of output in HTML report. - - Examples of using this plugin in Scenario, for saving output data: - - .. code-block:: python - - self.add_output( - complete={"title": "Script Inline", - "chart_plugin": "TextArea", - "data": ["first output", "second output", - "third output"]]}) - """ - - widget = "TextArea" - - -_OUTPUT_SCHEMA = { - "key_types": { - "title": six.string_types, - "description": six.string_types, - "chart_plugin": six.string_types, - "data": (list, dict), - "label": six.string_types, - "axis_label": six.string_types}, - "required": ["title", "chart_plugin", "data"]} - - -def validate_output(output_type, output): - # TODO(amaretskiy): this validation is simple and must be improved. 
- # Maybe it is worth to add classmethod OutputChart.validate(), so - # we could have flexible validation for custom chart plugins - if output_type not in ("additive", "complete"): - return ("unexpected output type: '%s', " - "should be in ('additive', 'complete')" % output_type) - - if type(output) != dict: - return ("%(name)s output item has wrong type '%(type)s', " - "must be 'dict'" % {"name": output_type, - "type": type(output).__name__}) - - for key in _OUTPUT_SCHEMA["required"]: - if key not in output: - return ("%(name)s output missing key '%(key)s'" - % {"name": output_type, "key": key}) - - for key in output: - if key not in _OUTPUT_SCHEMA["key_types"]: - return ("%(name)s output has unexpected key '%(key)s'" - % {"name": output_type, "key": key}) - - proper_type = _OUTPUT_SCHEMA["key_types"][key] - if not isinstance(output[key], proper_type): - if type(proper_type) == tuple: - return ("Value of %(name)s output %(key)s has wrong type " - "'%(actual_type)s', should be in %(types)r" - % {"name": output_type, - "key": key, - "actual_type": type(output[key]).__name__, - "types": tuple(t.__name__ - for t in proper_type)}) - return ("Value of %(name)s output %(key)s has wrong type " - "'%(actual_type)s', should be %(proper_type)s" - % {"name": output_type, - "key": key, - "actual_type": type(output[key]).__name__, - "proper_type": proper_type.__name__}) diff --git a/rally/task/processing/plot.py b/rally/task/processing/plot.py deleted file mode 100644 index e6dc13d1..00000000 --- a/rally/task/processing/plot.py +++ /dev/null @@ -1,321 +0,0 @@ -# Copyright 2014: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
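The ``validate_output()`` helper above returns ``None`` on success and a message describing the first problem found, for example:

.. code-block:: python

    bad = {"title": "CPU load", "chart_plugin": "Lines"}  # no "data" key
    print(validate_output("additive", bad))
    # -> additive output missing key 'data'

    good = {"title": "CPU load", "chart_plugin": "Lines",
            "data": [["load", 0.42]]}
    print(validate_output("additive", good))
    # -> None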
- -import collections -import datetime as dt -import hashlib -import itertools -import json - -import six - -from rally.common import objects -from rally.common.plugin import plugin -from rally.common import version -from rally.task.processing import charts -from rally.ui import utils as ui_utils - - -def _process_hooks(hooks): - """Prepare hooks data for report.""" - hooks_ctx = [] - for hook in hooks: - hook_ctx = {"name": hook["config"]["name"], - "desc": hook["config"].get("description", ""), - "additive": [], "complete": []} - - for res in hook["results"]: - started_at = dt.datetime.utcfromtimestamp(res["started_at"]) - finished_at = dt.datetime.utcfromtimestamp(res["finished_at"]) - triggered_by = "%(event_type)s: %(value)s" % res["triggered_by"] - - for i, data in enumerate(res.get("output", {}).get("additive")): - try: - hook_ctx["additive"][i] - except IndexError: - chart_cls = plugin.Plugin.get(data["chart_plugin"]) - hook_ctx["additive"].append([chart_cls]) - hook_ctx["additive"][i].append(data) - - complete_charts = [] - for data in res.get("output", {}).get("complete"): - chart_cls = plugin.Plugin.get(data.pop("chart_plugin")) - data["widget"] = chart_cls.widget - complete_charts.append(data) - - if complete_charts: - hook_ctx["complete"].append( - {"triggered_by": triggered_by, - "started_at": started_at.strftime("%Y-%m-%d %H:%M:%S"), - "finished_at": finished_at.strftime("%Y-%m-%d %H:%M:%S"), - "status": res["status"], - "charts": complete_charts}) - - for i in range(len(hook_ctx["additive"])): - chart_cls = hook_ctx["additive"][i].pop(0) - iters_count = len(hook_ctx["additive"][i]) - first = hook_ctx["additive"][i][0] - descr = first.get("description", "") - axis_label = first.get("axis_label", "") - chart = chart_cls({"total_iteration_count": iters_count}, - title=first["title"], - description=descr, - label=first.get("label", ""), - axis_label=axis_label) - for data in hook_ctx["additive"][i]: - chart.add_iteration(data["data"]) - hook_ctx["additive"][i] = chart.render() - - if hook_ctx["additive"] or hook_ctx["complete"]: - hooks_ctx.append(hook_ctx) - return hooks_ctx - - -def _process_workload(workload, workload_cfg, pos): - main_area = charts.MainStackedAreaChart(workload) - main_hist = charts.MainHistogramChart(workload) - main_stat = charts.MainStatsTable(workload) - load_profile = charts.LoadProfileChart(workload) - atomic_pie = charts.AtomicAvgChart(workload) - atomic_area = charts.AtomicStackedAreaChart(workload) - atomic_hist = charts.AtomicHistogramChart(workload) - - errors = [] - output_errors = [] - additive_output_charts = [] - complete_output = [] - for idx, itr in enumerate(workload["data"], 1): - if itr["error"]: - typ, msg, trace = itr["error"] - errors.append({"iteration": idx, - "type": typ, "message": msg, "traceback": trace}) - - for i, additive in enumerate(itr["output"]["additive"]): - try: - additive_output_charts[i].add_iteration(additive["data"]) - except IndexError: - chart_cls = plugin.Plugin.get(additive["chart_plugin"]) - chart = chart_cls( - workload, title=additive["title"], - description=additive.get("description", ""), - label=additive.get("label", ""), - axis_label=additive.get("axis_label", - "Iteration sequence number")) - chart.add_iteration(additive["data"]) - additive_output_charts.append(chart) - - complete_charts = [] - for complete in itr["output"]["complete"]: - complete_chart = dict(complete) - chart_cls = plugin.Plugin.get(complete_chart.pop("chart_plugin")) - complete_chart["widget"] = chart_cls.widget - 
complete_charts.append(complete_chart) - complete_output.append(complete_charts) - - for chart in (main_area, main_hist, main_stat, load_profile, - atomic_pie, atomic_area, atomic_hist): - chart.add_iteration(itr) - - cls, method = workload["name"].split(".") - additive_output = [chart.render() for chart in additive_output_charts] - - return { - "cls": cls, - "met": method, - "pos": str(pos), - "name": method + (pos and " [%d]" % (pos + 1) or ""), - "runner": workload["runner"]["type"], - "config": json.dumps({workload["name"]: [workload_cfg]}, indent=2), - "hooks": _process_hooks(workload["hooks"]), - "description": workload.get("description", ""), - "iterations": { - "iter": main_area.render(), - "pie": [("success", (workload["total_iteration_count"] - - len(errors))), - ("errors", len(errors))], - "histogram": main_hist.render()}, - "load_profile": load_profile.render(), - "atomic": {"histogram": atomic_hist.render(), - "iter": atomic_area.render(), - "pie": atomic_pie.render()}, - "table": main_stat.render(), - "additive_output": additive_output, - "complete_output": complete_output, - "has_output": any(additive_output) or any(complete_output), - "output_errors": output_errors, - "errors": errors, - "load_duration": workload["load_duration"], - "full_duration": workload["full_duration"], - "created_at": workload["created_at"], - "sla": workload["sla"], - "sla_success": workload["pass_sla"], - "iterations_count": workload["total_iteration_count"], - } - - -def _process_workloads(workloads): - p_workloads = [] - source_dict = collections.defaultdict(list) - position = collections.defaultdict(lambda: -1) - - for workload in workloads: - name = workload["name"] - position[name] += 1 - workload_cfg = objects.Workload.format_workload_config(workload) - source_dict[name].append(workload_cfg) - p_workloads.append(_process_workload(workload, workload_cfg, - position[name])) - - source = json.dumps(source_dict, indent=2, sort_keys=True) - return source, sorted(p_workloads, key=lambda r: (r["cls"], r["met"], - int(r["pos"]))) - - -def plot(tasks_results, include_libs=False): - tasks = [] - subtasks = [] - workloads = [] - for task in tasks_results: - tasks.append(task) - - for subtask in tasks[-1]["subtasks"]: - workloads.extend(subtask.pop("workloads")) - subtasks.extend(tasks[-1].pop("subtasks")) - - template = ui_utils.get_template("task/report.html") - source, data = _process_workloads(workloads) - return template.render(version=version.version_string(), - source=json.dumps(source), - data=json.dumps(data), - include_libs=include_libs) - - -def trends(tasks): - trends = Trends() - for task in tasks: - for workload in itertools.chain( - *[s["workloads"] for s in task["subtasks"]]): - workload_cfg = objects.Workload.format_workload_config(workload) - trends.add_result(workload, workload_cfg) - template = ui_utils.get_template("task/trends.html") - return template.render(version=version.version_string(), - data=json.dumps(trends.get_data())) - - -class Trends(object): - """Process workloads results and make trends data. - - Group workloads results by their input configuration, - calculate statistics for these groups and prepare it - for displaying in trends HTML report. 
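The grouping key is a hash of a canonical string form of the workload configuration, so logically identical configs fall into the same trend regardless of key order. A standalone sketch of that idea, mirroring ``_to_str()`` and ``_make_hash()`` below (the configs are made up):

.. code-block:: python

    import hashlib

    def to_str(obj):
        # Dicts and lists are sorted so the string form is order-independent.
        if isinstance(obj, dict):
            return "|".join(sorted(":".join((to_str(k), to_str(v)))
                                   for k, v in obj.items()))
        if isinstance(obj, (list, tuple)):
            return ",".join(sorted(to_str(v) for v in obj))
        return str(obj).strip()

    a = {"args": {"sleep": 1}, "runner": {"type": "constant"}}
    b = {"runner": {"type": "constant"}, "args": {"sleep": 1}}
    assert (hashlib.md5(to_str(a).encode("utf8")).hexdigest() ==
            hashlib.md5(to_str(b).encode("utf8")).hexdigest())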
- """ - - def __init__(self): - self._data = {} - - def _to_str(self, obj): - """Convert object into string.""" - if obj is None: - return "None" - elif isinstance(obj, six.string_types + (int, float)): - return str(obj).strip() - elif isinstance(obj, (list, tuple)): - return ",".join(sorted([self._to_str(v) for v in obj])) - elif isinstance(obj, dict): - return "|".join(sorted([":".join([self._to_str(k), - self._to_str(v)]) - for k, v in obj.items()])) - raise TypeError("Unexpected type %(type)r of object %(obj)r" - % {"obj": obj, "type": type(obj)}) - - def _make_hash(self, obj): - return hashlib.md5(self._to_str(obj).encode("utf8")).hexdigest() - - def add_result(self, workload, workload_cfg): - key = self._make_hash(workload_cfg) - if key not in self._data: - self._data[key] = { - "actions": {}, - "sla_failures": 0, - "name": workload["name"], - "config": json.dumps(workload_cfg, indent=2)} - - self._data[key]["sla_failures"] += not workload["pass_sla"] - - duration_stats = workload["statistics"]["durations"] - ts = int(workload["start_time"] * 1000) - - for action in itertools.chain(duration_stats["atomics"], - [duration_stats["total"]]): - # NOTE(amaretskiy): some atomic actions can be missed due to - # failures. We can ignore that because we use NVD3 lineChart() - # for displaying trends, which is safe for missed points - if action["name"] not in self._data[key]["actions"]: - self._data[key]["actions"][action["name"]] = { - "durations": {"min": [], "median": [], "90%ile": [], - "95%ile": [], "max": [], "avg": []}, - "success": []} - - try: - success = float(action["success"].rstrip("%")) - except ValueError: - # Got "n/a" for some reason - success = 0 - - self._data[key]["actions"][action["name"]]["success"].append( - (ts, success)) - - for tgt in ("min", "median", "90%ile", "95%ile", "max", "avg"): - d = self._data[key]["actions"][action["name"]]["durations"] - d[tgt].append((ts, action[tgt])) - - def get_data(self): - trends = [] - - for wload in self._data.values(): - trend = {"stat": {}, - "name": wload["name"], - "cls": wload["name"].split(".")[0], - "met": wload["name"].split(".")[1], - "sla_failures": wload["sla_failures"], - "config": wload["config"], - "actions": []} - - for action, data in wload["actions"].items(): - action_durs = [(k, sorted(v)) - for k, v in data["durations"].items()] - if action == "total": - trend.update( - {"length": len(data["success"]), - "durations": action_durs, - "success": [("success", sorted(data["success"]))]}) - else: - trend["actions"].append( - {"name": action, - "durations": action_durs, - "success": [("success", sorted(data["success"]))]}) - - for stat, comp in (("min", charts.streaming.MinComputation()), - ("max", charts.streaming.MaxComputation()), - ("avg", charts.streaming.MeanComputation())): - for k, v in trend["durations"]: - for i in v: - if isinstance(i[1], (float,) + six.integer_types): - comp.add(i[1]) - trend["stat"][stat] = comp.result() - - trends.append(trend) - - return sorted(trends, key=lambda i: i["name"]) diff --git a/rally/task/processing/utils.py b/rally/task/processing/utils.py deleted file mode 100644 index c7e52443..00000000 --- a/rally/task/processing/utils.py +++ /dev/null @@ -1,107 +0,0 @@ -# Copyright 2014: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import collections - - -class GraphZipper(object): - - def __init__(self, base_size, zipped_size=1000): - """Init graph zipper. - - :param base_size: Amount of points in raw graph - :param zip_size: Amount of points that should be in zipped graph - """ - self.base_size = base_size - self.zipped_size = zipped_size - if self.base_size >= self.zipped_size: - self.compression_ratio = self.base_size / float(self.zipped_size) - else: - self.compression_ratio = 1 - - self.point_order = 0 - - self.cached_ratios_sum = 0 - self.ratio_value_points = [] - - self.zipped_graph = [] - - def _get_zipped_point(self): - if self.point_order - self.compression_ratio <= 1: - order = 1 - elif self.point_order == self.base_size: - order = self.base_size - else: - order = self.point_order - int(self.compression_ratio / 2.0) - - value = ( - sum(p[0] * p[1] for p in self.ratio_value_points) / - self.compression_ratio - ) - - return [order, value] - - def add_point(self, value): - self.point_order += 1 - - if self.point_order > self.base_size: - raise RuntimeError("GraphZipper is already full. " - "You can't add more points.") - - if not isinstance(value, (int, float)): - value = 0 - - if self.compression_ratio <= 1: # We don't need to compress - self.zipped_graph.append([self.point_order, value]) - elif self.cached_ratios_sum + 1 < self.compression_ratio: - self.cached_ratios_sum += 1 - self.ratio_value_points.append([1, value]) - else: - rest = self.compression_ratio - self.cached_ratios_sum - self.ratio_value_points.append([rest, value]) - self.zipped_graph.append(self._get_zipped_point()) - self.ratio_value_points = [[1 - rest, value]] - self.cached_ratios_sum = self.ratio_value_points[0][0] - - def get_zipped_graph(self): - return self.zipped_graph - - -class AtomicMerger(object): - - def __init__(self, atomic): - self._atomic = atomic - self._merge_name = lambda x, y: "%s (x%d)" % (x, y) if y > 1 else x - - def get_merged_names(self): - return [self._merge_name(key, value.get("count", 1)) - for key, value in self._atomic.items()] - - def get_merged_name(self, name): - return self._merge_name(name, self._atomic[name].get("count", 1)) - - def merge_atomic_actions(self, atomic_actions): - new_atomic_actions = collections.OrderedDict() - for name in self._atomic.keys(): - count = 0 - duration = 0 - for action in atomic_actions: - if action["name"] == name: - duration += action["finished_at"] - action["started_at"] - count += 1 - if count == self._atomic[name].get("count", 1): - new_name = self._merge_name(name, count) - new_atomic_actions[new_name] = duration - return new_atomic_actions diff --git a/rally/task/runner.py b/rally/task/runner.py deleted file mode 100644 index 95fb6546..00000000 --- a/rally/task/runner.py +++ /dev/null @@ -1,358 +0,0 @@ -# Copyright 2013: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import abc
-import collections
-import copy
-import multiprocessing
-import time
-
-import six
-
-from rally.common import logging
-from rally.common.plugin import plugin
-from rally.common import utils as rutils
-from rally.common import validation
-from rally.task.processing import charts
-from rally.task import scenario
-from rally.task import types
-from rally.task import utils
-
-
-LOG = logging.getLogger(__name__)
-configure = plugin.configure
-
-
-def format_result_on_timeout(exc, timeout):
-    return {
-        "duration": timeout,
-        "idle_duration": 0,
-        "output": {"additive": [], "complete": []},
-        "atomic_actions": [],
-        "error": utils.format_exc(exc)
-    }
-
-
-def _get_scenario_context(iteration, context_obj):
-    context_obj = copy.deepcopy(context_obj)
-    context_obj["iteration"] = iteration + 1  # Numeration starts from `1'
-    return context_obj
-
-
-def _run_scenario_once(cls, method_name, context_obj, scenario_kwargs,
-                       event_queue):
-    iteration = context_obj["iteration"]
-    event_queue.put({
-        "type": "iteration",
-        "value": iteration,
-    })
-
-    # provide arguments isolation between iterations
-    scenario_kwargs = copy.deepcopy(scenario_kwargs)
-
-    LOG.info("Task %(task)s | ITER: %(iteration)s START" %
-             {"task": context_obj["task"]["uuid"], "iteration": iteration})
-
-    scenario_inst = cls(context_obj)
-    error = []
-    try:
-        with rutils.Timer() as timer:
-            getattr(scenario_inst, method_name)(**scenario_kwargs)
-    except Exception as e:
-        error = utils.format_exc(e)
-        if logging.is_debug():
-            LOG.exception(e)
-    finally:
-        status = "Error %s: %s" % tuple(error[0:2]) if error else "OK"
-        LOG.info("Task %(task)s | ITER: %(iteration)s END: %(status)s" %
-                 {"task": context_obj["task"]["uuid"], "iteration": iteration,
-                  "status": status})
-
-    return {"duration": timer.duration() - scenario_inst.idle_duration(),
-            "timestamp": timer.timestamp(),
-            "idle_duration": scenario_inst.idle_duration(),
-            "error": error,
-            "output": scenario_inst._output,
-            "atomic_actions": scenario_inst.atomic_actions()}
-
-
-def _worker_thread(queue, cls, method_name, context_obj, scenario_kwargs,
-                   event_queue):
-    queue.put(_run_scenario_once(cls, method_name, context_obj,
-                                 scenario_kwargs, event_queue))
-
-
-def _log_worker_info(**info):
-    """Log worker parameters for debugging.
-
-    :param info: key-value pairs to be logged
-    """
-    info_message = "\n\t".join(["%s: %s" % (k, v)
-                                for k, v in info.items()])
-    LOG.debug("Starting a worker.\n\t%s" % info_message)
-
-
-@validation.add_default("jsonschema")
-@plugin.base()
-@six.add_metaclass(abc.ABCMeta)
-class ScenarioRunner(plugin.Plugin, validation.ValidatablePluginMixin):
-    """Base class for all scenario runners.
-
-    Scenario runner is an entity that implements a certain strategy of
-    launching benchmark scenarios, e.g. running them continuously or
-    periodically for a given number of times or seconds.
-    These strategies should be implemented in subclasses of ScenarioRunner
-    in the _run_scenario() method.
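A bare-bones sketch of such a strategy: a hypothetical serial runner that executes iterations one after another in the current process (events and most error handling are omitted for brevity):

.. code-block:: python

    @configure(name="example_serial")
    class ExampleSerialRunner(ScenarioRunner):
        """Run all iterations sequentially in the parent process."""

        CONFIG_SCHEMA = {
            "type": "object",
            "properties": {"type": {"type": "string"},
                           "times": {"type": "integer", "minimum": 1}},
            "required": ["type"],
            "additionalProperties": False
        }

        def _run_scenario(self, cls, method_name, context, args):
            class _EventQueue(object):
                # _run_scenario_once() only needs a .put() method.
                @staticmethod
                def put(event):
                    pass  # iteration events are dropped in this sketch

            for i in range(self.config.get("times", 1)):
                if self.aborted.is_set():
                    break
                self._send_result(_run_scenario_once(
                    cls, method_name, _get_scenario_context(i, context),
                    args, _EventQueue))
            self._flush_results()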
- """ - - CONFIG_SCHEMA = { - "type": "object", - "properties": { - "type": {"type": "string"}, - }, - "required": ["type"], - "additionalProperties": True - } - - def __init__(self, task, config, batch_size=0): - """Runner constructor. - - It sets task and config to local variables. Also initialize - result_queue, where results will be put by _send_result method. - - :param task: Instance of objects.Task - :param config: Dict with runner section from benchmark configuration - """ - self.task = task - self.config = config - self.result_queue = collections.deque() - self.event_queue = collections.deque() - self.aborted = multiprocessing.Event() - self.run_duration = 0 - self.batch_size = batch_size - self.result_batch = [] - - @abc.abstractmethod - def _run_scenario(self, cls, method_name, context, args): - """Runs the specified benchmark scenario with given arguments. - - :param cls: The Scenario class where the scenario is implemented - :param method_name: Name of the method that implements the scenario - :param context: Benchmark context that contains users, admin & other - information, that was created before benchmark started. - :param args: Arguments to call the scenario method with - - :returns: List of results fore each single scenario iteration, - where each result is a dictionary - """ - - def run(self, name, context, args): - scenario_plugin = scenario.Scenario.get(name) - - # NOTE(boris-42): processing @types decorators - args = types.preprocess(name, context, args) - - with rutils.Timer() as timer: - # TODO(boris-42): remove method_name argument, now it's always run - self._run_scenario(scenario_plugin, "run", context, args) - - self.run_duration = timer.duration() - - def abort(self): - """Abort the execution of further benchmark scenario iterations.""" - self.aborted.set() - - @staticmethod - def _create_process_pool(processes_to_start, worker_process, - worker_args_gen): - """Create a pool of processes with some defined target function. - - :param processes_to_start: number of processes to create in the pool - :param worker_process: target function for all processes in the pool - :param worker_args_gen: generator of arguments for the target function - :returns: the process pool as a deque - """ - process_pool = collections.deque() - - for i in range(processes_to_start): - kwrgs = {"processes_to_start": processes_to_start, - "processes_counter": i} - process = multiprocessing.Process(target=worker_process, - args=next(worker_args_gen), - kwargs={"info": kwrgs}) - process.start() - process_pool.append(process) - - return process_pool - - def _join_processes(self, process_pool, result_queue, event_queue): - """Join the processes in the pool and send their results to the queue. 
- - :param process_pool: pool of processes to join - :param result_queue: multiprocessing.Queue that receives the results - :param event_queue: multiprocessing.Queue that receives the events - """ - while process_pool: - while process_pool and not process_pool[0].is_alive(): - process_pool.popleft().join() - - if result_queue.empty() and event_queue.empty(): - # sleep a bit to avoid 100% usage of CPU by this method - time.sleep(0.01) - - while not event_queue.empty(): - self.send_event(**event_queue.get()) - - while not result_queue.empty(): - self._send_result(result_queue.get()) - - self._flush_results() - result_queue.close() - event_queue.close() - - def _flush_results(self): - if self.result_batch: - sorted_batch = sorted(self.result_batch) - self.result_queue.append(sorted_batch) - del self.result_batch[:] - - _RESULT_SCHEMA = { - "fields": [("duration", float), ("timestamp", float), - ("idle_duration", float), ("output", dict), - ("atomic_actions", list), ("error", list)] - } - - def _result_has_valid_schema(self, result): - """Check whatever result has valid schema or not.""" - # NOTE(boris-42): We can't use here jsonschema, this method is called - # to check every iteration result schema. And this - # method works 200 times faster then jsonschema - # which totally makes sense. - for key, proper_type in self._RESULT_SCHEMA["fields"]: - if key not in result: - LOG.warning("'%s' is not result" % key) - return False - if not isinstance(result[key], proper_type): - LOG.warning( - "Task %(uuid)s | result['%(key)s'] has wrong type " - "'%(actual_type)s', should be '%(proper_type)s'" - % {"uuid": self.task["uuid"], - "key": key, - "actual_type": type(result[key]), - "proper_type": proper_type.__name__}) - return False - - actions_list = copy.deepcopy(result["atomic_actions"]) - for action in actions_list: - for key in ("name", "started_at", "finished_at", "children"): - if key not in action: - LOG.warning( - "Task %(uuid)s | Atomic action %(action)s " - "missing key '%(key)s'" - % {"uuid": self.task["uuid"], - "action": action, - "key": key}) - return False - for key in ("started_at", "finished_at"): - if not isinstance(action[key], float): - LOG.warning( - "Task %(uuid)s | Atomic action %(action)s has " - "wrong type '%(type)s', should be 'float'" - % {"uuid": self.task["uuid"], - "action": action, - "type": type(action[key])}) - return False - if action["children"]: - actions_list.extend(action["children"]) - - for e in result["error"]: - if not isinstance(e, str): - LOG.warning("error value has wrong type '%s', should be 'str'" - % type(e)) - return False - - for key in ("additive", "complete"): - if key not in result["output"]: - LOG.warning("Task %(uuid)s | Output missing key '%(key)s'" - % {"uuid": self.task["uuid"], "key": key}) - return False - - type_ = type(result["output"][key]) - if type_ != list: - LOG.warning( - "Task %(uuid)s | Value of result['output']['%(key)s'] " - "has wrong type '%(type)s', must be 'list'" - % {"uuid": self.task["uuid"], - "key": key, "type": type_.__name__}) - return False - - for key in result["output"]: - for output_data in result["output"][key]: - message = charts.validate_output(key, output_data) - if message: - LOG.warning("Task %(uuid)s | %(message)s" - % {"uuid": self.task["uuid"], - "message": message}) - return False - - return True - - def _send_result(self, result): - """Store partial result to send it to consumer later. - - :param result: Result dict to be sent. 
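For reference, a minimal iteration result that passes the hand-rolled schema check above looks like this (all values are illustrative):

.. code-block:: python

    result = {
        "duration": 1.2,
        "timestamp": 1500000000.0,
        "idle_duration": 0.0,
        "output": {"additive": [], "complete": []},
        "atomic_actions": [{"name": "nova.boot_server",
                            "started_at": 1500000000.0,
                            "finished_at": 1500000001.2,
                            "children": []}],
        "error": [],
    }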
It should match the - ScenarioRunnerResult schema, otherwise - ValidationError is raised. - """ - - if not self._result_has_valid_schema(result): - LOG.warning( - "Task %(task)s | Runner `%(runner)s` is trying to send " - "results in wrong format" - % {"task": self.task["uuid"], "runner": self.get_name()}) - return - - self.result_batch.append(result) - - if len(self.result_batch) >= self.batch_size: - sorted_batch = sorted(self.result_batch, - key=lambda r: r["timestamp"]) - self.result_queue.append(sorted_batch) - del self.result_batch[:] - - def send_event(self, type, value=None): - """Store an event to send it to the consumer later. - - :param type: Event type - :param value: Optional event data - """ - self.event_queue.append({"type": type, - "value": value}) - - def _log_debug_info(self, **info): - """Log runner parameters for debugging. - - The method logs the runner name, the task id, as well as the values - passed as arguments. - - :param info: key-value pairs to be logged - """ - info_message = "\n\t".join(["%s: %s" % (k, v) - for k, v in info.items()]) - LOG.debug("Starting the %(name)s runner (task UUID: %(task)s)." - "\n\t%(info)s" % - {"name": self._meta_get("name"), - "task": self.task["uuid"], - "info": info_message}) diff --git a/rally/task/scenario.py b/rally/task/scenario.py deleted file mode 100644 index cacd32b4..00000000 --- a/rally/task/scenario.py +++ /dev/null @@ -1,165 +0,0 @@ -# Copyright 2013: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import random - -from rally.common.i18n import _ -from rally.common import logging -from rally.common.objects import task # noqa -from rally.common.plugin import plugin -from rally.common import utils -from rally.common import validation -from rally import exceptions -from rally.task import atomic -from rally.task import functional -from rally.task.processing import charts - - -LOG = logging.getLogger(__name__) - - -@logging.log_deprecated_args("Use 'platform' arg instead", "0.10.0", - ["namespace"], log_function=LOG.warning) -def configure(name, platform="default", namespace=None, context=None): - """Configure scenario by setting proper meta data. - - This can also transform a plain function into a scenario plugin; however, - this approach is deprecated - now scenarios must be represented by classes - based on rally.task.scenario.Scenario. - - :param name: str scenario name - :param platform: str plugin's platform - :param context: default task context that is created for this scenario. - If there are custom user-specified contexts, this one - will be updated by the provided contexts. - """ - if namespace: - platform = namespace - - def wrapper(cls): - # TODO(boris-42): Drop this check as soon as we refactor rally report - if "." 
not in name.strip("."): - msg = (_("Scenario name must include a dot: '%s'") % name) - raise exceptions.RallyException(msg) - - cls = plugin.configure(name=name, platform=platform)(cls) - cls._meta_set("default_context", context or {}) - return cls - - return wrapper - - -@validation.add_default("args-spec") -@plugin.base() -class Scenario(plugin.Plugin, - atomic.ActionTimerMixin, - functional.FunctionalMixin, - utils.RandomNameGeneratorMixin, - validation.ValidatablePluginMixin): - """This is base class for any benchmark scenario. - - You should create subclass of this class. And your test scenarios will - be auto discoverable and you will be able to specify it in test config. - """ - RESOURCE_NAME_FORMAT = "s_rally_XXXXXXXX_XXXXXXXX" - - def __init__(self, context=None): - super(Scenario, self).__init__() - self.context = context or {} - self.task = self.context.get("task", {}) - self._idle_duration = 0.0 - self._output = {"additive": [], "complete": []} - - def get_owner_id(self): - if "owner_id" in self.context: - return self.context["owner_id"] - return super(Scenario, self).get_owner_id() - - @classmethod - def get_default_context(cls): - return cls._meta_get("default_context") - - def sleep_between(self, min_sleep, max_sleep=None, atomic_delay=0.1): - """Call an interruptable_sleep() for a random amount of seconds. - - The exact time is chosen uniformly randomly from the interval - [min_sleep; max_sleep). The method also updates the idle_duration - variable to take into account the overall time spent on sleeping. - - :param min_sleep: Minimum sleep time in seconds (non-negative) - :param max_sleep: Maximum sleep time in seconds (non-negative) - :param atomic_delay: parameter with which time.sleep would be called - int(sleep_time / atomic_delay) times. - """ - if max_sleep is None: - max_sleep = min_sleep - - if not 0 <= min_sleep <= max_sleep: - raise exceptions.InvalidArgumentsException( - "0 <= min_sleep <= max_sleep") - - sleep_time = random.uniform(min_sleep, max_sleep) - utils.interruptable_sleep(sleep_time, atomic_delay) - self._idle_duration += sleep_time - - def idle_duration(self): - """Returns duration of all sleep_between.""" - return self._idle_duration - - def add_output(self, additive=None, complete=None): - """Add iteration's custom output data. - - This saves custom output data to task results. The main way to get - this data processed is to find it in HTML report ("Scenario Data" - tab), where it is displayed by tables or various charts (StackedArea, - Lines, Pie). - - Take a look at "Processing Output Charts" section of Rally Plugins - Reference to find explanations and examples about additive and - complete output types and how to display this output data by - specific widgets. - - Here is a simple example how to add both additive and complete data - and display them by StackedArea widget in HTML report: - - .. 
code-block:: python - - self.add_output( - additive={"title": "Additive data in StackedArea", - "description": "Iterations trend for foo and bar", - "chart_plugin": "StackedArea", - "data": [["foo", 12], ["bar", 34]]}, - complete={"title": "Complete data as stacked area", - "description": "Data is shown as-is in StackedArea", - "chart_plugin": "StackedArea", - "data": [["foo", [[0, 5], [1, 42], [2, 15]]], - ["bar", [[0, 2], [1, 1.3], [2, 5]]]], - "label": "Y-axis label text", - "axis_label": "X-axis label text"}) - - :param additive: dict with additive output - :param complete: dict with complete output - :raises RallyException: if output has wrong format - """ - for key, value in (("additive", additive), ("complete", complete)): - if value: - message = charts.validate_output(key, value) - if message: - raise exceptions.RallyException(message) - self._output[key].append(value) - - @classmethod - def _get_doc(cls): - return cls.run.__doc__ diff --git a/rally/task/service.py b/rally/task/service.py deleted file mode 100644 index 8e8ff705..00000000 --- a/rally/task/service.py +++ /dev/null @@ -1,338 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import functools -import inspect - -import six - -from rally.common.plugin import discover -from rally.common.plugin import meta -from rally import exceptions -from rally.task import atomic - - -def service(service_name, service_type, version, client_name=None): - """Mark class as an implementation of partial service APIs. - - :param service_name: name of the service (e.g. Nova) - :type service_name: str - :param service_type: type of the service (e.g. Compute) - :type service_type: str - :param version: version of service (e.g. 2.1) - :type version: str - :param client_name: name of client for service. If None, service_name will - be used instead. - :type client_name: str - """ - def wrapper(cls): - cls._meta_init() - cls._meta_set("name", service_name.lower()) - cls._meta_set("type", service_type.lower()) - cls._meta_set("version", str(version)) - cls._meta_set("client_name", client_name or service_name) - return cls - return wrapper - - -def compat_layer(original_impl): - """Set class which should be unified to common interface - - :param original_impl: implementation of specific service API - :type original_impl: cls - """ - def wrapper(cls): - cls._meta_init() - cls._meta_set("impl", original_impl) - return cls - return wrapper - - -def should_be_overridden(func): - """Mark method which should be overridden by subclasses.""" - func.require_impl = True - return func - - -def method_wrapper(func): - """Wraps service's methods with some magic - - 1) Each service method should not be called with positional arguments, - since it can lead mistakes in wrong order while writing version - compatible code. We had such situation in KeystoneWrapper - (see https://review.openstack.org/#/c/309470/ ): - - .. 
code-block:: python - - class IdentityService(Service): - def add_role(self, role_id, user_id, project_id): - self._impl(role_id, user_id, project_id) - - class KeystoneServiceV2(Service): - def add_role(self, user_id, role_id, project_id): - pass - - class KeystoneServiceV3(Service): - def add_role(self, role_id, user_id, project_id): - pass - - Explanation of example: The signature of add_role method is - different in KeystoneServiceV2 and KeystoneServiceV3. Since - IdentityService uses positional arguments to make call to - self._impl.add_role, we have swapped values of role_id and user_id in - case of KeystoneServiceV2. - - Original code and idea are taken from `positional` library. - - 2) We do not need keep atomics for some actions, for example for inner - actions (until we start to support them). Previously, we used - "atomic_action" argument with `if atomic_action` checks inside each - method. To reduce number of similar if blocks, let's write them in one - place, make the code cleaner and support such feature for all service - methods. - """ - - @functools.wraps(func) - def wrapper(instance, *args, **kwargs): - args_len = len(args) - - if args_len > 1: - message = ("%(name)s takes at most 1 positional argument " - "(%(given)d given)" % {"name": func.__name__, - "given": args_len}) - - raise TypeError(message) - - return func(instance, *args, **kwargs) - - return wrapper - - -class ServiceMeta(type): - """Alternative implementation of abstract classes for Services. - - Common class of specific Service should not be hardcoded for any version of - API. We expect that all public methods of specific common class are - overridden in all versioned implementation. - """ - def __new__(mcs, name, parents, dct): - for field in dct: - if not field.startswith("_") and callable(dct[field]): - dct[field] = method_wrapper(dct[field]) - return super(ServiceMeta, mcs).__new__(mcs, name, parents, dct) - - def __init__(cls, name, bases, namespaces): - super(ServiceMeta, cls).__init__(name, bases, namespaces) - bases = [c for c in cls.__bases__ if type(c) == ServiceMeta] - if not bases: - # nothing to check - return - - # obtain all properties of cls, since namespace doesn't include - # properties of parents - not_implemented_apis = set() - for name, obj in inspect.getmembers(cls): - if (getattr(obj, "require_impl", False) and - # name in namespace means that object was introduced in cls - name not in namespaces): - # it is not overridden... - not_implemented_apis.add(name) - - if not_implemented_apis: - raise exceptions.RallyException( - "%s has wrong implementation. Implementation of specific " - "version of API should override all required methods of " - "base service class. Missed method(s): %s." % - (cls.__name__, ", ".join(not_implemented_apis))) - - -@six.add_metaclass(ServiceMeta) -class Service(meta.MetaMixin): - """Base help class for Cloud Services(for example OpenStack services). - - A simple example of implementation: - - .. 
code-block:: - - # Implementation of Keystone V2 service - @service("keystone", service_type="identity", version="2") - class KeystoneV2Service(Service): - - @atomic.action_timer("keystone_v2.create_tenant") - def create_tenant(self, tenant_name): - return self.client.tenants.create(project_name) - - # Implementation of Keystone V3 service - @service("keystone", service_type="identity", version="3") - class KeystoneV3Service(Service): - - @atomic.action_timer("keystone_v3.create_project") - def create_project(self, project_name): - return self.client.project.create(project_name) - - """ - - def __init__(self, clients, name_generator=None, atomic_inst=None): - """Initialize service class - - :param clients: an instance of rally.osclients.Clients - :param name_generator: a method for generating random names. Usually - it is generate_random_name method of RandomNameGeneratorMixin - instance. - :param atomic_inst: an object to store atomic actions. Usually, it is - `_atomic_actions` property of ActionTimerMixin instance - """ - self._clients = clients - self._name_generator = name_generator - - if atomic_inst is None: - self._atomic_actions = atomic.ActionTimerMixin().atomic_actions() - else: - self._atomic_actions = atomic_inst - - self.version = None - if self._meta_is_inited(raise_exc=False): - self.version = self._meta_get("version") - - def generate_random_name(self): - if not self._name_generator: - raise exceptions.RallyException( - "You cannot use `generate_random_name` method, until you " - "initialize class with `name_generator` argument.") - return self._name_generator() - - -class UnifiedService(Service): - """Base help class for unified layer for Cloud Services - - A simple example of Identity service implementation: - - .. code-block:: - - import collections - - - Project = collections.namedtuple("Project", ["id", "name"]) - - - # Unified entry-point for Identity OpenStack service - class Identity(UnifiedService): - - # this method is equal in UnifiedKeystoneV2 and UnifiedKeystoneV3. - # Since there is no other implementation except Keystone, there - # are no needs to copy-paste it. - @classmethod - def _is_applicable(cls, clients): - cloud_version = clients.keystone().version.split(".")[0][1:] - return cloud_version == impl._meta_get("version") - - def create_project(self, project_name, domain_name="Default"): - return self._impl.create_project(project_name, - domain_name=domain_name) - - - # Class which unifies raw keystone v2 data to common form - @compat_layer(KeystoneV2Service) - class UnifiedKeystoneV2(Identity): - def create_project(self, project_name, domain_name="Default"): - if domain_name.lower() != "default": - raise NotImplementedError( - "Domain functionality not implemented in Keystone v2") - tenant = self._impl.create_tenant(project_name) - return Project(id=tenant.id, name=tenant.name) - - # Class which unifies raw keystone v3 data to common form - @compat_layer(KeystoneV3Service) - class UnifiedKeystoneV3(Identity): - def create_project(self, project_name, domain_name="Default"): - project = self._impl.create_project(project_name, - domain_name=domain_name) - return Project(id=project.id, name=project.name) - """ - - def __init__(self, clients, name_generator=None, atomic_inst=None): - """Initialize service class - - :param clients: an instance of rally.osclients.Clients - :param name_generator: a method for generating random names. Usually - it is generate_random_name method of RandomNameGeneratorMixin - instance. 
- :param atomic_inst: an object to store atomic actions. Usually, it is - `_atomic_actions` property of ActionTimerMixin instance - """ - super(UnifiedService, self).__init__(clients, name_generator, - atomic_inst) - - if self._meta_is_inited(raise_exc=False): - # it is an instance of compatibility layer for specific Service - impl_cls = self._meta_get("impl") - self._impl = impl_cls(self._clients, self._name_generator, - self._atomic_actions) - self.version = impl_cls._meta_get("version") - else: - # it is a base class of service - impl_cls, _all_impls = self.discover_impl() - if not impl_cls: - raise exceptions.RallyException( - "There is no proper implementation for %s." - % self.__class__.__name__) - self._impl = impl_cls(self._clients, self._name_generator, - self._atomic_actions) - self.version = self._impl.version - - def discover_impl(self): - """Discover implementation for service - - One Service can have different implementations(not only in terms of - versioning, for example Network service of OpenStack has Nova-network - and Neutron implementation. they are quite different). Each of such - implementations can support several versions. This method is designed - to choose the proper helper class based on available services in the - cloud and based on expected version. - - Returns a tuple with implementation class as first element, a set of - all implementations as a second element - """ - - # find all classes with unified implementation - impls = {cls: cls._meta_get("impl") - for cls in discover.itersubclasses(self.__class__) - if (cls._meta_is_inited(raise_exc=False) and - cls._meta_get("impl"))} - - service_names = {o._meta_get("name") for o in impls.values()} - - enabled_services = None - # let's make additional calls to cloud only when we need to make a - # decision based on available services - if len(service_names) > 1: - enabled_services = list(self._clients.services().values()) - - for cls, impl in impls.items(): - if (enabled_services is not None and - impl._meta_get("name") not in enabled_services): - continue - if cls.is_applicable(self._clients): - return cls, impls - - return None, impls - - @classmethod - def is_applicable(cls, clients): - """Check that implementation can be used in cloud.""" - - if cls._meta_is_inited(raise_exc=False): - impl = cls._meta_get("impl", cls) - client = getattr(clients, impl._meta_get("client_name")) - return client.choose_version() == impl._meta_get("version") - return False diff --git a/rally/task/sla.py b/rally/task/sla.py deleted file mode 100644 index dcc924a7..00000000 --- a/rally/task/sla.py +++ /dev/null @@ -1,195 +0,0 @@ -# Copyright 2014: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -""" -SLA (Service-level agreement) is set of details for determining compliance -with contracted values such as maximum error rate or minimum response time. 
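Looping back to ServiceMeta and should_be_overridden() above: the metaclass fails fast at class-definition time when a versioned implementation forgets a required method. A self-contained sketch (the class names are made up for illustration):

.. code-block:: python

    class Identity(Service):
        @should_be_overridden
        def create_project(self, name):
            pass

    def define_broken_impl():
        class IdentityV3(Identity):   # forgets to override create_project
            pass

    try:
        define_broken_impl()
    except exceptions.RallyException as e:
        print(e)   # "... Missed method(s): create_project."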
-""" - -import abc - -import six - -from rally.common.i18n import _ -from rally.common.plugin import plugin -from rally.common import validation -from rally.task import utils - - -configure = plugin.configure - - -def _format_result(criterion_name, success, detail): - """Returns the SLA result dict corresponding to the current state.""" - return {"criterion": criterion_name, - "success": success, - "detail": detail} - - -class SLAChecker(object): - """Base SLA checker class.""" - - def __init__(self, config): - self.config = config - self.unexpected_failure = None - self.aborted_on_sla = False - self.aborted_manually = False - self.sla_criteria = [SLA.get(name)(criterion_value) - for name, criterion_value - in config.get("sla", {}).items()] - - def add_iteration(self, iteration): - """Process the result of a single iteration. - - The call to add_iteration() will return True if all the SLA checks - passed, and False otherwise. - - :param iteration: iteration result object - """ - if isinstance(iteration, dict): - atomic_actions = iteration.get("atomic_actions", None) - iteration["atomic_actions"] = utils.WrapperForAtomicActions( - atomic_actions) - return all([sla.add_iteration(iteration) for sla in self.sla_criteria]) - - def merge(self, other): - self._validate_config(other) - self._validate_sla_types(other) - - return all([self_sla.merge(other_sla) - for self_sla, other_sla - in six.moves.zip( - self.sla_criteria, other.sla_criteria)]) - - def _validate_sla_types(self, other): - for self_sla, other_sla in six.moves.zip_longest( - self.sla_criteria, other.sla_criteria): - self_sla.validate_type(other_sla) - - def _validate_config(self, other): - self_config = self.config.get("sla", {}) - other_config = other.config.get("sla", {}) - if self_config != other_config: - message = _( - "Error merging SLACheckers with configs %s, %s. " - "Only SLACheckers with the same config could be merged." - ) % (self_config, other_config) - raise TypeError(message) - - def results(self): - results = [sla.result() for sla in self.sla_criteria] - if self.aborted_on_sla: - results.append(_format_result( - "aborted_on_sla", False, - _("Task was aborted due to SLA failure(s)."))) - - if self.aborted_manually: - results.append(_format_result( - "aborted_manually", False, - _("Task was aborted due to abort signal."))) - - if self.unexpected_failure: - results.append(_format_result( - "something_went_wrong", False, - _("Unexpected error: %s") % self.unexpected_failure)) - - return results - - def set_aborted_on_sla(self): - self.aborted_on_sla = True - - def set_aborted_manually(self): - self.aborted_manually = True - - def set_unexpected_failure(self, exc): - self.unexpected_failure = exc - - -@validation.add_default("jsonschema") -@plugin.base() -@six.add_metaclass(abc.ABCMeta) -class SLA(plugin.Plugin, validation.ValidatablePluginMixin): - """Factory for criteria classes.""" - - CONFIG_SCHEMA = {"type": "null"} - - def __init__(self, criterion_value): - self.criterion_value = criterion_value - self.success = True - - @abc.abstractmethod - def add_iteration(self, iteration): - """Process the result of a single iteration and perform a SLA check. - - The call to add_iteration() will return True if the SLA check passed, - and False otherwise. 
- - :param iteration: iteration result object - :returns: True if the SLA check passed, False otherwise - """ - - def result(self): - """Returns the SLA result dict corresponding to the current state.""" - return _format_result(self.get_name(), self.success, self.details()) - - @abc.abstractmethod - def details(self): - """Returns the string describing the current results of the SLA.""" - - def status(self): - """Return "Passed" or "Failed" depending on the current SLA status.""" - return "Passed" if self.success else "Failed" - - @abc.abstractmethod - def merge(self, other): - """Merge aggregated data from another SLA instance into self. - - Process the results of several iterations aggregated in another - instance of SLA together with ones stored in self so that the - code - - sla1 = SLA() - sla1.add_iteration(a) - sla1.add_iteration(b) - - sla2 = SLA() - sla2.add_iteration(c) - sla2.add_iteration(d) - - sla1.merge(sla2) - - is equivalent to - - sla1 = SLA() - sla1.add_iteration(a) - sla1.add_iteration(b) - sla1.add_iteration(c) - sla1.add_iteration(d) - - The call to merge() will return True if the SLA check - passed, and False otherwise. - - :param other: another SLA object - :returns: True if the SLA check passed, False otherwise - """ - - def validate_type(self, other): - if type(self) != type(other): - message = _( - "Error merging SLAs of types %s, %s. " - "Only SLAs of the same type could be merged." - ) % (type(self), type(other)) - raise TypeError(message) diff --git a/rally/task/trigger.py b/rally/task/trigger.py deleted file mode 100644 index 80fec655..00000000 --- a/rally/task/trigger.py +++ /dev/null @@ -1,68 +0,0 @@ -# Copyright 2016: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
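Tying the SLA contract above together, a minimal concrete criterion might look like the following. This is a hypothetical plugin, loosely modeled on rally's max_seconds_per_iteration, not part of the deleted code:

.. code-block:: python

    @configure(name="illustrative_max_duration")
    class MaxDuration(SLA):
        CONFIG_SCHEMA = {"type": "number", "minimum": 0}

        def __init__(self, criterion_value):
            super(MaxDuration, self).__init__(criterion_value)
            self.max_seen = 0.0

        def add_iteration(self, iteration):
            self.max_seen = max(self.max_seen, iteration["duration"])
            self.success = self.max_seen <= self.criterion_value
            return self.success

        def merge(self, other):
            self.max_seen = max(self.max_seen, other.max_seen)
            self.success = self.max_seen <= self.criterion_value
            return self.success

        def details(self):
            return "Max duration %.2fs <= %.2fs - %s" % (
                self.max_seen, self.criterion_value, self.status())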
- -import abc - -import six - -from rally.common.i18n import _ -from rally.common import logging -from rally.common.plugin import plugin -from rally.common import validation - -configure = plugin.configure - -LOG = logging.getLogger(__name__) - - -@validation.add_default("jsonschema") -@plugin.base() -@six.add_metaclass(abc.ABCMeta) -class Trigger(plugin.Plugin, validation.ValidatablePluginMixin): - """Factory for trigger classes.""" - - CONFIG_SCHEMA = {"type": "null"} - - def __init__(self, context, task, hook_cls): - self.context = context - self.config = self.context["trigger"]["args"] - self.task = task - self.hook_cls = hook_cls - self._runs = [] - - @abc.abstractmethod - def get_listening_event(self): - """Returns event type to listen.""" - - def on_event(self, event_type, value=None): - """Launch hook on specified event.""" - LOG.info(_("Hook %s is triggered for Task %s by %s=%s") - % (self.hook_cls.__name__, self.task["uuid"], - event_type, value)) - hook = self.hook_cls(self.task, self.context.get("args", {}), - {"event_type": event_type, "value": value}) - hook.run_async() - self._runs.append(hook) - - def get_results(self): - results = {"config": self.context, - "results": [], - "summary": {}} - for hook in self._runs: - hook_result = hook.result() - results["results"].append(hook_result) - results["summary"].setdefault(hook_result["status"], 0) - results["summary"][hook_result["status"]] += 1 - return results diff --git a/rally/task/types.py b/rally/task/types.py deleted file mode 100644 index 5b19bdf0..00000000 --- a/rally/task/types.py +++ /dev/null @@ -1,253 +0,0 @@ -# Copyright (C) 2014 Yahoo! Inc. All Rights Reserved. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import abc -import copy -import operator -import re - -import six - -from rally.common.plugin import plugin -from rally import exceptions -from rally import osclients -from rally.task import scenario - - -def _get_preprocessor_loader(plugin_name): - """Get a class that loads a preprocessor class. - - This returns a class with a single class method, ``transform``, - which, when called, finds a plugin and defers to its ``transform`` - class method. This is necessary because ``convert()`` is called as - a decorator at import time, but we cannot be confident that the - ResourceType plugins may not be loaded yet. (In fact, since - ``convert()`` is used to decorate plugins, we can be confident - that not all plugins are loaded when it is called.) - - This permits us to defer plugin searching until the moment when - ``preprocess()`` calls the various preprocessors, at which point - we can be certain that all plugins have been loaded and finding - them by name will work. - """ - def transform(cls, *args, **kwargs): - plug = ResourceType.get(plugin_name) - return plug.transform(*args, **kwargs) - - return type("PluginLoader_%s" % plugin_name, - (object,), - {"transform": classmethod(transform)}) - - -def convert(**kwargs): - """Decorator to define resource transformation(s) on scenario parameters. 
- - The ``kwargs`` passed as arguments are used to map a key in the - scenario config to the resource type plugin used to perform a - transformation on the value of the key. For instance: - - @types.convert(image={"type": "glance_image"}) - - This would convert the ``image`` key in the scenario configuration - to a Glance image by using the ``glance_image`` resource - plugin. Currently ``type`` is the only recognized key, but others - may be added in the future. - """ - preprocessors = dict([(k, _get_preprocessor_loader(v["type"])) - for k, v in kwargs.items()]) - - def wrapper(func): - func._meta_setdefault("preprocessors", {}) - func._meta_get("preprocessors").update(preprocessors) - return func - return wrapper - - -def preprocess(name, context, args): - """Run preprocessor on scenario arguments. - - :param name: Plugin name - :param context: dictionary object that must have admin and credential - entries - :param args: args section of benchmark specification in rally task file - - :returns processed_args: dictionary object with additional client - and resource configuration - - """ - preprocessors = scenario.Scenario.get(name)._meta_get("preprocessors", - default={}) - - clients = None - if context.get("admin"): - clients = osclients.Clients(context["admin"]["credential"]) - elif context.get("users"): - clients = osclients.Clients(context["users"][0]["credential"]) - - processed_args = copy.deepcopy(args) - - for src, preprocessor in preprocessors.items(): - resource_cfg = processed_args.get(src) - if resource_cfg: - processed_args[src] = preprocessor.transform( - clients=clients, resource_config=resource_cfg) - return processed_args - - -@plugin.base() -@six.add_metaclass(abc.ABCMeta) -class ResourceType(plugin.Plugin): - - @classmethod - @abc.abstractmethod - def transform(cls, clients, resource_config): - """Transform the resource. - - :param clients: openstack admin client handles - :param resource_config: scenario config of resource - - :returns: transformed value of resource - """ - - @classmethod - def _get_doc(cls): - return cls.transform.__doc__ - - -def obj_from_name(resource_config, resources, typename): - """Return the resource whose name matches the pattern. - - resource_config has to contain `name`, as it is used to lookup a resource. - Value of the name will be treated as regexp. - - An `InvalidScenarioArgument` is thrown if the pattern does - not match unambiguously. 
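Putting convert() and ResourceType together, a hypothetical plugin (the name "illustrative_flavor" and the scenario class are made up; obj_from_name is the lookup helper defined just below) could resolve a flavor name in the task file into an id before the scenario runs:

.. code-block:: python

    @plugin.configure(name="illustrative_flavor")
    class FlavorType(ResourceType):

        @classmethod
        def transform(cls, clients, resource_config):
            flavors = clients.nova().flavors.list()
            return obj_from_name(resource_config, flavors, "flavor").id


    @types.convert(flavor={"type": "illustrative_flavor"})
    @scenario.configure(name="Illustrative.boot_server")
    class BootServer(scenario.Scenario):

        def run(self, flavor):
            pass  # `flavor` arrives already resolved to an id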
- - :param resource_config: resource to be transformed - :param resources: iterable containing all resources - :param typename: name which describes the type of resource - - :returns: resource object uniquely mapped to `name` or `regex` - """ - if "name" in resource_config: - # In a case of pattern string exactly matches resource name - matching_exact = [resource for resource in resources - if resource.name == resource_config["name"]] - if len(matching_exact) == 1: - return matching_exact[0] - elif len(matching_exact) > 1: - raise exceptions.InvalidScenarioArgument( - "{typename} with name '{pattern}' " - "is ambiguous, possible matches " - "by id: {ids}".format(typename=typename.title(), - pattern=resource_config["name"], - ids=", ".join(map( - operator.attrgetter("id"), - matching_exact)))) - # Else look up as regex - patternstr = resource_config["name"] - elif "regex" in resource_config: - patternstr = resource_config["regex"] - else: - raise exceptions.InvalidScenarioArgument( - "{typename} 'id', 'name', or 'regex' not found " - "in '{resource_config}' ".format(typename=typename.title(), - resource_config=resource_config)) - - pattern = re.compile(patternstr) - matching = [resource for resource in resources - if re.search(pattern, resource.name)] - if not matching: - raise exceptions.InvalidScenarioArgument( - "{typename} with pattern '{pattern}' not found".format( - typename=typename.title(), pattern=pattern.pattern)) - elif len(matching) > 1: - raise exceptions.InvalidScenarioArgument( - "{typename} with name '{pattern}' is ambiguous, possible matches " - "by id: {ids}".format(typename=typename.title(), - pattern=pattern.pattern, - ids=", ".join(map(operator.attrgetter("id"), - matching)))) - return matching[0] - - -def obj_from_id(resource_config, resources, typename): - """Return the resource whose name matches the id. - - resource_config has to contain `id`, as it is used to lookup a resource. - - :param resource_config: resource to be transformed - :param resources: iterable containing all resources - :param typename: name which describes the type of resource - - :returns: resource object mapped to `id` - """ - if "id" in resource_config: - matching = [resource for resource in resources - if resource.id == resource_config["id"]] - if len(matching) == 1: - return matching[0] - elif len(matching) > 1: - raise exceptions.MultipleMatchesFound( - needle="{typename} with id '{id}'".format( - typename=typename.title(), id=resource_config["id"]), - haystack=matching) - else: - raise exceptions.InvalidScenarioArgument( - "{typename} with id '{id}' not found".format( - typename=typename.title(), id=resource_config["id"])) - else: - raise exceptions.InvalidScenarioArgument( - "{typename} 'id' not found in '{resource_config}'".format( - typename=typename.title(), resource_config=resource_config)) - - -def _id_from_name(resource_config, resources, typename, id_attr="id"): - """Return the id of the resource whose name matches the pattern. - - resource_config has to contain `name`, as it is used to lookup an id. - Value of the name will be treated as regexp. - - An `InvalidScenarioArgument` is thrown if the pattern does - not match unambiguously. 
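The lookup semantics in a nutshell, using stand-in resources (illustrative only):

.. code-block:: python

    import collections

    Res = collections.namedtuple("Res", ["id", "name"])
    pool = [Res("1", "db-master"), Res("2", "db-replica")]

    obj_from_name({"name": "db-master"}, pool, "server")  # exact name match
    obj_from_id({"id": "2"}, pool, "server")              # id lookup
    obj_from_name({"regex": "^db-"}, pool, "server")      # raises: two matches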
- - :param resource_config: resource to be transformed - :param resources: iterable containing all resources - :param typename: name which describes the type of resource - :param id_attr: id or uuid should be returned - - :returns: resource id uniquely mapped to `name` or `regex` - """ - try: - return getattr(obj_from_name(resource_config, resources, typename), - id_attr) - except AttributeError: - raise exceptions.RallyException( - "There is no attribute {attr} in the object {type}".format( - attr=id_attr, type=typename)) - - -def _name_from_id(resource_config, resources, typename): - """Return the name of the resource which has the id. - - resource_config has to contain `id`, as it is used to lookup a name. - - :param resource_config: resource to be transformed - :param resources: iterable containing all resources - :param typename: name which describes the type of resource - - :returns: resource name mapped to `id` - """ - return obj_from_id(resource_config, resources, typename).name diff --git a/rally/task/utils.py b/rally/task/utils.py deleted file mode 100644 index d1800cf7..00000000 --- a/rally/task/utils.py +++ /dev/null @@ -1,480 +0,0 @@ -# Copyright 2013: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import collections -import itertools -import time -import traceback - -import jsonschema -from novaclient import exceptions as nova_exc -import six - -from rally.common.i18n import _ -from rally.common import logging -from rally import consts -from rally import exceptions - - -LOG = logging.getLogger(__name__) - - -def get_status(resource, status_attr="status"): - """Get the status of a given resource object. - - The status is returned in upper case. The status is checked for the - standard field names with special cases for Heat and Ceilometer. - - :param resource: The resource object or dict. - :param status_attr: Allows to specify non-standard status fields. - :return: The status or "NONE" if it is not available. 
- """ - - for s_attr in ["stack_status", "state", status_attr]: - status = getattr(resource, s_attr, None) - if isinstance(status, six.string_types): - return status.upper() - - # Dict case - if ((isinstance(resource, dict) and status_attr in resource.keys() and - isinstance(resource[status_attr], six.string_types))): - return resource[status_attr].upper() - - return "NONE" - - -class resource_is(object): - def __init__(self, desired_status, status_getter=None): - self.desired_status = desired_status - self.status_getter = status_getter or get_status - - def __call__(self, resource): - return self.status_getter(resource) == self.desired_status.upper() - - def __str__(self): - return str(self.desired_status) - - -def get_from_manager(error_statuses=None): - error_statuses = error_statuses or ["ERROR"] - error_statuses = map(lambda str: str.upper(), error_statuses) - - def _get_from_manager(resource, id_attr="id"): - # catch client side errors - try: - res = resource.manager.get(getattr(resource, id_attr)) - except Exception as e: - if getattr(e, "code", getattr(e, "http_status", 400)) == 404: - raise exceptions.GetResourceNotFound(resource=resource) - raise exceptions.GetResourceFailure(resource=resource, err=e) - - # catch abnormal status, such as "no valid host" for servers - status = get_status(res) - - if status in ("DELETED", "DELETE_COMPLETE"): - raise exceptions.GetResourceNotFound(resource=res) - if status in error_statuses: - raise exceptions.GetResourceErrorStatus( - resource=res, status=status, - fault=getattr(res, "fault", "n/a")) - - return res - - return _get_from_manager - - -def manager_list_size(sizes): - def _list(mgr): - return len(mgr.list()) in sizes - return _list - - -@logging.log_deprecated("Use wait_for_status instead.", "0.1.2", once=True) -def wait_for(resource, is_ready=None, ready_statuses=None, - failure_statuses=None, status_attr="status", update_resource=None, - timeout=60, check_interval=1, id_attr="id"): - """Waits for the given resource to come into the one of the given statuses. - - The method can be used to check resource for status with a `is_ready` - function or with a list of expected statuses and the status attribute - - In case when the is_ready checker is not provided the resource should have - status_attr. It may be an object attribute or a dictionary key. The value - of the attribute is checked against ready statuses list and failure - statuses. In case of a failure the wait exits with an exception. The - resource is updated between iterations with an update_resource call. - - :param is_ready: A predicate that should take the resource object and - return True iff it is ready to be returned - :param ready_statuses: List of statuses which mean that the resource is - ready - :param failure_statuses: List of statuses which mean that an error has - occurred while waiting for the resource - :param status_attr: The name of the status attribute of the resource - :param update_resource: Function that should take the resource object - and return an 'updated' resource. 
If set to - None, no result updating is performed - :param timeout: Timeout in seconds after which a TimeoutException will be - raised - :param check_interval: Interval in seconds between the two consecutive - readiness checks - - :returns: The "ready" resource object - """ - - if is_ready is not None: - return wait_is_ready(resource=resource, is_ready=is_ready, - update_resource=update_resource, timeout=timeout, - check_interval=check_interval) - else: - return wait_for_status(resource=resource, - ready_statuses=ready_statuses, - failure_statuses=failure_statuses, - status_attr=status_attr, - update_resource=update_resource, - timeout=timeout, - check_interval=check_interval, - id_attr=id_attr) - - -@logging.log_deprecated("Use wait_for_status instead.", "0.1.2", once=True) -def wait_is_ready(resource, is_ready, update_resource=None, - timeout=60, check_interval=1): - - resource_repr = getattr(resource, "name", repr(resource)) - start = time.time() - - while True: - if update_resource is not None: - resource = update_resource(resource) - - if is_ready(resource): - return resource - - time.sleep(check_interval) - if time.time() - start > timeout: - raise exceptions.TimeoutException( - desired_status=str(is_ready), - resource_name=resource_repr, - resource_type=resource.__class__.__name__, - resource_id=getattr(resource, "id", ""), - resource_status=get_status(resource)) - - -def wait_for_status(resource, ready_statuses, failure_statuses=None, - status_attr="status", update_resource=None, - timeout=60, check_interval=1, check_deletion=False, - id_attr="id"): - - resource_repr = getattr(resource, "name", repr(resource)) - if not isinstance(ready_statuses, (set, list, tuple)): - raise ValueError("Ready statuses should be supplied as set, list or " - "tuple") - if failure_statuses and not isinstance(failure_statuses, - (set, list, tuple)): - raise ValueError("Failure statuses should be supplied as set, list or " - "tuple") - - # make all statuses upper case - ready_statuses = set(s.upper() for s in ready_statuses or []) - failure_statuses = set(s.upper() for s in failure_statuses or []) - - if (ready_statuses & failure_statuses): - raise ValueError( - "Can't wait for resource's %s status. Ready and failure " - "statuses conflict." % resource_repr) - if not ready_statuses: - raise ValueError( - "Can't wait for resource's %s status. No ready " - "statuses provided" % resource_repr) - if not update_resource: - raise ValueError( - "Can't wait for resource's %s status. No update method." - % resource_repr) - - start = time.time() - - latest_status = get_status(resource, status_attr) - latest_status_update = start - - while True: - try: - if id_attr == "id": - resource = update_resource(resource) - else: - resource = update_resource(resource, id_attr=id_attr) - except exceptions.GetResourceNotFound: - if check_deletion: - return - else: - raise - status = get_status(resource, status_attr) - - if status != latest_status: - current_time = time.time() - delta = current_time - latest_status_update - LOG.debug( - "Waiting for resource %(resource)s. 
Status changed: " - "%(latest)s => %(current)s in %(delta)s" % - {"resource": resource_repr, "latest": latest_status, - "current": status, "delta": delta}) - - latest_status = status - latest_status_update = current_time - - if status in ready_statuses: - return resource - if status in failure_statuses: - raise exceptions.GetResourceErrorStatus( - resource=resource, - status=status, - fault="Status in failure list %s" % str(failure_statuses)) - - time.sleep(check_interval) - if time.time() - start > timeout: - raise exceptions.TimeoutException( - desired_status="('%s')" % "', '".join(ready_statuses), - resource_name=resource_repr, - resource_type=resource.__class__.__name__, - resource_id=getattr(resource, id_attr, ""), - resource_status=get_status(resource, status_attr)) - - -@logging.log_deprecated("Use wait_for_status instead.", "0.1.2", once=True) -def wait_for_delete(resource, update_resource=None, timeout=60, - check_interval=1): - """Wait for the full deletion of resource. - - :param update_resource: Function that should take the resource object - and return an 'updated' resource, or raise - exception rally.exceptions.GetResourceNotFound - that means that resource is deleted. - - :param timeout: Timeout in seconds after which a TimeoutException will be - raised - :param check_interval: Interval in seconds between the two consecutive - readiness checks - """ - start = time.time() - while True: - try: - resource = update_resource(resource) - except exceptions.GetResourceNotFound: - break - time.sleep(check_interval) - if time.time() - start > timeout: - raise exceptions.TimeoutException( - desired_status="deleted", - resource_name=getattr(resource, "name", repr(resource)), - resource_type=resource.__class__.__name__, - resource_id=getattr(resource, "id", ""), - resource_status=get_status(resource)) - - -def format_exc(exc): - return [exc.__class__.__name__, str(exc), traceback.format_exc()] - - -def infinite_run_args_generator(args_func): - for i in itertools.count(): - yield args_func(i) - - -def check_service_status(client, service_name): - """Check if given openstack service is enabled and state is up.""" - try: - for service in client.services.list(): - if service_name in str(service): - if service.status == "enabled" and service.state == "up": - return True - except nova_exc.NotFound: - LOG.warning(_("Unable to retrieve a list of available services from " - "nova. Pre-Grizzly OpenStack deployment?")) - return False - return False - - -class ActionBuilder(object): - """Builder class for mapping and creating action objects. - - An action list is an array of single key/value dicts which takes - the form: - - [{"action": times}, {"action": times}...] - - Here 'action' is a string which indicates an action to perform and - 'times' is a non-zero positive integer which specifies how many - times to run the action in sequence. - - This utility builder class will build and return methods which - wrapper the action call the given amount of times. - """ - - SCHEMA_TEMPLATE = { - "type": "array", - "$schema": consts.JSON_SCHEMA, - "items": { - "type": "object", - "properties": {}, - "additionalProperties": False, - "minItems": 0 - } - } - - ITEM_TEMPLATE = { - "type": "integer", - "minimum": 0, - "exclusiveMinimum": True, - "optional": True - } - - def __init__(self, action_keywords): - """Create a new instance of the builder for the given action keywords. - - :param action_keywords: A list of strings which are the keywords this - instance of the builder supports. 
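The usual pairing of wait_for_status() with get_from_manager(), as seen throughout rally's OpenStack plugins, looks like this (assuming `server` is a freshly booted novaclient server object; the values are illustrative):

.. code-block:: python

    server = wait_for_status(
        server,
        ready_statuses=["ACTIVE"],
        failure_statuses=["ERROR"],
        update_resource=get_from_manager(),
        timeout=120,
        check_interval=2)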
- """ - self._bindings = {} - self.schema = dict(ActionBuilder.SCHEMA_TEMPLATE) - for kw in action_keywords: - self.schema["items"]["properties"][kw] = ( - ActionBuilder.ITEM_TEMPLATE) - - def bind_action(self, action_key, action, *args, **kwargs): - """Bind an action to an action key. - - Static args/kwargs can be optionally binded. - :param action_key: The action keyword to bind the action to. - :param action: A method/function to call for the action. - :param args: (optional) Static positional args to prepend - to all invocations of the action. - :param kwargs: (optional) Static kwargs to prepend to all - invocations of the action. - """ - self.validate([{action_key: 1}]) - self._bindings[action_key] = { - "action": action, - "args": args or (), - "kwargs": kwargs or {} - } - - def validate(self, actions): - """Validate the list of action objects against the builder schema. - - :param actions: The list of action objects to validate. - """ - jsonschema.validate(actions, self.schema) - - def _build(self, func, times, *args, **kwargs): - """Build the wrapper action call.""" - def _f(): - for i in range(times): - func(*args, **kwargs) - return _f - - def build_actions(self, actions, *args, **kwargs): - """Build a list of callable actions. - - A list of callable actions based on the given action object list and - the actions bound to this builder. - - :param actions: A list of action objects to build callable - action for. - :param args: (optional) Positional args to pass into each - built action. These will be appended to any args set for the - action via its binding. - :param kwargs: (optional) Keyword args to pass into each built - action. These will be appended to any kwargs set for the action - via its binding. - """ - self.validate(actions) - bound_actions = [] - for action in actions: - action_key = list(action)[0] - times = action.get(action_key) - binding = self._bindings.get(action_key) - dft_kwargs = dict(binding["kwargs"]) - dft_kwargs.update(kwargs or {}) - bound_actions.append( - self._build(binding["action"], times, - *(binding["args"] + args), **dft_kwargs)) - return bound_actions - - -# TODO(andreykurilin): We need to implement some wrapper for atomic actions, -# we can use these wrapper to simulate new and old format. -class WrapperForAtomicActions(list): - - def __init__(self, atomic_actions, timestamp=0): - - self.timestamp = timestamp - - if isinstance(atomic_actions, list): - self.__atomic_actions = atomic_actions - self.__old_atomic_actions = self._convert_new_atomic_actions( - self.__atomic_actions) - else: - self.__atomic_actions = self._convert_old_atomic_actions( - atomic_actions) - self.__old_atomic_actions = atomic_actions - - super(WrapperForAtomicActions, self).__init__(self.__atomic_actions) - - def _convert_old_atomic_actions(self, old_atomic_actions): - """Convert atomic actions to new format. """ - atomic_actions = [] - started_at = self.timestamp - for name, duration in old_atomic_actions.items(): - finished_at = started_at + duration - atomic_actions.append({"name": name, - "started_at": started_at, - "finished_at": finished_at, - "children": []}) - started_at = finished_at - return atomic_actions - - def _convert_new_atomic_actions(self, atomic_actions): - """Convert atomic actions to old format. 
""" - old_style = collections.OrderedDict() - for action in atomic_actions: - duration = action["finished_at"] - action["started_at"] - if action["name"] in old_style: - name_template = action["name"] + " (%i)" - i = 2 - while name_template % i in old_style: - i += 1 - old_style[name_template % i] = duration - else: - old_style[action["name"]] = duration - return old_style - - def items(self): - return self.__old_atomic_actions.items() - - def get(self, name, default=None): - return self.__old_atomic_actions.get(name, default) - - def __iter__(self): - return iter(self.__atomic_actions) - - def __len__(self): - return len(self.__atomic_actions) - - def __getitem__(self, item): - if isinstance(item, int): - # it is a call to list: - return self.__atomic_actions[item] - else: - return self.__old_atomic_actions[item] diff --git a/rally/task/validation.py b/rally/task/validation.py deleted file mode 100755 index 8ab232b8..00000000 --- a/rally/task/validation.py +++ /dev/null @@ -1,389 +0,0 @@ -# Copyright 2014: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may - -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import functools -import os -import re - -from glanceclient import exc as glance_exc -from novaclient import exceptions as nova_exc -import six - -from rally.common.i18n import _ -from rally.common import logging -from rally.common import validation -from rally.common import yamlutils as yaml -from rally import exceptions -from rally.plugins.openstack.context.nova import flavors as flavors_ctx -from rally.plugins.openstack import types as openstack_types -from rally.task import types - -LOG = logging.getLogger(__name__) - -# TODO(astudenov): remove after deprecating all old validators -ValidationResult = validation.ValidationResult -add = validation.add - - -@validation.add("required_platform", platform="openstack", users=True) -@validation.configure(name="old_validator", platform="openstack") -class OldValidator(validation.Validator): - - class Deployment(object): - pass - - def __init__(self, fn, *args, **kwargs): - """Legacy validator for OpenStack scenarios - - :param fn: function that performs validation - """ - self.fn = fn - self.args = args - self.kwargs = kwargs - - def validate(self, credentials, config, plugin_cls, plugin_cfg): - creds = credentials.get("openstack", {}) - users = creds.get("users", []) - - deployment = self.Deployment() - deployment.get_credentials_for = credentials.get - - if users: - users = [user["credential"].clients() for user in users] - for clients in users: - result = self._run_fn(config, deployment, clients) - if not result.is_valid: - return result - return ValidationResult(True) - else: - return self._run_fn(config, deployment) - - def _run_fn(self, config, deployment, clients=None): - return (self.fn(config, clients, deployment, - *self.args, **self.kwargs) or ValidationResult(True)) - - -def validator(fn): - """Decorator that constructs a scenario validator from given function. - - Decorated function should return ValidationResult on error. 
diff --git a/rally/task/validation.py b/rally/task/validation.py
deleted file mode 100755
index 8ab232b8..00000000
--- a/rally/task/validation.py
+++ /dev/null
@@ -1,389 +0,0 @@
-# Copyright 2014: Mirantis Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import functools
-import os
-import re
-
-from glanceclient import exc as glance_exc
-from novaclient import exceptions as nova_exc
-import six
-
-from rally.common.i18n import _
-from rally.common import logging
-from rally.common import validation
-from rally.common import yamlutils as yaml
-from rally import exceptions
-from rally.plugins.openstack.context.nova import flavors as flavors_ctx
-from rally.plugins.openstack import types as openstack_types
-from rally.task import types
-
-LOG = logging.getLogger(__name__)
-
-# TODO(astudenov): remove after deprecating all old validators
-ValidationResult = validation.ValidationResult
-add = validation.add
-
-
-@validation.add("required_platform", platform="openstack", users=True)
-@validation.configure(name="old_validator", platform="openstack")
-class OldValidator(validation.Validator):
-
-    class Deployment(object):
-        pass
-
-    def __init__(self, fn, *args, **kwargs):
-        """Legacy validator for OpenStack scenarios.
-
-        :param fn: function that performs validation
-        """
-        self.fn = fn
-        self.args = args
-        self.kwargs = kwargs
-
-    def validate(self, credentials, config, plugin_cls, plugin_cfg):
-        creds = credentials.get("openstack", {})
-        users = creds.get("users", [])
-
-        deployment = self.Deployment()
-        deployment.get_credentials_for = credentials.get
-
-        if users:
-            users = [user["credential"].clients() for user in users]
-            for clients in users:
-                result = self._run_fn(config, deployment, clients)
-                if not result.is_valid:
-                    return result
-            return ValidationResult(True)
-        else:
-            return self._run_fn(config, deployment)
-
-    def _run_fn(self, config, deployment, clients=None):
-        return (self.fn(config, clients, deployment,
-                        *self.args, **self.kwargs) or ValidationResult(True))
-
-
-def validator(fn):
-    """Decorator that constructs a scenario validator from a given function.
-
-    The decorated function should return a ValidationResult on error.
-
-    :param fn: function that performs validation
-    :returns: rally scenario validator
-    """
-    def wrap_given(*args, **kwargs):
-        """Dynamic validation decorator for a scenario.
-
-        :param args: the arguments of the decorator of the benchmark scenario,
-            e.g. @my_decorator("arg1"), then args = ("arg1",)
-        :param kwargs: the keyword arguments of the decorator of the scenario,
-            e.g. @my_decorator(kwarg1="kwarg1"), then kwargs = {"kwarg1": "kwarg1"}
-        """
-        def wrap_scenario(scenario):
-            scenario._meta_setdefault("validators", [])
-            scenario._meta_get("validators").append(
-                ("old_validator", (fn, ) + args, kwargs))
-            return scenario
-
-        return wrap_scenario
-
-    return wrap_given
-
-
-def _file_access_ok(filename, mode, param_name, required=True):
-    if not filename:
-        return ValidationResult(not required,
-                                "Parameter %s required" % param_name)
-    if not os.access(os.path.expanduser(filename), mode):
-        return ValidationResult(
-            False, "Could not open %(filename)s with mode %(mode)s "
-                   "for parameter %(param_name)s"
-            % {"filename": filename, "mode": mode, "param_name": param_name})
-    return ValidationResult(True)
-
-
-def check_command_dict(command):
-    """Check the command-specifying dict `command'; raise ValueError on error."""
-
-    if not isinstance(command, dict):
-        raise ValueError("Command must be a dictionary")
-
-    # NOTE(pboldin): Here we check for the values, not for presence of the
-    # keys, due to template-driven configuration generation that can leave
-    # keys defined but values empty.
-    if command.get("interpreter"):
-        script_file = command.get("script_file")
-        if script_file:
-            if "script_inline" in command:
-                raise ValueError(
-                    "Exactly one of script_inline or script_file with "
-                    "interpreter is expected: %r" % command)
-        # User tries to upload a shell? Make sure it is the same as the
-        # interpreter.
-        interpreter = command.get("interpreter")
-        interpreter = (interpreter[-1]
-                       if isinstance(interpreter, (tuple, list))
-                       else interpreter)
-        if (command.get("local_path") and
-                command.get("remote_path") != interpreter):
-            raise ValueError(
-                "When uploading an interpreter, its path should also be "
-                "specified as the `remote_path' string: %r" % command)
-    elif not command.get("remote_path"):
-        # No interpreter and no remote command to execute is given
-        raise ValueError(
-            "Supplied dict specifies no command to execute; either "
-            "`interpreter' or `remote_path' is required: %r" % command)
-
-    unexpected_keys = set(command) - set(["script_file", "script_inline",
-                                          "interpreter", "remote_path",
-                                          "local_path", "command_args"])
-    if unexpected_keys:
-        raise ValueError(
-            "Unexpected command parameters: %s" % ", ".join(unexpected_keys))
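
A quick demonstration of the rules check_command_dict enforces; paste the function as defined above into scope to run it. The sample dictionaries are invented for illustration:

    valid_inline = {"interpreter": "/bin/sh", "script_inline": "echo hello"}
    valid_remote = {"remote_path": "/usr/bin/uptime", "command_args": ["-p"]}
    # exactly one of script_inline/script_file is allowed with an interpreter:
    bad_both = {"interpreter": "/bin/sh",
                "script_file": "run.sh", "script_inline": "echo hi"}
    bad_extra = {"remote_path": "/bin/true", "shell": "bash"}  # unexpected key

    for cmd in (valid_inline, valid_remote, bad_both, bad_extra):
        try:
            check_command_dict(cmd)
            print("accepted:", cmd)
        except ValueError as e:
            print("rejected:", e)
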
-
-
-@validator
-def valid_command(config, clients, deployment, param_name, required=True):
-    """Check that a parameter is a proper command-specifying dictionary.
-
-    The expected command dictionary format is described in the
-    `vmtasks.VMTasks.boot_runcommand_delete' docstring.
-
-    :param param_name: Name of the parameter to validate
-    :param required: Boolean indicating whether the command dictionary
-                     is required
-    """
-    # TODO(amaretskiy): rework this validator into a ResourceType, so that
-    #     it can validate parameter values as well
-
-    command = config.get("args", {}).get(param_name)
-    if command is None and not required:
-        return ValidationResult(True)
-
-    try:
-        check_command_dict(command)
-    except ValueError as e:
-        return ValidationResult(False, str(e))
-
-    for key in "script_file", "local_path":
-        if command.get(key):
-            return _file_access_ok(
-                filename=command[key],
-                mode=os.R_OK,
-                param_name=param_name + "." + key,
-                required=True)
-
-    return ValidationResult(True)
-
-
-def _get_validated_image(config, clients, param_name):
-    image_context = config.get("context", {}).get("images", {})
-    image_args = config.get("args", {}).get(param_name)
-    image_ctx_name = image_context.get("image_name")
-
-    if not image_args:
-        msg = _("Parameter %s is not specified.") % param_name
-        return (ValidationResult(False, msg), None)
-
-    if "image_name" in image_context:
-        # NOTE(rvasilets): accept the context image if its name exactly
-        # matches the regex from the args, or if the image name from the
-        # context equals the image name from the args
-        if "regex" in image_args:
-            match = re.match(image_args.get("regex"), image_ctx_name)
-        if image_ctx_name == image_args.get("name") or (
-                "regex" in image_args and match):
-            image = {
-                "size": image_context.get("min_disk", 0),
-                "min_ram": image_context.get("min_ram", 0),
-                "min_disk": image_context.get("min_disk", 0)
-            }
-            return (ValidationResult(True), image)
-    try:
-        image_id = openstack_types.GlanceImage.transform(
-            clients=clients, resource_config=image_args)
-        image = clients.glance().images.get(image_id)
-        if hasattr(image, "to_dict"):
-            # NOTE(stpierre): Glance v1 images are objects that can be
-            #                 converted to dicts; Glance v2 images are
-            #                 already dict-like
-            image = image.to_dict()
-        if not image.get("size"):
-            image["size"] = 0
-        if not image.get("min_ram"):
-            image["min_ram"] = 0
-        if not image.get("min_disk"):
-            image["min_disk"] = 0
-        return (ValidationResult(True), image)
-    except (glance_exc.HTTPNotFound, exceptions.InvalidScenarioArgument):
-        message = _("Image '%s' not found") % image_args
-        return (ValidationResult(False, message), None)
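
The context short-circuit at the top of _get_validated_image is easy to see in isolation: when the "images" context provides the image, the validator accepts it on an exact name match or a regex match without ever calling Glance. A stdlib-only sketch with invented names:

    import re

    image_ctx_name = "rally-image-42"                    # from the images context
    for image_args in ({"name": "rally-image-42"},       # exact-name match
                       {"regex": r"^rally-image-\d+$"},  # regex match
                       {"name": "other-image"}):         # falls through to Glance
        match = ("regex" in image_args and
                 re.match(image_args["regex"], image_ctx_name))
        if image_ctx_name == image_args.get("name") or match:
            print("validated from context:", image_args)
        else:
            print("would be looked up in Glance:", image_args)
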
-
-
-def _get_flavor_from_context(config, flavor_value):
-    if "flavors" not in config.get("context", {}):
-        raise exceptions.InvalidScenarioArgument("No flavors context")
-
-    flavors = [flavors_ctx.FlavorConfig(**f)
-               for f in config["context"]["flavors"]]
-    resource = types.obj_from_name(resource_config=flavor_value,
-                                   resources=flavors, typename="flavor")
-    flavor = flavors_ctx.FlavorConfig(**resource)
-    flavor.id = "<context flavor: %s>" % flavor.name
-    return (ValidationResult(True), flavor)
-
-
-def _get_validated_flavor(config, clients, param_name):
-    flavor_value = config.get("args", {}).get(param_name)
-    if not flavor_value:
-        msg = "Parameter %s is not specified." % param_name
-        return (ValidationResult(False, msg), None)
-    try:
-        flavor_id = openstack_types.Flavor.transform(
-            clients=clients, resource_config=flavor_value)
-        flavor = clients.nova().flavors.get(flavor=flavor_id)
-        return (ValidationResult(True), flavor)
-    except (nova_exc.NotFound, exceptions.InvalidScenarioArgument):
-        try:
-            return _get_flavor_from_context(config, flavor_value)
-        except exceptions.InvalidScenarioArgument:
-            pass
-        message = _("Flavor '%s' not found") % flavor_value
-        return (ValidationResult(False, message), None)
-
-
-@validator
-def validate_share_proto(config, clients, deployment):
-    """Validates the value of the share protocol for creating a Manila share."""
-    allowed = ("NFS", "CIFS", "GLUSTERFS", "HDFS", "CEPHFS", )
-    share_proto = config.get("args", {}).get("share_proto")
-    if six.text_type(share_proto).upper() not in allowed:
-        message = _("Share protocol '%(sp)s' is invalid, allowed values are "
-                    "'%(allowed)s'.") % {"sp": share_proto,
-                                         "allowed": "', '".join(allowed)}
-        return ValidationResult(False, message)
-
-
-@validator
-def flavor_exists(config, clients, deployment, param_name):
-    """Validates that the specified flavor exists.
-
-    :param param_name: defines which variable should be used
-                       to get the flavor id value.
-    """
-    return _get_validated_flavor(config, clients, param_name)[0]
-
-
-@validator
-def workbook_contains_workflow(config, clients, deployment, workbook,
-                               workflow_name):
-    """Validate that the workflow exists in the workbook when one is passed.
-
-    :param workbook: parameter containing the workbook definition
-    :param workflow_name: parameter containing the workflow name
-    """
-
-    wf_name = config.get("args", {}).get(workflow_name)
-    if wf_name:
-        wb_path = config.get("args", {}).get(workbook)
-        wb_path = os.path.expanduser(wb_path)
-        file_result = _file_access_ok(config.get("args", {}).get(workbook),
-                                      os.R_OK, workbook)
-        if not file_result.is_valid:
-            return file_result
-
-        with open(wb_path, "r") as wb_def:
-            wb_def = yaml.safe_load(wb_def)
-            if wf_name not in wb_def["workflows"]:
-                return ValidationResult(
-                    False,
-                    "workflow '{}' not found in the definition '{}'".format(
-                        wf_name, wb_def))
-
-
-# TODO(astudenov): remove deprecated validators in 1.0.0
-
-def deprecated_validator(name, old_validator_name, rally_version):
-    def decorator(*args, **kwargs):
-        def wrapper(plugin):
-            plugin_name = plugin.get_name()
-            LOG.warning(
-                "Plugin '%s' uses validator 'rally.task.validation.%s' which "
-                "is deprecated in favor of '%s' (it should be used "
-                "via new decorator 'rally.common.validation.add') in "
-                "Rally v%s.",
-                plugin_name, old_validator_name, name, rally_version)
-            plugin._meta_setdefault("validators", [])
-            plugin._meta_get("validators").append((name, args, kwargs,))
-            return plugin
-        return wrapper
-    return decorator
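
A self-contained sketch of the deprecation shim above. Rally's LOG is swapped for the stdlib logger and DummyPlugin fakes the plugin meta API, so nothing here imports rally; both stand-ins are invented for illustration:

    import logging

    LOG = logging.getLogger(__name__)

    def deprecated_validator(name, old_validator_name, rally_version):
        # same shape as the factory above, stdlib logging instead of rally's
        def decorator(*args, **kwargs):
            def wrapper(plugin):
                LOG.warning("Plugin '%s' uses deprecated validator '%s'; "
                            "use '%s' (deprecated in Rally v%s).",
                            plugin.get_name(), old_validator_name,
                            name, rally_version)
                plugin._meta_setdefault("validators", [])
                plugin._meta_get("validators").append((name, args, kwargs))
                return plugin
            return wrapper
        return decorator

    class DummyPlugin(object):
        _meta = {}

        @classmethod
        def get_name(cls):
            return "DummyScenario"

        @classmethod
        def _meta_setdefault(cls, key, default):
            cls._meta.setdefault(key, default)

        @classmethod
        def _meta_get(cls, key):
            return cls._meta[key]

    number = deprecated_validator("number", "number", "0.10.0")

    @number(param_name="port", minval=1, maxval=65535)
    class MyScenario(DummyPlugin):
        pass

    print(MyScenario._meta_get("validators"))
    # [('number', (), {'param_name': 'port', 'minval': 1, 'maxval': 65535})]
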
deprecated_validator("required_clients", "required_clients", - "0.10.0") - -required_services = deprecated_validator("required_services", - "required_services", "0.10.0") - -validate_heat_template = deprecated_validator("validate_heat_template", - "validate_heat_template", - "0.10.0") - -restricted_parameters = deprecated_validator("restricted_parameters", - "restricted_parameters", - "0.10.0") - -required_cinder_services = deprecated_validator("required_cinder_services", - "required_cinder_services", - "0.10.0") - -required_api_versions = deprecated_validator("required_api_versions", - "required_api_versions", - "0.10.0") - -required_contexts = deprecated_validator("required_contexts", - "required_contexts", - "0.10.0") - -required_param_or_context = deprecated_validator("required_param_or_context", - "required_param_or_context", - "0.10.0") - -volume_type_exists = deprecated_validator("volume_type_exists", - "volume_type_exists", - "0.10.0") - -file_exists = deprecated_validator("file_exists", "file_exists", "0.10.0") diff --git a/rally/ui/__init__.py b/rally/ui/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/rally/ui/templates/base.html b/rally/ui/templates/base.html deleted file mode 100644 index 77802d8a..00000000 --- a/rally/ui/templates/base.html +++ /dev/null @@ -1,59 +0,0 @@ - - - - - - {%- if version %} - - {%- endif %} - Rally | {% block title_text %}{% endblock %} - {% block libs %}{% endblock %} - - - - - - - -
diff --git a/rally/ui/__init__.py b/rally/ui/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/rally/ui/templates/base.html b/rally/ui/templates/base.html
deleted file mode 100644
index 77802d8a..00000000
--- a/rally/ui/templates/base.html
+++ /dev/null
@@ -1,59 +0,0 @@
-[59-line HTML layout; the markup was lost in extraction and only these template directives survive:]
-{%- if version %}
-{%- endif %}
-Rally | {% block title_text %}{% endblock %}
-{% block libs %}{% endblock %}
-    {% block content %}{% endblock %}
diff --git a/rally/ui/templates/base.mako b/rally/ui/templates/base.mako
deleted file mode 100644
index 9d1fc0f9..00000000
--- a/rally/ui/templates/base.mako
+++ /dev/null
@@ -1,56 +0,0 @@
-[56-line HTML layout; the markup was lost in extraction and only these template directives survive:]
-Rally | <%block name="title_text"/>
-<%block name="libs"/>
-Rally <%block name="header_text"/>
-<%block name="content"/>
diff --git a/rally/ui/templates/ci/index.html b/rally/ui/templates/ci/index.html
deleted file mode 100644
index d52f6592..00000000
--- a/rally/ui/templates/ci/index.html
+++ /dev/null
@@ -1,80 +0,0 @@
-{% extends "/base.html" %}
-
-{% block title_text %}Performance job results{% endblock %}
-
-{% block js_after %}
-    function checkLink (elem) {
-        var request = new XMLHttpRequest();
-        request.open('GET', elem.href, true);
-        request.onreadystatechange = function(){
-            if (request.readyState === 4){
-                if (request.status === 404) {
-                    elem.href = elem.href + ".gz"
-                }
-            }
-        };
-        request.send();
-    }
-
-    var elems = document.getElementsByClassName("check-gz");
-
-    for(var i=0; i<elems.length; i++) { checkLink(elems[i]) }
-{% endblock %}
-
-{% block css %}
-    .columns li > :first-child { display:block }
-    .columns li > :nth-child(2) { display:block; position:static; left:165px; top:0; white-space:nowrap }
-{% endblock %}
-
-{% block media_queries %}
-    @media only screen and (min-width: 320px) { .content-wrap { width:400px } }
-    @media only screen and (min-width: 520px) { .content-wrap { width:500px } }
-    @media only screen and (min-width: 620px) { .content-wrap { width:90% } .columns li > :nth-child(2) { position:absolute } }
-    @media only screen and (min-width: 720px) { .content-wrap { width:70% } }
-{% endblock %}
-
-{% block header_text %}performance job results{% endblock %}
-
-{% block content %}
-Logs and files
-[link list lost in extraction]
-
-Job results, in different formats
-[link list lost in extraction]
-
-About Rally
-
-Rally is a benchmarking system for OpenStack:
-[link list lost in extraction]
-
-Steps to repeat locally
-
-1. Fetch rally task from here
-2. [...]
-3. Fetch rally plugins from here
-4. [...]
-5. Install OpenStack and Rally using this instruction
-6. [...]
-7. Unzip plugins and put to .rally/plugins/ directory
-8. [...]
-9. Run rally task: $ rally task start task.txt
-10. [...]
-{% endblock %}
diff --git a/rally/ui/templates/ci/index_verify.html b/rally/ui/templates/ci/index_verify.html
deleted file mode 100644
index e81a0b5f..00000000
--- a/rally/ui/templates/ci/index_verify.html
+++ /dev/null
@@ -1,55 +0,0 @@
-{% extends "/base.html" %}
-
-{% block title_text %}Rally Verification Job Results{% endblock %}
-
-{% block css %}
-    li { margin:2px 0 }
-{% if verifications|length == 1 %}
-    ol {padding: 0; list-style-type: none;}
-{% endif %}
-    a, a:visited { color:#039 }
-    code { padding:0 15px; color:#888; display: block }
-    .columns li { position:relative }
-    .columns li > :first-child { display:block }
-    .columns li > :nth-child(2) { display:block; position:static; left:165px; top:0; white-space:nowrap }
-    .fail {color: #cc0000}
-    .error {color: #cc0000}
-    .success {color: green}
-    .skip {color: #A9A9A9}
-{% endblock %}
-
-{% block css_content_wrap %}margin:0 auto; padding:0 5px{% endblock %}
-
-{% block media_queries %}
-    @media only screen and (min-width: 320px) { .content-wrap { width:400px } }
-    @media only screen and (min-width: 520px) { .content-wrap { width:500px } }
-    @media only screen and (min-width: 620px) { .content-wrap { width:90% } .columns li > :nth-child(2) { position:absolute } }
-    @media only screen and (min-width: 720px) { .content-wrap { width:70% } }
-{% endblock %}
-
-{% block header_text %}Verify Job Results{% endblock %}
-
-{% block content %}
-Logs and Results Files
-[link list lost in extraction]
-
-Steps
-
-{% for step in steps %}
-    {{ step|safe }}
-{% endfor %}
-
-About Rally
-
-Rally is a benchmarking and verification system for OpenStack:
-[link list lost in extraction]
    - -{% endblock %} diff --git a/rally/ui/templates/libs/README.rst b/rally/ui/templates/libs/README.rst deleted file mode 100644 index 85dba25b..00000000 --- a/rally/ui/templates/libs/README.rst +++ /dev/null @@ -1,6 +0,0 @@ -=============================== -Third-party files for templates -=============================== - -This directory includes third-party files -(JavaScript, CSS) that can be embedded into templates. diff --git a/rally/ui/templates/libs/angular.1.3.3.min.js b/rally/ui/templates/libs/angular.1.3.3.min.js deleted file mode 100644 index 2eb32c78..00000000 --- a/rally/ui/templates/libs/angular.1.3.3.min.js +++ /dev/null @@ -1,248 +0,0 @@ -/* - AngularJS v1.3.3 - (c) 2010-2014 Google, Inc. http://angularjs.org - License: MIT -*/ -(function(T,U,t){'use strict';function v(b){return function(){var a=arguments[0],c;c="["+(b?b+":":"")+a+"] http://errors.angularjs.org/1.3.3/"+(b?b+"/":"")+a;for(a=1;a").append(b).html();try{return b[0].nodeType===mb?Q(c):c.match(/^(<[^>]+>)/)[1].replace(/^<([\w\-]+)/,function(a,b){return"<"+Q(b)})}catch(d){return Q(c)}}function qc(b){try{return decodeURIComponent(b)}catch(a){}} -function rc(b){var a={},c,d;r((b||"").split("&"),function(b){b&&(c=b.replace(/\+/g,"%20").split("="),d=qc(c[0]),A(d)&&(b=A(c[1])?qc(c[1]):!0,Jb.call(a,d)?G(a[d])?a[d].push(b):a[d]=[a[d],b]:a[d]=b))});return a}function Kb(b){var a=[];r(b,function(b,d){G(b)?r(b,function(b){a.push(Da(d,!0)+(!0===b?"":"="+Da(b,!0)))}):a.push(Da(d,!0)+(!0===b?"":"="+Da(b,!0)))});return a.length?a.join("&"):""}function nb(b){return Da(b,!0).replace(/%26/gi,"&").replace(/%3D/gi,"=").replace(/%2B/gi,"+")}function Da(b,a){return encodeURIComponent(b).replace(/%40/gi, -"@").replace(/%3A/gi,":").replace(/%24/g,"$").replace(/%2C/gi,",").replace(/%3B/gi,";").replace(/%20/g,a?"%20":"+")}function Gd(b,a){var c,d,e=ob.length;b=y(b);for(d=0;d/,">"));}a=a||[];a.unshift(["$provide",function(a){a.value("$rootElement",b)}]);c.debugInfoEnabled&&a.push(["$compileProvider",function(a){a.debugInfoEnabled(!0)}]);a.unshift("ng");d=Lb(a,c.strictDi);d.invoke(["$rootScope","$rootElement","$compile","$injector",function(a,b,c,d){a.$apply(function(){b.data("$injector", -d);c(b)(a)})}]);return d},e=/^NG_ENABLE_DEBUG_INFO!/,f=/^NG_DEFER_BOOTSTRAP!/;T&&e.test(T.name)&&(c.debugInfoEnabled=!0,T.name=T.name.replace(e,""));if(T&&!f.test(T.name))return d();T.name=T.name.replace(f,"");va.resumeBootstrap=function(b){r(b,function(b){a.push(b)});d()}}function Id(){T.name="NG_ENABLE_DEBUG_INFO!"+T.name;T.location.reload()}function Jd(b){return va.element(b).injector().get("$$testability")}function Mb(b,a){a=a||"_";return b.replace(Kd,function(b,d){return(d?a:"")+b.toLowerCase()})} -function Ld(){var b;tc||((oa=T.jQuery)&&oa.fn.on?(y=oa,H(oa.fn,{scope:Ka.scope,isolateScope:Ka.isolateScope,controller:Ka.controller,injector:Ka.injector,inheritedData:Ka.inheritedData}),b=oa.cleanData,oa.cleanData=function(a){var c;if(Nb)Nb=!1;else for(var d=0,e;null!=(e=a[d]);d++)(c=oa._data(e,"events"))&&c.$destroy&&oa(e).triggerHandler("$destroy");b(a)}):y=R,va.element=y,tc=!0)}function Ob(b,a,c){if(!b)throw Wa("areq",a||"?",c||"required");return b}function pb(b,a,c){c&&G(b)&&(b=b[b.length-1]); -Ob(u(b),a,"not a function, got "+(b&&"object"===typeof b?b.constructor.name||"Object":typeof b));return b}function La(b,a){if("hasOwnProperty"===b)throw Wa("badname",a);}function uc(b,a,c){if(!a)return b;a=a.split(".");for(var d,e=b,f=a.length,g=0;g")+d[2];for(d=d[0];d--;)c=c.lastChild;f=Xa(f,c.childNodes);c=e.firstChild;c.textContent=""}else 
f.push(a.createTextNode(b));e.textContent="";e.innerHTML="";r(f,function(a){e.appendChild(a)});return e}function R(b){if(b instanceof R)return b;var a; -I(b)&&(b=P(b),a=!0);if(!(this instanceof R)){if(a&&"<"!=b.charAt(0))throw Qb("nosel");return new R(b)}if(a){a=U;var c;b=(c=df.exec(b))?[a.createElement(c[1])]:(c=Ec(b,a))?c.childNodes:[]}Fc(this,b)}function Rb(b){return b.cloneNode(!0)}function tb(b,a){a||ub(b);if(b.querySelectorAll)for(var c=b.querySelectorAll("*"),d=0,e=c.length;d 4096 bytes)!"));else{if(p.cookie!==A)for(A=p.cookie,d=A.split("; "),da={},f=0;fl&&this.remove(q.key),b},get:function(a){if(l").parent()[0])});var f=ca(a,b,a,c,d,e);C.$$addScopeClass(a);var g=null;return function(b, -c,d){Ob(b,"scope");d=d||{};var e=d.parentBoundTranscludeFn,h=d.transcludeControllers;d=d.futureParentElement;e&&e.$$boundTransclude&&(e=e.$$boundTransclude);g||(g=(d=d&&d[0])?"foreignobject"!==sa(d)&&d.toString().match(/SVG/)?"svg":"html":"html");d="html"!==g?y(T(g,y("
    ").append(a).html())):c?Ka.clone.call(a):a;if(h)for(var k in h)d.data("$"+k+"Controller",h[k].instance);C.$$addScopeInfo(d,b);c&&c(d,b);f&&f(b,d,d,e);return d}}function ca(a,b,c,d,e,f){function g(a,c,d,e){var f,k,l,q,s,p,B;if(n)for(B= -Array(c.length),q=0;qK.priority)break;if(v=K.scope)K.templateUrl||(L(v)?(ya("new/isolated scope",M||N,K,Y),M=K):ya("new/isolated scope",M,K,Y)),N=N||K;ga=K.name;!K.templateUrl&&K.controller&&(v=K.controller,z=z||{},ya("'"+ga+"' controller",z[ga],K,Y),z[ga]=K);if(v=K.transclude)w=!0,K.$$tlb||(ya("transclusion",fa,K,Y),fa=K),"element"==v?(H=!0,x=K.priority,v=Y,Y=e.$$element=y(U.createComment(" "+ga+ -": "+e[ga]+" ")),d=Y[0],Ab(g,Ya.call(v,0),d),Ga=C(v,f,x,k&&k.name,{nonTlbTranscludeDirective:fa})):(v=y(Rb(d)).contents(),Y.empty(),Ga=C(v,f));if(K.template)if(Na=!0,ya("template",da,K,Y),da=K,v=u(K.template)?K.template(Y,e):K.template,v=Qc(v),K.replace){k=K;v=Pb.test(v)?Rc(T(K.templateNamespace,P(v))):[];d=v[0];if(1!=v.length||d.nodeType!==la)throw ia("tplrt",ga,"");Ab(g,Y,d);za={$attr:{}};v=V(d,[],za);var of=a.splice(R+1,a.length-(R+1));M&&D(v);a=a.concat(v).concat(of);Pc(e,za);za=a.length}else Y.html(v); -if(K.templateUrl)Na=!0,ya("template",da,K,Y),da=K,K.replace&&(k=K),F=nf(a.splice(R,a.length-R),Y,e,g,w&&Ga,l,s,{controllerDirectives:z,newIsolateScopeDirective:M,templateDirective:da,nonTlbTranscludeDirective:fa}),za=a.length;else if(K.compile)try{Q=K.compile(Y,e,Ga),u(Q)?B(null,Q,zb,aa):Q&&B(Q.pre,Q.post,zb,aa)}catch(ba){c(ba,ua(Y))}K.terminal&&(F.terminal=!0,x=Math.max(x,K.priority))}F.scope=N&&!0===N.scope;F.transcludeOnThisElement=w;F.elementTranscludeOnThisElement=H;F.templateOnThisElement=Na; -F.transclude=Ga;p.hasElementTranscludeDirective=H;return F}function D(a){for(var b=0,c=a.length;bq.priority)&&-1!=q.restrict.indexOf(f)&&(k&&(q=mc(q,{$$start:k,$$end:l})),b.push(q),h=q)}catch(O){c(O)}}return h}function Pc(a,b){var c=b.$attr,d=a.$attr,e=a.$$element;r(a,function(d,e){"$"!=e.charAt(0)&& -(b[e]&&b[e]!==d&&(d+=("style"===e?";":" ")+b[e]),a.$set(e,d,!0,c[e]))});r(b,function(b,f){"class"==f?(N(e,b),a["class"]=(a["class"]?a["class"]+" ":"")+b):"style"==f?(e.attr("style",e.attr("style")+";"+b),a.style=(a.style?a.style+";":"")+b):"$"==f.charAt(0)||a.hasOwnProperty(f)||(a[f]=b,d[f]=c[f])})}function nf(a,b,c,d,e,f,g,h){var k=[],l,q,n=b[0],p=a.shift(),B=H({},p,{templateUrl:null,transclude:null,replace:null,$$originalDirective:p}),O=u(p.templateUrl)?p.templateUrl(b,c):p.templateUrl,E=p.templateNamespace; -b.empty();s(J.getTrustedResourceUrl(O)).then(function(s){var F,J;s=Qc(s);if(p.replace){s=Pb.test(s)?Rc(T(E,P(s))):[];F=s[0];if(1!=s.length||F.nodeType!==la)throw ia("tplrt",p.name,O);s={$attr:{}};Ab(d,b,F);var x=V(F,[],s);L(p.scope)&&D(x);a=x.concat(a);Pc(c,s)}else F=n,b.html(s);a.unshift(B);l=A(a,F,c,e,b,p,f,g,h);r(d,function(a,c){a==F&&(d[c]=b[0])});for(q=ca(b[0].childNodes,e);k.length;){s=k.shift();J=k.shift();var z=k.shift(),C=k.shift(),x=b[0];if(!s.$$destroyed){if(J!==n){var S=J.className;h.hasElementTranscludeDirective&& -p.replace||(x=Rb(F));Ab(z,y(J),x);N(y(x),S)}J=l.transcludeOnThisElement?M(s,l.transclude,C):C;l(q,s,x,d,J)}}k=null});return function(a,b,c,d,e){a=e;b.$$destroyed||(k?(k.push(b),k.push(c),k.push(d),k.push(a)):(l.transcludeOnThisElement&&(a=M(b,l.transclude,e)),l(q,b,c,d,a)))}}function v(a,b){var c=b.priority-a.priority;return 0!==c?c:a.name!==b.name?a.name"+b+"";return c.childNodes[0].childNodes;default:return b}}function Ga(a,b){if("srcdoc"==b)return J.HTML;var c=sa(a);if("xlinkHref"==b||"form"==c&&"action"==b|| 
-"img"!=c&&("src"==b||"ngSrc"==b))return J.RESOURCE_URL}function R(a,c,d,e,f){var h=b(d,!0);if(h){if("multiple"===e&&"select"===sa(a))throw ia("selmulti",ua(a));c.push({priority:100,compile:function(){return{pre:function(c,d,l){d=l.$$observers||(l.$$observers={});if(k.test(e))throw ia("nodomevents");l[e]&&(h=b(l[e],!0,Ga(a,e),g[e]||f))&&(l[e]=h(c),(d[e]||(d[e]=[])).$$inter=!0,(l.$$observers&&l.$$observers[e].$$scope||c).$watch(h,function(a,b){"class"===e&&a!=b?l.$updateClass(a,b):l.$set(e,a)}))}}}})}} -function Ab(a,b,c){var d=b[0],e=b.length,f=d.parentNode,g,h;if(a)for(g=0,h=a.length;g=a)return b;for(;a--;)8===b[a].nodeType&&qf.call(b,a,1);return b}function De(){var b={},a=!1,c=/^(\S+)(\s+as\s+(\w+))?$/;this.register=function(a,c){La(a,"controller");L(a)?H(b,a):b[a]=c};this.allowGlobals=function(){a=!0};this.$get=["$injector","$window",function(d,e){function f(a,b,c,d){if(!a||!L(a.$scope))throw v("$controller")("noscp",d, -b);a.$scope[b]=c}return function(g,h,k,l){var m,p,q;k=!0===k;l&&I(l)&&(q=l);I(g)&&(l=g.match(c),p=l[1],q=q||l[3],g=b.hasOwnProperty(p)?b[p]:uc(h.$scope,p,!0)||(a?uc(e,p,!0):t),pb(g,p,!0));if(k)return k=function(){},k.prototype=(G(g)?g[g.length-1]:g).prototype,m=new k,q&&f(h,q,m,p||g.name),H(function(){d.invoke(g,m,h,p);return m},{instance:m,identifier:q});m=d.instantiate(g,h,p);q&&f(h,q,m,p||g.name);return m}}]}function Ee(){this.$get=["$window",function(b){return y(b.document)}]}function Fe(){this.$get= -["$log",function(b){return function(a,c){b.error.apply(b,arguments)}}]}function Yb(b,a){if(I(b)){b=b.replace(rf,"");var c=a("Content-Type");if(c&&0===c.indexOf(Tc)&&b.trim()||sf.test(b)&&tf.test(b))b=pc(b)}return b}function Uc(b){var a={},c,d,e;if(!b)return a;r(b.split("\n"),function(b){e=b.indexOf(":");c=Q(P(b.substr(0,e)));d=P(b.substr(e+1));c&&(a[c]=a[c]?a[c]+", "+d:d)});return a}function Vc(b){var a=L(b)?b:t;return function(c){a||(a=Uc(b));return c?a[Q(c)]||null:a}}function Wc(b,a,c){if(u(c))return c(b, -a);r(c,function(c){b=c(b,a)});return b}function Ie(){var b=this.defaults={transformResponse:[Yb],transformRequest:[function(a){return L(a)&&"[object File]"!==Ja.call(a)&&"[object Blob]"!==Ja.call(a)?Za(a):a}],headers:{common:{Accept:"application/json, text/plain, */*"},post:ta(Zb),put:ta(Zb),patch:ta(Zb)},xsrfCookieName:"XSRF-TOKEN",xsrfHeaderName:"X-XSRF-TOKEN"},a=!1;this.useApplyAsync=function(b){return A(b)?(a=!!b,this):a};var c=this.interceptors=[];this.$get=["$httpBackend","$browser","$cacheFactory", -"$rootScope","$q","$injector",function(d,e,f,g,h,k){function l(a){function c(a){var b=H({},a);b.data=a.data?Wc(a.data,a.headers,d.transformResponse):a.data;a=a.status;return 200<=a&&300>a?b:h.reject(b)}var d={method:"get",transformRequest:b.transformRequest,transformResponse:b.transformResponse},e=function(a){var c=b.headers,d=H({},a.headers),e,f,c=H({},c.common,c[Q(a.method)]);a:for(e in c){a=Q(e);for(f in d)if(Q(f)===a)continue a;d[e]=c[e]}(function(a){var b;r(a,function(c,d){u(c)&&(b=c(),null!= -b?a[d]=b:delete a[d])})})(d);return d}(a);H(d,a);d.headers=e;d.method=rb(d.method);var f=[function(a){e=a.headers;var d=Wc(a.data,Vc(e),a.transformRequest);D(d)&&r(e,function(a,b){"content-type"===Q(b)&&delete e[b]});D(a.withCredentials)&&!D(b.withCredentials)&&(a.withCredentials=b.withCredentials);return m(a,d,e).then(c,c)},t],g=h.when(d);for(r(n,function(a){(a.request||a.requestError)&&f.unshift(a.request,a.requestError);(a.response||a.responseError)&&f.push(a.response,a.responseError)});f.length;){a= -f.shift();var 
k=f.shift(),g=g.then(a,k)}g.success=function(a){g.then(function(b){a(b.data,b.status,b.headers,d)});return g};g.error=function(a){g.then(null,function(b){a(b.data,b.status,b.headers,d)});return g};return g}function m(c,f,k){function n(b,c,d,e){function f(){m(c,b,d,e)}N&&(200<=b&&300>b?N.put(r,[b,c,Uc(d),e]):N.remove(r));a?g.$applyAsync(f):(f(),g.$$phase||g.$apply())}function m(a,b,d,e){b=Math.max(b,0);(200<=b&&300>b?z.resolve:z.reject)({data:a,status:b,headers:Vc(d),config:c,statusText:e})} -function J(){var a=l.pendingRequests.indexOf(c);-1!==a&&l.pendingRequests.splice(a,1)}var z=h.defer(),F=z.promise,N,C,r=p(c.url,c.params);l.pendingRequests.push(c);F.then(J,J);!c.cache&&!b.cache||!1===c.cache||"GET"!==c.method&&"JSONP"!==c.method||(N=L(c.cache)?c.cache:L(b.cache)?b.cache:q);if(N)if(C=N.get(r),A(C)){if(C&&u(C.then))return C.then(J,J),C;G(C)?m(C[1],C[0],ta(C[2]),C[3]):m(C,200,{},"OK")}else N.put(r,F);D(C)&&((C=Xc(c.url)?e.cookies()[c.xsrfCookieName||b.xsrfCookieName]:t)&&(k[c.xsrfHeaderName|| -b.xsrfHeaderName]=C),d(c.method,r,f,n,k,c.timeout,c.withCredentials,c.responseType));return F}function p(a,b){if(!b)return a;var c=[];Cd(b,function(a,b){null===a||D(a)||(G(a)||(a=[a]),r(a,function(a){L(a)&&(a=ea(a)?a.toISOString():Za(a));c.push(Da(b)+"="+Da(a))}))});0=k&&(s.resolve(q),p(O.$$intervalId),delete f[O.$$intervalId]);n||b.$apply()},h);f[O.$$intervalId]=s;return O}var f={}; -e.cancel=function(b){return b&&b.$$intervalId in f?(f[b.$$intervalId].reject("canceled"),a.clearInterval(b.$$intervalId),delete f[b.$$intervalId],!0):!1};return e}]}function Pd(){this.$get=function(){return{id:"en-us",NUMBER_FORMATS:{DECIMAL_SEP:".",GROUP_SEP:",",PATTERNS:[{minInt:1,minFrac:0,maxFrac:3,posPre:"",posSuf:"",negPre:"-",negSuf:"",gSize:3,lgSize:3},{minInt:1,minFrac:2,maxFrac:2,posPre:"\u00a4",posSuf:"",negPre:"(\u00a4",negSuf:")",gSize:3,lgSize:3}],CURRENCY_SYM:"$"},DATETIME_FORMATS:{MONTH:"January February March April May June July August September October November December".split(" "), -SHORTMONTH:"Jan Feb Mar Apr May Jun Jul Aug Sep Oct Nov Dec".split(" "),DAY:"Sunday Monday Tuesday Wednesday Thursday Friday Saturday".split(" "),SHORTDAY:"Sun Mon Tue Wed Thu Fri Sat".split(" "),AMPMS:["AM","PM"],medium:"MMM d, y h:mm:ss a","short":"M/d/yy h:mm a",fullDate:"EEEE, MMMM d, y",longDate:"MMMM d, y",mediumDate:"MMM d, y",shortDate:"M/d/yy",mediumTime:"h:mm:ss a",shortTime:"h:mm a"},pluralCat:function(b){return 1===b?"one":"other"}}}}function ac(b){b=b.split("/");for(var a=b.length;a--;)b[a]= -nb(b[a]);return b.join("/")}function Yc(b,a){var c=Aa(b);a.$$protocol=c.protocol;a.$$host=c.hostname;a.$$port=aa(c.port)||wf[c.protocol]||null}function Zc(b,a){var c="/"!==b.charAt(0);c&&(b="/"+b);var d=Aa(b);a.$$path=decodeURIComponent(c&&"/"===d.pathname.charAt(0)?d.pathname.substring(1):d.pathname);a.$$search=rc(d.search);a.$$hash=decodeURIComponent(d.hash);a.$$path&&"/"!=a.$$path.charAt(0)&&(a.$$path="/"+a.$$path)}function xa(b,a){if(0===a.indexOf(b))return a.substr(b.length)}function Fa(b){var a= -b.indexOf("#");return-1==a?b:b.substr(0,a)}function bc(b){return b.substr(0,Fa(b).lastIndexOf("/")+1)}function cc(b,a){this.$$html5=!0;a=a||"";var c=bc(b);Yc(b,this);this.$$parse=function(a){var b=xa(c,a);if(!I(b))throw eb("ipthprfx",a,c);Zc(b,this);this.$$path||(this.$$path="/");this.$$compose()};this.$$compose=function(){var a=Kb(this.$$search),b=this.$$hash?"#"+nb(this.$$hash):"";this.$$url=ac(this.$$path)+(a?"?"+a:"")+b;this.$$absUrl=c+this.$$url.substr(1)};this.$$parseLinkUrl=function(d,e){if(e&& -"#"===e[0])return 
this.hash(e.slice(1)),!0;var f,g;(f=xa(b,d))!==t?(g=f,g=(f=xa(a,f))!==t?c+(xa("/",f)||f):b+g):(f=xa(c,d))!==t?g=c+f:c==d+"/"&&(g=c);g&&this.$$parse(g);return!!g}}function dc(b,a){var c=bc(b);Yc(b,this);this.$$parse=function(d){var e=xa(b,d)||xa(c,d),e="#"==e.charAt(0)?xa(a,e):this.$$html5?e:"";if(!I(e))throw eb("ihshprfx",d,a);Zc(e,this);d=this.$$path;var f=/^\/[A-Z]:(\/.*)/;0===e.indexOf(b)&&(e=e.replace(b,""));f.exec(e)||(d=(e=f.exec(d))?e[1]:d);this.$$path=d;this.$$compose()}; -this.$$compose=function(){var c=Kb(this.$$search),e=this.$$hash?"#"+nb(this.$$hash):"";this.$$url=ac(this.$$path)+(c?"?"+c:"")+e;this.$$absUrl=b+(this.$$url?a+this.$$url:"")};this.$$parseLinkUrl=function(a,c){return Fa(b)==Fa(a)?(this.$$parse(a),!0):!1}}function $c(b,a){this.$$html5=!0;dc.apply(this,arguments);var c=bc(b);this.$$parseLinkUrl=function(d,e){if(e&&"#"===e[0])return this.hash(e.slice(1)),!0;var f,g;b==Fa(d)?f=d:(g=xa(c,d))?f=b+a+g:c===d+"/"&&(f=c);f&&this.$$parse(f);return!!f};this.$$compose= -function(){var c=Kb(this.$$search),e=this.$$hash?"#"+nb(this.$$hash):"";this.$$url=ac(this.$$path)+(c?"?"+c:"")+e;this.$$absUrl=b+a+this.$$url}}function Bb(b){return function(){return this[b]}}function ad(b,a){return function(c){if(D(c))return this[b];this[b]=a(c);this.$$compose();return this}}function Ke(){var b="",a={enabled:!1,requireBase:!0,rewriteLinks:!0};this.hashPrefix=function(a){return A(a)?(b=a,this):b};this.html5Mode=function(b){return Ua(b)?(a.enabled=b,this):L(b)?(Ua(b.enabled)&&(a.enabled= -b.enabled),Ua(b.requireBase)&&(a.requireBase=b.requireBase),Ua(b.rewriteLinks)&&(a.rewriteLinks=b.rewriteLinks),this):a};this.$get=["$rootScope","$browser","$sniffer","$rootElement",function(c,d,e,f){function g(a,b,c){var e=k.url(),f=k.$$state;try{d.url(a,b,c),k.$$state=d.state()}catch(g){throw k.url(e),k.$$state=f,g;}}function h(a,b){c.$broadcast("$locationChangeSuccess",k.absUrl(),a,k.$$state,b)}var k,l;l=d.baseHref();var m=d.url(),p;if(a.enabled){if(!l&&a.requireBase)throw eb("nobase");p=m.substring(0, -m.indexOf("/",m.indexOf("//")+2))+(l||"/");l=e.history?cc:$c}else p=Fa(m),l=dc;k=new l(p,"#"+b);k.$$parseLinkUrl(m,m);k.$$state=d.state();var q=/^\s*(javascript|mailto):/i;f.on("click",function(b){if(a.rewriteLinks&&!b.ctrlKey&&!b.metaKey&&2!=b.which){for(var e=y(b.target);"a"!==sa(e[0]);)if(e[0]===f[0]||!(e=e.parent())[0])return;var g=e.prop("href"),h=e.attr("href")||e.attr("xlink:href");L(g)&&"[object SVGAnimatedString]"===g.toString()&&(g=Aa(g.animVal).href);q.test(g)||!g||e.attr("target")||b.isDefaultPrevented()|| -!k.$$parseLinkUrl(g,h)||(b.preventDefault(),k.absUrl()!=d.url()&&(c.$apply(),T.angular["ff-684208-preventDefault"]=!0))}});k.absUrl()!=m&&d.url(k.absUrl(),!0);var n=!0;d.onUrlChange(function(a,b){c.$evalAsync(function(){var d=k.absUrl(),e=k.$$state,f;k.$$parse(a);k.$$state=b;f=c.$broadcast("$locationChangeStart",a,d,b,e).defaultPrevented;k.absUrl()===a&&(f?(k.$$parse(d),k.$$state=e,g(d,!1,e)):(n=!1,h(d,e)))});c.$$phase||c.$digest()});c.$watch(function(){var a=d.url(),b=d.state(),f=k.$$replace,l=a!== -k.absUrl()||k.$$html5&&e.history&&b!==k.$$state;if(n||l)n=!1,c.$evalAsync(function(){var d=k.absUrl(),e=c.$broadcast("$locationChangeStart",d,a,k.$$state,b).defaultPrevented;k.absUrl()===d&&(e?(k.$$parse(a),k.$$state=b):(l&&g(d,f,b===k.$$state?null:k.$$state),h(a,b)))});k.$$replace=!1});return k}]}function Le(){var b=!0,a=this;this.debugEnabled=function(a){return A(a)?(b=a,this):b};this.$get=["$window",function(c){function d(a){a instanceof 
Error&&(a.stack?a=a.message&&-1===a.stack.indexOf(a.message)? -"Error: "+a.message+"\n"+a.stack:a.stack:a.sourceURL&&(a=a.message+"\n"+a.sourceURL+":"+a.line));return a}function e(a){var b=c.console||{},e=b[a]||b.log||w;a=!1;try{a=!!e.apply}catch(k){}return a?function(){var a=[];r(arguments,function(b){a.push(d(b))});return e.apply(b,a)}:function(a,b){e(a,null==b?"":b)}}return{log:e("log"),info:e("info"),warn:e("warn"),error:e("error"),debug:function(){var c=e("debug");return function(){b&&c.apply(a,arguments)}}()}}]}function qa(b,a){if("__defineGetter__"=== -b||"__defineSetter__"===b||"__lookupGetter__"===b||"__lookupSetter__"===b||"__proto__"===b)throw ja("isecfld",a);return b}function ra(b,a){if(b){if(b.constructor===b)throw ja("isecfn",a);if(b.window===b)throw ja("isecwindow",a);if(b.children&&(b.nodeName||b.prop&&b.attr&&b.find))throw ja("isecdom",a);if(b===Object)throw ja("isecobj",a);}return b}function ec(b){return b.constant}function Oa(b,a,c,d){ra(b,d);a=a.split(".");for(var e,f=0;1h?bd(g[0],g[1],g[2],g[3],g[4],c,d):function(a,b){var e=0,f;do f=bd(g[e++],g[e++],g[e++],g[e++],g[e++],c,d)(a,b),b=t,a=f;while(e=this.promise.$$state.status&&d&&d.length&&b(function(){for(var b,e,f=0,g=d.length;fa)for(b in l++,f)e.hasOwnProperty(b)||(s--,delete f[b])}else f!==e&&(f=e,l++);return l}}c.$stateful=!0;var d=this,e,f,h,k=1< -b.length,l=0,p=g(a,c),m=[],q={},n=!0,s=0;return this.$watch(p,function(){n?(n=!1,b(e,e,d)):b(e,h,d);if(k)if(L(e))if(Ra(e)){h=Array(e.length);for(var a=0;ax&&(X=4-x,t[X]||(t[X]=[]),t[X].push({msg:u(e.exp)?"fn: "+(e.exp.name||e.exp.toString()):e.exp,newVal:g,oldVal:k}));else if(e===c){r=!1;break a}}catch(v){f(v)}if(!(m=M.$$childHead||M!==this&&M.$$nextSibling))for(;M!==this&&!(m=M.$$nextSibling);)M=M.$parent}while(M=m);if((r||O.length)&&!x--)throw s.$$phase=null,a("infdig",b,t);}while(r|| -O.length);for(s.$$phase=null;E.length;)try{E.shift()()}catch(y){f(y)}},$destroy:function(){if(!this.$$destroyed){var a=this.$parent;this.$broadcast("$destroy");this.$$destroyed=!0;if(this!==s){for(var b in this.$$listenerCount)m(this,this.$$listenerCount[b],b);a.$$childHead==this&&(a.$$childHead=this.$$nextSibling);a.$$childTail==this&&(a.$$childTail=this.$$prevSibling);this.$$prevSibling&&(this.$$prevSibling.$$nextSibling=this.$$nextSibling);this.$$nextSibling&&(this.$$nextSibling.$$prevSibling= -this.$$prevSibling);this.$destroy=this.$digest=this.$apply=this.$evalAsync=this.$applyAsync=w;this.$on=this.$watch=this.$watchGroup=function(){return w};this.$$listeners={};this.$parent=this.$$nextSibling=this.$$prevSibling=this.$$childHead=this.$$childTail=this.$root=this.$$watchers=null}}},$eval:function(a,b){return g(a)(this,b)},$evalAsync:function(a){s.$$phase||O.length||h.defer(function(){O.length&&s.$digest()});O.push({scope:this,expression:a})},$$postDigest:function(a){E.push(a)},$apply:function(a){try{return l("$apply"), -this.$eval(a)}catch(b){f(b)}finally{s.$$phase=null;try{s.$digest()}catch(c){throw f(c),c;}}},$applyAsync:function(a){function b(){c.$eval(a)}var c=this;a&&x.push(b);n()},$on:function(a,b){var c=this.$$listeners[a];c||(this.$$listeners[a]=c=[]);c.push(b);var d=this;do d.$$listenerCount[a]||(d.$$listenerCount[a]=0),d.$$listenerCount[a]++;while(d=d.$parent);var e=this;return function(){var d=c.indexOf(b);-1!==d&&(c[d]=null,m(e,1,a))}},$emit:function(a,b){var c=[],d,e=this,g=!1,h={name:a,targetScope:e, 
-stopPropagation:function(){g=!0},preventDefault:function(){h.defaultPrevented=!0},defaultPrevented:!1},k=Xa([h],arguments,1),l,m;do{d=e.$$listeners[a]||c;h.currentScope=e;l=0;for(m=d.length;lHa)throw Ba("iequirks");var d=ta(ka);d.isEnabled=function(){return b};d.trustAs=c.trustAs;d.getTrusted=c.getTrusted;d.valueOf=c.valueOf;b||(d.trustAs=d.getTrusted=function(a,b){return b},d.valueOf=ma);d.parseAs=function(b,c){var e=a(c);return e.literal&&e.constant?e:a(c,function(a){return d.getTrusted(b,a)})};var e=d.parseAs,f=d.getTrusted,g=d.trustAs;r(ka,function(a,b){var c=Q(b);d[bb("parse_as_"+c)]=function(b){return e(a, -b)};d[bb("get_trusted_"+c)]=function(b){return f(a,b)};d[bb("trust_as_"+c)]=function(b){return g(a,b)}});return d}]}function Se(){this.$get=["$window","$document",function(b,a){var c={},d=aa((/android (\d+)/.exec(Q((b.navigator||{}).userAgent))||[])[1]),e=/Boxee/i.test((b.navigator||{}).userAgent),f=a[0]||{},g,h=/^(Moz|webkit|ms)(?=[A-Z])/,k=f.body&&f.body.style,l=!1,m=!1;if(k){for(var p in k)if(l=h.exec(p)){g=l[0];g=g.substr(0,1).toUpperCase()+g.substr(1);break}g||(g="WebkitOpacity"in k&&"webkit"); -l=!!("transition"in k||g+"Transition"in k);m=!!("animation"in k||g+"Animation"in k);!d||l&&m||(l=I(f.body.style.webkitTransition),m=I(f.body.style.webkitAnimation))}return{history:!(!b.history||!b.history.pushState||4>d||e),hasEvent:function(a){if("input"==a&&9==Ha)return!1;if(D(c[a])){var b=f.createElement("div");c[a]="on"+a in b}return c[a]},csp:$a(),vendorPrefix:g,transitions:l,animations:m,android:d}}]}function Ue(){this.$get=["$templateCache","$http","$q",function(b,a,c){function d(e,f){d.totalPendingRequests++; -var g=a.defaults&&a.defaults.transformResponse;if(G(g))for(var h=g,g=[],k=0;kb;b=Math.abs(b);var g=b+"",h="",k=[],l=!1;if(-1!==g.indexOf("e")){var m=g.match(/([\d\.]+)e(-?)(\d+)/);m&&"-"==m[2]&&m[3]>e+1?(g="0",b=0):(h=g,l=!0)}if(l)0b&&(h=b.toFixed(e));else{g=(g.split(md)[1]||"").length;D(e)&&(e=Math.min(Math.max(a.minFrac, -g),a.maxFrac));b=+(Math.round(+(b.toString()+"e"+e)).toString()+"e"+-e);0===b&&(f=!1);b=(""+b).split(md);g=b[0];b=b[1]||"";var m=0,p=a.lgSize,q=a.gSize;if(g.length>=p+q)for(m=g.length-p,l=0;lb&&(d="-",b=-b);for(b= -""+b;b.length-c)e+=c;0===e&&-12==c&&(e=12);return Cb(e,a,d)}}function Db(b,a){return function(c,d){var e=c["get"+b](),f=rb(a?"SHORT"+b:b);return d[f][e]}}function nd(b){var a=(new Date(b,0,1)).getDay();return new Date(b,0,(4>=a?5:12)-a)}function od(b){return function(a){var c=nd(a.getFullYear());a=+new Date(a.getFullYear(),a.getMonth(),a.getDate()+(4-a.getDay()))-+c;a=1+Math.round(a/ -6048E5);return Cb(a,b)}}function id(b){function a(a){var b;if(b=a.match(c)){a=new Date(0);var f=0,g=0,h=b[8]?a.setUTCFullYear:a.setFullYear,k=b[8]?a.setUTCHours:a.setHours;b[9]&&(f=aa(b[9]+b[10]),g=aa(b[9]+b[11]));h.call(a,aa(b[1]),aa(b[2])-1,aa(b[3]));f=aa(b[4]||0)-f;g=aa(b[5]||0)-g;h=aa(b[6]||0);b=Math.round(1E3*parseFloat("0."+(b[7]||0)));k.call(a,f,g,h,b)}return a}var c=/^(\d{4})-?(\d\d)-?(\d\d)(?:T(\d\d)(?::?(\d\d)(?::?(\d\d)(?:\.(\d+))?)?)?(Z|([+-])(\d\d):?(\d\d))?)?$/;return function(c,e,f){var g= -"",h=[],k,l;e=e||"mediumDate";e=b.DATETIME_FORMATS[e]||e;I(c)&&(c=Hf.test(c)?aa(c):a(c));W(c)&&(c=new Date(c));if(!ea(c))return c;for(;e;)(l=If.exec(e))?(h=Xa(h,l,1),e=h.pop()):(h.push(e),e=null);f&&"UTC"===f&&(c=new Date(c.getTime()),c.setMinutes(c.getMinutes()+c.getTimezoneOffset()));r(h,function(a){k=Jf[a];g+=k?k(c,b.DATETIME_FORMATS):a.replace(/(^'|'$)/g,"").replace(/''/g,"'")});return g}}function Df(){return function(b){return Za(b,!0)}}function 
Ef(){return function(b,a){W(b)&&(b=b.toString()); -if(!G(b)&&!I(b))return b;a=Infinity===Math.abs(Number(a))?Number(a):aa(a);if(I(b))return a?0<=a?b.slice(0,a):b.slice(a,b.length):"";var c=[],d,e;a>b.length?a=b.length:a<-b.length&&(a=-b.length);0b||37<=b&&40>=b||q(a)});if(e.hasEvent("paste"))a.on("paste cut",q)}a.on("change",m);d.$render=function(){a.val(d.$isEmpty(d.$modelValue)?"":d.$viewValue)}}function Gb(b,a){return function(c,d){var e,f;if(ea(c))return c;if(I(c)){'"'==c.charAt(0)&&'"'==c.charAt(c.length-1)&&(c=c.substring(1,c.length-1));if(Kf.test(c))return new Date(c);b.lastIndex=0;if(e=b.exec(c))return e.shift(),f=d?{yyyy:d.getFullYear(),MM:d.getMonth()+1,dd:d.getDate(), -HH:d.getHours(),mm:d.getMinutes(),ss:d.getSeconds(),sss:d.getMilliseconds()/1E3}:{yyyy:1970,MM:1,dd:1,HH:0,mm:0,ss:0,sss:0},r(e,function(b,c){c=s};g.$observe("min",function(a){s=p(a);h.$validate()})}if(A(g.max)||g.ngMax){var r;h.$validators.max=function(a){return h.$isEmpty(a)||D(r)||c(a)<= -r};g.$observe("max",function(a){r=p(a);h.$validate()})}h.$isEmpty=function(a){return!a||a.getTime&&a.getTime()!==a.getTime()}}}function rd(b,a,c,d){(d.$$hasNativeValidators=L(a[0].validity))&&d.$parsers.push(function(b){var c=a.prop("validity")||{};return c.badInput&&!c.typeMismatch?t:b})}function sd(b,a,c,d,e){if(A(d)){b=b(d);if(!b.constant)throw v("ngModel")("constexpr",c,d);return b(a)}return e}function qd(b){function a(a,b){b&&!f[a]?(l.addClass(e,a),f[a]=!0):!b&&f[a]&&(l.removeClass(e,a),f[a]= -!1)}function c(b,c){b=b?"-"+Mb(b,"-"):"";a(ib+b,!0===c);a(td+b,!1===c)}var d=b.ctrl,e=b.$element,f={},g=b.set,h=b.unset,k=b.parentForm,l=b.$animate;f[td]=!(f[ib]=e.hasClass(ib));d.$setValidity=function(b,e,f){e===t?(d.$pending||(d.$pending={}),g(d.$pending,b,f)):(d.$pending&&h(d.$pending,b,f),ud(d.$pending)&&(d.$pending=t));Ua(e)?e?(h(d.$error,b,f),g(d.$$success,b,f)):(g(d.$error,b,f),h(d.$$success,b,f)):(h(d.$error,b,f),h(d.$$success,b,f));d.$pending?(a(vd,!0),d.$valid=d.$invalid=t,c("",null)):(a(vd, -!1),d.$valid=ud(d.$error),d.$invalid=!d.$valid,c("",d.$valid));e=d.$pending&&d.$pending[b]?t:d.$error[b]?!1:d.$$success[b]?!0:null;c(b,e);k.$setValidity(b,e,d)}}function ud(b){if(b)for(var a in b)return!1;return!0}function ic(b,a){b="ngClass"+b;return["$animate",function(c){function d(a,b){var c=[],d=0;a:for(;d(?:<\/\1>|)$/,Pb=/<|&#?\w+;/,bf=/<([\w:]+)/,cf=/<(?!area|br|col|embed|hr|img|input|link|meta|param)(([\w:]+)[^>]*)\/>/gi,ha={option:[1,'"],thead:[1,"","
    "],col:[2,"","
    "],tr:[2,"","
    "], -td:[3,"","
    "],_default:[0,"",""]};ha.optgroup=ha.option;ha.tbody=ha.tfoot=ha.colgroup=ha.caption=ha.thead;ha.th=ha.td;var Ka=R.prototype={ready:function(b){function a(){c||(c=!0,b())}var c=!1;"complete"===U.readyState?setTimeout(a):(this.on("DOMContentLoaded",a),R(T).on("load",a))},toString:function(){var b=[];r(this,function(a){b.push(""+a)});return"["+b.join(", ")+"]"},eq:function(b){return 0<=b?y(this[b]):y(this[this.length+b])},length:0,push:Mf,sort:[].sort, -splice:[].splice},yb={};r("multiple selected checked disabled readOnly required open".split(" "),function(b){yb[Q(b)]=b});var Lc={};r("input select option textarea button form details".split(" "),function(b){Lc[b]=!0});var Mc={ngMinlength:"minlength",ngMaxlength:"maxlength",ngMin:"min",ngMax:"max",ngPattern:"pattern"};r({data:Sb,removeData:ub},function(b,a){R[a]=b});r({data:Sb,inheritedData:xb,scope:function(b){return y.data(b,"$scope")||xb(b.parentNode||b,["$isolateScope","$scope"])},isolateScope:function(b){return y.data(b, -"$isolateScope")||y.data(b,"$isolateScopeNoTemplate")},controller:Hc,injector:function(b){return xb(b,"$injector")},removeAttr:function(b,a){b.removeAttribute(a)},hasClass:Tb,css:function(b,a,c){a=bb(a);if(A(c))b.style[a]=c;else return b.style[a]},attr:function(b,a,c){var d=Q(a);if(yb[d])if(A(c))c?(b[a]=!0,b.setAttribute(a,d)):(b[a]=!1,b.removeAttribute(d));else return b[a]||(b.attributes.getNamedItem(a)||w).specified?d:t;else if(A(c))b.setAttribute(a,c);else if(b.getAttribute)return b=b.getAttribute(a, -2),null===b?t:b},prop:function(b,a,c){if(A(c))b[a]=c;else return b[a]},text:function(){function b(a,b){if(D(b)){var d=a.nodeType;return d===la||d===mb?a.textContent:""}a.textContent=b}b.$dv="";return b}(),val:function(b,a){if(D(a)){if(b.multiple&&"select"===sa(b)){var c=[];r(b.options,function(a){a.selected&&c.push(a.value||a.text)});return 0===c.length?null:c}return b.value}b.value=a},html:function(b,a){if(D(a))return b.innerHTML;tb(b,!0);b.innerHTML=a},empty:Ic},function(b,a){R.prototype[a]=function(a, -d){var e,f,g=this.length;if(b!==Ic&&(2==b.length&&b!==Tb&&b!==Hc?a:d)===t){if(L(a)){for(e=0;e":function(a,c,d,e){return d(a,c)>e(a,c)},"<=":function(a,c,d,e){return d(a,c)<=e(a,c)},">=":function(a,c,d,e){return d(a,c)>=e(a,c)},"&&":function(a, -c,d,e){return d(a,c)&&e(a,c)},"||":function(a,c,d,e){return d(a,c)||e(a,c)},"!":function(a,c,d){return!d(a,c)},"=":!0,"|":!0}),Uf={n:"\n",f:"\f",r:"\r",t:"\t",v:"\v","'":"'",'"':'"'},gc=function(a){this.options=a};gc.prototype={constructor:gc,lex:function(a){this.text=a;this.index=0;for(this.tokens=[];this.index=a&&"string"===typeof a},isWhitespace:function(a){return" "===a||"\r"===a||"\t"===a||"\n"===a||"\v"===a||"\u00a0"===a},isIdent:function(a){return"a"<=a&&"z">=a||"A"<=a&&"Z">=a||"_"===a||"$"===a},isExpOperator:function(a){return"-"===a||"+"===a||this.isNumber(a)},throwError:function(a,c,d){d=d||this.index;c=A(c)?"s "+c+"-"+this.index+" ["+this.text.substring(c,d)+"]":" "+d;throw ja("lexerr",a,c,this.text);},readNumber:function(){for(var a="",c=this.index;this.index< -this.text.length;){var d=Q(this.text.charAt(this.index));if("."==d||this.isNumber(d))a+=d;else{var e=this.peek();if("e"==d&&this.isExpOperator(e))a+=d;else if(this.isExpOperator(d)&&e&&this.isNumber(e)&&"e"==a.charAt(a.length-1))a+=d;else if(!this.isExpOperator(d)||e&&this.isNumber(e)||"e"!=a.charAt(a.length-1))break;else this.throwError("Invalid exponent")}this.index++}this.tokens.push({index:c,text:a,constant:!0,value:Number(a)})},readIdent:function(){for(var 
a=this.index;this.indexa){a=this.tokens[a];var g=a.text;if(g===c||g===d||g===e||g===f||!(c||d||e||f))return a}return!1},expect:function(a,c,d,e){return(a= -this.peek(a,c,d,e))?(this.tokens.shift(),a):!1},consume:function(a){if(0===this.tokens.length)throw ja("ueoe",this.text);var c=this.expect(a);c||this.throwError("is unexpected, expecting ["+a+"]",this.peek());return c},unaryFn:function(a,c){var d=jb[a];return H(function(a,f){return d(a,f,c)},{constant:c.constant,inputs:[c]})},binaryFn:function(a,c,d,e){var f=jb[c];return H(function(c,e){return f(c,e,a,d)},{constant:a.constant&&d.constant,inputs:!e&&[a,d]})},identifier:function(){for(var a=this.consume().text;this.peek(".")&& -this.peekAhead(1).identifier&&!this.peekAhead(2,"(");)a+=this.consume().text+this.consume().text;return Ib[a]||cd(a,this.options,this.text)},constant:function(){var a=this.consume().value;return H(function(){return a},{constant:!0,literal:!0})},statements:function(){for(var a=[];;)if(0","<=",">="))a=this.binaryFn(a,c.text,this.relational());return a},additive:function(){for(var a= -this.multiplicative(),c;c=this.expect("+","-");)a=this.binaryFn(a,c.text,this.multiplicative());return a},multiplicative:function(){for(var a=this.unary(),c;c=this.expect("*","/","%");)a=this.binaryFn(a,c.text,this.unary());return a},unary:function(){var a;return this.expect("+")?this.primary():(a=this.expect("-"))?this.binaryFn(fb.ZERO,a.text,this.unary()):(a=this.expect("!"))?this.unaryFn(a.text,this.unary()):this.primary()},fieldAccess:function(a){var c=this.text,d=this.consume().text,e=cd(d,this.options, -c);return H(function(c,d,h){return e(h||a(c,d))},{assign:function(e,g,h){(h=a(e,h))||a.assign(e,h={});return Oa(h,d,g,c)}})},objectIndex:function(a){var c=this.text,d=this.expression();this.consume("]");return H(function(e,f){var g=a(e,f),h=d(e,f);qa(h,c);return g?ra(g[h],c):t},{assign:function(e,f,g){var h=qa(d(e,g),c);(g=ra(a(e,g),c))||a.assign(e,g={});return g[h]=f}})},functionCall:function(a,c){var d=[];if(")"!==this.peekToken().text){do d.push(this.expression());while(this.expect(","))}this.consume(")"); -var e=this.text,f=d.length?[]:null;return function(g,h){var k=c?c(g,h):g,l=a(g,h,k)||w;if(f)for(var m=d.length;m--;)f[m]=ra(d[m](g,h),e);ra(k,e);if(l){if(l.constructor===l)throw ja("isecfn",e);if(l===Rf||l===Sf||l===Tf)throw ja("isecff",e);}k=l.apply?l.apply(k,f):l(f[0],f[1],f[2],f[3],f[4]);return ra(k,e)}},arrayDeclaration:function(){var a=[];if("]"!==this.peekToken().text){do{if(this.peek("]"))break;a.push(this.expression())}while(this.expect(","))}this.consume("]");return H(function(c,d){for(var e= -[],f=0,g=a.length;fa.getHours()?c.AMPMS[0]:c.AMPMS[1]},Z:function(a){a=-1*a.getTimezoneOffset();return a=(0<=a?"+":"")+(Cb(Math[0=h};d.$observe("min",function(a){A(a)&&!W(a)&&(a=parseFloat(a, -10));h=W(a)&&!isNaN(a)?a:t;e.$validate()})}if(d.max||d.ngMax){var k;e.$validators.max=function(a){return e.$isEmpty(a)||D(k)||a<=k};d.$observe("max",function(a){A(a)&&!W(a)&&(a=parseFloat(a,10));k=W(a)&&!isNaN(a)?a:t;e.$validate()})}},url:function(a,c,d,e,f,g){gb(a,c,d,e,f,g);hc(e);e.$$parserName="url";e.$validators.url=function(a){return e.$isEmpty(a)||Vf.test(a)}},email:function(a,c,d,e,f,g){gb(a,c,d,e,f,g);hc(e);e.$$parserName="email";e.$validators.email=function(a){return e.$isEmpty(a)||Wf.test(a)}}, 
-radio:function(a,c,d,e){D(d.name)&&c.attr("name",++kb);c.on("click",function(a){c[0].checked&&e.$setViewValue(d.value,a&&a.type)});e.$render=function(){c[0].checked=d.value==e.$viewValue};d.$observe("value",e.$render)},checkbox:function(a,c,d,e,f,g,h,k){var l=sd(k,a,"ngTrueValue",d.ngTrueValue,!0),m=sd(k,a,"ngFalseValue",d.ngFalseValue,!1);c.on("click",function(a){e.$setViewValue(c[0].checked,a&&a.type)});e.$render=function(){c[0].checked=e.$viewValue};e.$isEmpty=function(a){return a!==l};e.$formatters.push(function(a){return na(a, -l)});e.$parsers.push(function(a){return a?l:m})},hidden:w,button:w,submit:w,reset:w,file:w},wc=["$browser","$sniffer","$filter","$parse",function(a,c,d,e){return{restrict:"E",require:["?ngModel"],link:{pre:function(f,g,h,k){k[0]&&(Bd[Q(h.type)]||Bd.text)(f,g,h,k[0],c,a,d,e)}}}}],ib="ng-valid",td="ng-invalid",Qa="ng-pristine",Fb="ng-dirty",vd="ng-pending",Zf=["$scope","$exceptionHandler","$attrs","$element","$parse","$animate","$timeout","$rootScope","$q","$interpolate",function(a,c,d,e,f,g,h,k,l, -m){this.$modelValue=this.$viewValue=Number.NaN;this.$validators={};this.$asyncValidators={};this.$parsers=[];this.$formatters=[];this.$viewChangeListeners=[];this.$untouched=!0;this.$touched=!1;this.$pristine=!0;this.$dirty=!1;this.$valid=!0;this.$invalid=!1;this.$error={};this.$$success={};this.$pending=t;this.$name=m(d.name||"",!1)(a);var p=f(d.ngModel),q=null,n=this,s=function(){var c=p(a);n.$options&&n.$options.getterSetter&&u(c)&&(c=c());return c},O=function(c){var d;n.$options&&n.$options.getterSetter&& -u(d=p(a))?d(n.$modelValue):p.assign(a,n.$modelValue)};this.$$setOptions=function(a){n.$options=a;if(!(p.assign||a&&a.getterSetter))throw Hb("nonassign",d.ngModel,ua(e));};this.$render=w;this.$isEmpty=function(a){return D(a)||""===a||null===a||a!==a};var E=e.inheritedData("$formController")||Eb,x=0;qd({ctrl:this,$element:e,set:function(a,c){a[c]=!0},unset:function(a,c){delete a[c]},parentForm:E,$animate:g});this.$setPristine=function(){n.$dirty=!1;n.$pristine=!0;g.removeClass(e,Fb);g.addClass(e,Qa)}; -this.$setUntouched=function(){n.$touched=!1;n.$untouched=!0;g.setClass(e,"ng-untouched","ng-touched")};this.$setTouched=function(){n.$touched=!0;n.$untouched=!1;g.setClass(e,"ng-touched","ng-untouched")};this.$rollbackViewValue=function(){h.cancel(q);n.$viewValue=n.$$lastCommittedViewValue;n.$render()};this.$validate=function(){W(n.$modelValue)&&isNaN(n.$modelValue)||this.$$parseAndValidate()};this.$$runValidators=function(a,c,d,e){function f(){var a=!0;r(n.$validators,function(e,f){var g=e(c,d); -a=a&&g;h(f,g)});return a?!0:(r(n.$asyncValidators,function(a,c){h(c,null)}),!1)}function g(){var a=[],e=!0;r(n.$asyncValidators,function(f,g){var k=f(c,d);if(!k||!u(k.then))throw Hb("$asyncValidators",k);h(g,t);a.push(k.then(function(){h(g,!0)},function(a){e=!1;h(g,!1)}))});a.length?l.all(a).then(function(){k(e)},w):k(!0)}function h(a,c){m===x&&n.$setValidity(a,c)}function k(a){m===x&&e(a)}x++;var m=x;(function(a){var c=n.$$parserName||"parse";if(a===t)h(c,null);else if(h(c,a),!a)return r(n.$validators, -function(a,c){h(c,null)}),r(n.$asyncValidators,function(a,c){h(c,null)}),!1;return!0})(a)?f()?g():k(!1):k(!1)};this.$commitViewValue=function(){var a=n.$viewValue;h.cancel(q);if(n.$$lastCommittedViewValue!==a||""===a&&n.$$hasNativeValidators)n.$$lastCommittedViewValue=a,n.$pristine&&(n.$dirty=!0,n.$pristine=!1,g.removeClass(e,Qa),g.addClass(e,Fb),E.$setDirty()),this.$$parseAndValidate()};this.$$parseAndValidate=function(){var 
a=n.$$lastCommittedViewValue,c=a,d=D(c)?t:!0;if(d)for(var e=0;e=f}}}}},ue=function(){return{restrict:"A",priority:100,require:"ngModel",link:function(a,c,d,e){var f=c.attr(d.$attr.ngList)||", ",g="false"!==d.ngTrim,h=g?P(f):f;e.$parsers.push(function(a){if(!D(a)){var c=[];a&&r(a.split(h),function(a){a&&c.push(g?P(a):a)});return c}}); -e.$formatters.push(function(a){return G(a)?a.join(f):t});e.$isEmpty=function(a){return!a||!a.length}}}},$f=/^(true|false|\d+)$/,we=function(){return{restrict:"A",priority:100,compile:function(a,c){return $f.test(c.ngValue)?function(a,c,f){f.$set("value",a.$eval(f.ngValue))}:function(a,c,f){a.$watch(f.ngValue,function(a){f.$set("value",a)})}}}},xe=function(){return{restrict:"A",controller:["$scope","$attrs",function(a,c){var d=this;this.$options=a.$eval(c.ngModelOptions);this.$options.updateOn!==t? -(this.$options.updateOnDefault=!1,this.$options.updateOn=P(this.$options.updateOn.replace(Yf,function(){d.$options.updateOnDefault=!0;return" "}))):this.$options.updateOnDefault=!0}]}},Xd=["$compile",function(a){return{restrict:"AC",compile:function(c){a.$$addBindingClass(c);return function(c,e,f){a.$$addBindingInfo(e,f.ngBind);e=e[0];c.$watch(f.ngBind,function(a){e.textContent=a===t?"":a})}}}}],Zd=["$interpolate","$compile",function(a,c){return{compile:function(d){c.$$addBindingClass(d);return function(d, -f,g){d=a(f.attr(g.$attr.ngBindTemplate));c.$$addBindingInfo(f,d.expressions);f=f[0];g.$observe("ngBindTemplate",function(a){f.textContent=a===t?"":a})}}}}],Yd=["$sce","$parse","$compile",function(a,c,d){return{restrict:"A",compile:function(e,f){var g=c(f.ngBindHtml),h=c(f.ngBindHtml,function(a){return(a||"").toString()});d.$$addBindingClass(e);return function(c,e,f){d.$$addBindingInfo(e,f.ngBindHtml);c.$watch(h,function(){e.html(a.getTrustedHtml(g(c))||"")})}}}}],$d=ic("",!0),be=ic("Odd",0),ae=ic("Even", -1),ce=Ia({compile:function(a,c){c.$set("ngCloak",t);a.removeClass("ng-cloak")}}),de=[function(){return{restrict:"A",scope:!0,controller:"@",priority:500}}],Bc={},ag={blur:!0,focus:!0};r("click dblclick mousedown mouseup mouseover mouseout mousemove mouseenter mouseleave keydown keyup keypress submit focus blur copy cut paste".split(" "),function(a){var c=wa("ng-"+a);Bc[c]=["$parse","$rootScope",function(d,e){return{restrict:"A",compile:function(f,g){var h=d(g[c],null,!0);return function(c,d){d.on(a, -function(d){var f=function(){h(c,{$event:d})};ag[a]&&e.$$phase?c.$evalAsync(f):c.$apply(f)})}}}}]});var ge=["$animate",function(a){return{multiElement:!0,transclude:"element",priority:600,terminal:!0,restrict:"A",$$tlb:!0,link:function(c,d,e,f,g){var h,k,l;c.$watch(e.ngIf,function(c){c?k||g(function(c,f){k=f;c[c.length++]=U.createComment(" end ngIf: "+e.ngIf+" ");h={clone:c};a.enter(c,d.parent(),d)}):(l&&(l.remove(),l=null),k&&(k.$destroy(),k=null),h&&(l=qb(h.clone),a.leave(l).then(function(){l=null}), -h=null))})}}}],he=["$templateRequest","$anchorScroll","$animate","$sce",function(a,c,d,e){return{restrict:"ECA",priority:400,terminal:!0,transclude:"element",controller:va.noop,compile:function(f,g){var h=g.ngInclude||g.src,k=g.onload||"",l=g.autoscroll;return function(f,g,q,n,r){var t=0,E,x,B,v=function(){x&&(x.remove(),x=null);E&&(E.$destroy(),E=null);B&&(d.leave(B).then(function(){x=null}),x=B,B=null)};f.$watch(e.parseAsResourceUrl(h),function(e){var h=function(){!A(l)||l&&!f.$eval(l)||c()},q= -++t;e?(a(e,!0).then(function(a){if(q===t){var 
c=f.$new();n.template=a;a=r(c,function(a){v();d.enter(a,null,g).then(h)});E=c;B=a;E.$emit("$includeContentLoaded",e);f.$eval(k)}},function(){q===t&&(v(),f.$emit("$includeContentError",e))}),f.$emit("$includeContentRequested",e)):(v(),n.template=null)})}}}}],ye=["$compile",function(a){return{restrict:"ECA",priority:-400,require:"ngInclude",link:function(c,d,e,f){/SVG/.test(d[0].toString())?(d.empty(),a(Ec(f.template,U).childNodes)(c,function(a){d.append(a)}, -{futureParentElement:d})):(d.html(f.template),a(d.contents())(c))}}}],ie=Ia({priority:450,compile:function(){return{pre:function(a,c,d){a.$eval(d.ngInit)}}}}),je=Ia({terminal:!0,priority:1E3}),ke=["$locale","$interpolate",function(a,c){var d=/{}/g;return{restrict:"EA",link:function(e,f,g){var h=g.count,k=g.$attr.when&&f.attr(g.$attr.when),l=g.offset||0,m=e.$eval(k)||{},p={},q=c.startSymbol(),n=c.endSymbol(),s=/^when(Minus)?(.+)$/;r(g,function(a,c){s.test(c)&&(m[Q(c.replace("when","").replace("Minus", -"-"))]=f.attr(g.$attr[c]))});r(m,function(a,e){p[e]=c(a.replace(d,q+h+"-"+l+n))});e.$watch(function(){var c=parseFloat(e.$eval(h));if(isNaN(c))return"";c in m||(c=a.pluralCat(c-l));return p[c](e)},function(a){f.text(a)})}}}],le=["$parse","$animate",function(a,c){var d=v("ngRepeat"),e=function(a,c,d,e,l,m,p){a[d]=e;l&&(a[l]=m);a.$index=c;a.$first=0===c;a.$last=c===p-1;a.$middle=!(a.$first||a.$last);a.$odd=!(a.$even=0===(c&1))};return{restrict:"A",multiElement:!0,transclude:"element",priority:1E3,terminal:!0, -$$tlb:!0,compile:function(f,g){var h=g.ngRepeat,k=U.createComment(" end ngRepeat: "+h+" "),l=h.match(/^\s*([\s\S]+?)\s+in\s+([\s\S]+?)(?:\s+as\s+([\s\S]+?))?(?:\s+track\s+by\s+([\s\S]+?))?\s*$/);if(!l)throw d("iexp",h);var m=l[1],p=l[2],q=l[3],n=l[4],l=m.match(/^(?:([\$\w]+)|\(([\$\w]+)\s*,\s*([\$\w]+)\))$/);if(!l)throw d("iidexp",m);var s=l[3]||l[1],A=l[2];if(q&&(!/^[$a-zA-Z_][$a-zA-Z0-9_]*$/.test(q)||/^(null|undefined|this|\$index|\$first|\$middle|\$last|\$even|\$odd|\$parent)$/.test(q)))throw d("badident", -q);var v,x,B,J,z={$id:Ma};n?v=a(n):(B=function(a,c){return Ma(c)},J=function(a){return a});return function(a,f,g,l,n){v&&(x=function(c,d,e){A&&(z[A]=c);z[s]=d;z.$index=e;return v(a,z)});var m=pa();a.$watchCollection(p,function(g){var l,p,C=f[0],v,z=pa(),E,H,w,D,G,u,I;q&&(a[q]=g);if(Ra(g))G=g,p=x||B;else{p=x||J;G=[];for(I in g)g.hasOwnProperty(I)&&"$"!=I.charAt(0)&&G.push(I);G.sort()}E=G.length;I=Array(E);for(l=0;lE;)d=t.pop(),m(S,d.label,!1),d.element.remove(); -r(S,function(a,c){0a&&q.removeOption(c)})}for(;Q.length>w;)Q.pop()[0].element.remove()}var u;if(!(u=s.match(d)))throw bg("iexp",s,ua(f));var D=c(u[2]||u[1]),z=u[4]||u[6],y=/ as /.test(u[0])&&u[1],w=y?c(y):null,F=u[5],I=c(u[3]||""),E=c(u[2]?u[1]:z),N=c(u[7]),L=u[8]?c(u[8]):null,R={},Q=[[{element:f,label:""}]],T={};v&&(a(v)(e),v.removeClass("ng-scope"),v.remove());f.empty();f.on("change",function(){e.$apply(function(){var a=N(e)||[],c;if(n)c=[],r(f.val(),function(d){d=L?R[d]:d;c.push("?"=== -d?t:""===d?null:h(w?w:E,d,a[d]))});else{var d=L?R[f.val()]:f.val();c="?"===d?t:""===d?null:h(w?w:E,d,a[d])}g.$setViewValue(c);p()})});g.$render=p;e.$watchCollection(N,l);e.$watchCollection(function(){var a=N(e),c;if(a&&G(a)){c=Array(a.length);for(var d=0,f=a.length;d@charset "UTF-8";[ng\\:cloak],[ng-cloak],[data-ng-cloak],[x-ng-cloak],.ng-cloak,.x-ng-cloak,.ng-hide:not(.ng-hide-animate){display:none !important;}ng\\:form{display:block;}'); -//# sourceMappingURL=angular.min.js.map diff --git a/rally/ui/templates/libs/d3.3.4.13.min.js b/rally/ui/templates/libs/d3.3.4.13.min.js deleted 
file mode 100644 index b5ff3620..00000000 --- a/rally/ui/templates/libs/d3.3.4.13.min.js +++ /dev/null @@ -1,35 +0,0 @@ -/* - Copyright (c) 2010-2015, Michael Bostock - All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are met: - - * Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - - * Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - - * The name Michael Bostock may not be used to endorse or promote products - derived from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - DISCLAIMED. IN NO EVENT SHALL MICHAEL BOSTOCK BE LIABLE FOR ANY DIRECT, - INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, - BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY - OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING - NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, - EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - https://github.com/mbostock/d3 -*/
-[... minified D3.js v3.4.13 source elided: the extracted text is corrupted (code spans between "<" and ">" were stripped as HTML tags) and cannot be reproduced faithfully ...]
n=".dragsuppress-"+ ++Sa,t="click"+n,e=Bo.select(Qo).on("touchmove"+n,_).on("dragstart"+n,_).on("selectstart"+n,_);if(wa){var r=Ko.style,u=r[wa];r[wa]="none"}return function(i){function o(){e.on(t,null)}e.on(n,null),wa&&(r[wa]=u),i&&(e.on(t,function(){_(),o()},!0),setTimeout(o,0))}}function $(n,t){t.changedTouches&&(t=t.changedTouches[0]);var e=n.ownerSVGElement||n;if(e.createSVGPoint){var r=e.createSVGPoint();if(0>ka&&(Qo.scrollX||Qo.scrollY)){e=Bo.select("body").append("svg").style({position:"absolute",top:0,left:0,margin:0,padding:0,border:"none"},"important");var u=e[0][0].getScreenCTM();ka=!(u.f||u.e),e.remove()}return ka?(r.x=t.pageX,r.y=t.pageY):(r.x=t.clientX,r.y=t.clientY),r=r.matrixTransform(n.getScreenCTM().inverse()),[r.x,r.y]}var i=n.getBoundingClientRect();return[t.clientX-i.left-n.clientLeft,t.clientY-i.top-n.clientTop]}function B(){return Bo.event.changedTouches[0].identifier}function W(){return Bo.event.target}function J(){return Qo}function G(n){return n>0?1:0>n?-1:0}function K(n,t,e){return(t[0]-n[0])*(e[1]-n[1])-(t[1]-n[1])*(e[0]-n[0])}function Q(n){return n>1?0:-1>n?Ea:Math.acos(n)}function nt(n){return n>1?Ca:-1>n?-Ca:Math.asin(n)}function tt(n){return((n=Math.exp(n))-1/n)/2}function et(n){return((n=Math.exp(n))+1/n)/2}function rt(n){return((n=Math.exp(2*n))-1)/(n+1)}function ut(n){return(n=Math.sin(n/2))*n}function it(){}function ot(n,t,e){return this instanceof ot?(this.h=+n,this.s=+t,void(this.l=+e)):arguments.length<2?n instanceof ot?new ot(n.h,n.s,n.l):Mt(""+n,_t,ot):new ot(n,t,e)}function at(n,t,e){function r(n){return n>360?n-=360:0>n&&(n+=360),60>n?i+(o-i)*n/60:180>n?o:240>n?i+(o-i)*(240-n)/60:i}function u(n){return Math.round(255*r(n))}var i,o;return n=isNaN(n)?0:(n%=360)<0?n+360:n,t=isNaN(t)?0:0>t?0:t>1?1:t,e=0>e?0:e>1?1:e,o=.5>=e?e*(1+t):e+t-e*t,i=2*e-o,new dt(u(n+120),u(n),u(n-120))}function ct(n,t,e){return this instanceof ct?(this.h=+n,this.c=+t,void(this.l=+e)):arguments.length<2?n instanceof ct?new ct(n.h,n.c,n.l):n instanceof st?ht(n.l,n.a,n.b):ht((n=bt((n=Bo.rgb(n)).r,n.g,n.b)).l,n.a,n.b):new ct(n,t,e)}function lt(n,t,e){return isNaN(n)&&(n=0),isNaN(t)&&(t=0),new st(e,Math.cos(n*=La)*t,Math.sin(n)*t)}function st(n,t,e){return this instanceof st?(this.l=+n,this.a=+t,void(this.b=+e)):arguments.length<2?n instanceof st?new st(n.l,n.a,n.b):n instanceof ct?lt(n.h,n.c,n.l):bt((n=dt(n)).r,n.g,n.b):new st(n,t,e)}function ft(n,t,e){var r=(n+16)/116,u=r+t/500,i=r-e/200;return u=gt(u)*Ya,r=gt(r)*Ia,i=gt(i)*Za,new dt(vt(3.2404542*u-1.5371385*r-.4985314*i),vt(-.969266*u+1.8760108*r+.041556*i),vt(.0556434*u-.2040259*r+1.0572252*i))}function ht(n,t,e){return n>0?new ct(Math.atan2(e,t)*Ta,Math.sqrt(t*t+e*e),n):new ct(0/0,0/0,n)}function gt(n){return n>.206893034?n*n*n:(n-4/29)/7.787037}function pt(n){return n>.008856?Math.pow(n,1/3):7.787037*n+4/29}function vt(n){return Math.round(255*(.00304>=n?12.92*n:1.055*Math.pow(n,1/2.4)-.055))}function dt(n,t,e){return this instanceof dt?(this.r=~~n,this.g=~~t,void(this.b=~~e)):arguments.length<2?n instanceof dt?new dt(n.r,n.g,n.b):Mt(""+n,dt,at):new dt(n,t,e)}function mt(n){return new dt(n>>16,255&n>>8,255&n)}function yt(n){return mt(n)+""}function xt(n){return 16>n?"0"+Math.max(0,n).toString(16):Math.min(255,n).toString(16)}function Mt(n,t,e){var r,u,i,o=0,a=0,c=0;if(r=/([a-z]+)\((.*)\)/i.exec(n))switch(u=r[2].split(","),r[1]){case"hsl":return e(parseFloat(u[0]),parseFloat(u[1])/100,parseFloat(u[2])/100);case"rgb":return 
t(St(u[0]),St(u[1]),St(u[2]))}return(i=$a.get(n))?t(i.r,i.g,i.b):(null==n||"#"!==n.charAt(0)||isNaN(i=parseInt(n.slice(1),16))||(4===n.length?(o=(3840&i)>>4,o=o>>4|o,a=240&i,a=a>>4|a,c=15&i,c=c<<4|c):7===n.length&&(o=(16711680&i)>>16,a=(65280&i)>>8,c=255&i)),t(o,a,c))}function _t(n,t,e){var r,u,i=Math.min(n/=255,t/=255,e/=255),o=Math.max(n,t,e),a=o-i,c=(o+i)/2;return a?(u=.5>c?a/(o+i):a/(2-o-i),r=n==o?(t-e)/a+(e>t?6:0):t==o?(e-n)/a+2:(n-t)/a+4,r*=60):(r=0/0,u=c>0&&1>c?0:r),new ot(r,u,c)}function bt(n,t,e){n=wt(n),t=wt(t),e=wt(e);var r=pt((.4124564*n+.3575761*t+.1804375*e)/Ya),u=pt((.2126729*n+.7151522*t+.072175*e)/Ia),i=pt((.0193339*n+.119192*t+.9503041*e)/Za);return st(116*u-16,500*(r-u),200*(u-i))}function wt(n){return(n/=255)<=.04045?n/12.92:Math.pow((n+.055)/1.055,2.4)}function St(n){var t=parseFloat(n);return"%"===n.charAt(n.length-1)?Math.round(2.55*t):t}function kt(n){return"function"==typeof n?n:function(){return n}}function Et(n){return n}function At(n){return function(t,e,r){return 2===arguments.length&&"function"==typeof e&&(r=e,e=null),Ct(t,e,n,r)}}function Ct(n,t,e,r){function u(){var n,t=c.status;if(!t&&zt(c)||t>=200&&300>t||304===t){try{n=e.call(i,c)}catch(r){return o.error.call(i,r),void 0}o.load.call(i,n)}else o.error.call(i,c)}var i={},o=Bo.dispatch("beforesend","progress","load","error"),a={},c=new XMLHttpRequest,l=null;return!Qo.XDomainRequest||"withCredentials"in c||!/^(http(s)?:)?\/\//.test(n)||(c=new XDomainRequest),"onload"in c?c.onload=c.onerror=u:c.onreadystatechange=function(){c.readyState>3&&u()},c.onprogress=function(n){var t=Bo.event;Bo.event=n;try{o.progress.call(i,c)}finally{Bo.event=t}},i.header=function(n,t){return n=(n+"").toLowerCase(),arguments.length<2?a[n]:(null==t?delete a[n]:a[n]=t+"",i)},i.mimeType=function(n){return arguments.length?(t=null==n?null:n+"",i):t},i.responseType=function(n){return arguments.length?(l=n,i):l},i.response=function(n){return e=n,i},["get","post"].forEach(function(n){i[n]=function(){return i.send.apply(i,[n].concat(Jo(arguments)))}}),i.send=function(e,r,u){if(2===arguments.length&&"function"==typeof r&&(u=r,r=null),c.open(e,n,!0),null==t||"accept"in a||(a.accept=t+",*/*"),c.setRequestHeader)for(var s in a)c.setRequestHeader(s,a[s]);return null!=t&&c.overrideMimeType&&c.overrideMimeType(t),null!=l&&(c.responseType=l),null!=u&&i.on("error",u).on("load",function(n){u(null,n)}),o.beforesend.call(i,c),c.send(null==r?null:r),i},i.abort=function(){return c.abort(),i},Bo.rebind(i,o,"on"),null==r?i:i.get(Nt(r))}function Nt(n){return 1===n.length?function(t,e){n(null==t?e:null)}:n}function zt(n){var t=n.responseType;return t&&"text"!==t?n.response:n.responseText}function Lt(){var n=Tt(),t=qt()-n;t>24?(isFinite(t)&&(clearTimeout(Ga),Ga=setTimeout(Lt,t)),Ja=0):(Ja=1,Qa(Lt))}function Tt(){var n=Date.now();for(Ka=Ba;Ka;)n>=Ka.t&&(Ka.f=Ka.c(n-Ka.t)),Ka=Ka.n;return n}function qt(){for(var n,t=Ba,e=1/0;t;)t.f?t=n?n.n=t.n:Ba=t.n:(t.t8?function(n){return n/e}:function(n){return n*e},symbol:n}}function Pt(n){var t=n.decimal,e=n.thousands,r=n.grouping,u=n.currency,i=r&&e?function(n,t){for(var u=n.length,i=[],o=0,a=r[0],c=0;u>0&&a>0&&(c+a+1>t&&(a=Math.max(1,t-c)),i.push(n.substring(u-=a,u+a)),!((c+=a+1)>t));)a=r[o=(o+1)%r.length];return i.reverse().join(e)}:Et;return function(n){var e=tc.exec(n),r=e[1]||" 
",o=e[2]||">",a=e[3]||"-",c=e[4]||"",l=e[5],s=+e[6],f=e[7],h=e[8],g=e[9],p=1,v="",d="",m=!1,y=!0;switch(h&&(h=+h.substring(1)),(l||"0"===r&&"="===o)&&(l=r="0",o="="),g){case"n":f=!0,g="g";break;case"%":p=100,d="%",g="f";break;case"p":p=100,d="%",g="r";break;case"b":case"o":case"x":case"X":"#"===c&&(v="0"+g.toLowerCase());case"c":y=!1;case"d":m=!0,h=0;break;case"s":p=-1,g="r"}"$"===c&&(v=u[0],d=u[1]),"r"!=g||h||(g="g"),null!=h&&("g"==g?h=Math.max(1,Math.min(21,h)):("e"==g||"f"==g)&&(h=Math.max(0,Math.min(20,h)))),g=ec.get(g)||Ut;var x=l&&f;return function(n){var e=d;if(m&&n%1)return"";var u=0>n||0===n&&0>1/n?(n=-n,"-"):"-"===a?"":a;if(0>p){var c=Bo.formatPrefix(n,h);n=c.scale(n),e=c.symbol+d}else n*=p;n=g(n,h);var M,_,b=n.lastIndexOf(".");if(0>b){var w=y?n.lastIndexOf("e"):-1;0>w?(M=n,_=""):(M=n.substring(0,w),_=n.substring(w))}else M=n.substring(0,b),_=t+n.substring(b+1);!l&&f&&(M=i(M,1/0));var S=v.length+M.length+_.length+(x?0:u.length),k=s>S?new Array(S=s-S+1).join(r):"";return x&&(M=i(k+M,k.length?s-_.length:1/0)),u+=v,n=M+_,("<"===o?u+n+k:">"===o?k+u+n:"^"===o?k.substring(0,S>>=1)+u+n+k.substring(S):u+(x?n:k+n))+e}}}function Ut(n){return n+""}function jt(){this._=new Date(arguments.length>1?Date.UTC.apply(this,arguments):arguments[0])}function Ft(n,t,e){function r(t){var e=n(t),r=i(e,1);return r-t>t-e?e:r}function u(e){return t(e=n(new uc(e-1)),1),e}function i(n,e){return t(n=new uc(+n),e),n}function o(n,r,i){var o=u(n),a=[];if(i>1)for(;r>o;)e(o)%i||a.push(new Date(+o)),t(o,1);else for(;r>o;)a.push(new Date(+o)),t(o,1);return a}function a(n,t,e){try{uc=jt;var r=new jt;return r._=n,o(r,t,e)}finally{uc=Date}}n.floor=n,n.round=r,n.ceil=u,n.offset=i,n.range=o;var c=n.utc=Ht(n);return c.floor=c,c.round=Ht(r),c.ceil=Ht(u),c.offset=Ht(i),c.range=a,n}function Ht(n){return function(t,e){try{uc=jt;var r=new jt;return r._=t,n(r,e)._}finally{uc=Date}}}function Ot(n){function t(n){function t(t){for(var e,u,i,o=[],a=-1,c=0;++aa;){if(r>=l)return-1;if(u=t.charCodeAt(a++),37===u){if(o=t.charAt(a++),i=N[o in oc?t.charAt(a++):o],!i||(r=i(n,e,r))<0)return-1}else if(u!=e.charCodeAt(r++))return-1}return r}function r(n,t,e){b.lastIndex=0;var r=b.exec(t.slice(e));return r?(n.w=w.get(r[0].toLowerCase()),e+r[0].length):-1}function u(n,t,e){M.lastIndex=0;var r=M.exec(t.slice(e));return r?(n.w=_.get(r[0].toLowerCase()),e+r[0].length):-1}function i(n,t,e){E.lastIndex=0;var r=E.exec(t.slice(e));return r?(n.m=A.get(r[0].toLowerCase()),e+r[0].length):-1}function o(n,t,e){S.lastIndex=0;var r=S.exec(t.slice(e));return r?(n.m=k.get(r[0].toLowerCase()),e+r[0].length):-1}function a(n,t,r){return e(n,C.c.toString(),t,r)}function c(n,t,r){return e(n,C.x.toString(),t,r)}function l(n,t,r){return e(n,C.X.toString(),t,r)}function s(n,t,e){var r=x.get(t.slice(e,e+=2).toLowerCase());return null==r?-1:(n.p=r,e)}var f=n.dateTime,h=n.date,g=n.time,p=n.periods,v=n.days,d=n.shortDays,m=n.months,y=n.shortMonths;t.utc=function(n){function e(n){try{uc=jt;var t=new uc;return t._=n,r(t)}finally{uc=Date}}var r=t(n);return e.parse=function(n){try{uc=jt;var t=r.parse(n);return t&&t._}finally{uc=Date}},e.toString=r.toString,e},t.multi=t.utc.multi=ae;var x=Bo.map(),M=It(v),_=Zt(v),b=It(d),w=Zt(d),S=It(m),k=Zt(m),E=It(y),A=Zt(y);p.forEach(function(n,t){x.set(n.toLowerCase(),t)});var C={a:function(n){return d[n.getDay()]},A:function(n){return v[n.getDay()]},b:function(n){return y[n.getMonth()]},B:function(n){return m[n.getMonth()]},c:t(f),d:function(n,t){return Yt(n.getDate(),t,2)},e:function(n,t){return 
Yt(n.getDate(),t,2)},H:function(n,t){return Yt(n.getHours(),t,2)},I:function(n,t){return Yt(n.getHours()%12||12,t,2)},j:function(n,t){return Yt(1+rc.dayOfYear(n),t,3)},L:function(n,t){return Yt(n.getMilliseconds(),t,3)},m:function(n,t){return Yt(n.getMonth()+1,t,2)},M:function(n,t){return Yt(n.getMinutes(),t,2)},p:function(n){return p[+(n.getHours()>=12)]},S:function(n,t){return Yt(n.getSeconds(),t,2)},U:function(n,t){return Yt(rc.sundayOfYear(n),t,2)},w:function(n){return n.getDay()},W:function(n,t){return Yt(rc.mondayOfYear(n),t,2)},x:t(h),X:t(g),y:function(n,t){return Yt(n.getFullYear()%100,t,2)},Y:function(n,t){return Yt(n.getFullYear()%1e4,t,4)},Z:ie,"%":function(){return"%"}},N={a:r,A:u,b:i,B:o,c:a,d:Qt,e:Qt,H:te,I:te,j:ne,L:ue,m:Kt,M:ee,p:s,S:re,U:Xt,w:Vt,W:$t,x:c,X:l,y:Wt,Y:Bt,Z:Jt,"%":oe};return t}function Yt(n,t,e){var r=0>n?"-":"",u=(r?-n:n)+"",i=u.length;return r+(e>i?new Array(e-i+1).join(t)+u:u)}function It(n){return new RegExp("^(?:"+n.map(Bo.requote).join("|")+")","i")}function Zt(n){for(var t=new a,e=-1,r=n.length;++e68?1900:2e3)}function Kt(n,t,e){ac.lastIndex=0;var r=ac.exec(t.slice(e,e+2));return r?(n.m=r[0]-1,e+r[0].length):-1}function Qt(n,t,e){ac.lastIndex=0;var r=ac.exec(t.slice(e,e+2));return r?(n.d=+r[0],e+r[0].length):-1}function ne(n,t,e){ac.lastIndex=0;var r=ac.exec(t.slice(e,e+3));return r?(n.j=+r[0],e+r[0].length):-1}function te(n,t,e){ac.lastIndex=0;var r=ac.exec(t.slice(e,e+2));return r?(n.H=+r[0],e+r[0].length):-1}function ee(n,t,e){ac.lastIndex=0;var r=ac.exec(t.slice(e,e+2));return r?(n.M=+r[0],e+r[0].length):-1}function re(n,t,e){ac.lastIndex=0;var r=ac.exec(t.slice(e,e+2));return r?(n.S=+r[0],e+r[0].length):-1}function ue(n,t,e){ac.lastIndex=0;var r=ac.exec(t.slice(e,e+3));return r?(n.L=+r[0],e+r[0].length):-1}function ie(n){var t=n.getTimezoneOffset(),e=t>0?"-":"+",r=0|ca(t)/60,u=ca(t)%60;return e+Yt(r,"0",2)+Yt(u,"0",2)}function oe(n,t,e){cc.lastIndex=0;var r=cc.exec(t.slice(e,e+1));return r?e+r[0].length:-1}function ae(n){for(var t=n.length,e=-1;++e=0?1:-1,a=o*e,c=Math.cos(t),l=Math.sin(t),s=i*l,f=u*c+s*Math.cos(a),h=s*o*Math.sin(a);pc.add(Math.atan2(h,f)),r=n,u=c,i=l}var t,e,r,u,i;vc.point=function(o,a){vc.point=n,r=(t=o)*La,u=Math.cos(a=(e=a)*La/2+Ea/4),i=Math.sin(a)},vc.lineEnd=function(){n(t,e)}}function pe(n){var t=n[0],e=n[1],r=Math.cos(e);return[r*Math.cos(t),r*Math.sin(t),Math.sin(e)]}function ve(n,t){return n[0]*t[0]+n[1]*t[1]+n[2]*t[2]}function de(n,t){return[n[1]*t[2]-n[2]*t[1],n[2]*t[0]-n[0]*t[2],n[0]*t[1]-n[1]*t[0]]}function me(n,t){n[0]+=t[0],n[1]+=t[1],n[2]+=t[2]}function ye(n,t){return[n[0]*t,n[1]*t,n[2]*t]}function xe(n){var t=Math.sqrt(n[0]*n[0]+n[1]*n[1]+n[2]*n[2]);n[0]/=t,n[1]/=t,n[2]/=t}function Me(n){return[Math.atan2(n[1],n[0]),nt(n[2])]}function _e(n,t){return ca(n[0]-t[0])a;++a)u.point((e=n[a])[0],e[1]);return u.lineEnd(),void 0}var c=new ze(e,n,null,!0),l=new ze(e,null,c,!1);c.o=l,i.push(c),o.push(l),c=new ze(r,n,null,!1),l=new ze(r,null,c,!0),c.o=l,i.push(c),o.push(l)}}),o.sort(t),Ne(i),Ne(o),i.length){for(var a=0,c=e,l=o.length;l>a;++a)o[a].e=c=!c;for(var s,f,h=i[0];;){for(var g=h,p=!0;g.v;)if((g=g.n)===h)return;s=g.z,u.lineStart();do{if(g.v=g.o.v=!0,g.e){if(p)for(var a=0,l=s.length;l>a;++a)u.point((f=s[a])[0],f[1]);else r(g.x,g.n.x,1,u);g=g.n}else{if(p){s=g.p.z;for(var a=s.length-1;a>=0;--a)u.point((f=s[a])[0],f[1])}else r(g.x,g.p.x,-1,u);g=g.p}g=g.o,s=g.z,p=!p}while(!g.v);u.lineEnd()}}}function Ne(n){if(t=n.length){for(var 
t,e,r=0,u=n[0];++r0){for(_||(i.polygonStart(),_=!0),i.lineStart();++o1&&2&t&&e.push(e.pop().concat(e.shift())),g.push(e.filter(Te))}var g,p,v,d=t(i),m=u.invert(r[0],r[1]),y={point:o,lineStart:c,lineEnd:l,polygonStart:function(){y.point=s,y.lineStart=f,y.lineEnd=h,g=[],p=[]},polygonEnd:function(){y.point=o,y.lineStart=c,y.lineEnd=l,g=Bo.merge(g);var n=je(m,p);g.length?(_||(i.polygonStart(),_=!0),Ce(g,Re,n,e,i)):n&&(_||(i.polygonStart(),_=!0),i.lineStart(),e(null,null,1,i),i.lineEnd()),_&&(i.polygonEnd(),_=!1),g=p=null},sphere:function(){i.polygonStart(),i.lineStart(),e(null,null,1,i),i.lineEnd(),i.polygonEnd()}},x=qe(),M=t(x),_=!1;return y}}function Te(n){return n.length>1}function qe(){var n,t=[];return{lineStart:function(){t.push(n=[])},point:function(t,e){n.push([t,e])},lineEnd:y,buffer:function(){var e=t;return t=[],n=null,e},rejoin:function(){t.length>1&&t.push(t.pop().concat(t.shift()))}}}function Re(n,t){return((n=n.x)[0]<0?n[1]-Ca-Na:Ca-n[1])-((t=t.x)[0]<0?t[1]-Ca-Na:Ca-t[1])}function De(n){var t,e=0/0,r=0/0,u=0/0;return{lineStart:function(){n.lineStart(),t=1},point:function(i,o){var a=i>0?Ea:-Ea,c=ca(i-e);ca(c-Ea)0?Ca:-Ca),n.point(u,r),n.lineEnd(),n.lineStart(),n.point(a,r),n.point(i,r),t=0):u!==a&&c>=Ea&&(ca(e-u)Na?Math.atan((Math.sin(t)*(i=Math.cos(r))*Math.sin(e)-Math.sin(r)*(u=Math.cos(t))*Math.sin(n))/(u*i*o)):(t+r)/2}function Ue(n,t,e,r){var u;if(null==n)u=e*Ca,r.point(-Ea,u),r.point(0,u),r.point(Ea,u),r.point(Ea,0),r.point(Ea,-u),r.point(0,-u),r.point(-Ea,-u),r.point(-Ea,0),r.point(-Ea,u);else if(ca(n[0]-t[0])>Na){var i=n[0]a;++a){var l=t[a],s=l.length;if(s)for(var f=l[0],h=f[0],g=f[1]/2+Ea/4,p=Math.sin(g),v=Math.cos(g),d=1;;){d===s&&(d=0),n=l[d];var m=n[0],y=n[1]/2+Ea/4,x=Math.sin(y),M=Math.cos(y),_=m-h,b=_>=0?1:-1,w=b*_,S=w>Ea,k=p*x;if(pc.add(Math.atan2(k*b*Math.sin(w),v*M+k*Math.cos(w))),i+=S?_+b*Aa:_,S^h>=e^m>=e){var E=de(pe(f),pe(n));xe(E);var A=de(u,E);xe(A);var C=(S^_>=0?-1:1)*nt(A[2]);(r>C||r===C&&(E[0]||E[1]))&&(o+=S^_>=0?1:-1)}if(!d++)break;h=m,p=x,v=M,f=n}}return(-Na>i||Na>i&&0>pc)^1&o}function Fe(n){function t(n,t){return Math.cos(n)*Math.cos(t)>i}function e(n){var e,i,c,l,s;return{lineStart:function(){l=c=!1,s=1},point:function(f,h){var g,p=[f,h],v=t(f,h),d=o?v?0:u(f,h):v?u(f+(0>f?Ea:-Ea),h):0;if(!e&&(l=c=v)&&n.lineStart(),v!==c&&(g=r(e,p),(_e(e,g)||_e(p,g))&&(p[0]+=Na,p[1]+=Na,v=t(p[0],p[1]))),v!==c)s=0,v?(n.lineStart(),g=r(p,e),n.point(g[0],g[1])):(g=r(e,p),n.point(g[0],g[1]),n.lineEnd()),e=g;else if(a&&e&&o^v){var m;d&i||!(m=r(p,e,!0))||(s=0,o?(n.lineStart(),n.point(m[0][0],m[0][1]),n.point(m[1][0],m[1][1]),n.lineEnd()):(n.point(m[1][0],m[1][1]),n.lineEnd(),n.lineStart(),n.point(m[0][0],m[0][1])))}!v||e&&_e(e,p)||n.point(p[0],p[1]),e=p,c=v,i=d},lineEnd:function(){c&&n.lineEnd(),e=null},clean:function(){return s|(l&&c)<<1}}}function r(n,t,e){var r=pe(n),u=pe(t),o=[1,0,0],a=de(r,u),c=ve(a,a),l=a[0],s=c-l*l;if(!s)return!e&&n;var f=i*c/s,h=-i*l/s,g=de(o,a),p=ye(o,f),v=ye(a,h);me(p,v);var d=g,m=ve(p,d),y=ve(d,d),x=m*m-y*(ve(p,p)-1);if(!(0>x)){var M=Math.sqrt(x),_=ye(d,(-m-M)/y);if(me(_,p),_=Me(_),!e)return _;var b,w=n[0],S=t[0],k=n[1],E=t[1];w>S&&(b=w,w=S,S=b);var A=S-w,C=ca(A-Ea)A;if(!C&&k>E&&(b=k,k=E,E=b),N?C?k+E>0^_[1]<(ca(_[0]-w)Ea^(w<=_[0]&&_[0]<=S)){var z=ye(d,(-m+M)/y);return me(z,p),[_,Me(z)]}}}function u(t,e){var r=o?n:Ea-n,u=0;return-r>t?u|=1:t>r&&(u|=2),-r>e?u|=4:e>r&&(u|=8),u}var i=Math.cos(n),o=i>0,a=ca(i)>Na,c=gr(n,6*La);return Le(t,e,c,o?[0,-n]:[-Ea,n-Ea])}function He(n,t,e,r){return function(u){var 
i,o=u.a,a=u.b,c=o.x,l=o.y,s=a.x,f=a.y,h=0,g=1,p=s-c,v=f-l;if(i=n-c,p||!(i>0)){if(i/=p,0>p){if(h>i)return;g>i&&(g=i)}else if(p>0){if(i>g)return;i>h&&(h=i)}if(i=e-c,p||!(0>i)){if(i/=p,0>p){if(i>g)return;i>h&&(h=i)}else if(p>0){if(h>i)return;g>i&&(g=i)}if(i=t-l,v||!(i>0)){if(i/=v,0>v){if(h>i)return;g>i&&(g=i)}else if(v>0){if(i>g)return;i>h&&(h=i)}if(i=r-l,v||!(0>i)){if(i/=v,0>v){if(i>g)return;i>h&&(h=i)}else if(v>0){if(h>i)return;g>i&&(g=i)}return h>0&&(u.a={x:c+h*p,y:l+h*v}),1>g&&(u.b={x:c+g*p,y:l+g*v}),u}}}}}}function Oe(n,t,e,r){function u(r,u){return ca(r[0]-n)0?0:3:ca(r[0]-e)0?2:1:ca(r[1]-t)0?1:0:u>0?3:2}function i(n,t){return o(n.x,t.x)}function o(n,t){var e=u(n,1),r=u(t,1);return e!==r?e-r:0===e?t[1]-n[1]:1===e?n[0]-t[0]:2===e?n[1]-t[1]:t[0]-n[0]}return function(a){function c(n){for(var t=0,e=d.length,r=n[1],u=0;e>u;++u)for(var i,o=1,a=d[u],c=a.length,l=a[0];c>o;++o)i=a[o],l[1]<=r?i[1]>r&&K(l,i,n)>0&&++t:i[1]<=r&&K(l,i,n)<0&&--t,l=i;return 0!==t}function l(i,a,c,l){var s=0,f=0;if(null==i||(s=u(i,c))!==(f=u(a,c))||o(i,a)<0^c>0){do l.point(0===s||3===s?n:e,s>1?r:t);while((s=(s+c+4)%4)!==f)}else l.point(a[0],a[1])}function s(u,i){return u>=n&&e>=u&&i>=t&&r>=i}function f(n,t){s(n,t)&&a.point(n,t)}function h(){N.point=p,d&&d.push(m=[]),S=!0,w=!1,_=b=0/0}function g(){v&&(p(y,x),M&&w&&A.rejoin(),v.push(A.buffer())),N.point=f,w&&a.lineEnd()}function p(n,t){n=Math.max(-Nc,Math.min(Nc,n)),t=Math.max(-Nc,Math.min(Nc,t));var e=s(n,t);if(d&&m.push([n,t]),S)y=n,x=t,M=e,S=!1,e&&(a.lineStart(),a.point(n,t));else if(e&&w)a.point(n,t);else{var r={a:{x:_,y:b},b:{x:n,y:t}};C(r)?(w||(a.lineStart(),a.point(r.a.x,r.a.y)),a.point(r.b.x,r.b.y),e||a.lineEnd(),k=!1):e&&(a.lineStart(),a.point(n,t),k=!1)}_=n,b=t,w=e}var v,d,m,y,x,M,_,b,w,S,k,E=a,A=qe(),C=He(n,t,e,r),N={point:f,lineStart:h,lineEnd:g,polygonStart:function(){a=A,v=[],d=[],k=!0},polygonEnd:function(){a=E,v=Bo.merge(v);var t=c([n,r]),e=k&&t,u=v.length;(e||u)&&(a.polygonStart(),e&&(a.lineStart(),l(null,null,1,a),a.lineEnd()),u&&Ce(v,i,t,l,a),a.polygonEnd()),v=d=m=null}};return N}}function Ye(n,t){function e(e,r){return e=n(e,r),t(e[0],e[1])}return n.invert&&t.invert&&(e.invert=function(e,r){return e=t.invert(e,r),e&&n.invert(e[0],e[1])}),e}function Ie(n){var t=0,e=Ea/3,r=ir(n),u=r(t,e);return u.parallels=function(n){return arguments.length?r(t=n[0]*Ea/180,e=n[1]*Ea/180):[180*(t/Ea),180*(e/Ea)]},u}function Ze(n,t){function e(n,t){var e=Math.sqrt(i-2*u*Math.sin(t))/u;return[e*Math.sin(n*=u),o-e*Math.cos(n)]}var r=Math.sin(n),u=(r+Math.sin(t))/2,i=1+r*(2*u-r),o=Math.sqrt(i)/u;return e.invert=function(n,t){var e=o-t;return[Math.atan2(n,e)/u,nt((i-(n*n+e*e)*u*u)/(2*u))]},e}function Ve(){function n(n,t){Lc+=u*n-r*t,r=n,u=t}var t,e,r,u;Pc.point=function(i,o){Pc.point=n,t=r=i,e=u=o},Pc.lineEnd=function(){n(t,e)}}function Xe(n,t){Tc>n&&(Tc=n),n>Rc&&(Rc=n),qc>t&&(qc=t),t>Dc&&(Dc=t)}function $e(){function n(n,t){o.push("M",n,",",t,i)}function t(n,t){o.push("M",n,",",t),a.point=e}function e(n,t){o.push("L",n,",",t)}function r(){a.point=n}function u(){o.push("Z")}var i=Be(4.5),o=[],a={point:n,lineStart:function(){a.point=t},lineEnd:r,polygonStart:function(){a.lineEnd=u},polygonEnd:function(){a.lineEnd=r,a.point=n},pointRadius:function(n){return i=Be(n),a},result:function(){if(o.length){var n=o.join("");return o=[],n}}};return a}function Be(n){return"m0,"+n+"a"+n+","+n+" 0 1,1 0,"+-2*n+"a"+n+","+n+" 0 1,1 0,"+2*n+"z"}function We(n,t){yc+=n,xc+=t,++Mc}function Je(){function n(n,r){var u=n-t,i=r-e,o=Math.sqrt(u*u+i*i);_c+=o*(t+n)/2,bc+=o*(e+r)/2,wc+=o,We(t=n,e=r)}var 
t,e;jc.point=function(r,u){jc.point=n,We(t=r,e=u)}}function Ge(){jc.point=We}function Ke(){function n(n,t){var e=n-r,i=t-u,o=Math.sqrt(e*e+i*i);_c+=o*(r+n)/2,bc+=o*(u+t)/2,wc+=o,o=u*n-r*t,Sc+=o*(r+n),kc+=o*(u+t),Ec+=3*o,We(r=n,u=t)}var t,e,r,u;jc.point=function(i,o){jc.point=n,We(t=r=i,e=u=o)},jc.lineEnd=function(){n(t,e)}}function Qe(n){function t(t,e){n.moveTo(t,e),n.arc(t,e,o,0,Aa)}function e(t,e){n.moveTo(t,e),a.point=r}function r(t,e){n.lineTo(t,e)}function u(){a.point=t}function i(){n.closePath()}var o=4.5,a={point:t,lineStart:function(){a.point=e},lineEnd:u,polygonStart:function(){a.lineEnd=i},polygonEnd:function(){a.lineEnd=u,a.point=t},pointRadius:function(n){return o=n,a},result:y};return a}function nr(n){function t(n){return(a?r:e)(n)}function e(t){return rr(t,function(e,r){e=n(e,r),t.point(e[0],e[1])})}function r(t){function e(e,r){e=n(e,r),t.point(e[0],e[1])}function r(){x=0/0,S.point=i,t.lineStart()}function i(e,r){var i=pe([e,r]),o=n(e,r);u(x,M,y,_,b,w,x=o[0],M=o[1],y=e,_=i[0],b=i[1],w=i[2],a,t),t.point(x,M)}function o(){S.point=e,t.lineEnd()}function c(){r(),S.point=l,S.lineEnd=s}function l(n,t){i(f=n,h=t),g=x,p=M,v=_,d=b,m=w,S.point=i}function s(){u(x,M,y,_,b,w,g,p,f,v,d,m,a,t),S.lineEnd=o,o()}var f,h,g,p,v,d,m,y,x,M,_,b,w,S={point:e,lineStart:r,lineEnd:o,polygonStart:function(){t.polygonStart(),S.lineStart=c},polygonEnd:function(){t.polygonEnd(),S.lineStart=r}};return S}function u(t,e,r,a,c,l,s,f,h,g,p,v,d,m){var y=s-t,x=f-e,M=y*y+x*x;if(M>4*i&&d--){var _=a+g,b=c+p,w=l+v,S=Math.sqrt(_*_+b*b+w*w),k=Math.asin(w/=S),E=ca(ca(w)-1)i||ca((y*z+x*L)/M-.5)>.3||o>a*g+c*p+l*v)&&(u(t,e,r,a,c,l,C,N,E,_/=S,b/=S,w,d,m),m.point(C,N),u(C,N,E,_,b,w,s,f,h,g,p,v,d,m))}}var i=.5,o=Math.cos(30*La),a=16;return t.precision=function(n){return arguments.length?(a=(i=n*n)>0&&16,t):Math.sqrt(i)},t}function tr(n){var t=nr(function(t,e){return n([t*Ta,e*Ta])});return function(n){return or(t(n))}}function er(n){this.stream=n}function rr(n,t){return{point:t,sphere:function(){n.sphere()},lineStart:function(){n.lineStart()},lineEnd:function(){n.lineEnd()},polygonStart:function(){n.polygonStart()},polygonEnd:function(){n.polygonEnd()}}}function ur(n){return ir(function(){return n})()}function ir(n){function t(n){return n=a(n[0]*La,n[1]*La),[n[0]*h+c,l-n[1]*h]}function e(n){return n=a.invert((n[0]-c)/h,(l-n[1])/h),n&&[n[0]*Ta,n[1]*Ta]}function r(){a=Ye(o=lr(m,y,x),i);var n=i(v,d);return c=g-n[0]*h,l=p+n[1]*h,u()}function u(){return s&&(s.valid=!1,s=null),t}var i,o,a,c,l,s,f=nr(function(n,t){return n=i(n,t),[n[0]*h+c,l-n[1]*h]}),h=150,g=480,p=250,v=0,d=0,m=0,y=0,x=0,M=Cc,_=Et,b=null,w=null;return t.stream=function(n){return s&&(s.valid=!1),s=or(M(o,f(_(n)))),s.valid=!0,s},t.clipAngle=function(n){return arguments.length?(M=null==n?(b=n,Cc):Fe((b=+n)*La),u()):b},t.clipExtent=function(n){return arguments.length?(w=n,_=n?Oe(n[0][0],n[0][1],n[1][0],n[1][1]):Et,u()):w},t.scale=function(n){return arguments.length?(h=+n,r()):h},t.translate=function(n){return arguments.length?(g=+n[0],p=+n[1],r()):[g,p]},t.center=function(n){return arguments.length?(v=n[0]%360*La,d=n[1]%360*La,r()):[v*Ta,d*Ta]},t.rotate=function(n){return arguments.length?(m=n[0]%360*La,y=n[1]%360*La,x=n.length>2?n[2]%360*La:0,r()):[m*Ta,y*Ta,x*Ta]},Bo.rebind(t,f,"precision"),function(){return i=n.apply(this,arguments),t.invert=i.invert&&e,r()}}function or(n){return rr(n,function(t,e){n.point(t*La,e*La)})}function ar(n,t){return[n,t]}function cr(n,t){return[n>Ea?n-Aa:-Ea>n?n+Aa:n,t]}function lr(n,t,e){return 
n?t||e?Ye(fr(n),hr(t,e)):fr(n):t||e?hr(t,e):cr}function sr(n){return function(t,e){return t+=n,[t>Ea?t-Aa:-Ea>t?t+Aa:t,e]}}function fr(n){var t=sr(n);return t.invert=sr(-n),t}function hr(n,t){function e(n,t){var e=Math.cos(t),a=Math.cos(n)*e,c=Math.sin(n)*e,l=Math.sin(t),s=l*r+a*u;return[Math.atan2(c*i-s*o,a*r-l*u),nt(s*i+c*o)]}var r=Math.cos(n),u=Math.sin(n),i=Math.cos(t),o=Math.sin(t);return e.invert=function(n,t){var e=Math.cos(t),a=Math.cos(n)*e,c=Math.sin(n)*e,l=Math.sin(t),s=l*i-c*o;return[Math.atan2(c*i+l*o,a*r+s*u),nt(s*r-a*u)]},e}function gr(n,t){var e=Math.cos(n),r=Math.sin(n);return function(u,i,o,a){var c=o*t;null!=u?(u=pr(e,u),i=pr(e,i),(o>0?i>u:u>i)&&(u+=o*Aa)):(u=n+o*Aa,i=n-.5*c);for(var l,s=u;o>0?s>i:i>s;s-=c)a.point((l=Me([e,-r*Math.cos(s),-r*Math.sin(s)]))[0],l[1])}}function pr(n,t){var e=pe(t);e[0]-=n,xe(e);var r=Q(-e[1]);return((-e[2]<0?-r:r)+2*Math.PI-Na)%(2*Math.PI)}function vr(n,t,e){var r=Bo.range(n,t-Na,e).concat(t);return function(n){return r.map(function(t){return[n,t]})}}function dr(n,t,e){var r=Bo.range(n,t-Na,e).concat(t);return function(n){return r.map(function(t){return[t,n]})}}function mr(n){return n.source}function yr(n){return n.target}function xr(n,t,e,r){var u=Math.cos(t),i=Math.sin(t),o=Math.cos(r),a=Math.sin(r),c=u*Math.cos(n),l=u*Math.sin(n),s=o*Math.cos(e),f=o*Math.sin(e),h=2*Math.asin(Math.sqrt(ut(r-t)+u*o*ut(e-n))),g=1/Math.sin(h),p=h?function(n){var t=Math.sin(n*=h)*g,e=Math.sin(h-n)*g,r=e*c+t*s,u=e*l+t*f,o=e*i+t*a;return[Math.atan2(u,r)*Ta,Math.atan2(o,Math.sqrt(r*r+u*u))*Ta]}:function(){return[n*Ta,t*Ta]};return p.distance=h,p}function Mr(){function n(n,u){var i=Math.sin(u*=La),o=Math.cos(u),a=ca((n*=La)-t),c=Math.cos(a);Fc+=Math.atan2(Math.sqrt((a=o*Math.sin(a))*a+(a=r*i-e*o*c)*a),e*i+r*o*c),t=n,e=i,r=o}var t,e,r;Hc.point=function(u,i){t=u*La,e=Math.sin(i*=La),r=Math.cos(i),Hc.point=n},Hc.lineEnd=function(){Hc.point=Hc.lineEnd=y}}function _r(n,t){function e(t,e){var r=Math.cos(t),u=Math.cos(e),i=n(r*u);return[i*u*Math.sin(t),i*Math.sin(e)]}return e.invert=function(n,e){var r=Math.sqrt(n*n+e*e),u=t(r),i=Math.sin(u),o=Math.cos(u);return[Math.atan2(n*i,r*o),Math.asin(r&&e*i/r)]},e}function br(n,t){function e(n,t){o>0?-Ca+Na>t&&(t=-Ca+Na):t>Ca-Na&&(t=Ca-Na);var e=o/Math.pow(u(t),i);return[e*Math.sin(i*n),o-e*Math.cos(i*n)]}var r=Math.cos(n),u=function(n){return Math.tan(Ea/4+n/2)},i=n===t?Math.sin(n):Math.log(r/Math.cos(t))/Math.log(u(t)/u(n)),o=r*Math.pow(u(n),i)/i;return i?(e.invert=function(n,t){var e=o-t,r=G(i)*Math.sqrt(n*n+e*e);return[Math.atan2(n,e)/i,2*Math.atan(Math.pow(o/r,1/i))-Ca]},e):Sr}function wr(n,t){function e(n,t){var e=i-t;return[e*Math.sin(u*n),i-e*Math.cos(u*n)]}var r=Math.cos(n),u=n===t?Math.sin(n):(r-Math.cos(t))/(t-n),i=r/u+n;return ca(u)u;u++){for(;r>1&&K(n[e[r-2]],n[e[r-1]],n[u])<=0;)--r;e[r++]=u}return e.slice(0,r)}function zr(n,t){return n[0]-t[0]||n[1]-t[1]}function Lr(n,t,e){return(e[0]-t[0])*(n[1]-t[1])<(e[1]-t[1])*(n[0]-t[0])}function Tr(n,t,e,r){var u=n[0],i=e[0],o=t[0]-u,a=r[0]-i,c=n[1],l=e[1],s=t[1]-c,f=r[1]-l,h=(a*(c-l)-f*(u-i))/(f*o-a*s);return[u+h*o,c+h*s]}function qr(n){var t=n[0],e=n[n.length-1];return!(t[0]-e[0]||t[1]-e[1])}function Rr(){tu(this),this.edge=this.site=this.circle=null}function Dr(n){var t=Kc.pop()||new Rr;return t.site=n,t}function Pr(n){Xr(n),Wc.remove(n),Kc.push(n),tu(n)}function Ur(n){var t=n.circle,e=t.x,r=t.cy,u={x:e,y:r},i=n.P,o=n.N,a=[n];Pr(n);for(var 
c=i;c.circle&&ca(e-c.circle.x)s;++s)l=a[s],c=a[s-1],Kr(l.edge,c.site,l.site,u);c=a[0],l=a[f-1],l.edge=Jr(c.site,l.site,null,u),Vr(c),Vr(l)}function jr(n){for(var t,e,r,u,i=n.x,o=n.y,a=Wc._;a;)if(r=Fr(a,o)-i,r>Na)a=a.L;else{if(u=i-Hr(a,o),!(u>Na)){r>-Na?(t=a.P,e=a):u>-Na?(t=a,e=a.N):t=e=a;break}if(!a.R){t=a;break}a=a.R}var c=Dr(n);if(Wc.insert(t,c),t||e){if(t===e)return Xr(t),e=Dr(t.site),Wc.insert(c,e),c.edge=e.edge=Jr(t.site,c.site),Vr(t),Vr(e),void 0;if(!e)return c.edge=Jr(t.site,c.site),void 0;Xr(t),Xr(e);var l=t.site,s=l.x,f=l.y,h=n.x-s,g=n.y-f,p=e.site,v=p.x-s,d=p.y-f,m=2*(h*d-g*v),y=h*h+g*g,x=v*v+d*d,M={x:(d*y-g*x)/m+s,y:(h*x-v*y)/m+f};Kr(e.edge,l,p,M),c.edge=Jr(l,n,null,M),e.edge=Jr(n,p,null,M),Vr(t),Vr(e)}}function Fr(n,t){var e=n.site,r=e.x,u=e.y,i=u-t;if(!i)return r;var o=n.P;if(!o)return-1/0;e=o.site;var a=e.x,c=e.y,l=c-t;if(!l)return a;var s=a-r,f=1/i-1/l,h=s/l;return f?(-h+Math.sqrt(h*h-2*f*(s*s/(-2*l)-c+l/2+u-i/2)))/f+r:(r+a)/2}function Hr(n,t){var e=n.N;if(e)return Fr(e,t);var r=n.site;return r.y===t?r.x:1/0}function Or(n){this.site=n,this.edges=[]}function Yr(n){for(var t,e,r,u,i,o,a,c,l,s,f=n[0][0],h=n[1][0],g=n[0][1],p=n[1][1],v=Bc,d=v.length;d--;)if(i=v[d],i&&i.prepare())for(a=i.edges,c=a.length,o=0;c>o;)s=a[o].end(),r=s.x,u=s.y,l=a[++o%c].start(),t=l.x,e=l.y,(ca(r-t)>Na||ca(u-e)>Na)&&(a.splice(o,0,new Qr(Gr(i.site,s,ca(r-f)Na?{x:f,y:ca(t-f)Na?{x:ca(e-p)Na?{x:h,y:ca(t-h)Na?{x:ca(e-g)=-za)){var g=c*c+l*l,p=s*s+f*f,v=(f*g-l*p)/h,d=(c*p-s*g)/h,f=d+a,m=Qc.pop()||new Zr;m.arc=n,m.site=u,m.x=v+o,m.y=f+Math.sqrt(v*v+d*d),m.cy=f,n.circle=m;for(var y=null,x=Gc._;x;)if(m.yd||d>=a)return;if(h>p){if(i){if(i.y>=l)return}else i={x:d,y:c};e={x:d,y:l}}else{if(i){if(i.yr||r>1)if(h>p){if(i){if(i.y>=l)return}else i={x:(c-u)/r,y:c};e={x:(l-u)/r,y:l}}else{if(i){if(i.yg){if(i){if(i.x>=a)return}else i={x:o,y:r*o+u};e={x:a,y:r*a+u}}else{if(i){if(i.xi&&(u=t.slice(i,u),a[o]?a[o]+=u:a[++o]=u),(e=e[0])===(r=r[0])?a[o]?a[o]+=r:a[++o]=r:(a[++o]=null,c.push({i:o,x:pu(e,r)})),i=el.lastIndex;return ir;++r)a[(e=c[r]).i]=e.x(n);return a.join("")})}function du(n,t){for(var e,r=Bo.interpolators.length;--r>=0&&!(e=Bo.interpolators[r](n,t)););return e}function mu(n,t){var e,r=[],u=[],i=n.length,o=t.length,a=Math.min(n.length,t.length);for(e=0;a>e;++e)r.push(du(n[e],t[e]));for(;i>e;++e)u[e]=n[e];for(;o>e;++e)u[e]=t[e];return function(n){for(e=0;a>e;++e)u[e]=r[e](n);return u}}function yu(n){return function(t){return 0>=t?0:t>=1?1:n(t)}}function xu(n){return function(t){return 1-n(1-t)}}function Mu(n){return function(t){return.5*(.5>t?n(2*t):2-n(2-2*t))}}function _u(n){return n*n}function bu(n){return n*n*n}function wu(n){if(0>=n)return 0;if(n>=1)return 1;var t=n*n,e=t*n;return 4*(.5>n?e:3*(n-t)+e-.75)}function Su(n){return function(t){return Math.pow(t,n)}}function ku(n){return 1-Math.cos(n*Ca)}function Eu(n){return Math.pow(2,10*(n-1))}function Au(n){return 1-Math.sqrt(1-n*n)}function Cu(n,t){var e;return arguments.length<2&&(t=.45),arguments.length?e=t/Aa*Math.asin(1/n):(n=1,e=t/4),function(r){return 1+n*Math.pow(2,-10*r)*Math.sin((r-e)*Aa/t)}}function Nu(n){return n||(n=1.70158),function(t){return t*t*((n+1)*t-n)}}function zu(n){return 1/2.75>n?7.5625*n*n:2/2.75>n?7.5625*(n-=1.5/2.75)*n+.75:2.5/2.75>n?7.5625*(n-=2.25/2.75)*n+.9375:7.5625*(n-=2.625/2.75)*n+.984375}function Lu(n,t){n=Bo.hcl(n),t=Bo.hcl(t);var e=n.h,r=n.c,u=n.l,i=t.h-e,o=t.c-r,a=t.l-u;return isNaN(o)&&(o=0,r=isNaN(r)?t.c:r),isNaN(i)?(i=0,e=isNaN(e)?t.h:e):i>180?i-=360:-180>i&&(i+=360),function(n){return lt(e+i*n,r+o*n,u+a*n)+""}}function 
Tu(n,t){n=Bo.hsl(n),t=Bo.hsl(t);var e=n.h,r=n.s,u=n.l,i=t.h-e,o=t.s-r,a=t.l-u;return isNaN(o)&&(o=0,r=isNaN(r)?t.s:r),isNaN(i)?(i=0,e=isNaN(e)?t.h:e):i>180?i-=360:-180>i&&(i+=360),function(n){return at(e+i*n,r+o*n,u+a*n)+""}}function qu(n,t){n=Bo.lab(n),t=Bo.lab(t);var e=n.l,r=n.a,u=n.b,i=t.l-e,o=t.a-r,a=t.b-u;return function(n){return ft(e+i*n,r+o*n,u+a*n)+""}}function Ru(n,t){return t-=n,function(e){return Math.round(n+t*e)}}function Du(n){var t=[n.a,n.b],e=[n.c,n.d],r=Uu(t),u=Pu(t,e),i=Uu(ju(e,t,-u))||0;t[0]*e[1]180?s+=360:s-l>180&&(l+=360),u.push({i:r.push(r.pop()+"rotate(",null,")")-2,x:pu(l,s)})):s&&r.push(r.pop()+"rotate("+s+")"),f!=h?u.push({i:r.push(r.pop()+"skewX(",null,")")-2,x:pu(f,h)}):h&&r.push(r.pop()+"skewX("+h+")"),g[0]!=p[0]||g[1]!=p[1]?(e=r.push(r.pop()+"scale(",null,",",null,")"),u.push({i:e-4,x:pu(g[0],p[0])},{i:e-2,x:pu(g[1],p[1])})):(1!=p[0]||1!=p[1])&&r.push(r.pop()+"scale("+p+")"),e=u.length,function(n){for(var t,i=-1;++i=0;)e.push(u[r])}function Ku(n,t){for(var e=[n],r=[];null!=(n=e.pop());)if(r.push(n),(i=n.children)&&(u=i.length))for(var u,i,o=-1;++oe;++e)(t=n[e][1])>u&&(r=e,u=t);return r}function li(n){return n.reduce(si,0)}function si(n,t){return n+t[1]}function fi(n,t){return hi(n,Math.ceil(Math.log(t.length)/Math.LN2+1))}function hi(n,t){for(var e=-1,r=+n[0],u=(n[1]-r)/t,i=[];++e<=t;)i[e]=u*e+r;return i}function gi(n){return[Bo.min(n),Bo.max(n)]}function pi(n,t){return n.value-t.value}function vi(n,t){var e=n._pack_next;n._pack_next=t,t._pack_prev=n,t._pack_next=e,e._pack_prev=t}function di(n,t){n._pack_next=t,t._pack_prev=n}function mi(n,t){var e=t.x-n.x,r=t.y-n.y,u=n.r+t.r;return.999*u*u>e*e+r*r}function yi(n){function t(n){s=Math.min(n.x-n.r,s),f=Math.max(n.x+n.r,f),h=Math.min(n.y-n.r,h),g=Math.max(n.y+n.r,g)}if((e=n.children)&&(l=e.length)){var e,r,u,i,o,a,c,l,s=1/0,f=-1/0,h=1/0,g=-1/0;if(e.forEach(xi),r=e[0],r.x=-r.r,r.y=0,t(r),l>1&&(u=e[1],u.x=u.r,u.y=0,t(u),l>2))for(i=e[2],bi(r,u,i),t(i),vi(r,i),r._pack_prev=i,vi(i,u),u=r._pack_next,o=3;l>o;o++){bi(r,u,i=e[o]);var p=0,v=1,d=1;for(a=u._pack_next;a!==u;a=a._pack_next,v++)if(mi(a,i)){p=1;break}if(1==p)for(c=r._pack_prev;c!==a._pack_prev&&!mi(c,i);c=c._pack_prev,d++);p?(d>v||v==d&&u.ro;o++)i=e[o],i.x-=m,i.y-=y,x=Math.max(x,i.r+Math.sqrt(i.x*i.x+i.y*i.y));n.r=x,e.forEach(Mi)}}function xi(n){n._pack_next=n._pack_prev=n}function Mi(n){delete n._pack_next,delete n._pack_prev}function _i(n,t,e,r){var u=n.children;if(n.x=t+=r*n.x,n.y=e+=r*n.y,n.r*=r,u)for(var i=-1,o=u.length;++i=0;)t=u[i],t.z+=e,t.m+=e,e+=t.s+(r+=t.c)}function Ci(n,t,e){return n.a.parent===t.parent?n.a:e}function Ni(n){return 1+Bo.max(n,function(n){return n.y})}function zi(n){return n.reduce(function(n,t){return n+t.x},0)/n.length}function Li(n){var t=n.children;return t&&t.length?Li(t[0]):n}function Ti(n){var t,e=n.children;return e&&(t=e.length)?Ti(e[t-1]):n}function qi(n){return{x:n.x,y:n.y,dx:n.dx,dy:n.dy}}function Ri(n,t){var e=n.x+t[3],r=n.y+t[0],u=n.dx-t[1]-t[3],i=n.dy-t[0]-t[2];return 0>u&&(e+=u/2,u=0),0>i&&(r+=i/2,i=0),{x:e,y:r,dx:u,dy:i}}function Di(n){var t=n[0],e=n[n.length-1];return e>t?[t,e]:[e,t]}function Pi(n){return n.rangeExtent?n.rangeExtent():Di(n.range())}function Ui(n,t,e,r){var u=e(n[0],n[1]),i=r(t[0],t[1]);return function(n){return i(u(n))}}function ji(n,t){var e,r=0,u=n.length-1,i=n[r],o=n[u];return i>o&&(e=r,r=u,u=e,e=i,i=o,o=e),n[r]=t.floor(i),n[u]=t.ceil(o),n}function Fi(n){return n?{floor:function(t){return Math.floor(t/n)*n},ceil:function(t){return Math.ceil(t/n)*n}}:gl}function Hi(n,t,e,r){var 
u=[],i=[],o=0,a=Math.min(n.length,t.length)-1;for(n[a]2?Hi:Ui,c=r?Ou:Hu;return o=u(n,t,c,e),a=u(t,n,c,du),i}function i(n){return o(n)}var o,a;return i.invert=function(n){return a(n)},i.domain=function(t){return arguments.length?(n=t.map(Number),u()):n},i.range=function(n){return arguments.length?(t=n,u()):t},i.rangeRound=function(n){return i.range(n).interpolate(Ru)},i.clamp=function(n){return arguments.length?(r=n,u()):r},i.interpolate=function(n){return arguments.length?(e=n,u()):e},i.ticks=function(t){return Vi(n,t)},i.tickFormat=function(t,e){return Xi(n,t,e)},i.nice=function(t){return Ii(n,t),u()},i.copy=function(){return Oi(n,t,e,r)},u()}function Yi(n,t){return Bo.rebind(n,t,"range","rangeRound","interpolate","clamp")}function Ii(n,t){return ji(n,Fi(Zi(n,t)[2]))}function Zi(n,t){null==t&&(t=10);var e=Di(n),r=e[1]-e[0],u=Math.pow(10,Math.floor(Math.log(r/t)/Math.LN10)),i=t/r*u;return.15>=i?u*=10:.35>=i?u*=5:.75>=i&&(u*=2),e[0]=Math.ceil(e[0]/u)*u,e[1]=Math.floor(e[1]/u)*u+.5*u,e[2]=u,e}function Vi(n,t){return Bo.range.apply(Bo,Zi(n,t))}function Xi(n,t,e){var r=Zi(n,t);if(e){var u=tc.exec(e);if(u.shift(),"s"===u[8]){var i=Bo.formatPrefix(Math.max(ca(r[0]),ca(r[1])));return u[7]||(u[7]="."+$i(i.scale(r[2]))),u[8]="f",e=Bo.format(u.join("")),function(n){return e(i.scale(n))+i.symbol}}u[7]||(u[7]="."+Bi(u[8],r)),e=u.join("")}else e=",."+$i(r[2])+"f";return Bo.format(e)}function $i(n){return-Math.floor(Math.log(n)/Math.LN10+.01)}function Bi(n,t){var e=$i(t[2]);return n in pl?Math.abs(e-$i(Math.max(ca(t[0]),ca(t[1]))))+ +("e"!==n):e-2*("%"===n)}function Wi(n,t,e,r){function u(n){return(e?Math.log(0>n?0:n):-Math.log(n>0?0:-n))/Math.log(t)}function i(n){return e?Math.pow(t,n):-Math.pow(t,-n)}function o(t){return n(u(t))}return o.invert=function(t){return i(n.invert(t))},o.domain=function(t){return arguments.length?(e=t[0]>=0,n.domain((r=t.map(Number)).map(u)),o):r},o.base=function(e){return arguments.length?(t=+e,n.domain(r.map(u)),o):t},o.nice=function(){var t=ji(r.map(u),e?Math:dl);return n.domain(t),r=t.map(i),o},o.ticks=function(){var n=Di(r),o=[],a=n[0],c=n[1],l=Math.floor(u(a)),s=Math.ceil(u(c)),f=t%1?2:t;if(isFinite(s-l)){if(e){for(;s>l;l++)for(var h=1;f>h;h++)o.push(i(l)*h);o.push(i(l))}else for(o.push(i(l));l++0;h--)o.push(i(l)*h);for(l=0;o[l]c;s--);o=o.slice(l,s)}return o},o.tickFormat=function(n,t){if(!arguments.length)return vl;arguments.length<2?t=vl:"function"!=typeof t&&(t=Bo.format(t));var r,a=Math.max(.1,n/o.ticks().length),c=e?(r=1e-12,Math.ceil):(r=-1e-12,Math.floor);return function(n){return n/i(c(u(n)+r))<=a?t(n):""}},o.copy=function(){return Wi(n.copy(),t,e,r)},Yi(o,n)}function Ji(n,t,e){function r(t){return n(u(t))}var u=Gi(t),i=Gi(1/t);return r.invert=function(t){return i(n.invert(t))},r.domain=function(t){return arguments.length?(n.domain((e=t.map(Number)).map(u)),r):e},r.ticks=function(n){return Vi(e,n)},r.tickFormat=function(n,t){return Xi(e,n,t)},r.nice=function(n){return r.domain(Ii(e,n))},r.exponent=function(o){return arguments.length?(u=Gi(t=o),i=Gi(1/t),n.domain(e.map(u)),r):t},r.copy=function(){return Ji(n.copy(),t,e)},Yi(r,n)}function Gi(n){return function(t){return 0>t?-Math.pow(-t,n):Math.pow(t,n)}}function Ki(n,t){function e(e){return i[((u.get(e)||("range"===t.t?u.set(e,n.push(e)):0/0))-1)%i.length]}function r(t,e){return Bo.range(n.length).map(function(n){return t+e*n})}var u,i,o;return e.domain=function(r){if(!arguments.length)return n;n=[],u=new a;for(var i,o=-1,c=r.length;++on?[0/0,0/0]:[n>0?a[n-1]:r[0],nt?0/0:t/i+n,[t,t+1/i]},r.copy=function(){return 
no(n,t,e)},u()}function to(n,t){function e(e){return e>=e?t[Bo.bisect(n,e)]:void 0}return e.domain=function(t){return arguments.length?(n=t,e):n},e.range=function(n){return arguments.length?(t=n,e):t},e.invertExtent=function(e){return e=t.indexOf(e),[n[e-1],n[e]]},e.copy=function(){return to(n,t)},e}function eo(n){function t(n){return+n}return t.invert=t,t.domain=t.range=function(e){return arguments.length?(n=e.map(t),t):n},t.ticks=function(t){return Vi(n,t)},t.tickFormat=function(t,e){return Xi(n,t,e)},t.copy=function(){return eo(n)},t}function ro(n){return n.innerRadius}function uo(n){return n.outerRadius}function io(n){return n.startAngle}function oo(n){return n.endAngle}function ao(n){function t(t){function o(){l.push("M",i(n(s),a))}for(var c,l=[],s=[],f=-1,h=t.length,g=kt(e),p=kt(r);++f1&&u.push("H",r[0]),u.join("")}function fo(n){for(var t=0,e=n.length,r=n[0],u=[r[0],",",r[1]];++t1){a=t[1],i=n[c],c++,r+="C"+(u[0]+o[0])+","+(u[1]+o[1])+","+(i[0]-a[0])+","+(i[1]-a[1])+","+i[0]+","+i[1];for(var l=2;l9&&(u=3*t/Math.sqrt(u),o[a]=u*e,o[a+1]=u*r));for(a=-1;++a<=c;)u=(n[Math.min(c,a+1)][0]-n[Math.max(0,a-1)][0])/(6*(1+o[a]*o[a])),i.push([u||0,o[a]*u||0]);return i}function Co(n){return n.length<3?co(n):n[0]+mo(n,Ao(n))}function No(n){for(var t,e,r,u=-1,i=n.length;++ue?l():(u.active=e,i.event&&i.event.start.call(n,s,t),i.tween.forEach(function(e,r){(r=r.call(n,s,t))&&v.push(r) -}),Bo.timer(function(){return p.c=c(r||1)?Ae:c,1},0,o),void 0)}function c(r){if(u.active!==e)return l();for(var o=r/g,a=f(o),c=v.length;c>0;)v[--c].call(n,a);return o>=1?(i.event&&i.event.end.call(n,s,t),l()):void 0}function l(){return--u.count?delete u[e]:delete n.__transition__,1}var s=n.__data__,f=i.ease,h=i.delay,g=i.duration,p=Ka,v=[];return p.t=h+o,r>=h?a(r-h):(p.c=a,void 0)},0,o)}}function Oo(n,t,e){n.attr("transform",function(n){var r=t(n);return"translate("+(isFinite(r)?r:e(n))+",0)"})}function Yo(n,t,e){n.attr("transform",function(n){var r=t(n);return"translate(0,"+(isFinite(r)?r:e(n))+")"})}function Io(n){return n.toISOString()}function Zo(n,t,e){function r(t){return n(t)}function u(n,e){var r=n[1]-n[0],u=r/e,i=Bo.bisect(Ol,u);return i==Ol.length?[t.year,Zi(n.map(function(n){return n/31536e6}),e)[2]]:i?t[u/Ol[i-1]1?{floor:function(t){for(;e(t=n.floor(t));)t=Vo(t-1);return t},ceil:function(t){for(;e(t=n.ceil(t));)t=Vo(+t+1);return t}}:n))},r.ticks=function(n,t){var e=Di(r.domain()),i=null==n?u(e,10):"number"==typeof n?u(e,n):!n.range&&[{range:n},t];return i&&(n=i[0],t=i[1]),n.range(e[0],Vo(+e[1]+1),1>t?1:t)},r.tickFormat=function(){return e},r.copy=function(){return Zo(n.copy(),t,e)},Yi(r,n)}function Vo(n){return new Date(n)}function Xo(n){return JSON.parse(n.responseText)}function $o(n){var t=Go.createRange();return t.selectNode(Go.body),t.createContextualFragment(n.responseText)}var Bo={version:"3.4.13"};Date.now||(Date.now=function(){return+new Date});var Wo=[].slice,Jo=function(n){return Wo.call(n)},Go=document,Ko=Go.documentElement,Qo=window;try{Jo(Ko.childNodes)[0].nodeType}catch(na){Jo=function(n){for(var t=n.length,e=new Array(t);t--;)e[t]=n[t];return e}}try{Go.createElement("div").style.setProperty("opacity",0,"")}catch(ta){var ea=Qo.Element.prototype,ra=ea.setAttribute,ua=ea.setAttributeNS,ia=Qo.CSSStyleDeclaration.prototype,oa=ia.setProperty;ea.setAttribute=function(n,t){ra.call(this,n,t+"")},ea.setAttributeNS=function(n,t,e){ua.call(this,n,t,e+"")},ia.setProperty=function(n,t,e){oa.call(this,n,t+"",e)}}Bo.ascending=n,Bo.descending=function(n,t){return 
n>t?-1:t>n?1:t>=n?0:0/0},Bo.min=function(n,t){var e,r,u=-1,i=n.length;if(1===arguments.length){for(;++u=e);)e=void 0;for(;++ur&&(e=r)}else{for(;++u=e);)e=void 0;for(;++ur&&(e=r)}return e},Bo.max=function(n,t){var e,r,u=-1,i=n.length;if(1===arguments.length){for(;++u=e);)e=void 0;for(;++ue&&(e=r)}else{for(;++u=e);)e=void 0;for(;++ue&&(e=r)}return e},Bo.extent=function(n,t){var e,r,u,i=-1,o=n.length;if(1===arguments.length){for(;++i=e);)e=u=void 0;for(;++ir&&(e=r),r>u&&(u=r))}else{for(;++i=e);)e=void 0;for(;++ir&&(e=r),r>u&&(u=r))}return[e,u]},Bo.sum=function(n,t){var r,u=0,i=n.length,o=-1;if(1===arguments.length)for(;++or?0:r);r>e;)i[e]=[t=u,u=n[++e]];return i},Bo.zip=function(){if(!(r=arguments.length))return[];for(var n=-1,t=Bo.min(arguments,u),e=new Array(t);++n=0;)for(r=n[u],t=r.length;--t>=0;)e[--o]=r[t];return e};var ca=Math.abs;Bo.range=function(n,t,e){if(arguments.length<3&&(e=1,arguments.length<2&&(t=n,n=0)),1/0===(t-n)/e)throw new Error("infinite range");var r,u=[],o=i(ca(e)),a=-1;if(n*=o,t*=o,e*=o,0>e)for(;(r=n+e*++a)>t;)u.push(r/o);else for(;(r=n+e*++a)=i.length)return r?r.call(u,o):e?o.sort(e):o;for(var l,s,f,h,g=-1,p=o.length,v=i[c++],d=new a;++g=i.length)return n;var r=[],u=o[e++];return n.forEach(function(n,u){r.push({key:n,values:t(u,e)})}),u?r.sort(function(n,t){return u(n.key,t.key)}):r}var e,r,u={},i=[],o=[];return u.map=function(t,e){return n(e,t,0)},u.entries=function(e){return t(n(Bo.map,e,0),0)},u.key=function(n){return i.push(n),u},u.sortKeys=function(n){return o[i.length-1]=n,u},u.sortValues=function(n){return e=n,u},u.rollup=function(n){return r=n,u},u},Bo.set=function(n){var t=new v;if(n)for(var e=0,r=n.length;r>e;++e)t.add(n[e]);return t},o(v,{has:s,add:function(n){return this._[c(n+="")]=!0,n},remove:f,values:h,size:g,empty:p,forEach:function(n){for(var t in this._)n.call(this,l(t))}}),Bo.behavior={},Bo.rebind=function(n,t){for(var e,r=1,u=arguments.length;++r=0&&(r=n.slice(e+1),n=n.slice(0,e)),n)return arguments.length<2?this[n].on(r):this[n].on(r,t);if(2===arguments.length){if(null==t)for(n in this)this.hasOwnProperty(n)&&this[n].on(r,null);return this}},Bo.event=null,Bo.requote=function(n){return n.replace(ha,"\\$&")};var ha=/[\\\^\$\*\+\?\|\[\]\(\)\.\{\}]/g,ga={}.__proto__?function(n,t){n.__proto__=t}:function(n,t){for(var e in t)n[e]=t[e]},pa=function(n,t){return t.querySelector(n)},va=function(n,t){return t.querySelectorAll(n)},da=Ko.matches||Ko[m(Ko,"matchesSelector")],ma=function(n,t){return da.call(n,t)};"function"==typeof Sizzle&&(pa=function(n,t){return Sizzle(n,t)[0]||null},va=Sizzle,ma=Sizzle.matchesSelector),Bo.selection=function(){return _a};var ya=Bo.selection.prototype=[];ya.select=function(n){var t,e,r,u,i=[];n=k(n);for(var o=-1,a=this.length;++o=0&&(e=n.slice(0,t),n=n.slice(t+1)),xa.hasOwnProperty(e)?{space:xa[e],local:n}:n}},ya.attr=function(n,t){if(arguments.length<2){if("string"==typeof n){var e=this.node();return n=Bo.ns.qualify(n),n.local?e.getAttributeNS(n.space,n.local):e.getAttribute(n)}for(t in n)this.each(A(t,n[t]));return this}return this.each(A(n,t))},ya.classed=function(n,t){if(arguments.length<2){if("string"==typeof n){var e=this.node(),r=(n=z(n)).length,u=-1;if(t=e.classList){for(;++ur){if("string"!=typeof n){2>r&&(t="");for(e in n)this.each(q(e,n[e],t));return this}if(2>r)return Qo.getComputedStyle(this.node(),null).getPropertyValue(n);e=""}return this.each(q(n,t,e))},ya.property=function(n,t){if(arguments.length<2){if("string"==typeof n)return this.node()[n];for(t in n)this.each(R(t,n[t]));return this}return 
this.each(R(n,t))},ya.text=function(n){return arguments.length?this.each("function"==typeof n?function(){var t=n.apply(this,arguments);this.textContent=null==t?"":t}:null==n?function(){this.textContent=""}:function(){this.textContent=n}):this.node().textContent},ya.html=function(n){return arguments.length?this.each("function"==typeof n?function(){var t=n.apply(this,arguments);this.innerHTML=null==t?"":t}:null==n?function(){this.innerHTML=""}:function(){this.innerHTML=n}):this.node().innerHTML},ya.append=function(n){return n=D(n),this.select(function(){return this.appendChild(n.apply(this,arguments))})},ya.insert=function(n,t){return n=D(n),t=k(t),this.select(function(){return this.insertBefore(n.apply(this,arguments),t.apply(this,arguments)||null)})},ya.remove=function(){return this.each(function(){var n=this.parentNode;n&&n.removeChild(this)})},ya.data=function(n,t){function e(n,e){var r,u,i,o=n.length,f=e.length,h=Math.min(o,f),g=new Array(f),p=new Array(f),v=new Array(o);if(t){var d,m=new a,y=new Array(o);for(r=-1;++rr;++r)p[r]=P(e[r]);for(;o>r;++r)v[r]=n[r]}p.update=g,p.parentNode=g.parentNode=v.parentNode=n.parentNode,c.push(p),l.push(g),s.push(v)}var r,u,i=-1,o=this.length;if(!arguments.length){for(n=new Array(o=(r=this[0]).length);++ii;i++){u.push(t=[]),t.parentNode=(e=this[i]).parentNode;for(var a=0,c=e.length;c>a;a++)(r=e[a])&&n.call(r,r.__data__,a,i)&&t.push(r)}return S(u)},ya.order=function(){for(var n=-1,t=this.length;++n=0;)(e=r[u])&&(i&&i!==e.nextSibling&&i.parentNode.insertBefore(e,i),i=e);return this},ya.sort=function(n){n=j.apply(this,arguments);for(var t=-1,e=this.length;++tn;n++)for(var e=this[n],r=0,u=e.length;u>r;r++){var i=e[r];if(i)return i}return null},ya.size=function(){var n=0;return F(this,function(){++n}),n};var Ma=[];Bo.selection.enter=H,Bo.selection.enter.prototype=Ma,Ma.append=ya.append,Ma.empty=ya.empty,Ma.node=ya.node,Ma.call=ya.call,Ma.size=ya.size,Ma.select=function(n){for(var t,e,r,u,i,o=[],a=-1,c=this.length;++ar){if("string"!=typeof n){2>r&&(t=!1);for(e in n)this.each(I(e,n[e],t));return this}if(2>r)return(r=this.node()["__on"+n])&&r._;e=!1}return this.each(I(n,t,e))};var ba=Bo.map({mouseenter:"mouseover",mouseleave:"mouseout"});ba.forEach(function(n){"on"+n in Go&&ba.remove(n)});var wa="onselectstart"in Go?null:m(Ko.style,"userSelect"),Sa=0;Bo.mouse=function(n){return $(n,b())};var ka=/WebKit/.test(Qo.navigator.userAgent)?-1:0;Bo.touch=function(n,t,e){if(arguments.length<3&&(e=t,t=b().changedTouches),t)for(var r,u=0,i=t.length;i>u;++u)if((r=t[u]).identifier===e)return $(n,r)},Bo.behavior.drag=function(){function n(){this.on("mousedown.drag",u).on("touchstart.drag",i)}function t(n,t,u,i,o){return function(){function a(){var n,e,r=t(h,v);r&&(n=r[0]-x[0],e=r[1]-x[1],p|=n|e,x=r,g({type:"drag",x:r[0]+l[0],y:r[1]+l[1],dx:n,dy:e}))}function c(){t(h,v)&&(m.on(i+d,null).on(o+d,null),y(p&&Bo.event.target===f),g({type:"dragend"}))}var l,s=this,f=Bo.event.target,h=s.parentNode,g=e.of(s,arguments),p=0,v=n(),d=".drag"+(null==v?"":"-"+v),m=Bo.select(u()).on(i+d,a).on(o+d,c),y=X(),x=t(h,v);r?(l=r.apply(s,arguments),l=[l.x-x[0],l.y-x[1]]):l=[0,0],g({type:"dragstart"})}}var e=w(n,"drag","dragstart","dragend"),r=null,u=t(y,Bo.mouse,J,"mousemove","mouseup"),i=t(B,Bo.touch,W,"touchmove","touchend");return n.origin=function(t){return arguments.length?(r=t,n):r},Bo.rebind(n,e,"on")},Bo.touches=function(n,t){return arguments.length<2&&(t=b().touches),t?Jo(t).map(function(t){var e=$(n,t);return e.identifier=t.identifier,e}):[]};var 
Ea=Math.PI,Aa=2*Ea,Ca=Ea/2,Na=1e-6,za=Na*Na,La=Ea/180,Ta=180/Ea,qa=Math.SQRT2,Ra=2,Da=4;Bo.interpolateZoom=function(n,t){function e(n){var t=n*y;if(m){var e=et(v),o=i/(Ra*h)*(e*rt(qa*t+v)-tt(v));return[r+o*l,u+o*s,i*e/et(qa*t+v)]}return[r+n*l,u+n*s,i*Math.exp(qa*t)]}var r=n[0],u=n[1],i=n[2],o=t[0],a=t[1],c=t[2],l=o-r,s=a-u,f=l*l+s*s,h=Math.sqrt(f),g=(c*c-i*i+Da*f)/(2*i*Ra*h),p=(c*c-i*i-Da*f)/(2*c*Ra*h),v=Math.log(Math.sqrt(g*g+1)-g),d=Math.log(Math.sqrt(p*p+1)-p),m=d-v,y=(m||Math.log(c/i))/qa;return e.duration=1e3*y,e},Bo.behavior.zoom=function(){function n(n){n.on(A,l).on(ja+".zoom",f).on("dblclick.zoom",h).on(z,s)}function t(n){return[(n[0]-S.x)/S.k,(n[1]-S.y)/S.k]}function e(n){return[n[0]*S.k+S.x,n[1]*S.k+S.y]}function r(n){S.k=Math.max(E[0],Math.min(E[1],n))}function u(n,t){t=e(t),S.x+=n[0]-t[0],S.y+=n[1]-t[1]}function i(){x&&x.domain(y.range().map(function(n){return(n-S.x)/S.k}).map(y.invert)),b&&b.domain(M.range().map(function(n){return(n-S.y)/S.k}).map(M.invert))}function o(n){n({type:"zoomstart"})}function a(n){i(),n({type:"zoom",scale:S.k,translate:[S.x,S.y]})}function c(n){n({type:"zoomend"})}function l(){function n(){s=1,u(Bo.mouse(r),h),a(l)}function e(){f.on(C,null).on(N,null),g(s&&Bo.event.target===i),c(l)}var r=this,i=Bo.event.target,l=L.of(r,arguments),s=0,f=Bo.select(Qo).on(C,n).on(N,e),h=t(Bo.mouse(r)),g=X();Y.call(r),o(l)}function s(){function n(){var n=Bo.touches(g);return h=S.k,n.forEach(function(n){n.identifier in v&&(v[n.identifier]=t(n))}),n}function e(){var t=Bo.event.target;Bo.select(t).on(x,i).on(M,f),b.push(t);for(var e=Bo.event.changedTouches,o=0,c=e.length;c>o;++o)v[e[o].identifier]=null;var l=n(),s=Date.now();if(1===l.length){if(500>s-m){var h=l[0],g=v[h.identifier];r(2*S.k),u(h,g),_(),a(p)}m=s}else if(l.length>1){var h=l[0],y=l[1],w=h[0]-y[0],k=h[1]-y[1];d=w*w+k*k}}function i(){for(var n,t,e,i,o=Bo.touches(g),c=0,l=o.length;l>c;++c,i=null)if(e=o[c],i=v[e.identifier]){if(t)break;n=e,t=i}if(i){var s=(s=e[0]-n[0])*s+(s=e[1]-n[1])*s,f=d&&Math.sqrt(s/d);n=[(n[0]+e[0])/2,(n[1]+e[1])/2],t=[(t[0]+i[0])/2,(t[1]+i[1])/2],r(f*h)}m=null,u(n,t),a(p)}function f(){if(Bo.event.touches.length){for(var t=Bo.event.changedTouches,e=0,r=t.length;r>e;++e)delete v[t[e].identifier];for(var u in v)return void n()}Bo.selectAll(b).on(y,null),w.on(A,l).on(z,s),k(),c(p)}var h,g=this,p=L.of(g,arguments),v={},d=0,y=".zoom-"+Bo.event.changedTouches[0].identifier,x="touchmove"+y,M="touchend"+y,b=[],w=Bo.select(g),k=X();Y.call(g),e(),o(p),w.on(A,null).on(z,e)}function f(){var n=L.of(this,arguments);d?clearTimeout(d):(g=t(p=v||Bo.mouse(this)),Y.call(this),o(n)),d=setTimeout(function(){d=null,c(n)},50),_(),r(Math.pow(2,.002*Pa())*S.k),u(p,g),a(n)}function h(){var n=L.of(this,arguments),e=Bo.mouse(this),i=t(e),l=Math.log(S.k)/Math.LN2;o(n),r(Math.pow(2,Bo.event.shiftKey?Math.ceil(l)-1:Math.floor(l)+1)),u(e,i),a(n),c(n)}var g,p,v,d,m,y,x,M,b,S={x:0,y:0,k:1},k=[960,500],E=Ua,A="mousedown.zoom",C="mousemove.zoom",N="mouseup.zoom",z="touchstart.zoom",L=w(n,"zoomstart","zoom","zoomend");return n.event=function(n){n.each(function(){var n=L.of(this,arguments),t=S;Cl?Bo.select(this).transition().each("start.zoom",function(){S=this.__chart__||{x:0,y:0,k:1},o(n)}).tween("zoom:zoom",function(){var e=k[0],r=k[1],u=e/2,i=r/2,o=Bo.interpolateZoom([(u-S.x)/S.k,(i-S.y)/S.k,e/S.k],[(u-t.x)/t.k,(i-t.y)/t.k,e/t.k]);return function(t){var r=o(t),c=e/r[2];this.__chart__=S={x:u-r[0]*c,y:i-r[1]*c,k:c},a(n)}}).each("end.zoom",function(){c(n)}):(this.__chart__=S,o(n),a(n),c(n))})},n.translate=function(t){return 
arguments.length?(S={x:+t[0],y:+t[1],k:S.k},i(),n):[S.x,S.y]},n.scale=function(t){return arguments.length?(S={x:S.x,y:S.y,k:+t},i(),n):S.k},n.scaleExtent=function(t){return arguments.length?(E=null==t?Ua:[+t[0],+t[1]],n):E},n.center=function(t){return arguments.length?(v=t&&[+t[0],+t[1]],n):v},n.size=function(t){return arguments.length?(k=t&&[+t[0],+t[1]],n):k},n.x=function(t){return arguments.length?(x=t,y=t.copy(),S={x:0,y:0,k:1},n):x},n.y=function(t){return arguments.length?(b=t,M=t.copy(),S={x:0,y:0,k:1},n):b},Bo.rebind(n,L,"on")};var Pa,Ua=[0,1/0],ja="onwheel"in Go?(Pa=function(){return-Bo.event.deltaY*(Bo.event.deltaMode?120:1)},"wheel"):"onmousewheel"in Go?(Pa=function(){return Bo.event.wheelDelta},"mousewheel"):(Pa=function(){return-Bo.event.detail},"MozMousePixelScroll");Bo.color=it,it.prototype.toString=function(){return this.rgb()+""},Bo.hsl=ot;var Fa=ot.prototype=new it;Fa.brighter=function(n){return n=Math.pow(.7,arguments.length?n:1),new ot(this.h,this.s,this.l/n)},Fa.darker=function(n){return n=Math.pow(.7,arguments.length?n:1),new ot(this.h,this.s,n*this.l)},Fa.rgb=function(){return at(this.h,this.s,this.l)},Bo.hcl=ct;var Ha=ct.prototype=new it;Ha.brighter=function(n){return new ct(this.h,this.c,Math.min(100,this.l+Oa*(arguments.length?n:1)))},Ha.darker=function(n){return new ct(this.h,this.c,Math.max(0,this.l-Oa*(arguments.length?n:1)))},Ha.rgb=function(){return lt(this.h,this.c,this.l).rgb()},Bo.lab=st;var Oa=18,Ya=.95047,Ia=1,Za=1.08883,Va=st.prototype=new it;Va.brighter=function(n){return new st(Math.min(100,this.l+Oa*(arguments.length?n:1)),this.a,this.b)},Va.darker=function(n){return new st(Math.max(0,this.l-Oa*(arguments.length?n:1)),this.a,this.b)},Va.rgb=function(){return ft(this.l,this.a,this.b)},Bo.rgb=dt;var Xa=dt.prototype=new it;Xa.brighter=function(n){n=Math.pow(.7,arguments.length?n:1);var t=this.r,e=this.g,r=this.b,u=30;return t||e||r?(t&&u>t&&(t=u),e&&u>e&&(e=u),r&&u>r&&(r=u),new dt(Math.min(255,t/n),Math.min(255,e/n),Math.min(255,r/n))):new dt(u,u,u)},Xa.darker=function(n){return n=Math.pow(.7,arguments.length?n:1),new dt(n*this.r,n*this.g,n*this.b)},Xa.hsl=function(){return _t(this.r,this.g,this.b)},Xa.toString=function(){return"#"+xt(this.r)+xt(this.g)+xt(this.b)};var 
$a=Bo.map({aliceblue:15792383,antiquewhite:16444375,aqua:65535,aquamarine:8388564,azure:15794175,beige:16119260,bisque:16770244,black:0,blanchedalmond:16772045,blue:255,blueviolet:9055202,brown:10824234,burlywood:14596231,cadetblue:6266528,chartreuse:8388352,chocolate:13789470,coral:16744272,cornflowerblue:6591981,cornsilk:16775388,crimson:14423100,cyan:65535,darkblue:139,darkcyan:35723,darkgoldenrod:12092939,darkgray:11119017,darkgreen:25600,darkgrey:11119017,darkkhaki:12433259,darkmagenta:9109643,darkolivegreen:5597999,darkorange:16747520,darkorchid:10040012,darkred:9109504,darksalmon:15308410,darkseagreen:9419919,darkslateblue:4734347,darkslategray:3100495,darkslategrey:3100495,darkturquoise:52945,darkviolet:9699539,deeppink:16716947,deepskyblue:49151,dimgray:6908265,dimgrey:6908265,dodgerblue:2003199,firebrick:11674146,floralwhite:16775920,forestgreen:2263842,fuchsia:16711935,gainsboro:14474460,ghostwhite:16316671,gold:16766720,goldenrod:14329120,gray:8421504,green:32768,greenyellow:11403055,grey:8421504,honeydew:15794160,hotpink:16738740,indianred:13458524,indigo:4915330,ivory:16777200,khaki:15787660,lavender:15132410,lavenderblush:16773365,lawngreen:8190976,lemonchiffon:16775885,lightblue:11393254,lightcoral:15761536,lightcyan:14745599,lightgoldenrodyellow:16448210,lightgray:13882323,lightgreen:9498256,lightgrey:13882323,lightpink:16758465,lightsalmon:16752762,lightseagreen:2142890,lightskyblue:8900346,lightslategray:7833753,lightslategrey:7833753,lightsteelblue:11584734,lightyellow:16777184,lime:65280,limegreen:3329330,linen:16445670,magenta:16711935,maroon:8388608,mediumaquamarine:6737322,mediumblue:205,mediumorchid:12211667,mediumpurple:9662683,mediumseagreen:3978097,mediumslateblue:8087790,mediumspringgreen:64154,mediumturquoise:4772300,mediumvioletred:13047173,midnightblue:1644912,mintcream:16121850,mistyrose:16770273,moccasin:16770229,navajowhite:16768685,navy:128,oldlace:16643558,olive:8421376,olivedrab:7048739,orange:16753920,orangered:16729344,orchid:14315734,palegoldenrod:15657130,palegreen:10025880,paleturquoise:11529966,palevioletred:14381203,papayawhip:16773077,peachpuff:16767673,peru:13468991,pink:16761035,plum:14524637,powderblue:11591910,purple:8388736,red:16711680,rosybrown:12357519,royalblue:4286945,saddlebrown:9127187,salmon:16416882,sandybrown:16032864,seagreen:3050327,seashell:16774638,sienna:10506797,silver:12632256,skyblue:8900331,slateblue:6970061,slategray:7372944,slategrey:7372944,snow:16775930,springgreen:65407,steelblue:4620980,tan:13808780,teal:32896,thistle:14204888,tomato:16737095,turquoise:4251856,violet:15631086,wheat:16113331,white:16777215,whitesmoke:16119285,yellow:16776960,yellowgreen:10145074});$a.forEach(function(n,t){$a.set(n,mt(t))}),Bo.functor=kt,Bo.xhr=At(Et),Bo.dsv=function(n,t){function e(n,e,i){arguments.length<3&&(i=e,e=null);var o=Ct(n,t,null==e?r:u(e),i);return o.row=function(n){return arguments.length?o.response(null==(e=n)?r:u(n)):e},o}function r(n){return e.parse(n.responseText)}function u(n){return function(t){return e.parse(t.responseText,n)}}function i(t){return t.map(o).join(n)}function o(n){return a.test(n)?'"'+n.replace(/\"/g,'""')+'"':n}var a=new RegExp('["'+n+"\n]"),c=n.charCodeAt(0);return e.parse=function(n,t){var r;return e.parseRows(n,function(n,e){if(r)return r(n,e-1);var u=new Function("d","return {"+n.map(function(n,t){return JSON.stringify(n)+": d["+t+"]"}).join(",")+"}");r=t?function(n,e){return t(u(n),e)}:u})},e.parseRows=function(n,t){function e(){if(s>=l)return o;if(u)return u=!1,i;var 
t=s;if(34===n.charCodeAt(t)){for(var e=t;e++<l;)if(34===n.charCodeAt(e)){if(34!==n.charCodeAt(e+1))break;++e}s=e+2;var r=n.charCodeAt(e+1);return 13===r?(u=!0,10===n.charCodeAt(e+2)&&++s):10===r&&(u=!0),n.slice(t+1,e).replace(/""/g,'"')}for(;l>s;){var r=n.charCodeAt(s++),a=1;if(10===r)u=!0;else if(13===r)u=!0,10===n.charCodeAt(s)&&(++s,++a);else if(r!==c)continue;return n.slice(t,s-a)}return n.slice(t)}for(var r,u,i={},o={},a=[],l=n.length,s=0,f=0;(r=e())!==o;){for(var h=[];r!==i&&r!==o;)h.push(r),r=e();t&&null==(h=t(h,f++))||a.push(h)}return a},e.format=function(t){if(Array.isArray(t[0]))return e.formatRows(t);var r=new v,u=[];return t.forEach(function(n){for(var t in n)r.has(t)||u.push(r.add(t))}),[u.map(o).join(n)].concat(t.map(function(t){return u.map(function(n){return o(t[n])}).join(n)})).join("\n")},e.formatRows=function(n){return n.map(i).join("\n")},e},Bo.csv=Bo.dsv(",","text/csv"),Bo.tsv=Bo.dsv("\t","text/tab-separated-values");var Ba,Wa,Ja,Ga,Ka,Qa=Qo[m(Qo,"requestAnimationFrame")]||function(n){setTimeout(n,17)};Bo.timer=function(n,t,e){var r=arguments.length;2>r&&(t=0),3>r&&(e=Date.now());var u=e+t,i={c:n,t:u,f:!1,n:null};Wa?Wa.n=i:Ba=i,Wa=i,Ja||(Ga=clearTimeout(Ga),Ja=1,Qa(Lt))},Bo.timer.flush=function(){Tt(),qt()},Bo.round=function(n,t){return t?Math.round(n*(t=Math.pow(10,t)))/t:Math.round(n)};var nc=["y","z","a","f","p","n","\xb5","m","","k","M","G","T","P","E","Z","Y"].map(Dt);Bo.formatPrefix=function(n,t){var e=0;return n&&(0>n&&(n*=-1),t&&(n=Bo.round(n,Rt(n,t))),e=1+Math.floor(1e-12+Math.log(n)/Math.LN10),e=Math.max(-24,Math.min(24,3*Math.floor((e-1)/3)))),nc[8+e/3]};var tc=/(?:([^{])?([<>=^]))?([+\- ])?([$#])?(0)?(\d+)?(,)?(\.-?\d+)?([a-z%])?/i,ec=Bo.map({b:function(n){return n.toString(2)},c:function(n){return String.fromCharCode(n)},o:function(n){return n.toString(8)},x:function(n){return n.toString(16)},X:function(n){return n.toString(16).toUpperCase()},g:function(n,t){return n.toPrecision(t)},e:function(n,t){return n.toExponential(t)},f:function(n,t){return n.toFixed(t)},r:function(n,t){return(n=Bo.round(n,Rt(n,t))).toFixed(Math.max(0,Math.min(20,Rt(n*(1+1e-15),t))))}}),rc=Bo.time={},uc=Date;jt.prototype={getDate:function(){return this._.getUTCDate()},getDay:function(){return this._.getUTCDay()},getFullYear:function(){return this._.getUTCFullYear()},getHours:function(){return this._.getUTCHours()},getMilliseconds:function(){return this._.getUTCMilliseconds()},getMinutes:function(){return this._.getUTCMinutes()},getMonth:function(){return this._.getUTCMonth()},getSeconds:function(){return this._.getUTCSeconds()},getTime:function(){return this._.getTime()},getTimezoneOffset:function(){return 0},valueOf:function(){return this._.valueOf()},setDate:function(){ic.setUTCDate.apply(this._,arguments)},setDay:function(){ic.setUTCDay.apply(this._,arguments)},setFullYear:function(){ic.setUTCFullYear.apply(this._,arguments)},setHours:function(){ic.setUTCHours.apply(this._,arguments)},setMilliseconds:function(){ic.setUTCMilliseconds.apply(this._,arguments)},setMinutes:function(){ic.setUTCMinutes.apply(this._,arguments)},setMonth:function(){ic.setUTCMonth.apply(this._,arguments)},setSeconds:function(){ic.setUTCSeconds.apply(this._,arguments)},setTime:function(){ic.setTime.apply(this._,arguments)}};var ic=Date.prototype;rc.year=Ft(function(n){return n=rc.day(n),n.setMonth(0,1),n},function(n,t){n.setFullYear(n.getFullYear()+t)},function(n){return n.getFullYear()}),rc.years=rc.year.range,rc.years.utc=rc.year.utc.range,rc.day=Ft(function(n){var t=new uc(2e3,0);return t.setFullYear(n.getFullYear(),n.getMonth(),n.getDate()),t},function(n,t){n.setDate(n.getDate()+t)},function(n){return n.getDate()-1}),rc.days=rc.day.range,rc.days.utc=rc.day.utc.range,rc.dayOfYear=function(n){var
t=rc.year(n);return Math.floor((n-t-6e4*(n.getTimezoneOffset()-t.getTimezoneOffset()))/864e5)},["sunday","monday","tuesday","wednesday","thursday","friday","saturday"].forEach(function(n,t){t=7-t;var e=rc[n]=Ft(function(n){return(n=rc.day(n)).setDate(n.getDate()-(n.getDay()+t)%7),n},function(n,t){n.setDate(n.getDate()+7*Math.floor(t))},function(n){var e=rc.year(n).getDay();return Math.floor((rc.dayOfYear(n)+(e+t)%7)/7)-(e!==t)});rc[n+"s"]=e.range,rc[n+"s"].utc=e.utc.range,rc[n+"OfYear"]=function(n){var e=rc.year(n).getDay();return Math.floor((rc.dayOfYear(n)+(e+t)%7)/7)}}),rc.week=rc.sunday,rc.weeks=rc.sunday.range,rc.weeks.utc=rc.sunday.utc.range,rc.weekOfYear=rc.sundayOfYear;var oc={"-":"",_:" ",0:"0"},ac=/^\s*\d+/,cc=/^%/;Bo.locale=function(n){return{numberFormat:Pt(n),timeFormat:Ot(n)}};var lc=Bo.locale({decimal:".",thousands:",",grouping:[3],currency:["$",""],dateTime:"%a %b %e %X %Y",date:"%m/%d/%Y",time:"%H:%M:%S",periods:["AM","PM"],days:["Sunday","Monday","Tuesday","Wednesday","Thursday","Friday","Saturday"],shortDays:["Sun","Mon","Tue","Wed","Thu","Fri","Sat"],months:["January","February","March","April","May","June","July","August","September","October","November","December"],shortMonths:["Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"]});Bo.format=lc.numberFormat,Bo.geo={},ce.prototype={s:0,t:0,add:function(n){le(n,this.t,sc),le(sc.s,this.s,this),this.s?this.t+=sc.t:this.s=sc.t},reset:function(){this.s=this.t=0},valueOf:function(){return this.s}};var sc=new ce;Bo.geo.stream=function(n,t){n&&fc.hasOwnProperty(n.type)?fc[n.type](n,t):se(n,t)};var fc={Feature:function(n,t){se(n.geometry,t)},FeatureCollection:function(n,t){for(var e=n.features,r=-1,u=e.length;++rn?4*Ea+n:n,vc.lineStart=vc.lineEnd=vc.point=y}};Bo.geo.bounds=function(){function n(n,t){x.push(M=[s=n,h=n]),f>t&&(f=t),t>g&&(g=t)}function t(t,e){var r=pe([t*La,e*La]);if(m){var u=de(m,r),i=[u[1],-u[0],0],o=de(i,u);xe(o),o=Me(o);var c=t-p,l=c>0?1:-1,v=o[0]*Ta*l,d=ca(c)>180;if(d^(v>l*p&&l*t>v)){var y=o[1]*Ta;y>g&&(g=y)}else if(v=(v+360)%360-180,d^(v>l*p&&l*t>v)){var y=-o[1]*Ta;f>y&&(f=y)}else f>e&&(f=e),e>g&&(g=e);d?p>t?a(s,t)>a(s,h)&&(h=t):a(t,h)>a(s,h)&&(s=t):h>=s?(s>t&&(s=t),t>h&&(h=t)):t>p?a(s,t)>a(s,h)&&(h=t):a(t,h)>a(s,h)&&(s=t)}else n(t,e);m=r,p=t}function e(){_.point=t}function r(){M[0]=s,M[1]=h,_.point=n,m=null}function u(n,e){if(m){var r=n-p;y+=ca(r)>180?r+(r>0?360:-360):r}else v=n,d=e;vc.point(n,e),t(n,e)}function i(){vc.lineStart()}function o(){u(v,d),vc.lineEnd(),ca(y)>Na&&(s=-(h=180)),M[0]=s,M[1]=h,m=null}function a(n,t){return(t-=n)<0?t+360:t}function c(n,t){return n[0]-t[0]}function l(n,t){return t[0]<=t[1]?t[0]<=n&&n<=t[1]:npc?(s=-(h=180),f=-(g=90)):y>Na?g=90:-Na>y&&(f=-90),M[0]=s,M[1]=h}};return function(n){g=h=-(s=f=1/0),x=[],Bo.geo.stream(n,_); -var t=x.length;if(t){x.sort(c);for(var e,r=1,u=x[0],i=[u];t>r;++r)e=x[r],l(e[0],u)||l(e[1],u)?(a(u[0],e[1])>a(u[0],u[1])&&(u[1]=e[1]),a(e[0],u[1])>a(u[0],u[1])&&(u[0]=e[0])):i.push(u=e);for(var o,e,p=-1/0,t=i.length-1,r=0,u=i[t];t>=r;u=e,++r)e=i[r],(o=a(u[1],e[0]))>p&&(p=o,s=e[0],h=u[1])}return x=M=null,1/0===s||1/0===f?[[0/0,0/0],[0/0,0/0]]:[[s,f],[h,g]]}}(),Bo.geo.centroid=function(n){dc=mc=yc=xc=Mc=_c=bc=wc=Sc=kc=Ec=0,Bo.geo.stream(n,Ac);var t=Sc,e=kc,r=Ec,u=t*t+e*e+r*r;return za>u&&(t=_c,e=bc,r=wc,Na>mc&&(t=yc,e=xc,r=Mc),u=t*t+e*e+r*r,za>u)?[0/0,0/0]:[Math.atan2(e,t)*Ta,nt(r/Math.sqrt(u))*Ta]};var 
dc,mc,yc,xc,Mc,_c,bc,wc,Sc,kc,Ec,Ac={sphere:y,point:be,lineStart:Se,lineEnd:ke,polygonStart:function(){Ac.lineStart=Ee},polygonEnd:function(){Ac.lineStart=Se}},Cc=Le(Ae,De,Ue,[-Ea,-Ea/2]),Nc=1e9;Bo.geo.clipExtent=function(){var n,t,e,r,u,i,o={stream:function(n){return u&&(u.valid=!1),u=i(n),u.valid=!0,u},extent:function(a){return arguments.length?(i=Oe(n=+a[0][0],t=+a[0][1],e=+a[1][0],r=+a[1][1]),u&&(u.valid=!1,u=null),o):[[n,t],[e,r]]}};return o.extent([[0,0],[960,500]])},(Bo.geo.conicEqualArea=function(){return Ie(Ze)}).raw=Ze,Bo.geo.albers=function(){return Bo.geo.conicEqualArea().rotate([96,0]).center([-.6,38.7]).parallels([29.5,45.5]).scale(1070)},Bo.geo.albersUsa=function(){function n(n){var i=n[0],o=n[1];return t=null,e(i,o),t||(r(i,o),t)||u(i,o),t}var t,e,r,u,i=Bo.geo.albers(),o=Bo.geo.conicEqualArea().rotate([154,0]).center([-2,58.5]).parallels([55,65]),a=Bo.geo.conicEqualArea().rotate([157,0]).center([-3,19.9]).parallels([8,18]),c={point:function(n,e){t=[n,e]}};return n.invert=function(n){var t=i.scale(),e=i.translate(),r=(n[0]-e[0])/t,u=(n[1]-e[1])/t;return(u>=.12&&.234>u&&r>=-.425&&-.214>r?o:u>=.166&&.234>u&&r>=-.214&&-.115>r?a:i).invert(n)},n.stream=function(n){var t=i.stream(n),e=o.stream(n),r=a.stream(n);return{point:function(n,u){t.point(n,u),e.point(n,u),r.point(n,u)},sphere:function(){t.sphere(),e.sphere(),r.sphere()},lineStart:function(){t.lineStart(),e.lineStart(),r.lineStart()},lineEnd:function(){t.lineEnd(),e.lineEnd(),r.lineEnd()},polygonStart:function(){t.polygonStart(),e.polygonStart(),r.polygonStart()},polygonEnd:function(){t.polygonEnd(),e.polygonEnd(),r.polygonEnd()}}},n.precision=function(t){return arguments.length?(i.precision(t),o.precision(t),a.precision(t),n):i.precision()},n.scale=function(t){return arguments.length?(i.scale(t),o.scale(.35*t),a.scale(t),n.translate(i.translate())):i.scale()},n.translate=function(t){if(!arguments.length)return i.translate();var l=i.scale(),s=+t[0],f=+t[1];return e=i.translate(t).clipExtent([[s-.455*l,f-.238*l],[s+.455*l,f+.238*l]]).stream(c).point,r=o.translate([s-.307*l,f+.201*l]).clipExtent([[s-.425*l+Na,f+.12*l+Na],[s-.214*l-Na,f+.234*l-Na]]).stream(c).point,u=a.translate([s-.205*l,f+.212*l]).clipExtent([[s-.214*l+Na,f+.166*l+Na],[s-.115*l-Na,f+.234*l-Na]]).stream(c).point,n},n.scale(1070)};var zc,Lc,Tc,qc,Rc,Dc,Pc={point:y,lineStart:y,lineEnd:y,polygonStart:function(){Lc=0,Pc.lineStart=Ve},polygonEnd:function(){Pc.lineStart=Pc.lineEnd=Pc.point=y,zc+=ca(Lc/2)}},Uc={point:Xe,lineStart:y,lineEnd:y,polygonStart:y,polygonEnd:y},jc={point:We,lineStart:Je,lineEnd:Ge,polygonStart:function(){jc.lineStart=Ke},polygonEnd:function(){jc.point=We,jc.lineStart=Je,jc.lineEnd=Ge}};Bo.geo.path=function(){function n(n){return n&&("function"==typeof a&&i.pointRadius(+a.apply(this,arguments)),o&&o.valid||(o=u(i)),Bo.geo.stream(n,o)),i.result()}function t(){return o=null,n}var e,r,u,i,o,a=4.5;return n.area=function(n){return zc=0,Bo.geo.stream(n,u(Pc)),zc},n.centroid=function(n){return yc=xc=Mc=_c=bc=wc=Sc=kc=Ec=0,Bo.geo.stream(n,u(jc)),Ec?[Sc/Ec,kc/Ec]:wc?[_c/wc,bc/wc]:Mc?[yc/Mc,xc/Mc]:[0/0,0/0]},n.bounds=function(n){return Rc=Dc=-(Tc=qc=1/0),Bo.geo.stream(n,u(Uc)),[[Tc,qc],[Rc,Dc]]},n.projection=function(n){return arguments.length?(u=(e=n)?n.stream||tr(n):Et,t()):e},n.context=function(n){return arguments.length?(i=null==(r=n)?new $e:new Qe(n),"function"!=typeof a&&i.pointRadius(a),t()):r},n.pointRadius=function(t){return arguments.length?(a="function"==typeof 
t?t:(i.pointRadius(+t),+t),n):a},n.projection(Bo.geo.albersUsa()).context(null)},Bo.geo.transform=function(n){return{stream:function(t){var e=new er(t);for(var r in n)e[r]=n[r];return e}}},er.prototype={point:function(n,t){this.stream.point(n,t)},sphere:function(){this.stream.sphere()},lineStart:function(){this.stream.lineStart()},lineEnd:function(){this.stream.lineEnd()},polygonStart:function(){this.stream.polygonStart()},polygonEnd:function(){this.stream.polygonEnd()}},Bo.geo.projection=ur,Bo.geo.projectionMutator=ir,(Bo.geo.equirectangular=function(){return ur(ar)}).raw=ar.invert=ar,Bo.geo.rotation=function(n){function t(t){return t=n(t[0]*La,t[1]*La),t[0]*=Ta,t[1]*=Ta,t}return n=lr(n[0]%360*La,n[1]*La,n.length>2?n[2]*La:0),t.invert=function(t){return t=n.invert(t[0]*La,t[1]*La),t[0]*=Ta,t[1]*=Ta,t},t},cr.invert=ar,Bo.geo.circle=function(){function n(){var n="function"==typeof r?r.apply(this,arguments):r,t=lr(-n[0]*La,-n[1]*La,0).invert,u=[];return e(null,null,1,{point:function(n,e){u.push(n=t(n,e)),n[0]*=Ta,n[1]*=Ta}}),{type:"Polygon",coordinates:[u]}}var t,e,r=[0,0],u=6;return n.origin=function(t){return arguments.length?(r=t,n):r},n.angle=function(r){return arguments.length?(e=gr((t=+r)*La,u*La),n):t},n.precision=function(r){return arguments.length?(e=gr(t*La,(u=+r)*La),n):u},n.angle(90)},Bo.geo.distance=function(n,t){var e,r=(t[0]-n[0])*La,u=n[1]*La,i=t[1]*La,o=Math.sin(r),a=Math.cos(r),c=Math.sin(u),l=Math.cos(u),s=Math.sin(i),f=Math.cos(i);return Math.atan2(Math.sqrt((e=f*o)*e+(e=l*s-c*f*a)*e),c*s+l*f*a)},Bo.geo.graticule=function(){function n(){return{type:"MultiLineString",coordinates:t()}}function t(){return Bo.range(Math.ceil(i/d)*d,u,d).map(h).concat(Bo.range(Math.ceil(l/m)*m,c,m).map(g)).concat(Bo.range(Math.ceil(r/p)*p,e,p).filter(function(n){return ca(n%d)>Na}).map(s)).concat(Bo.range(Math.ceil(a/v)*v,o,v).filter(function(n){return ca(n%m)>Na}).map(f))}var e,r,u,i,o,a,c,l,s,f,h,g,p=10,v=p,d=90,m=360,y=2.5;return n.lines=function(){return t().map(function(n){return{type:"LineString",coordinates:n}})},n.outline=function(){return{type:"Polygon",coordinates:[h(i).concat(g(c).slice(1),h(u).reverse().slice(1),g(l).reverse().slice(1))]}},n.extent=function(t){return arguments.length?n.majorExtent(t).minorExtent(t):n.minorExtent()},n.majorExtent=function(t){return arguments.length?(i=+t[0][0],u=+t[1][0],l=+t[0][1],c=+t[1][1],i>u&&(t=i,i=u,u=t),l>c&&(t=l,l=c,c=t),n.precision(y)):[[i,l],[u,c]]},n.minorExtent=function(t){return arguments.length?(r=+t[0][0],e=+t[1][0],a=+t[0][1],o=+t[1][1],r>e&&(t=r,r=e,e=t),a>o&&(t=a,a=o,o=t),n.precision(y)):[[r,a],[e,o]]},n.step=function(t){return arguments.length?n.majorStep(t).minorStep(t):n.minorStep()},n.majorStep=function(t){return arguments.length?(d=+t[0],m=+t[1],n):[d,m]},n.minorStep=function(t){return arguments.length?(p=+t[0],v=+t[1],n):[p,v]},n.precision=function(t){return arguments.length?(y=+t,s=vr(a,o,90),f=dr(r,e,y),h=vr(l,c,90),g=dr(i,u,y),n):y},n.majorExtent([[-180,-90+Na],[180,90-Na]]).minorExtent([[-180,-80-Na],[180,80+Na]])},Bo.geo.greatArc=function(){function n(){return{type:"LineString",coordinates:[t||r.apply(this,arguments),e||u.apply(this,arguments)]}}var t,e,r=mr,u=yr;return n.distance=function(){return Bo.geo.distance(t||r.apply(this,arguments),e||u.apply(this,arguments))},n.source=function(e){return arguments.length?(r=e,t="function"==typeof e?null:e,n):r},n.target=function(t){return arguments.length?(u=t,e="function"==typeof t?null:t,n):u},n.precision=function(){return 
arguments.length?n:0},n},Bo.geo.interpolate=function(n,t){return xr(n[0]*La,n[1]*La,t[0]*La,t[1]*La)},Bo.geo.length=function(n){return Fc=0,Bo.geo.stream(n,Hc),Fc};var Fc,Hc={sphere:y,point:y,lineStart:Mr,lineEnd:y,polygonStart:y,polygonEnd:y},Oc=_r(function(n){return Math.sqrt(2/(1+n))},function(n){return 2*Math.asin(n/2)});(Bo.geo.azimuthalEqualArea=function(){return ur(Oc)}).raw=Oc;var Yc=_r(function(n){var t=Math.acos(n);return t&&t/Math.sin(t)},Et);(Bo.geo.azimuthalEquidistant=function(){return ur(Yc)}).raw=Yc,(Bo.geo.conicConformal=function(){return Ie(br)}).raw=br,(Bo.geo.conicEquidistant=function(){return Ie(wr)}).raw=wr;var Ic=_r(function(n){return 1/n},Math.atan);(Bo.geo.gnomonic=function(){return ur(Ic)}).raw=Ic,Sr.invert=function(n,t){return[n,2*Math.atan(Math.exp(t))-Ca]},(Bo.geo.mercator=function(){return kr(Sr)}).raw=Sr;var Zc=_r(function(){return 1},Math.asin);(Bo.geo.orthographic=function(){return ur(Zc)}).raw=Zc;var Vc=_r(function(n){return 1/(1+n)},function(n){return 2*Math.atan(n)});(Bo.geo.stereographic=function(){return ur(Vc)}).raw=Vc,Er.invert=function(n,t){return[-t,2*Math.atan(Math.exp(n))-Ca]},(Bo.geo.transverseMercator=function(){var n=kr(Er),t=n.center,e=n.rotate;return n.center=function(n){return n?t([-n[1],n[0]]):(n=t(),[n[1],-n[0]])},n.rotate=function(n){return n?e([n[0],n[1],n.length>2?n[2]+90:90]):(n=e(),[n[0],n[1],n[2]-90])},e([0,0,90])}).raw=Er,Bo.geom={},Bo.geom.hull=function(n){function t(n){if(n.length<3)return[];var t,u=kt(e),i=kt(r),o=n.length,a=[],c=[];for(t=0;o>t;t++)a.push([+u.call(this,n[t],t),+i.call(this,n[t],t),t]);for(a.sort(zr),t=0;o>t;t++)c.push([a[t][0],-a[t][1]]);var l=Nr(a),s=Nr(c),f=s[0]===l[0],h=s[s.length-1]===l[l.length-1],g=[];for(t=l.length-1;t>=0;--t)g.push(n[a[l[t]][2]]);for(t=+f;t=r&&l.x<=i&&l.y>=u&&l.y<=o?[[r,o],[i,o],[i,u],[r,u]]:[];s.point=n[a]}),t}function e(n){return n.map(function(n,t){return{x:Math.round(i(n,t)/Na)*Na,y:Math.round(o(n,t)/Na)*Na,i:t}})}var r=Ar,u=Cr,i=r,o=u,a=nl;return n?t(n):(t.links=function(n){return iu(e(n)).edges.filter(function(n){return n.l&&n.r}).map(function(t){return{source:n[t.l.i],target:n[t.r.i]}})},t.triangles=function(n){var t=[];return iu(e(n)).cells.forEach(function(e,r){for(var u,i,o=e.site,a=e.edges.sort(Ir),c=-1,l=a.length,s=a[l-1].edge,f=s.l===o?s.r:s.l;++c=l,h=r>=s,g=(h<<1)+f;n.leaf=!1,n=n.nodes[g]||(n.nodes[g]=su()),f?u=l:a=l,h?o=s:c=s,i(n,t,e,r,u,o,a,c)}var s,f,h,g,p,v,d,m,y,x=kt(a),M=kt(c);if(null!=t)v=t,d=e,m=r,y=u;else if(m=y=-(v=d=1/0),f=[],h=[],p=n.length,o)for(g=0;p>g;++g)s=n[g],s.xm&&(m=s.x),s.y>y&&(y=s.y),f.push(s.x),h.push(s.y);else for(g=0;p>g;++g){var _=+x(s=n[g],g),b=+M(s,g);v>_&&(v=_),d>b&&(d=b),_>m&&(m=_),b>y&&(y=b),f.push(_),h.push(b)}var w=m-v,S=y-d;w>S?y=d+w:m=v+S;var k=su();if(k.add=function(n){i(k,n,+x(n,++g),+M(n,g),v,d,m,y)},k.visit=function(n){fu(n,k,v,d,m,y)},g=-1,null==t){for(;++g=0?n.slice(0,t):n,r=t>=0?n.slice(t+1):"in";return e=ul.get(e)||rl,r=il.get(r)||Et,yu(r(e.apply(null,Wo.call(arguments,1))))},Bo.interpolateHcl=Lu,Bo.interpolateHsl=Tu,Bo.interpolateLab=qu,Bo.interpolateRound=Ru,Bo.transform=function(n){var t=Go.createElementNS(Bo.ns.prefix.svg,"g");return(Bo.transform=function(n){if(null!=n){t.setAttribute("transform",n);var e=t.transform.baseVal.consolidate()}return new Du(e?e.matrix:ol)})(n)},Du.prototype.toString=function(){return"translate("+this.translate+")rotate("+this.rotate+")skewX("+this.skew+")scale("+this.scale+")"};var ol={a:1,b:0,c:0,d:1,e:0,f:0};Bo.interpolateTransform=Fu,Bo.layout={},Bo.layout.bundle=function(){return 
function(n){for(var t=[],e=-1,r=n.length;++ea*a/d){if(p>c){var l=t.charge/c;n.px-=i*l,n.py-=o*l}return!0}if(t.point&&c&&p>c){var l=t.pointCharge/c;n.px-=i*l,n.py-=o*l}}return!t.charge}}function t(n){n.px=Bo.event.x,n.py=Bo.event.y,a.resume()}var e,r,u,i,o,a={},c=Bo.dispatch("start","tick","end"),l=[1,1],s=.9,f=al,h=cl,g=-30,p=ll,v=.1,d=.64,m=[],y=[];return a.tick=function(){if((r*=.99)<.005)return c.end({type:"end",alpha:r=0}),!0;var t,e,a,f,h,p,d,x,M,_=m.length,b=y.length;for(e=0;b>e;++e)a=y[e],f=a.source,h=a.target,x=h.x-f.x,M=h.y-f.y,(p=x*x+M*M)&&(p=r*i[e]*((p=Math.sqrt(p))-u[e])/p,x*=p,M*=p,h.x-=x*(d=f.weight/(h.weight+f.weight)),h.y-=M*d,f.x+=x*(d=1-d),f.y+=M*d);if((d=r*v)&&(x=l[0]/2,M=l[1]/2,e=-1,d))for(;++e<_;)a=m[e],a.x+=(x-a.x)*d,a.y+=(M-a.y)*d;if(g)for(Wu(t=Bo.geom.quadtree(m),r,o),e=-1;++e<_;)(a=m[e]).fixed||t.visit(n(a));for(e=-1;++e<_;)a=m[e],a.fixed?(a.x=a.px,a.y=a.py):(a.x-=(a.px-(a.px=a.x))*s,a.y-=(a.py-(a.py=a.y))*s);c.tick({type:"tick",alpha:r})},a.nodes=function(n){return arguments.length?(m=n,a):m},a.links=function(n){return arguments.length?(y=n,a):y},a.size=function(n){return arguments.length?(l=n,a):l},a.linkDistance=function(n){return arguments.length?(f="function"==typeof n?n:+n,a):f},a.distance=a.linkDistance,a.linkStrength=function(n){return arguments.length?(h="function"==typeof n?n:+n,a):h},a.friction=function(n){return arguments.length?(s=+n,a):s},a.charge=function(n){return arguments.length?(g="function"==typeof n?n:+n,a):g},a.chargeDistance=function(n){return arguments.length?(p=n*n,a):Math.sqrt(p)},a.gravity=function(n){return arguments.length?(v=+n,a):v},a.theta=function(n){return arguments.length?(d=n*n,a):Math.sqrt(d)},a.alpha=function(n){return arguments.length?(n=+n,r?r=n>0?n:0:n>0&&(c.start({type:"start",alpha:r=n}),Bo.timer(a.tick)),a):r},a.start=function(){function n(n,r){if(!e){for(e=new Array(c),a=0;c>a;++a)e[a]=[];for(a=0;l>a;++a){var u=y[a];e[u.source.index].push(u.target),e[u.target.index].push(u.source)}}for(var i,o=e[t],a=-1,l=o.length;++at;++t)(r=m[t]).index=t,r.weight=0;for(t=0;s>t;++t)r=y[t],"number"==typeof r.source&&(r.source=m[r.source]),"number"==typeof r.target&&(r.target=m[r.target]),++r.source.weight,++r.target.weight;for(t=0;c>t;++t)r=m[t],isNaN(r.x)&&(r.x=n("x",p)),isNaN(r.y)&&(r.y=n("y",v)),isNaN(r.px)&&(r.px=r.x),isNaN(r.py)&&(r.py=r.y);if(u=[],"function"==typeof f)for(t=0;s>t;++t)u[t]=+f.call(this,y[t],t);else for(t=0;s>t;++t)u[t]=f;if(i=[],"function"==typeof h)for(t=0;s>t;++t)i[t]=+h.call(this,y[t],t);else for(t=0;s>t;++t)i[t]=h;if(o=[],"function"==typeof g)for(t=0;c>t;++t)o[t]=+g.call(this,m[t],t);else for(t=0;c>t;++t)o[t]=g;return a.resume()},a.resume=function(){return a.alpha(.1)},a.stop=function(){return a.alpha(0)},a.drag=function(){return e||(e=Bo.behavior.drag().origin(Et).on("dragstart.force",Vu).on("drag.force",t).on("dragend.force",Xu)),arguments.length?(this.on("mouseover.force",$u).on("mouseout.force",Bu).call(e),void 0):e},Bo.rebind(a,c,"on")};var al=20,cl=1,ll=1/0;Bo.layout.hierarchy=function(){function n(u){var i,o=[u],a=[];for(u.depth=0;null!=(i=o.pop());)if(a.push(i),(l=e.call(n,i,i.depth))&&(c=l.length)){for(var c,l,s;--c>=0;)o.push(s=l[c]),s.parent=i,s.depth=i.depth+1;r&&(i.value=0),i.children=l}else r&&(i.value=+r.call(n,i,i.depth)||0),delete i.children;return Ku(u,function(n){var e,u;t&&(e=n.children)&&e.sort(t),r&&(u=n.parent)&&(u.value+=n.value)}),a}var t=ti,e=Qu,r=ni;return n.sort=function(e){return arguments.length?(t=e,n):t},n.children=function(t){return 
arguments.length?(e=t,n):e},n.value=function(t){return arguments.length?(r=t,n):r},n.revalue=function(t){return r&&(Gu(t,function(n){n.children&&(n.value=0)}),Ku(t,function(t){var e;t.children||(t.value=+r.call(n,t,t.depth)||0),(e=t.parent)&&(e.value+=t.value)})),t},n},Bo.layout.partition=function(){function n(t,e,r,u){var i=t.children;if(t.x=e,t.y=t.depth*u,t.dx=r,t.dy=u,i&&(o=i.length)){var o,a,c,l=-1;for(r=t.value?r/t.value:0;++lp;++p)for(u.call(n,l[0][p],v=d[p],s[0][p][1]),g=1;h>g;++g)u.call(n,l[g][p],v+=s[g-1][p][1],s[g][p][1]);return a}var t=Et,e=oi,r=ai,u=ii,i=ri,o=ui;return n.values=function(e){return arguments.length?(t=e,n):t},n.order=function(t){return arguments.length?(e="function"==typeof t?t:fl.get(t)||oi,n):e},n.offset=function(t){return arguments.length?(r="function"==typeof t?t:hl.get(t)||ai,n):r},n.x=function(t){return arguments.length?(i=t,n):i},n.y=function(t){return arguments.length?(o=t,n):o},n.out=function(t){return arguments.length?(u=t,n):u},n};var fl=Bo.map({"inside-out":function(n){var t,e,r=n.length,u=n.map(ci),i=n.map(li),o=Bo.range(r).sort(function(n,t){return u[n]-u[t]}),a=0,c=0,l=[],s=[];for(t=0;r>t;++t)e=o[t],c>a?(a+=i[e],l.push(e)):(c+=i[e],s.push(e));return s.reverse().concat(l)},reverse:function(n){return Bo.range(n.length).reverse()},"default":oi}),hl=Bo.map({silhouette:function(n){var t,e,r,u=n.length,i=n[0].length,o=[],a=0,c=[];for(e=0;i>e;++e){for(t=0,r=0;u>t;t++)r+=n[t][e][1];r>a&&(a=r),o.push(r)}for(e=0;i>e;++e)c[e]=(a-o[e])/2;return c},wiggle:function(n){var t,e,r,u,i,o,a,c,l,s=n.length,f=n[0],h=f.length,g=[];for(g[0]=c=l=0,e=1;h>e;++e){for(t=0,u=0;s>t;++t)u+=n[t][e][1];for(t=0,i=0,a=f[e][0]-f[e-1][0];s>t;++t){for(r=0,o=(n[t][e][1]-n[t][e-1][1])/(2*a);t>r;++r)o+=(n[r][e][1]-n[r][e-1][1])/a;i+=o*n[t][e][1]}g[e]=c-=u?i/u*a:0,l>c&&(l=c)}for(e=0;h>e;++e)g[e]-=l;return g},expand:function(n){var t,e,r,u=n.length,i=n[0].length,o=1/u,a=[];for(e=0;i>e;++e){for(t=0,r=0;u>t;t++)r+=n[t][e][1];if(r)for(t=0;u>t;t++)n[t][e][1]/=r;else for(t=0;u>t;t++)n[t][e][1]=o}for(e=0;i>e;++e)a[e]=0;return a},zero:ai});Bo.layout.histogram=function(){function n(n,i){for(var o,a,c=[],l=n.map(e,this),s=r.call(this,l,i),f=u.call(this,s,l,i),i=-1,h=l.length,g=f.length-1,p=t?1:1/h;++i<g;)o=c[i]=[],o.dx=f[i+1]-(o.x=f[i]),o.y=0;if(g>0)for(i=-1;++i<h;)a=l[i],a>=s[0]&&a<=s[1]&&(o=c[Bo.bisect(f,a,1,g)-1],o.y+=p,o.push(n[i]));return c}var t=!0,e=Number,r=gi,u=fi;return n.value=function(t){return arguments.length?(e=t,n):e},n.range=function(t){return arguments.length?(r=kt(t),n):r},n.bins=function(t){return arguments.length?(u="number"==typeof t?function(n){return hi(n,t)}:kt(t),n):u},n.frequency=function(e){return arguments.length?(t=!!e,n):t},n},Bo.layout.pack=function(){function n(n,i){var o=e.call(this,n,i),a=o[0],c=u[0],l=u[1],s=null==t?Math.sqrt:"function"==typeof t?t:function(){return t};if(a.x=a.y=0,Ku(a,function(n){n.r=+s(n.value)}),Ku(a,yi),r){var f=r*(t?1:Math.max(2*a.r/c,2*a.r/l))/2;Ku(a,function(n){n.r+=f}),Ku(a,yi),Ku(a,function(n){n.r-=f})}return _i(a,c/2,l/2,t?1:1/Math.max(2*a.r/c,2*a.r/l)),o}var t,e=Bo.layout.hierarchy().sort(pi),r=0,u=[1,1];return n.size=function(t){return arguments.length?(u=t,n):u},n.radius=function(e){return arguments.length?(t=null==e||"function"==typeof e?e:+e,n):t},n.padding=function(t){return arguments.length?(r=+t,n):r},Ju(n,e)},Bo.layout.tree=function(){function n(n,u){var s=o.call(this,n,u),f=s[0],h=t(f);if(Ku(h,e),h.parent.m=-h.z,Gu(h,r),l)Gu(f,i);else{var g=f,p=f,v=f;Gu(f,function(n){n.x<g.x&&(g=n),n.x>p.x&&(p=n),n.depth>v.depth&&(v=n)});var
d=a(g,p)/2-g.x,m=c[0]/(p.x+a(p,g)/2+d),y=c[1]/(v.depth||1);Gu(f,function(n){n.x=(n.x+d)*m,n.y=n.depth*y})}return s}function t(n){for(var t,e={A:null,children:[n]},r=[e];null!=(t=r.pop());)for(var u,i=t.children,o=0,a=i.length;a>o;++o)r.push((i[o]=u={_:i[o],parent:t,children:(u=i[o].children)&&u.slice()||[],A:null,a:null,z:0,m:0,c:0,s:0,t:null,i:o}).a=u);return e.children[0]}function e(n){var t=n.children,e=n.parent.children,r=n.i?e[n.i-1]:null;if(t.length){Ai(n);var i=(t[0].z+t[t.length-1].z)/2;r?(n.z=r.z+a(n._,r._),n.m=n.z-i):n.z=i}else r&&(n.z=r.z+a(n._,r._));n.parent.A=u(n,r,n.parent.A||e[0])}function r(n){n._.x=n.z+n.parent.m,n.m+=n.parent.m}function u(n,t,e){if(t){for(var r,u=n,i=n,o=t,c=u.parent.children[0],l=u.m,s=i.m,f=o.m,h=c.m;o=ki(o),u=Si(u),o&&u;)c=Si(c),i=ki(i),i.a=n,r=o.z+f-u.z-l+a(o._,u._),r>0&&(Ei(Ci(o,n,e),n,r),l+=r,s+=r),f+=o.m,l+=u.m,h+=c.m,s+=i.m;o&&!ki(i)&&(i.t=o,i.m+=f-s),u&&!Si(c)&&(c.t=u,c.m+=l-h,e=n)}return e}function i(n){n.x*=c[0],n.y=n.depth*c[1]}var o=Bo.layout.hierarchy().sort(null).value(null),a=wi,c=[1,1],l=null;return n.separation=function(t){return arguments.length?(a=t,n):a},n.size=function(t){return arguments.length?(l=null==(c=t)?i:null,n):l?null:c},n.nodeSize=function(t){return arguments.length?(l=null==(c=t)?null:i,n):l?c:null},Ju(n,o)},Bo.layout.cluster=function(){function n(n,i){var o,a=t.call(this,n,i),c=a[0],l=0;Ku(c,function(n){var t=n.children;t&&t.length?(n.x=zi(t),n.y=Ni(t)):(n.x=o?l+=e(n,o):0,n.y=0,o=n)});var s=Li(c),f=Ti(c),h=s.x-e(s,f)/2,g=f.x+e(f,s)/2;return Ku(c,u?function(n){n.x=(n.x-c.x)*r[0],n.y=(c.y-n.y)*r[1]}:function(n){n.x=(n.x-h)/(g-h)*r[0],n.y=(1-(c.y?n.y/c.y:1))*r[1]}),a}var t=Bo.layout.hierarchy().sort(null).value(null),e=wi,r=[1,1],u=!1;return n.separation=function(t){return arguments.length?(e=t,n):e},n.size=function(t){return arguments.length?(u=null==(r=t),n):u?null:r},n.nodeSize=function(t){return arguments.length?(u=null!=(r=t),n):u?r:null},Ju(n,t)},Bo.layout.treemap=function(){function n(n,t){for(var e,r,u=-1,i=n.length;++u<i;)r=(e=n[u]).value*(0>t?0:t),e.area=isNaN(r)||0>=r?0:r}function t(e){var i=e.children;if(i&&i.length){var o,a,c,l=f(e),s=[],h=i.slice(),p=1/0,v="slice"===g?l.dx:"dice"===g?l.dy:"slice-dice"===g?1&e.depth?l.dy:l.dx:Math.min(l.dx,l.dy);for(n(h,l.dx*l.dy/e.value),s.area=0;(c=h.length)>0;)s.push(o=h[c-1]),s.area+=o.area,"squarify"!==g||(a=r(s,v))<=p?(h.pop(),p=a):(s.area-=s.pop().area,u(s,v,l,!1),v=Math.min(l.dx,l.dy),s.length=s.area=0,p=1/0);s.length&&(u(s,v,l,!0),s.length=s.area=0),i.forEach(t)}}function e(t){var r=t.children;if(r&&r.length){var i,o=f(t),a=r.slice(),c=[];for(n(a,o.dx*o.dy/t.value),c.area=0;i=a.pop();)c.push(i),c.area+=i.area,null!=i.z&&(u(c,i.z?o.dx:o.dy,o,!a.length),c.length=c.area=0);r.forEach(e)}}function r(n,t){for(var e,r=n.area,u=0,i=1/0,o=-1,a=n.length;++o<a;)(e=n[o].area)&&(i>e&&(i=e),e>u&&(u=e));return r*=r,t*=t,r?Math.max(t*u*p/r,r/(t*i*p)):1/0}function u(n,t,e,r){var u,i=-1,o=n.length,a=e.x,l=e.y,s=t?c(n.area/t):0;if(t==e.dx){for((r||s>e.dy)&&(s=e.dy);++i<o;)u=n[i],u.x=a,u.y=l,u.dy=s,a+=u.dx=Math.min(e.x+e.dx-a,s?c(u.area/s):0);u.z=!0,u.dx+=e.x+e.dx-a,e.y+=s,e.dy-=s}else{for((r||s>e.dx)&&(s=e.dx);++ie&&(t=1),1>e&&(n=0),function(){var e,r,u;do e=2*Math.random()-1,r=2*Math.random()-1,u=e*e+r*r;while(!u||u>1);return n+t*e*Math.sqrt(-2*Math.log(u)/u)}},logNormal:function(){var n=Bo.random.normal.apply(Bo,arguments);return function(){return Math.exp(n())}},bates:function(n){var t=Bo.random.irwinHall(n);return function(){return t()/n}},irwinHall:function(n){return function(){for(var t=0,e=0;n>e;e++)t+=Math.random();return t}}},Bo.scale={};var gl={floor:Et,ceil:Et};Bo.scale.linear=function(){return Oi([0,1],[0,1],du,!1)};var
pl={s:1,g:1,p:1,r:1,e:1};Bo.scale.log=function(){return Wi(Bo.scale.linear().domain([0,1]),10,!0,[1,10])};var vl=Bo.format(".0e"),dl={floor:function(n){return-Math.ceil(-n)},ceil:function(n){return-Math.floor(-n)}};Bo.scale.pow=function(){return Ji(Bo.scale.linear(),1,[0,1])},Bo.scale.sqrt=function(){return Bo.scale.pow().exponent(.5)},Bo.scale.ordinal=function(){return Ki([],{t:"range",a:[[]]})},Bo.scale.category10=function(){return Bo.scale.ordinal().range(ml)},Bo.scale.category20=function(){return Bo.scale.ordinal().range(yl)},Bo.scale.category20b=function(){return Bo.scale.ordinal().range(xl)},Bo.scale.category20c=function(){return Bo.scale.ordinal().range(Ml)};var ml=[2062260,16744206,2924588,14034728,9725885,9197131,14907330,8355711,12369186,1556175].map(yt),yl=[2062260,11454440,16744206,16759672,2924588,10018698,14034728,16750742,9725885,12955861,9197131,12885140,14907330,16234194,8355711,13092807,12369186,14408589,1556175,10410725].map(yt),xl=[3750777,5395619,7040719,10264286,6519097,9216594,11915115,13556636,9202993,12426809,15186514,15190932,8666169,11356490,14049643,15177372,8077683,10834324,13528509,14589654].map(yt),Ml=[3244733,7057110,10406625,13032431,15095053,16616764,16625259,16634018,3253076,7652470,10607003,13101504,7695281,10394312,12369372,14342891,6513507,9868950,12434877,14277081].map(yt);Bo.scale.quantile=function(){return Qi([],[]) -},Bo.scale.quantize=function(){return no(0,1,[0,1])},Bo.scale.threshold=function(){return to([.5],[0,1])},Bo.scale.identity=function(){return eo([0,1])},Bo.svg={},Bo.svg.arc=function(){function n(){var n=t.apply(this,arguments),i=e.apply(this,arguments),o=r.apply(this,arguments)+_l,a=u.apply(this,arguments)+_l,c=(o>a&&(c=o,o=a,a=c),a-o),l=Ea>c?"0":"1",s=Math.cos(o),f=Math.sin(o),h=Math.cos(a),g=Math.sin(a);return c>=bl?n?"M0,"+i+"A"+i+","+i+" 0 1,1 0,"+-i+"A"+i+","+i+" 0 1,1 0,"+i+"M0,"+n+"A"+n+","+n+" 0 1,0 0,"+-n+"A"+n+","+n+" 0 1,0 0,"+n+"Z":"M0,"+i+"A"+i+","+i+" 0 1,1 0,"+-i+"A"+i+","+i+" 0 1,1 0,"+i+"Z":n?"M"+i*s+","+i*f+"A"+i+","+i+" 0 "+l+",1 "+i*h+","+i*g+"L"+n*h+","+n*g+"A"+n+","+n+" 0 "+l+",0 "+n*s+","+n*f+"Z":"M"+i*s+","+i*f+"A"+i+","+i+" 0 "+l+",1 "+i*h+","+i*g+"L0,0"+"Z"}var t=ro,e=uo,r=io,u=oo;return n.innerRadius=function(e){return arguments.length?(t=kt(e),n):t},n.outerRadius=function(t){return arguments.length?(e=kt(t),n):e},n.startAngle=function(t){return arguments.length?(r=kt(t),n):r},n.endAngle=function(t){return arguments.length?(u=kt(t),n):u},n.centroid=function(){var n=(t.apply(this,arguments)+e.apply(this,arguments))/2,i=(r.apply(this,arguments)+u.apply(this,arguments))/2+_l;return[Math.cos(i)*n,Math.sin(i)*n]},n};var _l=-Ca,bl=Aa-Na;Bo.svg.line=function(){return ao(Et)};var wl=Bo.map({linear:co,"linear-closed":lo,step:so,"step-before":fo,"step-after":ho,basis:xo,"basis-open":Mo,"basis-closed":_o,bundle:bo,cardinal:vo,"cardinal-open":go,"cardinal-closed":po,monotone:Co});wl.forEach(function(n,t){t.key=n,t.closed=/-closed$/.test(n)});var Sl=[0,2/3,1/3,0],kl=[0,1/3,2/3,0],El=[0,1/6,2/3,1/6];Bo.svg.line.radial=function(){var n=ao(No);return n.radius=n.x,delete n.x,n.angle=n.y,delete n.y,n},fo.reverse=ho,ho.reverse=fo,Bo.svg.area=function(){return zo(Et)},Bo.svg.area.radial=function(){var n=zo(No);return n.radius=n.x,delete n.x,n.innerRadius=n.x0,delete n.x0,n.outerRadius=n.x1,delete n.x1,n.angle=n.y,delete n.y,n.startAngle=n.y0,delete n.y0,n.endAngle=n.y1,delete n.y1,n},Bo.svg.chord=function(){function n(n,a){var 
c=t(this,i,n,a),l=t(this,o,n,a);return"M"+c.p0+r(c.r,c.p1,c.a1-c.a0)+(e(c,l)?u(c.r,c.p1,c.r,c.p0):u(c.r,c.p1,l.r,l.p0)+r(l.r,l.p1,l.a1-l.a0)+u(l.r,l.p1,c.r,c.p0))+"Z"}function t(n,t,e,r){var u=t.call(n,e,r),i=a.call(n,u,r),o=c.call(n,u,r)+_l,s=l.call(n,u,r)+_l;return{r:i,a0:o,a1:s,p0:[i*Math.cos(o),i*Math.sin(o)],p1:[i*Math.cos(s),i*Math.sin(s)]}}function e(n,t){return n.a0==t.a0&&n.a1==t.a1}function r(n,t,e){return"A"+n+","+n+" 0 "+ +(e>Ea)+",1 "+t}function u(n,t,e,r){return"Q 0,0 "+r}var i=mr,o=yr,a=Lo,c=io,l=oo;return n.radius=function(t){return arguments.length?(a=kt(t),n):a},n.source=function(t){return arguments.length?(i=kt(t),n):i},n.target=function(t){return arguments.length?(o=kt(t),n):o},n.startAngle=function(t){return arguments.length?(c=kt(t),n):c},n.endAngle=function(t){return arguments.length?(l=kt(t),n):l},n},Bo.svg.diagonal=function(){function n(n,u){var i=t.call(this,n,u),o=e.call(this,n,u),a=(i.y+o.y)/2,c=[i,{x:i.x,y:a},{x:o.x,y:a},o];return c=c.map(r),"M"+c[0]+"C"+c[1]+" "+c[2]+" "+c[3]}var t=mr,e=yr,r=To;return n.source=function(e){return arguments.length?(t=kt(e),n):t},n.target=function(t){return arguments.length?(e=kt(t),n):e},n.projection=function(t){return arguments.length?(r=t,n):r},n},Bo.svg.diagonal.radial=function(){var n=Bo.svg.diagonal(),t=To,e=n.projection;return n.projection=function(n){return arguments.length?e(qo(t=n)):t},n},Bo.svg.symbol=function(){function n(n,r){return(Al.get(t.call(this,n,r))||Po)(e.call(this,n,r))}var t=Do,e=Ro;return n.type=function(e){return arguments.length?(t=kt(e),n):t},n.size=function(t){return arguments.length?(e=kt(t),n):e},n};var Al=Bo.map({circle:Po,cross:function(n){var t=Math.sqrt(n/5)/2;return"M"+-3*t+","+-t+"H"+-t+"V"+-3*t+"H"+t+"V"+-t+"H"+3*t+"V"+t+"H"+t+"V"+3*t+"H"+-t+"V"+t+"H"+-3*t+"Z"},diamond:function(n){var t=Math.sqrt(n/(2*Ll)),e=t*Ll;return"M0,"+-t+"L"+e+",0"+" 0,"+t+" "+-e+",0"+"Z"},square:function(n){var t=Math.sqrt(n)/2;return"M"+-t+","+-t+"L"+t+","+-t+" "+t+","+t+" "+-t+","+t+"Z"},"triangle-down":function(n){var t=Math.sqrt(n/zl),e=t*zl/2;return"M0,"+e+"L"+t+","+-e+" "+-t+","+-e+"Z"},"triangle-up":function(n){var t=Math.sqrt(n/zl),e=t*zl/2;return"M0,"+-e+"L"+t+","+e+" "+-t+","+e+"Z"}});Bo.svg.symbolTypes=Al.keys();var Cl,Nl,zl=Math.sqrt(3),Ll=Math.tan(30*La),Tl=[],ql=0;Tl.call=ya.call,Tl.empty=ya.empty,Tl.node=ya.node,Tl.size=ya.size,Bo.transition=function(n){return arguments.length?Cl?n.transition():n:_a.transition()},Bo.transition.prototype=Tl,Tl.select=function(n){var t,e,r,u=this.id,i=[];n=k(n);for(var o=-1,a=this.length;++oi;i++){u.push(t=[]);for(var e=this[i],a=0,c=e.length;c>a;a++)(r=e[a])&&n.call(r,r.__data__,a,i)&&t.push(r)}return Uo(u,this.id)},Tl.tween=function(n,t){var e=this.id;return arguments.length<2?this.node().__transition__[e].tween.get(n):F(this,null==t?function(t){t.__transition__[e].tween.remove(n)}:function(r){r.__transition__[e].tween.set(n,t)})},Tl.attr=function(n,t){function e(){this.removeAttribute(a)}function r(){this.removeAttributeNS(a.space,a.local)}function u(n){return null==n?e:(n+="",function(){var t,e=this.getAttribute(a);return e!==n&&(t=o(e,n),function(n){this.setAttribute(a,t(n))})})}function i(n){return null==n?r:(n+="",function(){var t,e=this.getAttributeNS(a.space,a.local);return e!==n&&(t=o(e,n),function(n){this.setAttributeNS(a.space,a.local,t(n))})})}if(arguments.length<2){for(t in n)this.attr(t,n[t]);return this}var o="transform"==n?Fu:du,a=Bo.ns.qualify(n);return jo(this,"attr."+n,t,a.local?i:u)},Tl.attrTween=function(n,t){function e(n,e){var 
r=t.call(this,n,e,this.getAttribute(u));return r&&function(n){this.setAttribute(u,r(n))}}function r(n,e){var r=t.call(this,n,e,this.getAttributeNS(u.space,u.local));return r&&function(n){this.setAttributeNS(u.space,u.local,r(n))}}var u=Bo.ns.qualify(n);return this.tween("attr."+n,u.local?r:e)},Tl.style=function(n,t,e){function r(){this.style.removeProperty(n)}function u(t){return null==t?r:(t+="",function(){var r,u=Qo.getComputedStyle(this,null).getPropertyValue(n);return u!==t&&(r=du(u,t),function(t){this.style.setProperty(n,r(t),e)})})}var i=arguments.length;if(3>i){if("string"!=typeof n){2>i&&(t="");for(e in n)this.style(e,n[e],t);return this}e=""}return jo(this,"style."+n,t,u)},Tl.styleTween=function(n,t,e){function r(r,u){var i=t.call(this,r,u,Qo.getComputedStyle(this,null).getPropertyValue(n));return i&&function(t){this.style.setProperty(n,i(t),e)}}return arguments.length<3&&(e=""),this.tween("style."+n,r)},Tl.text=function(n){return jo(this,"text",n,Fo)},Tl.remove=function(){return this.each("end.transition",function(){var n;this.__transition__.count<2&&(n=this.parentNode)&&n.removeChild(this)})},Tl.ease=function(n){var t=this.id;return arguments.length<1?this.node().__transition__[t].ease:("function"!=typeof n&&(n=Bo.ease.apply(Bo,arguments)),F(this,function(e){e.__transition__[t].ease=n}))},Tl.delay=function(n){var t=this.id;return arguments.length<1?this.node().__transition__[t].delay:F(this,"function"==typeof n?function(e,r,u){e.__transition__[t].delay=+n.call(e,e.__data__,r,u)}:(n=+n,function(e){e.__transition__[t].delay=n}))},Tl.duration=function(n){var t=this.id;return arguments.length<1?this.node().__transition__[t].duration:F(this,"function"==typeof n?function(e,r,u){e.__transition__[t].duration=Math.max(1,n.call(e,e.__data__,r,u))}:(n=Math.max(1,n),function(e){e.__transition__[t].duration=n}))},Tl.each=function(n,t){var e=this.id;if(arguments.length<2){var r=Nl,u=Cl;Cl=e,F(this,function(t,r,u){Nl=t.__transition__[e],n.call(t,t.__data__,r,u)}),Nl=r,Cl=u}else F(this,function(r){var u=r.__transition__[e];(u.event||(u.event=Bo.dispatch("start","end"))).on(n,t)});return this},Tl.transition=function(){for(var n,t,e,r,u=this.id,i=++ql,o=[],a=0,c=this.length;c>a;a++){o.push(n=[]);for(var t=this[a],l=0,s=t.length;s>l;l++)(e=t[l])&&(r=Object.create(e.__transition__[u]),r.delay+=r.duration,Ho(e,l,i,r)),n.push(e)}return Uo(o,i)},Bo.svg.axis=function(){function n(n){n.each(function(){var n,l=Bo.select(this),s=this.__chart__||e,f=this.__chart__=e.copy(),h=null==c?f.ticks?f.ticks.apply(f,a):f.domain():c,g=null==t?f.tickFormat?f.tickFormat.apply(f,a):Et:t,p=l.selectAll(".tick").data(h,f),v=p.enter().insert("g",".domain").attr("class","tick").style("opacity",Na),d=Bo.transition(p.exit()).style("opacity",Na).remove(),m=Bo.transition(p.order()).style("opacity",1),y=Math.max(u,0)+o,x=Pi(f),M=l.selectAll(".domain").data([0]),_=(M.enter().append("path").attr("class","domain"),Bo.transition(M));v.append("line"),v.append("text");var b,w,S,k,E=v.select("line"),A=m.select("line"),C=p.select("text").text(g),N=v.select("text"),z=m.select("text"),L="top"===r||"left"===r?-1:1;if("bottom"===r||"top"===r?(n=Oo,b="x",S="y",w="x2",k="y2",C.attr("dy",0>L?"0em":".71em").style("text-anchor","middle"),_.attr("d","M"+x[0]+","+L*i+"V0H"+x[1]+"V"+L*i)):(n=Yo,b="y",S="x",w="y2",k="x2",C.attr("dy",".32em").style("text-anchor",0>L?"end":"start"),_.attr("d","M"+L*i+","+x[0]+"H0V"+x[1]+"H"+L*i)),E.attr(k,L*u),N.attr(S,L*y),A.attr(w,0).attr(k,L*u),z.attr(b,0).attr(S,L*y),f.rangeBand){var 
T=f,q=T.rangeBand()/2;s=f=function(n){return T(n)+q}}else s.rangeBand?s=f:d.call(n,f,s);v.call(n,s,f),m.call(n,f,f)})}var t,e=Bo.scale.linear(),r=Rl,u=6,i=6,o=3,a=[10],c=null;return n.scale=function(t){return arguments.length?(e=t,n):e},n.orient=function(t){return arguments.length?(r=t in Dl?t+"":Rl,n):r},n.ticks=function(){return arguments.length?(a=arguments,n):a},n.tickValues=function(t){return arguments.length?(c=t,n):c},n.tickFormat=function(e){return arguments.length?(t=e,n):t},n.tickSize=function(t){var e=arguments.length;return e?(u=+t,i=+arguments[e-1],n):u},n.innerTickSize=function(t){return arguments.length?(u=+t,n):u},n.outerTickSize=function(t){return arguments.length?(i=+t,n):i},n.tickPadding=function(t){return arguments.length?(o=+t,n):o},n.tickSubdivide=function(){return arguments.length&&n},n};var Rl="bottom",Dl={top:1,right:1,bottom:1,left:1};Bo.svg.brush=function(){function n(i){i.each(function(){var i=Bo.select(this).style("pointer-events","all").style("-webkit-tap-highlight-color","rgba(0,0,0,0)").on("mousedown.brush",u).on("touchstart.brush",u),o=i.selectAll(".background").data([0]);o.enter().append("rect").attr("class","background").style("visibility","hidden").style("cursor","crosshair"),i.selectAll(".extent").data([0]).enter().append("rect").attr("class","extent").style("cursor","move");var a=i.selectAll(".resize").data(p,Et);a.exit().remove(),a.enter().append("g").attr("class",function(n){return"resize "+n}).style("cursor",function(n){return Pl[n]}).append("rect").attr("x",function(n){return/[ew]$/.test(n)?-3:null}).attr("y",function(n){return/^[ns]/.test(n)?-3:null}).attr("width",6).attr("height",6).style("visibility","hidden"),a.style("display",n.empty()?"none":null);var s,f=Bo.transition(i),h=Bo.transition(o);c&&(s=Pi(c),h.attr("x",s[0]).attr("width",s[1]-s[0]),e(f)),l&&(s=Pi(l),h.attr("y",s[0]).attr("height",s[1]-s[0]),r(f)),t(f)})}function t(n){n.selectAll(".resize").attr("transform",function(n){return"translate("+s[+/e$/.test(n)]+","+f[+/^s/.test(n)]+")"})}function e(n){n.select(".extent").attr("x",s[0]),n.selectAll(".extent,.n>rect,.s>rect").attr("width",s[1]-s[0])}function r(n){n.select(".extent").attr("y",f[0]),n.selectAll(".extent,.e>rect,.w>rect").attr("height",f[1]-f[0])}function u(){function u(){32==Bo.event.keyCode&&(C||(y=null,z[0]-=s[1],z[1]-=f[1],C=2),_())}function p(){32==Bo.event.keyCode&&2==C&&(z[0]+=s[1],z[1]+=f[1],C=0,_())}function v(){var n=Bo.mouse(M),u=!1;x&&(n[0]+=x[0],n[1]+=x[1]),C||(Bo.event.altKey?(y||(y=[(s[0]+s[1])/2,(f[0]+f[1])/2]),z[0]=s[+(n[0]p?(u=r,r=p):u=p),v[0]!=r||v[1]!=u?(e?o=null:i=null,v[0]=r,v[1]=u,!0):void 0}function m(){v(),S.style("pointer-events","all").selectAll(".resize").style("display",n.empty()?"none":null),Bo.select("body").style("cursor",null),L.on("mousemove.brush",null).on("mouseup.brush",null).on("touchmove.brush",null).on("touchend.brush",null).on("keydown.brush",null).on("keyup.brush",null),N(),w({type:"brushend"})}var y,x,M=this,b=Bo.select(Bo.event.target),w=a.of(M,arguments),S=Bo.select(M),k=b.datum(),E=!/^(n|s)$/.test(k)&&c,A=!/^(e|w)$/.test(k)&&l,C=b.classed("extent"),N=X(),z=Bo.mouse(M),L=Bo.select(Qo).on("keydown.brush",u).on("keyup.brush",p);if(Bo.event.changedTouches?L.on("touchmove.brush",v).on("touchend.brush",m):L.on("mousemove.brush",v).on("mouseup.brush",m),S.interrupt().selectAll("*").interrupt(),C)z[0]=s[0]-z[0],z[1]=f[0]-z[1];else if(k){var T=+/w$/.test(k),q=+/^n/.test(k);x=[s[1-T]-z[0],f[1-q]-z[1]],z[0]=s[T],z[1]=f[q]}else 
Bo.event.altKey&&(y=z.slice());S.style("pointer-events","none").selectAll(".resize").style("display",null),Bo.select("body").style("cursor",b.style("cursor")),w({type:"brushstart"}),v()}var i,o,a=w(n,"brushstart","brush","brushend"),c=null,l=null,s=[0,0],f=[0,0],h=!0,g=!0,p=Ul[0];return n.event=function(n){n.each(function(){var n=a.of(this,arguments),t={x:s,y:f,i:i,j:o},e=this.__chart__||t;this.__chart__=t,Cl?Bo.select(this).transition().each("start.brush",function(){i=e.i,o=e.j,s=e.x,f=e.y,n({type:"brushstart"})}).tween("brush:brush",function(){var e=mu(s,t.x),r=mu(f,t.y);return i=o=null,function(u){s=t.x=e(u),f=t.y=r(u),n({type:"brush",mode:"resize"})}}).each("end.brush",function(){i=t.i,o=t.j,n({type:"brush",mode:"resize"}),n({type:"brushend"})}):(n({type:"brushstart"}),n({type:"brush",mode:"resize"}),n({type:"brushend"}))})},n.x=function(t){return arguments.length?(c=t,p=Ul[!c<<1|!l],n):c},n.y=function(t){return arguments.length?(l=t,p=Ul[!c<<1|!l],n):l},n.clamp=function(t){return arguments.length?(c&&l?(h=!!t[0],g=!!t[1]):c?h=!!t:l&&(g=!!t),n):c&&l?[h,g]:c?h:l?g:null},n.extent=function(t){var e,r,u,a,h;return arguments.length?(c&&(e=t[0],r=t[1],l&&(e=e[0],r=r[0]),i=[e,r],c.invert&&(e=c(e),r=c(r)),e>r&&(h=e,e=r,r=h),(e!=s[0]||r!=s[1])&&(s=[e,r])),l&&(u=t[0],a=t[1],c&&(u=u[1],a=a[1]),o=[u,a],l.invert&&(u=l(u),a=l(a)),u>a&&(h=u,u=a,a=h),(u!=f[0]||a!=f[1])&&(f=[u,a])),n):(c&&(i?(e=i[0],r=i[1]):(e=s[0],r=s[1],c.invert&&(e=c.invert(e),r=c.invert(r)),e>r&&(h=e,e=r,r=h))),l&&(o?(u=o[0],a=o[1]):(u=f[0],a=f[1],l.invert&&(u=l.invert(u),a=l.invert(a)),u>a&&(h=u,u=a,a=h))),c&&l?[[e,u],[r,a]]:c?[e,r]:l&&[u,a])},n.clear=function(){return n.empty()||(s=[0,0],f=[0,0],i=o=null),n},n.empty=function(){return!!c&&s[0]==s[1]||!!l&&f[0]==f[1]},Bo.rebind(n,a,"on")};var Pl={n:"ns-resize",e:"ew-resize",s:"ns-resize",w:"ew-resize",nw:"nwse-resize",ne:"nesw-resize",se:"nwse-resize",sw:"nesw-resize"},Ul=[["n","e","s","w","nw","ne","se","sw"],["e","w"],["n","s"],[]],jl=rc.format=lc.timeFormat,Fl=jl.utc,Hl=Fl("%Y-%m-%dT%H:%M:%S.%LZ");jl.iso=Date.prototype.toISOString&&+new Date("2000-01-01T00:00:00.000Z")?Io:Hl,Io.parse=function(n){var t=new Date(n);return isNaN(t)?null:t},Io.toString=Hl.toString,rc.second=Ft(function(n){return new uc(1e3*Math.floor(n/1e3))},function(n,t){n.setTime(n.getTime()+1e3*Math.floor(t))},function(n){return n.getSeconds()}),rc.seconds=rc.second.range,rc.seconds.utc=rc.second.utc.range,rc.minute=Ft(function(n){return new uc(6e4*Math.floor(n/6e4))},function(n,t){n.setTime(n.getTime()+6e4*Math.floor(t))},function(n){return n.getMinutes()}),rc.minutes=rc.minute.range,rc.minutes.utc=rc.minute.utc.range,rc.hour=Ft(function(n){var t=n.getTimezoneOffset()/60;return new uc(36e5*(Math.floor(n/36e5-t)+t))},function(n,t){n.setTime(n.getTime()+36e5*Math.floor(t))},function(n){return n.getHours()}),rc.hours=rc.hour.range,rc.hours.utc=rc.hour.utc.range,rc.month=Ft(function(n){return n=rc.day(n),n.setDate(1),n},function(n,t){n.setMonth(n.getMonth()+t)},function(n){return n.getMonth()}),rc.months=rc.month.range,rc.months.utc=rc.month.utc.range;var Ol=[1e3,5e3,15e3,3e4,6e4,3e5,9e5,18e5,36e5,108e5,216e5,432e5,864e5,1728e5,6048e5,2592e6,7776e6,31536e6],Yl=[[rc.second,1],[rc.second,5],[rc.second,15],[rc.second,30],[rc.minute,1],[rc.minute,5],[rc.minute,15],[rc.minute,30],[rc.hour,1],[rc.hour,3],[rc.hour,6],[rc.hour,12],[rc.day,1],[rc.day,2],[rc.week,1],[rc.month,1],[rc.month,3],[rc.year,1]],Il=jl.multi([[".%L",function(n){return n.getMilliseconds()}],[":%S",function(n){return 
n.getSeconds()}],["%I:%M",function(n){return n.getMinutes()}],["%I %p",function(n){return n.getHours()}],["%a %d",function(n){return n.getDay()&&1!=n.getDate()}],["%b %d",function(n){return 1!=n.getDate()}],["%B",function(n){return n.getMonth()}],["%Y",Ae]]),Zl={range:function(n,t,e){return Bo.range(Math.ceil(n/e)*e,+t,e).map(Vo)},floor:Et,ceil:Et};Yl.year=rc.year,rc.scale=function(){return Zo(Bo.scale.linear(),Yl,Il)};var Vl=Yl.map(function(n){return[n[0].utc,n[1]]}),Xl=Fl.multi([[".%L",function(n){return n.getUTCMilliseconds()}],[":%S",function(n){return n.getUTCSeconds()}],["%I:%M",function(n){return n.getUTCMinutes()}],["%I %p",function(n){return n.getUTCHours()}],["%a %d",function(n){return n.getUTCDay()&&1!=n.getUTCDate()}],["%b %d",function(n){return 1!=n.getUTCDate()}],["%B",function(n){return n.getUTCMonth()}],["%Y",Ae]]);Vl.year=rc.year.utc,rc.scale.utc=function(){return Zo(Bo.scale.linear(),Vl,Xl)},Bo.text=At(function(n){return n.responseText}),Bo.json=function(n,t){return Ct(n,"application/json",Xo,t)},Bo.html=function(n,t){return Ct(n,"text/html",$o,t)},Bo.xml=At(function(n){return n.responseXML}),"function"==typeof define&&define.amd?define(Bo):"object"==typeof module&&module.exports&&(module.exports=Bo),this.d3=Bo}(); \ No newline at end of file diff --git a/rally/ui/templates/libs/nv.d3.1.1.15-beta.min.css b/rally/ui/templates/libs/nv.d3.1.1.15-beta.min.css deleted file mode 100644 index bee36681..00000000 --- a/rally/ui/templates/libs/nv.d3.1.1.15-beta.min.css +++ /dev/null @@ -1,6 +0,0 @@ -/* https://github.com/novus/nvd3 - - Copyright (c) 2011-2014 Novus Partners, Inc. - http://www.apache.org/licenses/LICENSE-2.0 -*/ -.chartWrap{margin:0;padding:0;overflow:hidden}.nvtooltip.with-3d-shadow,.with-3d-shadow .nvtooltip{-moz-box-shadow:0 5px 10px rgba(0,0,0,.2);-webkit-box-shadow:0 5px 10px rgba(0,0,0,.2);box-shadow:0 5px 10px rgba(0,0,0,.2);-webkit-border-radius:6px;-moz-border-radius:6px;border-radius:6px}.nvtooltip{position:absolute;background-color:rgba(255,255,255,1);padding:1px;border:1px solid rgba(0,0,0,.2);z-index:10000;font-family:Arial;font-size:13px;text-align:left;pointer-events:none;white-space:nowrap;-webkit-touch-callout:none;-webkit-user-select:none;-khtml-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none}.nvtooltip.with-transitions,.with-transitions .nvtooltip{transition:opacity 250ms linear;-moz-transition:opacity 250ms linear;-webkit-transition:opacity 250ms linear;transition-delay:250ms;-moz-transition-delay:250ms;-webkit-transition-delay:250ms}.nvtooltip.x-nvtooltip,.nvtooltip.y-nvtooltip{padding:8px}.nvtooltip h3{margin:0;padding:4px 14px;line-height:18px;font-weight:400;background-color:rgba(247,247,247,.75);text-align:center;border-bottom:1px solid #ebebeb;-webkit-border-radius:5px 5px 0 0;-moz-border-radius:5px 5px 0 0;border-radius:5px 5px 0 0}.nvtooltip p{margin:0;padding:5px 14px;text-align:center}.nvtooltip span{display:inline-block;margin:2px 0}.nvtooltip table{margin:6px;border-spacing:0}.nvtooltip table td{padding:2px 9px 2px 0;vertical-align:middle}.nvtooltip table td.key{font-weight:400}.nvtooltip table td.value{text-align:right;font-weight:700}.nvtooltip table tr.highlight td{padding:1px 9px 1px 0;border-bottom-style:solid;border-bottom-width:1px;border-top-style:solid;border-top-width:1px}.nvtooltip table td.legend-color-guide div{width:8px;height:8px;vertical-align:middle}.nvtooltip 
.footer{padding:3px;text-align:center}.nvtooltip-pending-removal{position:absolute;pointer-events:none}svg{-webkit-touch-callout:none;-webkit-user-select:none;-khtml-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none;display:block;width:100%;height:100%}svg text{font:400 12px Arial}svg .title{font:700 14px Arial}.nvd3 .nv-background{fill:#fff;fill-opacity:0}.nvd3.nv-noData{font-size:18px;font-weight:700}.nv-brush .extent{fill-opacity:.125;shape-rendering:crispEdges}.nvd3 .nv-legend .nv-series{cursor:pointer}.nvd3 .nv-legend .disabled circle{fill-opacity:0}.nvd3 .nv-axis{pointer-events:none}.nvd3 .nv-axis path{fill:none;stroke:#000;stroke-opacity:.75;shape-rendering:crispEdges}.nvd3 .nv-axis path.domain{stroke-opacity:.75}.nvd3 .nv-axis.nv-x path.domain{stroke-opacity:0}.nvd3 .nv-axis line{fill:none;stroke:#e5e5e5;shape-rendering:crispEdges}.nvd3 .nv-axis .zero line,.nvd3 .nv-axis line.zero{stroke-opacity:.75}.nvd3 .nv-axis .nv-axisMaxMin text{font-weight:700}.nvd3 .x .nv-axis .nv-axisMaxMin text,.nvd3 .x2 .nv-axis .nv-axisMaxMin text,.nvd3 .x3 .nv-axis .nv-axisMaxMin text{text-anchor:middle}.nv-brush .resize path{fill:#eee;stroke:#666}.nvd3 .nv-bars .negative rect{zfill:brown}.nvd3 .nv-bars rect{zfill:#4682b4;fill-opacity:.75;transition:fill-opacity 250ms linear;-moz-transition:fill-opacity 250ms linear;-webkit-transition:fill-opacity 250ms linear}.nvd3 .nv-bars rect.hover{fill-opacity:1}.nvd3 .nv-bars .hover rect{fill:#add8e6}.nvd3 .nv-bars text{fill:rgba(0,0,0,0)}.nvd3 .nv-bars .hover text{fill:rgba(0,0,0,1)}.nvd3 .nv-multibar .nv-groups rect,.nvd3 .nv-multibarHorizontal .nv-groups rect,.nvd3 .nv-discretebar .nv-groups rect{stroke-opacity:0;transition:fill-opacity 250ms linear;-moz-transition:fill-opacity 250ms linear;-webkit-transition:fill-opacity 250ms linear}.nvd3 .nv-multibar .nv-groups rect:hover,.nvd3 .nv-multibarHorizontal .nv-groups rect:hover,.nvd3 .nv-discretebar .nv-groups rect:hover{fill-opacity:1}.nvd3 .nv-discretebar .nv-groups text,.nvd3 .nv-multibarHorizontal .nv-groups text{font-weight:700;fill:rgba(0,0,0,1);stroke:rgba(0,0,0,0)}.nvd3.nv-pie path{stroke-opacity:0;transition:fill-opacity 250ms linear,stroke-width 250ms linear,stroke-opacity 250ms linear;-moz-transition:fill-opacity 250ms linear,stroke-width 250ms linear,stroke-opacity 250ms linear;-webkit-transition:fill-opacity 250ms linear,stroke-width 250ms linear,stroke-opacity 250ms linear}.nvd3.nv-pie .nv-slice text{stroke:#000;stroke-width:0}.nvd3.nv-pie path{stroke:#fff;stroke-width:1px;stroke-opacity:1}.nvd3.nv-pie .hover path{fill-opacity:.7}.nvd3.nv-pie .nv-label{pointer-events:none}.nvd3.nv-pie .nv-label rect{fill-opacity:0;stroke-opacity:0}.nvd3 .nv-groups path.nv-line{fill:none;stroke-width:1.5px}.nvd3 .nv-groups path.nv-line.nv-thin-line{stroke-width:1px}.nvd3 .nv-groups path.nv-area{stroke:none}.nvd3 .nv-line.hover path{stroke-width:6px}.nvd3.nv-line .nvd3.nv-scatter .nv-groups .nv-point{fill-opacity:0;stroke-opacity:0}.nvd3.nv-scatter.nv-single-point .nv-groups .nv-point{fill-opacity:.5!important;stroke-opacity:.5!important}.with-transitions .nvd3 .nv-groups .nv-point{transition:stroke-width 250ms linear,stroke-opacity 250ms linear;-moz-transition:stroke-width 250ms linear,stroke-opacity 250ms linear;-webkit-transition:stroke-width 250ms linear,stroke-opacity 250ms linear}.nvd3.nv-scatter .nv-groups .nv-point.hover,.nvd3 .nv-groups .nv-point.hover{stroke-width:7px;fill-opacity:.95!important;stroke-opacity:.95!important}.nvd3 .nv-point-paths 
path{stroke:#aaa;stroke-opacity:0;fill:#eee;fill-opacity:0}.nvd3 .nv-indexLine{cursor:ew-resize}.nvd3 .nv-distribution{pointer-events:none}.nvd3 .nv-groups .nv-point.hover{stroke-width:20px;stroke-opacity:.5}.nvd3 .nv-scatter .nv-point.hover{fill-opacity:1}.nvd3.nv-stackedarea path.nv-area{fill-opacity:.7;stroke-opacity:0;transition:fill-opacity 250ms linear,stroke-opacity 250ms linear;-moz-transition:fill-opacity 250ms linear,stroke-opacity 250ms linear;-webkit-transition:fill-opacity 250ms linear,stroke-opacity 250ms linear}.nvd3.nv-stackedarea path.nv-area.hover{fill-opacity:.9}.nvd3.nv-stackedarea .nv-groups .nv-point{stroke-opacity:0;fill-opacity:0}.nvd3.nv-linePlusBar .nv-bar rect{fill-opacity:.75}.nvd3.nv-linePlusBar .nv-bar rect:hover{fill-opacity:1}.nvd3.nv-bullet{font:10px sans-serif}.nvd3.nv-bullet .nv-measure{fill-opacity:.8}.nvd3.nv-bullet .nv-measure:hover{fill-opacity:1}.nvd3.nv-bullet .nv-marker{stroke:#000;stroke-width:2px}.nvd3.nv-bullet .nv-markerTriangle{stroke:#000;fill:#fff;stroke-width:1.5px}.nvd3.nv-bullet .nv-tick line{stroke:#666;stroke-width:.5px}.nvd3.nv-bullet .nv-range.nv-s0{fill:#eee}.nvd3.nv-bullet .nv-range.nv-s1{fill:#ddd}.nvd3.nv-bullet .nv-range.nv-s2{fill:#ccc}.nvd3.nv-bullet .nv-title{font-size:14px;font-weight:700}.nvd3.nv-bullet .nv-subtitle{fill:#999}.nvd3.nv-bullet .nv-range{fill:#bababa;fill-opacity:.4}.nvd3.nv-bullet .nv-range:hover{fill-opacity:.7}.nvd3.nv-sparkline path{fill:none}.nvd3.nv-sparklineplus g.nv-hoverValue{pointer-events:none}.nvd3.nv-sparklineplus .nv-hoverValue line{stroke:#333;stroke-width:1.5px}.nvd3.nv-sparklineplus,.nvd3.nv-sparklineplus g{pointer-events:all}.nvd3 .nv-hoverArea{fill-opacity:0;stroke-opacity:0}.nvd3.nv-sparklineplus .nv-xValue,.nvd3.nv-sparklineplus .nv-yValue{stroke-width:0;font-size:.9em;font-weight:400}.nvd3.nv-sparklineplus .nv-yValue{stroke:#f66}.nvd3.nv-sparklineplus .nv-maxValue{stroke:#2ca02c;fill:#2ca02c}.nvd3.nv-sparklineplus .nv-minValue{stroke:#d62728;fill:#d62728}.nvd3.nv-sparklineplus .nv-currentValue{font-weight:700;font-size:1.1em}.nvd3.nv-ohlcBar .nv-ticks .nv-tick{stroke-width:2px}.nvd3.nv-ohlcBar .nv-ticks .nv-tick.hover{stroke-width:4px}.nvd3.nv-ohlcBar .nv-ticks .nv-tick.positive{stroke:#2ca02c}.nvd3.nv-ohlcBar .nv-ticks .nv-tick.negative{stroke:#d62728}.nvd3.nv-historicalStockChart .nv-axis .nv-axislabel{font-weight:700}.nvd3.nv-historicalStockChart .nv-dragTarget{fill-opacity:0;stroke:none;cursor:move}.nvd3 .nv-brush .extent{fill-opacity:0!important}.nvd3 .nv-brushBackground rect{stroke:#000;stroke-width:.4;fill:#fff;fill-opacity:.7}.nvd3.nv-indentedtree .name{margin-left:5px}.nvd3.nv-indentedtree .clickable{color:#08C;cursor:pointer}.nvd3.nv-indentedtree span.clickable:hover{color:#005580;text-decoration:underline}.nvd3.nv-indentedtree .nv-childrenCount{display:inline-block;margin-left:5px}.nvd3.nv-indentedtree .nv-treeicon{cursor:pointer}.nvd3.nv-indentedtree .nv-treeicon.nv-folded{cursor:pointer}.nvd3 .background path{fill:none;stroke:#ccc;stroke-opacity:.4;shape-rendering:crispEdges}.nvd3 .foreground path{fill:none;stroke:#4682b4;stroke-opacity:.7}.nvd3 .brush .extent{fill-opacity:.3;stroke:#fff;shape-rendering:crispEdges}.nvd3 .axis line,.axis path{fill:none;stroke:#000;shape-rendering:crispEdges}.nvd3 .axis text{text-shadow:0 1px 0 #fff}.nvd3 .nv-interactiveGuideLine{pointer-events:none}.nvd3 line.nv-guideline{stroke:#ccc} \ No newline at end of file diff --git a/rally/ui/templates/libs/nv.d3.1.1.15-beta.min.js b/rally/ui/templates/libs/nv.d3.1.1.15-beta.min.js deleted file mode 
100644 index adca97f5..00000000 --- a/rally/ui/templates/libs/nv.d3.1.1.15-beta.min.js +++ /dev/null @@ -1,11 +0,0 @@ -/* https://github.com/novus/nvd3 - - Copyright (c) 2011-2014 Novus Partners, Inc. - http://www.apache.org/licenses/LICENSE-2.0 -*/ -(function(){function t(e,t){return(new Date(t,e+1,0)).getDate()}function n(e,t,n){return function(r,i,s){var o=e(r),u=[];o1)while(op||r>d||d3.event.relatedTarget&&d3.event.relatedTarget.ownerSVGElement===undefined||a){if(l&&d3.event.relatedTarget&&d3.event.relatedTarget.ownerSVGElement===undefined&&d3.event.relatedTarget.className.match(t.nvPointerEventsClass))return;u.elementMouseout({mouseX:n,mouseY:r}),c.renderGuideLine(null);return}var f=s.invert(n);u.elementMousemove({mouseX:n,mouseY:r,pointXValue:f}),d3.event.type==="dblclick"&&u.elementDblclick({mouseX:n,mouseY:r,pointXValue:f})}var h=d3.select(this),p=n||960,d=r||400,v=h.selectAll("g.nv-wrap.nv-interactiveLineLayer").data([o]),m=v.enter().append("g").attr("class"," nv-wrap nv-interactiveLineLayer");m.append("g").attr("class","nv-interactiveGuideLine");if(!f)return;f.on("mousemove",g,!0).on("mouseout",g,!0).on("dblclick",g),c.renderGuideLine=function(t){if(!a)return;var n=v.select(".nv-interactiveGuideLine").selectAll("line").data(t!=null?[e.utils.NaNtoZero(t)]:[],String);n.enter().append("line").attr("class","nv-guideline").attr("x1",function(e){return e}).attr("x2",function(e){return e}).attr("y1",d).attr("y2",0),n.exit().remove()}})}var t=e.models.tooltip(),n=null,r=null,i={left:0,top:0},s=d3.scale.linear(),o=d3.scale.linear(),u=d3.dispatch("elementMousemove","elementMouseout","elementDblclick"),a=!0,f=null,l=navigator.userAgent.indexOf("MSIE")!==-1;return c.dispatch=u,c.tooltip=t,c.margin=function(e){return arguments.length?(i.top=typeof e.top!="undefined"?e.top:i.top,i.left=typeof e.left!="undefined"?e.left:i.left,c):i},c.width=function(e){return arguments.length?(n=e,c):n},c.height=function(e){return arguments.length?(r=e,c):r},c.xScale=function(e){return arguments.length?(s=e,c):s},c.showGuideLine=function(e){return arguments.length?(a=e,c):a},c.svgContainer=function(e){return arguments.length?(f=e,c):f},c},e.interactiveBisect=function(e,t,n){"use strict";if(!e instanceof Array)return null;typeof n!="function"&&(n=function(e,t){return e.x});var r=d3.bisector(n).left,i=d3.max([0,r(e,t)-1]),s=n(e[i],i);typeof s=="undefined"&&(s=i);if(s===t)return i;var o=d3.min([i+1,e.length-1]),u=n(e[o],o);return typeof u=="undefined"&&(u=o),Math.abs(u-t)>=Math.abs(s-t)?i:o},e.nearestValueIndex=function(e,t,n){"use strict";var r=Infinity,i=null;return e.forEach(function(e,s){var o=Math.abs(t-e);o<=r&&oT.height?0:x}v.top=Math.abs(x-S.top),v.left=Math.abs(E.left-S.left)}t+=a.offsetLeft+v.left-2*a.scrollLeft,u+=a.offsetTop+v.top-2*a.scrollTop}return s&&s>0&&(u=Math.floor(u/s)*s),e.tooltip.calcTooltipPosition([t,u],r,i,h),w}var t=null,n=null,r="w",i=50,s=25,o=null,u=null,a=null,f=null,l={left:null,top:null},c=!0,h="nvtooltip-"+Math.floor(Math.random()*1e5),p="nv-pointer-events-none",d=function(e,t){return e},v=function(e){return e},m=function(e){if(t!=null)return t;if(e==null)return"";var n=d3.select(document.createElement("table")),r=n.selectAll("thead").data([e]).enter().append("thead");r.append("tr").append("td").attr("colspan",3).append("strong").classed("x-value",!0).html(v(e.value));var i=n.selectAll("tbody").data([e]).enter().append("tbody"),s=i.selectAll("tr").data(function(e){return e.series}).enter().append("tr").classed("highlight",function(e){return 
e.highlight});s.append("td").classed("legend-color-guide",!0).append("div").style("background-color",function(e){return e.color}),s.append("td").classed("key",!0).html(function(e){return e.key}),s.append("td").classed("value",!0).html(function(e,t){return d(e.value,t)}),s.selectAll("td").each(function(e){if(e.highlight){var t=d3.scale.linear().domain([0,1]).range(["#fff",e.color]),n=.6;d3.select(this).style("border-bottom-color",t(n)).style("border-top-color",t(n))}});var o=n.node().outerHTML;return e.footer!==undefined&&(o+=""),o},g=function(e){return e&&e.series&&e.series.length>0?!0:!1};return w.nvPointerEventsClass=p,w.content=function(e){return arguments.length?(t=e,w):t},w.tooltipElem=function(){return f},w.contentGenerator=function(e){return arguments.length?(typeof e=="function"&&(m=e),w):m},w.data=function(e){return arguments.length?(n=e,w):n},w.gravity=function(e){return arguments.length?(r=e,w):r},w.distance=function(e){return arguments.length?(i=e,w):i},w.snapDistance=function(e){return arguments.length?(s=e,w):s},w.classes=function(e){return arguments.length?(u=e,w):u},w.chartContainer=function(e){return arguments.length?(a=e,w):a},w.position=function(e){return arguments.length?(l.left=typeof e.left!="undefined"?e.left:l.left,l.top=typeof e.top!="undefined"?e.top:l.top,w):l},w.fixedTop=function(e){return arguments.length?(o=e,w):o},w.enabled=function(e){return arguments.length?(c=e,w):c},w.valueFormatter=function(e){return arguments.length?(typeof e=="function"&&(d=e),w):d},w.headerFormatter=function(e){return arguments.length?(typeof e=="function"&&(v=e),w):v},w.id=function(){return h},w},e.tooltip.show=function(t,n,r,i,s,o){var u=document.createElement("div");u.className="nvtooltip "+(o?o:"xy-tooltip");var a=s;if(!s||s.tagName.match(/g|svg/i))a=document.getElementsByTagName("body")[0];u.style.left=0,u.style.top=0,u.style.opacity=0,u.innerHTML=n,a.appendChild(u),s&&(t[0]=t[0]-s.scrollLeft,t[1]=t[1]-s.scrollTop),e.tooltip.calcTooltipPosition(t,r,i,u)},e.tooltip.findFirstNonSVGParent=function(e){while(e.tagName.match(/^g|svg$/i)!==null)e=e.parentNode;return e},e.tooltip.findTotalOffsetTop=function(e,t){var n=t;do isNaN(e.offsetTop)||(n+=e.offsetTop);while(e=e.offsetParent);return n},e.tooltip.findTotalOffsetLeft=function(e,t){var n=t;do isNaN(e.offsetLeft)||(n+=e.offsetLeft);while(e=e.offsetParent);return n},e.tooltip.calcTooltipPosition=function(t,n,r,i){var s=parseInt(i.offsetHeight),o=parseInt(i.offsetWidth),u=e.utils.windowSize().width,a=e.utils.windowSize().height,f=window.pageYOffset,l=window.pageXOffset,c,h;a=window.innerWidth>=document.body.scrollWidth?a:a-16,u=window.innerHeight>=document.body.scrollHeight?u:u-16,n=n||"s",r=r||20;var p=function(t){return e.tooltip.findTotalOffsetTop(t,h)},d=function(t){return e.tooltip.findTotalOffsetLeft(t,c)};switch(n){case"e":c=t[0]-o-r,h=t[1]-s/2;var v=d(i),m=p(i);vl?t[0]+r:l-v+c),mf+a&&(h=f+a-m+h-s);break;case"w":c=t[0]+r,h=t[1]-s/2;var v=d(i),m=p(i);v+o>u&&(c=t[0]-o-r),mf+a&&(h=f+a-m+h-s);break;case"n":c=t[0]-o/2-5,h=t[1]+r;var v=d(i),m=p(i);vu&&(c=c-o/2+5),m+s>f+a&&(h=f+a-m+h-s);break;case"s":c=t[0]-o/2,h=t[1]-s-r;var v=d(i),m=p(i);vu&&(c=c-o/2+5),f>m&&(h=f);break;case"none":c=t[0],h=t[1]-r;var v=d(i),m=p(i)}return i.style.left=c+"px",i.style.top=h+"px",i.style.opacity=1,i.style.position="absolute",i},e.tooltip.cleanup=function(){var e=document.getElementsByClassName("nvtooltip"),t=[];while(e.length)t.push(e[0]),e[0].style.transitionDelay="0 
!important",e[0].style.opacity=0,e[0].className="nvtooltip-pending-removal";setTimeout(function(){while(t.length){var e=t.pop();e.parentNode.removeChild(e)}},500)}}(),e.utils.windowSize=function(){var e={width:640,height:480};return document.body&&document.body.offsetWidth&&(e.width=document.body.offsetWidth,e.height=document.body.offsetHeight),document.compatMode=="CSS1Compat"&&document.documentElement&&document.documentElement.offsetWidth&&(e.width=document.documentElement.offsetWidth,e.height=document.documentElement.offsetHeight),window.innerWidth&&window.innerHeight&&(e.width=window.innerWidth,e.height=window.innerHeight),e},e.utils.windowResize=function(e){if(e===undefined)return;var t=window.onresize;window.onresize=function(n){typeof t=="function"&&t(n),e(n)}},e.utils.getColor=function(t){return arguments.length?Object.prototype.toString.call(t)==="[object Array]"?function(e,n){return e.color||t[n%t.length]}:t:e.utils.defaultColor()},e.utils.defaultColor=function(){var e=d3.scale.category20().range();return function(t,n){return t.color||e[n%e.length]}},e.utils.customTheme=function(e,t,n){t=t||function(e){return e.key},n=n||d3.scale.category20().range();var r=n.length;return function(i,s){var o=t(i);return r||(r=n.length),typeof e[o]!="undefined"?typeof e[o]=="function"?e[o]():e[o]:n[--r]}},e.utils.pjax=function(t,n){function r(r){d3.html(r,function(r){var i=d3.select(n).node();i.parentNode.replaceChild(d3.select(r).select(n).node(),i),e.utils.pjax(t,n)})}d3.selectAll(t).on("click",function(){history.pushState(this.href,this.textContent,this.href),r(this.href),d3.event.preventDefault()}),d3.select(window).on("popstate",function(){d3.event.state&&r(d3.event.state)})},e.utils.calcApproxTextWidth=function(e){if(typeof e.style=="function"&&typeof e.text=="function"){var t=parseInt(e.style("font-size").replace("px","")),n=e.text().length;return n*t*.5}return 0},e.utils.NaNtoZero=function(e){return typeof e!="number"||isNaN(e)||e===null||e===Infinity?0:e},e.utils.optionsFunc=function(e){return e&&d3.map(e).forEach(function(e,t){typeof this[e]=="function"&&this[e](t)}.bind(this)),this},e.models.axis=function(){"use strict";function m(e){return e.each(function(e){var i=d3.select(this),m=i.selectAll("g.nv-wrap.nv-axis").data([e]),g=m.enter().append("g").attr("class","nvd3 nv-wrap nv-axis"),y=g.append("g"),b=m.select("g");p!==null?t.ticks(p):(t.orient()=="top"||t.orient()=="bottom")&&t.ticks(Math.abs(s.range()[1]-s.range()[0])/100),b.transition().call(t),v=v||t.scale();var w=t.tickFormat();w==null&&(w=v.tickFormat());var E=b.selectAll("text.nv-axislabel").data([o||null]);E.exit().remove();switch(t.orient()){case"top":E.enter().append("text").attr("class","nv-axislabel");var S=s.range().length==2?s.range()[1]:s.range()[s.range().length-1]+(s.range()[1]-s.range()[0]);E.attr("text-anchor","middle").attr("y",0).attr("x",S/2);if(u){var x=m.selectAll("g.nv-axisMaxMin").data(s.domain());x.enter().append("g").attr("class","nv-axisMaxMin").append("text"),x.exit().remove(),x.attr("transform",function(e,t){return"translate("+s(e)+",0)"}).select("text").attr("dy","-0.5em").attr("y",-t.tickPadding()).attr("text-anchor","middle").text(function(e,t){var n=w(e);return(""+n).match("NaN")?"":n}),x.transition().attr("transform",function(e,t){return"translate("+s.range()[t]+",0)"})}break;case"bottom":var T=36,N=30,C=b.selectAll("g").select("text");if(f%360){C.each(function(e,t){var n=this.getBBox().width;n>N&&(N=n)});var 
k=Math.abs(Math.sin(f*Math.PI/180)),T=(k?k*N:N)+30;C.attr("transform",function(e,t,n){return"rotate("+f+" 0,0)"}).style("text-anchor",f%360>0?"start":"end")}E.enter().append("text").attr("class","nv-axislabel");var S=s.range().length==2?s.range()[1]:s.range()[s.range().length-1]+(s.range()[1]-s.range()[0]);E.attr("text-anchor","middle").attr("y",T).attr("x",S/2);if(u){var x=m.selectAll("g.nv-axisMaxMin").data([s.domain()[0],s.domain()[s.domain().length-1]]);x.enter().append("g").attr("class","nv-axisMaxMin").append("text"),x.exit().remove(),x.attr("transform",function(e,t){return"translate("+(s(e)+(h?s.rangeBand()/2:0))+",0)"}).select("text").attr("dy",".71em").attr("y",t.tickPadding()).attr("transform",function(e,t,n){return"rotate("+f+" 0,0)"}).style("text-anchor",f?f%360>0?"start":"end":"middle").text(function(e,t){var n=w(e);return(""+n).match("NaN")?"":n}),x.transition().attr("transform",function(e,t){return"translate("+(s(e)+(h?s.rangeBand()/2:0))+",0)"})}c&&C.attr("transform",function(e,t){return"translate(0,"+(t%2==0?"0":"12")+")"});break;case"right":E.enter().append("text").attr("class","nv-axislabel"),E.style("text-anchor",l?"middle":"begin").attr("transform",l?"rotate(90)":"").attr("y",l?-Math.max(n.right,r)+12:-10).attr("x",l?s.range()[0]/2:t.tickPadding());if(u){var x=m.selectAll("g.nv-axisMaxMin").data(s.domain());x.enter().append("g").attr("class","nv-axisMaxMin").append("text").style("opacity",0),x.exit().remove(),x.attr("transform",function(e,t){return"translate(0,"+s(e)+")"}).select("text").attr("dy",".32em").attr("y",0).attr("x",t.tickPadding()).style("text-anchor","start").text(function(e,t){var n=w(e);return(""+n).match("NaN")?"":n}),x.transition().attr("transform",function(e,t){return"translate(0,"+s.range()[t]+")"}).select("text").style("opacity",1)}break;case"left":E.enter().append("text").attr("class","nv-axislabel"),E.style("text-anchor",l?"middle":"end").attr("transform",l?"rotate(-90)":"").attr("y",l?-Math.max(n.left,r)+d:-10).attr("x",l?-s.range()[0]/2:-t.tickPadding());if(u){var x=m.selectAll("g.nv-axisMaxMin").data(s.domain());x.enter().append("g").attr("class","nv-axisMaxMin").append("text").style("opacity",0),x.exit().remove(),x.attr("transform",function(e,t){return"translate(0,"+v(e)+")"}).select("text").attr("dy",".32em").attr("y",0).attr("x",-t.tickPadding()).attr("text-anchor","end").text(function(e,t){var n=w(e);return(""+n).match("NaN")?"":n}),x.transition().attr("transform",function(e,t){return"translate(0,"+s.range()[t]+")"}).select("text").style("opacity",1)}}E.text(function(e){return e}),u&&(t.orient()==="left"||t.orient()==="right")&&(b.selectAll("g").each(function(e,t){d3.select(this).select("text").attr("opacity",1);if(s(e)s.range()[0]-10)(e>1e-10||e<-1e-10)&&d3.select(this).attr("opacity",0),d3.select(this).select("text").attr("opacity",0)}),s.domain()[0]==s.domain()[1]&&s.domain()[0]==0&&m.selectAll("g.nv-axisMaxMin").style("opacity",function(e,t){return t?0:1}));if(u&&(t.orient()==="top"||t.orient()==="bottom")){var L=[];m.selectAll("g.nv-axisMaxMin").each(function(e,t){try{t?L.push(s(e)-this.getBBox().width-4):L.push(s(e)+this.getBBox().width+4)}catch(n){t?L.push(s(e)-4):L.push(s(e)+4)}}),b.selectAll("g").each(function(e,t){if(s(e)L[1])e>1e-10||e<-1e-10?d3.select(this).remove():d3.select(this).select("text").remove()})}a&&b.selectAll(".tick").filter(function(e){return!parseFloat(Math.round(e.__data__*1e5)/1e6)&&e.__data__!==undefined}).classed("zero",!0),v=s.copy()}),m}var 
t=d3.svg.axis(),n={top:0,right:0,bottom:0,left:0},r=75,i=60,s=d3.scale.linear(),o=null,u=!0,a=!0,f=0,l=!0,c=!1,h=!1,p=null,d=12;t.scale(s).orient("bottom").tickFormat(function(e){return e});var v;return m.axis=t,d3.rebind(m,t,"orient","tickValues","tickSubdivide","tickSize","tickPadding","tickFormat"),d3.rebind(m,s,"domain","range","rangeBand","rangeBands"),m.options=e.utils.optionsFunc.bind(m),m.margin=function(e){return arguments.length?(n.top=typeof e.top!="undefined"?e.top:n.top,n.right=typeof e.right!="undefined"?e.right:n.right,n.bottom=typeof e.bottom!="undefined"?e.bottom:n.bottom,n.left=typeof e.left!="undefined"?e.left:n.left,m):n},m.width=function(e){return arguments.length?(r=e,m):r},m.ticks=function(e){return arguments.length?(p=e,m):p},m.height=function(e){return arguments.length?(i=e,m):i},m.axisLabel=function(e){return arguments.length?(o=e,m):o},m.showMaxMin=function(e){return arguments.length?(u=e,m):u},m.highlightZero=function(e){return arguments.length?(a=e,m):a},m.scale=function(e){return arguments.length?(s=e,t.scale(s),h=typeof s.rangeBands=="function",d3.rebind(m,s,"domain","range","rangeBand","rangeBands"),m):s},m.rotateYLabel=function(e){return arguments.length?(l=e,m):l},m.rotateLabels=function(e){return arguments.length?(f=e,m):f},m.staggerLabels=function(e){return arguments.length?(c=e,m):c},m.axisLabelDistance=function(e){return arguments.length?(d=e,m):d},m},e.models.bullet=function(){"use strict";function m(e){return e.each(function(e,n){var p=c-t.left-t.right,m=h-t.top-t.bottom,g=d3.select(this),y=i.call(this,e,n).slice().sort(d3.descending),b=s.call(this,e,n).slice().sort(d3.descending),w=o.call(this,e,n).slice().sort(d3.descending),E=u.call(this,e,n).slice(),S=a.call(this,e,n).slice(),x=f.call(this,e,n).slice(),T=d3.scale.linear().domain(d3.extent(d3.merge([l,y]))).range(r?[p,0]:[0,p]),N=this.__chart__||d3.scale.linear().domain([0,Infinity]).range(T.range());this.__chart__=T;var C=d3.min(y),k=d3.max(y),L=y[1],A=g.selectAll("g.nv-wrap.nv-bullet").data([e]),O=A.enter().append("g").attr("class","nvd3 nv-wrap nv-bullet"),M=O.append("g"),_=A.select("g");M.append("rect").attr("class","nv-range nv-rangeMax"),M.append("rect").attr("class","nv-range nv-rangeAvg"),M.append("rect").attr("class","nv-range nv-rangeMin"),M.append("rect").attr("class","nv-measure"),M.append("path").attr("class","nv-markerTriangle"),A.attr("transform","translate("+t.left+","+t.top+")");var D=function(e){return Math.abs(N(e)-N(0))},P=function(e){return Math.abs(T(e)-T(0))},H=function(e){return e<0?N(e):N(0)},B=function(e){return e<0?T(e):T(0)};_.select("rect.nv-rangeMax").attr("height",m).attr("width",P(k>0?k:C)).attr("x",B(k>0?k:C)).datum(k>0?k:C),_.select("rect.nv-rangeAvg").attr("height",m).attr("width",P(L)).attr("x",B(L)).datum(L),_.select("rect.nv-rangeMin").attr("height",m).attr("width",P(k)).attr("x",B(k)).attr("width",P(k>0?C:k)).attr("x",B(k>0?C:k)).datum(k>0?C:k),_.select("rect.nv-measure").style("fill",d).attr("height",m/3).attr("y",m/3).attr("width",w<0?T(0)-T(w[0]):T(w[0])-T(0)).attr("x",B(w)).on("mouseover",function(){v.elementMouseover({value:w[0],label:x[0]||"Current",pos:[T(w[0]),m/2]})}).on("mouseout",function(){v.elementMouseout({value:w[0],label:x[0]||"Current"})});var j=m/6;b[0]?_.selectAll("path.nv-markerTriangle").attr("transform",function(e){return"translate("+T(b[0])+","+m/2+")"}).attr("d","M0,"+j+"L"+j+","+ -j+" "+ -j+","+ 
-j+"Z").on("mouseover",function(){v.elementMouseover({value:b[0],label:S[0]||"Previous",pos:[T(b[0]),m/2]})}).on("mouseout",function(){v.elementMouseout({value:b[0],label:S[0]||"Previous"})}):_.selectAll("path.nv-markerTriangle").remove(),A.selectAll(".nv-range").on("mouseover",function(e,t){var n=E[t]||(t?t==1?"Mean":"Minimum":"Maximum");v.elementMouseover({value:e,label:n,pos:[T(e),m/2]})}).on("mouseout",function(e,t){var n=E[t]||(t?t==1?"Mean":"Minimum":"Maximum");v.elementMouseout({value:e,label:n})})}),m}var t={top:0,right:0,bottom:0,left:0},n="left",r=!1,i=function(e){return e.ranges},s=function(e){return e.markers},o=function(e){return e.measures},u=function(e){return e.rangeLabels?e.rangeLabels:[]},a=function(e){return e.markerLabels?e.markerLabels:[]},f=function(e){return e.measureLabels?e.measureLabels:[]},l=[0],c=380,h=30,p=null,d=e.utils.getColor(["#1f77b4"]),v=d3.dispatch("elementMouseover","elementMouseout");return m.dispatch=v,m.options=e.utils.optionsFunc.bind(m),m.orient=function(e){return arguments.length?(n=e,r=n=="right"||n=="bottom",m):n},m.ranges=function(e){return arguments.length?(i=e,m):i},m.markers=function(e){return arguments.length?(s=e,m):s},m.measures=function(e){return arguments.length?(o=e,m):o},m.forceX=function(e){return arguments.length?(l=e,m):l},m.width=function(e){return arguments.length?(c=e,m):c},m.height=function(e){return arguments.length?(h=e,m):h},m.margin=function(e){return arguments.length?(t.top=typeof e.top!="undefined"?e.top:t.top,t.right=typeof e.right!="undefined"?e.right:t.right,t.bottom=typeof e.bottom!="undefined"?e.bottom:t.bottom,t.left=typeof e.left!="undefined"?e.left:t.left,m):t},m.tickFormat=function(e){return arguments.length?(p=e,m):p},m.color=function(t){return arguments.length?(d=e.utils.getColor(t),m):d},m},e.models.bulletChart=function(){"use strict";function m(e){return e.each(function(n,h){var g=d3.select(this),y=(a||parseInt(g.style("width"))||960)-i.left-i.right,b=f-i.top-i.bottom,w=this;m.update=function(){m(e)},m.container=this;if(!n||!s.call(this,n,h)){var E=g.selectAll(".nv-noData").data([p]);return E.enter().append("text").attr("class","nvd3 nv-noData").attr("dy","-.7em").style("text-anchor","middle"),E.attr("x",i.left+y/2).attr("y",18+i.top+b/2).text(function(e){return e}),m}g.selectAll(".nv-noData").remove();var S=s.call(this,n,h).slice().sort(d3.descending),x=o.call(this,n,h).slice().sort(d3.descending),T=u.call(this,n,h).slice().sort(d3.descending),N=g.selectAll("g.nv-wrap.nv-bulletChart").data([n]),C=N.enter().append("g").attr("class","nvd3 nv-wrap nv-bulletChart"),k=C.append("g"),L=N.select("g");k.append("g").attr("class","nv-bulletWrap"),k.append("g").attr("class","nv-titles"),N.attr("transform","translate("+i.left+","+i.top+")");var A=d3.scale.linear().domain([0,Math.max(S[0],x[0],T[0])]).range(r?[y,0]:[0,y]),O=this.__chart__||d3.scale.linear().domain([0,Infinity]).range(A.range());this.__chart__=A;var M=function(e){return Math.abs(O(e)-O(0))},_=function(e){return Math.abs(A(e)-A(0))},D=k.select(".nv-titles").append("g").attr("text-anchor","end").attr("transform","translate(-6,"+(f-i.top-i.bottom)/2+")");D.append("text").attr("class","nv-title").text(function(e){return e.title}),D.append("text").attr("class","nv-subtitle").attr("dy","1em").text(function(e){return e.subtitle}),t.width(y).height(b);var P=L.select(".nv-bulletWrap");d3.transition(P).call(t);var H=l||A.tickFormat(y/100),B=L.selectAll("g.nv-tick").data(A.ticks(y/50),function(e){return 
this.textContent||H(e)}),j=B.enter().append("g").attr("class","nv-tick").attr("transform",function(e){return"translate("+O(e)+",0)"}).style("opacity",1e-6);j.append("line").attr("y1",b).attr("y2",b*7/6),j.append("text").attr("text-anchor","middle").attr("dy","1em").attr("y",b*7/6).text(H);var F=d3.transition(B).attr("transform",function(e){return"translate("+A(e)+",0)"}).style("opacity",1);F.select("line").attr("y1",b).attr("y2",b*7/6),F.select("text").attr("y",b*7/6),d3.transition(B.exit()).attr("transform",function(e){return"translate("+A(e)+",0)"}).style("opacity",1e-6).remove(),d.on("tooltipShow",function(e){e.key=n.title,c&&v(e,w.parentNode)})}),d3.timer.flush(),m}var t=e.models.bullet(),n="left",r=!1,i={top:5,right:40,bottom:20,left:120},s=function(e){return e.ranges},o=function(e){return e.markers},u=function(e){return e.measures},a=null,f=55,l=null,c=!0,h=function(e,t,n,r,i){return"

    "+t+"

    "+"

    "+n+"

    "},p="No Data Available.",d=d3.dispatch("tooltipShow","tooltipHide"),v=function(t,n){var r=t.pos[0]+(n.offsetLeft||0)+i.left,s=t.pos[1]+(n.offsetTop||0)+i.top,o=h(t.key,t.label,t.value,t,m);e.tooltip.show([r,s],o,t.value<0?"e":"w",null,n)};return t.dispatch.on("elementMouseover.tooltip",function(e){d.tooltipShow(e)}),t.dispatch.on("elementMouseout.tooltip",function(e){d.tooltipHide(e)}),d.on("tooltipHide",function(){c&&e.tooltip.cleanup()}),m.dispatch=d,m.bullet=t,d3.rebind(m,t,"color"),m.options=e.utils.optionsFunc.bind(m),m.orient=function(e){return arguments.length?(n=e,r=n=="right"||n=="bottom",m):n},m.ranges=function(e){return arguments.length?(s=e,m):s},m.markers=function(e){return arguments.length?(o=e,m):o},m.measures=function(e){return arguments.length?(u=e,m):u},m.width=function(e){return arguments.length?(a=e,m):a},m.height=function(e){return arguments.length?(f=e,m):f},m.margin=function(e){return arguments.length?(i.top=typeof e.top!="undefined"?e.top:i.top,i.right=typeof e.right!="undefined"?e.right:i.right,i.bottom=typeof e.bottom!="undefined"?e.bottom:i.bottom,i.left=typeof e.left!="undefined"?e.left:i.left,m):i},m.tickFormat=function(e){return arguments.length?(l=e,m):l},m.tooltips=function(e){return arguments.length?(c=e,m):c},m.tooltipContent=function(e){return arguments.length?(h=e,m):h},m.noData=function(e){return arguments.length?(p=e,m):p},m},e.models.cumulativeLineChart=function(){"use strict";function D(b){return b.each(function(b){function q(e,t){d3.select(D.container).style("cursor","ew-resize")}function R(e,t){M.x=d3.event.x,M.i=Math.round(O.invert(M.x)),rt()}function U(e,t){d3.select(D.container).style("cursor","auto"),x.index=M.i,k.stateChange(x)}function rt(){nt.data([M]);var e=D.transitionDuration();D.transitionDuration(0),D.update(),D.transitionDuration(e)}var A=d3.select(this).classed("nv-chart-"+S,!0),H=this,B=(f||parseInt(A.style("width"))||960)-u.left-u.right,j=(l||parseInt(A.style("height"))||400)-u.top-u.bottom;D.update=function(){A.transition().duration(L).call(D)},D.container=this,x.disabled=b.map(function(e){return!!e.disabled});if(!T){var F;T={};for(F in x)x[F]instanceof Array?T[F]=x[F].slice(0):T[F]=x[F]}var I=d3.behavior.drag().on("dragstart",q).on("drag",R).on("dragend",U);if(!b||!b.length||!b.filter(function(e){return e.values.length}).length){var z=A.selectAll(".nv-noData").data([N]);return z.enter().append("text").attr("class","nvd3 nv-noData").attr("dy","-.7em").style("text-anchor","middle"),z.attr("x",u.left+B/2).attr("y",u.top+j/2).text(function(e){return e}),D}A.selectAll(".nv-noData").remove(),w=t.xScale(),E=t.yScale();if(!y){var W=b.filter(function(e){return!e.disabled}).map(function(e,n){var r=d3.extent(e.values,t.y());return r[0]<-0.95&&(r[0]=-0.95),[(r[0]-r[1])/(1+r[1]),(r[1]-r[0])/(1+r[0])]}),X=[d3.min(W,function(e){return e[0]}),d3.max(W,function(e){return e[1]})];t.yDomain(X)}else t.yDomain(null);O.domain([0,b[0].values.length-1]).range([0,B]).clamp(!0);var b=P(M.i,b),V=g?"none":"all",$=A.selectAll("g.nv-wrap.nv-cumulativeLine").data([b]),J=$.enter().append("g").attr("class","nvd3 nv-wrap nv-cumulativeLine").append("g"),K=$.select("g");J.append("g").attr("class","nv-interactive"),J.append("g").attr("class","nv-x nv-axis").style("pointer-events","none"),J.append("g").attr("class","nv-y 
nv-axis"),J.append("g").attr("class","nv-background"),J.append("g").attr("class","nv-linesWrap").style("pointer-events",V),J.append("g").attr("class","nv-avgLinesWrap").style("pointer-events","none"),J.append("g").attr("class","nv-legendWrap"),J.append("g").attr("class","nv-controlsWrap"),c&&(i.width(B),K.select(".nv-legendWrap").datum(b).call(i),u.top!=i.height()&&(u.top=i.height(),j=(l||parseInt(A.style("height"))||400)-u.top-u.bottom),K.select(".nv-legendWrap").attr("transform","translate(0,"+ -u.top+")"));if(m){var Q=[{key:"Re-scale y-axis",disabled:!y}];s.width(140).color(["#444","#444","#444"]).rightAlign(!1).margin({top:5,right:0,bottom:5,left:20}),K.select(".nv-controlsWrap").datum(Q).attr("transform","translate(0,"+ -u.top+")").call(s)}$.attr("transform","translate("+u.left+","+u.top+")"),d&&K.select(".nv-y.nv-axis").attr("transform","translate("+B+",0)");var G=b.filter(function(e){return e.tempDisabled});$.select(".tempDisabled").remove(),G.length&&$.append("text").attr("class","tempDisabled").attr("x",B/2).attr("y","-.71em").style("text-anchor","end").text(G.map(function(e){return e.key}).join(", ")+" values cannot be calculated for this time period."),g&&(o.width(B).height(j).margin({left:u.left,top:u.top}).svgContainer(A).xScale(w),$.select(".nv-interactive").call(o)),J.select(".nv-background").append("rect"),K.select(".nv-background rect").attr("width",B).attr("height",j),t.y(function(e){return e.display.y}).width(B).height(j).color(b.map(function(e,t){return e.color||a(e,t)}).filter(function(e,t){return!b[t].disabled&&!b[t].tempDisabled}));var Y=K.select(".nv-linesWrap").datum(b.filter(function(e){return!e.disabled&&!e.tempDisabled}));Y.call(t),b.forEach(function(e,t){e.seriesIndex=t});var Z=b.filter(function(e){return!e.disabled&&!!C(e)}),et=K.select(".nv-avgLinesWrap").selectAll("line").data(Z,function(e){return e.key}),tt=function(e){var t=E(C(e));return t<0?0:t>j?j:t};et.enter().append("line").style("stroke-width",2).style("stroke-dasharray","10,10").style("stroke",function(e,n){return t.color()(e,e.seriesIndex)}).attr("x1",0).attr("x2",B).attr("y1",tt).attr("y2",tt),et.style("stroke-opacity",function(e){var t=E(C(e));return t<0||t>j?0:1}).attr("x1",0).attr("x2",B).attr("y1",tt).attr("y2",tt),et.exit().remove();var nt=Y.selectAll(".nv-indexLine").data([M]);nt.enter().append("rect").attr("class","nv-indexLine").attr("width",3).attr("x",-2).attr("fill","red").attr("fill-opacity",.5).style("pointer-events","all").call(I),nt.attr("transform",function(e){return"translate("+O(e.i)+",0)"}).attr("height",j),h&&(n.scale(w).ticks(Math.min(b[0].values.length,B/70)).tickSize(-j,0),K.select(".nv-x.nv-axis").attr("transform","translate(0,"+E.range()[0]+")"),d3.transition(K.select(".nv-x.nv-axis")).call(n)),p&&(r.scale(E).ticks(j/36).tickSize(-B,0),d3.transition(K.select(".nv-y.nv-axis")).call(r)),K.select(".nv-background rect").on("click",function(){M.x=d3.mouse(this)[0],M.i=Math.round(O.invert(M.x)),x.index=M.i,k.stateChange(x),rt()}),t.dispatch.on("elementClick",function(e){M.i=e.pointIndex,M.x=O(M.i),x.index=M.i,k.stateChange(x),rt()}),s.dispatch.on("legendClick",function(e,t){e.disabled=!e.disabled,y=!e.disabled,x.rescaleY=y,k.stateChange(x),D.update()}),i.dispatch.on("stateChange",function(e){x.disabled=e.disabled,k.stateChange(x),D.update()}),o.dispatch.on("elementMousemove",function(i){t.clearHighlights();var s,f,l,c=[];b.filter(function(e,t){return 
e.seriesIndex=t,!e.disabled}).forEach(function(n,r){f=e.interactiveBisect(n.values,i.pointXValue,D.x()),t.highlightPoint(r,f,!0);var o=n.values[f];if(typeof o=="undefined")return;typeof s=="undefined"&&(s=o),typeof l=="undefined"&&(l=D.xScale()(D.x()(o,f))),c.push({key:n.key,value:D.y()(o,f),color:a(n,n.seriesIndex)})});if(c.length>2){var h=D.yScale().invert(i.mouseY),p=Math.abs(D.yScale().domain()[0]-D.yScale().domain()[1]),d=.03*p,m=e.nearestValueIndex(c.map(function(e){return e.value}),h,d);m!==null&&(c[m].highlight=!0)}var g=n.tickFormat()(D.x()(s,f),f);o.tooltip.position({left:l+u.left,top:i.mouseY+u.top}).chartContainer(H.parentNode).enabled(v).valueFormatter(function(e,t){return r.tickFormat()(e)}).data({value:g,series:c})(),o.renderGuideLine(l)}),o.dispatch.on("elementMouseout",function(e){k.tooltipHide(),t.clearHighlights()}),k.on("tooltipShow",function(e){v&&_(e,H.parentNode)}),k.on("changeState",function(e){typeof e.disabled!="undefined"&&(b.forEach(function(t,n){t.disabled=e.disabled[n]}),x.disabled=e.disabled),typeof e.index!="undefined"&&(M.i=e.index,M.x=O(M.i),x.index=e.index,nt.data([M])),typeof e.rescaleY!="undefined"&&(y=e.rescaleY),D.update()})}),D}function P(e,n){return n.map(function(n,r){if(!n.values)return n;var i=t.y()(n.values[e],e);return i<-0.95&&!A?(n.tempDisabled=!0,n):(n.tempDisabled=!1,n.values= -n.values.map(function(e,n){return e.display={y:(t.y()(e,n)-i)/(1+i)},e}),n)})}var t=e.models.line(),n=e.models.axis(),r=e.models.axis(),i=e.models.legend(),s=e.models.legend(),o=e.interactiveGuideline(),u={top:30,right:30,bottom:50,left:60},a=e.utils.defaultColor(),f=null,l=null,c=!0,h=!0,p=!0,d=!1,v=!0,m=!0,g=!1,y=!0,b=function(e,t,n,r,i){return"

    "+e+"

    "+"

    "+n+" at "+t+"

    "},w,E,S=t.id(),x={index:0,rescaleY:y},T=null,N="No Data Available.",C=function(e){return e.average},k=d3.dispatch("tooltipShow","tooltipHide","stateChange","changeState"),L=250,A=!1;n.orient("bottom").tickPadding(7),r.orient(d?"right":"left"),s.updateState(!1);var O=d3.scale.linear(),M={i:0,x:0},_=function(i,s){var o=i.pos[0]+(s.offsetLeft||0),u=i.pos[1]+(s.offsetTop||0),a=n.tickFormat()(t.x()(i.point,i.pointIndex)),f=r.tickFormat()(t.y()(i.point,i.pointIndex)),l=b(i.series.key,a,f,i,D);e.tooltip.show([o,u],l,null,null,s)};return t.dispatch.on("elementMouseover.tooltip",function(e){e.pos=[e.pos[0]+u.left,e.pos[1]+u.top],k.tooltipShow(e)}),t.dispatch.on("elementMouseout.tooltip",function(e){k.tooltipHide(e)}),k.on("tooltipHide",function(){v&&e.tooltip.cleanup()}),D.dispatch=k,D.lines=t,D.legend=i,D.xAxis=n,D.yAxis=r,D.interactiveLayer=o,d3.rebind(D,t,"defined","isArea","x","y","xScale","yScale","size","xDomain","yDomain","xRange","yRange","forceX","forceY","interactive","clipEdge","clipVoronoi","useVoronoi","id"),D.options=e.utils.optionsFunc.bind(D),D.margin=function(e){return arguments.length?(u.top=typeof e.top!="undefined"?e.top:u.top,u.right=typeof e.right!="undefined"?e.right:u.right,u.bottom=typeof e.bottom!="undefined"?e.bottom:u.bottom,u.left=typeof e.left!="undefined"?e.left:u.left,D):u},D.width=function(e){return arguments.length?(f=e,D):f},D.height=function(e){return arguments.length?(l=e,D):l},D.color=function(t){return arguments.length?(a=e.utils.getColor(t),i.color(a),D):a},D.rescaleY=function(e){return arguments.length?(y=e,D):y},D.showControls=function(e){return arguments.length?(m=e,D):m},D.useInteractiveGuideline=function(e){return arguments.length?(g=e,e===!0&&(D.interactive(!1),D.useVoronoi(!1)),D):g},D.showLegend=function(e){return arguments.length?(c=e,D):c},D.showXAxis=function(e){return arguments.length?(h=e,D):h},D.showYAxis=function(e){return arguments.length?(p=e,D):p},D.rightAlignYAxis=function(e){return arguments.length?(d=e,r.orient(e?"right":"left"),D):d},D.tooltips=function(e){return arguments.length?(v=e,D):v},D.tooltipContent=function(e){return arguments.length?(b=e,D):b},D.state=function(e){return arguments.length?(x=e,D):x},D.defaultState=function(e){return arguments.length?(T=e,D):T},D.noData=function(e){return arguments.length?(N=e,D):N},D.average=function(e){return arguments.length?(C=e,D):C},D.transitionDuration=function(e){return arguments.length?(L=e,D):L},D.noErrorCheck=function(e){return arguments.length?(A=e,D):A},D},e.models.discreteBar=function(){"use strict";function E(e){return e.each(function(e){var i=n-t.left-t.right,E=r-t.top-t.bottom,S=d3.select(this);e.forEach(function(e,t){e.values.forEach(function(e){e.series=t})});var T=p&&d?[]:e.map(function(e){return e.values.map(function(e,t){return{x:u(e,t),y:a(e,t),y0:e.y0}})});s.domain(p||d3.merge(T).map(function(e){return e.x})).rangeBands(v||[0,i],.1),o.domain(d||d3.extent(d3.merge(T).map(function(e){return e.y}).concat(f))),c?o.range(m||[E-(o.domain()[0]<0?12:0),o.domain()[1]>0?12:0]):o.range(m||[E,0]),b=b||s,w=w||o.copy().range([o(0),o(0)]);var N=S.selectAll("g.nv-wrap.nv-discretebar").data([e]),C=N.enter().append("g").attr("class","nvd3 nv-wrap nv-discretebar"),k=C.append("g"),L=N.select("g");k.append("g").attr("class","nv-groups"),N.attr("transform","translate("+t.left+","+t.top+")");var A=N.select(".nv-groups").selectAll(".nv-group").data(function(e){return e},function(e){return 
e.key});A.enter().append("g").style("stroke-opacity",1e-6).style("fill-opacity",1e-6),A.exit().transition().style("stroke-opacity",1e-6).style("fill-opacity",1e-6).remove(),A.attr("class",function(e,t){return"nv-group nv-series-"+t}).classed("hover",function(e){return e.hover}),A.transition().style("stroke-opacity",1).style("fill-opacity",.75);var O=A.selectAll("g.nv-bar").data(function(e){return e.values});O.exit().remove();var M=O.enter().append("g").attr("transform",function(e,t,n){return"translate("+(s(u(e,t))+s.rangeBand()*.05)+", "+o(0)+")"}).on("mouseover",function(t,n){d3.select(this).classed("hover",!0),g.elementMouseover({value:a(t,n),point:t,series:e[t.series],pos:[s(u(t,n))+s.rangeBand()*(t.series+.5)/e.length,o(a(t,n))],pointIndex:n,seriesIndex:t.series,e:d3.event})}).on("mouseout",function(t,n){d3.select(this).classed("hover",!1),g.elementMouseout({value:a(t,n),point:t,series:e[t.series],pointIndex:n,seriesIndex:t.series,e:d3.event})}).on("click",function(t,n){g.elementClick({value:a(t,n),point:t,series:e[t.series],pos:[s(u(t,n))+s.rangeBand()*(t.series+.5)/e.length,o(a(t,n))],pointIndex:n,seriesIndex:t.series,e:d3.event}),d3.event.stopPropagation()}).on("dblclick",function(t,n){g.elementDblClick({value:a(t,n),point:t,series:e[t.series],pos:[s(u(t,n))+s.rangeBand()*(t.series+.5)/e.length,o(a(t,n))],pointIndex:n,seriesIndex:t.series,e:d3.event}),d3.event.stopPropagation()});M.append("rect").attr("height",0).attr("width",s.rangeBand()*.9/e.length),c?(M.append("text").attr("text-anchor","middle"),O.select("text").text(function(e,t){return h(a(e,t))}).transition().attr("x",s.rangeBand()*.9/2).attr("y",function(e,t){return a(e,t)<0?o(a(e,t))-o(0)+12:-4})):O.selectAll("text").remove(),O.attr("class",function(e,t){return a(e,t)<0?"nv-bar negative":"nv-bar positive"}).style("fill",function(e,t){return e.color||l(e,t)}).style("stroke",function(e,t){return e.color||l(e,t)}).select("rect").attr("class",y).transition().attr("width",s.rangeBand()*.9/e.length),O.transition().attr("transform",function(e,t){var n=s(u(e,t))+s.rangeBand()*.05,r=a(e,t)<0?o(0):o(0)-o(a(e,t))<1?o(0)-1:o(a(e,t));return"translate("+n+", "+r+")"}).select("rect").attr("height",function(e,t){return Math.max(Math.abs(o(a(e,t))-o(d&&d[0]||0))||1)}),b=s.copy(),w=o.copy()}),E}var t={top:0,right:0,bottom:0,left:0},n=960,r=500,i=Math.floor(Math.random()*1e4),s=d3.scale.ordinal(),o=d3.scale.linear(),u=function(e){return e.x},a=function(e){return e.y},f=[0],l=e.utils.defaultColor(),c=!1,h=d3.format(",.2f"),p,d,v,m,g=d3.dispatch("chartClick","elementClick","elementDblClick","elementMouseover","elementMouseout"),y="discreteBar",b,w;return E.dispatch=g,E.options=e.utils.optionsFunc.bind(E),E.x=function(e){return arguments.length?(u=e,E):u},E.y=function(e){return arguments.length?(a=e,E):a},E.margin=function(e){return arguments.length?(t.top=typeof e.top!="undefined"?e.top:t.top,t.right=typeof e.right!="undefined"?e.right:t.right,t.bottom=typeof e.bottom!="undefined"?e.bottom:t.bottom,t.left=typeof e.left!="undefined"?e.left:t.left,E):t},E.width=function(e){return arguments.length?(n=e,E):n},E.height=function(e){return arguments.length?(r=e,E):r},E.xScale=function(e){return arguments.length?(s=e,E):s},E.yScale=function(e){return arguments.length?(o=e,E):o},E.xDomain=function(e){return arguments.length?(p=e,E):p},E.yDomain=function(e){return arguments.length?(d=e,E):d},E.xRange=function(e){return arguments.length?(v=e,E):v},E.yRange=function(e){return arguments.length?(m=e,E):m},E.forceY=function(e){return 
arguments.length?(f=e,E):f},E.color=function(t){return arguments.length?(l=e.utils.getColor(t),E):l},E.id=function(e){return arguments.length?(i=e,E):i},E.showValues=function(e){return arguments.length?(c=e,E):c},E.valueFormat=function(e){return arguments.length?(h=e,E):h},E.rectClass=function(e){return arguments.length?(y=e,E):y},E},e.models.discreteBarChart=function(){"use strict";function w(e){return e.each(function(e){var u=d3.select(this),p=this,E=(s||parseInt(u.style("width"))||960)-i.left-i.right,S=(o||parseInt(u.style("height"))||400)-i.top-i.bottom;w.update=function(){g.beforeUpdate(),u.transition().duration(y).call(w)},w.container=this;if(!e||!e.length||!e.filter(function(e){return e.values.length}).length){var T=u.selectAll(".nv-noData").data([m]);return T.enter().append("text").attr("class","nvd3 nv-noData").attr("dy","-.7em").style("text-anchor","middle"),T.attr("x",i.left+E/2).attr("y",i.top+S/2).text(function(e){return e}),w}u.selectAll(".nv-noData").remove(),d=t.xScale(),v=t.yScale().clamp(!0);var N=u.selectAll("g.nv-wrap.nv-discreteBarWithAxes").data([e]),C=N.enter().append("g").attr("class","nvd3 nv-wrap nv-discreteBarWithAxes").append("g"),k=C.append("defs"),L=N.select("g");C.append("g").attr("class","nv-x nv-axis"),C.append("g").attr("class","nv-y nv-axis").append("g").attr("class","nv-zeroLine").append("line"),C.append("g").attr("class","nv-barsWrap"),L.attr("transform","translate("+i.left+","+i.top+")"),l&&L.select(".nv-y.nv-axis").attr("transform","translate("+E+",0)"),t.width(E).height(S);var A=L.select(".nv-barsWrap").datum(e.filter(function(e){return!e.disabled}));A.transition().call(t),k.append("clipPath").attr("id","nv-x-label-clip-"+t.id()).append("rect"),L.select("#nv-x-label-clip-"+t.id()+" rect").attr("width",d.rangeBand()*(c?2:1)).attr("height",16).attr("x",-d.rangeBand()/(c?1:2));if(a){n.scale(d).ticks(E/100).tickSize(-S,0),L.select(".nv-x.nv-axis").attr("transform","translate(0,"+(v.range()[0]+(t.showValues()&&v.domain()[0]<0?16:0))+")"),L.select(".nv-x.nv-axis").transition().call(n);var O=L.select(".nv-x.nv-axis").selectAll("g");c&&O.selectAll("text").attr("transform",function(e,t,n){return"translate(0,"+(n%2==0?"5":"17")+")"})}f&&(r.scale(v).ticks(S/36).tickSize(-E,0),L.select(".nv-y.nv-axis").transition().call(r)),L.select(".nv-zeroLine line").attr("x1",0).attr("x2",E).attr("y1",v(0)).attr("y2",v(0)),g.on("tooltipShow",function(e){h&&b(e,p.parentNode)})}),w}var t=e.models.discreteBar(),n=e.models.axis(),r=e.models.axis(),i={top:15,right:10,bottom:50,left:60},s=null,o=null,u=e.utils.getColor(),a=!0,f=!0,l=!1,c=!1,h=!0,p=function(e,t,n,r,i){return"

    "+t+"

    "+"

    "+n+"

    "},d,v,m="No Data Available.",g=d3.dispatch("tooltipShow","tooltipHide","beforeUpdate"),y=250;n.orient("bottom").highlightZero(!1).showMaxMin(!1).tickFormat(function(e){return e}),r.orient(l?"right":"left").tickFormat(d3.format(",.1f"));var b=function(i,s){var o=i.pos[0]+(s.offsetLeft||0),u=i.pos[1]+(s.offsetTop||0),a=n.tickFormat()(t.x()(i.point,i.pointIndex)),f=r.tickFormat()(t.y()(i.point,i.pointIndex)),l=p(i.series.key,a,f,i,w);e.tooltip.show([o,u],l,i.value<0?"n":"s",null,s)};return t.dispatch.on("elementMouseover.tooltip",function(e){e.pos=[e.pos[0]+i.left,e.pos[1]+i.top],g.tooltipShow(e)}),t.dispatch.on("elementMouseout.tooltip",function(e){g.tooltipHide(e)}),g.on("tooltipHide",function(){h&&e.tooltip.cleanup()}),w.dispatch=g,w.discretebar=t,w.xAxis=n,w.yAxis=r,d3.rebind(w,t,"x","y","xDomain","yDomain","xRange","yRange","forceX","forceY","id","showValues","valueFormat"),w.options=e.utils.optionsFunc.bind(w),w.margin=function(e){return arguments.length?(i.top=typeof e.top!="undefined"?e.top:i.top,i.right=typeof e.right!="undefined"?e.right:i.right,i.bottom=typeof e.bottom!="undefined"?e.bottom:i.bottom,i.left=typeof e.left!="undefined"?e.left:i.left,w):i},w.width=function(e){return arguments.length?(s=e,w):s},w.height=function(e){return arguments.length?(o=e,w):o},w.color=function(n){return arguments.length?(u=e.utils.getColor(n),t.color(u),w):u},w.showXAxis=function(e){return arguments.length?(a=e,w):a},w.showYAxis=function(e){return arguments.length?(f=e,w):f},w.rightAlignYAxis=function(e){return arguments.length?(l=e,r.orient(e?"right":"left"),w):l},w.staggerLabels=function(e){return arguments.length?(c=e,w):c},w.tooltips=function(e){return arguments.length?(h=e,w):h},w.tooltipContent=function(e){return arguments.length?(p=e,w):p},w.noData=function(e){return arguments.length?(m=e,w):m},w.transitionDuration=function(e){return arguments.length?(y=e,w):y},w},e.models.distribution=function(){"use strict";function l(e){return e.each(function(e){var a=n-(i==="x"?t.left+t.right:t.top+t.bottom),l=i=="x"?"y":"x",c=d3.select(this);f=f||u;var h=c.selectAll("g.nv-distribution").data([e]),p=h.enter().append("g").attr("class","nvd3 nv-distribution"),d=p.append("g"),v=h.select("g");h.attr("transform","translate("+t.left+","+t.top+")");var m=v.selectAll("g.nv-dist").data(function(e){return e},function(e){return e.key});m.enter().append("g"),m.attr("class",function(e,t){return"nv-dist nv-series-"+t}).style("stroke",function(e,t){return o(e,t)});var g=m.selectAll("line.nv-dist"+i).data(function(e){return e.values});g.enter().append("line").attr(i+"1",function(e,t){return f(s(e,t))}).attr(i+"2",function(e,t){return f(s(e,t))}),m.exit().selectAll("line.nv-dist"+i).transition().attr(i+"1",function(e,t){return u(s(e,t))}).attr(i+"2",function(e,t){return u(s(e,t))}).style("stroke-opacity",0).remove(),g.attr("class",function(e,t){return"nv-dist"+i+" nv-dist"+i+"-"+t}).attr(l+"1",0).attr(l+"2",r),g.transition().attr(i+"1",function(e,t){return u(s(e,t))}).attr(i+"2",function(e,t){return u(s(e,t))}),f=u.copy()}),l}var t={top:0,right:0,bottom:0,left:0},n=400,r=8,i="x",s=function(e){return e[i]},o=e.utils.defaultColor(),u=d3.scale.linear(),a,f;return l.options=e.utils.optionsFunc.bind(l),l.margin=function(e){return arguments.length?(t.top=typeof e.top!="undefined"?e.top:t.top,t.right=typeof e.right!="undefined"?e.right:t.right,t.bottom=typeof e.bottom!="undefined"?e.bottom:t.bottom,t.left=typeof e.left!="undefined"?e.left:t.left,l):t},l.width=function(e){return 
arguments.length?(n=e,l):n},l.axis=function(e){return arguments.length?(i=e,l):i},l.size=function(e){return arguments.length?(r=e,l):r},l.getData=function(e){return arguments.length?(s=d3.functor(e),l):s},l.scale=function(e){return arguments.length?(u=e,l):u},l.color=function(t){return arguments.length?(o=e.utils.getColor(t),l):o},l},e.models.historicalBar=function(){"use strict";function w(E){return E.each(function(w){var E=n-t.left-t.right,S=r-t.top-t.bottom,T=d3.select(this);s.domain(d||d3.extent(w[0].values.map(u).concat(f))),c?s.range(m||[E*.5/w[0].values.length,E*(w[0].values.length-.5)/w[0].values.length]):s.range(m||[0,E]),o.domain(v||d3.extent(w[0].values.map(a).concat(l))).range(g||[S,0]),s.domain()[0]===s.domain()[1]&&(s.domain()[0]?s.domain([s.domain()[0]-s.domain()[0]*.01,s.domain()[1]+s.domain()[1]*.01]):s.domain([-1,1])),o.domain()[0]===o.domain()[1]&&(o.domain()[0]?o.domain([o.domain()[0]+o.domain()[0]*.01,o.domain()[1]-o.domain()[1]*.01]):o.domain([-1,1]));var N=T.selectAll("g.nv-wrap.nv-historicalBar-"+i).data([w[0].values]),C=N.enter().append("g").attr("class","nvd3 nv-wrap nv-historicalBar-"+i),k=C.append("defs"),L=C.append("g"),A=N.select("g");L.append("g").attr("class","nv-bars"),N.attr("transform","translate("+t.left+","+t.top+")"),T.on("click",function(e,t){y.chartClick({data:e,index:t,pos:d3.event,id:i})}),k.append("clipPath").attr("id","nv-chart-clip-path-"+i).append("rect"),N.select("#nv-chart-clip-path-"+i+" rect").attr("width",E).attr("height",S),A.attr("clip-path",h?"url(#nv-chart-clip-path-"+i+")":"");var O=N.select(".nv-bars").selectAll(".nv-bar").data(function(e){return e},function(e,t){return u(e,t)});O.exit().remove();var M=O.enter().append("rect").attr("x",0).attr("y",function(t,n){return e.utils.NaNtoZero(o(Math.max(0,a(t,n))))}).attr("height",function(t,n){return e.utils.NaNtoZero(Math.abs(o(a(t,n))-o(0)))}).attr("transform",function(e,t){return"translate("+(s(u(e,t))-E/w[0].values.length*.45)+",0)"}).on("mouseover",function(e,t){if(!b)return;d3.select(this).classed("hover",!0),y.elementMouseover({point:e,series:w[0],pos:[s(u(e,t)),o(a(e,t))],pointIndex:t,seriesIndex:0,e:d3.event})}).on("mouseout",function(e,t){if(!b)return;d3.select(this).classed("hover",!1),y.elementMouseout({point:e,series:w[0],pointIndex:t,seriesIndex:0,e:d3.event})}).on("click",function(e,t){if(!b)return;y.elementClick({value:a(e,t),data:e,index:t,pos:[s(u(e,t)),o(a(e,t))],e:d3.event,id:i}),d3.event.stopPropagation()}).on("dblclick",function(e,t){if(!b)return;y.elementDblClick({value:a(e,t),data:e,index:t,pos:[s(u(e,t)),o(a(e,t))],e:d3.event,id:i}),d3.event.stopPropagation()});O.attr("fill",function(e,t){return p(e,t)}).attr("class",function(e,t,n){return(a(e,t)<0?"nv-bar negative":"nv-bar positive")+" nv-bar-"+n+"-"+t}).transition().attr("transform",function(e,t){return"translate("+(s(u(e,t))-E/w[0].values.length*.45)+",0)"}).attr("width",E/w[0].values.length*.9),O.transition().attr("y",function(t,n){var r=a(t,n)<0?o(0):o(0)-o(a(t,n))<1?o(0)-1:o(a(t,n));return e.utils.NaNtoZero(r)}).attr("height",function(t,n){return e.utils.NaNtoZero(Math.max(Math.abs(o(a(t,n))-o(0)),1))})}),w}var t={top:0,right:0,bottom:0,left:0},n=960,r=500,i=Math.floor(Math.random()*1e4),s=d3.scale.linear(),o=d3.scale.linear(),u=function(e){return e.x},a=function(e){return e.y},f=[],l=[0],c=!1,h=!0,p=e.utils.defaultColor(),d,v,m,g,y=d3.dispatch("chartClick","elementClick","elementDblClick","elementMouseover","elementMouseout"),b=!0;return 
w.highlightPoint=function(e,t){d3.select(".nv-historicalBar-"+i).select(".nv-bars .nv-bar-0-"+e).classed("hover",t)},w.clearHighlights=function(){d3.select(".nv-historicalBar-"+i).select(".nv-bars .nv-bar.hover").classed("hover",!1)},w.dispatch=y,w.options=e.utils.optionsFunc.bind(w),w.x=function(e){return arguments.length?(u=e,w):u},w.y=function(e){return arguments.length?(a=e,w):a},w.margin=function(e){return arguments.length?(t.top=typeof e.top!="undefined"?e.top:t.top,t.right=typeof e.right!="undefined"?e.right:t.right,t.bottom=typeof e.bottom!="undefined"?e.bottom:t.bottom,t.left=typeof e.left!="undefined"?e.left:t.left,w):t},w.width=function(e){return arguments.length?(n=e,w):n},w.height=function(e){return arguments.length?(r=e,w):r},w.xScale=function(e){return arguments.length?(s=e,w):s},w.yScale=function(e){return arguments.length?(o=e,w):o},w.xDomain=function(e){return arguments.length?(d=e,w):d},w.yDomain=function(e){return arguments.length?(v=e,w):v},w.xRange=function(e){return arguments.length?(m=e,w):m},w.yRange=function(e){return arguments.length?(g=e,w):g},w.forceX=function(e){return arguments.length?(f=e,w):f},w.forceY=function(e){return arguments.length?(l=e,w):l},w.padData=function(e){return arguments.length?(c=e,w):c},w.clipEdge=function(e){return arguments.length?(h=e,w):h},w.color=function(t){return arguments.length?(p=e.utils.getColor(t),w):p},w.id=function(e){return arguments.length?(i=e,w):i},w.interactive=function(e){return arguments.length?(b=!1,w):b},w},e.models.historicalBarChart=function(){"use strict";function x(e){return e.each(function(d){var T=d3.select(this),N=this,C=(u||parseInt(T.style("width"))||960)-s.left-s.right,k=(a||parseInt(T.style("height"))||400)-s.top-s.bottom;x.update=function(){T.transition().duration(E).call(x)},x.container=this,g.disabled=d.map(function(e){return!!e.disabled});if(!y){var L;y={};for(L in g)g[L]instanceof Array?y[L]=g[L].slice(0):y[L]=g[L]}if(!d||!d.length||!d.filter(function(e){return e.values.length}).length){var A=T.selectAll(".nv-noData").data([b]);return A.enter().append("text").attr("class","nvd3 nv-noData").attr("dy","-.7em").style("text-anchor","middle"),A.attr("x",s.left+C/2).attr("y",s.top+k/2).text(function(e){return e}),x}T.selectAll(".nv-noData").remove(),v=t.xScale(),m=t.yScale();var O=T.selectAll("g.nv-wrap.nv-historicalBarChart").data([d]),M=O.enter().append("g").attr("class","nvd3 nv-wrap nv-historicalBarChart").append("g"),_=O.select("g");M.append("g").attr("class","nv-x nv-axis"),M.append("g").attr("class","nv-y nv-axis"),M.append("g").attr("class","nv-barsWrap"),M.append("g").attr("class","nv-legendWrap"),f&&(i.width(C),_.select(".nv-legendWrap").datum(d).call(i),s.top!=i.height()&&(s.top=i.height(),k=(a||parseInt(T.style("height"))||400)-s.top-s.bottom),O.select(".nv-legendWrap").attr("transform","translate(0,"+ -s.top+")")),O.attr("transform","translate("+s.left+","+s.top+")"),h&&_.select(".nv-y.nv-axis").attr("transform","translate("+C+",0)"),t.width(C).height(k).color(d.map(function(e,t){return e.color||o(e,t)}).filter(function(e,t){return!d[t].disabled}));var 
D=_.select(".nv-barsWrap").datum(d.filter(function(e){return!e.disabled}));D.transition().call(t),l&&(n.scale(v).tickSize(-k,0),_.select(".nv-x.nv-axis").attr("transform","translate(0,"+m.range()[0]+")"),_.select(".nv-x.nv-axis").transition().call(n)),c&&(r.scale(m).ticks(k/36).tickSize(-C,0),_.select(".nv-y.nv-axis").transition().call(r)),i.dispatch.on("legendClick",function(t,n){t.disabled=!t.disabled,d.filter(function(e){return!e.disabled}).length||d.map(function(e){return e.disabled=!1,O.selectAll(".nv-series").classed("disabled",!1),e}),g.disabled=d.map(function(e){return!!e.disabled}),w.stateChange(g),e.transition().call(x)}),i.dispatch.on("legendDblclick",function(e){d.forEach(function(e){e.disabled=!0}),e.disabled=!1,g.disabled=d.map(function(e){return!!e.disabled}),w.stateChange(g),x.update()}),w.on("tooltipShow",function(e){p&&S(e,N.parentNode)}),w.on("changeState",function(e){typeof e.disabled!="undefined"&&(d.forEach(function(t,n){t.disabled=e.disabled[n]}),g.disabled=e.disabled),x.update()})}),x}var t=e.models.historicalBar(),n=e.models.axis(),r=e.models.axis(),i=e.models.legend(),s={top:30,right:90,bottom:50,left:90},o=e.utils.defaultColor(),u=null,a=null,f=!1,l=!0,c=!0,h=!1,p=!0,d=function(e,t,n,r,i){return"

    "+e+"

    "+"

    "+n+" at "+t+"

    "},v,m,g={},y=null,b="No Data Available.",w=d3.dispatch("tooltipShow","tooltipHide","stateChange","changeState"),E=250;n.orient("bottom").tickPadding(7),r.orient(h?"right":"left");var S=function(i,s){if(s){var o=d3.select(s).select("svg"),u=o.node()?o.attr("viewBox"):null;if(u){u=u.split(" ");var a=parseInt(o.style("width"))/u[2];i.pos[0]=i.pos[0]*a,i.pos[1]=i.pos[1]*a}}var f=i.pos[0]+(s.offsetLeft||0),l=i.pos[1]+(s.offsetTop||0),c=n.tickFormat()(t.x()(i.point,i.pointIndex)),h=r.tickFormat()(t.y()(i.point,i.pointIndex)),p=d(i.series.key,c,h,i,x);e.tooltip.show([f,l],p,null,null,s)};return t.dispatch.on("elementMouseover.tooltip",function(e){e.pos=[e.pos[0]+s.left,e.pos[1]+s.top],w.tooltipShow(e)}),t.dispatch.on("elementMouseout.tooltip",function(e){w.tooltipHide(e)}),w.on("tooltipHide",function(){p&&e.tooltip.cleanup()}),x.dispatch=w,x.bars=t,x.legend=i,x.xAxis=n,x.yAxis=r,d3.rebind(x,t,"defined","isArea","x","y","size","xScale","yScale","xDomain","yDomain","xRange","yRange","forceX","forceY","interactive","clipEdge","clipVoronoi","id","interpolate","highlightPoint","clearHighlights","interactive"),x.options=e.utils.optionsFunc.bind(x),x.margin=function(e){return arguments.length?(s.top=typeof e.top!="undefined"?e.top:s.top,s.right=typeof e.right!="undefined"?e.right:s.right,s.bottom=typeof e.bottom!="undefined"?e.bottom:s.bottom,s.left=typeof e.left!="undefined"?e.left:s.left,x):s},x.width=function(e){return arguments.length?(u=e,x):u},x.height=function(e){return arguments.length?(a=e,x):a},x.color=function(t){return arguments.length?(o=e.utils.getColor(t),i.color(o),x):o},x.showLegend=function(e){return arguments.length?(f=e,x):f},x.showXAxis=function(e){return arguments.length?(l=e,x):l},x.showYAxis=function(e){return arguments.length?(c=e,x):c},x.rightAlignYAxis=function(e){return arguments.length?(h=e,r.orient(e?"right":"left"),x):h},x.tooltips=function(e){return arguments.length?(p=e,x):p},x.tooltipContent=function(e){return arguments.length?(d=e,x):d},x.state=function(e){return arguments.length?(g=e,x):g},x.defaultState=function(e){return arguments.length?(y=e,x):y},x.noData=function(e){return arguments.length?(b=e,x):b},x.transitionDuration=function(e){return arguments.length?(E=e,x):E},x},e.models.indentedTree=function(){"use strict";function g(e){return e.each(function(e){function k(e,t,n){d3.event.stopPropagation();if(d3.event.shiftKey&&!n)return d3.event.shiftKey=!1,e.values&&e.values.forEach(function(e){(e.values||e._values)&&k(e,0,!0)}),!0;if(!O(e))return!0;e.values?(e._values=e.values,e.values=null):(e.values=e._values,e._values=null),g.update()}function L(e){return e._values&&e._values.length?h:e.values&&e.values.length?p:""}function A(e){return e._values&&e._values.length}function O(e){var t=e.values||e._values;return t&&t.length}var t=1,n=d3.select(this),i=d3.layout.tree().children(function(e){return e.values}).size([r,f]);g.update=function(){n.transition().duration(600).call(g)},e[0]||(e[0]={key:a});var s=i.nodes(e[0]),y=d3.select(this).selectAll("div").data([[s]]),b=y.enter().append("div").attr("class","nvd3 nv-wrap nv-indentedtree"),w=b.append("table"),E=y.select("table").attr("width","100%").attr("class",c);if(o){var S=w.append("thead"),x=S.append("tr");l.forEach(function(e){x.append("th").attr("width",e.width?e.width:"10%").style("text-align",e.type=="numeric"?"right":"left").append("span").text(e.label)})}var T=E.selectAll("tbody").data(function(e){return e});T.enter().append("tbody"),t=d3.max(s,function(e){return e.depth}),i.size([r,t*f]);var 
N=T.selectAll("tr").data(function(e){return e.filter(function(e){return u&&!e.children?u(e):!0})},function(e,t){return e.id||e.id||++m});N.exit().remove(),N.select("img.nv-treeicon").attr("src",L).classed("folded",A);var C=N.enter().append("tr");l.forEach(function(e,t){var n=C.append("td").style("padding-left",function(e){return(t?0:e.depth*f+12+(L(e)?0:16))+"px"},"important").style("text-align",e.type=="numeric"?"right":"left");t==0&&n.append("img").classed("nv-treeicon",!0).classed("nv-folded",A).attr("src",L).style("width","14px").style("height","14px").style("padding","0 1px").style("display",function(e){return L(e)?"inline-block":"none"}).on("click",k),n.each(function(n){!t&&v(n)?d3.select(this).append("a").attr("href",v).attr("class",d3.functor(e.classes)).append("span"):d3.select(this).append("span"),d3.select(this).select("span").attr("class",d3.functor(e.classes)).text(function(t){return e.format?e.format(t):t[e.key]||"-"})}),e.showCount&&(n.append("span").attr("class","nv-childrenCount"),N.selectAll("span.nv-childrenCount").text(function(e){return e.values&&e.values.length||e._values&&e._values.length?"("+(e.values&&e.values.filter(function(e){return u?u(e):!0}).length||e._values&&e._values.filter(function(e){return u?u(e):!0}).length||0)+")":""}))}),N.order().on("click",function(e){d.elementClick({row:this,data:e,pos:[e.x,e.y]})}).on("dblclick",function(e){d.elementDblclick({row:this,data:e,pos:[e.x,e.y]})}).on("mouseover",function(e){d.elementMouseover({row:this,data:e,pos:[e.x,e.y]})}).on("mouseout",function(e){d.elementMouseout({row:this,data:e,pos:[e.x,e.y]})})}),g}var t={top:0,right:0,bottom:0,left:0},n=960,r=500,i=e.utils.defaultColor(),s=Math.floor(Math.random()*1e4),o=!0,u=!1,a="No Data Available.",f=20,l=[{key:"key",label:"Name",type:"text"}],c=null,h="images/grey-plus.png",p="images/grey-minus.png",d=d3.dispatch("elementClick","elementDblclick","elementMouseover","elementMouseout"),v=function(e){return e.url},m=0;return g.options=e.utils.optionsFunc.bind(g),g.margin=function(e){return arguments.length?(t.top=typeof e.top!="undefined"?e.top:t.top,t.right=typeof e.right!="undefined"?e.right:t.right,t.bottom=typeof e.bottom!="undefined"?e.bottom:t.bottom,t.left=typeof e.left!="undefined"?e.left:t.left,g):t},g.width=function(e){return arguments.length?(n=e,g):n},g.height=function(e){return arguments.length?(r=e,g):r},g.color=function(t){return arguments.length?(i=e.utils.getColor(t),scatter.color(i),g):i},g.id=function(e){return arguments.length?(s=e,g):s},g.header=function(e){return arguments.length?(o=e,g):o},g.noData=function(e){return arguments.length?(a=e,g):a},g.filterZero=function(e){return arguments.length?(u=e,g):u},g.columns=function(e){return arguments.length?(l=e,g):l},g.tableClass=function(e){return arguments.length?(c=e,g):c},g.iconOpen=function(e){return arguments.length?(h=e,g):h},g.iconClose=function(e){return arguments.length?(p=e,g):p},g.getUrl=function(e){return arguments.length?(v=e,g):v},g},e.models.legend=function(){"use strict";function c(h){return h.each(function(c){var h=n-t.left-t.right,p=d3.select(this),d=p.selectAll("g.nv-legend").data([c]),v=d.enter().append("g").attr("class","nvd3 nv-legend").append("g"),m=d.select("g");d.attr("transform","translate("+t.left+","+t.top+")");var g=m.selectAll(".nv-series").data(function(e){return 
e}),y=g.enter().append("g").attr("class","nv-series").on("mouseover",function(e,t){l.legendMouseover(e,t)}).on("mouseout",function(e,t){l.legendMouseout(e,t)}).on("click",function(e,t){l.legendClick(e,t),a&&(f?(c.forEach(function(e){e.disabled=!0}),e.disabled=!1):(e.disabled=!e.disabled,c.every(function(e){return e.disabled})&&c.forEach(function(e){e.disabled=!1})),l.stateChange({disabled:c.map(function(e){return!!e.disabled})}))}).on("dblclick",function(e,t){l.legendDblclick(e,t),a&&(c.forEach(function(e){e.disabled=!0}),e.disabled=!1,l.stateChange({disabled:c.map(function(e){return!!e.disabled})}))});y.append("circle").style("stroke-width",2).attr("class","nv-legend-symbol").attr("r",5),y.append("text").attr("text-anchor","start").attr("class","nv-legend-text").attr("dy",".32em").attr("dx","8"),g.classed("disabled",function(e){return e.disabled}),g.exit().remove(),g.select("circle").style("fill",function(e,t){return e.color||s(e,t)}).style("stroke",function(e,t){return e.color||s(e,t)}),g.select("text").text(i);if(o){var b=[];g.each(function(t,n){var r=d3.select(this).select("text"),i;try{i=r.node().getComputedTextLength();if(i<=0)throw Error()}catch(s){i=e.utils.calcApproxTextWidth(r)}b.push(i+28)});var w=0,E=0,S=[];while(E<h&&w<b.length)S[w]=b[w],E+=b[w++];w===0&&(w=1);while(E>h&&w>1){S=[],w--;for(var x=0;x<b.length;x++)b[x]>(S[x%w]||0)&&(S[x%w]=b[x]);E=S.reduce(function(e,t,n,r){return e+t})}var T=[];for(var N=0,C=0;N<w;N++)T[N]=C,C+=S[N];g.attr("transform",function(e,t){return"translate("+T[t%w]+","+(5+Math.floor(t/w)*20)+")"}),u?m.attr("transform","translate("+(n-t.right-E)+","+t.top+")"):m.attr("transform","translate(0,"+t.top+")"),r=t.top+t.bottom+Math.ceil(b.length/w)*20}else{var k=5,L=5,A=0,O;g.attr("transform",function(e,t){var i=d3.select(this).select("text").node().getComputedTextLength()+28;return O=L,n<t.left+t.right+O+i&&(L=O=5,k+=20),L+=i,L>A&&(A=L),"translate("+O+","+k+")"}),m.attr("transform","translate("+(n-t.right-A)+","+t.top+")"),r=t.top+t.bottom+k+15}}),c}var t={top:5,right:0,bottom:5,left:0},n=400,r=20,i=function(e){return e.key},s=e.utils.defaultColor(),o=!0,u=!0,a=!0,f=!1,l=d3.dispatch("legendClick","legendDblclick","legendMouseover","legendMouseout","stateChange");return c.dispatch=l,c.options=e.utils.optionsFunc.bind(c),c.margin=function(e){return arguments.length?(t.top=typeof e.top!="undefined"?e.top:t.top,t.right=typeof e.right!="undefined"?e.right:t.right,t.bottom=typeof e.bottom!="undefined"?e.bottom:t.bottom,t.left=typeof e.left!="undefined"?e.left:t.left,c):t},c.width=function(e){return arguments.length?(n=e,c):n},c.height=function(e){return arguments.length?(r=e,c):r},c.key=function(e){return arguments.length?(i=e,c):i},c.color=function(t){return arguments.length?(s=e.utils.getColor(t),c):s},c.align=function(e){return arguments.length?(o=e,c):o},c.rightAlign=function(e){return arguments.length?(u=e,c):u},c.updateState=function(e){return arguments.length?(a=e,c):a},c.radioButtonMode=function(e){return arguments.length?(f=e,c):f},c},e.models.line=function(){"use strict";function m(g){return g.each(function(m){var g=r-n.left-n.right,b=i-n.top-n.bottom,w=d3.select(this);c=t.xScale(),h=t.yScale(),d=d||c,v=v||h;var E=w.selectAll("g.nv-wrap.nv-line").data([m]),S=E.enter().append("g").attr("class","nvd3 nv-wrap nv-line"),T=S.append("defs"),N=S.append("g"),C=E.select("g");N.append("g").attr("class","nv-groups"),N.append("g").attr("class","nv-scatterWrap"),E.attr("transform","translate("+n.left+","+n.top+")"),t.width(g).height(b);var k=E.select(".nv-scatterWrap");k.transition().call(t),T.append("clipPath").attr("id","nv-edge-clip-"+t.id()).append("rect"),E.select("#nv-edge-clip-"+t.id()+" rect").attr("width",g).attr("height",b),C.attr("clip-path",l?"url(#nv-edge-clip-"+t.id()+")":""),k.attr("clip-path",l?"url(#nv-edge-clip-"+t.id()+")":"");var L=E.select(".nv-groups").selectAll(".nv-group").data(function(e){return e},function(e){return 
e.key});L.enter().append("g").style("stroke-opacity",1e-6).style("fill-opacity",1e-6),L.exit().remove(),L.attr("class",function(e,t){return"nv-group nv-series-"+t}).classed("hover",function(e){return e.hover}).style("fill",function(e,t){return s(e,t)}).style("stroke",function(e,t){return s(e,t)}),L.transition().style("stroke-opacity",1).style("fill-opacity",.5);var A=L.selectAll("path.nv-area").data(function(e){return f(e)?[e]:[]});A.enter().append("path").attr("class","nv-area").attr("d",function(t){return d3.svg.area().interpolate(p).defined(a).x(function(t,n){return e. -utils.NaNtoZero(d(o(t,n)))}).y0(function(t,n){return e.utils.NaNtoZero(v(u(t,n)))}).y1(function(e,t){return v(h.domain()[0]<=0?h.domain()[1]>=0?0:h.domain()[1]:h.domain()[0])}).apply(this,[t.values])}),L.exit().selectAll("path.nv-area").remove(),A.transition().attr("d",function(t){return d3.svg.area().interpolate(p).defined(a).x(function(t,n){return e.utils.NaNtoZero(c(o(t,n)))}).y0(function(t,n){return e.utils.NaNtoZero(h(u(t,n)))}).y1(function(e,t){return h(h.domain()[0]<=0?h.domain()[1]>=0?0:h.domain()[1]:h.domain()[0])}).apply(this,[t.values])});var O=L.selectAll("path.nv-line").data(function(e){return[e.values]});O.enter().append("path").attr("class","nv-line").attr("d",d3.svg.line().interpolate(p).defined(a).x(function(t,n){return e.utils.NaNtoZero(d(o(t,n)))}).y(function(t,n){return e.utils.NaNtoZero(v(u(t,n)))})),O.transition().attr("d",d3.svg.line().interpolate(p).defined(a).x(function(t,n){return e.utils.NaNtoZero(c(o(t,n)))}).y(function(t,n){return e.utils.NaNtoZero(h(u(t,n)))})),d=c.copy(),v=h.copy()}),m}var t=e.models.scatter(),n={top:0,right:0,bottom:0,left:0},r=960,i=500,s=e.utils.defaultColor(),o=function(e){return e.x},u=function(e){return e.y},a=function(e,t){return!isNaN(u(e,t))&&u(e,t)!==null},f=function(e){return e.area},l=!1,c,h,p="linear";t.size(16).sizeDomain([16,256]);var d,v;return m.dispatch=t.dispatch,m.scatter=t,d3.rebind(m,t,"id","interactive","size","xScale","yScale","zScale","xDomain","yDomain","xRange","yRange","sizeDomain","forceX","forceY","forceSize","clipVoronoi","useVoronoi","clipRadius","padData","highlightPoint","clearHighlights"),m.options=e.utils.optionsFunc.bind(m),m.margin=function(e){return arguments.length?(n.top=typeof e.top!="undefined"?e.top:n.top,n.right=typeof e.right!="undefined"?e.right:n.right,n.bottom=typeof e.bottom!="undefined"?e.bottom:n.bottom,n.left=typeof e.left!="undefined"?e.left:n.left,m):n},m.width=function(e){return arguments.length?(r=e,m):r},m.height=function(e){return arguments.length?(i=e,m):i},m.x=function(e){return arguments.length?(o=e,t.x(e),m):o},m.y=function(e){return arguments.length?(u=e,t.y(e),m):u},m.clipEdge=function(e){return arguments.length?(l=e,m):l},m.color=function(n){return arguments.length?(s=e.utils.getColor(n),t.color(s),m):s},m.interpolate=function(e){return arguments.length?(p=e,m):p},m.defined=function(e){return arguments.length?(a=e,m):a},m.isArea=function(e){return arguments.length?(f=d3.functor(e),m):f},m},e.models.lineChart=function(){"use strict";function N(m){return m.each(function(m){var C=d3.select(this),k=this,L=(a||parseInt(C.style("width"))||960)-o.left-o.right,A=(f||parseInt(C.style("height"))||400)-o.top-o.bottom;N.update=function(){C.transition().duration(x).call(N)},N.container=this,b.disabled=m.map(function(e){return!!e.disabled});if(!w){var O;w={};for(O in b)b[O]instanceof Array?w[O]=b[O].slice(0):w[O]=b[O]}if(!m||!m.length||!m.filter(function(e){return e.values.length}).length){var 
M=C.selectAll(".nv-noData").data([E]);return M.enter().append("text").attr("class","nvd3 nv-noData").attr("dy","-.7em").style("text-anchor","middle"),M.attr("x",o.left+L/2).attr("y",o.top+A/2).text(function(e){return e}),N}C.selectAll(".nv-noData").remove(),g=t.xScale(),y=t.yScale();var _=C.selectAll("g.nv-wrap.nv-lineChart").data([m]),D=_.enter().append("g").attr("class","nvd3 nv-wrap nv-lineChart").append("g"),P=_.select("g");D.append("rect").style("opacity",0),D.append("g").attr("class","nv-x nv-axis"),D.append("g").attr("class","nv-y nv-axis"),D.append("g").attr("class","nv-linesWrap"),D.append("g").attr("class","nv-legendWrap"),D.append("g").attr("class","nv-interactive"),P.select("rect").attr("width",L).attr("height",A>0?A:0),l&&(i.width(L),P.select(".nv-legendWrap").datum(m).call(i),o.top!=i.height()&&(o.top=i.height(),A=(f||parseInt(C.style("height"))||400)-o.top-o.bottom),_.select(".nv-legendWrap").attr("transform","translate(0,"+ -o.top+")")),_.attr("transform","translate("+o.left+","+o.top+")"),p&&P.select(".nv-y.nv-axis").attr("transform","translate("+L+",0)"),d&&(s.width(L).height(A).margin({left:o.left,top:o.top}).svgContainer(C).xScale(g),_.select(".nv-interactive").call(s)),t.width(L).height(A).color(m.map(function(e,t){return e.color||u(e,t)}).filter(function(e,t){return!m[t].disabled}));var H=P.select(".nv-linesWrap").datum(m.filter(function(e){return!e.disabled}));H.transition().call(t),c&&(n.scale(g).ticks(L/100).tickSize(-A,0),P.select(".nv-x.nv-axis").attr("transform","translate(0,"+y.range()[0]+")"),P.select(".nv-x.nv-axis").transition().call(n)),h&&(r.scale(y).ticks(A/36).tickSize(-L,0),P.select(".nv-y.nv-axis").transition().call(r)),i.dispatch.on("stateChange",function(e){b=e,S.stateChange(b),N.update()}),s.dispatch.on("elementMousemove",function(i){t.clearHighlights();var a,f,l,c=[];m.filter(function(e,t){return e.seriesIndex=t,!e.disabled}).forEach(function(n,r){f=e.interactiveBisect(n.values,i.pointXValue,N.x()),t.highlightPoint(r,f,!0);var s=n.values[f];if(typeof s=="undefined")return;typeof a=="undefined"&&(a=s),typeof l=="undefined"&&(l=N.xScale()(N.x()(s,f))),c.push({key:n.key,value:N.y()(s,f),color:u(n,n.seriesIndex)})});if(c.length>2){var h=N.yScale().invert(i.mouseY),p=Math.abs(N.yScale().domain()[0]-N.yScale().domain()[1]),d=.03*p,g=e.nearestValueIndex(c.map(function(e){return e.value}),h,d);g!==null&&(c[g].highlight=!0)}var y=n.tickFormat()(N.x()(a,f));s.tooltip.position({left:l+o.left,top:i.mouseY+o.top}).chartContainer(k.parentNode).enabled(v).valueFormatter(function(e,t){return r.tickFormat()(e)}).data({value:y,series:c})(),s.renderGuideLine(l)}),s.dispatch.on("elementMouseout",function(e){S.tooltipHide(),t.clearHighlights()}),S.on("tooltipShow",function(e){v&&T(e,k.parentNode)}),S.on("changeState",function(e){typeof e.disabled!="undefined"&&m.length===e.disabled.length&&(m.forEach(function(t,n){t.disabled=e.disabled[n]}),b.disabled=e.disabled),N.update()})}),N}var t=e.models.line(),n=e.models.axis(),r=e.models.axis(),i=e.models.legend(),s=e.interactiveGuideline(),o={top:30,right:20,bottom:50,left:60},u=e.utils.defaultColor(),a=null,f=null,l=!0,c=!0,h=!0,p=!1,d=!1,v=!0,m=function(e,t,n,r,i){return"
<h3>"+e+"</h3>"+"<p>"+n+" at "+t+"</p>
    "},g,y,b={},w=null,E="No Data Available.",S=d3.dispatch("tooltipShow","tooltipHide","stateChange","changeState"),x=250;n.orient("bottom").tickPadding(7),r.orient(p?"right":"left");var T=function(i,s){var o=i.pos[0]+(s.offsetLeft||0),u=i.pos[1]+(s.offsetTop||0),a=n.tickFormat()(t.x()(i.point,i.pointIndex)),f=r.tickFormat()(t.y()(i.point,i.pointIndex)),l=m(i.series.key,a,f,i,N);e.tooltip.show([o,u],l,null,null,s)};return t.dispatch.on("elementMouseover.tooltip",function(e){e.pos=[e.pos[0]+o.left,e.pos[1]+o.top],S.tooltipShow(e)}),t.dispatch.on("elementMouseout.tooltip",function(e){S.tooltipHide(e)}),S.on("tooltipHide",function(){v&&e.tooltip.cleanup()}),N.dispatch=S,N.lines=t,N.legend=i,N.xAxis=n,N.yAxis=r,N.interactiveLayer=s,d3.rebind(N,t,"defined","isArea","x","y","size","xScale","yScale","xDomain","yDomain","xRange","yRange","forceX","forceY","interactive","clipEdge","clipVoronoi","useVoronoi","id","interpolate"),N.options=e.utils.optionsFunc.bind(N),N.margin=function(e){return arguments.length?(o.top=typeof e.top!="undefined"?e.top:o.top,o.right=typeof e.right!="undefined"?e.right:o.right,o.bottom=typeof e.bottom!="undefined"?e.bottom:o.bottom,o.left=typeof e.left!="undefined"?e.left:o.left,N):o},N.width=function(e){return arguments.length?(a=e,N):a},N.height=function(e){return arguments.length?(f=e,N):f},N.color=function(t){return arguments.length?(u=e.utils.getColor(t),i.color(u),N):u},N.showLegend=function(e){return arguments.length?(l=e,N):l},N.showXAxis=function(e){return arguments.length?(c=e,N):c},N.showYAxis=function(e){return arguments.length?(h=e,N):h},N.rightAlignYAxis=function(e){return arguments.length?(p=e,r.orient(e?"right":"left"),N):p},N.useInteractiveGuideline=function(e){return arguments.length?(d=e,e===!0&&(N.interactive(!1),N.useVoronoi(!1)),N):d},N.tooltips=function(e){return arguments.length?(v=e,N):v},N.tooltipContent=function(e){return arguments.length?(m=e,N):m},N.state=function(e){return arguments.length?(b=e,N):b},N.defaultState=function(e){return arguments.length?(w=e,N):w},N.noData=function(e){return arguments.length?(E=e,N):E},N.transitionDuration=function(e){return arguments.length?(x=e,N):x},N},e.models.linePlusBarChart=function(){"use strict";function T(e){return e.each(function(e){var l=d3.select(this),c=this,v=(a||parseInt(l.style("width"))||960)-u.left-u.right,N=(f||parseInt(l.style("height"))||400)-u.top-u.bottom;T.update=function(){l.transition().call(T)},b.disabled=e.map(function(e){return!!e.disabled});if(!w){var C;w={};for(C in b)b[C]instanceof Array?w[C]=b[C].slice(0):w[C]=b[C]}if(!e||!e.length||!e.filter(function(e){return e.values.length}).length){var k=l.selectAll(".nv-noData").data([E]);return k.enter().append("text").attr("class","nvd3 nv-noData").attr("dy","-.7em").style("text-anchor","middle"),k.attr("x",u.left+v/2).attr("y",u.top+N/2).text(function(e){return e}),T}l.selectAll(".nv-noData").remove();var L=e.filter(function(e){return!e.disabled&&e.bar}),A=e.filter(function(e){return!e.bar});m=A.filter(function(e){return!e.disabled}).length&&A.filter(function(e){return!e.disabled})[0].values.length?t.xScale():n.xScale(),g=n.yScale(),y=t.yScale();var O=d3.select(this).selectAll("g.nv-wrap.nv-linePlusBar").data([e]),M=O.enter().append("g").attr("class","nvd3 nv-wrap nv-linePlusBar").append("g"),_=O.select("g");M.append("g").attr("class","nv-x nv-axis"),M.append("g").attr("class","nv-y1 nv-axis"),M.append("g").attr("class","nv-y2 
nv-axis"),M.append("g").attr("class","nv-barsWrap"),M.append("g").attr("class","nv-linesWrap"),M.append("g").attr("class","nv-legendWrap"),p&&(o.width(v/2),_.select(".nv-legendWrap").datum(e.map(function(e){return e.originalKey=e.originalKey===undefined?e.key:e.originalKey,e.key=e.originalKey+(e.bar?" (left axis)":" (right axis)"),e})).call(o),u.top!=o.height()&&(u.top=o.height(),N=(f||parseInt(l.style("height"))||400)-u.top-u.bottom),_.select(".nv-legendWrap").attr("transform","translate("+v/2+","+ -u.top+")")),O.attr("transform","translate("+u.left+","+u.top+")"),t.width(v).height(N).color(e.map(function(e,t){return e.color||h(e,t)}).filter(function(t,n){return!e[n].disabled&&!e[n].bar})),n.width(v).height(N).color(e.map(function(e,t){return e.color||h(e,t)}).filter(function(t,n){return!e[n].disabled&&e[n].bar}));var D=_.select(".nv-barsWrap").datum(L.length?L:[{values:[]}]),P=_.select(".nv-linesWrap").datum(A[0]&&!A[0].disabled?A:[{values:[]}]);d3.transition(D).call(n),d3.transition(P).call(t),r.scale(m).ticks(v/100).tickSize(-N,0),_.select(".nv-x.nv-axis").attr("transform","translate(0,"+g.range()[0]+")"),d3.transition(_.select(".nv-x.nv-axis")).call(r),i.scale(g).ticks(N/36).tickSize(-v,0),d3.transition(_.select(".nv-y1.nv-axis")).style("opacity",L.length?1:0).call(i),s.scale(y).ticks(N/36).tickSize(L.length?0:-v,0),_.select(".nv-y2.nv-axis").style("opacity",A.length?1:0).attr("transform","translate("+v+",0)"),d3.transition(_.select(".nv-y2.nv-axis")).call(s),o.dispatch.on("stateChange",function(e){b=e,S.stateChange(b),T.update()}),S.on("tooltipShow",function(e){d&&x(e,c.parentNode)}),S.on("changeState",function(t){typeof t.disabled!="undefined"&&(e.forEach(function(e,n){e.disabled=t.disabled[n]}),b.disabled=t.disabled),T.update()})}),T}var t=e.models.line(),n=e.models.historicalBar(),r=e.models.axis(),i=e.models.axis(),s=e.models.axis(),o=e.models.legend(),u={top:30,right:60,bottom:50,left:60},a=null,f=null,l=function(e){return e.x},c=function(e){return e.y},h=e.utils.defaultColor(),p=!0,d=!0,v=function(e,t,n,r,i){return"
<h3>"+e+"</h3>"+"<p>"+n+" at "+t+"</p>
    "},m,g,y,b={},w=null,E="No Data Available.",S=d3.dispatch("tooltipShow","tooltipHide","stateChange","changeState");n.padData(!0),t.clipEdge(!1).padData(!0),r.orient("bottom").tickPadding(7).highlightZero(!1),i.orient("left"),s.orient("right");var x=function(n,o){var u=n.pos[0]+(o.offsetLeft||0),a=n.pos[1]+(o.offsetTop||0),f=r.tickFormat()(t.x()(n.point,n.pointIndex)),l=(n.series.bar?i:s).tickFormat()(t.y()(n.point,n.pointIndex)),c=v(n.series.key,f,l,n,T);e.tooltip.show([u,a],c,n.value<0?"n":"s",null,o)};return t.dispatch.on("elementMouseover.tooltip",function(e){e.pos=[e.pos[0]+u.left,e.pos[1]+u.top],S.tooltipShow(e)}),t.dispatch.on("elementMouseout.tooltip",function(e){S.tooltipHide(e)}),n.dispatch.on("elementMouseover.tooltip",function(e){e.pos=[e.pos[0]+u.left,e.pos[1]+u.top],S.tooltipShow(e)}),n.dispatch.on("elementMouseout.tooltip",function(e){S.tooltipHide(e)}),S.on("tooltipHide",function(){d&&e.tooltip.cleanup()}),T.dispatch=S,T.legend=o,T.lines=t,T.bars=n,T.xAxis=r,T.y1Axis=i,T.y2Axis=s,d3.rebind(T,t,"defined","size","clipVoronoi","interpolate"),T.options=e.utils.optionsFunc.bind(T),T.x=function(e){return arguments.length?(l=e,t.x(e),n.x(e),T):l},T.y=function(e){return arguments.length?(c=e,t.y(e),n.y(e),T):c},T.margin=function(e){return arguments.length?(u.top=typeof e.top!="undefined"?e.top:u.top,u.right=typeof e.right!="undefined"?e.right:u.right,u.bottom=typeof e.bottom!="undefined"?e.bottom:u.bottom,u.left=typeof e.left!="undefined"?e.left:u.left,T):u},T.width=function(e){return arguments.length?(a=e,T):a},T.height=function(e){return arguments.length?(f=e,T):f},T.color=function(t){return arguments.length?(h=e.utils.getColor(t),o.color(h),T):h},T.showLegend=function(e){return arguments.length?(p=e,T):p},T.tooltips=function(e){return arguments.length?(d=e,T):d},T.tooltipContent=function(e){return arguments.length?(v=e,T):v},T.state=function(e){return arguments.length?(b=e,T):b},T.defaultState=function(e){return arguments.length?(w=e,T):w},T.noData=function(e){return arguments.length?(E=e,T):E},T},e.models.lineWithFocusChart=function(){"use strict";function k(e){return e.each(function(e){function U(e){var t=+(e=="e"),n=t?1:-1,r=M/3;return"M"+.5*n+","+r+"A6,6 0 0 "+t+" "+6.5*n+","+(r+6)+"V"+(2*r-6)+"A6,6 0 0 "+t+" "+.5*n+","+2*r+"Z"+"M"+2.5*n+","+(r+8)+"V"+(2*r-8)+"M"+4.5*n+","+(r+8)+"V"+(2*r-8)}function z(){a.empty()||a.extent(w),I.data([a.empty()?g.domain():w]).each(function(e,t){var n=g(e[0])-v.range()[0],r=v.range()[1]-g(e[1]);d3.select(this).select(".left").attr("width",n<0?0:n),d3.select(this).select(".right").attr("x",g(e[1])).attr("width",r<0?0:r)})}function W(){w=a.empty()?null:a.extent();var n=a.empty()?g.domain():a.extent();if(Math.abs(n[0]-n[1])<=1)return;T.brush({extent:n,brush:a}),z();var s=H.select(".nv-focus .nv-linesWrap").datum(e.filter(function(e){return!e.disabled}).map(function(e,r){return{key:e.key,values:e.values.filter(function(e,r){return t.x()(e,r)>=n[0]&&t.x()(e,r)<=n[1]})}}));s.transition().duration(N).call(t),H.select(".nv-focus .nv-x.nv-axis").transition().duration(N).call(r),H.select(".nv-focus .nv-y.nv-axis").transition().duration(N).call(i)}var S=d3.select(this),L=this,A=(h||parseInt(S.style("width"))||960)-f.left-f.right,O=(p||parseInt(S.style("height"))||400)-f.top-f.bottom-d,M=d-l.top-l.bottom;k.update=function(){S.transition().duration(N).call(k)},k.container=this;if(!e||!e.length||!e.filter(function(e){return e.values.length}).length){var _=S.selectAll(".nv-noData").data([x]);return _.enter().append("text").attr("class","nvd3 
nv-noData").attr("dy","-.7em").style("text-anchor","middle"),_.attr("x",f.left+A/2).attr("y",f.top+O/2).text(function(e){return e}),k}S.selectAll(".nv-noData").remove(),v=t.xScale(),m=t.yScale(),g=n.xScale(),y=n.yScale();var D=S.selectAll("g.nv-wrap.nv-lineWithFocusChart").data([e]),P=D.enter().append("g").attr("class","nvd3 nv-wrap nv-lineWithFocusChart").append("g"),H=D.select("g");P.append("g").attr("class","nv-legendWrap");var B=P.append("g").attr("class","nv-focus");B.append("g").attr("class","nv-x nv-axis"),B.append("g").attr("class","nv-y nv-axis"),B.append("g").attr("class","nv-linesWrap");var j=P.append("g").attr("class","nv-context");j.append("g").attr("class","nv-x nv-axis"),j.append("g").attr("class","nv-y nv-axis"),j.append("g").attr("class","nv-linesWrap"),j.append("g").attr("class","nv-brushBackground"),j.append("g").attr("class","nv-x nv-brush"),b&&(u.width(A),H.select(".nv-legendWrap").datum(e).call(u),f.top!=u.height()&&(f.top=u.height(),O=(p||parseInt(S.style("height"))||400)-f.top-f.bottom-d),H.select(".nv-legendWrap").attr("transform","translate(0,"+ -f.top+")")),D.attr("transform","translate("+f.left+","+f.top+")"),t.width(A).height(O).color(e.map(function(e,t){return e.color||c(e,t)}).filter(function(t,n){return!e[n].disabled})),n.defined(t.defined()).width(A).height(M).color(e.map(function(e,t){return e.color||c(e,t)}).filter(function(t,n){return!e[n].disabled})),H.select(".nv-context").attr("transform","translate(0,"+(O+f.bottom+l.top)+")");var F=H.select(".nv-context .nv-linesWrap").datum(e.filter(function(e){return!e.disabled}));d3.transition(F).call(n),r.scale(v).ticks(A/100).tickSize(-O,0),i.scale(m).ticks(O/36).tickSize(-A,0),H.select(".nv-focus .nv-x.nv-axis").attr("transform","translate(0,"+O+")"),a.x(g).on("brush",function(){var e=k.transitionDuration();k.transitionDuration(0),W(),k.transitionDuration(e)}),w&&a.extent(w);var I=H.select(".nv-brushBackground").selectAll("g").data([w||a.extent()]),q=I.enter().append("g");q.append("rect").attr("class","left").attr("x",0).attr("y",0).attr("height",M),q.append("rect").attr("class","right").attr("x",0).attr("y",0).attr("height",M);var R=H.select(".nv-x.nv-brush").call(a);R.selectAll("rect").attr("height",M),R.selectAll(".resize").append("path").attr("d",U),W(),s.scale(g).ticks(A/100).tickSize(-M,0),H.select(".nv-context .nv-x.nv-axis").attr("transform","translate(0,"+y.range()[0]+")"),d3.transition(H.select(".nv-context .nv-x.nv-axis")).call(s),o.scale(y).ticks(M/36).tickSize(-A,0),d3.transition(H.select(".nv-context .nv-y.nv-axis")).call(o),H.select(".nv-context .nv-x.nv-axis").attr("transform","translate(0,"+y.range()[0]+")"),u.dispatch.on("stateChange",function(e){k.update()}),T.on("tooltipShow",function(e){E&&C(e,L.parentNode)})}),k}var t=e.models.line(),n=e.models.line(),r=e.models.axis(),i=e.models.axis(),s=e.models.axis(),o=e.models.axis(),u=e.models.legend(),a=d3.svg.brush(),f={top:30,right:30,bottom:30,left:60},l={top:0,right:30,bottom:20,left:60},c=e.utils.defaultColor(),h=null,p=null,d=100,v,m,g,y,b=!0,w=null,E=!0,S=function(e,t,n,r,i){return"
<h3>"+e+"</h3>"+"<p>"+n+" at "+t+"</p>
    "},x="No Data Available.",T=d3.dispatch("tooltipShow","tooltipHide","brush"),N=250;t.clipEdge(!0),n.interactive(!1),r.orient("bottom").tickPadding(5),i.orient("left"),s.orient("bottom").tickPadding(5),o.orient("left");var C=function(n,s){var o=n.pos[0]+(s.offsetLeft||0),u=n.pos[1]+(s.offsetTop||0),a=r.tickFormat()(t.x()(n.point,n.pointIndex)),f=i.tickFormat()(t.y()(n.point,n.pointIndex)),l=S(n.series.key,a,f,n,k);e.tooltip.show([o,u],l,null,null,s)};return t.dispatch.on("elementMouseover.tooltip",function(e){e.pos=[e.pos[0]+f.left,e.pos[1]+f.top],T.tooltipShow(e)}),t.dispatch.on("elementMouseout.tooltip",function(e){T.tooltipHide(e)}),T.on("tooltipHide",function(){E&&e.tooltip.cleanup()}),k.dispatch=T,k.legend=u,k.lines=t,k.lines2=n,k.xAxis=r,k.yAxis=i,k.x2Axis=s,k.y2Axis=o,d3.rebind(k,t,"defined","isArea","size","xDomain","yDomain","xRange","yRange","forceX","forceY","interactive","clipEdge","clipVoronoi","id"),k.options=e.utils.optionsFunc.bind(k),k.x=function(e){return arguments.length?(t.x(e),n.x(e),k):t.x},k.y=function(e){return arguments.length?(t.y(e),n.y(e),k):t.y},k.margin=function(e){return arguments.length?(f.top=typeof e.top!="undefined"?e.top:f.top,f.right=typeof e.right!="undefined"?e.right:f.right,f.bottom=typeof e.bottom!="undefined"?e.bottom:f.bottom,f.left=typeof e.left!="undefined"?e.left:f.left,k):f},k.margin2=function(e){return arguments.length?(l=e,k):l},k.width=function(e){return arguments.length?(h=e,k):h},k.height=function(e){return arguments.length?(p=e,k):p},k.height2=function(e){return arguments.length?(d=e,k):d},k.color=function(t){return arguments.length?(c=e.utils.getColor(t),u.color(c),k):c},k.showLegend=function(e){return arguments.length?(b=e,k):b},k.tooltips=function(e){return arguments.length?(E=e,k):E},k.tooltipContent=function(e){return arguments.length?(S=e,k):S},k.interpolate=function(e){return arguments.length?(t.interpolate(e),n.interpolate(e),k):t.interpolate()},k.noData=function(e){return arguments.length?(x=e,k):x},k.xTickFormat=function(e){return arguments.length?(r.tickFormat(e),s.tickFormat(e),k):r.tickFormat()},k.yTickFormat=function(e){return arguments.length?(i.tickFormat(e),o.tickFormat(e),k):i.tickFormat()},k.brushExtent=function(e){return arguments.length?(w=e,k):w},k.transitionDuration=function(e){return arguments.length?(N=e,k):N},k},e.models.linePlusBarWithFocusChart=function(){"use strict";function B(e){return e.each(function(e){function nt(e){var t=+(e=="e"),n=t?1:-1,r=q/3;return"M"+.5*n+","+r+"A6,6 0 0 "+t+" "+6.5*n+","+(r+6)+"V"+(2*r-6)+"A6,6 0 0 "+t+" "+.5*n+","+2*r+"Z"+"M"+2.5*n+","+(r+8)+"V"+(2*r-8)+"M"+4.5*n+","+(r+8)+"V"+(2*r-8)}function rt(){h.empty()||h.extent(x),Z.data([h.empty()?k.domain():x]).each(function(e,t){var n=k(e[0])-k.range()[0],r=k.range()[1]-k(e[1]);d3.select(this).select(".left").attr("width",n<0?0:n),d3.select(this).select(".right").attr("x",k(e[1])).attr("width",r<0?0:r)})}function it(){x=h.empty()?null:h.extent(),S=h.empty()?k.domain():h.extent(),D.brush({extent:S,brush:h}),rt(),r.width(F).height(I).color(e.map(function(e,t){return e.color||w(e,t)}).filter(function(t,n){return!e[n].disabled&&e[n].bar})),t.width(F).height(I).color(e.map(function(e,t){return e.color||w(e,t)}).filter(function(t,n){return!e[n].disabled&&!e[n].bar}));var n=J.select(".nv-focus .nv-barsWrap").datum(U.length?U.map(function(e,t){return{key:e.key,values:e.values.filter(function(e,t){return r.x()(e,t)>=S[0]&&r.x()(e,t)<=S[1]})}}):[{values:[]}]),i=J.select(".nv-focus 
.nv-linesWrap").datum(z[0].disabled?[{values:[]}]:z.map(function(e,n){return{key:e.key,values:e.values.filter(function(e,n){return t.x()(e,n)>=S[0]&&t.x()(e,n)<=S[1]})}}));U.length?C=r.xScale():C=t.xScale(),s.scale(C).ticks(F/100).tickSize(-I,0),s.domain([Math.ceil(S[0]),Math.floor(S[1])]),J.select(".nv-x.nv-axis").transition().duration(P).call(s),n.transition().duration(P).call(r),i.transition().duration(P).call(t),J.select(".nv-focus .nv-x.nv-axis").attr("transform","translate(0,"+L.range()[0]+")"),u.scale(L).ticks(I/36).tickSize(-F,0),J.select(".nv-focus .nv-y1.nv-axis").style("opacity",U.length?1:0),a.scale(A).ticks(I/36).tickSize(U.length?0:-F,0),J.select(".nv-focus .nv-y2.nv-axis").style("opacity",z.length?1:0).attr("transform","translate("+C.range()[1]+",0)"),J.select(".nv-focus .nv-y1.nv-axis").transition().duration(P).call(u),J.select(".nv-focus .nv-y2.nv-axis").transition().duration(P).call(a)}var N=d3.select(this),j=this,F=(v||parseInt(N.style("width"))||960)-p.left-p.right,I=(m||parseInt(N.style("height"))||400)-p.top-p.bottom-g,q=g-d.top-d.bottom;B.update=function(){N.transition().duration(P).call(B)},B.container=this;if(!e||!e.length||!e.filter(function(e){return e.values.length}).length){var R=N.selectAll(".nv-noData").data([_]);return R.enter().append("text").attr("class","nvd3 nv-noData").attr("dy","-.7em").style("text-anchor","middle"),R.attr("x",p.left+F/2).attr("y",p.top+I/2).text(function(e){return e}),B}N.selectAll(".nv-noData").remove();var U=e.filter(function(e){return!e.disabled&&e.bar}),z=e.filter(function(e){return!e.bar});C=r.xScale(),k=o.scale(),L=r.yScale(),A=t.yScale(),O=i.yScale(),M=n.yScale();var W=e.filter(function(e){return!e.disabled&&e.bar}).map(function(e){return e.values.map(function(e,t){return{x:y(e,t),y:b(e,t)}})}),X=e.filter(function(e){return!e.disabled&&!e.bar}).map(function(e){return e.values.map(function(e,t){return{x:y(e,t),y:b(e,t)}})});C.range([0,F]),k.domain(d3.extent(d3.merge(W.concat(X)),function(e){return e.x})).range([0,F]);var V=N.selectAll("g.nv-wrap.nv-linePlusBar").data([e]),$=V.enter().append("g").attr("class","nvd3 nv-wrap nv-linePlusBar").append("g"),J=V.select("g");$.append("g").attr("class","nv-legendWrap");var K=$.append("g").attr("class","nv-focus");K.append("g").attr("class","nv-x nv-axis"),K.append("g").attr("class","nv-y1 nv-axis"),K.append("g").attr("class","nv-y2 nv-axis"),K.append("g").attr("class","nv-barsWrap"),K.append("g").attr("class","nv-linesWrap");var Q=$.append("g").attr("class","nv-context");Q.append("g").attr("class","nv-x nv-axis"),Q.append("g").attr("class","nv-y1 nv-axis"),Q.append("g").attr("class","nv-y2 nv-axis"),Q.append("g").attr("class","nv-barsWrap"),Q.append("g").attr("class","nv-linesWrap"),Q.append("g").attr("class","nv-brushBackground"),Q.append("g").attr("class","nv-x nv-brush"),E&&(c.width(F/2),J.select(".nv-legendWrap").datum(e.map(function(e){return e.originalKey=e.originalKey===undefined?e.key:e.originalKey,e.key=e.originalKey+(e.bar?" 
(left axis)":" (right axis)"),e})).call(c),p.top!=c.height()&&(p.top=c.height(),I=(m||parseInt(N.style("height"))||400)-p.top-p.bottom-g),J.select(".nv-legendWrap").attr("transform","translate("+F/2+","+ -p.top+")")),V.attr("transform","translate("+p.left+","+p.top+")"),i.width(F).height(q).color(e.map(function(e,t){return e.color||w(e,t)}).filter(function(t,n){return!e[n].disabled&&e[n].bar})),n.width(F).height(q).color(e.map(function(e,t){return e.color||w(e,t)}).filter(function(t,n){return!e[n].disabled&&!e[n].bar}));var G=J.select(".nv-context .nv-barsWrap").datum(U.length?U:[{values:[]}]),Y=J.select(".nv-context .nv-linesWrap").datum(z[0].disabled?[{values:[]}]:z);J.select(".nv-context").attr("transform","translate(0,"+(I+p.bottom+d.top)+")"),G.transition().call(i),Y.transition().call(n),h.x(k).on("brush",it),x&&h.extent(x);var Z=J.select(".nv-brushBackground").selectAll("g").data([x||h.extent()]),et=Z.enter().append("g");et.append("rect").attr("class","left").attr("x",0).attr("y",0).attr("height",q),et.append("rect").attr("class","right").attr("x",0).attr("y",0).attr("height",q);var tt=J.select(".nv-x.nv-brush").call(h);tt.selectAll("rect").attr("height",q),tt.selectAll(".resize").append("path").attr("d",nt),o.ticks(F/100).tickSize(-q,0),J.select(".nv-context .nv-x.nv-axis").attr("transform","translate(0,"+O.range()[0]+")"),J.select(".nv-context .nv-x.nv-axis").transition().call(o),f.scale(O).ticks(q/36).tickSize(-F,0),J.select(".nv-context .nv-y1.nv-axis").style("opacity",U.length?1:0).attr("transform","translate(0,"+k.range()[0]+")"),J.select(".nv-context .nv-y1.nv-axis").transition().call(f),l.scale(M).ticks(q/36).tickSize(U.length?0:-F,0),J.select(".nv-context .nv-y2.nv-axis").style("opacity",z.length?1:0).attr("transform","translate("+k.range()[1]+",0)"),J.select(".nv-context .nv-y2.nv-axis").transition().call(l),c.dispatch.on("stateChange",function(e){B.update()}),D.on("tooltipShow",function(e){T&&H(e,j.parentNode)}),it()}),B}var t=e.models.line(),n=e.models.line(),r=e.models.historicalBar(),i=e.models.historicalBar(),s=e.models.axis(),o=e.models.axis(),u=e.models.axis(),a=e.models.axis(),f=e.models.axis(),l=e.models.axis(),c=e.models.legend(),h=d3.svg.brush(),p={top:30,right:30,bottom:30,left:60},d={top:0,right:30,bottom:20,left:60},v=null,m=null,g=100,y=function(e){return e.x},b=function(e){return e.y},w=e.utils.defaultColor(),E=!0,S,x=null,T=!0,N=function(e,t,n,r,i){return"
<h3>"+e+"</h3>"+"<p>"+n+" at "+t+"</p>
    "},C,k,L,A,O,M,_="No Data Available.",D=d3.dispatch("tooltipShow","tooltipHide","brush"),P=0;t.clipEdge(!0),n.interactive(!1),s.orient("bottom").tickPadding(5),u.orient("left"),a.orient("right"),o.orient("bottom").tickPadding(5),f.orient("left"),l.orient("right");var H=function(n,r){S&&(n.pointIndex+=Math.ceil(S[0]));var i=n.pos[0]+(r.offsetLeft||0),o=n.pos[1]+(r.offsetTop||0),f=s.tickFormat()(t.x()(n.point,n.pointIndex)),l=(n.series.bar?u:a).tickFormat()(t.y()(n.point,n.pointIndex)),c=N(n.series.key,f,l,n,B);e.tooltip.show([i,o],c,n.value<0?"n":"s",null,r)};return t.dispatch.on("elementMouseover.tooltip",function(e){e.pos=[e.pos[0]+p.left,e.pos[1]+p.top],D.tooltipShow(e)}),t.dispatch.on("elementMouseout.tooltip",function(e){D.tooltipHide(e)}),r.dispatch.on("elementMouseover.tooltip",function(e){e.pos=[e.pos[0]+p.left,e.pos[1]+p.top],D.tooltipShow(e)}),r.dispatch.on("elementMouseout.tooltip",function(e){D.tooltipHide(e)}),D.on("tooltipHide",function(){T&&e.tooltip.cleanup()}),B.dispatch=D,B.legend=c,B.lines=t,B.lines2=n,B.bars=r,B.bars2=i,B.xAxis=s,B.x2Axis=o,B.y1Axis=u,B.y2Axis=a,B.y3Axis=f,B.y4Axis=l,d3.rebind(B,t,"defined","size","clipVoronoi","interpolate"),B.options=e.utils.optionsFunc.bind(B),B.x=function(e){return arguments.length?(y=e,t.x(e),r.x(e),B):y},B.y=function(e){return arguments.length?(b=e,t.y(e),r.y(e),B):b},B.margin=function(e){return arguments.length?(p.top=typeof e.top!="undefined"?e.top:p.top,p.right=typeof e.right!="undefined"?e.right:p.right,p.bottom=typeof e.bottom!="undefined"?e.bottom:p.bottom,p.left=typeof e.left!="undefined"?e.left:p.left,B):p},B.width=function(e){return arguments.length?(v=e,B):v},B.height=function(e){return arguments.length?(m=e,B):m},B.color=function(t){return arguments.length?(w=e.utils.getColor(t),c.color(w),B):w},B.showLegend=function(e){return arguments.length?(E=e,B):E},B.tooltips=function(e){return arguments.length?(T=e,B):T},B.tooltipContent=function(e){return arguments.length?(N=e,B):N},B.noData=function(e){return arguments.length?(_=e,B):_},B.brushExtent=function(e){return arguments.length?(x=e,B):x},B},e.models.multiBar=function(){"use strict";function C(e){return e.each(function(e){var C=n-t.left-t.right,k=r-t.top-t.bottom,L=d3.select(this);d&&e.length&&(d=[{values:e[0].values.map(function(e){return{x:e.x,y:0,series:e.series,size:.01}})}]),c&&(e=d3.layout.stack().offset(h).values(function(e){return e.values}).y(a)(!e.length&&d?d:e)),e.forEach(function(e,t){e.values.forEach(function(e){e.series=t})}),c&&e[0].values.map(function(t,n){var r=0,i=0;e.map(function(e){var t=e.values[n];t.size=Math.abs(t.y),t.y<0?(t.y1=i,i-=t.size):(t.y1=t.size+r,r+=t.size)})});var A=y&&b?[]:e.map(function(e){return e.values.map(function(e,t){return{x:u(e,t),y:a(e,t),y0:e.y0,y1:e.y1}})});i.domain(y||d3.merge(A).map(function(e){return e.x})).rangeBands(w||[0,C],S),s.domain(b||d3.extent(d3.merge(A).map(function(e){return c?e.y>0?e.y1:e.y1+e.y:e.y}).concat(f))).range(E||[k,0]),i.domain()[0]===i.domain()[1]&&(i.domain()[0]?i.domain([i.domain()[0]-i.domain()[0]*.01,i.domain()[1]+i.domain()[1]*.01]):i.domain([-1,1])),s.domain()[0]===s.domain()[1]&&(s.domain()[0]?s.domain([s.domain()[0]+s.domain()[0]*.01,s.domain()[1]-s.domain()[1]*.01]):s.domain([-1,1])),T=T||i,N=N||s;var O=L.selectAll("g.nv-wrap.nv-multibar").data([e]),M=O.enter().append("g").attr("class","nvd3 nv-wrap 
nv-multibar"),_=M.append("defs"),D=M.append("g"),P=O.select("g");D.append("g").attr("class","nv-groups"),O.attr("transform","translate("+t.left+","+t.top+")"),_.append("clipPath").attr("id","nv-edge-clip-"+o).append("rect"),O.select("#nv-edge-clip-"+o+" rect").attr("width",C).attr("height",k),P.attr("clip-path",l?"url(#nv-edge-clip-"+o+")":"");var H=O.select(".nv-groups").selectAll(".nv-group").data(function(e){return e},function(e,t){return t});H.enter().append("g").style("stroke-opacity",1e-6).style("fill-opacity",1e-6),H.exit().transition().selectAll("rect.nv-bar").delay(function(t,n){return n*g/e[0].values.length}).attr("y",function(e){return c?N(e.y0):N(0)}).attr("height",0).remove(),H.attr("class",function(e,t){return"nv-group nv-series-"+t}).classed("hover",function(e){return e.hover}).style("fill",function(e,t){return p(e,t)}).style("stroke",function(e,t){return p(e,t)}),H.transition().style("stroke-opacity",1).style("fill-opacity",.75);var B=H.selectAll("rect.nv-bar").data(function(t){return d&&!e.length?d.values:t.values});B.exit().remove();var j=B.enter().append("rect").attr("class",function(e,t){return a(e,t)<0?"nv-bar negative":"nv-bar positive"}).attr("x",function(t,n,r){return c?0:r*i.rangeBand()/e.length}).attr("y",function(e){return N(c?e.y0:0)}).attr("height",0).attr("width",i.rangeBand()/(c?1:e.length)).attr("transform",function(e,t){return"translate("+i(u(e,t))+",0)"});B.style("fill",function(e,t,n){return p(e,n,t)}).style("stroke",function(e,t,n){return p(e,n,t)}).on("mouseover",function(t,n){d3.select(this).classed("hover",!0),x.elementMouseover({value:a(t,n),point:t,series:e[t.series],pos:[i(u(t,n))+i.rangeBand()*(c?e.length/2:t.series+.5)/e.length,s(a(t,n)+(c?t.y0:0))],pointIndex:n,seriesIndex:t.series,e:d3.event})}).on("mouseout",function(t,n){d3.select(this).classed("hover",!1),x.elementMouseout({value:a(t,n),point:t,series:e[t.series],pointIndex:n,seriesIndex:t.series,e:d3.event})}).on("click",function(t,n){x.elementClick({value:a(t,n),point:t,series:e[t.series],pos:[i(u(t,n))+i.rangeBand()*(c?e.length/2:t.series+.5)/e.length -,s(a(t,n)+(c?t.y0:0))],pointIndex:n,seriesIndex:t.series,e:d3.event}),d3.event.stopPropagation()}).on("dblclick",function(t,n){x.elementDblClick({value:a(t,n),point:t,series:e[t.series],pos:[i(u(t,n))+i.rangeBand()*(c?e.length/2:t.series+.5)/e.length,s(a(t,n)+(c?t.y0:0))],pointIndex:n,seriesIndex:t.series,e:d3.event}),d3.event.stopPropagation()}),B.attr("class",function(e,t){return a(e,t)<0?"nv-bar negative":"nv-bar positive"}).transition().attr("transform",function(e,t){return"translate("+i(u(e,t))+",0)"}),v&&(m||(m=e.map(function(){return!0})),B.style("fill",function(e,t,n){return d3.rgb(v(e,t)).darker(m.map(function(e,t){return t}).filter(function(e,t){return!m[t]})[n]).toString()}).style("stroke",function(e,t,n){return d3.rgb(v(e,t)).darker(m.map(function(e,t){return t}).filter(function(e,t){return!m[t]})[n]).toString()})),c?B.transition().delay(function(t,n){return n*g/e[0].values.length}).attr("y",function(e,t){return s(c?e.y1:0)}).attr("height",function(e,t){return Math.max(Math.abs(s(e.y+(c?e.y0:0))-s(c?e.y0:0)),1)}).attr("x",function(t,n){return c?0:t.series*i.rangeBand()/e.length}).attr("width",i.rangeBand()/(c?1:e.length)):B.transition().delay(function(t,n){return n*g/e[0].values.length}).attr("x",function(t,n){return t.series*i.rangeBand()/e.length}).attr("width",i.rangeBand()/e.length).attr("y",function(e,t){return a(e,t)<0?s(0):s(0)-s(a(e,t))<1?s(0)-1:s(a(e,t))||0}).attr("height",function(e,t){return 
Math.max(Math.abs(s(a(e,t))-s(0)),1)||0}),T=i.copy(),N=s.copy()}),C}var t={top:0,right:0,bottom:0,left:0},n=960,r=500,i=d3.scale.ordinal(),s=d3.scale.linear(),o=Math.floor(Math.random()*1e4),u=function(e){return e.x},a=function(e){return e.y},f=[0],l=!0,c=!1,h="zero",p=e.utils.defaultColor(),d=!1,v=null,m,g=1200,y,b,w,E,S=.1,x=d3.dispatch("chartClick","elementClick","elementDblClick","elementMouseover","elementMouseout"),T,N;return C.dispatch=x,C.options=e.utils.optionsFunc.bind(C),C.x=function(e){return arguments.length?(u=e,C):u},C.y=function(e){return arguments.length?(a=e,C):a},C.margin=function(e){return arguments.length?(t.top=typeof e.top!="undefined"?e.top:t.top,t.right=typeof e.right!="undefined"?e.right:t.right,t.bottom=typeof e.bottom!="undefined"?e.bottom:t.bottom,t.left=typeof e.left!="undefined"?e.left:t.left,C):t},C.width=function(e){return arguments.length?(n=e,C):n},C.height=function(e){return arguments.length?(r=e,C):r},C.xScale=function(e){return arguments.length?(i=e,C):i},C.yScale=function(e){return arguments.length?(s=e,C):s},C.xDomain=function(e){return arguments.length?(y=e,C):y},C.yDomain=function(e){return arguments.length?(b=e,C):b},C.xRange=function(e){return arguments.length?(w=e,C):w},C.yRange=function(e){return arguments.length?(E=e,C):E},C.forceY=function(e){return arguments.length?(f=e,C):f},C.stacked=function(e){return arguments.length?(c=e,C):c},C.stackOffset=function(e){return arguments.length?(h=e,C):h},C.clipEdge=function(e){return arguments.length?(l=e,C):l},C.color=function(t){return arguments.length?(p=e.utils.getColor(t),C):p},C.barColor=function(t){return arguments.length?(v=e.utils.getColor(t),C):v},C.disabled=function(e){return arguments.length?(m=e,C):m},C.id=function(e){return arguments.length?(o=e,C):o},C.hideable=function(e){return arguments.length?(d=e,C):d},C.delay=function(e){return arguments.length?(g=e,C):g},C.groupSpacing=function(e){return arguments.length?(S=e,C):S},C},e.models.multiBarChart=function(){"use strict";function A(e){return e.each(function(e){var b=d3.select(this),O=this,M=(u||parseInt(b.style("width"))||960)-o.left-o.right,_=(a||parseInt(b.style("height"))||400)-o.top-o.bottom;A.update=function(){b.transition().duration(k).call(A)},A.container=this,S.disabled=e.map(function(e){return!!e.disabled});if(!x){var D;x={};for(D in S)S[D]instanceof Array?x[D]=S[D].slice(0):x[D]=S[D]}if(!e||!e.length||!e.filter(function(e){return e.values.length}).length){var P=b.selectAll(".nv-noData").data([T]);return P.enter().append("text").attr("class","nvd3 nv-noData").attr("dy","-.7em").style("text-anchor","middle"),P.attr("x",o.left+M/2).attr("y",o.top+_/2).text(function(e){return e}),A}b.selectAll(".nv-noData").remove(),w=t.xScale(),E=t.yScale();var H=b.selectAll("g.nv-wrap.nv-multiBarWithLegend").data([e]),B=H.enter().append("g").attr("class","nvd3 nv-wrap nv-multiBarWithLegend").append("g"),j=H.select("g");B.append("g").attr("class","nv-x nv-axis"),B.append("g").attr("class","nv-y nv-axis"),B.append("g").attr("class","nv-barsWrap"),B.append("g").attr("class","nv-legendWrap"),B.append("g").attr("class","nv-controlsWrap"),c&&(i.width(M-C()),t.barColor()&&e.forEach(function(e,t){e.color=d3.rgb("#ccc").darker(t*1.5).toString()}),j.select(".nv-legendWrap").datum(e).call(i),o.top!=i.height()&&(o.top=i.height(),_=(a||parseInt(b.style("height"))||400)-o.top-o.bottom),j.select(".nv-legendWrap").attr("transform","translate("+C()+","+ -o.top+")"));if(l){var 
F=[{key:"Grouped",disabled:t.stacked()},{key:"Stacked",disabled:!t.stacked()}];s.width(C()).color(["#444","#444","#444"]),j.select(".nv-controlsWrap").datum(F).attr("transform","translate(0,"+ -o.top+")").call(s)}H.attr("transform","translate("+o.left+","+o.top+")"),d&&j.select(".nv-y.nv-axis").attr("transform","translate("+M+",0)"),t.disabled(e.map(function(e){return e.disabled})).width(M).height(_).color(e.map(function(e,t){return e.color||f(e,t)}).filter(function(t,n){return!e[n].disabled}));var I=j.select(".nv-barsWrap").datum(e.filter(function(e){return!e.disabled}));I.transition().call(t);if(h){n.scale(w).ticks(M/100).tickSize(-_,0),j.select(".nv-x.nv-axis").attr("transform","translate(0,"+E.range()[0]+")"),j.select(".nv-x.nv-axis").transition().call(n);var q=j.select(".nv-x.nv-axis > g").selectAll("g");q.selectAll("line, text").style("opacity",1);if(m){var R=function(e,t){return"translate("+e+","+t+")"},U=5,z=17;q.selectAll("text").attr("transform",function(e,t,n){return R(0,n%2==0?U:z)});var W=d3.selectAll(".nv-x.nv-axis .nv-wrap g g text")[0].length;j.selectAll(".nv-x.nv-axis .nv-axisMaxMin text").attr("transform",function(e,t){return R(0,t===0||W%2!==0?z:U)})}v&&q.filter(function(t,n){return n%Math.ceil(e[0].values.length/(M/100))!==0}).selectAll("text, line").style("opacity",0),g&&q.selectAll(".tick text").attr("transform","rotate("+g+" 0,0)").style("text-anchor",g>0?"start":"end"),j.select(".nv-x.nv-axis").selectAll("g.nv-axisMaxMin text").style("opacity",1)}p&&(r.scale(E).ticks(_/36).tickSize(-M,0),j.select(".nv-y.nv-axis").transition().call(r)),i.dispatch.on("stateChange",function(e){S=e,N.stateChange(S),A.update()}),s.dispatch.on("legendClick",function(e,n){if(!e.disabled)return;F=F.map(function(e){return e.disabled=!0,e}),e.disabled=!1;switch(e.key){case"Grouped":t.stacked(!1);break;case"Stacked":t.stacked(!0)}S.stacked=t.stacked(),N.stateChange(S),A.update()}),N.on("tooltipShow",function(e){y&&L(e,O.parentNode)}),N.on("changeState",function(n){typeof n.disabled!="undefined"&&(e.forEach(function(e,t){e.disabled=n.disabled[t]}),S.disabled=n.disabled),typeof n.stacked!="undefined"&&(t.stacked(n.stacked),S.stacked=n.stacked),A.update()})}),A}var t=e.models.multiBar(),n=e.models.axis(),r=e.models.axis(),i=e.models.legend(),s=e.models.legend(),o={top:30,right:20,bottom:50,left:60},u=null,a=null,f=e.utils.defaultColor(),l=!0,c=!0,h=!0,p=!0,d=!1,v=!0,m=!1,g=0,y=!0,b=function(e,t,n,r,i){return"
<h3>"+e+"</h3>"+"<p>"+n+" on "+t+"</p>
    "},w,E,S={stacked:!1},x=null,T="No Data Available.",N=d3.dispatch("tooltipShow","tooltipHide","stateChange","changeState"),C=function(){return l?180:0},k=250;t.stacked(!1),n.orient("bottom").tickPadding(7).highlightZero(!0).showMaxMin(!1).tickFormat(function(e){return e}),r.orient(d?"right":"left").tickFormat(d3.format(",.1f")),s.updateState(!1);var L=function(i,s){var o=i.pos[0]+(s.offsetLeft||0),u=i.pos[1]+(s.offsetTop||0),a=n.tickFormat()(t.x()(i.point,i.pointIndex)),f=r.tickFormat()(t.y()(i.point,i.pointIndex)),l=b(i.series.key,a,f,i,A);e.tooltip.show([o,u],l,i.value<0?"n":"s",null,s)};return t.dispatch.on("elementMouseover.tooltip",function(e){e.pos=[e.pos[0]+o.left,e.pos[1]+o.top],N.tooltipShow(e)}),t.dispatch.on("elementMouseout.tooltip",function(e){N.tooltipHide(e)}),N.on("tooltipHide",function(){y&&e.tooltip.cleanup()}),A.dispatch=N,A.multibar=t,A.legend=i,A.xAxis=n,A.yAxis=r,d3.rebind(A,t,"x","y","xDomain","yDomain","xRange","yRange","forceX","forceY","clipEdge","id","stacked","stackOffset","delay","barColor","groupSpacing"),A.options=e.utils.optionsFunc.bind(A),A.margin=function(e){return arguments.length?(o.top=typeof e.top!="undefined"?e.top:o.top,o.right=typeof e.right!="undefined"?e.right:o.right,o.bottom=typeof e.bottom!="undefined"?e.bottom:o.bottom,o.left=typeof e.left!="undefined"?e.left:o.left,A):o},A.width=function(e){return arguments.length?(u=e,A):u},A.height=function(e){return arguments.length?(a=e,A):a},A.color=function(t){return arguments.length?(f=e.utils.getColor(t),i.color(f),A):f},A.showControls=function(e){return arguments.length?(l=e,A):l},A.showLegend=function(e){return arguments.length?(c=e,A):c},A.showXAxis=function(e){return arguments.length?(h=e,A):h},A.showYAxis=function(e){return arguments.length?(p=e,A):p},A.rightAlignYAxis=function(e){return arguments.length?(d=e,r.orient(e?"right":"left"),A):d},A.reduceXTicks=function(e){return arguments.length?(v=e,A):v},A.rotateLabels=function(e){return arguments.length?(g=e,A):g},A.staggerLabels=function(e){return arguments.length?(m=e,A):m},A.tooltip=function(e){return arguments.length?(b=e,A):b},A.tooltips=function(e){return arguments.length?(y=e,A):y},A.tooltipContent=function(e){return arguments.length?(b=e,A):b},A.state=function(e){return arguments.length?(S=e,A):S},A.defaultState=function(e){return arguments.length?(x=e,A):x},A.noData=function(e){return arguments.length?(T=e,A):T},A.transitionDuration=function(e){return arguments.length?(k=e,A):k},A},e.models.multiBarHorizontal=function(){"use strict";function C(e){return e.each(function(e){var i=n-t.left-t.right,y=r-t.top-t.bottom,C=d3.select(this);p&&(e=d3.layout.stack().offset("zero").values(function(e){return e.values}).y(a)(e)),e.forEach(function(e,t){e.values.forEach(function(e){e.series=t})}),p&&e[0].values.map(function(t,n){var r=0,i=0;e.map(function(e){var t=e.values[n];t.size=Math.abs(t.y),t.y<0?(t.y1=i-t.size,i-=t.size):(t.y1=r,r+=t.size)})});var k=b&&w?[]:e.map(function(e){return e.values.map(function(e,t){return{x:u(e,t),y:a(e,t),y0:e.y0,y1:e.y1}})});s.domain(b||d3.merge(k).map(function(e){return e.x})).rangeBands(E||[0,y],.1),o.domain(w||d3.extent(d3.merge(k).map(function(e){return p?e.y>0?e.y1+e.y:e.y1:e.y}).concat(f))),d&&!p?o.range(S||[o.domain()[0]<0?m:0,i-(o.domain()[1]>0?m:0)]):o.range(S||[0,i]),T=T||s,N=N||d3.scale.linear().domain(o.domain()).range([o(0),o(0)]);var L=d3.select(this).selectAll("g.nv-wrap.nv-multibarHorizontal").data([e]),A=L.enter().append("g").attr("class","nvd3 nv-wrap 
nv-multibarHorizontal"),O=A.append("defs"),M=A.append("g"),_=L.select("g");M.append("g").attr("class","nv-groups"),L.attr("transform","translate("+t.left+","+t.top+")");var D=L.select(".nv-groups").selectAll(".nv-group").data(function(e){return e},function(e,t){return t});D.enter().append("g").style("stroke-opacity",1e-6).style("fill-opacity",1e-6),D.exit().transition().style("stroke-opacity",1e-6).style("fill-opacity",1e-6).remove(),D.attr("class",function(e,t){return"nv-group nv-series-"+t}).classed("hover",function(e){return e.hover}).style("fill",function(e,t){return l(e,t)}).style("stroke",function(e,t){return l(e,t)}),D.transition().style("stroke-opacity",1).style("fill-opacity",.75);var P=D.selectAll("g.nv-bar").data(function(e){return e.values});P.exit().remove();var H=P.enter().append("g").attr("transform",function(t,n,r){return"translate("+N(p?t.y0:0)+","+(p?0:r*s.rangeBand()/e.length+s(u(t,n)))+")"});H.append("rect").attr("width",0).attr("height",s.rangeBand()/(p?1:e.length)),P.on("mouseover",function(t,n){d3.select(this).classed("hover",!0),x.elementMouseover({value:a(t,n),point:t,series:e[t.series],pos:[o(a(t,n)+(p?t.y0:0)),s(u(t,n))+s.rangeBand()*(p?e.length/2:t.series+.5)/e.length],pointIndex:n,seriesIndex:t.series,e:d3.event})}).on("mouseout",function(t,n){d3.select(this).classed("hover",!1),x.elementMouseout({value:a(t,n),point:t,series:e[t.series],pointIndex:n,seriesIndex:t.series,e:d3.event})}).on("click",function(t,n){x.elementClick({value:a(t,n),point:t,series:e[t.series],pos:[s(u(t,n))+s.rangeBand()*(p?e.length/2:t.series+.5)/e.length,o(a(t,n)+(p?t.y0:0))],pointIndex:n,seriesIndex:t.series,e:d3.event}),d3.event.stopPropagation()}).on("dblclick",function(t,n){x.elementDblClick({value:a(t,n),point:t,series:e[t.series],pos:[s(u(t,n))+s.rangeBand()*(p?e.length/2:t.series+.5)/e.length,o(a(t,n)+(p?t.y0:0))],pointIndex:n,seriesIndex:t.series,e:d3.event}),d3.event.stopPropagation()}),H.append("text"),d&&!p?(P.select("text").attr("text-anchor",function(e,t){return a(e,t)<0?"end":"start"}).attr("y",s.rangeBand()/(e.length*2)).attr("dy",".32em").text(function(e,t){return g(a(e,t))}),P.transition().select("text").attr("x",function(e,t){return a(e,t)<0?-4:o(a(e,t))-o(0)+4})):P.selectAll("text").text(""),v&&!p?(H.append("text").classed("nv-bar-label",!0),P.select("text.nv-bar-label").attr("text-anchor",function(e,t){return a(e,t)<0?"start":"end"}).attr("y",s.rangeBand()/(e.length*2)).attr("dy",".32em").text(function(e,t){return u(e,t)}),P.transition().select("text.nv-bar-label").attr("x",function(e,t){return a(e,t)<0?o(0)-o(a(e,t))+4:-4})):P.selectAll("text.nv-bar-label").text(""),P.attr("class",function(e,t){return a(e,t)<0?"nv-bar negative":"nv-bar positive"}),c&&(h||(h=e.map(function(){return!0})),P.style("fill",function(e,t,n){return d3.rgb(c(e,t)).darker(h.map(function(e,t){return t}).filter(function(e,t){return!h[t]})[n]).toString()}).style("stroke",function(e,t,n){return d3.rgb(c(e,t)).darker(h.map(function(e,t){return t}).filter(function(e,t){return!h[t]})[n]).toString()})),p?P.transition().attr("transform",function(e,t){return"translate("+o(e.y1)+","+s(u(e,t))+")"}).select("rect").attr("width",function(e,t){return Math.abs(o(a(e,t)+e.y0)-o(e.y0))}).attr("height",s.rangeBand()):P.transition().attr("transform",function(t,n){return"translate("+(a(t,n)<0?o(a(t,n)):o(0))+","+(t.series*s.rangeBand()/e.length+s(u(t,n)))+")"}).select("rect").attr("height",s.rangeBand()/e.length).attr("width",function(e,t){return Math.max(Math.abs(o(a(e,t))-o(0)),1)}),T=s.copy(),N=o.copy()}),C}var 
t={top:0,right:0,bottom:0,left:0},n=960,r=500,i=Math.floor(Math.random()*1e4),s=d3.scale.ordinal(),o=d3.scale.linear(),u=function(e){return e.x},a=function(e){return e.y},f=[0],l=e.utils.defaultColor(),c=null,h,p=!1,d=!1,v=!1,m=60,g=d3.format(",.2f"),y=1200,b,w,E,S,x=d3.dispatch("chartClick","elementClick","elementDblClick","elementMouseover","elementMouseout"),T,N;return C.dispatch=x,C.options=e.utils.optionsFunc.bind(C),C.x=function(e){return arguments.length?(u=e,C):u},C.y=function(e){return arguments.length?(a=e,C):a},C.margin=function(e){return arguments.length?(t.top=typeof e.top!="undefined"?e.top:t.top,t.right=typeof e.right!="undefined"?e.right:t.right,t.bottom=typeof e.bottom!="undefined"?e.bottom:t.bottom,t.left=typeof e.left!="undefined"?e.left:t.left,C):t},C.width=function(e){return arguments.length?(n=e,C):n},C.height=function(e){return arguments.length?(r=e,C):r},C.xScale=function(e){return arguments.length?(s=e,C):s},C.yScale=function(e){return arguments.length?(o=e,C):o},C.xDomain=function(e){return arguments.length?(b=e,C):b},C.yDomain=function(e){return arguments.length?(w=e,C):w},C.xRange=function(e){return arguments.length?(E=e,C):E},C.yRange=function(e){return arguments.length?(S=e,C):S},C.forceY=function(e){return arguments.length?(f=e,C):f},C.stacked=function(e){return arguments.length?(p=e,C):p},C.color=function(t){return arguments.length?(l=e.utils.getColor(t),C):l},C.barColor=function(t){return arguments.length?(c=e.utils.getColor(t),C):c},C.disabled=function(e){return arguments.length?(h=e,C):h},C.id=function(e){return arguments.length?(i=e,C):i},C.delay=function(e){return arguments.length?(y=e,C):y},C.showValues=function(e){return arguments.length?(d=e,C):d},C.showBarLabels=function(e){return arguments.length?(v=e,C):v},C.valueFormat=function(e){return arguments.length?(g=e,C):g},C.valuePadding=function(e){return arguments.length?(m=e,C):m},C},e.models.multiBarHorizontalChart=function(){"use strict";function C(e){return e.each(function(e){var d=d3.select(this),m=this,k=(u||parseInt(d.style("width"))||960)-o.left-o.right,L=(a||parseInt(d.style("height"))||400)-o.top-o.bottom;C.update=function(){d.transition().duration(T).call(C)},C.container=this,b.disabled=e.map(function(e){return!!e.disabled});if(!w){var A;w={};for(A in b)b[A]instanceof Array?w[A]=b[A].slice(0):w[A]=b[A]}if(!e||!e.length||!e.filter(function(e){return e.values.length}).length){var O=d.selectAll(".nv-noData").data([E]);return O.enter().append("text").attr("class","nvd3 nv-noData").attr("dy","-.7em").style("text-anchor","middle"),O.attr("x",o.left+k/2).attr("y",o.top+L/2).text(function(e){return e}),C}d.selectAll(".nv-noData").remove(),g=t.xScale(),y=t.yScale();var M=d.selectAll("g.nv-wrap.nv-multiBarHorizontalChart").data([e]),_=M.enter().append("g").attr("class","nvd3 nv-wrap nv-multiBarHorizontalChart").append("g"),D=M.select("g");_.append("g").attr("class","nv-x nv-axis"),_.append("g").attr("class","nv-y nv-axis").append("g").attr("class","nv-zeroLine").append("line"),_.append("g").attr("class","nv-barsWrap"),_.append("g").attr("class","nv-legendWrap"),_.append("g").attr("class","nv-controlsWrap"),c&&(i.width(k-x()),t.barColor()&&e.forEach(function(e,t){e.color=d3.rgb("#ccc").darker(t*1.5).toString()}),D.select(".nv-legendWrap").datum(e).call(i),o.top!=i.height()&&(o.top=i.height(),L=(a||parseInt(d.style("height"))||400)-o.top-o.bottom),D.select(".nv-legendWrap").attr("transform","translate("+x()+","+ -o.top+")"));if(l){var 
P=[{key:"Grouped",disabled:t.stacked()},{key:"Stacked",disabled:!t.stacked()}];s.width(x()).color(["#444","#444","#444"]),D.select(".nv-controlsWrap").datum(P).attr("transform","translate(0,"+ -o.top+")").call(s)}M.attr("transform","translate("+o.left+","+o.top+")"),t.disabled(e.map(function(e){return e.disabled})).width(k).height(L).color(e.map(function(e,t){return e.color||f(e,t)}).filter(function(t,n){return!e[n].disabled}));var H=D.select(".nv-barsWrap").datum(e.filter(function(e){return!e.disabled}));H.transition().call(t);if(h){n.scale(g).ticks(L/24).tickSize(-k,0),D.select(".nv-x.nv-axis").transition().call(n);var B=D.select(".nv-x.nv-axis").selectAll("g");B.selectAll("line, text")}p&&(r.scale(y).ticks(k/100).tickSize(-L,0),D.select(".nv-y.nv-axis").attr("transform","translate(0,"+L+")"),D.select(".nv-y.nv-axis").transition().call(r)),D.select(".nv-zeroLine line").attr("x1",y(0)).attr("x2",y(0)).attr("y1",0).attr("y2",-L),i.dispatch.on("stateChange",function(e){b=e,S.stateChange(b),C.update()}),s.dispatch.on("legendClick",function(e,n){if(!e.disabled)return;P=P.map(function(e){return e.disabled=!0,e}),e.disabled=!1;switch(e.key){case"Grouped":t.stacked(!1);break;case"Stacked":t.stacked(!0)}b.stacked=t.stacked(),S.stateChange(b),C.update()}),S.on("tooltipShow",function(e){v&&N(e,m.parentNode)}),S.on("changeState",function(n){typeof n.disabled!="undefined"&&(e.forEach(function(e,t){e.disabled=n.disabled[t]}),b.disabled=n.disabled),typeof n.stacked!="undefined"&&(t.stacked(n.stacked),b.stacked=n.stacked),C.update()})}),C}var t=e.models.multiBarHorizontal(),n=e.models.axis(),r=e.models.axis(),i=e.models.legend().height(30),s=e.models.legend().height(30),o={top:30,right:20,bottom:50,left:60},u=null,a=null,f=e.utils.defaultColor(),l=!0,c=!0,h=!0,p=!0,d=!1,v=!0,m=function(e,t,n,r,i){return"
<h3>"+e+" - "+t+"</h3>"+"<p>"+n+"</p>
    "},g,y,b={stacked:d},w=null,E="No Data Available.",S=d3.dispatch("tooltipShow","tooltipHide","stateChange","changeState"),x=function(){return l?180:0},T=250;t.stacked(d),n.orient("left").tickPadding(5).highlightZero(!1).showMaxMin(!1).tickFormat(function(e){return e}),r.orient("bottom").tickFormat(d3.format(",.1f")),s.updateState(!1);var N=function(i,s){var o=i.pos[0]+(s.offsetLeft||0),u=i.pos[1]+(s.offsetTop||0),a=n.tickFormat()(t.x()(i.point,i.pointIndex)),f=r.tickFormat()(t.y()(i.point,i.pointIndex)),l=m(i.series.key,a,f,i,C);e.tooltip.show([o,u],l,i.value<0?"e":"w",null,s)};return t.dispatch.on("elementMouseover.tooltip",function(e){e.pos=[e.pos[0]+o.left,e.pos[1]+o.top],S.tooltipShow(e)}),t.dispatch.on("elementMouseout.tooltip",function(e){S.tooltipHide(e)}),S.on("tooltipHide",function(){v&&e.tooltip.cleanup()}),C.dispatch=S,C.multibar=t,C.legend=i,C.xAxis=n,C.yAxis=r,d3.rebind(C,t,"x","y","xDomain","yDomain","xRange","yRange","forceX","forceY","clipEdge","id","delay","showValues","showBarLabels","valueFormat","stacked","barColor"),C.options=e.utils.optionsFunc.bind(C),C.margin=function(e){return arguments.length?(o.top=typeof e.top!="undefined"?e.top:o.top,o.right=typeof e.right!="undefined"?e.right:o.right,o.bottom=typeof e.bottom!="undefined"?e.bottom:o.bottom,o.left=typeof e.left!="undefined"?e.left:o.left,C):o},C.width=function(e){return arguments.length?(u=e,C):u},C.height=function(e){return arguments.length?(a=e,C):a},C.color=function(t){return arguments.length?(f=e.utils.getColor(t),i.color(f),C):f},C.showControls=function(e){return arguments.length?(l=e,C):l},C.showLegend=function(e){return arguments.length?(c=e,C):c},C.showXAxis=function(e){return arguments.length?(h=e,C):h},C.showYAxis=function(e){return arguments.length?(p=e,C):p},C.tooltip=function(e){return arguments.length?(m=e,C):m},C.tooltips=function(e){return arguments.length?(v=e,C):v},C.tooltipContent=function(e){return arguments.length?(m=e,C):m},C.state=function(e){return arguments.length?(b=e,C):b},C.defaultState=function(e){return arguments.length?(w=e,C):w},C.noData=function(e){return arguments.length?(E=e,C):E},C.transitionDuration=function(e){return arguments.length?(T=e,C):T},C},e.models.multiChart=function(){"use strict";function C(e){return e.each(function(e){var u=d3.select(this),f=this;C.update=function(){u.transition().call(C)},C.container=this;var k=(r||parseInt(u.style("width"))||960)-t.left-t.right,L=(i||parseInt(u.style("height"))||400)-t.top-t.bottom,A=e.filter(function(e){return!e.disabled&&e.type=="line"&&e.yAxis==1}),O=e.filter(function(e){return!e.disabled&&e.type=="line"&&e.yAxis==2}),M=e.filter(function(e){return!e.disabled&&e.type=="bar"&&e.yAxis==1}),_=e.filter(function(e){return!e.disabled&&e.type=="bar"&&e.yAxis==2}),D=e.filter(function(e){return!e.disabled&&e.type=="area"&&e.yAxis==1}),P=e.filter(function(e){return!e.disabled&&e.type=="area"&&e.yAxis==2}),H=e.filter(function(e){return!e.disabled&&e.yAxis==1}).map(function(e){return e.values.map(function(e,t){return{x:e.x,y:e.y}})}),B=e.filter(function(e){return!e.disabled&&e.yAxis==2}).map(function(e){return e.values.map(function(e,t){return{x:e.x,y:e.y}})});a.domain(d3.extent(d3.merge(H.concat(B)),function(e){return e.x})).range([0,k]);var j=u.selectAll("g.wrap.multiChart").data([e]),F=j.enter().append("g").attr("class","wrap nvd3 multiChart").append("g");F.append("g").attr("class","x axis"),F.append("g").attr("class","y1 axis"),F.append("g").attr("class","y2 
axis"),F.append("g").attr("class","lines1Wrap"),F.append("g").attr("class","lines2Wrap"),F.append("g").attr("class","bars1Wrap"),F.append("g").attr("class","bars2Wrap"),F.append("g").attr("class","stack1Wrap"),F.append("g").attr("class","stack2Wrap"),F.append("g").attr("class","legendWrap");var I=j.select("g");s&&(x.width(k/2),I.select(".legendWrap").datum(e.map(function(e){return e.originalKey=e.originalKey===undefined?e.key:e.originalKey,e.key=e.originalKey+(e.yAxis==1?"":" (right axis)"),e})).call(x),t.top!=x.height()&&(t.top=x.height(),L=(i||parseInt(u.style("height"))||400)-t.top-t.bottom),I.select(".legendWrap").attr("transform","translate("+k/2+","+ -t.top+")")),d.width(k).height(L).interpolate("monotone").color(e.map(function(e,t){return e.color||n[t%n.length]}).filter(function(t,n){return!e[n].disabled&&e[n].yAxis==1&&e[n].type=="line"})),v.width(k).height(L).interpolate("monotone").color(e.map(function(e,t){return e.color||n[t%n.length]}).filter(function(t,n){return!e[n].disabled&&e[n].yAxis==2&&e[n].type=="line"})),m.width(k).height(L).color(e.map(function(e,t){return e.color||n[t%n.length]}).filter(function(t,n){return!e[n].disabled&&e[n].yAxis==1&&e[n].type=="bar"})),g.width(k).height(L).color(e.map(function(e,t){return e.color||n[t%n.length]}).filter(function(t,n){return!e[n].disabled&&e[n].yAxis==2&&e[n].type=="bar"})),y.width(k).height(L).color(e.map(function(e,t){return e.color||n[t%n.length]}).filter(function(t,n){return!e[n].disabled&&e[n].yAxis==1&&e[n].type=="area"})),b.width(k).height(L).color(e.map(function(e,t){return e.color||n[t%n.length]}).filter(function(t,n){return!e[n].disabled&&e[n].yAxis==2&&e[n].type=="area"})),I.attr("transform","translate("+t.left+","+t.top+")");var q=I.select(".lines1Wrap").datum(A),R=I.select(".bars1Wrap").datum(M),U=I.select(".stack1Wrap").datum(D),z=I.select(".lines2Wrap").datum(O),W=I.select(".bars2Wrap").datum(_),X=I.select(".stack2Wrap").datum(P),V=D.length?D.map(function(e){return e.values}).reduce(function(e,t){return e.map(function(e,n){return{x:e.x,y:e.y+t[n].y}})}).concat([{x:0,y:0}]):[],$=P.length?P.map(function(e){return e.values}).reduce(function(e,t){return e.map(function(e,n){return{x:e.x,y:e.y+t[n].y}})}).concat([{x:0,y:0}]):[];h.domain(l||d3.extent(d3.merge(H).concat(V),function(e){return e.y})).range([0,L]),p.domain(c||d3.extent(d3.merge(B).concat($),function(e){return e.y})).range([0,L]),d.yDomain(h.domain()),m.yDomain(h.domain()),y.yDomain(h.domain()),v.yDomain(p.domain()),g.yDomain(p.domain()),b.yDomain(p.domain()),D.length&&d3.transition(U).call(y),P.length&&d3.transition(X).call(b),M.length&&d3.transition(R).call(m),_.length&&d3.transition(W).call(g),A.length&&d3.transition(q).call(d),O.length&&d3.transition(z).call(v),w.ticks(k/100).tickSize(-L,0),I.select(".x.axis").attr("transform","translate(0,"+L+")"),d3.transition(I.select(".x.axis")).call(w),E.ticks(L/36).tickSize(-k,0),d3.transition(I.select(".y1.axis")).call(E),S.ticks(L/36).tickSize(-k,0),d3.transition(I.select(".y2.axis")).call(S),I.select(".y2.axis").style("opacity",B.length?1:0).attr("transform","translate("+a.range()[1]+",0)"),x.dispatch.on("stateChange",function(e){C.update()}),T.on("tooltipShow",function(e){o&&N(e,f.parentNode)})}),C}var t={top:30,right:20,bottom:50,left:60},n=d3.scale.category20().range(),r=null,i=null,s=!0,o=!0,u=function(e,t,n,r,i){return"

    "+e+"

    "+"

    "+n+" at "+t+"

    "},a,f,l,c,a=d3.scale.linear(),h=d3.scale.linear(),p=d3.scale.linear(),d=e.models.line().yScale(h),v=e.models.line().yScale(p),m=e.models.multiBar().stacked(!1).yScale(h),g=e.models.multiBar().stacked(!1).yScale(p),y=e.models.stackedArea().yScale(h),b=e.models.stackedArea().yScale(p),w=e.models.axis().scale(a).orient("bottom").tickPadding(5),E=e.models.axis().scale(h).orient("left"),S=e.models.axis().scale(p).orient("right"),x=e.models.legend().height(30),T=d3.dispatch("tooltipShow","tooltipHide"),N=function(t,n){var r=t.pos[0]+(n.offsetLeft||0),i=t.pos[1]+(n.offsetTop||0),s=w.tickFormat()(d.x()(t.point,t.pointIndex)),o=(t.series.yAxis==2?S:E).tickFormat()(d.y()(t.point,t.pointIndex)),a=u(t.series.key,s,o,t,C);e.tooltip.show([r,i],a,undefined,undefined,n.offsetParent)};return d.dispatch.on("elementMouseover.tooltip",function(e){e.pos=[e.pos[0]+t.left,e.pos[1]+t.top],T.tooltipShow(e)}),d.dispatch.on("elementMouseout.tooltip",function(e){T.tooltipHide(e)}),v.dispatch.on("elementMouseover.tooltip",function(e){e.pos=[e.pos[0]+t.left,e.pos[1]+t.top],T.tooltipShow(e)}),v.dispatch.on("elementMouseout.tooltip",function(e){T.tooltipHide(e)}),m.dispatch.on("elementMouseover.tooltip",function(e){e.pos=[e.pos[0]+t.left,e.pos[1]+t.top],T.tooltipShow(e)}),m.dispatch.on("elementMouseout.tooltip",function(e){T.tooltipHide(e)}),g.dispatch.on("elementMouseover.tooltip",function(e){e.pos=[e.pos[0]+t.left,e.pos[1]+t.top],T.tooltipShow(e)}),g.dispatch.on("elementMouseout.tooltip",function(e){T.tooltipHide(e)}),y.dispatch.on("tooltipShow",function(e){if(!Math.round(y.y()(e.point)*100))return setTimeout(function(){d3.selectAll(".point.hover").classed("hover",!1)},0),!1;e.pos=[e.pos[0]+t.left,e.pos[1]+t.top],T.tooltipShow(e)}),y.dispatch.on("tooltipHide",function(e){T.tooltipHide(e)}),b.dispatch.on("tooltipShow",function(e){if(!Math.round(b.y()(e.point)*100))return setTimeout(function(){d3.selectAll(".point.hover").classed("hover",!1)},0),!1;e.pos=[e.pos[0]+t.left,e.pos[1]+t.top],T.tooltipShow(e)}),b.dispatch.on("tooltipHide",function(e){T.tooltipHide(e)}),d.dispatch.on("elementMouseover.tooltip",function(e){e.pos=[e.pos[0]+t.left,e.pos[1]+t.top],T.tooltipShow(e)}),d.dispatch.on("elementMouseout.tooltip",function(e){T.tooltipHide(e)}),v.dispatch.on("elementMouseover.tooltip",function(e){e.pos=[e.pos[0]+t.left,e.pos[1]+t.top],T.tooltipShow(e)}),v.dispatch.on("elementMouseout.tooltip",function(e){T.tooltipHide(e)}),T.on("tooltipHide",function(){o&&e.tooltip.cleanup()}),C.dispatch=T,C.lines1=d,C.lines2=v,C.bars1=m,C.bars2=g,C.stack1=y,C.stack2=b,C.xAxis=w,C.yAxis1=E,C.yAxis2=S,C.options=e.utils.optionsFunc.bind(C),C.x=function(e){return arguments.length?(getX=e,d.x(e),m.x(e),C):getX},C.y=function(e){return arguments.length?(getY=e,d.y(e),m.y(e),C):getY},C.yDomain1=function(e){return arguments.length?(l=e,C):l},C.yDomain2=function(e){return arguments.length?(c=e,C):c},C.margin=function(e){return arguments.length?(t=e,C):t},C.width=function(e){return arguments.length?(r=e,C):r},C.height=function(e){return arguments.length?(i=e,C):i},C.color=function(e){return arguments.length?(n=e,x.color(e),C):n},C.showLegend=function(e){return arguments.length?(s=e,C):s},C.tooltips=function(e){return arguments.length?(o=e,C):o},C.tooltipContent=function(e){return arguments.length?(u=e,C):u},C},e.models.ohlcBar=function(){"use strict";function x(e){return e.each(function(e){var 
g=n-t.left-t.right,x=r-t.top-t.bottom,T=d3.select(this);s.domain(y||d3.extent(e[0].values.map(u).concat(p))),v?s.range(w||[g*.5/e[0].values.length,g*(e[0].values.length-.5)/e[0].values.length]):s.range(w||[0,g]),o.domain(b||[d3.min(e[0].values.map(h).concat(d)),d3.max(e[0].values.map(c).concat(d))]).range(E||[x,0]),s.domain()[0]===s.domain()[1]&&(s.domain()[0]?s.domain([s.domain()[0]-s.domain()[0]*.01,s.domain()[1]+s.domain()[1]*.01]):s.domain([-1,1])),o.domain()[0]===o.domain()[1]&&(o.domain()[0]?o.domain([o.domain()[0]+o.domain()[0]*.01,o.domain()[1]-o.domain()[1]*.01]):o.domain([-1,1]));var N=d3.select(this).selectAll("g.nv-wrap.nv-ohlcBar").data([e[0].values]),C=N.enter().append("g").attr("class","nvd3 nv-wrap nv-ohlcBar"),k=C.append("defs"),L=C.append("g"),A=N.select("g");L.append("g").attr("class","nv-ticks"),N.attr("transform","translate("+t.left+","+t.top+")"),T.on("click",function(e,t){S.chartClick({data:e,index:t,pos:d3.event,id:i})}),k.append("clipPath").attr("id","nv-chart-clip-path-"+i).append("rect"),N.select("#nv-chart-clip-path-"+i+" rect").attr("width",g).attr("height",x),A.attr("clip-path",m?"url(#nv-chart-clip-path-"+i+")":"");var O=N.select(".nv-ticks").selectAll(".nv-tick").data(function(e){return e});O.exit().remove();var M=O.enter().append("path").attr("class",function(e,t,n){return(f(e,t)>l(e,t)?"nv-tick negative":"nv-tick positive")+" nv-tick-"+n+"-"+t}).attr("d",function(t,n){var r=g/e[0].values.length*.9;return"m0,0l0,"+(o(f(t,n))-o(c(t,n)))+"l"+ -r/2+",0l"+r/2+",0l0,"+(o(h(t,n))-o(f(t,n)))+"l0,"+(o(l(t,n))-o(h(t,n)))+"l"+r/2+",0l"+ -r/2+",0z"}).attr("transform",function(e,t){return"translate("+s(u(e,t))+","+o(c(e,t))+")"}).on("mouseover",function(t,n){d3.select(this).classed("hover",!0),S.elementMouseover({point:t,series:e[0],pos:[s(u(t,n)),o(a(t,n))],pointIndex:n,seriesIndex:0,e:d3.event})}).on("mouseout",function(t,n){d3.select(this).classed("hover",!1),S.elementMouseout({point:t,series:e[0],pointIndex:n,seriesIndex:0,e:d3.event})}).on("click",function(e,t){S.elementClick({value:a(e,t),data:e,index:t,pos:[s(u(e,t)),o(a(e,t))],e:d3.event,id:i}),d3.event.stopPropagation()}).on("dblclick",function(e,t){S.elementDblClick({value:a(e,t),data:e,index:t,pos:[s(u(e,t)),o(a(e,t))],e:d3.event,id:i}),d3.event.stopPropagation()});O.attr("class",function(e,t,n){return(f(e,t)>l(e,t)?"nv-tick negative":"nv-tick positive")+" nv-tick-"+n+"-"+t}),d3.transition(O).attr("transform",function(e,t){return"translate("+s(u(e,t))+","+o(c(e,t))+")"}).attr("d",function(t,n){var r=g/e[0].values.length*.9;return"m0,0l0,"+(o(f(t,n))-o(c(t,n)))+"l"+ -r/2+",0l"+r/2+",0l0,"+(o(h(t,n))-o(f(t,n)))+"l0,"+(o(l(t,n))-o(h(t,n)))+"l"+r/2+",0l"+ -r/2+",0z"})}),x}var t={top:0 -,right:0,bottom:0,left:0},n=960,r=500,i=Math.floor(Math.random()*1e4),s=d3.scale.linear(),o=d3.scale.linear(),u=function(e){return e.x},a=function(e){return e.y},f=function(e){return e.open},l=function(e){return e.close},c=function(e){return e.high},h=function(e){return e.low},p=[],d=[],v=!1,m=!0,g=e.utils.defaultColor(),y,b,w,E,S=d3.dispatch("chartClick","elementClick","elementDblClick","elementMouseover","elementMouseout");return x.dispatch=S,x.options=e.utils.optionsFunc.bind(x),x.x=function(e){return arguments.length?(u=e,x):u},x.y=function(e){return arguments.length?(a=e,x):a},x.open=function(e){return arguments.length?(f=e,x):f},x.close=function(e){return arguments.length?(l=e,x):l},x.high=function(e){return arguments.length?(c=e,x):c},x.low=function(e){return arguments.length?(h=e,x):h},x.margin=function(e){return 
arguments.length?(t.top=typeof e.top!="undefined"?e.top:t.top,t.right=typeof e.right!="undefined"?e.right:t.right,t.bottom=typeof e.bottom!="undefined"?e.bottom:t.bottom,t.left=typeof e.left!="undefined"?e.left:t.left,x):t},x.width=function(e){return arguments.length?(n=e,x):n},x.height=function(e){return arguments.length?(r=e,x):r},x.xScale=function(e){return arguments.length?(s=e,x):s},x.yScale=function(e){return arguments.length?(o=e,x):o},x.xDomain=function(e){return arguments.length?(y=e,x):y},x.yDomain=function(e){return arguments.length?(b=e,x):b},x.xRange=function(e){return arguments.length?(w=e,x):w},x.yRange=function(e){return arguments.length?(E=e,x):E},x.forceX=function(e){return arguments.length?(p=e,x):p},x.forceY=function(e){return arguments.length?(d=e,x):d},x.padData=function(e){return arguments.length?(v=e,x):v},x.clipEdge=function(e){return arguments.length?(m=e,x):m},x.color=function(t){return arguments.length?(g=e.utils.getColor(t),x):g},x.id=function(e){return arguments.length?(i=e,x):i},x},e.models.pie=function(){"use strict";function S(e){return e.each(function(e){function q(e){var t=(e.startAngle+e.endAngle)*90/Math.PI-90;return t>90?t-180:t}function R(e){e.endAngle=isNaN(e.endAngle)?0:e.endAngle,e.startAngle=isNaN(e.startAngle)?0:e.startAngle,m||(e.innerRadius=0);var t=d3.interpolate(this._current,e);return this._current=t(0),function(e){return A(t(e))}}function U(e){e.innerRadius=0;var t=d3.interpolate({startAngle:0,endAngle:0},e);return function(e){return A(t(e))}}var o=n-t.left-t.right,f=r-t.top-t.bottom,S=Math.min(o,f)/2,x=S-S/5,T=d3.select(this),N=T.selectAll(".nv-wrap.nv-pie").data(e),C=N.enter().append("g").attr("class","nvd3 nv-wrap nv-pie nv-chart-"+u),k=C.append("g"),L=N.select("g");k.append("g").attr("class","nv-pie"),k.append("g").attr("class","nv-pieLabels"),N.attr("transform","translate("+t.left+","+t.top+")"),L.select(".nv-pie").attr("transform","translate("+o/2+","+f/2+")"),L.select(".nv-pieLabels").attr("transform","translate("+o/2+","+f/2+")"),T.on("click",function(e,t){E.chartClick({data:e,index:t,pos:d3.event,id:u})});var A=d3.svg.arc().outerRadius(x);y&&A.startAngle(y),b&&A.endAngle(b),m&&A.innerRadius(S*w);var O=d3.layout.pie().sort(null).value(function(e){return e.disabled?0:s(e)}),M=N.select(".nv-pie").selectAll(".nv-slice").data(O),_=N.select(".nv-pieLabels").selectAll(".nv-label").data(O);M.exit().remove(),_.exit().remove();var D=M.enter().append("g").attr("class","nv-slice").on("mouseover",function(e,t){d3.select(this).classed("hover",!0),E.elementMouseover({label:i(e.data),value:s(e.data),point:e.data,pointIndex:t,pos:[d3.event.pageX,d3.event.pageY],id:u})}).on("mouseout",function(e,t){d3.select(this).classed("hover",!1),E.elementMouseout({label:i(e.data),value:s(e.data),point:e.data,index:t,id:u})}).on("click",function(e,t){E.elementClick({label:i(e.data),value:s(e.data),point:e.data,index:t,pos:d3.event,id:u}),d3.event.stopPropagation()}).on("dblclick",function(e,t){E.elementDblClick({label:i(e.data),value:s(e.data),point:e.data,index:t,pos:d3.event,id:u}),d3.event.stopPropagation()});M.attr("fill",function(e,t){return a(e,t)}).attr("stroke",function(e,t){return a(e,t)});var P=D.append("path").each(function(e){this._current=e});M.select("path").transition().attr("d",A).attrTween("d",R);if(l){var H=d3.svg.arc().innerRadius(0);c&&(H=A),h&&(H=d3.svg.arc().outerRadius(A.outerRadius())),_.enter().append("g").classed("nv-label",!0).each(function(e,t){var 
n=d3.select(this);n.attr("transform",function(e){if(g){e.outerRadius=x+10,e.innerRadius=x+15;var t=(e.startAngle+e.endAngle)/2*(180/Math.PI);return(e.startAngle+e.endAngle)/2v?r[p]:""})}}),S}var t={top:0,right:0,bottom:0,left:0},n=500,r=500,i=function(e){return e.x},s=function(e){return e.y},o=function(e){return e.description},u=Math.floor(Math.random()*1e4),a=e.utils.defaultColor(),f=d3.format(",.2f"),l=!0,c=!0,h=!1,p="key",v=.02,m=!1,g=!1,y=!1,b=!1,w=.5,E=d3.dispatch("chartClick","elementClick","elementDblClick","elementMouseover","elementMouseout");return S.dispatch=E,S.options=e.utils.optionsFunc.bind(S),S.margin=function(e){return arguments.length?(t.top=typeof e.top!="undefined"?e.top:t.top,t.right=typeof e.right!="undefined"?e.right:t.right,t.bottom=typeof e.bottom!="undefined"?e.bottom:t.bottom,t.left=typeof e.left!="undefined"?e.left:t.left,S):t},S.width=function(e){return arguments.length?(n=e,S):n},S.height=function(e){return arguments.length?(r=e,S):r},S.values=function(t){return e.log("pie.values() is no longer supported."),S},S.x=function(e){return arguments.length?(i=e,S):i},S.y=function(e){return arguments.length?(s=d3.functor(e),S):s},S.description=function(e){return arguments.length?(o=e,S):o},S.showLabels=function(e){return arguments.length?(l=e,S):l},S.labelSunbeamLayout=function(e){return arguments.length?(g=e,S):g},S.donutLabelsOutside=function(e){return arguments.length?(h=e,S):h},S.pieLabelsOutside=function(e){return arguments.length?(c=e,S):c},S.labelType=function(e){return arguments.length?(p=e,p=p||"key",S):p},S.donut=function(e){return arguments.length?(m=e,S):m},S.donutRatio=function(e){return arguments.length?(w=e,S):w},S.startAngle=function(e){return arguments.length?(y=e,S):y},S.endAngle=function(e){return arguments.length?(b=e,S):b},S.id=function(e){return arguments.length?(u=e,S):u},S.color=function(t){return arguments.length?(a=e.utils.getColor(t),S):a},S.valueFormat=function(e){return arguments.length?(f=e,S):f},S.labelThreshold=function(e){return arguments.length?(v=e,S):v},S},e.models.pieChart=function(){"use strict";function v(e){return e.each(function(e){var u=d3.select(this),a=this,f=(i||parseInt(u.style("width"))||960)-r.left-r.right,d=(s||parseInt(u.style("height"))||400)-r.top-r.bottom;v.update=function(){u.transition().call(v)},v.container=this,l.disabled=e.map(function(e){return!!e.disabled});if(!c){var m;c={};for(m in l)l[m]instanceof Array?c[m]=l[m].slice(0):c[m]=l[m]}if(!e||!e.length){var g=u.selectAll(".nv-noData").data([h]);return g.enter().append("text").attr("class","nvd3 nv-noData").attr("dy","-.7em").style("text-anchor","middle"),g.attr("x",r.left+f/2).attr("y",r.top+d/2).text(function(e){return e}),v}u.selectAll(".nv-noData").remove();var y=u.selectAll("g.nv-wrap.nv-pieChart").data([e]),b=y.enter().append("g").attr("class","nvd3 nv-wrap nv-pieChart").append("g"),w=y.select("g");b.append("g").attr("class","nv-pieWrap"),b.append("g").attr("class","nv-legendWrap"),o&&(n.width(f).key(t.x()),y.select(".nv-legendWrap").datum(e).call(n),r.top!=n.height()&&(r.top=n.height(),d=(s||parseInt(u.style("height"))||400)-r.top-r.bottom),y.select(".nv-legendWrap").attr("transform","translate(0,"+ -r.top+")")),y.attr("transform","translate("+r.left+","+r.top+")"),t.width(f).height(d);var E=w.select(".nv-pieWrap").datum([e]);d3.transition(E).call(t),n.dispatch.on("stateChange",function(e){l=e,p.stateChange(l),v.update()}),t.dispatch.on("elementMouseout.tooltip",function(e){p.tooltipHide(e)}),p.on("changeState",function(t){typeof 
t.disabled!="undefined"&&(e.forEach(function(e,n){e.disabled=t.disabled[n]}),l.disabled=t.disabled),v.update()})}),v}var t=e.models.pie(),n=e.models.legend(),r={top:30,right:20,bottom:20,left:20},i=null,s=null,o=!0,u=e.utils.defaultColor(),a=!0,f=function(e,t,n,r){return"

    "+e+"

    "+"

    "+t+"

    "},l={},c=null,h="No Data Available.",p=d3.dispatch("tooltipShow","tooltipHide","stateChange","changeState"),d=function(n,r){var i=t.description()(n.point)||t.x()(n.point),s=n.pos[0]+(r&&r.offsetLeft||0),o=n.pos[1]+(r&&r.offsetTop||0),u=t.valueFormat()(t.y()(n.point)),a=f(i,u,n,v);e.tooltip.show([s,o],a,n.value<0?"n":"s",null,r)};return t.dispatch.on("elementMouseover.tooltip",function(e){e.pos=[e.pos[0]+r.left,e.pos[1]+r.top],p.tooltipShow(e)}),p.on("tooltipShow",function(e){a&&d(e)}),p.on("tooltipHide",function(){a&&e.tooltip.cleanup()}),v.legend=n,v.dispatch=p,v.pie=t,d3.rebind(v,t,"valueFormat","values","x","y","description","id","showLabels","donutLabelsOutside","pieLabelsOutside","labelType","donut","donutRatio","labelThreshold"),v.options=e.utils.optionsFunc.bind(v),v.margin=function(e){return arguments.length?(r.top=typeof e.top!="undefined"?e.top:r.top,r.right=typeof e.right!="undefined"?e.right:r.right,r.bottom=typeof e.bottom!="undefined"?e.bottom:r.bottom,r.left=typeof e.left!="undefined"?e.left:r.left,v):r},v.width=function(e){return arguments.length?(i=e,v):i},v.height=function(e){return arguments.length?(s=e,v):s},v.color=function(r){return arguments.length?(u=e.utils.getColor(r),n.color(u),t.color(u),v):u},v.showLegend=function(e){return arguments.length?(o=e,v):o},v.tooltips=function(e){return arguments.length?(a=e,v):a},v.tooltipContent=function(e){return arguments.length?(f=e,v):f},v.state=function(e){return arguments.length?(l=e,v):l},v.defaultState=function(e){return arguments.length?(c=e,v):c},v.noData=function(e){return arguments.length?(h=e,v):h},v},e.models.scatter=function(){"use strict";function I(q){return q.each(function(I){function Q(){if(!g)return!1;var e,i=d3.merge(I.map(function(e,t){return e.values.map(function(e,n){var r=f(e,n),i=l(e,n);return[o(r)+Math.random()*1e-7,u(i)+Math.random()*1e-7,t,n,e]}).filter(function(e,t){return b(e[4],t)})}));if(D===!0){if(x){var a=X.select("defs").selectAll(".nv-point-clips").data([s]).enter();a.append("clipPath").attr("class","nv-point-clips").attr("id","nv-points-clip-"+s);var c=X.select("#nv-points-clip-"+s).selectAll("circle").data(i);c.enter().append("circle").attr("r",T),c.exit().remove(),c.attr("cx",function(e){return e[0]}).attr("cy",function(e){return e[1]}),X.select(".nv-point-paths").attr("clip-path","url(#nv-points-clip-"+s+")")}i.length&&(i.push([o.range()[0]-20,u.range()[0]-20,null,null]),i.push([o.range()[1]+20,u.range()[1]+20,null,null]),i.push([o.range()[0]-20,u.range()[0]+20,null,null]),i.push([o.range()[1]+20,u.range()[1]-20,null,null]));var h=d3.geom.polygon([[-10,-10],[-10,r+10],[n+10,r+10],[n+10,-10]]),p=d3.geom.voronoi(i).map(function(e,t){return{data:h.clip(e),series:i[t][2],point:i[t][3]}}),d=X.select(".nv-point-paths").selectAll("path").data(p);d.enter().append("path").attr("class",function(e,t){return"nv-path-"+t}),d.exit().remove(),d.attr("d",function(e){return e.data.length===0?"M 0 0":"M"+e.data.join("L")+"Z"});var v=function(e,n){if(F)return 0;var r=I[e.series];if(typeof r=="undefined")return;var i=r.values[e.point];n({point:i,series:r,pos:[o(f(i,e.point))+t.left,u(l(i,e.point))+t.top],seriesIndex:e.series,pointIndex:e.point})};d.on("click",function(e){v(e,_.elementClick)}).on("mouseover",function(e){v(e,_.elementMouseover)}).on("mouseout",function(e,t){v(e,_.elementMouseout)})}else X.select(".nv-groups").selectAll(".nv-group").selectAll(".nv-point").on("click",function(e,n){if(F||!I[e.series])return 0;var 
r=I[e.series],i=r.values[n];_.elementClick({point:i,series:r,pos:[o(f(i,n))+t.left,u(l(i,n))+t.top],seriesIndex:e.series,pointIndex:n})}).on("mouseover",function(e,n){if(F||!I[e.series])return 0;var r=I[e.series],i=r.values[n];_.elementMouseover({point:i,series:r,pos:[o(f(i,n))+t.left,u(l(i,n))+t.top],seriesIndex:e.series,pointIndex:n})}).on("mouseout",function(e,t){if(F||!I[e.series])return 0;var n=I[e.series],r=n.values[t];_.elementMouseout({point:r,series:n,seriesIndex:e.series,pointIndex:t})});F=!1}var q=n-t.left-t.right,R=r-t.top-t.bottom,U=d3.select(this);I.forEach(function(e,t){e.values.forEach(function(e){e.series=t})});var W=N&&C&&A?[]:d3.merge(I.map(function(e){return e.values.map(function(e,t){return{x:f(e,t),y:l(e,t),size:c(e,t)}})}));o.domain(N||d3.extent(W.map(function(e){return e.x}).concat(d))),w&&I[0]?o.range(k||[(q*E+q)/(2*I[0].values.length),q-q*(1+E)/(2*I[0].values.length)]):o.range(k||[0,q]),u.domain(C||d3.extent(W.map(function(e){return e.y}).concat(v))).range(L||[R,0]),a.domain(A||d3.extent(W.map(function(e){return e.size}).concat(m))).range(O||[16,256]);if(o.domain()[0]===o.domain()[1]||u.domain()[0]===u.domain()[1])M=!0;o.domain()[0]===o.domain()[1]&&(o.domain()[0]?o.domain([o.domain()[0]-o.domain()[0]*.01,o.domain()[1]+o.domain()[1]*.01]):o.domain([-1,1])),u.domain()[0]===u.domain()[1]&&(u.domain()[0]?u.domain([u.domain()[0]-u.domain()[0]*.01,u.domain()[1]+u.domain()[1]*.01]):u.domain([-1,1])),isNaN(o.domain()[0])&&o.domain([-1,1]),isNaN(u.domain()[0])&&u.domain([-1,1]),P=P||o,H=H||u,B=B||a;var X=U.selectAll("g.nv-wrap.nv-scatter").data([I]),V=X.enter().append("g").attr("class","nvd3 nv-wrap nv-scatter nv-chart-"+s+(M?" nv-single-point":"")),$=V.append("defs"),J=V.append("g"),K=X.select("g");J.append("g").attr("class","nv-groups"),J.append("g").attr("class","nv-point-paths"),X.attr("transform","translate("+t.left+","+t.top+")"),$.append("clipPath").attr("id","nv-edge-clip-"+s).append("rect"),X.select("#nv-edge-clip-"+s+" rect").attr("width",q).attr("height",R>0?R:0),K.attr("clip-path",S?"url(#nv-edge-clip-"+s+")":""),F=!0;var G=X.select(".nv-groups").selectAll(".nv-group").data(function(e){return e},function(e){return e.key});G.enter().append("g").style("stroke-opacity",1e-6).style("fill-opacity",1e-6),G.exit().remove(),G.attr("class",function(e,t){return"nv-group nv-series-"+t}).classed("hover",function(e){return e.hover}),G.transition().style("fill",function(e,t){return i(e,t)}).style("stroke",function(e,t){return i(e,t)}).style("stroke-opacity",1).style("fill-opacity",.5);if(p){var Y=G.selectAll("circle.nv-point").data(function(e){return e.values},y);Y.enter().append("circle").style("fill",function(e,t){return e.color}).style("stroke",function(e,t){return e.color}).attr("cx",function(t,n){return e.utils.NaNtoZero(P(f(t,n)))}).attr("cy",function(t,n){return e.utils.NaNtoZero(H(l(t,n)))}).attr("r",function(e,t){return Math.sqrt(a(c(e,t))/Math.PI)}),Y.exit().remove(),G.exit().selectAll("path.nv-point").transition().attr("cx",function(t,n){return e.utils.NaNtoZero(o(f(t,n)))}).attr("cy",function(t,n){return e.utils.NaNtoZero(u(l(t,n)))}).remove(),Y.each(function(e,t){d3.select(this).classed("nv-point",!0).classed("nv-point-"+t,!0).classed("hover",!1)}),Y.transition().attr("cx",function(t,n){return e.utils.NaNtoZero(o(f(t,n)))}).attr("cy",function(t,n){return e.utils.NaNtoZero(u(l(t,n)))}).attr("r",function(e,t){return Math.sqrt(a(c(e,t))/Math.PI)})}else{var Y=G.selectAll("path.nv-point").data(function(e){return 
e.values});Y.enter().append("path").style("fill",function(e,t){return e.color}).style("stroke",function(e,t){return e.color}).attr("transform",function(e,t){return"translate("+P(f(e,t))+","+H(l(e,t))+")"}).attr("d",d3.svg.symbol().type(h).size(function(e,t){return a(c(e,t))})),Y.exit().remove(),G.exit().selectAll("path.nv-point").transition().attr("transform",function(e,t){return"translate("+o(f(e,t))+","+u(l(e,t))+")"}).remove(),Y.each(function(e,t){d3.select(this).classed("nv-point",!0).classed("nv-point-"+t,!0).classed("hover",!1)}),Y.transition().attr("transform",function(e,t){return"translate("+o(f(e,t))+","+u(l(e,t))+")"}).attr("d",d3.svg.symbol().type(h).size(function(e,t){return a(c(e,t))}))}clearTimeout(j),j=setTimeout(Q,300),P=o.copy(),H=u.copy(),B=a.copy()}),I}var t={top:0,right:0,bottom:0,left:0},n=960,r=500,i=e.utils.defaultColor(),s=Math.floor(Math.random()*1e5),o=d3.scale.linear(),u=d3.scale.linear(),a=d3.scale.linear(),f=function(e){return e.x},l=function(e){return e.y},c=function(e){return e.size||1},h=function(e){return e.shape||"circle"},p=!0,d=[],v=[],m=[],g=!0,y=null,b=function(e){return!e.notActive},w=!1,E=.1,S=!1,x=!0,T=function(){return 25},N=null,C=null,k=null,L=null,A=null,O=null,M=!1,_=d3.dispatch("elementClick","elementMouseover","elementMouseout"),D=!0,P,H,B,j,F=!1;return I.clearHighlights=function(){d3.selectAll(".nv-chart-"+s+" .nv-point.hover").classed("hover",!1)},I.highlightPoint=function(e,t,n){d3.select(".nv-chart-"+s+" .nv-series-"+e+" .nv-point-"+t).classed("hover",n)},_.on("elementMouseover.point",function(e){g&&I.highlightPoint(e.seriesIndex,e.pointIndex,!0)}),_.on("elementMouseout.point",function(e){g&&I.highlightPoint(e.seriesIndex,e.pointIndex,!1)}),I.dispatch=_,I.options=e.utils.optionsFunc.bind(I),I.x=function(e){return arguments.length?(f=d3.functor(e),I):f},I.y=function(e){return arguments.length?(l=d3.functor(e),I):l},I.size=function(e){return arguments.length?(c=d3.functor(e),I):c},I.margin=function(e){return arguments.length?(t.top=typeof e.top!="undefined"?e.top:t.top,t.right=typeof e.right!="undefined"?e.right:t.right,t.bottom=typeof e.bottom!="undefined"?e.bottom:t.bottom,t.left=typeof e.left!="undefined"?e.left:t.left,I):t},I.width=function(e){return arguments.length?(n=e,I):n},I.height=function(e){return arguments.length?(r=e,I):r},I.xScale=function(e){return arguments.length?(o=e,I):o},I.yScale=function(e){return arguments.length?(u=e,I):u},I.zScale=function(e){return arguments.length?(a=e,I):a},I.xDomain=function(e){return arguments.length?(N=e,I):N},I.yDomain=function(e){return arguments.length?(C=e,I):C},I.sizeDomain=function(e){return arguments.length?(A=e,I):A},I.xRange=function(e){return arguments.length?(k=e,I):k},I.yRange=function(e){return arguments.length?(L=e,I):L},I.sizeRange=function(e){return arguments.length?(O=e,I):O},I.forceX=function(e){return arguments.length?(d=e,I):d},I.forceY=function(e){return arguments.length?(v=e,I):v},I.forceSize=function(e){return arguments.length?(m=e,I):m},I.interactive=function(e){return arguments.length?(g=e,I):g},I.pointKey=function(e){return arguments.length?(y=e,I):y},I.pointActive=function(e){return arguments.length?(b=e,I):b},I.padData=function(e){return arguments.length?(w=e,I):w},I.padDataOuter=function(e){return arguments.length?(E=e,I):E},I.clipEdge=function(e){return arguments.length?(S=e,I):S},I.clipVoronoi=function(e){return arguments.length?(x=e,I):x},I.useVoronoi=function(e){return arguments.length?(D=e,D===!1&&(x=!1),I):D},I.clipRadius=function(e){return 
arguments.length?(T=e,I):T},I.color=function(t){return arguments.length?(i=e.utils.getColor(t),I):i},I.shape=function(e){return arguments.length?(h=e,I):h},I.onlyCircles=function(e){return arguments.length?(p=e,I):p},I.id=function(e){return arguments.length?(s=e,I):s},I.singlePoint=function(e){return arguments.length?(M=e,I):M},I},e.models.scatterChart=function(){"use strict";function F(e){return e.each(function(e){function K(){if(T)return X.select(".nv-point-paths").style("pointer-events","all"),!1;X.select(".nv-point-paths").style("pointer-events","none");var i=d3.mouse(this);h.distortion(x).focus(i[0]),p.distortion(x).focus(i[1]),X.select(".nv-scatterWrap").call(t),b&&X.select(".nv-x.nv-axis").call(n),w&&X.select(".nv-y.nv-axis").call(r),X.select(".nv-distributionX").datum(e.filter(function(e){return!e.disabled})).call(o),X.select(".nv-distributionY").datum(e.filter(function(e){return!e.disabled})).call(u)}var C=d3.select(this),k=this,L=(f||parseInt(C.style("width"))||960)-a.left-a.right,I=(l||parseInt(C.style("height"))||400)-a.top-a.bottom;F.update=function(){C.transition().duration(D).call(F)},F.container=this,A.disabled=e.map(function(e){return!!e.disabled});if(!O){var q;O={};for(q in A)A[q]instanceof Array?O[q]=A[q].slice(0):O[q]=A[q]}if(!e||!e.length||!e.filter(function(e){return e.values.length}).length){var R=C.selectAll(".nv-noData").data([_]);return R.enter().append("text").attr("class","nvd3 nv-noData").attr("dy","-.7em").style("text-anchor","middle"),R.attr("x",a.left+L/2).attr("y",a.top+I/2).text(function(e){return e}),F}C.selectAll(".nv-noData").remove(),P=P||h,H=H||p;var U=C.selectAll("g.nv-wrap.nv-scatterChart").data([e]),z=U.enter().append("g").attr("class","nvd3 nv-wrap nv-scatterChart nv-chart-"+t.id()),W=z.append("g"),X=U.select("g");W.append("rect").attr("class","nvd3 nv-background"),W.append("g").attr("class","nv-x nv-axis"),W.append("g").attr("class","nv-y nv-axis"),W.append("g").attr("class","nv-scatterWrap"),W.append("g").attr("class","nv-distWrap"),W.append("g").attr("class","nv-legendWrap"),W.append("g").attr("class","nv-controlsWrap");if(y){var V=S?L/2:L;i.width(V),U.select(".nv-legendWrap").datum(e).call(i),a.top!=i.height()&&(a.top=i.height(),I=(l||parseInt(C.style("height"))||400)-a.top-a.bottom),U.select(".nv-legendWrap").attr("transform","translate("+(L-V)+","+ -a.top+")")}S&&(s.width(180).color(["#444"]),X.select(".nv-controlsWrap").datum(j).attr("transform","translate(0,"+ -a.top+")").call(s)),U.attr("transform","translate("+a.left+","+a.top+")"),E&&X.select(".nv-y.nv-axis").attr("transform","translate("+L+",0)"),t.width(L).height(I).color(e.map(function(e,t){return e.color||c(e,t)}).filter(function(t,n){return!e[n].disabled})),d!==0&&t.xDomain(null),v!==0&&t.yDomain(null),U.select(".nv-scatterWrap").datum(e.filter(function(e){return!e.disabled})).call(t);if(d!==0){var $=h.domain()[1]-h.domain()[0];t.xDomain([h.domain()[0]-d*$,h.domain()[1]+d*$])}if(v!==0){var J=p.domain()[1]-p.domain()[0];t.yDomain([p.domain()[0]-v*J,p.domain()[1]+v*J])}(v!==0||d!==0)&&U.select(".nv-scatterWrap").datum(e.filter(function(e){return!e.disabled})).call(t),b&&(n.scale(h).ticks(n.ticks()&&n.ticks().length?n.ticks():L/100).tickSize(-I,0),X.select(".nv-x.nv-axis").attr("transform","translate(0,"+p.range()[0]+")").call(n)),w&&(r.scale(p).ticks(r.ticks()&&r.ticks().length?r.ticks():I/36).tickSize(-L,0),X.select(".nv-y.nv-axis").call(r)),m&&(o.getData(t.x()).scale(h).width(L).color(e.map(function(e,t){return 
e.color||c(e,t)}).filter(function(t,n){return!e[n].disabled})),W.select(".nv-distWrap").append("g").attr("class","nv-distributionX"),X.select(".nv-distributionX").attr("transform","translate(0,"+p.range()[0]+")").datum(e.filter(function(e){return!e.disabled})).call(o)),g&&(u.getData(t.y()).scale(p).width(I).color(e.map(function(e,t){return e.color||c(e,t)}).filter(function(t,n){return!e[n].disabled})),W.select(".nv-distWrap").append("g").attr("class","nv-distributionY"),X.select(".nv-distributionY").attr("transform","translate("+(E?L:-u.size())+",0)").datum(e.filter(function(e){return!e.disabled})).call(u)),d3.fisheye&&(X.select(".nv-background").attr("width",L).attr("height",I),X.select(".nv-background").on("mousemove",K),X.select(".nv-background").on("click",function(){T=!T}),t.dispatch.on("elementClick.freezeFisheye",function(){T=!T})),s.dispatch.on("legendClick",function(e,i){e.disabled=!e.disabled,x=e.disabled?0:2.5,X.select(".nv-background").style("pointer-events",e.disabled?"none":"all"),X.select(".nv-point-paths").style("pointer-events",e.disabled?"all":"none"),e.disabled?(h.distortion(x).focus(0),p.distortion(x).focus(0),X.select(".nv-scatterWrap").call(t),X.select(".nv-x.nv-axis").call(n),X.select(".nv-y.nv-axis").call(r)):T=!1,F.update()}),i.dispatch.on("stateChange",function(e){A.disabled=e.disabled,M.stateChange(A),F.update()}),t.dispatch.on("elementMouseover.tooltip",function(e){d3.select(".nv-chart-"+t.id()+" .nv-series-"+e.seriesIndex+" .nv-distx-"+e.pointIndex).attr("y1",function(t,n){return e.pos[1]-I}),d3.select(".nv-chart-"+t.id()+" .nv-series-"+e.seriesIndex+" .nv-disty-"+e.pointIndex).attr("x2",e.pos[0]+o.size()),e.pos=[e.pos[0]+a.left,e.pos[1]+a.top],M.tooltipShow(e)}),M.on("tooltipShow",function(e){N&&B(e,k.parentNode)}),M.on("changeState",function(t){typeof t.disabled!="undefined"&&(e.forEach(function(e,n){e.disabled=t.disabled[n]}),A.disabled=t.disabled),F.update()}),P=h.copy(),H=p.copy()}),F}var t=e.models.scatter(),n=e.models.axis(),r=e.models.axis(),i=e.models.legend(),s=e.models.legend(),o=e.models.distribution(),u=e.models.distribution(),a={top:30,right:20,bottom:50,left:75},f=null,l=null,c=e.utils.defaultColor(),h=d3.fisheye?d3.fisheye.scale(d3.scale.linear).distortion(0):t.xScale(),p=d3.fisheye?d3.fisheye.scale(d3.scale.linear).distortion(0):t.yScale(),d=0,v=0,m=!1,g=!1,y=!0,b=!0,w=!0,E=!1,S=!!d3.fisheye,x=0,T=!1,N=!0,C=function(e,t,n){return""+t+""},k=function(e,t,n){return""+n+""},L=null,A={},O=null,M=d3.dispatch("tooltipShow","tooltipHide","stateChange","changeState"),_="No Data Available.",D=250;t.xScale(h).yScale(p),n.orient("bottom").tickPadding(10),r.orient(E?"right":"left").tickPadding(10),o.axis("x"),u.axis("y"),s.updateState(!1);var P,H,B=function(i,s){var o=i.pos[0]+(s.offsetLeft||0),u=i.pos[1]+(s.offsetTop||0),f=i.pos[0]+(s.offsetLeft||0),l=p.range()[0]+a.top+(s.offsetTop||0),c=h.range()[0]+a.left+(s.offsetLeft||0),d=i.pos[1]+(s.offsetTop||0),v=n.tickFormat()(t.x()(i.point,i.pointIndex)),m=r.tickFormat()(t.y()(i.point,i.pointIndex));C!=null&&e.tooltip.show([f,l],C(i.series.key,v,m,i,F),"n",1,s,"x-nvtooltip"),k!=null&&e.tooltip.show([c,d],k(i.series.key,v,m,i,F),"e",1,s,"y-nvtooltip"),L!=null&&e.tooltip.show([o,u],L(i.series.key,v,m,i,F),i.value<0?"n":"s",null,s)},j=[{key:"Magnify",disabled:!0}];return t.dispatch.on("elementMouseout.tooltip",function(e){M.tooltipHide(e),d3.select(".nv-chart-"+t.id()+" .nv-series-"+e.seriesIndex+" .nv-distx-"+e.pointIndex).attr("y1",0),d3.select(".nv-chart-"+t.id()+" .nv-series-"+e.seriesIndex+" 
.nv-disty-"+e.pointIndex).attr("x2",u.size())}),M.on("tooltipHide",function(){N&&e.tooltip.cleanup()}),F.dispatch=M,F.scatter=t,F.legend=i,F.controls=s,F.xAxis=n,F.yAxis=r,F.distX=o,F.distY=u,d3.rebind(F,t,"id","interactive","pointActive","x","y","shape","size","xScale","yScale","zScale","xDomain","yDomain","xRange","yRange","sizeDomain","sizeRange","forceX","forceY","forceSize","clipVoronoi","clipRadius","useVoronoi"),F.options=e.utils.optionsFunc.bind(F),F.margin=function(e){return arguments.length?(a.top=typeof e.top!="undefined"?e.top:a.top,a.right=typeof e.right!="undefined"?e.right:a.right,a.bottom=typeof e.bottom!="undefined"?e.bottom:a.bottom,a.left=typeof e.left!="undefined"?e.left:a.left,F):a},F.width=function(e){return arguments.length?(f=e,F):f},F.height=function(e){return arguments.length?(l=e,F):l},F.color=function(t){return arguments.length?(c=e.utils.getColor(t),i.color(c),o.color(c),u.color(c),F):c},F.showDistX=function(e){return arguments.length?(m=e,F):m},F.showDistY=function(e){return arguments.length?(g=e,F):g},F.showControls=function(e){return arguments.length?(S=e,F):S},F.showLegend=function(e){return arguments.length?(y=e,F):y},F.showXAxis=function(e){return arguments.length?(b=e,F):b},F.showYAxis=function(e){return arguments.length?(w=e,F):w},F.rightAlignYAxis=function(e){return arguments.length?(E=e,r.orient(e?"right":"left"),F):E},F.fisheye=function(e){return arguments.length?(x=e,F):x},F.xPadding=function(e){return arguments.length?(d=e,F):d},F.yPadding=function(e){return arguments.length?(v=e,F):v},F.tooltips=function(e){return arguments.length?(N=e,F):N},F.tooltipContent=function(e){return arguments.length?(L=e,F):L},F.tooltipXContent=function(e){return arguments.length?(C=e,F):C},F.tooltipYContent=function(e){return arguments.length?(k=e,F):k},F.state=function(e){return arguments.length?(A=e,F):A},F.defaultState=function(e){return arguments.length?(O=e,F):O},F.noData=function(e){return arguments.length?(_=e,F):_},F.transitionDuration=function(e){return arguments.length?(D=e,F):D},F},e.models.scatterPlusLineChart=function(){"use strict";function B(e){return e.each(function(e){function $(){if(S)return z.select(".nv-point-paths").style("pointer-events","all"),!1;z.select(".nv-point-paths").style("pointer-events","none");var i=d3.mouse(this);h.distortion(E).focus(i[0]),p.distortion(E).focus(i[1]),z.select(".nv-scatterWrap").datum(e.filter(function(e){return!e.disabled})).call(t),g&&z.select(".nv-x.nv-axis").call(n),y&&z.select(".nv-y.nv-axis").call(r),z.select(".nv-distributionX").datum(e.filter(function(e){return!e.disabled})).call(o),z.select(".nv-distributionY").datum(e.filter(function(e){return!e.disabled})).call(u)}var T=d3.select(this),N=this,C=(f||parseInt(T.style("width"))||960)-a.left-a.right,j=(l||parseInt(T.style("height"))||400)-a.top-a.bottom;B.update=function(){T.transition().duration(M).call(B)},B.container=this,k.disabled=e.map(function(e){return!!e.disabled});if(!L){var F;L={};for(F in k)k[F]instanceof Array?L[F]=k[F].slice(0):L[F]=k[F]}if(!e||!e.length||!e.filter(function(e){return e.values.length}).length){var I=T.selectAll(".nv-noData").data([O]);return I.enter().append("text").attr("class","nvd3 nv-noData").attr("dy","-.7em").style("text-anchor","middle"),I.attr("x",a.left+C/2).attr("y",a.top+j/2).text(function(e){return e}),B}T.selectAll(".nv-noData").remove(),h=t.xScale(),p=t.yScale(),_=_||h,D=D||p;var q=T.selectAll("g.nv-wrap.nv-scatterChart").data([e]),R=q.enter().append("g").attr("class","nvd3 nv-wrap nv-scatterChart 
nv-chart-"+t.id()),U=R.append("g"),z=q.select("g");U.append("rect").attr("class","nvd3 nv-background").style("pointer-events","none"),U.append("g").attr("class","nv-x nv-axis"),U.append("g").attr("class","nv-y nv-axis"),U.append("g").attr("class","nv-scatterWrap"),U.append("g").attr("class","nv-regressionLinesWrap"),U.append("g").attr("class","nv-distWrap"),U.append("g").attr("class","nv-legendWrap"),U.append("g").attr("class","nv-controlsWrap"),q.attr("transform","translate("+a.left+","+a.top+")"),b&&z.select(".nv-y.nv-axis").attr("transform","translate("+C+",0)"),m&&(i.width(C/2),q.select(".nv-legendWrap").datum(e).call(i),a.top!=i.height()&&(a.top=i.height(),j=(l||parseInt(T.style("height"))||400)-a.top-a.bottom),q.select(".nv-legendWrap").attr("transform","translate("+C/2+","+ -a.top+")")),w&&(s.width(180).color(["#444"]),z.select(".nv-controlsWrap").datum(H).attr("transform","translate(0,"+ -a.top+")").call(s)),t.width(C).height(j).color(e.map(function(e,t){return e.color||c(e,t)}).filter(function(t,n){return!e[n].disabled})),q.select(".nv-scatterWrap").datum(e.filter(function(e){return!e.disabled})).call(t),q.select(".nv-regressionLinesWrap").attr("clip-path","url(#nv-edge-clip-"+t.id()+")");var W=q.select(".nv-regressionLinesWrap").selectAll(".nv-regLines").data(function(e){return e});W.enter().append("g").attr("class","nv-regLines");var X=W.selectAll(".nv-regLine").data(function(e){return[e]}),V=X.enter().append("line").attr("class","nv-regLine").style("stroke-opacity",0);X.transition().attr("x1",h.range()[0]).attr("x2",h.range()[1]).attr("y1",function(e,t){return p(h.domain()[0]*e.slope+e.intercept)}).attr("y2",function(e,t){return p(h.domain()[1]*e.slope+e.intercept)}).style("stroke",function(e,t,n){return c(e,n)}).style("stroke-opacity",function(e,t){return e.disabled||typeof e.slope=="undefined"||typeof e.intercept=="undefined"?0:1}),g&&(n.scale(h).ticks(n.ticks()?n.ticks():C/100).tickSize(-j,0),z.select(".nv-x.nv-axis").attr("transform","translate(0,"+p.range()[0]+")").call(n)),y&&(r.scale(p).ticks(r.ticks()?r.ticks():j/36).tickSize(-C,0),z.select(".nv-y.nv-axis").call(r)),d&&(o.getData(t.x()).scale(h).width(C).color(e.map(function(e,t){return e.color||c(e,t)}).filter(function(t,n){return!e[n].disabled})),U.select(".nv-distWrap").append("g").attr("class","nv-distributionX"),z.select(".nv-distributionX").attr("transform","translate(0,"+p.range()[0]+")").datum(e.filter(function(e){return!e.disabled})).call(o)),v&&(u.getData(t.y()).scale(p).width( -j).color(e.map(function(e,t){return 
e.color||c(e,t)}).filter(function(t,n){return!e[n].disabled})),U.select(".nv-distWrap").append("g").attr("class","nv-distributionY"),z.select(".nv-distributionY").attr("transform","translate("+(b?C:-u.size())+",0)").datum(e.filter(function(e){return!e.disabled})).call(u)),d3.fisheye&&(z.select(".nv-background").attr("width",C).attr("height",j),z.select(".nv-background").on("mousemove",$),z.select(".nv-background").on("click",function(){S=!S}),t.dispatch.on("elementClick.freezeFisheye",function(){S=!S})),s.dispatch.on("legendClick",function(e,i){e.disabled=!e.disabled,E=e.disabled?0:2.5,z.select(".nv-background").style("pointer-events",e.disabled?"none":"all"),z.select(".nv-point-paths").style("pointer-events",e.disabled?"all":"none"),e.disabled?(h.distortion(E).focus(0),p.distortion(E).focus(0),z.select(".nv-scatterWrap").call(t),z.select(".nv-x.nv-axis").call(n),z.select(".nv-y.nv-axis").call(r)):S=!1,B.update()}),i.dispatch.on("stateChange",function(e){k=e,A.stateChange(k),B.update()}),t.dispatch.on("elementMouseover.tooltip",function(e){d3.select(".nv-chart-"+t.id()+" .nv-series-"+e.seriesIndex+" .nv-distx-"+e.pointIndex).attr("y1",e.pos[1]-j),d3.select(".nv-chart-"+t.id()+" .nv-series-"+e.seriesIndex+" .nv-disty-"+e.pointIndex).attr("x2",e.pos[0]+o.size()),e.pos=[e.pos[0]+a.left,e.pos[1]+a.top],A.tooltipShow(e)}),A.on("tooltipShow",function(e){x&&P(e,N.parentNode)}),A.on("changeState",function(t){typeof t.disabled!="undefined"&&(e.forEach(function(e,n){e.disabled=t.disabled[n]}),k.disabled=t.disabled),B.update()}),_=h.copy(),D=p.copy()}),B}var t=e.models.scatter(),n=e.models.axis(),r=e.models.axis(),i=e.models.legend(),s=e.models.legend(),o=e.models.distribution(),u=e.models.distribution(),a={top:30,right:20,bottom:50,left:75},f=null,l=null,c=e.utils.defaultColor(),h=d3.fisheye?d3.fisheye.scale(d3.scale.linear).distortion(0):t.xScale(),p=d3.fisheye?d3.fisheye.scale(d3.scale.linear).distortion(0):t.yScale(),d=!1,v=!1,m=!0,g=!0,y=!0,b=!1,w=!!d3.fisheye,E=0,S=!1,x=!0,T=function(e,t,n){return""+t+""},N=function(e,t,n){return""+n+""},C=function(e,t,n,r){return"

    "+e+"

    "+"

    "+r+"

    "},k={},L=null,A=d3.dispatch("tooltipShow","tooltipHide","stateChange","changeState"),O="No Data Available.",M=250;t.xScale(h).yScale(p),n.orient("bottom").tickPadding(10),r.orient(b?"right":"left").tickPadding(10),o.axis("x"),u.axis("y"),s.updateState(!1);var _,D,P=function(i,s){var o=i.pos[0]+(s.offsetLeft||0),u=i.pos[1]+(s.offsetTop||0),f=i.pos[0]+(s.offsetLeft||0),l=p.range()[0]+a.top+(s.offsetTop||0),c=h.range()[0]+a.left+(s.offsetLeft||0),d=i.pos[1]+(s.offsetTop||0),v=n.tickFormat()(t.x()(i.point,i.pointIndex)),m=r.tickFormat()(t.y()(i.point,i.pointIndex));T!=null&&e.tooltip.show([f,l],T(i.series.key,v,m,i,B),"n",1,s,"x-nvtooltip"),N!=null&&e.tooltip.show([c,d],N(i.series.key,v,m,i,B),"e",1,s,"y-nvtooltip"),C!=null&&e.tooltip.show([o,u],C(i.series.key,v,m,i.point.tooltip,i,B),i.value<0?"n":"s",null,s)},H=[{key:"Magnify",disabled:!0}];return t.dispatch.on("elementMouseout.tooltip",function(e){A.tooltipHide(e),d3.select(".nv-chart-"+t.id()+" .nv-series-"+e.seriesIndex+" .nv-distx-"+e.pointIndex).attr("y1",0),d3.select(".nv-chart-"+t.id()+" .nv-series-"+e.seriesIndex+" .nv-disty-"+e.pointIndex).attr("x2",u.size())}),A.on("tooltipHide",function(){x&&e.tooltip.cleanup()}),B.dispatch=A,B.scatter=t,B.legend=i,B.controls=s,B.xAxis=n,B.yAxis=r,B.distX=o,B.distY=u,d3.rebind(B,t,"id","interactive","pointActive","x","y","shape","size","xScale","yScale","zScale","xDomain","yDomain","xRange","yRange","sizeDomain","sizeRange","forceX","forceY","forceSize","clipVoronoi","clipRadius","useVoronoi"),B.options=e.utils.optionsFunc.bind(B),B.margin=function(e){return arguments.length?(a.top=typeof e.top!="undefined"?e.top:a.top,a.right=typeof e.right!="undefined"?e.right:a.right,a.bottom=typeof e.bottom!="undefined"?e.bottom:a.bottom,a.left=typeof e.left!="undefined"?e.left:a.left,B):a},B.width=function(e){return arguments.length?(f=e,B):f},B.height=function(e){return arguments.length?(l=e,B):l},B.color=function(t){return arguments.length?(c=e.utils.getColor(t),i.color(c),o.color(c),u.color(c),B):c},B.showDistX=function(e){return arguments.length?(d=e,B):d},B.showDistY=function(e){return arguments.length?(v=e,B):v},B.showControls=function(e){return arguments.length?(w=e,B):w},B.showLegend=function(e){return arguments.length?(m=e,B):m},B.showXAxis=function(e){return arguments.length?(g=e,B):g},B.showYAxis=function(e){return arguments.length?(y=e,B):y},B.rightAlignYAxis=function(e){return arguments.length?(b=e,r.orient(e?"right":"left"),B):b},B.fisheye=function(e){return arguments.length?(E=e,B):E},B.tooltips=function(e){return arguments.length?(x=e,B):x},B.tooltipContent=function(e){return arguments.length?(C=e,B):C},B.tooltipXContent=function(e){return arguments.length?(T=e,B):T},B.tooltipYContent=function(e){return arguments.length?(N=e,B):N},B.state=function(e){return arguments.length?(k=e,B):k},B.defaultState=function(e){return arguments.length?(L=e,B):L},B.noData=function(e){return arguments.length?(O=e,B):O},B.transitionDuration=function(e){return arguments.length?(M=e,B):M},B},e.models.sparkline=function(){"use strict";function d(e){return e.each(function(e){var i=n-t.left-t.right,d=r-t.top-t.bottom,v=d3.select(this);s.domain(l||d3.extent(e,u)).range(h||[0,i]),o.domain(c||d3.extent(e,a)).range(p||[d,0]);var m=v.selectAll("g.nv-wrap.nv-sparkline").data([e]),g=m.enter().append("g").attr("class","nvd3 nv-wrap nv-sparkline"),b=g.append("g"),w=m.select("g");m.attr("transform","translate("+t.left+","+t.top+")");var 
E=m.selectAll("path").data(function(e){return[e]});E.enter().append("path"),E.exit().remove(),E.style("stroke",function(e,t){return e.color||f(e,t)}).attr("d",d3.svg.line().x(function(e,t){return s(u(e,t))}).y(function(e,t){return o(a(e,t))}));var S=m.selectAll("circle.nv-point").data(function(e){function n(t){if(t!=-1){var n=e[t];return n.pointIndex=t,n}return null}var t=e.map(function(e,t){return a(e,t)}),r=n(t.lastIndexOf(o.domain()[1])),i=n(t.indexOf(o.domain()[0])),s=n(t.length-1);return[i,r,s].filter(function(e){return e!=null})});S.enter().append("circle"),S.exit().remove(),S.attr("cx",function(e,t){return s(u(e,e.pointIndex))}).attr("cy",function(e,t){return o(a(e,e.pointIndex))}).attr("r",2).attr("class",function(e,t){return u(e,e.pointIndex)==s.domain()[1]?"nv-point nv-currentValue":a(e,e.pointIndex)==o.domain()[0]?"nv-point nv-minValue":"nv-point nv-maxValue"})}),d}var t={top:2,right:0,bottom:2,left:0},n=400,r=32,i=!0,s=d3.scale.linear(),o=d3.scale.linear(),u=function(e){return e.x},a=function(e){return e.y},f=e.utils.getColor(["#000"]),l,c,h,p;return d.options=e.utils.optionsFunc.bind(d),d.margin=function(e){return arguments.length?(t.top=typeof e.top!="undefined"?e.top:t.top,t.right=typeof e.right!="undefined"?e.right:t.right,t.bottom=typeof e.bottom!="undefined"?e.bottom:t.bottom,t.left=typeof e.left!="undefined"?e.left:t.left,d):t},d.width=function(e){return arguments.length?(n=e,d):n},d.height=function(e){return arguments.length?(r=e,d):r},d.x=function(e){return arguments.length?(u=d3.functor(e),d):u},d.y=function(e){return arguments.length?(a=d3.functor(e),d):a},d.xScale=function(e){return arguments.length?(s=e,d):s},d.yScale=function(e){return arguments.length?(o=e,d):o},d.xDomain=function(e){return arguments.length?(l=e,d):l},d.yDomain=function(e){return arguments.length?(c=e,d):c},d.xRange=function(e){return arguments.length?(h=e,d):h},d.yRange=function(e){return arguments.length?(p=e,d):p},d.animate=function(e){return arguments.length?(i=e,d):i},d.color=function(t){return arguments.length?(f=e.utils.getColor(t),d):f},d},e.models.sparklinePlus=function(){"use strict";function v(e){return e.each(function(c){function O(){if(a)return;var e=C.selectAll(".nv-hoverValue").data(u),r=e.enter().append("g").attr("class","nv-hoverValue").style("stroke-opacity",0).style("fill-opacity",0);e.exit().transition().duration(250).style("stroke-opacity",0).style("fill-opacity",0).remove(),e.attr("transform",function(e){return"translate("+s(t.x()(c[e],e))+",0)"}).transition().duration(250).style("stroke-opacity",1).style("fill-opacity",1);if(!u.length)return;r.append("line").attr("x1",0).attr("y1",-n.top).attr("x2",0).attr("y2",b),r.append("text").attr("class","nv-xValue").attr("x",-6).attr("y",-n.top).attr("text-anchor","end").attr("dy",".9em"),C.select(".nv-hoverValue .nv-xValue").text(f(t.x()(c[u[0]],u[0]))),r.append("text").attr("class","nv-yValue").attr("x",6).attr("y",-n.top).attr("text-anchor","start").attr("dy",".9em"),C.select(".nv-hoverValue .nv-yValue").text(l(t.y()(c[u[0]],u[0])))}function M(){function r(e,n){var r=Math.abs(t.x()(e[0],0)-n),i=0;for(var s=0;s2){var h=M.yScale().invert(i.mouseY),p=Infinity,d=null;c.forEach(function(e,t){h=Math.abs(h);var n=Math.abs(e.stackedValue.y0),r=Math.abs(e.stackedValue.y);if(h>=n&&h<=r+n){d=t;return}}),d!=null&&(c[d].highlight=!0)}var v=n.tickFormat()(M.x()(s,a)),m=t.style()=="expand"?function(e,t){return d3.format(".1%")(e)}:function(e,t){return 
r.tickFormat()(e)};o.tooltip.position({left:f+u.left,top:i.mouseY+u.top}).chartContainer(D.parentNode).enabled(g).valueFormatter(m).data({value:v,series:c})(),o.renderGuideLine(f)}),o.dispatch.on("elementMouseout",function(e){N.tooltipHide(),t.clearHighlights()}),N.on("tooltipShow",function(e){g&&O(e,D.parentNode)}),N.on("changeState",function(e){typeof e.disabled!="undefined"&&y.length===e.disabled.length&&(y.forEach(function(t,n){t.disabled=e.disabled[n]}),S.disabled=e.disabled),typeof e.style!="undefined"&&t.style(e.style),M.update()})}),M}var t=e.models.stackedArea(),n=e.models.axis(),r=e.models.axis(),i=e.models.legend(),s=e.models.legend(),o=e.interactiveGuideline(),u={top:30,right:25,bottom:50,left:60},a=null,f=null,l=e.utils.defaultColor(),c=!0,h=!0,p=!0,d=!0,v=!1,m=!1,g=!0,y=function(e,t,n,r,i){return"

    "+e+"

    "+"

    "+n+" on "+t+"

    "},b,w,E=d3.format(",.2f"),S={style:t.style()},x=null,T="No Data Available.",N=d3.dispatch("tooltipShow","tooltipHide","stateChange","changeState"),C=250,k=["Stacked","Stream","Expanded"],L={},A=250;n.orient("bottom").tickPadding(7),r.orient(v?"right":"left"),s.updateState(!1);var O=function(i,s){var o=i.pos[0]+(s.offsetLeft||0),u=i.pos[1]+(s.offsetTop||0),a=n.tickFormat()(t.x()(i.point,i.pointIndex)),f=r.tickFormat()(t.y()(i.point,i.pointIndex)),l=y(i.series.key,a,f,i,M);e.tooltip.show([o,u],l,i.value<0?"n":"s",null,s)};return t.dispatch.on("tooltipShow",function(e){e.pos=[e.pos[0]+u.left,e.pos[1]+u.top],N.tooltipShow(e)}),t.dispatch.on("tooltipHide",function(e){N.tooltipHide(e)}),N.on("tooltipHide",function(){g&&e.tooltip.cleanup()}),M.dispatch=N,M.stacked=t,M.legend=i,M.controls=s,M.xAxis=n,M.yAxis=r,M.interactiveLayer=o,d3.rebind(M,t,"x","y","size","xScale","yScale","xDomain","yDomain","xRange","yRange","sizeDomain","interactive","useVoronoi","offset","order","style","clipEdge","forceX","forceY","forceSize","interpolate"),M.options=e.utils.optionsFunc.bind(M),M.margin=function(e){return arguments.length?(u.top=typeof e.top!="undefined"?e.top:u.top,u.right=typeof e.right!="undefined"?e.right:u.right,u.bottom=typeof e.bottom!="undefined"?e.bottom:u.bottom,u.left=typeof e.left!="undefined"?e.left:u.left,M):u},M.width=function(e){return arguments.length?(a=e,M):a},M.height=function(e){return arguments.length?(f=e,M):f},M.color=function(n){return arguments.length?(l=e.utils.getColor(n),i.color(l),t.color(l),M):l},M.showControls=function(e){return arguments.length?(c=e,M):c},M.showLegend=function(e){return arguments.length?(h=e,M):h},M.showXAxis=function(e){return arguments.length?(p=e,M):p},M.showYAxis=function(e){return arguments.length?(d=e,M):d},M.rightAlignYAxis=function(e){return arguments.length?(v=e,r.orient(e?"right":"left"),M):v},M.useInteractiveGuideline=function(e){return arguments.length?(m=e,e===!0&&(M.interactive(!1),M.useVoronoi(!1)),M):m},M.tooltip=function(e){return arguments.length?(y=e,M):y},M.tooltips=function(e){return arguments.length?(g=e,M):g},M.tooltipContent=function(e){return arguments.length?(y=e,M):y},M.state=function(e){return arguments.length?(S=e,M):S},M.defaultState=function(e){return arguments.length?(x=e,M):x},M.noData=function(e){return arguments.length?(T=e,M):T},M.transitionDuration=function(e){return arguments.length?(A=e,M):A},M.controlsData=function(e){return arguments.length?(k=e,M):k},M.controlLabels=function(e){return arguments.length?typeof e!="object"?L:(L=e,M):L},r.setTickFormat=r.tickFormat,r.tickFormat=function(e){return arguments.length?(E=e,r):E},M}})(); \ No newline at end of file diff --git a/rally/ui/templates/task/directive_widget.js b/rally/ui/templates/task/directive_widget.js deleted file mode 100644 index cd2feba4..00000000 --- a/rally/ui/templates/task/directive_widget.js +++ /dev/null @@ -1,228 +0,0 @@ -var widgetDirective = function($compile) { - var Chart = { - _render: function(node, data, chart, do_after){ - nv.addGraph(function() { - d3.select(node) - .datum(data).transition().duration(0) - .call(chart); - if (typeof do_after === "function") { - do_after(node, chart) - } - nv.utils.windowResize(chart.update); - }) - }, - _widgets: { - Pie: "pie", - StackedArea: "stack", - Lines: "lines", - Histogram: "histogram" - }, - get_chart: function(widget) { - if (widget in this._widgets) { - var name = this._widgets[widget]; - return Chart[name] - } - return function() { console.log("Error: unexpected widget:", widget) } - }, - 
pie: function(node, data, opts, do_after) { - var chart = nv.models.pieChart() - .x(function(d) { return d.key }) - .y(function(d) { return d.values }) - .showLabels(true) - .labelType("percent") - .donut(true) - .donutRatio(0.25) - .donutLabelsOutside(true) - .color(function(d){ - if (d.data && d.data.color) { return d.data.color } - }); - var colorizer = new Chart.colorizer("errors"), data_ = []; - for (var i in data) { - data_.push({key:data[i][0], values:data[i][1], color:colorizer.get_color(data[i][0])}) - } - Chart._render(node, data_, chart) - }, - colorizer: function(failure_key, failure_color) { - this.failure_key = failure_key || "failed_duration"; - this.failure_color = failure_color || "#d62728"; // red - this.color_idx = -1; - /* NOTE(amaretskiy): this is actually a result of - d3.scale.category20().range(), excluding red color (#d62728) - which is reserved for errors */ - this.colors = ["#1f77b4", "#aec7e8", "#ff7f0e", "#ffbb78", "#2ca02c", - "#98df8a", "#ff9896", "#9467bd", "#c5b0d5", "#8c564b", - "#c49c94", "#e377c2", "#f7b6d2", "#7f7f7f", "#c7c7c7", - "#bcbd22", "#dbdb8d", "#17becf", "#9edae5"]; - this.get_color = function(key) { - if (key === this.failure_key) { - return this.failure_color - } - if (this.color_idx > (this.colors.length - 2)) { - this.color_idx = 0 - } else { - this.color_idx++ - } - return this.colors[this.color_idx] - } - }, - stack: function(node, data, opts, do_after) { - var chart = nv.models.stackedAreaChart() - .x(function(d) { return d[0] }) - .y(function(d) { return d[1] }) - .useInteractiveGuideline(opts.guide) - .showControls(opts.controls) - .clipEdge(true); - chart.xAxis - .axisLabel(opts.xname) - .tickFormat(opts.xformat) - .showMaxMin(opts.showmaxmin); - chart.yAxis - .orient("left") - .tickFormat(d3.format(opts.yformat || ",.3f")); - var colorizer = new Chart.colorizer(), data_ = []; - for (var i in data) { - data_.push({key:data[i][0], values:data[i][1], color:colorizer.get_color(data[i][0])}) - } - Chart._render(node, data_, chart, do_after); - }, - lines: function(node, data, opts, do_after) { - var chart = nv.models.lineChart() - .x(function(d) { return d[0] }) - .y(function(d) { return d[1] }) - .useInteractiveGuideline(opts.guide) - .clipEdge(true); - chart.xAxis - .axisLabel(opts.xname) - .tickFormat(opts.xformat) - .rotateLabels(opts.xrotate) - .showMaxMin(opts.showmaxmin); - chart.yAxis - .orient("left") - .tickFormat(d3.format(opts.yformat || ",.3f")); - var colorizer = new Chart.colorizer(), data_ = []; - for (var i in data) { - data_.push({key:data[i][0], values:data[i][1], color:colorizer.get_color(data[i][0])}) - } - Chart._render(node, data_, chart, do_after) - }, - histogram: function(node, data, opts) { - var chart = nv.models.multiBarChart() - .reduceXTicks(true) - .showControls(false) - .transitionDuration(0) - .groupSpacing(0.05); - chart - .legend.radioButtonMode(true); - chart.xAxis - .axisLabel("Duration (seconds)") - .tickFormat(d3.format(",.2f")); - chart.yAxis - .axisLabel("Iterations (frequency)") - .tickFormat(d3.format("d")); - Chart._render(node, data, chart) - } - }; - - return { - restrict: "A", - scope: { data: "=" }, - link: function(scope, element, attrs) { - scope.$watch("data", function(data) { - if (! data) { return console.log("Chart has no data to render!") } - if (attrs.widget === "Table") { - var ng_class = attrs.lastrowClass ? " ng-class='{"+attrs.lastrowClass+":$last}'" : ""; - var template = "" + - "" + - "" + - "" + - "" + - "
    {{i}}
    {{i}}" + - "
    "; - var el = element.empty().append($compile(template)(scope)).children()[0] - } else if (attrs.widget === "TextArea") { - var template = "
    {{str}}
    "; - var el = element.empty().append($compile(template)(scope)).children()[0] - } else { - - var el_chart = element.addClass("chart").css({display:"block"}); - var el = el_chart.html("").children()[0]; - - var do_after = null; - - if (attrs.widget in {StackedArea:0, Lines:0}) { - - /* Hide widget if not enough data */ - if ((! data.length) || (data[0].length < 1) || (data[0][1].length < 2)) { - return element.empty().css({display:"none"}) - } - - /* NOTE(amaretskiy): Dirty fix for changing chart width in case - if there are too long Y values that overlaps chart box. */ - var do_after = function(node, chart){ - var g_box = angular.element(el_chart[0].querySelector(".nv-y.nv-axis")); - - if (g_box && g_box[0] && g_box[0].getBBox) { - - try { - // 30 is padding aroung graphs - var width = g_box[0].getBBox().width + 30; - } catch (err) { - // This happens sometimes, just skip silently - return - } - - // 890 is chart width (set by CSS) - if (typeof width === "number" && width > 890) { - width = (890 * 2) - width; - if (width > 0) { - angular.element(node).css({width:width+"px"}); - chart.update() - } - } - } - } - } - else if (attrs.widget === "Pie") { - if (! data.length) { - return element.empty().css({display:"none"}) - } - } - - var opts = { - xname: attrs.nameX || "", - xrotate: attrs.rotateX || 0, - yformat: attrs.formatY || ",.3f", - controls: attrs.controls === "true", - guide: attrs.guide === "true", - showmaxmin: attrs.showmaxmin === "true" - }; - if (attrs.formatDateX) { - opts.xformat = function(d) { return d3.time.format(attrs.formatDateX)(new Date(d)) } - } else { - opts.xformat = d3.format(attrs.formatX || "d") - } - Chart.get_chart(attrs.widget)(el, data, opts, do_after); - } - - if (attrs.nameY) { - /* NOTE(amaretskiy): Dirty fix for displaying Y-axis label correctly. - I believe sometimes NVD3 will allow doing this in normal way */ - var label_y = angular.element("
    ").addClass("chart-label-y").text(attrs.nameY); - angular.element(el).parent().prepend(label_y) - } - - if (attrs.description) { - var desc_el = angular.element("
    ").addClass(attrs.descriptionClass || "h3").text(attrs.description); - angular.element(el).parent().prepend(desc_el) - } - - if (attrs.title) { - var title_el = angular.element("
    ").addClass(attrs.titleClass || "h2").text(attrs.title); - angular.element(el).parent().prepend(title_el) - } - - angular.element(el).parent().append(angular.element("
    ")) - }); - } - } -}; diff --git a/rally/ui/templates/task/report.html b/rally/ui/templates/task/report.html deleted file mode 100644 index 1e60f9d6..00000000 --- a/rally/ui/templates/task/report.html +++ /dev/null @@ -1,792 +0,0 @@ -{% extends "/base.html" %} - -{% block html_attr %} ng-app="App"{% endblock %} - -{% block title_text %}Rally Task Report{% endblock %} - -{% block libs %} - {% if include_libs %} - - - {% else %} - - - - - {% endif %} -{% endblock %} - -{% block js_before %} - "use strict"; - {{ include_raw_file("/task/directive_widget.js") }} - var controllerFunction = function($scope, $location) { - $scope.source = {{ source }}; - $scope.scenarios = {{ data }}; -{% raw %} - $scope.location = { - /* #/path/hash/sub/div */ - normalize: function(str) { - /* Remove unwanted characters from string */ - if (typeof str !== "string") { return "" } - return str.replace(/[^\w\-\.]/g, "") - }, - uri: function(obj) { - /* Getter/Setter */ - if (! obj) { - var uri = {path: "", hash: "", sub: "", div: ""}; - var arr = ["div", "sub", "hash", "path"]; - angular.forEach($location.url().split("/"), function(value){ - var v = $scope.location.normalize(value); - if (v) { var k = arr.pop(); if (k) { this[k] = v }} - }, uri); - return uri - } - var arr = [obj.path, obj.hash, obj.sub, obj.div], res = []; - for (var i in arr) { if (! arr[i]) { break }; res.push(arr[i]) } - return $location.url("/" + res.join("/")) - }, - path: function(path, hash) { - /* Getter/Setter */ - if (path === "") { return this.uri({}) } - path = this.normalize(path); - var uri = this.uri(); - if (! path) { return uri.path } - uri.path = path; - var _hash = this.normalize(hash); - if (_hash || hash === "") { uri.hash = _hash } - return this.uri(uri) - }, - hash: function(hash) { - /* Getter/Setter */ - if (hash) { this.uri({path:this.uri().path, hash:hash}) } - return this.uri().hash - } - } - - /* Dispatch */ - - $scope.route = function(uri) { - if (! $scope.scenarios_map) { return } - - // Expand menu if there is only one menu group - if ($scope.nav.length === 1) { - $scope.nav_idx = $scope.nav[0].idx; - } - - if (uri.path in $scope.scenarios_map) { - $scope.view = {is_scenario:true}; - $scope.scenario = $scope.scenarios_map[uri.path]; - $scope.nav_idx = $scope.nav_map[uri.path]; - if ($scope.scenario.iterations.histogram.views.length) { - $scope.mainHistogram = $scope.scenario.iterations.histogram.views[0] - } - if ($scope.scenario.atomic.histogram.views.length) { - $scope.atomicHistogram = $scope.scenario.atomic.histogram.views[0] - } - $scope.outputIteration = 0; - $scope.showTab(uri); - } else { - $scope.scenario = null; - if (uri.path === "source") { - $scope.view = {is_source:true} - } else { - $scope.view = {is_main:true} - } - } - } - - $scope.$on("$locationChangeSuccess", function (event, newUrl, oldUrl) { - $scope.route($scope.location.uri()) - }); - - $scope.showNav = function(nav_idx) { $scope.nav_idx = nav_idx } - - /* Tabs */ - - $scope.tabs = [ - { - id: "overview", - name: "Overview", - visible: function(){ return !! $scope.scenario.iterations.pie.length } - },{ - id: "details", - name: "Details", - visible: function(){ return !! $scope.scenario.atomic.pie.length } - },{ - id: "output", - name: "Scenario Data", - visible: function(){ return $scope.scenario.has_output } - },{ - id: "hooks", - name: "Hooks", - visible: function(){ return $scope.scenario.hooks.length } - },{ - id: "failures", - name: "Failures", - visible: function(){ return !! 
$scope.scenario.errors.length } - },{ - id: "task", - name: "Input task", - visible: function(){ return !! $scope.scenario.config } - } - ]; - $scope.tabs_map = {}; - angular.forEach($scope.tabs, - function(tab){ this[tab.id] = tab }, $scope.tabs_map); - - $scope.showTab = function(uri) { - $scope.tab = uri.hash in $scope.tabs_map ? uri.hash : "overview"; - if (uri.hash === "output") { - if (typeof $scope.scenario.output === "undefined") { - var has_additive = !! $scope.scenario.additive_output.length; - var has_complete = !! ($scope.scenario.complete_output.length - && $scope.scenario.complete_output[0].length); - $scope.scenario.output = { - has_additive: has_additive, - has_complete: has_complete, - length: has_additive + has_complete, - active: has_additive ? "additive" : (has_complete ? "complete" : "") - } - } - if (uri.sub && $scope.scenario.output["has_" + uri.sub]) { - $scope.scenario.output.active = uri.sub - } - } - else if (uri.hash === "hooks") { - if ($scope.scenario.hooks.length) { - var hook_idx = parseInt(uri.sub); - - if (isNaN(hook_idx) || ($scope.scenario.hooks.length - hook_idx) <= 0) { - hook_idx = 0 - } - - if ($scope.scenario.hook_idx === hook_idx) { - return - } - - $scope.scenario.hooks.cur = $scope.scenario.hooks[hook_idx]; - $scope.scenario.hook_idx = hook_idx; - if (typeof $scope.scenario.hooks.cur.active === "undefined") { - if ($scope.scenario.hooks.cur.additive.length) { - $scope.scenario.hooks.cur.active = "additive" - } - if ($scope.scenario.hooks.cur.complete.length) { - if (typeof $scope.scenario.hooks.cur.active === "undefined") { - $scope.scenario.hooks.cur.active = "complete" - } - $scope.set_hook_run() - } - } - } - } - } - - for (var i in $scope.tabs) { - if ($scope.tabs[i].id === $scope.location.hash()) { - $scope.tab = $scope.tabs[i].id - } - $scope.tabs[i].isVisible = function() { - if ($scope.scenario) { - if (this.visible()) { return true } - /* If tab should be hidden but is selected - show another one */ - if (this.id === $scope.location.hash()) { - for (var i in $scope.tabs) { - var tab = $scope.tabs[i]; - if (tab.id != this.id && tab.visible()) { - $scope.tab = tab.id; - return false - } - } - } - } - return false - } - } - - $scope.set_hook_run = function(idx) { - if (typeof idx !== "undefined") { - $scope.scenario.hooks.cur.run_idx = idx - } - else if (typeof $scope.scenario.hooks.cur.run_idx === "undefined") { - $scope.scenario.hooks.cur.run_idx = 0 - } - idx = $scope.scenario.hooks.cur.run_idx; - if (($scope.scenario.hooks.cur.complete.length - idx) > 0) { - $scope.scenario.hooks.cur.run = $scope.scenario.hooks.cur.complete[idx] - } - } - - $scope.complete_hooks_as_dropdown = function() { - return $scope.scenario.hooks.cur.complete.length > 10 - } - - /* Other helpers */ - - $scope.showError = function(message) { - return (function (e) { - e.style.display = "block"; - e.textContent = message - })(document.getElementById("page-error")) - } - - $scope.compact_atomics = function() { - return ($scope.scenario && $scope.scenario.atomic.iter.length < 9) - } - - /* Initialization */ - - angular.element(document).ready(function(){ - if (! $scope.scenarios.length) { - return $scope.showError("No data...") - } - - /* Compose data mapping */ - - $scope.nav = []; - $scope.nav_map = {}; - $scope.scenarios_map = {}; - var met = [], itr = 0, cls_idx = 0; - var prev_cls, prev_met; - - for (var idx in $scope.scenarios) { - var sc = $scope.scenarios[idx]; - if (! 
prev_cls) { - prev_cls = sc.cls - } - else if (prev_cls !== sc.cls) { - $scope.nav.push({cls:prev_cls, met:met, idx:cls_idx}); - prev_cls = sc.cls; - met = []; - itr = 1; - cls_idx += 1 - } - - if (prev_met !== sc.met) { itr = 1 }; - sc.ref = $scope.location.normalize(sc.cls+"."+sc.met+(itr > 1 ? "-"+itr : "")); - $scope.scenarios_map[sc.ref] = sc; - $scope.nav_map[sc.ref] = cls_idx; - met.push({name:sc.name, itr:itr, idx:idx, ref:sc.ref}); - prev_met = sc.met; - itr += 1; - } - - if (met.length) { - $scope.nav.push({cls:prev_cls, met:met, idx:cls_idx}) - } - - /* Start */ - - var uri = $scope.location.uri(); - uri.path = $scope.location.path(); - $scope.route(uri); - $scope.$digest() - }) - }; - - if (typeof angular === "object") { - angular.module("App", []) - .controller("Controller", ["$scope", "$location", controllerFunction]) - .directive("widget", widgetDirective) - } - -{% endraw %} -{% endblock %} - -{% block css %} - .aside { margin:0 20px 0 0; display:block; width:255px; float:left } - .aside > div { margin-bottom: 15px } - .aside > div div:first-child { border-top-left-radius:4px; border-top-right-radius:4px } - .aside > div div:last-child { border-bottom-left-radius:4px; border-bottom-right-radius:4px } - .navcls { color:#678; background:#eee; border:1px solid #ddd; margin-bottom:-1px; display:block; padding:8px 9px; font-weight:bold; text-align:left; overflow:hidden; text-overflow:ellipsis; white-space:nowrap; cursor:pointer } - .navcls.expanded { color:#469 } - .navcls.active { background:#428bca; background-image:linear-gradient(to bottom, #428bca 0px, #3278b3 100%); border-color:#3278b3; color:#fff } - .navmet { color:#555; background:#fff; border:1px solid #ddd; font-size:12px; display:block; margin-bottom:-1px; padding:8px 10px; text-align:left; text-overflow:ellipsis; white-space:nowrap; overflow:hidden; cursor:pointer } - .navmet:hover { background:#f8f8f8 } - .navmet.active, .navmet.active:hover { background:#428bca; background-image:linear-gradient(to bottom, #428bca 0px, #3278b3 100%); border-color:#3278b3; color:#fff } - - .buttn { color:#555; background:#fff; border:1px solid #ddd; border-radius:5px; font-size:12px; margin-bottom:-1px; padding:5px 7px; text-align:left; text-overflow:ellipsis; white-space:nowrap; overflow:hidden; cursor:pointer } - .buttn:hover { background:#f8f8f8 } - .buttn.active, .bttn.active:hover { background:#428bca; background-image:linear-gradient(to bottom, #428bca 0px, #3278b3 100%); border-color:#3278b3; color:#fff; cursor:default } - - .tabs { list-style:outside none none; margin:0 0 5px; padding:0; border-bottom:1px solid #ddd } - .tabs:after { clear:both } - .tabs li { float:left; margin-bottom:-1px; display:block; position:relative } - .tabs li div { border:1px solid transparent; border-radius:4px 4px 0 0; line-height:20px; margin-right:2px; padding:10px 15px; color:#428bca } - .tabs li div:hover { border-color:#eee #eee #ddd; background:#eee; cursor:pointer; } - .tabs li.active div { background:#fff; border-color:#ddd #ddd transparent; border-style:solid; border-width:1px; color:#555; cursor:default } - .failure-mesg { color:#900 } - .failure-trace { color:#333; white-space:pre; overflow:auto } - - .link { color:#428BCA; padding:5px 15px 5px 5px; text-decoration:underline; cursor:pointer } - .link.active { color:#333; text-decoration:none; cursor:default } - - .chart { padding:0; margin:0; width:890px } - .chart svg { height:300px; padding:0; margin:0; overflow:visible; float:right } - .chart.lower svg { height:180px } - 
.chart-label-y { font-size:12px; position:relative; top:5px; padding:0; margin:0 } - - .expandable { cursor:pointer } - .clearfix { clear:both } - .sortable > .arrow { display:inline-block; width:12px; height:inherit; color:#c90 } - .content-main { margin:0 5px; display:block; float:left } -{% endblock %} - -{% block media_queries %} - @media only screen and (min-width: 320px) { .content-wrap { width:900px } .content-main { width:600px } } - @media only screen and (min-width: 900px) { .content-wrap { width:880px } .content-main { width:590px } } - @media only screen and (min-width: 1000px) { .content-wrap { width:980px } .content-main { width:690px } } - @media only screen and (min-width: 1100px) { .content-wrap { width:1080px } .content-main { width:790px } } - @media only screen and (min-width: 1200px) { .content-wrap { width:1180px } .content-main { width:890px } } -{% endblock %} - -{% block body_attr %} ng-controller="Controller"{% endblock %} - -{% block header_text %}task results{% endblock %} - -{% block content %} -{% raw %} - - -
    -
    - - -
    -
    - - -
    -
    - -
    - -
    -

    Task overview

    - - - - - - - - - -
    - Scenario - - - - - - Load duration (s) - - - - - - Full duration (s) - - - - - - Iterations - - - - - - Runner - - - - - - Errors - - - - - - Hooks - - - - - - Success (SLA) - - - - -
    {{sc.ref}} - {{sc.load_duration | number:3}} - {{sc.full_duration | number:3}} - {{sc.iterations_count}} - {{sc.runner}} - {{sc.errors.length}} - {{sc.hooks.length}} - - - -
    -
    - -
    -

    Input file

    -
    {{source}}
    -
    - -
    -

    {{scenario.cls}}.{{scenario.name}} ({{scenario.full_duration | number:3}}s)

    -
    - {{scenario.description}} -
    -
      -
    • -
      {{t.name}}
      -
    • -
      -
    -
    - - - - - - - - - - - - -
    - -
    -
    -{% endraw %} -{% endblock %} - -{% block js_after %} - if (! window.angular) {(function(f){ - f(document.getElementById("content-nav"), "none"); - f(document.getElementById("content-main"), "none"); - f(document.getElementById("page-error"), "block").textContent = "Failed to load AngularJS framework" - })(function(e, s){e.style.display = s; return e})} -{% endblock %} diff --git a/rally/ui/templates/task/trends.html b/rally/ui/templates/task/trends.html deleted file mode 100644 index 79ac0d55..00000000 --- a/rally/ui/templates/task/trends.html +++ /dev/null @@ -1,448 +0,0 @@ -{% extends "/base.html" %} - -{% block html_attr %} ng-app="App"{% endblock %} - -{% block title_text %}Rally Tasks Trends{% endblock %} - -{% block libs %} - {% if include_libs %} - - - {% else %} - - - - - {% endif %} -{% endblock %} - -{% block js_before %} - "use strict"; - {{ include_raw_file("/task/directive_widget.js") }} - var controllerFunction = function($scope, $location) { - $scope.data = {{ data }}; -{% raw %} - $scope.location = { - /* #/path/hash/sub/div */ - normalize: function(str) { - /* Remove unwanted characters from string */ - if (typeof str !== "string") { return "" } - return str.replace(/[^\w\-\.]/g, "") - }, - uri: function(obj) { - /* Getter/Setter */ - if (! obj) { - var uri = {path: "", hash: "", sub: "", div: ""}; - var arr = ["div", "sub", "hash", "path"]; - angular.forEach($location.url().split("/"), function(value){ - var v = $scope.location.normalize(value); - if (v) { var k = arr.pop(); if (k) { this[k] = v }} - }, uri); - return uri - } - var arr = [obj.path, obj.hash, obj.sub, obj.div], res = []; - for (var i in arr) { if (! arr[i]) { break }; res.push(arr[i]) } - return $location.url("/" + res.join("/")) - }, - path: function(path, hash) { - /* Getter/Setter */ - if (path === "") { return this.uri({}) } - path = this.normalize(path); - var uri = this.uri(); - if (! path) { return uri.path } - uri.path = path; - var _hash = this.normalize(hash); - if (_hash || hash === "") { uri.hash = _hash } - return this.uri(uri) - }, - hash: function(hash) { - /* Getter/Setter */ - if (hash) { this.uri({path:this.uri().path, hash:hash}) } - return this.uri().hash - } - } - - /* Dispatch */ - - $scope.route = function(uri) { - if (! $scope.wload_map) { return } - if (uri.path in $scope.wload_map) { - $scope.view = {is_wload:true}; - $scope.wload = $scope.wload_map[uri.path]; - $scope.nav_idx = $scope.nav_map[uri.path]; - $scope.showTab(uri); - } else { - $scope.wload = null; - $scope.view = {is_main:true} - } - } - - $scope.$on("$locationChangeSuccess", function (event, newUrl, oldUrl) { - $scope.route($scope.location.uri()) - }); - - $scope.showNav = function(nav_idx) { $scope.nav_idx = nav_idx } - - /* Tabs */ - - $scope.tabs = [ - { - id: "total", - name: "Total", - visible: function(){ return true } - }, { - id: "actions", - name: "Atomic actions", - visible: function(){ return ($scope.wload.length !== 1) && $scope.wload.actions.length } - }, { - id: "config", - name: "Configuration", - visible: function(){ return !! $scope.wload.config.length } - } - ]; - $scope.tabs_map = {}; - angular.forEach($scope.tabs, - function(tab){ this[tab.id] = tab }, $scope.tabs_map); - - $scope.showTab = function(uri) { - $scope.tab = uri.hash in $scope.tabs_map ? 
uri.hash : "total" - } - - for (var i in $scope.tabs) { - $scope.tabs[i].isVisible = function() { - if ($scope.wload) { - if (this.visible()) { return true } - - /* If tab should be hidden but is selected - show another one */ - if (this.id === $scope.location.hash()) { - for (var i in $scope.tabs) { - var tab = $scope.tabs[i]; - if (tab.id != this.id && tab.visible()) { - $scope.tab = tab.id; - return false - } - } - } - } - return false - } - } - - /* Other helpers */ - - $scope.showError = function(message) { - return (function (e) { - e.style.display = "block"; - e.textContent = message - })(document.getElementById("page-error")) - } - - /* Initialization */ - - angular.element(document).ready(function(){ - if (! $scope.data.length) { - return $scope.showError("No data...") - } - - /* Compose data mapping */ - - $scope.nav = []; - $scope.nav_map = {}; - $scope.wload_map = {}; - var prev_cls, prev_met, met = [], itr = 0, cls_idx = 0; - - for (var idx in $scope.data) { - var w = $scope.data[idx]; - if (! prev_cls) { - prev_cls = w.cls - } - else if (prev_cls !== w.cls) { - $scope.nav.push({name:prev_cls, met:met, idx:cls_idx}); - prev_cls = w.cls; - met = []; - itr = 1; - cls_idx += 1 - } - - if (prev_met !== w.met) { itr = 1 }; - w.ref = $scope.location.normalize(w.cls+"."+w.met+(itr > 1 ? "-"+itr : "")); - w.order_idx = itr > 1 ? " ["+itr+"]" : "" - $scope.wload_map[w.ref] = w; - $scope.nav_map[w.ref] = cls_idx; - met.push({name:w.met, itr:itr, idx:idx, order_idx:w.order_idx, ref:w.ref}); - prev_met = w.met; - itr += 1; - } - - if (met.length) { - $scope.nav.push({name:prev_cls, met:met, idx:cls_idx}) - } - - /* Start */ - - $scope.route($scope.location.uri()); - $scope.$digest() - }); - }; - - if (typeof angular === "object") { - angular.module("App", []) - .controller("Controller", ["$scope", "$location", controllerFunction]) - .directive("widget", widgetDirective) - } -{% endraw %} -{% endblock %} - -{% block css %} - .aside { margin:0 20px 0 0; display:block; width:255px; float:left } - .aside > div { margin-bottom: 15px } - .aside > div div:first-child { border-top-left-radius:4px; border-top-right-radius:4px } - .aside > div div:last-child { border-bottom-left-radius:4px; border-bottom-right-radius:4px } - .navcls { color:#678; background:#eee; border:1px solid #ddd; margin-bottom:-1px; display:block; padding:8px 9px; font-weight:bold; text-align:left; overflow:hidden; text-overflow:ellipsis; white-space:nowrap; cursor:pointer } - .navcls.expanded { color:#469 } - .navcls.active { background:#428bca; background-image:linear-gradient(to bottom, #428bca 0px, #3278b3 100%); border-color:#3278b3; color:#fff } - .navmet { color:#555; background:#fff; border:1px solid #ddd; font-size:12px; display:block; margin-bottom:-1px; padding:8px 10px; text-align:left; text-overflow:ellipsis; white-space:nowrap; overflow:hidden; cursor:pointer } - .navmet:hover { background:#f8f8f8 } - .navmet.active, .navmet.active:hover { background:#428bca; background-image:linear-gradient(to bottom, #428bca 0px, #3278b3 100%); border-color:#3278b3; color:#fff } - .navmet.single, .single, .single td { color:#999 } - .navmet.active.single { color:#ccc } - - .tabs { list-style:outside none none; margin:0 0 5px; padding:0; border-bottom:1px solid #ddd } - .tabs:after { clear:both } - .tabs li { float:left; margin-bottom:-1px; display:block; position:relative } - .tabs li div { border:1px solid transparent; border-radius:4px 4px 0 0; line-height:20px; margin-right:2px; padding:10px 15px; color:#428bca } - .tabs li 
div:hover { border-color:#eee #eee #ddd; background:#eee; cursor:pointer; } - .tabs li.active div { background:#fff; border-color:#ddd #ddd transparent; border-style:solid; border-width:1px; color:#555; cursor:default } - .failure-mesg { color:#900 } - .failure-trace { color:#333; white-space:pre; overflow:auto } - - .link { color:#428BCA; padding:5px 15px 5px 5px; text-decoration:underline; cursor:pointer } - .link.active { color:#333; text-decoration:none } - - .chart { padding:0; margin:0; width:890px } - .chart svg { height:300px; padding:0; margin:0; overflow:visible; float:right } - .chart.lower svg { height:180px } - .chart-label-y { font-size:12px; position:relative; top:5px; padding:0; margin:0 } - - .clearfix { clear:both } - .sortable > .arrow { display:inline-block; width:12px; height:inherit; color:#c90 } - .content-main { margin:0 5px; display:block; float:left } - .content-wrap { width:900px } - - .chart-title { color:#f60; font-size:20px; padding:8px 0 3px } -{% endblock %} - -{% block media_queries %} - @media only screen and (min-width: 320px) { .content-wrap { width:900px } .content-main { width:600px } } - @media only screen and (min-width: 900px) { .content-wrap { width:880px } .content-main { width:590px } } - @media only screen and (min-width: 1000px) { .content-wrap { width:980px } .content-main { width:690px } } - @media only screen and (min-width: 1100px) { .content-wrap { width:1080px } .content-main { width:790px } } - @media only screen and (min-width: 1200px) { .content-wrap { width:1180px } .content-main { width:890px } } -{% endblock %} - -{% block body_attr %} ng-controller="Controller"{% endblock %} - -{% block header_text %}tasks trends report{% endblock %} - -{% block content %} -{% raw %} - - -
    -
    - -
    -
    - - -
    -
    - -
    - - -
    -

    Trends overview

    - - - - - - - - - -
    - Scenario - - - - - - Number of runs - - - - - - Min duration - - - - - - Max duration - - - - - - Avg duration - - - - - - SLA - - - - -
    {{w.ref}} - {{w.length}} - - - - {{w.stat.min | number:4}} - - - - {{w.stat.max | number:4}} - - - - {{w.stat.avg | number:4}} - - - -
    -
    - -
    -
    Compare workload runs
    -

    {{wload.cls}}.{{wload.met}}{{wload.order_idx}}

    -
      -
    • -
      {{t.name}}
      -
    • -
      -
    -
    - - - - - - -
    - -
    -
    -{% endraw %} -{% endblock %} - -{% block js_after %} - if (! window.angular) {(function(f){ - f(document.getElementById("content-nav"), "none"); - f(document.getElementById("content-main"), "none"); - f(document.getElementById("page-error"), "block").textContent = "Failed to load AngularJS framework" - })(function(e, s){e.style.display = s; return e})} -{% endblock %} diff --git a/rally/ui/templates/verification/report.html b/rally/ui/templates/verification/report.html deleted file mode 100644 index e6b64bb7..00000000 --- a/rally/ui/templates/verification/report.html +++ /dev/null @@ -1,325 +0,0 @@ -{% extends "/base.html" %} - -{% block html_attr %} ng-app="App" ng-controller="Controller" id="page-html"{% endblock %} - -{% block title_text %}{% raw %}{{title}}{% endraw %}{% endblock %} - -{% block libs %} - {% if include_libs %} - - {% else %} - - {% endif %} -{% endblock %} - -{% block js_before %} - "use strict"; - {{ include_raw_file("/task/directive_widget.js") }} - var controllerFunction = function($scope, $location) { - $scope.data = {{ data }}; - - /* Calculate columns width in percent */ - var td_ctr_width = 4; - var td_result_width = Math.round(1 / ($scope.data.uuids.length+3) * 100); - - $scope.td_width_ = { - counter: td_ctr_width, - test_name: (100 - td_ctr_width - (td_result_width * $scope.data.uuids.length)), - test_result: td_result_width - } - - $scope.td_width = (function(vers_num) { - var uuid_w = Math.round(1 / (vers_num+3) * 100); - return {test: 100 - (uuid_w * vers_num), - uuid: uuid_w} - })($scope.data.uuids.length) - - var bitmask = {"success": 1, - "skip": 2, - "xfail": 4, - "uxsuccess": 8, - "fail": 16}; - - for (var i in $scope.data.tests) { - var t = $scope.data.tests[i]; - var bits = 0; - for (var uuid in t.by_verification) { - var status = t.by_verification[uuid].status; - if (status in bitmask) { - bits |= bitmask[status] - } - } - $scope.data.tests[i].filter = bits; - } - - $scope.set_filter = function(status) { - if (status in $scope.state) { - $scope.state[status] = !$scope.state[status]; - $scope.filter_bits ^= bitmask[status] - } - } - - $scope.state = {"success": true, - "skip": true, - "xfail": true, - "uxsuccess": true, - "fail": true}; - - $scope.filter_by_status = function(test, index, arr) { - return test.filter & $scope.filter_bits - } - - $scope.filter_bits = (function(filter){ - var bits = 0; - for (var status in $scope.state){ - if ($scope.state[status]) { bits ^= bitmask[status] } - } - return bits - })(); - - $scope.toggle_filters_flag = true; - $scope.toggle_filters = function() { - if ($scope.toggle_filters_flag) { - $scope.toggle_filters_flag = false; - $scope.state = {"success": false, - "skip": false, - "xfail": false, - "uxsuccess": false, - "fail": false}; - $scope.filter_bits = 0 - } else { - $scope.toggle_filters_flag = true - $scope.state = {"success": true, - "skip": true, - "xfail": true, - "uxsuccess": true, - "fail": true}; - $scope.filter_bits = 31 - } - } - - var title = "verification result"; - - if ($scope.data.uuids.length > 1) { - title = "verifications results" - } - - $scope.title = title; - - $scope.srt_dir = false; - - $scope.get_tests_count = function() { - var ctr = 0; - for (var i in $scope.data.tests) { - if ($scope.data.tests[i].filter & $scope.filter_bits) { - ctr++ - } - } - return ctr - } - - var title = angular.element(document.getElementById("page-header")); - var header = angular.element(document.getElementById("content-header")); - var tests = angular.element(document.getElementById("tests")); - var 
sync_positions = function() { - var title_h = title[0].offsetHeight; - var header_h = header[0].offsetHeight; - header.css({top:title_h+"px"}) - tests.css({"margin-top": (title_h+header_h)+"px"}); - } - - /* Make page head sticky */ - window.onload = function() { - title.css({position:"fixed", top:0, width:"100%"}); - header.css({position:"fixed", width:"100%", background:"#fff"}); - - sync_positions(); - window.onresize = sync_positions; - - var goup = document.getElementById("button-goup"); - goup.onclick = function () { scrollTo(0, 0) }; - window.onscroll = function() { - if (window.scrollY > 50) { - goup.style.display = "block"; - } else { - goup.style.display = "none"; - } - } - } - - $scope.show_header = true; - $scope.toggle_header = (function(e) { - return function() { - $scope.show_header = (e.style.display === "none"); - e.style.display = $scope.show_header ? "table" : "none"; - sync_positions() - } - })(document.getElementById("verifications")) - - }; - - if (typeof angular === "object") { - angular.module("App", []) - .controller("Controller", ["$scope", "$location", controllerFunction]) - .directive("widget", widgetDirective) - } -{% endblock %} - -{% block css %} - div.header {margin:0 !important} - div.header .content-wrap { padding-left:10px } - .status.status-success { background: #cfc; color: #333 } - .status.status-uxsuccess { background: #ffd7af; color: #333 } - .status.status-fail { background: #fbb; color: #333 } - .status.status-xfail { background: #ffb; color: #333 } - .status.status-skip { background: #ccf5ff; color: #333 } - .status.checkbox { font-size:18px; text-align:center; cursor:pointer; padding:0 } - .column { display:block; float:left; padding:4px 0 4px 8px; box-sizing:border-box; - background:#fff; font-size:12px; font-weight:bold; - border:#ccc solid; border-width:0 0 1px } - .button { margin:0 5px; padding:0 8px 1px; background:#47a; color:#fff; cursor:pointer; - border:1px #036 solid; border-radius:11px; font-size:12px; font-weight:normal; - opacity:.8} - .button:hover { opacity:1 } - #button-goup { padding:3px 10px 5px; text-align:center; cursor:pointer; - background:#fff; color:#036; line-height:14px; font-size:14px; - position:fixed; bottom:0; right:10px; - border:#ccc solid; border-width:1px 1px 0; border-radius:15px 15px 0 0} -{% endblock %} - -{% block css_content_wrap %}width:100%; padding:0{% endblock %} - -{% block body_attr %} id="page-body" style="position:relative"{% endblock %} - -{% block header_text %}{% raw %}{{title}}{% endraw %}{% endblock %} - -{% block content %} - - {% raw %} -

    processing ...

    - -
    - - - - - - - - - - - -
    Verification UUID - Status - Started at - Finished at - Tests count - Tests duration, sec - success - skipped - expected failures - unexpected success - failures -
    {{uuid}} - {{data.verifications[uuid].status}} - {{data.verifications[uuid].started_at}} - {{data.verifications[uuid].finished_at}} - {{data.verifications[uuid].tests_count}} - {{data.verifications[uuid].tests_duration}} - {{data.verifications[uuid].success}} - {{data.verifications[uuid].skipped}} - {{data.verifications[uuid].expected_failures}} - {{data.verifications[uuid].unexpected_success}} - {{data.verifications[uuid].failures}} -
    - Filter tests by status: - - - - - - - - - - - - - - - -
    - - -
    - - Toggle Header - - - Toggle Tags - - - Toggle All Filters - -
    -
    - -
    - - Test name - (shown {{get_tests_count()}}) - - - - - -
    -
    - {{uuid}} -
    -
    -
    - - - - - - - - -
    - {{t.name}} -
    - {{tag}} -
    -
    -
    - {{t.by_verification[uuid].status}} {{t.by_verification[uuid].duration}} -
    -
    - – -
    -
    -
    -
    {{uuid}}
    -
    {{t.by_verification[uuid].details}}
    -
    -
    - - {% endraw %} - -{% endblock %} diff --git a/rally/ui/utils.py b/rally/ui/utils.py deleted file mode 100644 index 200c7261..00000000 --- a/rally/ui/utils.py +++ /dev/null @@ -1,32 +0,0 @@ -# Copyright 2014: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import jinja2 - - -def get_template(template): - - def include_raw_file(file_name): - try: - return jinja2.Markup(loader.get_source(env, file_name)[0]) - except jinja2.TemplateNotFound: - # NOTE(amaretskiy): re-raise error to make its message clear - raise IOError("File not found: %s" % file_name) - - loader = jinja2.PackageLoader("rally.ui", "templates") - env = jinja2.Environment(loader=loader) - env.globals["include_raw_file"] = include_raw_file - - return env.get_template(template) diff --git a/rally/verification/__init__.py b/rally/verification/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/rally/verification/context.py b/rally/verification/context.py deleted file mode 100644 index a10dec94..00000000 --- a/rally/verification/context.py +++ /dev/null @@ -1,49 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import functools - -from rally.common.plugin import plugin -from rally.task import context - -# all VerifierContexts should be always hidden -configure = functools.partial(context.configure, hidden=True) - - -@plugin.base() -class VerifierContext(context.BaseContext): - """Verifier context that will be run before starting a verification.""" - - def __init__(self, ctx): - super(VerifierContext, self).__init__(ctx) - self.verification = self.context.get("verification", {}) - self.verifier = self.context["verifier"] - - @classmethod - def validate(cls, config): - # do not validate jsonschema. - pass - - -class ContextManager(context.ContextManager): - - @staticmethod - def validate(ctx): - for name, config in ctx.items(): - VerifierContext.get(name, allow_hidden=True).validate(config) - - def _get_sorted_context_lst(self): - return sorted([ - VerifierContext.get(name, allow_hidden=True)(self.context_obj) - for name in self.context_obj["config"].keys()]) diff --git a/rally/verification/manager.py b/rally/verification/manager.py deleted file mode 100644 index a00b3a83..00000000 --- a/rally/verification/manager.py +++ /dev/null @@ -1,422 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import abc -import inspect -import os -import re -import shutil -import sys - -import six - -from rally.common.i18n import _LE, _LI -from rally.common.io import subunit_v2 -from rally.common import logging -from rally.common.plugin import plugin -from rally import exceptions -from rally.verification import context -from rally.verification import utils - - -LOG = logging.getLogger(__name__) - -URL_RE = re.compile( - r"^(?:http|ftp)s?://" # http:// or https:// - r"(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+" # domain - r"(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|" # domain - r"localhost|" # localhost - r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})" # IP - r"(?::\d+)?" # optional port - r"(?:/?|[/?]\S+)$", re.IGNORECASE) - - -class VerifierSetupFailure(exceptions.RallyException): - error_code = 533 - msg_fmt = "Failed to set up verifier '%(verifier)s': %(message)s" - - -def configure(name, namespace="default", default_repo=None, - default_version=None, context=None): - """Decorator to configure plugin's attributes. - - :param name: Plugin name that is used for searching purpose - :param namespace: Plugin namespace - :param default_repo: Default repository to clone - :param default_version: Default version to checkout - :param context: List of contexts that should be executed for verification - """ - def decorator(plugin_inst): - plugin_inst = plugin.configure(name, platform=namespace)(plugin_inst) - plugin_inst._meta_set("default_repo", default_repo) - plugin_inst._meta_set("default_version", default_version) - plugin_inst._meta_set("context", context or {}) - return plugin_inst - - return decorator - - -@plugin.base() -@six.add_metaclass(abc.ABCMeta) -class VerifierManager(plugin.Plugin): - """Verifier base class. - - This class provides an interface for operating specific tool. - """ - - # These dicts will be used for building docs. PS: we should find a better - # place for them - RUN_ARGS = {"pattern": "a regular expression of tests to launch.", - "concurrency": "Number of processes to be used for launching " - "tests. In case of 0 value, number of processes" - " will be equal to number of CPU cores.", - "load_list": "a list of tests to launch.", - "skip_list": "a list of tests to skip (actually, it is a dict " - "where keys are names of tests, values are " - "reasons).", - "xfail_list": "a list of tests that are expected to fail " - "(actually, it is a dict where keys are names " - "of tests, values are reasons)."} - - @classmethod - def _get_doc(cls): - run_args = {} - for parent in inspect.getmro(cls): - if hasattr(parent, "RUN_ARGS"): - for k, v in parent.RUN_ARGS.items(): - run_args.setdefault(k, v) - - doc = cls.__doc__ or "" - doc += "\n**Running arguments**:\n%s" % "\n".join( - sorted([" * *%s*: %s" % (k, v) for k, v in run_args.items()])) - - doc += "\n**Installation arguments**:\n" - doc += (" * *system_wide*: Whether or not to use the system-wide " - "environment for verifier instead of a virtual environment. " - "Defaults to False.\n" - " * *source*: Path or URL to the repo to clone verifier from." 
- " Defaults to %(default_source)s\n" - " * *version*: Branch, tag or commit ID to checkout before " - "verifier installation. Defaults to '%(default_version)s'." - % {"default_source": cls._meta_get("default_repo"), - "default_version": cls._meta_get( - "default_version") or "master"}) - - return doc - - def __init__(self, verifier): - """Init a verifier manager. - - :param verifier: `rally.common.objects.verifier.Verifier` instance - """ - self.verifier = verifier - - @property - def base_dir(self): - return os.path.expanduser( - "~/.rally/verification/verifier-%s" % self.verifier.uuid) - - @property - def home_dir(self): - return os.path.join(self.base_dir, "for-deployment-%s" - % self.verifier.deployment["uuid"]) - - @property - def repo_dir(self): - return os.path.join(self.base_dir, "repo") - - @property - def venv_dir(self): - return os.path.join(self.base_dir, ".venv") - - @property - def environ(self): - env = os.environ.copy() - if not self.verifier.system_wide: - # activate virtual environment - env["VIRTUAL_ENV"] = self.venv_dir - env["PATH"] = "%s:%s" % ( - os.path.join(self.venv_dir, "bin"), env["PATH"]) - return env - - def validate_args(self, args): - """Validate given arguments to be used for running verification. - - :param args: A dict of arguments with values - """ - - # NOTE(andreykurilin): By default we do not use jsonschema here. - # So it cannot be extended by inheritors => requires duplication. - if "pattern" in args: - if not isinstance(args["pattern"], six.string_types): - raise exceptions.ValidationError( - "'pattern' argument should be a string.") - if "concurrency" in args: - if (not isinstance(args["concurrency"], int) or - args["concurrency"] < 0): - raise exceptions.ValidationError( - "'concurrency' argument should be a positive integer or " - "zero.") - if "load_list" in args: - if not isinstance(args["load_list"], list): - raise exceptions.ValidationError( - "'load_list' argument should be a list of tests.") - if "skip_list" in args: - if not isinstance(args["skip_list"], dict): - raise exceptions.ValidationError( - "'skip_list' argument should be a dict of tests " - "where keys are test names and values are reasons.") - if "xfail_list" in args: - if not isinstance(args["xfail_list"], dict): - raise exceptions.ValidationError( - "'xfail_list' argument should be a dict of tests " - "where keys are test names and values are reasons.") - - def validate(self, run_args): - """Validate a verifier context and run arguments.""" - context.ContextManager.validate(self._meta_get("context")) - self.validate_args(run_args) - - def _clone(self): - """Clone a repo and switch to a certain version.""" - source = self.verifier.source or self._meta_get("default_repo") - if not URL_RE.match(source) and not os.path.exists(source): - raise exceptions.RallyException("Source path '%s' is not valid." - % source) - - if logging.is_debug(): - LOG.debug("Cloning verifier repo from %s into %s.", source, - self.repo_dir) - else: - LOG.info("Cloning verifier repo from %s.", source) - - cmd = ["git", "clone", source, self.repo_dir] - - default_version = self._meta_get("default_version") - if default_version and default_version != "master": - cmd.extend(["-b", default_version]) - - utils.check_output(cmd) - - version = self.verifier.version - if version: - LOG.info("Switching verifier repo to the '%s' version." 
% version) - utils.check_output(["git", "checkout", version], cwd=self.repo_dir) - else: - output = utils.check_output(["git", "describe", "--all"], - cwd=self.repo_dir).strip() - if output.startswith("heads/"): # it is a branch - version = output[6:] - else: - head = utils.check_output(["git", "rev-parse", "HEAD"], - cwd=self.repo_dir).strip() - if output.endswith(head[:7]): # it is a commit ID - version = head - else: # it is a tag - version = output - - self.verifier.update_properties(version=version) - - def install(self): - """Clone and install a verifier.""" - utils.create_dir(self.base_dir) - - self._clone() - - if self.verifier.system_wide: - self.check_system_wide() - else: - self.install_venv() - - def uninstall(self, full=False): - """Uninstall a verifier. - - :param full: If False (default behaviour), only deployment-specific - data will be removed - """ - path = self.base_dir if full else self.home_dir - if os.path.exists(path): - shutil.rmtree(path) - - def install_venv(self): - """Install a virtual environment for a verifier.""" - if os.path.exists(self.venv_dir): - # NOTE(andreykurilin): It is necessary to remove the old env while - # performing update action. - LOG.info("Deleting old virtual environment.") - shutil.rmtree(self.venv_dir) - - LOG.info("Creating virtual environment. It may take a few minutes.") - - LOG.debug("Initializing virtual environment in %s directory.", - self.venv_dir) - utils.check_output(["virtualenv", "-p", sys.executable, self.venv_dir], - cwd=self.repo_dir, - msg_on_err="Failed to initialize virtual env " - "in %s directory." % self.venv_dir) - - LOG.debug("Installing verifier in virtual environment.") - # NOTE(ylobankov): Use 'develop mode' installation to provide an - # ability to advanced users to change tests or - # develop new ones in verifier repo on the fly. - utils.check_output(["pip", "install", "-e", "./"], - cwd=self.repo_dir, env=self.environ) - - def check_system_wide(self, reqs_file_path=None): - """Check that all required verifier packages are installed.""" - LOG.debug("Checking system-wide packages for verifier.") - import pip - reqs_file_path = reqs_file_path or os.path.join(self.repo_dir, - "requirements.txt") - required_packages = set( - [r.name.lower() for r in pip.req.parse_requirements( - reqs_file_path, session=False)]) - installed_packages = set( - [r.key for r in pip.get_installed_distributions()]) - missed_packages = required_packages - installed_packages - if missed_packages: - raise VerifierSetupFailure( - "Missed package(s) for system-wide installation found. " - "Please install '%s'." % "', '".join(sorted(missed_packages)), - verifier=self.verifier.name) - - def checkout(self, version): - """Switch a verifier repo.""" - LOG.info("Switching verifier repo to the '%s' version.", version) - utils.check_output(["git", "checkout", "master"], cwd=self.repo_dir) - utils.check_output(["git", "remote", "update"], cwd=self.repo_dir) - utils.check_output(["git", "pull"], cwd=self.repo_dir) - utils.check_output(["git", "checkout", version], cwd=self.repo_dir) - - def configure(self, extra_options=None): - """Configure a verifier. - - :param extra_options: a dictionary with external verifier specific - options for configuration. 
- :raises NotImplementedError: This feature is verifier-specific, so you - should override this method in your plugin if it supports - configuration - """ - raise NotImplementedError( - _LI("'%s' verifiers don't support configuration at all.") - % self.get_name()) - - def is_configured(self): - """Check whether a verifier is configured or not.""" - return True - - def get_configuration(self): - """Get verifier configuration (e.g., the config file content).""" - return "" - - def override_configuration(self, new_configuration): - """Override verifier configuration. - - :param new_configuration: Content which should be used while overriding - existing configuration - :raises NotImplementedError: This feature is verifier-specific, so you - should override this method in your plugin if it supports - configuration - """ - raise NotImplementedError( - _LE("'%s' verifiers don't support configuration at all.") - % self.get_name()) - - def extend_configuration(self, extra_options): - """Extend verifier configuration with new options. - - :param extra_options: Options to be used for extending configuration - :raises NotImplementedError: This feature is verifier-specific, so you - should override this method in your plugin if it supports - configuration - """ - raise NotImplementedError( - _LE("'%s' verifiers don't support configuration at all.") - % self.get_name()) - - def install_extension(self, source, version=None, extra_settings=None): - """Install a verifier extension. - - :param source: Path or URL to the repo to clone verifier extension from - :param version: Branch, tag or commit ID to checkout before verifier - extension installation - :param extra_settings: Extra installation settings for verifier - extension - :raises NotImplementedError: This feature is verifier-specific, so you - should override this method in your plugin if it supports - extensions - """ - raise NotImplementedError( - _LE("'%s' verifiers don't support extensions.") % self.get_name()) - - def list_extensions(self): - """List all verifier extensions.""" - return [] - - def uninstall_extension(self, name): - """Uninstall a verifier extension. - - :param name: Name of extension to uninstall - :raises NotImplementedError: This feature is verifier-specific, so you - should override this method in your plugin if it supports - extensions - """ - raise NotImplementedError( - _LE("'%s' verifiers don't support extensions.") % self.get_name()) - - @abc.abstractmethod - def list_tests(self, pattern=""): - """List all verifier tests. - - :param pattern: Filter tests by given pattern - """ - - def parse_results(self, results_data): - """Parse subunit results data of a test run.""" - # TODO(andreykurilin): Support more formats. - return subunit_v2.parse(six.StringIO(results_data)) - - @abc.abstractmethod - def run(self, context): - """Run verifier tests. - - Verification Component API expects this method to return an - object. There is no special class; build it however you like, but it - should have the following properties: - - .. code-block:: none - - .totals = { - "tests_count": <total count of tests>, - "tests_duration": <total duration of tests>, - "failures": <count of failed tests>, - "skipped": <count of skipped tests>, - "success": <count of successful tests>, - "unexpected_success": - <count of unexpectedly successful tests>, - "expected_failures": - <count of expectedly failed tests> - } - - .tests = { - <test_id>: { - "status": <test status>, - "name": <test name>, - "duration": <test duration>, - "reason": <reason>, # optional - "traceback": <traceback> # optional - }, - ... 
- } - - """ diff --git a/rally/verification/reporter.py b/rally/verification/reporter.py deleted file mode 100755 index 3ebc7e0b..00000000 --- a/rally/verification/reporter.py +++ /dev/null @@ -1,106 +0,0 @@ -# Copyright 2016: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -""" -Reporter - its the mechanism for exporting rally verification into specified -system or formats. -""" - -import abc - -import jsonschema -import six - -from rally.common.plugin import plugin -from rally import consts - - -configure = plugin.configure - - -REPORT_RESPONSE_SCHEMA = { - "type": "object", - "$schema": consts.JSON_SCHEMA, - "properties": { - "files": { - "type": "object", - "patternProperties": { - ".{1,}": {"type": "string"} - } - }, - "open": { - "type": "string", - }, - "print": { - "type": "string" - } - }, - "additionalProperties": False -} - - -@plugin.base() -@six.add_metaclass(abc.ABCMeta) -class VerificationReporter(plugin.Plugin): - """Base class for all reporters for verifications.""" - - def __init__(self, verifications, output_destination): - """Init reporter - - :param verifications: list of results to generate report for - :param output_destination: destination of report - """ - super(VerificationReporter, self).__init__() - self.verifications = verifications - self.output_destination = output_destination - - @classmethod - @abc.abstractmethod - def validate(cls, output_destination): - """Validate destination of report. - - :param output_destination: Destination of report - """ - - @abc.abstractmethod - def generate(self): - """Generate report - - :returns: a dict with 3 optional elements: - - - key "files" with a dictionary of files to save on disk. - keys are paths, values are contents; - - key "print" - data to print at CLI level - - key "open" - path to file which should be open in case of - --open flag - """ - - @staticmethod - def make(reporter_cls, verifications, output_destination): - """Initialize reporter, generate and validate report. - - It is a base method which is called from API layer. It cannot be - overridden. Do not even try! :) - - :param reporter_cls: class of VerificationReporter to be used - :param verifications: list of results to generate report for - :param output_destination: destination of report - """ - report = reporter_cls(verifications, output_destination).generate() - - jsonschema.validate(report, REPORT_RESPONSE_SCHEMA) - - return report diff --git a/rally/verification/utils.py b/rally/verification/utils.py deleted file mode 100644 index d5add24d..00000000 --- a/rally/verification/utils.py +++ /dev/null @@ -1,95 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import os -import subprocess - -from oslo_utils import encodeutils -import six -from six.moves import configparser - -from rally.common import logging - - -LOG = logging.getLogger(__name__) - - -def check_output(*args, **kwargs): - """Run command with arguments and return its output. - - If the exit code was non-zero it raises a CalledProcessError. The - CalledProcessError object will have the return code in the returncode - attribute and output in the output attribute. - - The difference between check_output from subprocess package and this - function: - - * Additional arguments: - - "msg_on_err" argument. It is a message that should be written in case - of error. Reduces a number of try...except blocks - - "debug_output" argument(Defaults to True). Print or not output to - LOG.debug - * stderr is hardcoded to stdout - * In case of error, prints failed command and output to LOG.error - * Prints output to LOG.debug - - """ - msg_on_err = kwargs.pop("msg_on_err", None) - debug_output = kwargs.pop("debug_output", True) - - kwargs["stderr"] = subprocess.STDOUT - try: - output = subprocess.check_output(*args, **kwargs) - except subprocess.CalledProcessError as exc: - if msg_on_err: - LOG.error(msg_on_err) - LOG.error("Failed cmd: '%s'" % exc.cmd) - LOG.error("Error output: '%s'" % encodeutils.safe_decode(exc.output)) - raise - - if output and debug_output: - LOG.debug("Subprocess output: '%s'" % encodeutils.safe_decode(output)) - - return output - - -def create_dir(dir_path): - if not os.path.isdir(dir_path): - os.makedirs(dir_path) - - return dir_path - - -def extend_configfile(extra_options, conf_path): - conf_object = configparser.ConfigParser() - conf_object.read(conf_path) - - conf_object = add_extra_options(extra_options, conf_object) - with open(conf_path, "w") as configfile: - conf_object.write(configfile) - - raw_conf = six.StringIO() - conf_object.write(raw_conf) - - return raw_conf.getvalue() - - -def add_extra_options(extra_options, conf_object): - for section in extra_options: - if section not in (conf_object.sections() + ["DEFAULT"]): - conf_object.add_section(section) - for option, value in extra_options[section].items(): - conf_object.set(section, option, value) - - return conf_object diff --git a/requirements.txt b/requirements.txt deleted file mode 100644 index a88d3c74..00000000 --- a/requirements.txt +++ /dev/null @@ -1,53 +0,0 @@ -# The order of packages is significant, because pip processes them in the order -# of appearance. Changing the order has an impact on the overall integration -# process, which may cause wedges in the gate later. 
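For context, here is how the check_output(), create_dir() and extend_configfile() helpers from the deleted rally/verification/utils.py above were typically driven by the verifier manager. This is a minimal usage sketch, not code from the repo; the work directory, config path, section and option names are all made up:

    from rally.verification import utils

    # Run a git command; stderr is folded into stdout, output is logged
    # at debug level, and the custom message is logged if the command fails.
    utils.check_output(["git", "--version"],
                       msg_on_err="git does not seem to be installed")

    # Ensure a work directory exists (hypothetical path).
    utils.create_dir("/tmp/rally-verifier-demo")

    # Merge extra options into an INI-style config file; missing sections
    # are created, and the merged content is returned as a string.
    raw_conf = utils.extend_configfile(
        {"DEFAULT": {"debug": "True"},
         "demo-section": {"demo-option": "demo-value"}},
        "/tmp/rally-verifier-demo/verifier.conf")
    print(raw_conf)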
- -# Rally core dependencies -alembic>=0.8.10,<=0.9.2 # MIT -decorator>=3.4.0,<=4.0.11 # new BSD License -Jinja2>=2.8,!=2.9.0,!=2.9.1,!=2.9.2,!=2.9.3,!=2.9.4,<=2.9.6 # BSD -jsonschema>=2.0.0,!=2.5.0,<3.0.0 # MIT -netaddr>=0.7.13,!=0.7.16,<=0.7.19 # BSD -oslo.config>=4.0.0,!=4.3.0,!=4.4.0,<=4.7.0 # Apache Software License -oslo.db>=4.23.0,<=4.24.0 # Apache Software License -oslo.i18n>=2.1.0,!=3.15.2,<=3.15.3 # Apache Software License -oslo.log>=3.22.0,<=3.28.1 # Apache Software License -oslo.serialization>=1.10.0,<=2.19.0 # Apache Software License -oslo.utils>=3.20.0,<=3.26.0 # Apache Software License -paramiko>=2.0,<=2.2.1 # LGPL -pbr>=2.0.0,!=2.1.0,<=3.1.1 # Apache Software License -PrettyTable>=0.7.1,<0.8 # BSD -PyYAML>=3.10.0,<=3.12 # MIT -python-subunit>=0.0.18,<=1.2.0 -requests>=2.14.2,<=2.18.1 # Apache License, Version 2.0 -SQLAlchemy>=1.0.10,!=1.1.5,!=1.1.6,!=1.1.7,!=1.1.8,<=1.1.11 # MIT -six>=1.9.0,<=1.10.0 # MIT -virtualenv>=13.1.0,<=15.1.0 # MIT - -# OpenStack related -boto>=2.32.1,<=2.47.0 # MIT -gnocchiclient>=2.7.0,<=3.3.1 # Apache Software License -keystoneauth1==2.21.0 # Apache Software License -os-faults>=0.1.5,<=0.1.13 # Apache Software License -osprofiler>=1.4.0,<=1.10.1 # Apache Software License -python-ceilometerclient>=2.5.0,<=2.9.0 # Apache Software License -python-cinderclient>=2.1.0,<=2.2.0 # Apache Software License -python-designateclient>=1.5.0,<=2.6.0 # Apache License, Version 2.0 -python-glanceclient==2.7.0 # Apache License, Version 2.0 -python-heatclient>=1.6.1,<=1.10.0 # Apache Software License -python-ironicclient>=1.11.0,<=1.13.0 # Apache Software License -python-keystoneclient>=3.8.0,<=3.12.0 # Apache Software License -python-magnumclient>=2.0.0,<=2.6.0 # Apache Software License -python-manilaclient>=1.12.0,<=1.16.0 # Apache Software License -python-mistralclient==3.1.0 # Apache Software License -python-monascaclient>=1.1.0,<=1.6.0 # Apache Software License -python-muranoclient>=0.8.2,<=0.13.0 # Apache License, Version 2.0 -python-neutronclient==6.3.0 # Apache Software License -python-novaclient>=9.0.0,<=9.0.1 # Apache License, Version 2.0 -python-saharaclient>=1.1.0,<=1.2.0 # Apache License, Version 2.0 -python-senlinclient>=1.1.0,<=1.3.0 # Apache Software License -python-swiftclient>=3.2.0,<=3.3.0 # Apache Software License -python-troveclient>=2.2.0,<=2.10.0 # Apache Software License -python-watcherclient>=0.23.0,<=1.2.0 # Apache Software License -python-zaqarclient>=1.0.0,<=1.6.0 # Apache License Version 2.0 -kubernetes>=1.0.0b1,<=2.0.0 # Apache License Version 2.0 diff --git a/samples/README.rst b/samples/README.rst deleted file mode 100644 index 7ab94bf8..00000000 --- a/samples/README.rst +++ /dev/null @@ -1,20 +0,0 @@ -============================ -Content of samples directory -============================ - -deployments -~~~~~~~~~~~ - -Input configurations for the "rally deployment" command - - -plugins -~~~~~~~ - -Samples of Rally plugins. - - -tasks -~~~~~ - -Input configurations for the "rally task" command diff --git a/samples/deployments/README.rst b/samples/deployments/README.rst deleted file mode 100644 index dca1f1f8..00000000 --- a/samples/deployments/README.rst +++ /dev/null @@ -1,46 +0,0 @@ -Rally Deployments -================= - -Rally needs to be configured to use an OpenStack Cloud deployment before it -can benchmark the deployment. - -To configure Rally to use an OpenStack Cloud deployment, you need to create a -deployment configuration by supplying the endpoint and credentials, as follows: - -.. code-block:: - - rally deployment create --file --name my_cloud - - -If you don't have an OpenStack deployment, Rally can deploy one for you. -For samples of various deployments, take a look at the samples in the -**for_deploying_openstack_with_rally** directory. - - -existing.json -------------- - -Register an existing OpenStack cluster. - -existing-keystone-v3.json ------------------------- - -Register an existing OpenStack cluster that uses Keystone v3. - -existing-with-predefined-users.json -------------------------------------- - -If you are using a read-only backend in Keystone, such as LDAP or AD, then -you need this sample. Instead of creating new users, Rally will use the -already existing users that you specify in "users". - - - -existing-with-given-endpoint.json --------------------------------- - -Register an existing OpenStack cluster, with the parameter "endpoint" specified -to explicitly set the keystone management_url. Use this parameter if -keystone fails to set up management_url correctly. -For example, this parameter must be specified for a FUEL cluster -and has the value "http://:35357/v2.0/" diff --git a/samples/deployments/existing-keystone-v3-osprofiler.json b/samples/deployments/existing-keystone-v3-osprofiler.json deleted file mode 100644 index 89f96412..00000000 --- a/samples/deployments/existing-keystone-v3-osprofiler.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "type": "ExistingCloud", - "creds": { - "openstack": { - "auth_url": "http://example.net:5000/v3/", - "region_name": "RegionOne", - "endpoint_type": "public", - "admin": { - "username": "admin", - "password": "myadminpass", - "user_domain_name": "admin", - "project_name": "admin", - "project_domain_name": "admin" - }, - "https_insecure": false, - "https_cacert": "", - "profiler_hmac_key": "SECRET_KEY" - } - } -} diff --git a/samples/deployments/existing-keystone-v3.json b/samples/deployments/existing-keystone-v3.json deleted file mode 100644 index f483d97b..00000000 --- a/samples/deployments/existing-keystone-v3.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "type": "ExistingCloud", - "creds": { - "openstack": { - "auth_url": "http://example.net:5000/v3/", - "region_name": "RegionOne", - "endpoint_type": "public", - "admin": { - "username": "admin", - "password": "myadminpass", - "user_domain_name": "admin", - "project_name": "admin", - "project_domain_name": "admin" - }, - "https_insecure": false, - "https_cacert": "" - } - } -} diff --git a/samples/deployments/existing-with-given-endpoint.json b/samples/deployments/existing-with-given-endpoint.json deleted file mode 100644 index 0933a274..00000000 --- a/samples/deployments/existing-with-given-endpoint.json +++ /dev/null @@ -1,18 +0,0 @@ -{ - "type": "ExistingCloud", - "creds": { - "openstack": { - "auth_url": "http://example.net:5000/v2.0/", - "region_name": "RegionOne", - "endpoint_type": "public", - "endpoint": "http://:// example: http://172.16.0.2:35357/v2.0/", - "admin": { - "username": "admin", - "password": "pa55word", - "tenant_name": "demo" - }, - "https_insecure": false, - "https_cacert": "" - } - } -} diff --git a/samples/deployments/existing-with-predefined-users.json b/samples/deployments/existing-with-predefined-users.json deleted file mode 100644 index 1b961436..00000000 --- a/samples/deployments/existing-with-predefined-users.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "type": "ExistingCloud", - "creds": { - "openstack": { - "auth_url": "http://example.net:5000/v2.0/", - "region_name": "RegionOne", - "endpoint_type": "public", - "admin": { - "username": "admin", - "password": "pa55word", - "tenant_name": "demo" - }, - "users": [ - { - "username": "not_an_admin1", - "password": "password", - "tenant_name": "some_tenant" - }, - { - "username": "not_an_admin2", - "password": "password2", - "tenant_name": "some_tenant2" - } - ] - } - } -} diff --git a/samples/deployments/existing.json b/samples/deployments/existing.json deleted file mode 100644 index 53e12f0d..00000000 --- a/samples/deployments/existing.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "type": "ExistingCloud", - "creds": { - "openstack": { - "auth_url": "http://example.net:5000/v2.0/", - "region_name": "RegionOne", - "endpoint_type": "public", - "admin": { - "username": "admin", - "password": "myadminpass", - "tenant_name": "demo" - }, - "https_insecure": false, - "https_cacert": "" - } - } -} diff --git a/samples/deployments/for_deploying_openstack_with_rally/README.rst b/samples/deployments/for_deploying_openstack_with_rally/README.rst deleted file mode 100644 index a7f6b6d2..00000000 --- a/samples/deployments/for_deploying_openstack_with_rally/README.rst +++ /dev/null @@ -1,40 +0,0 @@ -Using Rally for OpenStack deployment -==================================== - -This directory contains various input files for the **rally deployment create** -command. This command will deploy OpenStack and register it in the Rally DB. - - -devstack-in-existing-servers.json --------------------------------- - -Deploy an OpenStack cloud with DevStack on the specified servers. - - -devstack-in-lxc.json -------------------- - -Deploy a DevStack cluster in LXC containers. - - -devstack-in-openstack.json ------------------------- - -Create the required VMs in the specified OpenStack cloud and deploy -OpenStack on them using DevStack. - - -devstack-lxc-engine-in-existing-servers.json ------------------------------------------- - -See *devstack-lxc-engine-in-existing-servers.rst* for details. - -fuel-ha.json ---------- - -Deploy a High Availability FUEL cluster. - -fuel-multinode.json ------------------ - -Deploy a Multinode FUEL cluster.
diff --git a/samples/deployments/for_deploying_openstack_with_rally/devstack-by-cobbler.json b/samples/deployments/for_deploying_openstack_with_rally/devstack-by-cobbler.json deleted file mode 100644 index a36a5982..00000000 --- a/samples/deployments/for_deploying_openstack_with_rally/devstack-by-cobbler.json +++ /dev/null @@ -1,57 +0,0 @@ -{ - "type": "MultihostEngine", - "controller": { - "type": "DevstackEngine", - "localrc": { - "ENABLED_SERVICES+": ",-n-cpu,-n-net", - "MULTI_HOST": "1" - }, - "provider": { - "type": "CobblerProvider", - "host": "172.29.74.8", - "user": "cobbler", - "password": "cobbler", - "selector": { - "profile": "OpenStackController", - "owners": "Owner" - } - } - }, - "nodes": [ - { - "type": "DevstackEngine", - "count": 1, - "localrc": { - "ENABLED_SERVICES": "n-net" - }, - "provider": { - "type": "CobblerProvider", - "host": "172.29.74.8", - "user": "cobbler", - "password": "cobbler", - "selector": { - "profile": "OpenStackNetwork", - "owners": "Owner" - } - } - }, - { - "type": "DevstackEngine", - "count": 1, - "localrc": { - "ENABLED_SERVICES": "n-cpu" - }, - "provider": { - "type": "CobblerProvider", - "host": "172.29.74.8", - "user": "cobbler", - "password": "cobbler", - "selector": { - "profile": "OpenStackCompute", - "owners": "Owner" - } - } - - } - ] -} diff --git a/samples/deployments/for_deploying_openstack_with_rally/devstack-in-existing-servers.json b/samples/deployments/for_deploying_openstack_with_rally/devstack-in-existing-servers.json deleted file mode 100644 index c5b71cf9..00000000 --- a/samples/deployments/for_deploying_openstack_with_rally/devstack-in-existing-servers.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "type": "DevstackEngine", - "devstack_repo": "https://git.openstack.org/openstack-dev/devstack", - "devstack_branch": "master", - "provider": { - "type": "ExistingServers", - "credentials": [{"user": "root", "host": "10.2.0.8"}] - } -} diff --git a/samples/deployments/for_deploying_openstack_with_rally/devstack-in-lxc.json b/samples/deployments/for_deploying_openstack_with_rally/devstack-in-lxc.json deleted file mode 100644 index 457616af..00000000 --- a/samples/deployments/for_deploying_openstack_with_rally/devstack-in-lxc.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "type": "DevstackEngine", - "local_conf": { - "VIRT_DRIVER": "fake" - }, - "provider": { - "type": "LxcProvider", - "containers_per_host": 4, - "container_name_prefix": "rally-providertest-02-", - "start_lxc_network": "10.100.1.0/28", - "tunnel_to": ["10.5.0.1"], - "distribution": "ubuntu", - "release": "raring", - "host_provider": { - "type": "ExistingServers", - "credentials": [{"user": "root", "host": "10.2.250.103"}] - } - } -} diff --git a/samples/deployments/for_deploying_openstack_with_rally/devstack-in-openstack.json b/samples/deployments/for_deploying_openstack_with_rally/devstack-in-openstack.json deleted file mode 100644 index 5049d072..00000000 --- a/samples/deployments/for_deploying_openstack_with_rally/devstack-in-openstack.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "type": "DevstackEngine", - "provider": { - "type": "OpenStackProvider", - "deployment_name": "Rally sample deployment", - "user": "admin", - "tenant": "admin", - "region": "RegionOne", - "flavor_id": "2", - "nics": [{"net-id": "97936015-128a-42f1-a3f2-1868ceeeeb6f"}], - "password": "admin", - "auth_url": "http://example.net:5000/v2.0", - "amount": 1, - "wait_for_cloud_init": true, - "image": { - "checksum": "5101b2013b31d9f2f96f64f728926054", - "name": "Ubuntu raring(added by rally)", - "format": 
"qcow2", - "userdata": "#cloud-config\r\n disable_root: false\r\n manage_etc_hosts: true\r\n", - "url": "http://cloud-images.ubuntu.com/raring/current/raring-server-cloudimg-amd64-disk1.img" - } - } -} diff --git a/samples/deployments/for_deploying_openstack_with_rally/devstack-lxc-engine-in-existing-servers.json b/samples/deployments/for_deploying_openstack_with_rally/devstack-lxc-engine-in-existing-servers.json deleted file mode 100644 index e0eb8922..00000000 --- a/samples/deployments/for_deploying_openstack_with_rally/devstack-lxc-engine-in-existing-servers.json +++ /dev/null @@ -1,48 +0,0 @@ -{ - "type": "MultihostEngine", - "controller": { - "type": "DevstackEngine", - "localrc": { - "MULTI_HOST": "1", - "VIRT_DRIVER": "fake", - "API_RATE_LIMIT": "False", - "ENABLED_SERVICES+": ",-n-cpu", - "SCREEN_LOGDIR": "$DEST/logs/screen" - }, - "provider": { - "type": "LxcProvider", - "containers_per_host": 1, - "container_name_prefix": "controller", - "distribution": "ubuntu", - "host_provider": { - "type": "ExistingServers", - "credentials": [{"user": "root", "host": "localhost"}] - } - } - }, - "nodes": [ - { - "type": "LxcEngine", - "distribution": "ubuntu", - "container_name": "devstack-compute", - "nodes_per_server": 64, - "provider": { - "type": "ExistingServers", - "credentials": [{"user": "root", "host": "localhost"}] - }, - "engine": { - "name": "DevstackEngine", - "localrc": { - "VIRT_DRIVER": "fake", - "DATABASE_TYPE": "mysql", - "MYSQL_HOST": "{controller_ip}", - "RABBIT_HOST": "{controller_ip}", - "GLANCE_HOSTPORT": "{controller_ip}:9292", - "API_RATE_LIMIT": "False", - "ENABLED_SERVICES": "n-cpu,n-net", - "SCREEN_LOGDIR": "$DEST/logs/screen" - } - } - } - ] -} diff --git a/samples/deployments/for_deploying_openstack_with_rally/devstack-lxc-engine-in-existing-servers.rst b/samples/deployments/for_deploying_openstack_with_rally/devstack-lxc-engine-in-existing-servers.rst deleted file mode 100644 index 0687685e..00000000 --- a/samples/deployments/for_deploying_openstack_with_rally/devstack-lxc-engine-in-existing-servers.rst +++ /dev/null @@ -1,228 +0,0 @@ -Devstack-LXC-engine-in-dummy -============================ - -How to deploy cloud - -Assume we have 1 server with lots of RAM and linux. It is strongly recommended -to use btrfs for lxc containers. Please note root access is mandatory. - -So, we need one controller node and 64 computes. All nodes can be deployed by DevstackEngine - - -Controller ----------- -:: - - "type": "DevstackEngine", - "local_conf": { - "MULTI_HOST": "1", - "VIRT_DRIVER": "fake", - "ENABLED_SERVICES+": ",-n-cpu", - }, - -Look carefully at ENABLED_SERVICES. Such syntax is translated to 'ENABLED_SERVICES+=,-n-cpu' -in local.conf. This means 'remove n-cpu from ENABLED_SERVICES'. - -Please note: VIRT_DRIVER=fake on controller node is mandatory. - -This node should be deployed in lxc container, so we use the LxcProvider:: - - "provider": { - "type": "LxcProvider", - "containers_per_host": 1, - "container_name_prefix": "controller", - "distribution": "ubuntu", - "host_provider": { - "name": "ExistingServers", - "credentials": [{"user": "root", "host": "localhost"}] - } - } - -ExistingServers is used as sub-provider, because we already have a linux box (localhost). - - -Computes --------- - -Next we need 64 compute nodes. This can be done by LxcEngine. LxcEngine deploys the first -compute instance via the devstack engine, then makes N clones using lxc-clone. 
- -:: - - "type": "LxcEngine", - "distribution": "ubuntu", - "container_name": "devstack-compute", - "nodes_per_server": 64, - "provider": { - "type": "ExistingServers", - "credentials": [{"user": "root", "host": "localhost"}] - }, - "engine": { - "name": "DevstackEngine", - "local_conf": { - "VIRT_DRIVER": "fake", - "DATABASE_TYPE": "mysql", - "MYSQL_HOST": "{controller_ip}", - "RABBIT_HOST": "{controller_ip}", - "GLANCE_HOSTPORT": "{controller_ip}:9292", - "ENABLED_SERVICES": "n-cpu,n-net", - } - } - -This is very similar to LxcProvider configuration: ExistingServers as sub-provider and DevstackEngine -as sub-engine. Please note controller's ip isn't known at the moment of configuratoin, so -MultihostEngine will replace {contoller_ip} pattern with actual address after first node is deployed. - -Also DATABASE_DRIVER is necessary because of bug in devstack. - - -MultihostEngine ---------------- - -The MultihostEngine configuration contains sections for configuring the controller and compute -nodes, for example:: - - "type": "MultihostEngine", - "controller": { - // CONTROLLER CONFIGURATION HERE - } - "nodes": [ - { - // NODES CONFIGURATION HERE - } - ] - -Here is an example of a complete configuration file, assembled from the snippets above:: - - { - "type": "MultihostEngine", - "controller": { - "type": "DevstackEngine", - "local_conf": { - "MULTI_HOST": "1", - "VIRT_DRIVER": "fake", - "API_RATE_LIMIT": "False", - "ENABLED_SERVICES+": ",-n-cpu", - "SCREEN_LOGDIR": "$DEST/logs/screen" - }, - "provider": { - "type": "LxcProvider", - "containers_per_host": 1, - "container_name_prefix": "controller", - "distribution": "ubuntu", - "host_provider": { - "type": "ExistingServers", - "credentials": [{"user": "root", "host": "localhost"}] - } - } - }, - "nodes": [ - { - "type": "LxcEngine", - "distribution": "ubuntu", - "container_name": "devstack-compute", - "nodes_per_server": 64, - "provider": { - "type": "ExistingServers", - "credentials": [{"user": "root", "host": "localhost"}] - }, - "engine": { - "name": "DevstackEngine", - "local_conf": { - "VIRT_DRIVER": "fake", - "DATABASE_TYPE": "mysql", - "MYSQL_HOST": "{controller_ip}", - "RABBIT_HOST": "{controller_ip}", - "GLANCE_HOSTPORT": "{controller_ip}:9292", - "API_RATE_LIMIT": "False", - "ENABLED_SERVICES": "n-cpu,n-net", - "SCREEN_LOGDIR": "$DEST/logs/screen" - } - } - } - ] - } - -Please note each compute node uses from 90M to 120M of RAM. - - -SSH Access ----------- - -The target host (localhost in this case) should be accessible via a password-less ssh key. -If necessary ssh keys can be setup as follows:: - - $ cd - $ ssh-keygen # just hit enter when asked for password - $ sudo mkdir /root/.ssh - $ sudo cat .ssh/id_rsa.pub >> /root/.ssh/authorized_keys - $ ssh root@localhost - # id - uid=0(root) gid=0(root) groups=0(root) - -Rally uses ssh for communication as most deployments are spread across multiple nodes. - - -Tunneling ---------- - -Both LxcProvider and LxcEngine have 'tunnel_to' configuration option. 
This is used -for cases when using more then one hardware nodes:: - - +--------------------------+ - | computes-1 | - | | - +---------------| lxcbr0 10.100.1.0/24 | - +--------------------------+ | | eth0 192.168.10.1 | - | | | | | - | rally | | +--------------------------+ - | |---------+ | - | eth0 10.1.1.20 | | | - | | v v +--------------------------+ - +--------------------------+ +---------------+ | computes-2 | - | | | | - | |<-------| lxcbr0 10.100.2.0/24 | - +--------------------------+ | IP NETWORK | | eth0 192.168.10.2 | - | controller | | | | | - | |---->| | +--------------------------+ - | eth0 192.168.1.13 | +---------------+ - | | ^ - |tunnels: | | ........... - |10.100.1/24->192.168.10.1 | | - |10.100.2/24->192.168.10.2 | | - |10.100.x/24->192.168.10.x | | +--------------------------+ - | | | | computes-n | - +--------------------------+ | | | - +---------------| lxcbr0 10.100.x.0/24 | - | eth0 192.168.10.x | - | | - +--------------------------+ - -Each box is a separate hardware node. All nodes can access each other via ip, but lxc containers -are only connected to isolated virtual networks within each node. For communication between -lxc containers ipip tunneling is used. In this example we need to connect all the lxc-containers -to controller node. So, we add the option "tunnel_to": ["192.168.1.13"]:: - - "type": "LxcEngine", - "distribution": "ubuntu", - "container_name": "devstack-compute", - "nodes_per_server": 64, - "start_lxc_network": "10.100.1.0/24", - "tunnel_to": ["10.1.1.20", "192.168.1.13"]: - "provider": { - //SOME PROVIDER WHICH RETURNS N NODES - //LxcEngine will create internal lxc - //network starts from 10.100.1.0/24 (see start_lxc_network) - //e.g 10.100.1.0/24, 10.100.2.0/24, ..., 10.100.n.0/24 - }, - "engine": { - "name": "DevstackEngine", - "local_conf": { - "VIRT_DRIVER": "fake", - "DATABASE_TYPE": "mysql", - "MYSQL_HOST": "{controller_ip}", - "RABBIT_HOST": "{controller_ip}", - "GLANCE_HOSTPORT": "{controller_ip}:9292", - "ENABLED_SERVICES": "n-cpu,n-net", - } - } diff --git a/samples/deployments/for_deploying_openstack_with_rally/multihost.json b/samples/deployments/for_deploying_openstack_with_rally/multihost.json deleted file mode 100644 index c562fed4..00000000 --- a/samples/deployments/for_deploying_openstack_with_rally/multihost.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "type": "MultihostEngine", - "controller": { - "type": "DevstackEngine", - "local_conf": { - "ENABLED_SERVICES+": ",-n-cpu,-n-net", - "MULTI_HOST": "1", - "SCREEN_LOGDIR": "$DEST/logs/screen" - }, - "provider": {"type": "DummyProvider", - "credentials": [{"host": "host-1.net", - "user": "root"}]} - }, - "nodes": [ - { - "type": "DevstackEngine", - "local_conf": {"ENABLED_SERVICES": "n-cpu,n-net"}, - "provider": { - "type": "DummyProvider", - "credentials": [{"host": "host-2.net", - "user": "root"}] - } - } - ] -} diff --git a/samples/deployments/for_deploying_openstack_with_rally/multihost.rst b/samples/deployments/for_deploying_openstack_with_rally/multihost.rst deleted file mode 100644 index 48ff3087..00000000 --- a/samples/deployments/for_deploying_openstack_with_rally/multihost.rst +++ /dev/null @@ -1,18 +0,0 @@ -MultihostEngine -=============== - -How to deploy multihost OpenStack - -MultihostEngine's configuration contains two sections: controller and nodes. - -Controller section ------------------- - -This section is full configuration of a deployment. Returned endpoints is -endpoints of the whole cloud. 
- -Nodes section -------------- - -This section is a list of full configurations of a compute nodes. Returned -endpoints are silently ignored. diff --git a/samples/plugins/context/context_plugin.py b/samples/plugins/context/context_plugin.py deleted file mode 100644 index c40b7dc4..00000000 --- a/samples/plugins/context/context_plugin.py +++ /dev/null @@ -1,90 +0,0 @@ -# Copyright 2013: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally.common import logging -from rally import consts -from rally import osclients -from rally.task import context - -LOG = logging.getLogger(__name__) - - -@context.configure(name="create_flavor", namespace="openstack", order=1000) -class CreateFlavorContext(context.Context): - """Create sample flavor - - This sample create flavor with specified options before task starts and - delete it after task completion. - To create your own context plugin, inherit it from - rally.task.context.Context - """ - - CONFIG_SCHEMA = { - "type": "object", - "$schema": consts.JSON_SCHEMA, - "additionalProperties": False, - "properties": { - "flavor_name": { - "type": "string", - }, - "ram": { - "type": "integer", - "minimum": 1 - }, - "vcpus": { - "type": "integer", - "minimum": 1 - }, - "disk": { - "type": "integer", - "minimum": 1 - } - } - } - - def setup(self): - """This method is called before the task start.""" - try: - # use rally.osclients to get necessary client instance - nova = osclients.Clients( - self.context["admin"]["credential"]).nova() - # and then do what you need with this client - self.context["flavor"] = nova.flavors.create( - # context settings are stored in self.config - name=self.config.get("flavor_name", "rally_test_flavor"), - ram=self.config.get("ram", 1), - vcpus=self.config.get("vcpus", 1), - disk=self.config.get("disk", 1)).to_dict() - LOG.debug("Flavor with id '%s'" % self.context["flavor"]["id"]) - except Exception as e: - msg = "Can't create flavor: %s" % e - if logging.is_debug(): - LOG.exception(msg) - else: - LOG.warning(msg) - - def cleanup(self): - """This method is called after the task finish.""" - try: - nova = osclients.Clients( - self.context["admin"]["credential"]).nova() - nova.flavors.delete(self.context["flavor"]["id"]) - LOG.debug("Flavor '%s' deleted" % self.context["flavor"]["id"]) - except Exception as e: - msg = "Can't delete flavor: %s" % e - if logging.is_debug(): - LOG.exception(msg) - else: - LOG.warning(msg) diff --git a/samples/plugins/context/test_context.json b/samples/plugins/context/test_context.json deleted file mode 100644 index dcc22a46..00000000 --- a/samples/plugins/context/test_context.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "Dummy.openstack": [ - { - "args": { - "sleep": 0.01 - }, - "runner": { - "type": "constant", - "times": 5, - "concurrency": 1 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - }, - "create_flavor": { - "ram": 1024 - } - } - } - ] -} diff --git a/samples/plugins/context/test_context.yaml 
b/samples/plugins/context/test_context.yaml deleted file mode 100644 index a6cb7e15..00000000 --- a/samples/plugins/context/test_context.yaml +++ /dev/null @@ -1,15 +0,0 @@ ---- - Dummy.openstack: - - - args: - sleep: 0.01 - runner: - type: "constant" - times: 5 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - create_flavor: - ram: 512 diff --git a/samples/plugins/runner/runner_plugin.py b/samples/plugins/runner/runner_plugin.py deleted file mode 100644 index 9e68eb1a..00000000 --- a/samples/plugins/runner/runner_plugin.py +++ /dev/null @@ -1,60 +0,0 @@ -# Copyright 2013: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import random - -from rally.common.plugin import plugin -from rally import consts -from rally.task import runner - - -@plugin.configure(name="random_times") -class RandomTimesScenarioRunner(runner.ScenarioRunner): - """Sample of scenario runner plugin. - - Run the scenario a random number of times, chosen between min_times and - max_times (inclusive). - """ - - CONFIG_SCHEMA = { - "type": "object", - "$schema": consts.JSON_SCHEMA, - "properties": { - "type": { - "type": "string" - }, - "min_times": { - "type": "integer", - "minimum": 1 - }, - "max_times": { - "type": "integer", - "minimum": 1 - } - }, - "additionalProperties": True - } - - def _run_scenario(self, cls, method_name, context, args): - # runner settings are stored in self.config - min_times = self.config.get("min_times", 1) - max_times = self.config.get("max_times", 1) - - # randint includes both endpoints; randrange would exclude max_times - # and raise ValueError when min_times == max_times - for i in range(random.randint(min_times, max_times)): - run_args = (i, cls, method_name, - runner._get_scenario_context(context), args) - result = runner._run_scenario_once(run_args) - # use self._send_result for the result of each iteration - self._send_result(result) diff --git a/samples/plugins/runner/test_runner.json b/samples/plugins/runner/test_runner.json deleted file mode 100644 index 993aeb92..00000000 --- a/samples/plugins/runner/test_runner.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "Dummy.dummy": [ - { - "runner": { - "type": "random_times", - "min_times": 10, - "max_times": 20 - } - } - ] -} diff --git a/samples/plugins/runner/test_runner.yaml b/samples/plugins/runner/test_runner.yaml deleted file mode 100644 index 5a339923..00000000 --- a/samples/plugins/runner/test_runner.yaml +++ /dev/null @@ -1,9 +0,0 @@ ---- - Dummy.dummy: - - - args: - sleep: 2 - runner: - type: "random_times" - min_times: 10 - max_times: 20 diff --git a/samples/plugins/scenario/scenario_plugin.py b/samples/plugins/scenario/scenario_plugin.py deleted file mode 100755 index fd39f9bb..00000000 --- a/samples/plugins/scenario/scenario_plugin.py +++ /dev/null @@ -1,44 +0,0 @@ -# Copyright 2013: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally import consts -from rally.plugins.openstack import scenario -from rally.task import atomic -from rally.task import validation - - -@validation.add("required_services", services=[consts.Service.NOVA]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(name="ScenarioPlugin.list_flavors") -class ListFlavors(scenario.OpenStackScenario): - - @atomic.action_timer("list_flavors") - def _list_flavors(self): - """Sample of using clients: list flavors. - - You can use self.context, self.admin_clients and self.clients which are - initialized on scenario instance creation. - """ - self.clients("nova").flavors.list() - - @atomic.action_timer("list_flavors_as_admin") - def _list_flavors_as_admin(self): - """The same with admin clients.""" - self.admin_clients("nova").flavors.list() - - def run(self): - """List flavors.""" - self._list_flavors() - self._list_flavors_as_admin() diff --git a/samples/plugins/scenario/test_scenario.json b/samples/plugins/scenario/test_scenario.json deleted file mode 100644 index 819af692..00000000 --- a/samples/plugins/scenario/test_scenario.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "ScenarioPlugin.list_flavors": [ - { - "runner": { - "type": "serial", - "times": 5 - }, - "context": { - "create_flavor": { - "ram": 512 - } - } - } - ] -} diff --git a/samples/plugins/scenario/test_scenario.yaml b/samples/plugins/scenario/test_scenario.yaml deleted file mode 100644 index 5b16755c..00000000 --- a/samples/plugins/scenario/test_scenario.yaml +++ /dev/null @@ -1,10 +0,0 @@ ---- - ScenarioPlugin.list_flavors: - - - runner: - type: "serial" - times: 5 - context: - users: - tenants: 1 - users_per_tenant: 1 diff --git a/samples/plugins/sla/sla_plugin.py b/samples/plugins/sla/sla_plugin.py deleted file mode 100644 index f051862d..00000000 --- a/samples/plugins/sla/sla_plugin.py +++ /dev/null @@ -1,52 +0,0 @@ -# Copyright 2015: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally.common.i18n import _ -from rally.common.plugin import plugin -from rally.task import sla - - -@plugin.configure(name="max_duration_range") -class MaxDurationRange(sla.SLA): - """Maximum allowed duration range in seconds.""" - - CONFIG_SCHEMA = { - "type": "number", - "minimum": 0.0, - "exclusiveMinimum": True - } - - def __init__(self, criterion_value): - super(MaxDurationRange, self).__init__(criterion_value) - # Start _min at infinity so the first successful iteration sets it; - # starting at 0 would pin the lower bound of the range to zero forever - self._min = float("inf") - self._max = 0 - - def add_iteration(self, iteration): - - # Skip failed iterations (those that raised exceptions) - if iteration.get("error"): - return self.success # This field is defined in the base class - - # Update the _min and _max values - self._max = max(self._max, iteration["duration"]) - self._min = min(self._min, iteration["duration"]) - - # Update successfulness based on the new max and min values - self.success = self._max - self._min <= self.criterion_value - return self.success - - def details(self): - # min(self._min, self._max) guards against the case where no - # iteration has been added yet; durations are in seconds - return (_("%s - Maximum allowed duration range: %.2fs <= %.2fs") % - (self.status(), self._max - min(self._min, self._max), - self.criterion_value)) diff --git a/samples/plugins/sla/test_sla.json b/samples/plugins/sla/test_sla.json deleted file mode 100644 index ea4f15a3..00000000 --- a/samples/plugins/sla/test_sla.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "Dummy.dummy": [ - { - "args": { - "sleep": 0.01 - }, - "runner": { - "type": "constant", - "times": 5, - "concurrency": 1 - }, - "sla": { - "max_duration_range": 2.5 - } - } - ] -} diff --git a/samples/plugins/sla/test_sla.yaml b/samples/plugins/sla/test_sla.yaml deleted file mode 100644 index 1b7a8888..00000000 --- a/samples/plugins/sla/test_sla.yaml +++ /dev/null @@ -1,11 +0,0 @@ ---- - Dummy.dummy: - - - args: - sleep: 0.01 - runner: - type: "constant" - times: 5 - concurrency: 1 - sla: - max_duration_range: 2.5 diff --git a/samples/plugins/unpack_plugins_samples.sh b/samples/plugins/unpack_plugins_samples.sh deleted file mode 100755 index 8b126a26..00000000 --- a/samples/plugins/unpack_plugins_samples.sh +++ /dev/null @@ -1,11 +0,0 @@ -#!/bin/bash -samples_unpacked_dir=$(dirname "${BASH_SOURCE[0]}") -dirs=( $(find "$samples_unpacked_dir" -maxdepth 1 -type d -printf '%P\n') ) -samples=~/.rally/plugins/samples -mkdir -p "$samples" -for dir in "${dirs[@]}"; do - cp -r "$samples_unpacked_dir/$dir" "$samples" - printf "\nTo test the $dir plugin, run the following command:\n" - printf "rally task start --task $samples/$dir/test_$dir.yaml\n" - printf "or \nrally task start --task $samples/$dir/test_$dir.json\n" -done diff --git a/samples/tasks/README.rst b/samples/tasks/README.rst deleted file mode 100644 index a2893da1..00000000 --- a/samples/tasks/README.rst +++ /dev/null @@ -1,90 +0,0 @@ -Tasks Configuration Samples -=========================== - -To specify your tasks, use configuration files in JSON or YAML format. - - -JSON schema of input task format: - -:: - - - { - "type": "object", - "$schema": "http://json-schema.org/draft-04/schema", - "patternProperties": { - ".*": { - "type": "array", - "items": { - "type": "object", - "properties": { - "args": { - "type": "object" - }, - "runner": { - "type": "object", - "properties": { - "type": {"type": "string"} - }, - "required": ["type"] - }, - "context": { - "type": "object" - }, - "sla": { - "type": "object" - } - }, - "additionalProperties": false - } - } - } - } - - -For humans: - -:: - - { - "ScenarioClass.scenario_method": [ - { - "args": { - ... - }, - "runner": { - ... - }, - "context": { - ... - }, - "sla": { - ... - } - } - ] - } - - -ScenarioClass should be a subclass of the base Scenario class -and scenario_method specifies what benchmark task should be run. Section -"args" is also related to the scenario. To learn more about scenario -configuration, see samples in `samples/tasks/scenarios -`_. - -Section "runner" specifies how the task should be run. To learn -more about runner configurations, see samples in `samples/tasks/runners -`_. - -Section "context" defines different types of environments in which the task can -be launched. Look at `samples/tasks/contexts -`_ -for samples. - -Section "sla" defines details for determining compliance with contracted values -such as maximum error rate or minimum response time. -Look at `samples/tasks/sla -`_ for -samples. - -See a `detailed description of benchmark scenarios, contexts & runners -`_. diff --git a/samples/tasks/contexts/README.rst b/samples/tasks/contexts/README.rst deleted file mode 100644 index 53d01e9d..00000000 --- a/samples/tasks/contexts/README.rst +++ /dev/null @@ -1,5 +0,0 @@ -Contexts Configuration Samples -============================== - -This directory contains samples of how to define different types of -environments using contexts. diff --git a/samples/tasks/contexts/allow-ssh.json b/samples/tasks/contexts/allow-ssh.json deleted file mode 100644 index e5a24de2..00000000 --- a/samples/tasks/contexts/allow-ssh.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "Dummy.openstack": [ - { - "args": { - "sleep": 0.1 - }, - "runner": { - "type": "constant", - "times": 4, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 2 - }, - "allow_ssh": null - } - } - ] -} diff --git a/samples/tasks/contexts/allow-ssh.yaml b/samples/tasks/contexts/allow-ssh.yaml deleted file mode 100644 index 8b190cd8..00000000 --- a/samples/tasks/contexts/allow-ssh.yaml +++ /dev/null @@ -1,14 +0,0 @@ ---- - Dummy.openstack: - - - args: - sleep: 0.1 - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 2 - allow_ssh: null diff --git a/samples/tasks/contexts/api-versions.json b/samples/tasks/contexts/api-versions.json deleted file mode 100644 index 095681ca..00000000 --- a/samples/tasks/contexts/api-versions.json +++ /dev/null @@ -1,29 +0,0 @@ -{ - "Dummy.openstack": [ - { - "args": { - "sleep": 0.1 - }, - "runner": { - "type": "constant", - "times": 4, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 2 - }, - "api_versions": { - "nova": { - "version": 2.2 - }, - "cinder": { - "version": 2, - "service_type": "volumev2" - } - } - } - } - ] -} diff --git a/samples/tasks/contexts/api-versions.yaml b/samples/tasks/contexts/api-versions.yaml deleted file mode 100644 index b68c403a..00000000 --- a/samples/tasks/contexts/api-versions.yaml +++ /dev/null @@ -1,19 +0,0 @@ ---- - Dummy.openstack: - - - args: - sleep: 0.1 - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 2 - api_versions: - nova: - version: 2.2 - cinder: - version: 2 - service_type: "volumev2" \ No newline at end of file diff --git a/samples/tasks/contexts/audit-templates.json b/samples/tasks/contexts/audit-templates.json deleted file mode 100644 index b9e8b980..00000000 --- a/samples/tasks/contexts/audit-templates.json +++ /dev/null @@ -1,38 +0,0 @@ -{ - "Dummy.openstack": [ - { - "args": { - "sleep": 0.1 - }, - "runner": { - "type": "constant", - "times": 4, - "concurrency": 2 - }, - "context": { - "audit_templates": {
"audit_templates_per_admin": 5, - "fill_strategy": "random", - "params": [ - { - "goal": { - "name": "workload_balancing" - }, - "strategy": { - "name": "workload_stabilization" - } - }, - { - "goal": { - "name": "dummy" - }, - "strategy": { - "name": "dummy" - } - } - ] - } - } - } - ] -} diff --git a/samples/tasks/contexts/audit-templates.yaml b/samples/tasks/contexts/audit-templates.yaml deleted file mode 100644 index 05c2e2a8..00000000 --- a/samples/tasks/contexts/audit-templates.yaml +++ /dev/null @@ -1,22 +0,0 @@ ---- - Dummy.openstack: - - - args: - sleep: 0.1 - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - audit_templates: - audit_templates_per_admin: 5 - fill_strategy: "random" - params: - - goal: - name: "workload_balancing" - strategy: - name: "workload_stabilization" - - goal: - name: "dummy" - strategy: - name: "dummy" diff --git a/samples/tasks/contexts/ca-certs.json b/samples/tasks/contexts/ca-certs.json deleted file mode 100644 index cb76f81b..00000000 --- a/samples/tasks/contexts/ca-certs.json +++ /dev/null @@ -1,66 +0,0 @@ -{ - "Dummy.openstack": [ - { - "args": { - "sleep": 0.1 - }, - "runner": { - "type": "constant", - "concurrency": 1, - "times": 1 - }, - "context": { - "ca_certs": { - "directory": "/home/stack" - }, - "clusters": { - "node_count": 2 - }, - "cluster_templates": { - "dns_nameserver": "8.8.8.8", - "external_network_id": "public", - "flavor_id": "m1.small", - "docker_volume_size": 5, - "coe": "kubernetes", - "image_id": "fedora-atomic-latest", - "network_driver": "flannel" - }, - "users": { - "users_per_tenant": 1, - "tenants": 1 - } - } - }, - { - "args": { - "sleep": 0.1 - }, - "runner": { - "type": "constant", - "concurrency": 1, - "times": 1 - }, - "context": { - "ca_certs": { - "directory": "/home/stack" - }, - "clusters": { - "node_count": 2 - }, - "cluster_templates": { - "dns_nameserver": "8.8.8.8", - "external_network_id": "public", - "flavor_id": "m1.small", - "docker_volume_size": 5, - "coe": "swarm", - "image_id": "fedora-atomic-latest", - "network_driver": "docker" - }, - "users": { - "users_per_tenant": 1, - "tenants": 1 - } - } - } - ] -} diff --git a/samples/tasks/contexts/ca-certs.yaml b/samples/tasks/contexts/ca-certs.yaml deleted file mode 100644 index 7d610159..00000000 --- a/samples/tasks/contexts/ca-certs.yaml +++ /dev/null @@ -1,48 +0,0 @@ ---- - Dummy.openstack: - - - args: - sleep: 0.1 - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - cluster_templates: - image_id: "fedora-atomic-latest" - external_network_id: "public" - dns_nameserver: "8.8.8.8" - flavor_id: "m1.small" - docker_volume_size: 5 - network_driver: "flannel" - coe: "kubernetes" - clusters: - node_count: 2 - ca_certs: - directory: "/home/stack" - - - args: - sleep: 0.1 - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - cluster_templates: - image_id: "fedora-atomic-latest" - external_network_id: "public" - dns_nameserver: "8.8.8.8" - flavor_id: "m1.small" - docker_volume_size: 5 - network_driver: "docker" - coe: "swarm" - clusters: - node_count: 2 - ca_certs: - directory: "/home/stack" diff --git a/samples/tasks/contexts/ceilometer.json b/samples/tasks/contexts/ceilometer.json deleted file mode 100644 index c3991d73..00000000 --- a/samples/tasks/contexts/ceilometer.json +++ /dev/null @@ -1,38 +0,0 @@ -{ - "Dummy.openstack": [ - { - "args": { - "sleep": 0.1 - }, - "runner": { - "type": "constant", - "times": 4, - 
"concurrency": 2 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 2 - }, - "ceilometer": { - "counter_name": "cpu_util", - "counter_type": "gauge", - "counter_unit": "instance", - "counter_volume": 1.0, - "resources_per_tenant": 100, - "samples_per_resource": 100, - "timestamp_interval": 60, - "metadata_list": [ - {"status": "active", "name": "fake_resource", - "deleted": "False", - "created_at": "2015-09-04T12:34:19.000000"}, - {"status": "not_active", "name": "fake_resource_1", - "deleted": "False", - "created_at": "2015-09-10T06:55:12.000000"} - ], - "batch_size": 5 - } - } - } - ] -} diff --git a/samples/tasks/contexts/ceilometer.yaml b/samples/tasks/contexts/ceilometer.yaml deleted file mode 100644 index 22a6128c..00000000 --- a/samples/tasks/contexts/ceilometer.yaml +++ /dev/null @@ -1,31 +0,0 @@ ---- - Dummy.openstack: - - - args: - sleep: 0.1 - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 2 - ceilometer: - counter_name: "cpu_util" - counter_type: "gauge" - counter_unit: "instance" - counter_volume: 1.0 - resources_per_tenant: 100 - samples_per_resource: 100 - timestamp_interval: 60 - metadata_list: - - status: "active" - name: "fake_resource" - deleted: "False" - created_at: "2015-09-04T12:34:19.000000" - - status: "not_active" - name: "fake_resource_1" - deleted: "False" - created_at: "2015-09-10T06:55:12.000000" - batch_size: 5 \ No newline at end of file diff --git a/samples/tasks/contexts/cluster-templates.json b/samples/tasks/contexts/cluster-templates.json deleted file mode 100644 index 4594749a..00000000 --- a/samples/tasks/contexts/cluster-templates.json +++ /dev/null @@ -1,78 +0,0 @@ -{ - "Dummy.openstack": [ - { - "args": { - "sleep": 0.1 - }, - "runner": { - "type": "constant", - "concurrency": 1, - "times": 1 - }, - "context": { - "cluster_templates": { - "dns_nameserver": "8.8.8.8", - "external_network_id": "public", - "flavor_id": "m1.small", - "docker_volume_size": 5, - "coe": "kubernetes", - "image_id": "fedora-atomic-latest", - "network_driver": "flannel" - }, - "users": { - "users_per_tenant": 1, - "tenants": 1 - } - } - }, - { - "args": { - "sleep": 0.1 - }, - "runner": { - "type": "constant", - "concurrency": 1, - "times": 1 - }, - "context": { - "cluster_templates": { - "dns_nameserver": "8.8.8.8", - "external_network_id": "public", - "flavor_id": "m1.small", - "docker_volume_size": 5, - "coe": "swarm", - "image_id": "fedora-atomic-latest", - "network_driver": "docker" - }, - "users": { - "users_per_tenant": 1, - "tenants": 1 - } - } - }, - { - "args": { - "sleep": 0.1 - }, - "runner": { - "type": "constant", - "concurrency": 1, - "times": 1 - }, - "context": { - "cluster_templates": { - "dns_nameserver": "8.8.8.8", - "external_network_id": "public", - "flavor_id": "m1.small", - "coe": "mesos", - "image_id": "ubuntu-mesos", - "network_driver": "docker" - }, - "users": { - "users_per_tenant": 1, - "tenants": 1 - } - } - } - ] -} diff --git a/samples/tasks/contexts/cluster-templates.yaml b/samples/tasks/contexts/cluster-templates.yaml deleted file mode 100644 index 16ccc4cf..00000000 --- a/samples/tasks/contexts/cluster-templates.yaml +++ /dev/null @@ -1,58 +0,0 @@ ---- - Dummy.openstack: - - - args: - sleep: 0.1 - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - cluster_templates: - image_id: "fedora-atomic-latest" - external_network_id: "public" - dns_nameserver: "8.8.8.8" - flavor_id: "m1.small" - 
docker_volume_size: 5 - network_driver: "flannel" - coe: "kubernetes" - - - args: - sleep: 0.1 - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - cluster_templates: - image_id: "fedora-atomic-latest" - external_network_id: "public" - dns_nameserver: "8.8.8.8" - flavor_id: "m1.small" - docker_volume_size: 5 - network_driver: "docker" - coe: "swarm" - - - args: - sleep: 0.1 - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - cluster_templates: - image_id: "ubuntu-mesos" - external_network_id: "public" - dns_nameserver: "8.8.8.8" - flavor_id: "m1.small" - network_driver: "docker" - coe: "mesos" diff --git a/samples/tasks/contexts/clusters.json b/samples/tasks/contexts/clusters.json deleted file mode 100644 index b9c271e5..00000000 --- a/samples/tasks/contexts/clusters.json +++ /dev/null @@ -1,87 +0,0 @@ -{ - "Dummy.openstack": [ - { - "args": { - "sleep": 0.1 - }, - "runner": { - "type": "constant", - "concurrency": 1, - "times": 1 - }, - "context": { - "clusters": { - "node_count": 2 - }, - "cluster_templates": { - "dns_nameserver": "8.8.8.8", - "external_network_id": "public", - "flavor_id": "m1.small", - "docker_volume_size": 5, - "coe": "kubernetes", - "image_id": "fedora-atomic-latest", - "network_driver": "flannel" - }, - "users": { - "users_per_tenant": 1, - "tenants": 1 - } - } - }, - { - "args": { - "sleep": 0.1 - }, - "runner": { - "type": "constant", - "concurrency": 1, - "times": 1 - }, - "context": { - "clusters": { - "node_count": 2 - }, - "cluster_templates": { - "dns_nameserver": "8.8.8.8", - "external_network_id": "public", - "flavor_id": "m1.small", - "docker_volume_size": 5, - "coe": "swarm", - "image_id": "fedora-atomic-latest", - "network_driver": "docker" - }, - "users": { - "users_per_tenant": 1, - "tenants": 1 - } - } - }, - { - "args": { - "sleep": 0.1 - }, - "runner": { - "type": "constant", - "concurrency": 1, - "times": 1 - }, - "context": { - "clusters": { - "node_count": 2 - }, - "cluster_templates": { - "dns_nameserver": "8.8.8.8", - "external_network_id": "public", - "flavor_id": "m1.small", - "coe": "mesos", - "image_id": "ubuntu-mesos", - "network_driver": "docker" - }, - "users": { - "users_per_tenant": 1, - "tenants": 1 - } - } - } - ] -} diff --git a/samples/tasks/contexts/clusters.yaml b/samples/tasks/contexts/clusters.yaml deleted file mode 100644 index 07bac0ca..00000000 --- a/samples/tasks/contexts/clusters.yaml +++ /dev/null @@ -1,64 +0,0 @@ ---- - Dummy.openstack: - - - args: - sleep: 0.1 - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - cluster_templates: - image_id: "fedora-atomic-latest" - external_network_id: "public" - dns_nameserver: "8.8.8.8" - flavor_id: "m1.small" - docker_volume_size: 5 - network_driver: "flannel" - coe: "kubernetes" - clusters: - node_count: 2 - - - args: - sleep: 0.1 - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - cluster_templates: - image_id: "fedora-atomic-latest" - external_network_id: "public" - dns_nameserver: "8.8.8.8" - flavor_id: "m1.small" - docker_volume_size: 5 - network_driver: "docker" - coe: "swarm" - clusters: - node_count: 2 - - - args: - sleep: 0.1 - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - cluster_templates: - image_id: "ubuntu-mesos" - external_network_id: "public" - dns_nameserver: 
"8.8.8.8" - flavor_id: "m1.small" - network_driver: "docker" - coe: "mesos" - clusters: - node_count: 2 diff --git a/samples/tasks/contexts/dummy-context.json b/samples/tasks/contexts/dummy-context.json deleted file mode 100644 index 4ebdc3f3..00000000 --- a/samples/tasks/contexts/dummy-context.json +++ /dev/null @@ -1,24 +0,0 @@ -{ - "Dummy.openstack": [ - { - "args": { - "sleep": 0.1 - }, - "runner": { - "type": "constant", - "times": 4, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 2 - }, - "dummy_context": { - "fail_setup": false, - "fail_cleanup": false - } - } - } - ] -} diff --git a/samples/tasks/contexts/dummy-context.yaml b/samples/tasks/contexts/dummy-context.yaml deleted file mode 100644 index 7ab64cc1..00000000 --- a/samples/tasks/contexts/dummy-context.yaml +++ /dev/null @@ -1,16 +0,0 @@ ---- - Dummy.openstack: - - - args: - sleep: 0.1 - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 2 - dummy_context: - fail_setup: false - fail_cleanup: false \ No newline at end of file diff --git a/samples/tasks/contexts/ec2-servers.json b/samples/tasks/contexts/ec2-servers.json deleted file mode 100644 index c55a4a1d..00000000 --- a/samples/tasks/contexts/ec2-servers.json +++ /dev/null @@ -1,29 +0,0 @@ -{ - "Dummy.openstack": [ - { - "args": { - "sleep": 0.1 - }, - "runner": { - "type": "constant", - "times": 4, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 2 - }, - "ec2_servers": { - "flavor": { - "name": "m1.tiny" - }, - "image": { - "name": "^cirros.*-disk$" - }, - "servers_per_tenant": 2 - } - } - } - ] -} diff --git a/samples/tasks/contexts/ec2-servers.yaml b/samples/tasks/contexts/ec2-servers.yaml deleted file mode 100644 index 376b9442..00000000 --- a/samples/tasks/contexts/ec2-servers.yaml +++ /dev/null @@ -1,19 +0,0 @@ ---- - Dummy.openstack: - - - args: - sleep: 0.1 - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 2 - ec2_servers: - flavor: - name: "m1.tiny" - image: - name: "^cirros.*-disk$" - servers_per_tenant: 2 diff --git a/samples/tasks/contexts/existing-network.json b/samples/tasks/contexts/existing-network.json deleted file mode 100644 index abbc70a1..00000000 --- a/samples/tasks/contexts/existing-network.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "Dummy.openstack": [ - { - "args": { - "sleep": 0.1 - }, - "runner": { - "type": "constant", - "times": 4, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 2 - }, - "existing_network": {} - } - } - ] -} diff --git a/samples/tasks/contexts/existing-network.yaml b/samples/tasks/contexts/existing-network.yaml deleted file mode 100644 index 13831066..00000000 --- a/samples/tasks/contexts/existing-network.yaml +++ /dev/null @@ -1,14 +0,0 @@ ---- - Dummy.openstack: - - - args: - sleep: 0.1 - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 2 - existing_network: {} \ No newline at end of file diff --git a/samples/tasks/contexts/flavors.json b/samples/tasks/contexts/flavors.json deleted file mode 100644 index c398a29c..00000000 --- a/samples/tasks/contexts/flavors.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "Dummy.openstack": [ - { - "args": { - "sleep": 0.1 - }, - "runner": { - "type": "constant", - "times": 4, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 2 - }, - "flavors": [ - { - "name": 
"ram64", - "ram": 64 - } - ] - } - } - ] -} diff --git a/samples/tasks/contexts/flavors.yaml b/samples/tasks/contexts/flavors.yaml deleted file mode 100644 index dc680435..00000000 --- a/samples/tasks/contexts/flavors.yaml +++ /dev/null @@ -1,17 +0,0 @@ ---- - Dummy.openstack: - - - args: - sleep: 0.1 - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 2 - flavors: - - - name: "ram64" - ram: 64 \ No newline at end of file diff --git a/samples/tasks/contexts/heat-dataplane.json b/samples/tasks/contexts/heat-dataplane.json deleted file mode 100644 index 38a0bd6b..00000000 --- a/samples/tasks/contexts/heat-dataplane.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "Dummy.openstack": [ - { - "args": { - "sleep": 0.1 - }, - "runner": { - "type": "constant", - "times": 4, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 2 - }, - "network": { - "start_cidr": "10.2.0.0/24", - "networks_per_tenant": 1, - "subnets_per_network": 1, - "network_create_args": {}, - "dns_nameservers": ["10.2.0.1"] - }, - "heat_dataplane": { - "stacks_per_tenant": 1, - "template": "samples/tasks/scenarios/heat/templates/resource-group-with-constraint.yaml.template", - "files": { - "file1": "f1.yaml", - "file2": "f2.yaml" - }, - "parameters": { - "count": 40, - "delay": 0.1 - }, - "context_parameters": {} - } - } - } - ] -} diff --git a/samples/tasks/contexts/heat-dataplane.yaml b/samples/tasks/contexts/heat-dataplane.yaml deleted file mode 100644 index 9e670690..00000000 --- a/samples/tasks/contexts/heat-dataplane.yaml +++ /dev/null @@ -1,30 +0,0 @@ ---- - Dummy.openstack: - - - args: - sleep: 0.1 - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 2 - network: - start_cidr: "10.2.0.0/24" - networks_per_tenant: 1 - subnets_per_network: 1 - network_create_args: {} - dns_nameservers: - - "10.2.0.1" - heat_dataplane: - stacks_per_tenant: 1 - template: "samples/tasks/scenarios/heat/templates/resource-group-with-constraint.yaml.template" - files: - file1: "f1.yaml" - file2: "f2.yaml" - parameters: - count: 40 - delay: 0.1 - context_parameters: {} \ No newline at end of file diff --git a/samples/tasks/contexts/image-command-customizer.json b/samples/tasks/contexts/image-command-customizer.json deleted file mode 100644 index 7748139e..00000000 --- a/samples/tasks/contexts/image-command-customizer.json +++ /dev/null @@ -1,32 +0,0 @@ -{ - "Dummy.openstack": [ - { - "args": { - "sleep": 0.1 - }, - "runner": { - "type": "constant", - "times": 1, - "concurrency": 1, - "timeout": 3000 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - }, - "image_command_customizer": { - "image": {"name": "Fedora-x86_64-20-20140618-sda"}, - "flavor": {"name": "m1.small"}, - "command": { - "local_path": "rally-jobs/extra/install_benchmark.sh", - "remote_path": "./install_benchmark.sh" - }, - "username": "root", - "userdata": "#cloud-config\ndisable_root: 0\nssh_pwauth: 1" - }, - "network": {} - } - } - ] -} diff --git a/samples/tasks/contexts/image-command-customizer.yaml b/samples/tasks/contexts/image-command-customizer.yaml deleted file mode 100644 index 832e037d..00000000 --- a/samples/tasks/contexts/image-command-customizer.yaml +++ /dev/null @@ -1,25 +0,0 @@ ---- - Dummy.openstack: - - - args: - sleep: 0.1 - runner: - concurrency: 1 - timeout: 3000 - times: 1 - type: "constant" - context: - image_command_customizer: - command: - local_path: 
"rally-jobs/extra/install_benchmark.sh" - remote_path: "./install_benchmark.sh" - flavor: - name: m1.small - image: - name: "Fedora-x86_64-20-20140618-sda" - userdata: "#cloud-config\ndisable_root: 0\nssh_pwauth: 1" - username: root - network: {} - users: - tenants: 1 - users_per_tenant: 1 diff --git a/samples/tasks/contexts/images.json b/samples/tasks/contexts/images.json deleted file mode 100644 index 7a2ec600..00000000 --- a/samples/tasks/contexts/images.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "Dummy.openstack": [ - { - "args": { - "sleep": 0.1 - }, - "runner": { - "type": "constant", - "times": 2, - "concurrency": 1 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 2 - }, - "images": { - "image_url": "http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img", - "image_type": "qcow2", - "image_container": "bare", - "images_per_tenant": 4 - } - } - } - ] -} diff --git a/samples/tasks/contexts/images.yaml b/samples/tasks/contexts/images.yaml deleted file mode 100644 index 3bb9cd37..00000000 --- a/samples/tasks/contexts/images.yaml +++ /dev/null @@ -1,18 +0,0 @@ ---- - Dummy.openstack: - - - args: - sleep: 0.1 - runner: - type: "constant" - times: 2 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 2 - images: - image_url: "http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img" - image_type: "qcow2" - image_container: "bare" - images_per_tenant: 4 diff --git a/samples/tasks/contexts/keypair.json b/samples/tasks/contexts/keypair.json deleted file mode 100644 index 66d0f145..00000000 --- a/samples/tasks/contexts/keypair.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "Dummy.openstack": [ - { - "args": { - "sleep": 0.1 - }, - "runner": { - "type": "constant", - "times": 4, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 2 - }, - "keypair": {} - } - } - ] -} diff --git a/samples/tasks/contexts/keypair.yaml b/samples/tasks/contexts/keypair.yaml deleted file mode 100644 index 3e5cd148..00000000 --- a/samples/tasks/contexts/keypair.yaml +++ /dev/null @@ -1,14 +0,0 @@ ---- - Dummy.openstack: - - - args: - sleep: 0.1 - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 2 - keypair: {} diff --git a/samples/tasks/contexts/lbaas.json b/samples/tasks/contexts/lbaas.json deleted file mode 100644 index 442227ff..00000000 --- a/samples/tasks/contexts/lbaas.json +++ /dev/null @@ -1,45 +0,0 @@ -{ - "Dummy.openstack": [ - { - "args": { - "sleep": 0.1 - }, - "runner": { - "type": "constant", - "times": 4, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 2 - }, - "lbaas": { - "pool": {} - } - } - }, - { - "args": { - "sleep": 0.1 - }, - "runner": { - "type": "constant", - "times": 4, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 2 - }, - "lbaas": { - "pool": { - "lb_method": "ROUND_ROBIN", - "protocol": "HTTP" - } - } - } - } - ] -} diff --git a/samples/tasks/contexts/lbaas.yaml b/samples/tasks/contexts/lbaas.yaml deleted file mode 100644 index 706fee76..00000000 --- a/samples/tasks/contexts/lbaas.yaml +++ /dev/null @@ -1,30 +0,0 @@ ---- - Dummy.openstack: - - - args: - sleep: 0.1 - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 2 - lbaas: - pool: {} - - - args: - sleep: 0.1 - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 2 - lbaas: - pool: - 
lb_method: "ROUND_ROBIN" - protocol: "HTTP" diff --git a/samples/tasks/contexts/manila-security-services.json b/samples/tasks/contexts/manila-security-services.json deleted file mode 100644 index 1f59fd21..00000000 --- a/samples/tasks/contexts/manila-security-services.json +++ /dev/null @@ -1,60 +0,0 @@ -{ - "Dummy.openstack": [ - { - "args": { - "sleep": 0.1 - }, - "runner": { - "type": "constant", - "times": 4, - "concurrency": 2 - }, - "context": { - "quotas": { - "manila": { - "shares": -1, - "gigabytes": -1, - "share_networks": -1 - } - }, - "users": { - "tenants": 1, - "users_per_tenant": 1 - }, - "manila_share_networks": { - "use_share_networks": true - }, - "manila_security_services": { - "security_services": [ - { - "type": "ldap", - "server": "LDAP server address", - "user": "User that will be used", - "password": "Password for specified user" - }, - { - "type": "kerberos", - "dns_ip": "IP address of DNS service to be used", - "server": "Kerberos server address", - "domain": "Kerberos realm", - "user": "User that will be used", - "password": "Password for specified user" - }, - { - "type": "active_directory", - "dns_ip": "IP address of DNS service to be used", - "domain": "Domain from 'Active Directory'", - "user": "User from 'Active Directory'", - "password": "password for specified user" - } - ] - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/contexts/manila-security-services.yaml b/samples/tasks/contexts/manila-security-services.yaml deleted file mode 100644 index 8c052563..00000000 --- a/samples/tasks/contexts/manila-security-services.yaml +++ /dev/null @@ -1,42 +0,0 @@ -Dummy.openstack: - - - args: - sleep: 0.1 - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - quotas: - manila: - shares: -1 - gigabytes: -1 - share_networks: -1 - users: - tenants: 1 - users_per_tenant: 1 - manila_share_networks: - use_share_networks: True - manila_security_services: - security_services: - - - type: "ldap" - server: "LDAP server address" - user: "User that will be used" - password: "Password for specified user" - - - type: "kerberos" - dns_ip: "IP address of DNS service to be used" - server: "Kerberos server address" - domain: "Kerberos realm" - user: "User that will be used" - password: "Password for specified user" - - - type: "active_directory" - dns_ip: "IP address of DNS service to be used" - domain: "Domain from 'Active Directory'" - user: "User from 'Active Directory'" - password: "password for specified user" - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/contexts/manila-share-networks.json b/samples/tasks/contexts/manila-share-networks.json deleted file mode 100644 index 7af9620a..00000000 --- a/samples/tasks/contexts/manila-share-networks.json +++ /dev/null @@ -1,41 +0,0 @@ -{ - "Dummy.openstack": [ - { - "args": { - "sleep": 0.1 - }, - "runner": { - "type": "constant", - "times": 4, - "concurrency": 2 - }, - "context": { - "quotas": { - "manila": { - "shares": -1, - "gigabytes": -1, - "share_networks": -1 - } - }, - "users": { - "tenants": 1, - "users_per_tenant": 1 - }, - "manila_share_networks": { - "use_share_networks": true - }, - "manila_shares": { - "shares_per_tenant": 1, - "share_proto": "NFS", - "size": 1, - "share_type": "dhss_true" - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/contexts/manila-share-networks.yaml b/samples/tasks/contexts/manila-share-networks.yaml deleted file mode 100644 index d18420b4..00000000 --- 
a/samples/tasks/contexts/manila-share-networks.yaml +++ /dev/null @@ -1,27 +0,0 @@ -Dummy.openstack: - - - args: - sleep: 0.1 - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - quotas: - manila: - shares: -1 - gigabytes: -1 - share_networks: -1 - users: - tenants: 1 - users_per_tenant: 1 - manila_share_networks: - use_share_networks: True - manila_shares: - shares_per_tenant: 1 - share_proto: "NFS" - size: 1 - share_type: "dhss_true" - sla: - failure_rate: - max: 0 \ No newline at end of file diff --git a/samples/tasks/contexts/manila-shares.json b/samples/tasks/contexts/manila-shares.json deleted file mode 100644 index 66104c72..00000000 --- a/samples/tasks/contexts/manila-shares.json +++ /dev/null @@ -1,41 +0,0 @@ -{ - "Dummy.openstack": [ - { - "args": { - "sleep": 0.1 - }, - "runner": { - "type": "constant", - "times": 4, - "concurrency": 2 - }, - "context": { - "quotas": { - "manila": { - "shares": -1, - "gigabytes": -1, - "share_networks": -1 - } - }, - "users": { - "tenants": 1, - "users_per_tenant": 1 - }, - "manila_share_networks": { - "use_share_networks": true - }, - "manila_shares": { - "shares_per_tenant": 1, - "share_proto": "NFS", - "size": 1, - "share_type": "dhss_true" - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/contexts/manila-shares.yaml b/samples/tasks/contexts/manila-shares.yaml deleted file mode 100644 index d18420b4..00000000 --- a/samples/tasks/contexts/manila-shares.yaml +++ /dev/null @@ -1,27 +0,0 @@ -Dummy.openstack: - - - args: - sleep: 0.1 - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - quotas: - manila: - shares: -1 - gigabytes: -1 - share_networks: -1 - users: - tenants: 1 - users_per_tenant: 1 - manila_share_networks: - use_share_networks: True - manila_shares: - shares_per_tenant: 1 - share_proto: "NFS" - size: 1 - share_type: "dhss_true" - sla: - failure_rate: - max: 0 \ No newline at end of file diff --git a/samples/tasks/contexts/monasca-metrics.json b/samples/tasks/contexts/monasca-metrics.json deleted file mode 100644 index a74889e3..00000000 --- a/samples/tasks/contexts/monasca-metrics.json +++ /dev/null @@ -1,32 +0,0 @@ -{ - "Dummy.openstack": [ - { - "args": { - "sleep": 0.1 - }, - "runner": { - "type": "constant", - "times": 4, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - }, - "roles": [ - "monasca-user" - ], - "monasca_metrics": { - "dimensions": { - "region": "RegionOne", - "service": "identity", - "hostname": "fake_host", - "url": "http://fake_host:5000/v2.0" - }, - "metrics_per_tenant": 10 - } - } - } - ] -} diff --git a/samples/tasks/contexts/monasca-metrics.yaml b/samples/tasks/contexts/monasca-metrics.yaml deleted file mode 100644 index 7146c121..00000000 --- a/samples/tasks/contexts/monasca-metrics.yaml +++ /dev/null @@ -1,22 +0,0 @@ ---- - Dummy.openstack: - - - args: - sleep: 0.1 - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 1 - roles: - - "monasca-user" - monasca_metrics: - dimensions: - region: "RegionOne" - service: "identity" - hostname: "fake_host" - url: "http://fake_host:5000/v2.0" - metrics_per_tenant: 10 diff --git a/samples/tasks/contexts/murano-environments.json b/samples/tasks/contexts/murano-environments.json deleted file mode 100644 index 286b33e8..00000000 --- a/samples/tasks/contexts/murano-environments.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "Dummy.openstack": [ - { - "args": { - "sleep": 0.1 - }, - "runner": { - 
"type": "constant", - "times": 4, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - }, - "murano_environments": { - "environments_per_tenant": 2 - } - } - } - ] -} diff --git a/samples/tasks/contexts/murano-environments.yaml b/samples/tasks/contexts/murano-environments.yaml deleted file mode 100644 index cc36e35c..00000000 --- a/samples/tasks/contexts/murano-environments.yaml +++ /dev/null @@ -1,15 +0,0 @@ ---- - Dummy.openstack: - - - args: - sleep: 0.1 - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - murano_environments: - environments_per_tenant: 2 diff --git a/samples/tasks/contexts/murano-packages.json b/samples/tasks/contexts/murano-packages.json deleted file mode 100644 index 31f02a81..00000000 --- a/samples/tasks/contexts/murano-packages.json +++ /dev/null @@ -1,44 +0,0 @@ -{ - "Dummy.openstack": [ - { - "args": { - "sleep": 0.1 - }, - "runner": { - "type": "constant", - "times": 2, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - }, - "murano_packages": { - "app_package": "rally-jobs/extra/murano/applications/HelloReporter/io.murano.apps.HelloReporter.zip" - }, - "roles": ["admin"] - } - }, - { - "args": { - "sleep": 0.1 - }, - "runner": { - "type": "constant", - "times": 2, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - }, - "murano_packages": { - "app_package": "rally-jobs/extra/murano/applications/HelloReporter/io.murano.apps.HelloReporter/" - }, - "roles": ["admin"] - } - } - ] -} diff --git a/samples/tasks/contexts/murano-packages.yaml b/samples/tasks/contexts/murano-packages.yaml deleted file mode 100644 index 87e5709e..00000000 --- a/samples/tasks/contexts/murano-packages.yaml +++ /dev/null @@ -1,32 +0,0 @@ ---- - Dummy.openstack: - - - args: - sleep: 0.1 - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - murano_packages: - app_package: "rally-jobs/extra/murano/applications/HelloReporter/io.murano.apps.HelloReporter.zip" - roles: - - "admin" - - - args: - sleep: 0.1 - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - murano_packages: - app_package: "rally-jobs/extra/murano/applications/HelloReporter/io.murano.apps.HelloReporter/" - roles: - - "admin" diff --git a/samples/tasks/contexts/network.json b/samples/tasks/contexts/network.json deleted file mode 100644 index a28e719a..00000000 --- a/samples/tasks/contexts/network.json +++ /dev/null @@ -1,44 +0,0 @@ -{ - "Dummy.openstack": [ - { - "args": { - "sleep": 0.1 - }, - "runner": { - "type": "constant", - "times": 4, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 2 - }, - "network": {} - } - }, - { - "args": { - "sleep": 0.1 - }, - "runner": { - "type": "constant", - "times": 4, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 2 - }, - "network": { - "start_cidr": "10.2.0.0/24", - "networks_per_tenant": 1, - "subnets_per_network": 1, - "network_create_args": {}, - "dns_nameservers": ["10.2.0.1"] - } - } - } - ] -} diff --git a/samples/tasks/contexts/network.yaml b/samples/tasks/contexts/network.yaml deleted file mode 100644 index 60c8f67e..00000000 --- a/samples/tasks/contexts/network.yaml +++ /dev/null @@ -1,32 +0,0 @@ ---- - Dummy.openstack: - - - args: - sleep: 0.1 - runner: - type: "constant" - times: 4 - concurrency: 
2 - context: - users: - tenants: 1 - users_per_tenant: 2 - network: {} - - - args: - sleep: 0.1 - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 2 - network: - start_cidr: "10.2.0.0/24" - networks_per_tenant: 1 - subnets_per_network: 1 - network_create_args: {} - dns_nameservers: - - "10.2.0.1" \ No newline at end of file diff --git a/samples/tasks/contexts/profiles.json b/samples/tasks/contexts/profiles.json deleted file mode 100644 index 1db069af..00000000 --- a/samples/tasks/contexts/profiles.json +++ /dev/null @@ -1,32 +0,0 @@ -{ - "Dummy.openstack": [ - { - "args": { - "sleep": 0.1 - }, - "runner": { - "type": "constant", - "times": 3, - "concurrency": 1 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - }, - "profiles": { - "type": "os.nova.server", - "version": "1.0", - "properties": { - "name": "cirros_server", - "flavor": 1, - "image": "cirros-0.3.5-x86_64-disk", - "networks": [ - { "network": "private" } - ] - } - } - } - } - ] -} diff --git a/samples/tasks/contexts/profiles.yaml b/samples/tasks/contexts/profiles.yaml deleted file mode 100644 index 48e951a0..00000000 --- a/samples/tasks/contexts/profiles.yaml +++ /dev/null @@ -1,22 +0,0 @@ ---- - Dummy.openstack: - - - args: - sleep: 0.1 - runner: - type: "constant" - times: 3 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - profiles: - type: "os.nova.server" - version: "1.0" - properties: - name: "cirros_server" - flavor: 1 - image: "cirros-0.3.5-x86_64-disk" - networks: - - network: "private" diff --git a/samples/tasks/contexts/quotas.json b/samples/tasks/contexts/quotas.json deleted file mode 100644 index f1aee454..00000000 --- a/samples/tasks/contexts/quotas.json +++ /dev/null @@ -1,46 +0,0 @@ -{ - "Dummy.openstack": [ - { - "args": { - "sleep": 0.1 - }, - "runner": { - "type": "constant", - "times": 4, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - }, - "quotas": { - "manila": { - "share_networks": -1 - } - } - } - }, - { - "args": { - "sleep": 0.1 - }, - "runner": { - "type": "constant", - "times": 4, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - }, - "quotas": { - "cinder": { - "volumes": -1 - } - } - } - } - ] -} diff --git a/samples/tasks/contexts/quotas.yaml b/samples/tasks/contexts/quotas.yaml deleted file mode 100644 index 85afcc21..00000000 --- a/samples/tasks/contexts/quotas.yaml +++ /dev/null @@ -1,30 +0,0 @@ ---- - Dummy.openstack: - - - args: - sleep: 0.1 - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 1 - quotas: - manila: - share_networks: -1 - - - args: - sleep: 0.1 - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 1 - quotas: - cinder: - volumes: -1 diff --git a/samples/tasks/contexts/roles.json b/samples/tasks/contexts/roles.json deleted file mode 100644 index 0d09cd41..00000000 --- a/samples/tasks/contexts/roles.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "Dummy.openstack": [ - { - "args": { - "sleep": 0.1 - }, - "runner": { - "type": "constant", - "times": 4, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 2 - }, - "roles": ["role"] - } - } - ] -} diff --git a/samples/tasks/contexts/roles.yaml b/samples/tasks/contexts/roles.yaml deleted file mode 100644 index 32c50488..00000000 --- a/samples/tasks/contexts/roles.yaml +++ /dev/null @@ -1,15 +0,0 @@ 
---- - Dummy.openstack: - - - args: - sleep: 0.1 - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 2 - roles: - - "role" diff --git a/samples/tasks/contexts/router.json b/samples/tasks/contexts/router.json deleted file mode 100644 index b9b83e89..00000000 --- a/samples/tasks/contexts/router.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "Dummy.openstack": [ - { - "args": { - "sleep": 0.1 - }, - "runner": { - "type": "constant", - "times": 4, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 2 - }, - "router": {} - } - }, - { - "args": { - "sleep": 0.1 - }, - "runner": { - "type": "constant", - "times": 4, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 2 - }, - "router": { - "routers_per_tenant": 1 - } - } - } - ] -} diff --git a/samples/tasks/contexts/router.yaml b/samples/tasks/contexts/router.yaml deleted file mode 100644 index 436cfa83..00000000 --- a/samples/tasks/contexts/router.yaml +++ /dev/null @@ -1,27 +0,0 @@ ---- - Dummy.openstack: - - - args: - sleep: 0.1 - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 2 - router: {} - - - args: - sleep: 0.1 - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 2 - router: - routers_per_tenant: 1 diff --git a/samples/tasks/contexts/sahara-cluster.json b/samples/tasks/contexts/sahara-cluster.json deleted file mode 100644 index 5a40ed5f..00000000 --- a/samples/tasks/contexts/sahara-cluster.json +++ /dev/null @@ -1,29 +0,0 @@ -{ - "Dummy.openstack": [ - { - "args": { - "sleep": 0.1 - }, - "runner": { - "type": "constant", - "times": 4, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - }, - "sahara_cluster": { - "master_flavor_id": "4", - "worker_flavor_id": "3", - "workers_count": 3, - "plugin_name": "vanilla", - "hadoop_version": "2.6.0", - "auto_security_group": true - }, - "network": {} - } - } - ] -} \ No newline at end of file diff --git a/samples/tasks/contexts/sahara-cluster.yaml b/samples/tasks/contexts/sahara-cluster.yaml deleted file mode 100644 index 68b915d6..00000000 --- a/samples/tasks/contexts/sahara-cluster.yaml +++ /dev/null @@ -1,21 +0,0 @@ ---- - Dummy.openstack: - - - args: - sleep: 0.1 - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 1 - sahara_cluster: - master_flavor_id: "4" - worker_flavor_id: "3" - workers_count: 3 - plugin_name: "vanilla" - hadoop_version: "2.6.0" - auto_security_group: True - network: {} diff --git a/samples/tasks/contexts/sahara-image.json b/samples/tasks/contexts/sahara-image.json deleted file mode 100644 index da85b6b0..00000000 --- a/samples/tasks/contexts/sahara-image.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "Dummy.openstack": [ - { - "args": { - "sleep": 0.1 - }, - "runner": { - "type": "constant", - "times": 4, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - }, - "sahara_image": { - "image_url": "http://sahara-files.mirantis.com/sahara-icehouse-vanilla-2.3.0-ubuntu-13.10.qcow2", - "username": "ubuntu", - "plugin_name": "vanilla", - "hadoop_version": "2.3.0" - }, - "network": {} - } - } - ] -} \ No newline at end of file diff --git a/samples/tasks/contexts/sahara-image.yaml b/samples/tasks/contexts/sahara-image.yaml deleted file mode 100644 index 97b01eac..00000000 --- 
a/samples/tasks/contexts/sahara-image.yaml +++ /dev/null @@ -1,19 +0,0 @@ ---- - Dummy.openstack: - - - args: - sleep: 0.1 - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 1 - sahara_image: - image_url: "http://sahara-files.mirantis.com/sahara-icehouse-vanilla-2.3.0-ubuntu-13.10.qcow2" - username: "ubuntu" - plugin_name: "vanilla" - hadoop_version: "2.3.0" - network: {} diff --git a/samples/tasks/contexts/sahara-input-data-sources.json b/samples/tasks/contexts/sahara-input-data-sources.json deleted file mode 100644 index 3bdadce4..00000000 --- a/samples/tasks/contexts/sahara-input-data-sources.json +++ /dev/null @@ -1,29 +0,0 @@ -{ - "Dummy.openstack": [ - { - "args": { - "sleep": 0.1 - }, - "runner": { - "type": "constant", - "times": 4, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - }, - "sahara_input_data_sources": { - "input_type": "hdfs", - "input_url": "/" - }, - "sahara_output_data_sources": { - "output_type": "hdfs", - "output_url_prefix": "/out_" - }, - "network": {} - } - } - ] -} \ No newline at end of file diff --git a/samples/tasks/contexts/sahara-input-data-sources.yaml b/samples/tasks/contexts/sahara-input-data-sources.yaml deleted file mode 100644 index ed96d811..00000000 --- a/samples/tasks/contexts/sahara-input-data-sources.yaml +++ /dev/null @@ -1,20 +0,0 @@ ---- - Dummy.openstack: - - - args: - sleep: 0.1 - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 1 - sahara_input_data_sources: - input_type: "hdfs" - input_url: "/" - sahara_output_data_sources: - output_type: "hdfs" - output_url_prefix: "/out_" - network: {} diff --git a/samples/tasks/contexts/sahara-job-binaries.json b/samples/tasks/contexts/sahara-job-binaries.json deleted file mode 100644 index 2e175389..00000000 --- a/samples/tasks/contexts/sahara-job-binaries.json +++ /dev/null @@ -1,54 +0,0 @@ -{ - "Dummy.openstack": [ - { - "args": { - "sleep": 0.1 - }, - "runner": { - "type": "constant", - "times": 4, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - }, - "sahara_job_binaries": { - "libs": [{ - "name": "tests.jar", - "download_url": "http://repo1.maven.org/maven2/org/apache/hadoop/hadoop-hdfs/2.6.0/hadoop-hdfs-2.6.0-tests.jar" - }] - }, - "network": {} - } - }, - { - "args": { - "sleep": 0.1 - }, - "runner": { - "type": "constant", - "times": 4, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - }, - "sahara_job_binaries": { - "mains": [{ - "name": "example.pig", - "download_url": "https://raw.githubusercontent.com/openstack/sahara/master/etc/edp-examples/pig-job/example.pig" - }], - "libs": [{ - "name": "udf.jar", - "download_url": "https://github.com/openstack/sahara/blob/master/etc/edp-examples/pig-job/udf.jar?raw=true" - }] - }, - "network": {} - } - } - ] -} diff --git a/samples/tasks/contexts/sahara-job-binaries.yaml b/samples/tasks/contexts/sahara-job-binaries.yaml deleted file mode 100644 index 47cdf97b..00000000 --- a/samples/tasks/contexts/sahara-job-binaries.yaml +++ /dev/null @@ -1,40 +0,0 @@ ---- - Dummy.openstack: - - - args: - sleep: 0.1 - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 1 - sahara_job_binaries: - libs: - - - name: "tests.jar" - download_url: "http://repo1.maven.org/maven2/org/apache/hadoop/hadoop-hdfs/2.6.0/hadoop-hdfs-2.6.0-tests.jar" - network: {} - - - args: - 
sleep: 0.1 - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 1 - sahara_job_binaries: - mains: - - - name: "example.pig" - download_url: "https://raw.githubusercontent.com/openstack/sahara/master/etc/edp-examples/pig-job/example.pig" - libs: - - - name: "udf.jar" - download_url: "https://github.com/openstack/sahara/blob/master/etc/edp-examples/pig-job/udf.jar?raw=true" - network: {} diff --git a/samples/tasks/contexts/sahara-output-data-sources.json b/samples/tasks/contexts/sahara-output-data-sources.json deleted file mode 100644 index 3bdadce4..00000000 --- a/samples/tasks/contexts/sahara-output-data-sources.json +++ /dev/null @@ -1,29 +0,0 @@ -{ - "Dummy.openstack": [ - { - "args": { - "sleep": 0.1 - }, - "runner": { - "type": "constant", - "times": 4, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - }, - "sahara_input_data_sources": { - "input_type": "hdfs", - "input_url": "/" - }, - "sahara_output_data_sources": { - "output_type": "hdfs", - "output_url_prefix": "/out_" - }, - "network": {} - } - } - ] -} \ No newline at end of file diff --git a/samples/tasks/contexts/sahara-output-data-sources.yaml b/samples/tasks/contexts/sahara-output-data-sources.yaml deleted file mode 100644 index ed96d811..00000000 --- a/samples/tasks/contexts/sahara-output-data-sources.yaml +++ /dev/null @@ -1,20 +0,0 @@ ---- - Dummy.openstack: - - - args: - sleep: 0.1 - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 1 - sahara_input_data_sources: - input_type: "hdfs" - input_url: "/" - sahara_output_data_sources: - output_type: "hdfs" - output_url_prefix: "/out_" - network: {} diff --git a/samples/tasks/contexts/servers.json b/samples/tasks/contexts/servers.json deleted file mode 100644 index 262e524c..00000000 --- a/samples/tasks/contexts/servers.json +++ /dev/null @@ -1,30 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} -{ - "Dummy.openstack": [ - { - "args": { - "sleep": 0.1 - }, - "runner": { - "type": "constant", - "times": 1, - "concurrency": 1 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 2 - }, - "servers": { - "flavor": { - "name": "{{flavor_name}}" - }, - "image": { - "name": "^cirros.*-disk$" - }, - "servers_per_tenant": 2 - } - } - } - ] -} diff --git a/samples/tasks/contexts/servers.yaml b/samples/tasks/contexts/servers.yaml deleted file mode 100644 index 0365cf16..00000000 --- a/samples/tasks/contexts/servers.yaml +++ /dev/null @@ -1,20 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} ---- - Dummy.openstack: - - - args: - sleep: 0.1 - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 2 - servers: - flavor: - name: "{{flavor_name}}" - image: - name: "^cirros.*-disk$" - servers_per_tenant: 2 \ No newline at end of file diff --git a/samples/tasks/contexts/stacks.json b/samples/tasks/contexts/stacks.json deleted file mode 100644 index 0ddc8d2f..00000000 --- a/samples/tasks/contexts/stacks.json +++ /dev/null @@ -1,24 +0,0 @@ -{ - "Dummy.openstack": [ - { - "args": { - "sleep": 0.1 - }, - "runner": { - "type": "constant", - "times": 4, - "concurrency": 1 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - }, - "stacks": { - "stacks_per_tenant": 2, - "resources_per_stack": 10 - } - } - } - ] -} diff --git a/samples/tasks/contexts/stacks.yaml b/samples/tasks/contexts/stacks.yaml deleted file mode 100644 index 
0080eb92..00000000 --- a/samples/tasks/contexts/stacks.yaml +++ /dev/null @@ -1,16 +0,0 @@ ---- - Dummy.openstack: - - - args: - sleep: 0.1 - runner: - type: "constant" - times: 4 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - stacks: - stacks_per_tenant: 2 - resources_per_stack: 10 \ No newline at end of file diff --git a/samples/tasks/contexts/swift-objects.json b/samples/tasks/contexts/swift-objects.json deleted file mode 100644 index 546f5a44..00000000 --- a/samples/tasks/contexts/swift-objects.json +++ /dev/null @@ -1,28 +0,0 @@ -{ - "Dummy.openstack": [ - { - "args": { - "sleep": 0.1 - }, - "runner": { - "type": "constant", - "times": 6, - "concurrency": 3 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - }, - "roles": [ - "admin" - ], - "swift_objects": { - "containers_per_tenant": 1, - "objects_per_container": 10, - "object_size": 1024 - } - } - } - ] -} diff --git a/samples/tasks/contexts/swift-objects.yaml b/samples/tasks/contexts/swift-objects.yaml deleted file mode 100644 index 23fe6380..00000000 --- a/samples/tasks/contexts/swift-objects.yaml +++ /dev/null @@ -1,19 +0,0 @@ ---- - Dummy.openstack: - - - args: - sleep: 0.1 - runner: - type: "constant" - times: 6 - concurrency: 3 - context: - users: - tenants: 1 - users_per_tenant: 1 - roles: - - "admin" - swift_objects: - containers_per_tenant: 1 - objects_per_container: 10 - object_size: 1024 diff --git a/samples/tasks/contexts/users.json b/samples/tasks/contexts/users.json deleted file mode 100644 index 108f3d44..00000000 --- a/samples/tasks/contexts/users.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "Dummy.openstack": [ - { - "args": { - "sleep": 0.1 - }, - "runner": { - "type": "constant", - "times": 4, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 2 - } - } - }, - { - "args": { - "sleep": 0.1 - }, - "runner": { - "type": "constant", - "times": 4, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 2, - "resource_management_workers": 1, - "project_domain": "project", - "user_domain": "demo", - "user_choice_method": "random" - } - } - } - ] -} diff --git a/samples/tasks/contexts/users.yaml b/samples/tasks/contexts/users.yaml deleted file mode 100644 index f2ccacac..00000000 --- a/samples/tasks/contexts/users.yaml +++ /dev/null @@ -1,28 +0,0 @@ ---- - Dummy.openstack: - - - args: - sleep: 0.1 - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 2 - - - args: - sleep: 0.1 - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 2 - resource_management_workers: 1 - project_domain: "project" - user_domain: "demo" - user_choice_method: "random" diff --git a/samples/tasks/contexts/volume-types.json b/samples/tasks/contexts/volume-types.json deleted file mode 100644 index 73a962b2..00000000 --- a/samples/tasks/contexts/volume-types.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "Dummy.openstack": [ - { - "args": { - "sleep": 0.1 - }, - "runner": { - "type": "constant", - "times": 4, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 2 - }, - "volume_types": ["test"] - } - } - ] -} \ No newline at end of file diff --git a/samples/tasks/contexts/volume-types.yaml b/samples/tasks/contexts/volume-types.yaml deleted file mode 100644 index a0e82cfc..00000000 --- a/samples/tasks/contexts/volume-types.yaml +++ /dev/null @@ -1,15 +0,0 @@ ---- - Dummy.openstack: - - - args: 
- sleep: 0.1 - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 2 - volume_types: - - test \ No newline at end of file diff --git a/samples/tasks/contexts/volumes.json b/samples/tasks/contexts/volumes.json deleted file mode 100644 index f5594650..00000000 --- a/samples/tasks/contexts/volumes.json +++ /dev/null @@ -1,24 +0,0 @@ -{ - "Dummy.openstack": [ - { - "args": { - "sleep": 0.1 - }, - "runner": { - "type": "constant", - "times": 4, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 2 - }, - "volumes": { - "size": 1, - "volumes_per_tenant": 4 - } - } - } - ] -} diff --git a/samples/tasks/contexts/volumes.yaml b/samples/tasks/contexts/volumes.yaml deleted file mode 100644 index 3980f211..00000000 --- a/samples/tasks/contexts/volumes.yaml +++ /dev/null @@ -1,16 +0,0 @@ ---- - Dummy.openstack: - - - args: - sleep: 0.1 - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 2 - volumes: - size: 1 - volumes_per_tenant: 4 diff --git a/samples/tasks/contexts/zones.json b/samples/tasks/contexts/zones.json deleted file mode 100644 index 7bd32339..00000000 --- a/samples/tasks/contexts/zones.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "Dummy.openstack": [ - { - "args": { - "sleep": 0.1 - }, - "runner": { - "type": "constant", - "times": 4, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 2 - }, - "zones": { - "zones_per_tenant": 1 - } - } - } - ] -} diff --git a/samples/tasks/contexts/zones.yaml b/samples/tasks/contexts/zones.yaml deleted file mode 100644 index 7ae5f5b4..00000000 --- a/samples/tasks/contexts/zones.yaml +++ /dev/null @@ -1,15 +0,0 @@ ---- - Dummy.openstack: - - - args: - sleep: 0.1 - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 2 - zones: - zones_per_tenant: 1 diff --git a/samples/tasks/runners/README.rst b/samples/tasks/runners/README.rst deleted file mode 100644 index 46e4cfa2..00000000 --- a/samples/tasks/runners/README.rst +++ /dev/null @@ -1,9 +0,0 @@ -Runners Configuration Samples -============================= - -This directory contains task runner configuration samples. - -Samples here are presented using methods of the Dummy scenario. To see samples -that use other scenarios, go to `samples/tasks/scenarios -`_.
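The runner samples that follow all attach a runner stanza to the lightweight Dummy scenario. As a minimal sketch (not part of the original samples), the Python snippet below loads one of these JSON sample files and prints the runner configuration it carries; the file path and key layout are taken from the samples themselves, and the path assumes a checkout in which the samples still exist.

```python
import json

# Hypothetical path: assumes a tree that still contains this sample file.
SAMPLE = "samples/tasks/runners/constant/constant-timeout.json"

with open(SAMPLE) as f:
    task = json.load(f)

# Each top-level key names "<ScenarioClass>.<method>"; its value is a list
# of workload configurations, each carrying a "runner" stanza.
for scenario, workloads in task.items():
    for workload in workloads:
        runner = workload["runner"]
        print(scenario, "->", runner["type"], runner)
```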
- diff --git a/samples/tasks/runners/constant/constant-for-duration.json b/samples/tasks/runners/constant/constant-for-duration.json deleted file mode 100644 index 2ae04576..00000000 --- a/samples/tasks/runners/constant/constant-for-duration.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "Dummy.dummy": [ - { - "args": { - "sleep": 5 - }, - "runner": { - "type": "constant_for_duration", - "concurrency": 5, - "duration": 30 - } - } - ] -} diff --git a/samples/tasks/runners/constant/constant-for-duration.yaml b/samples/tasks/runners/constant/constant-for-duration.yaml deleted file mode 100644 index 70846b6a..00000000 --- a/samples/tasks/runners/constant/constant-for-duration.yaml +++ /dev/null @@ -1,9 +0,0 @@ ---- - Dummy.dummy: - - - args: - sleep: 5 - runner: - type: "constant_for_duration" - concurrency: 5 - duration: 30 diff --git a/samples/tasks/runners/constant/constant-timeout.json b/samples/tasks/runners/constant/constant-timeout.json deleted file mode 100644 index 53917883..00000000 --- a/samples/tasks/runners/constant/constant-timeout.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "Dummy.dummy": [ - { - "args": { - "sleep": 10 - }, - "runner": { - "type": "constant", - "times": 20, - "concurrency": 5, - "timeout": 5 - } - } - ] -} diff --git a/samples/tasks/runners/constant/constant-timeout.yaml b/samples/tasks/runners/constant/constant-timeout.yaml deleted file mode 100644 index bda73ce1..00000000 --- a/samples/tasks/runners/constant/constant-timeout.yaml +++ /dev/null @@ -1,10 +0,0 @@ ---- - Dummy.dummy: - - - args: - sleep: 10 - runner: - type: "constant" - times: 20 - concurrency: 5 - timeout: 5 diff --git a/samples/tasks/runners/rps/rps.json b/samples/tasks/runners/rps/rps.json deleted file mode 100644 index 85bd892e..00000000 --- a/samples/tasks/runners/rps/rps.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "Dummy.dummy": [ - { - "args": { - "sleep": 5 - }, - "runner": { - "type": "rps", - "times": 20, - "rps": 3, - "timeout": 6 - } - } - ] -} diff --git a/samples/tasks/runners/rps/rps.yaml b/samples/tasks/runners/rps/rps.yaml deleted file mode 100644 index c7120f9c..00000000 --- a/samples/tasks/runners/rps/rps.yaml +++ /dev/null @@ -1,10 +0,0 @@ ---- - Dummy.dummy: - - - args: - sleep: 5 - runner: - type: "rps" - times: 20 - rps: 3 - timeout: 6 diff --git a/samples/tasks/runners/serial/serial.json b/samples/tasks/runners/serial/serial.json deleted file mode 100644 index 97c77455..00000000 --- a/samples/tasks/runners/serial/serial.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "Dummy.dummy": [ - { - "args": { - "sleep": 5 - }, - "runner": { - "type": "serial", - "times": 20 - } - } - ] -} diff --git a/samples/tasks/runners/serial/serial.yaml b/samples/tasks/runners/serial/serial.yaml deleted file mode 100644 index 3e760b59..00000000 --- a/samples/tasks/runners/serial/serial.yaml +++ /dev/null @@ -1,8 +0,0 @@ ---- - Dummy.dummy: - - - args: - sleep: 5 - runner: - type: "serial" - times: 20 diff --git a/samples/tasks/scenarios/README.rst b/samples/tasks/scenarios/README.rst deleted file mode 100644 index 492df129..00000000 --- a/samples/tasks/scenarios/README.rst +++ /dev/null @@ -1,11 +0,0 @@ -Scenarios Configuration Samples -=============================== - -This directory contains task scenario configuration samples. -To successfully run these samples you may need to substitute some values -in the "args" section with actual values for your deployment. - -Samples here are presented with the constant runner. To see samples that use -other runners, go to `samples/tasks/runners -`_.
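The README's note about substituting "args" values can be mechanized: Rally task files are Jinja2 templates (the servers.json sample earlier in this diff uses `{% set flavor_name = flavor_name or "m1.tiny" %}`), so deployment-specific values can be injected at render time. A minimal sketch of that substitution, with a hypothetical `ScenarioName.method` placeholder standing in for a real scenario:

```python
import json
from jinja2 import Template

# Inline stand-in for a task template; "ScenarioName.method" is hypothetical.
src = """
{% set flavor_name = flavor_name or "m1.tiny" %}
{"ScenarioName.method": [{"args": {"flavor": {"name": "{{flavor_name}}"}}}]}
"""

# With no value supplied, the "or" default keeps "m1.tiny"; passing
# flavor_name overrides it for a specific deployment.
rendered = Template(src).render(flavor_name="m1.small")
task = json.loads(rendered)
print(task["ScenarioName.method"][0]["args"]["flavor"]["name"])  # m1.small
```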
- diff --git a/samples/tasks/scenarios/authenticate/keystone.json b/samples/tasks/scenarios/authenticate/keystone.json deleted file mode 100644 index 381bb96d..00000000 --- a/samples/tasks/scenarios/authenticate/keystone.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "Authenticate.keystone": [ - { - "runner": { - "type": "constant", - "times": 100, - "concurrency": 5 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 50 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/authenticate/keystone.yaml b/samples/tasks/scenarios/authenticate/keystone.yaml deleted file mode 100644 index 96830561..00000000 --- a/samples/tasks/scenarios/authenticate/keystone.yaml +++ /dev/null @@ -1,11 +0,0 @@ ---- - Authenticate.keystone: - - - runner: - type: "constant" - times: 100 - concurrency: 5 - context: - users: - tenants: 3 - users_per_tenant: 50 diff --git a/samples/tasks/scenarios/authenticate/token-validate-ceilometer.json b/samples/tasks/scenarios/authenticate/token-validate-ceilometer.json deleted file mode 100644 index d915cc1d..00000000 --- a/samples/tasks/scenarios/authenticate/token-validate-ceilometer.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "Authenticate.validate_ceilometer": [ - { - "args": { - "repetitions": 2 - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 5 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 5 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/authenticate/token-validate-ceilometer.yaml b/samples/tasks/scenarios/authenticate/token-validate-ceilometer.yaml deleted file mode 100644 index 474150e2..00000000 --- a/samples/tasks/scenarios/authenticate/token-validate-ceilometer.yaml +++ /dev/null @@ -1,13 +0,0 @@ ---- - Authenticate.validate_ceilometer: - - - args: - repetitions: 2 - runner: - type: "constant" - times: 10 - concurrency: 5 - context: - users: - tenants: 3 - users_per_tenant: 5 diff --git a/samples/tasks/scenarios/authenticate/token-validate-cinder.json b/samples/tasks/scenarios/authenticate/token-validate-cinder.json deleted file mode 100644 index 7abc5195..00000000 --- a/samples/tasks/scenarios/authenticate/token-validate-cinder.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "Authenticate.validate_cinder": [ - { - "args": { - "repetitions": 2 - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 5 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 5 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/authenticate/token-validate-cinder.yaml b/samples/tasks/scenarios/authenticate/token-validate-cinder.yaml deleted file mode 100644 index cdb1fc5c..00000000 --- a/samples/tasks/scenarios/authenticate/token-validate-cinder.yaml +++ /dev/null @@ -1,13 +0,0 @@ ---- - Authenticate.validate_cinder: - - - args: - repetitions: 2 - runner: - type: "constant" - times: 10 - concurrency: 5 - context: - users: - tenants: 3 - users_per_tenant: 5 diff --git a/samples/tasks/scenarios/authenticate/token-validate-glance.json b/samples/tasks/scenarios/authenticate/token-validate-glance.json deleted file mode 100644 index fda7d429..00000000 --- a/samples/tasks/scenarios/authenticate/token-validate-glance.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "Authenticate.validate_glance": [ - { - "args": { - "repetitions": 2 - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 5 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 5 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/authenticate/token-validate-glance.yaml 
b/samples/tasks/scenarios/authenticate/token-validate-glance.yaml deleted file mode 100644 index 7b229ae5..00000000 --- a/samples/tasks/scenarios/authenticate/token-validate-glance.yaml +++ /dev/null @@ -1,13 +0,0 @@ ---- - Authenticate.validate_glance: - - - args: - repetitions: 2 - runner: - type: "constant" - times: 10 - concurrency: 5 - context: - users: - tenants: 3 - users_per_tenant: 5 diff --git a/samples/tasks/scenarios/authenticate/token-validate-heat.json b/samples/tasks/scenarios/authenticate/token-validate-heat.json deleted file mode 100644 index 4ba0e721..00000000 --- a/samples/tasks/scenarios/authenticate/token-validate-heat.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "Authenticate.validate_heat": [ - { - "args": { - "repetitions": 2 - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 5 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 5 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/authenticate/token-validate-heat.yaml b/samples/tasks/scenarios/authenticate/token-validate-heat.yaml deleted file mode 100644 index ec61061e..00000000 --- a/samples/tasks/scenarios/authenticate/token-validate-heat.yaml +++ /dev/null @@ -1,13 +0,0 @@ ---- - Authenticate.validate_heat: - - - args: - repetitions: 2 - runner: - type: "constant" - times: 10 - concurrency: 5 - context: - users: - tenants: 3 - users_per_tenant: 5 diff --git a/samples/tasks/scenarios/authenticate/token-validate-monasca.json b/samples/tasks/scenarios/authenticate/token-validate-monasca.json deleted file mode 100644 index 8164824d..00000000 --- a/samples/tasks/scenarios/authenticate/token-validate-monasca.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "Authenticate.validate_monasca": [ - { - "args": { - "repetitions": 2 - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 5 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 5 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/authenticate/token-validate-monasca.yaml b/samples/tasks/scenarios/authenticate/token-validate-monasca.yaml deleted file mode 100644 index ccd67b5e..00000000 --- a/samples/tasks/scenarios/authenticate/token-validate-monasca.yaml +++ /dev/null @@ -1,13 +0,0 @@ ---- - Authenticate.validate_monasca: - - - args: - repetitions: 2 - runner: - type: "constant" - times: 10 - concurrency: 5 - context: - users: - tenants: 3 - users_per_tenant: 5 diff --git a/samples/tasks/scenarios/authenticate/token-validate-neutron.json b/samples/tasks/scenarios/authenticate/token-validate-neutron.json deleted file mode 100644 index 3c8b983a..00000000 --- a/samples/tasks/scenarios/authenticate/token-validate-neutron.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "Authenticate.validate_neutron": [ - { - "args": { - "repetitions": 2 - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 5 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 5 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/authenticate/token-validate-neutron.yaml b/samples/tasks/scenarios/authenticate/token-validate-neutron.yaml deleted file mode 100644 index 4653431f..00000000 --- a/samples/tasks/scenarios/authenticate/token-validate-neutron.yaml +++ /dev/null @@ -1,13 +0,0 @@ ---- - Authenticate.validate_neutron: - - - args: - repetitions: 2 - runner: - type: "constant" - times: 10 - concurrency: 5 - context: - users: - tenants: 3 - users_per_tenant: 5 diff --git a/samples/tasks/scenarios/authenticate/token-validate-nova.json 
b/samples/tasks/scenarios/authenticate/token-validate-nova.json deleted file mode 100644 index 6a160cd4..00000000 --- a/samples/tasks/scenarios/authenticate/token-validate-nova.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "Authenticate.validate_nova": [ - { - "args": { - "repetitions": 2 - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 5 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 5 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/authenticate/token-validate-nova.yaml b/samples/tasks/scenarios/authenticate/token-validate-nova.yaml deleted file mode 100644 index 5ba767fe..00000000 --- a/samples/tasks/scenarios/authenticate/token-validate-nova.yaml +++ /dev/null @@ -1,13 +0,0 @@ ---- - Authenticate.validate_nova: - - - args: - repetitions: 2 - runner: - type: "constant" - times: 10 - concurrency: 5 - context: - users: - tenants: 3 - users_per_tenant: 5 diff --git a/samples/tasks/scenarios/ceilometer/all-list-meters.json b/samples/tasks/scenarios/ceilometer/all-list-meters.json deleted file mode 100644 index f3e2379e..00000000 --- a/samples/tasks/scenarios/ceilometer/all-list-meters.json +++ /dev/null @@ -1,38 +0,0 @@ -{ - "CeilometerMeters.list_meters": [ - { - "runner": { - "type": "constant", - "times": 10, - "concurrency": 1 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - }, - "ceilometer": { - "counter_name": "benchmark_meter", - "counter_type": "gauge", - "counter_unit": "%", - "counter_volume": 100, - "resources_per_tenant": 100, - "samples_per_resource": 100, - "timestamp_interval": 10, - "metadata_list": [ - {"status": "active", "name": "rally benchmark on", - "deleted": "false"}, - {"status": "terminated", "name": "rally benchmark off", - "deleted": "true"} - ] - } - }, - "args": { - "limit": 50, - "metadata_query": {"status": "terminated"} - } - } - ] -} - - diff --git a/samples/tasks/scenarios/ceilometer/all-list-meters.yaml b/samples/tasks/scenarios/ceilometer/all-list-meters.yaml deleted file mode 100644 index 38f11391..00000000 --- a/samples/tasks/scenarios/ceilometer/all-list-meters.yaml +++ /dev/null @@ -1,32 +0,0 @@ ---- - CeilometerMeters.list_meters: - - - runner: - type: constant - times: 10 - concurrency: 1 - context: - users: - tenants: 2 - users_per_tenant: 2 - ceilometer: - counter_name: "benchmark_meter" - counter_type: "gauge" - counter_unit: "%" - counter_volume: 100 - resources_per_tenant: 100 - samples_per_resource: 100 - timestamp_interval: 10 - metadata_list: - - - status: "active" - name: "rally benchmark on" - deleted: "false" - - - status: "terminated" - name: "rally benchmark off" - deleted: "true" - args: - limit: 50 - metadata_query: - status: "terminated" diff --git a/samples/tasks/scenarios/ceilometer/all-list-resources.json b/samples/tasks/scenarios/ceilometer/all-list-resources.json deleted file mode 100644 index 5becd4ce..00000000 --- a/samples/tasks/scenarios/ceilometer/all-list-resources.json +++ /dev/null @@ -1,37 +0,0 @@ -{ - "CeilometerResource.list_resources": [ - { - "runner": { - "type": "constant", - "times": 10, - "concurrency": 1 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - }, - "ceilometer": { - "counter_name": "benchmark_meter", - "counter_type": "gauge", - "counter_unit": "%", - "counter_volume": 100, - "resources_per_tenant": 100, - "samples_per_resource": 100, - "timestamp_interval": 10, - "metadata_list": [ - {"status": "active", "name": "rally benchmark on", - "deleted": "false"}, - {"status": "terminated", "name": "rally 
benchmark off", - "deleted": "true"} - ] - } - }, - "args": { - "limit":50, - "metadata_query": {"status": "terminated"} - } - } - ] -} - diff --git a/samples/tasks/scenarios/ceilometer/all-list-resources.yaml b/samples/tasks/scenarios/ceilometer/all-list-resources.yaml deleted file mode 100644 index 9e90afa3..00000000 --- a/samples/tasks/scenarios/ceilometer/all-list-resources.yaml +++ /dev/null @@ -1,32 +0,0 @@ ---- - CeilometerResource.list_resources: - - - runner: - type: "constant" - times: 10 - concurrency: 1 - context: - users: - tenants: 2 - users_per_tenant: 2 - ceilometer: - counter_name: "benchmark_meter" - counter_type: "gauge" - counter_unit: "%" - counter_volume: 100 - resources_per_tenant: 100 - samples_per_resource: 100 - timestamp_interval: 10 - metadata_list: - - - status: "active" - name: "rally benchmark on" - deleted: "false" - - - status: "terminated" - name: "rally benchmark off" - deleted: "true" - args: - limit: 50 - metadata_query: - status: "terminated" diff --git a/samples/tasks/scenarios/ceilometer/create-alarm-and-get-history.json b/samples/tasks/scenarios/ceilometer/create-alarm-and-get-history.json deleted file mode 100644 index 8432feb3..00000000 --- a/samples/tasks/scenarios/ceilometer/create-alarm-and-get-history.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "CeilometerAlarms.create_alarm_and_get_history": [ - { - "args": { - "meter_name": "ram_util", - "threshold": 10.0, - "type": "threshold", - "state": "ok", - "statistic": "avg", - "alarm_actions": ["http://localhost:8776/alarm"], - "ok_actions": ["http://localhost:8776/ok"], - "insufficient_data_actions": ["http://localhost:8776/notok"] - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 5 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/ceilometer/create-alarm-and-get-history.yaml b/samples/tasks/scenarios/ceilometer/create-alarm-and-get-history.yaml deleted file mode 100644 index 18422af5..00000000 --- a/samples/tasks/scenarios/ceilometer/create-alarm-and-get-history.yaml +++ /dev/null @@ -1,20 +0,0 @@ ---- - CeilometerAlarms.create_alarm_and_get_history: - - - args: - meter_name: "ram_util" - threshold: 10.0 - type: "threshold" - state: "ok" - statistic: "avg" - alarm_actions: ["http://localhost:8776/alarm"] - ok_actions: ["http://localhost:8776/ok"] - insufficient_data_actions: ["http://localhost:8776/notok"] - runner: - type: "constant" - times: 10 - concurrency: 5 - context: - users: - tenants: 2 - users_per_tenant: 2 diff --git a/samples/tasks/scenarios/ceilometer/create-alarm.json b/samples/tasks/scenarios/ceilometer/create-alarm.json deleted file mode 100644 index f5479910..00000000 --- a/samples/tasks/scenarios/ceilometer/create-alarm.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "CeilometerAlarms.create_alarm": [ - { - "args": { - "meter_name": "ram_util", - "threshold": 10.0, - "type": "threshold", - "statistic": "avg", - "alarm_actions": ["http://localhost:8776/alarm"], - "ok_actions": ["http://localhost:8776/ok"], - "insufficient_data_actions": ["http://localhost:8776/notok"] - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 1 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/ceilometer/create-alarm.yaml b/samples/tasks/scenarios/ceilometer/create-alarm.yaml deleted file mode 100644 index d3f60807..00000000 --- a/samples/tasks/scenarios/ceilometer/create-alarm.yaml +++ /dev/null @@ -1,19 +0,0 @@ 
---- - CeilometerAlarms.create_alarm: - - - args: - meter_name: "ram_util" - threshold: 10.0 - type: "threshold" - statistic: "avg" - alarm_actions: ["http://localhost:8776/alarm"] - ok_actions: ["http://localhost:8776/ok"] - insufficient_data_actions: ["http://localhost:8776/notok"] - runner: - type: "constant" - times: 10 - concurrency: 1 - context: - users: - tenants: 2 - users_per_tenant: 2 diff --git a/samples/tasks/scenarios/ceilometer/create-and-delete-alarm.json b/samples/tasks/scenarios/ceilometer/create-and-delete-alarm.json deleted file mode 100644 index 8d644a79..00000000 --- a/samples/tasks/scenarios/ceilometer/create-and-delete-alarm.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "CeilometerAlarms.create_and_delete_alarm": [ - { - "args": { - "meter_name": "ram_util", - "threshold": 10.0, - "type": "threshold", - "statistic": "avg", - "alarm_actions": ["http://localhost:8776/alarm"], - "ok_actions": ["http://localhost:8776/ok"], - "insufficient_data_actions": ["http://localhost:8776/notok"] - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 1 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/ceilometer/create-and-delete-alarm.yaml b/samples/tasks/scenarios/ceilometer/create-and-delete-alarm.yaml deleted file mode 100644 index 3cdb615d..00000000 --- a/samples/tasks/scenarios/ceilometer/create-and-delete-alarm.yaml +++ /dev/null @@ -1,19 +0,0 @@ ---- - CeilometerAlarms.create_and_delete_alarm: - - - args: - meter_name: "ram_util" - threshold: 10.0 - type: "threshold" - statistic: "avg" - alarm_actions: ["http://localhost:8776/alarm"] - ok_actions: ["http://localhost:8776/ok"] - insufficient_data_actions: ["http://localhost:8776/notok"] - runner: - type: "constant" - times: 10 - concurrency: 1 - context: - users: - tenants: 2 - users_per_tenant: 2 diff --git a/samples/tasks/scenarios/ceilometer/create-and-get-alarm.json b/samples/tasks/scenarios/ceilometer/create-and-get-alarm.json deleted file mode 100644 index 002789e7..00000000 --- a/samples/tasks/scenarios/ceilometer/create-and-get-alarm.json +++ /dev/null @@ -1,31 +0,0 @@ -{ - "CeilometerAlarms.create_and_get_alarm": [ - { - "args": { - "meter_name": "ram_util", - "threshold": 10.0, - "type": "threshold", - "statistic": "avg", - "alarm_actions": ["http://localhost:8776/alarm"], - "ok_actions": ["http://localhost:8776/ok"], - "insufficient_data_actions": ["http://localhost:8776/notok"] - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/ceilometer/create-and-get-alarm.yaml b/samples/tasks/scenarios/ceilometer/create-and-get-alarm.yaml deleted file mode 100644 index 5ad7c024..00000000 --- a/samples/tasks/scenarios/ceilometer/create-and-get-alarm.yaml +++ /dev/null @@ -1,22 +0,0 @@ ---- - CeilometerAlarms.create_and_get_alarm: - - - args: - meter_name: "ram_util" - threshold: 10.0 - type: "threshold" - statistic: "avg" - alarm_actions: ["http://localhost:8776/alarm"] - ok_actions: ["http://localhost:8776/ok"] - insufficient_data_actions: ["http://localhost:8776/notok"] - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/ceilometer/create-and-list-alarm.json 
b/samples/tasks/scenarios/ceilometer/create-and-list-alarm.json
deleted file mode 100644
index 1f8b4b5b..00000000
--- a/samples/tasks/scenarios/ceilometer/create-and-list-alarm.json
+++ /dev/null
@@ -1,26 +0,0 @@
-{
-    "CeilometerAlarms.create_and_list_alarm": [
-        {
-            "args": {
-                "meter_name": "ram_util",
-                "threshold": 10.0,
-                "type": "threshold",
-                "statistic": "avg",
-                "alarm_actions": ["http://localhost:8776/alarm"],
-                "ok_actions": ["http://localhost:8776/ok"],
-                "insufficient_data_actions": ["http://localhost:8776/notok"]
-            },
-            "runner": {
-                "type": "constant",
-                "times": 10,
-                "concurrency": 1
-            },
-            "context": {
-                "users": {
-                    "tenants": 2,
-                    "users_per_tenant": 2
-                }
-            }
-        }
-    ]
-}
diff --git a/samples/tasks/scenarios/ceilometer/create-and-list-alarm.yaml b/samples/tasks/scenarios/ceilometer/create-and-list-alarm.yaml
deleted file mode 100644
index 43c384c9..00000000
--- a/samples/tasks/scenarios/ceilometer/create-and-list-alarm.yaml
+++ /dev/null
@@ -1,19 +0,0 @@
----
-  CeilometerAlarms.create_and_list_alarm:
-    -
-      args:
-        meter_name: "ram_util"
-        threshold: 10.0
-        type: "threshold"
-        statistic: "avg"
-        alarm_actions: ["http://localhost:8776/alarm"]
-        ok_actions: ["http://localhost:8776/ok"]
-        insufficient_data_actions: ["http://localhost:8776/notok"]
-      runner:
-        type: "constant"
-        times: 10
-        concurrency: 1
-      context:
-        users:
-          tenants: 2
-          users_per_tenant: 2
diff --git a/samples/tasks/scenarios/ceilometer/create-and-query-alarm-history.json b/samples/tasks/scenarios/ceilometer/create-and-query-alarm-history.json
deleted file mode 100644
index bec806c8..00000000
--- a/samples/tasks/scenarios/ceilometer/create-and-query-alarm-history.json
+++ /dev/null
@@ -1,28 +0,0 @@
-{
-    "CeilometerQueries.create_and_query_alarm_history": [
-        {
-            "args": {
-                "orderby": null,
-                "limit": null,
-                "meter_name": "ram_util",
-                "threshold": 10.0,
-                "type": "threshold",
-                "statistic": "avg",
-                "alarm_actions": ["http://localhost:8776/alarm"],
-                "ok_actions": ["http://localhost:8776/ok"],
-                "insufficient_data_actions": ["http://localhost:8776/notok"]
-            },
-            "runner": {
-                "type": "constant",
-                "times": 100,
-                "concurrency": 10
-            },
-            "context": {
-                "users": {
-                    "tenants": 2,
-                    "users_per_tenant": 2
-                }
-            }
-        }
-    ]
-}
diff --git a/samples/tasks/scenarios/ceilometer/create-and-query-alarm-history.yaml b/samples/tasks/scenarios/ceilometer/create-and-query-alarm-history.yaml
deleted file mode 100644
index d763a756..00000000
--- a/samples/tasks/scenarios/ceilometer/create-and-query-alarm-history.yaml
+++ /dev/null
@@ -1,21 +0,0 @@
----
-  CeilometerQueries.create_and_query_alarm_history:
-    -
-      args:
-        orderby: !!null
-        limit: !!null
-        meter_name: "ram_util"
-        threshold: 10.0
-        type: "threshold"
-        statistic: "avg"
-        alarm_actions: ["http://localhost:8776/alarm"]
-        ok_actions: ["http://localhost:8776/ok"]
-        insufficient_data_actions: ["http://localhost:8776/notok"]
-      runner:
-        type: "constant"
-        times: 100
-        concurrency: 10
-      context:
-        users:
-          tenants: 2
-          users_per_tenant: 2
diff --git a/samples/tasks/scenarios/ceilometer/create-and-query-alarms.json b/samples/tasks/scenarios/ceilometer/create-and-query-alarms.json
deleted file mode 100644
index 0562199c..00000000
--- a/samples/tasks/scenarios/ceilometer/create-and-query-alarms.json
+++ /dev/null
@@ -1,29 +0,0 @@
-{
-    "CeilometerQueries.create_and_query_alarms": [
-        {
-            "args": {
-                "filter": {"and": [{"!=": {"state": "dummy_state"}},{"=": {"type": "threshold"}}]},
-                "orderby": null,
-                "limit": 10,
-                "meter_name": "ram_util",
-                "threshold": 10.0,
-                "type": "threshold",
-                "statistic": "avg",
-                "alarm_actions": ["http://localhost:8776/alarm"],
-                "ok_actions": ["http://localhost:8776/ok"],
-                "insufficient_data_actions": ["http://localhost:8776/notok"]
-            },
-            "runner": {
-                "type": "constant",
-                "times": 100,
-                "concurrency": 10
-            },
-            "context": {
-                "users": {
-                    "tenants": 2,
-                    "users_per_tenant": 2
-                }
-            }
-        }
-    ]
-}
diff --git a/samples/tasks/scenarios/ceilometer/create-and-query-alarms.yaml b/samples/tasks/scenarios/ceilometer/create-and-query-alarms.yaml
deleted file mode 100644
index 070d27b4..00000000
--- a/samples/tasks/scenarios/ceilometer/create-and-query-alarms.yaml
+++ /dev/null
@@ -1,22 +0,0 @@
----
-  CeilometerQueries.create_and_query_alarms:
-    -
-      args:
-        filter: {"and": [{"!=": {"state": "dummy_state"}},{"=": {"type": "threshold"}}]}
-        orderby: !!null
-        limit: 10
-        meter_name: "ram_util"
-        threshold: 10.0
-        type: "threshold"
-        statistic: "avg"
-        alarm_actions: ["http://localhost:8776/alarm"]
-        ok_actions: ["http://localhost:8776/ok"]
-        insufficient_data_actions: ["http://localhost:8776/notok"]
-      runner:
-        type: "constant"
-        times: 100
-        concurrency: 10
-      context:
-        users:
-          tenants: 2
-          users_per_tenant: 2
diff --git a/samples/tasks/scenarios/ceilometer/create-and-query-samples.json b/samples/tasks/scenarios/ceilometer/create-and-query-samples.json
deleted file mode 100644
index a5de20ff..00000000
--- a/samples/tasks/scenarios/ceilometer/create-and-query-samples.json
+++ /dev/null
@@ -1,27 +0,0 @@
-{
-    "CeilometerQueries.create_and_query_samples": [
-        {
-            "args": {
-                "filter": {"=": {"counter_unit": "instance"}},
-                "orderby": null,
-                "limit": 10,
-                "counter_name": "cpu_util",
-                "counter_type": "gauge",
-                "counter_unit": "instance",
-                "counter_volume": 1.0,
-                "resource_id": "resource_id"
-            },
-            "runner": {
-                "type": "constant",
-                "times": 100,
-                "concurrency": 10
-            },
-            "context": {
-                "users": {
-                    "tenants": 2,
-                    "users_per_tenant": 2
-                }
-            }
-        }
-    ]
-}
diff --git a/samples/tasks/scenarios/ceilometer/create-and-query-samples.yaml b/samples/tasks/scenarios/ceilometer/create-and-query-samples.yaml
deleted file mode 100644
index 957690ca..00000000
--- a/samples/tasks/scenarios/ceilometer/create-and-query-samples.yaml
+++ /dev/null
@@ -1,20 +0,0 @@
----
-  CeilometerQueries.create_and_query_samples:
-    -
-      args:
-        filter: {"=": {"counter_unit": "instance"}}
-        orderby: !!null
-        limit: 10
-        counter_name: "cpu_util"
-        counter_type: "gauge"
-        counter_unit: "instance"
-        counter_volume: 1.0
-        resource_id: "resource_id"
-      runner:
-        type: "constant"
-        times: 100
-        concurrency: 10
-      context:
-        users:
-          tenants: 2
-          users_per_tenant: 2
diff --git a/samples/tasks/scenarios/ceilometer/create-and-update-alarm.json b/samples/tasks/scenarios/ceilometer/create-and-update-alarm.json
deleted file mode 100644
index 21376b92..00000000
--- a/samples/tasks/scenarios/ceilometer/create-and-update-alarm.json
+++ /dev/null
@@ -1,26 +0,0 @@
-{
-    "CeilometerAlarms.create_and_update_alarm": [
-        {
-            "args": {
-                "meter_name": "ram_util",
-                "threshold": 10.0,
-                "type": "threshold",
-                "statistic": "avg",
-                "alarm_actions": ["http://localhost:8776/alarm"],
-                "ok_actions": ["http://localhost:8776/ok"],
-                "insufficient_data_actions": ["http://localhost:8776/notok"]
-            },
-            "runner": {
-                "type": "constant",
-                "times": 10,
-                "concurrency": 1
-            },
-            "context": {
-                "users": {
-                    "tenants": 2,
-                    "users_per_tenant": 2
-                }
-            }
-        }
-    ]
-}
diff --git a/samples/tasks/scenarios/ceilometer/create-and-update-alarm.yaml b/samples/tasks/scenarios/ceilometer/create-and-update-alarm.yaml
deleted file mode 100644
index e0a9e6cc..00000000
--- a/samples/tasks/scenarios/ceilometer/create-and-update-alarm.yaml
+++ /dev/null
@@ -1,19 +0,0 @@
----
-  CeilometerAlarms.create_and_update_alarm:
-    -
-      args:
-        meter_name: "ram_util"
-        threshold: 10.0
-        type: "threshold"
-        statistic: "avg"
-        alarm_actions: ["http://localhost:8776/alarm"]
-        ok_actions: ["http://localhost:8776/ok"]
-        insufficient_data_actions: ["http://localhost:8776/notok"]
-      runner:
-        type: "constant"
-        times: 10
-        concurrency: 1
-      context:
-        users:
-          tenants: 2
-          users_per_tenant: 2
diff --git a/samples/tasks/scenarios/ceilometer/create-meter-and-get-stats.json b/samples/tasks/scenarios/ceilometer/create-meter-and-get-stats.json
deleted file mode 100644
index b667ded0..00000000
--- a/samples/tasks/scenarios/ceilometer/create-meter-and-get-stats.json
+++ /dev/null
@@ -1,24 +0,0 @@
-{
-    "CeilometerStats.create_meter_and_get_stats": [
-        {
-            "args": {
-                "user_id": "user-id",
-                "resource_id": "resource-id",
-                "counter_volume": 1.0,
-                "counter_unit": "",
-                "counter_type": "cumulative"
-            },
-            "runner": {
-                "type": "constant",
-                "times": 200,
-                "concurrency": 5
-            },
-            "context": {
-                "users": {
-                    "tenants": 2,
-                    "users_per_tenant": 2
-                }
-            }
-        }
-    ]
-}
\ No newline at end of file
diff --git a/samples/tasks/scenarios/ceilometer/create-meter-and-get-stats.yaml b/samples/tasks/scenarios/ceilometer/create-meter-and-get-stats.yaml
deleted file mode 100644
index cfd3ff2a..00000000
--- a/samples/tasks/scenarios/ceilometer/create-meter-and-get-stats.yaml
+++ /dev/null
@@ -1,17 +0,0 @@
----
-  CeilometerStats.create_meter_and_get_stats:
-    -
-      args:
-        user_id: "user-id"
-        resource_id: "resource-id"
-        counter_volume: 1.0
-        counter_unit: ""
-        counter_type: "cumulative"
-      runner:
-        type: "constant"
-        times: 200
-        concurrency: 5
-      context:
-        users:
-          tenants: 2
-          users_per_tenant: 2
diff --git a/samples/tasks/scenarios/ceilometer/create-user-and-get-event.json b/samples/tasks/scenarios/ceilometer/create-user-and-get-event.json
deleted file mode 100644
index 9dd0d2cc..00000000
--- a/samples/tasks/scenarios/ceilometer/create-user-and-get-event.json
+++ /dev/null
@@ -1,17 +0,0 @@
-{
-    "CeilometerEvents.create_user_and_get_event": [
-        {
-            "runner": {
-                "type": "constant",
-                "times": 10,
-                "concurrency": 10
-            },
-            "context": {
-                "users": {
-                    "tenants": 2,
-                    "users_per_tenant": 2
-                }
-            }
-        }
-    ]
-}
\ No newline at end of file
diff --git a/samples/tasks/scenarios/ceilometer/create-user-and-get-event.yaml b/samples/tasks/scenarios/ceilometer/create-user-and-get-event.yaml
deleted file mode 100644
index 137042b9..00000000
--- a/samples/tasks/scenarios/ceilometer/create-user-and-get-event.yaml
+++ /dev/null
@@ -1,11 +0,0 @@
----
-  CeilometerEvents.create_user_and_get_event:
-    -
-      runner:
-        type: "constant"
-        times: 10
-        concurrency: 10
-      context:
-        users:
-          tenants: 2
-          users_per_tenant: 2
\ No newline at end of file
diff --git a/samples/tasks/scenarios/ceilometer/create-user-and-list-event-types.json b/samples/tasks/scenarios/ceilometer/create-user-and-list-event-types.json
deleted file mode 100644
index 74a415ff..00000000
--- a/samples/tasks/scenarios/ceilometer/create-user-and-list-event-types.json
+++ /dev/null
@@ -1,17 +0,0 @@
-{
-    "CeilometerEvents.create_user_and_list_event_types": [
-        {
-            "runner": {
-                "type": "constant",
-                "times": 10,
-                "concurrency": 10
-            },
-            "context": {
-                "users": {
-                    "tenants": 2,
-                    "users_per_tenant": 2
-                }
-            }
-        }
-    ]
-}
\ No newline at end of file
diff --git a/samples/tasks/scenarios/ceilometer/create-user-and-list-event-types.yaml b/samples/tasks/scenarios/ceilometer/create-user-and-list-event-types.yaml
deleted file mode 100644
index f95bc33c..00000000
--- a/samples/tasks/scenarios/ceilometer/create-user-and-list-event-types.yaml
+++ /dev/null
@@ -1,11 +0,0 @@
----
-  CeilometerEvents.create_user_and_list_event_types:
-    -
-      runner:
-        type: "constant"
-        times: 10
-        concurrency: 10
-      context:
-        users:
-          tenants: 2
-          users_per_tenant: 2
\ No newline at end of file
diff --git a/samples/tasks/scenarios/ceilometer/create-user-and-list-events.json b/samples/tasks/scenarios/ceilometer/create-user-and-list-events.json
deleted file mode 100644
index 3b86e603..00000000
--- a/samples/tasks/scenarios/ceilometer/create-user-and-list-events.json
+++ /dev/null
@@ -1,17 +0,0 @@
-{
-    "CeilometerEvents.create_user_and_list_events": [
-        {
-            "runner": {
-                "type": "constant",
-                "times": 10,
-                "concurrency": 10
-            },
-            "context": {
-                "users": {
-                    "tenants": 2,
-                    "users_per_tenant": 2
-                }
-            }
-        }
-    ]
-}
\ No newline at end of file
diff --git a/samples/tasks/scenarios/ceilometer/create-user-and-list-events.yaml b/samples/tasks/scenarios/ceilometer/create-user-and-list-events.yaml
deleted file mode 100644
index 6d6e12ac..00000000
--- a/samples/tasks/scenarios/ceilometer/create-user-and-list-events.yaml
+++ /dev/null
@@ -1,11 +0,0 @@
----
-  CeilometerEvents.create_user_and_list_events:
-    -
-      runner:
-        type: "constant"
-        times: 10
-        concurrency: 10
-      context:
-        users:
-          tenants: 2
-          users_per_tenant: 2
diff --git a/samples/tasks/scenarios/ceilometer/create-user-and-list-trait-descriptions.json b/samples/tasks/scenarios/ceilometer/create-user-and-list-trait-descriptions.json
deleted file mode 100644
index 9862b770..00000000
--- a/samples/tasks/scenarios/ceilometer/create-user-and-list-trait-descriptions.json
+++ /dev/null
@@ -1,17 +0,0 @@
-{
-    "CeilometerTraits.create_user_and_list_trait_descriptions": [
-        {
-            "runner": {
-                "type": "constant",
-                "times": 10,
-                "concurrency": 10
-            },
-            "context": {
-                "users": {
-                    "tenants": 2,
-                    "users_per_tenant": 2
-                }
-            }
-        }
-    ]
-}
\ No newline at end of file
diff --git a/samples/tasks/scenarios/ceilometer/create-user-and-list-trait-descriptions.yaml b/samples/tasks/scenarios/ceilometer/create-user-and-list-trait-descriptions.yaml
deleted file mode 100644
index 4c02f09d..00000000
--- a/samples/tasks/scenarios/ceilometer/create-user-and-list-trait-descriptions.yaml
+++ /dev/null
@@ -1,11 +0,0 @@
----
-  CeilometerTraits.create_user_and_list_trait_descriptions:
-    -
-      runner:
-        type: "constant"
-        times: 10
-        concurrency: 10
-      context:
-        users:
-          tenants: 2
-          users_per_tenant: 2
\ No newline at end of file
diff --git a/samples/tasks/scenarios/ceilometer/create-user-and-list-traits.json b/samples/tasks/scenarios/ceilometer/create-user-and-list-traits.json
deleted file mode 100644
index 93f9e446..00000000
--- a/samples/tasks/scenarios/ceilometer/create-user-and-list-traits.json
+++ /dev/null
@@ -1,17 +0,0 @@
-{
-    "CeilometerTraits.create_user_and_list_traits": [
-        {
-            "runner": {
-                "type": "constant",
-                "times": 10,
-                "concurrency": 10
-            },
-            "context": {
-                "users": {
-                    "tenants": 2,
-                    "users_per_tenant": 2
-                }
-            }
-        }
-    ]
-}
\ No newline at end of file
diff --git a/samples/tasks/scenarios/ceilometer/create-user-and-list-traits.yaml b/samples/tasks/scenarios/ceilometer/create-user-and-list-traits.yaml
deleted file mode 100644
index 6e8720a0..00000000
--- a/samples/tasks/scenarios/ceilometer/create-user-and-list-traits.yaml
+++ /dev/null
@@ -1,11 +0,0 @@
----
-  CeilometerTraits.create_user_and_list_traits:
-    -
-      runner:
-        type: "constant"
-        times: 10
-        concurrency: 10
-      context:
-        users:
-          tenants: 2
-          users_per_tenant: 2
\ No newline at end of file
diff --git a/samples/tasks/scenarios/ceilometer/get-stats.json b/samples/tasks/scenarios/ceilometer/get-stats.json
deleted file mode 100644
index df0c9572..00000000
--- a/samples/tasks/scenarios/ceilometer/get-stats.json
+++ /dev/null
@@ -1,42 +0,0 @@
-{
-    "CeilometerStats.get_stats": [
-        {
-            "runner": {
-                "type": "constant",
-                "times": 10,
-                "concurrency": 1
-            },
-            "context": {
-                "users": {
-                    "tenants": 2,
-                    "users_per_tenant": 2
-                },
-                "ceilometer": {
-                    "counter_name": "benchmark_meter",
-                    "counter_type": "gauge",
-                    "counter_unit": "%",
-                    "counter_volume": 100,
-                    "resources_per_tenant": 100,
-                    "samples_per_resource": 100,
-                    "timestamp_interval": 10,
-                    "metadata_list": [
-                        {"status": "active", "name": "rally benchmark on",
-                         "deleted": "false"},
-                        {"status": "terminated", "name": "rally benchmark off",
-                         "deleted": "true"}
-                    ]
-                }
-            },
-            "args": {
-                "meter_name": "benchmark_meter",
-                "filter_by_user_id": true,
-                "filter_by_project_id": true,
-                "filter_by_resource_id": true,
-                "metadata_query": {"status": "terminated"},
-                "period": 300,
-                "groupby": "resource_id"
-            }
-        }
-    ]
-}
-
diff --git a/samples/tasks/scenarios/ceilometer/get-stats.yaml b/samples/tasks/scenarios/ceilometer/get-stats.yaml
deleted file mode 100644
index 754e5a05..00000000
--- a/samples/tasks/scenarios/ceilometer/get-stats.yaml
+++ /dev/null
@@ -1,37 +0,0 @@
----
-  CeilometerStats.get_stats:
-    -
-      runner:
-        type: constant
-        times: 10
-        concurrency: 1
-      context:
-        users:
-          tenants: 2
-          users_per_tenant: 2
-        ceilometer:
-          counter_name: "benchmark_meter"
-          counter_type: "gauge"
-          counter_unit: "%"
-          counter_volume: 100
-          resources_per_tenant: 100
-          samples_per_resource: 100
-          timestamp_interval: 10
-          metadata_list:
-            -
-              status: "active"
-              name: "rally benchmark on"
-              deleted: "false"
-            -
-              status: "terminated"
-              name: "rally benchmark off"
-              deleted: "true"
-      args:
-        meter_name: "benchmark_meter"
-        filter_by_user_id: true
-        filter_by_project_id: true
-        filter_by_resource_id: true
-        metadata_query:
-          status: "terminated"
-        period: 300
-        groupby: "resource_id"
diff --git a/samples/tasks/scenarios/ceilometer/get-tenant-resources.json b/samples/tasks/scenarios/ceilometer/get-tenant-resources.json
deleted file mode 100644
index a1e7467a..00000000
--- a/samples/tasks/scenarios/ceilometer/get-tenant-resources.json
+++ /dev/null
@@ -1,23 +0,0 @@
-{
-    "CeilometerResource.get_tenant_resources": [
-        {
-            "runner": {
-                "type": "constant",
-                "times": 10,
-                "concurrency": 5
-            },
-            "context": {
-                "users": {
-                    "tenants": 2,
-                    "users_per_tenant": 2
-                },
-                "ceilometer": {
-                    "counter_name": "cpu_util",
-                    "counter_type": "gauge",
-                    "counter_unit": "instance",
-                    "counter_volume": 1.0
-                }
-            }
-        }
-    ]
-}
\ No newline at end of file
diff --git a/samples/tasks/scenarios/ceilometer/get-tenant-resources.yaml b/samples/tasks/scenarios/ceilometer/get-tenant-resources.yaml
deleted file mode 100644
index ae2797f8..00000000
--- a/samples/tasks/scenarios/ceilometer/get-tenant-resources.yaml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-  CeilometerResource.get_tenant_resources:
-    -
-      runner:
-        type: "constant"
-        times: 10
-        concurrency: 5
-      context:
-        users:
-          tenants: 2
-          users_per_tenant: 2
-        ceilometer:
-          counter_name: "cpu_util"
-          counter_type: "gauge"
-          counter_volume: 1.0
-          counter_unit: "instance"
diff --git a/samples/tasks/scenarios/ceilometer/list-alarms.json b/samples/tasks/scenarios/ceilometer/list-alarms.json
deleted file mode 100644
index a5b23c78..00000000
--- a/samples/tasks/scenarios/ceilometer/list-alarms.json
+++ /dev/null
@@ -1,18 +0,0 @@
-{
-    "CeilometerAlarms.list_alarms": [
-        {
-            "runner": {
-                "type": "constant",
-                "times": 10,
-                "concurrency": 1
-            },
-            "context": {
-                "users": {
-                    "tenants": 2,
-                    "users_per_tenant": 2
-                }
-            }
-        }
-    ]
-}
-
diff --git a/samples/tasks/scenarios/ceilometer/list-alarms.yaml b/samples/tasks/scenarios/ceilometer/list-alarms.yaml
deleted file mode 100644
index b611e9eb..00000000
--- a/samples/tasks/scenarios/ceilometer/list-alarms.yaml
+++ /dev/null
@@ -1,11 +0,0 @@
----
-  CeilometerAlarms.list_alarms:
-    -
-      runner:
-        type: "constant"
-        times: 10
-        concurrency: 1
-      context:
-        users:
-          tenants: 2
-          users_per_tenant: 2
diff --git a/samples/tasks/scenarios/ceilometer/list-matched-samples.json b/samples/tasks/scenarios/ceilometer/list-matched-samples.json
deleted file mode 100644
index 259ef408..00000000
--- a/samples/tasks/scenarios/ceilometer/list-matched-samples.json
+++ /dev/null
@@ -1,41 +0,0 @@
-{
-    "CeilometerSamples.list_matched_samples": [
-        {
-            "runner": {
-                "type": "constant",
-                "times": 10,
-                "concurrency": 2
-            },
-            "context": {
-                "users": {
-                    "tenants": 2,
-                    "users_per_tenant": 2
-                },
-                "ceilometer": {
-                    "counter_name": "cpu_util",
-                    "counter_type": "gauge",
-                    "counter_unit": "instance",
-                    "counter_volume": 1.0,
-                    "resources_per_tenant": 100,
-                    "samples_per_resource": 100,
-                    "timestamp_interval": 60,
-                    "metadata_list": [
-                        {"status": "active", "name": "fake_resource",
-                         "deleted": "False",
-                         "created_at": "2015-09-04T12:34:19.000000"},
-                        {"status": "not_active", "name": "fake_resource_1",
-                         "deleted": "False",
-                         "created_at": "2015-09-10T06:55:12.000000"}
-                    ]
-                }
-            },
-            "args":{
-                "filter_by_user_id": true,
-                "filter_by_project_id": true,
-                "filter_by_resource_id": true,
-                "limit": 50,
-                "metadata_query": {"status": "not_active"}
-            }
-        }
-    ]
-}
\ No newline at end of file
diff --git a/samples/tasks/scenarios/ceilometer/list-matched-samples.yaml b/samples/tasks/scenarios/ceilometer/list-matched-samples.yaml
deleted file mode 100644
index 9f51c2c8..00000000
--- a/samples/tasks/scenarios/ceilometer/list-matched-samples.yaml
+++ /dev/null
@@ -1,35 +0,0 @@
----
-  CeilometerSamples.list_matched_samples:
-    -
-      runner:
-        type: "constant"
-        times: 10
-        concurrency: 2
-      context:
-        users:
-          tenants: 2
-          users_per_tenant: 2
-        ceilometer:
-          counter_name: "cpu_util"
-          counter_type: "gauge"
-          counter_unit: "instance"
-          counter_volume: 1.0
-          resources_per_tenant: 100
-          samples_per_resource: 100
-          timestamp_interval: 60
-          metadata_list:
-            - status: "active"
-              name: "fake_resource"
-              deleted: "False"
-              created_at: "2015-09-04T12:34:19.000000"
-            - status: "not_active"
-              name: "fake_resource_1"
-              deleted: "False"
-              created_at: "2015-09-10T06:55:12.000000"
-      args:
-        limit: 50
-        filter_by_user_id: true
-        filter_by_project_id: true
-        filter_by_resource_id: true
-        metadata_query:
-          status: "not_active"
diff --git a/samples/tasks/scenarios/ceilometer/list-meters.json b/samples/tasks/scenarios/ceilometer/list-meters.json
deleted file mode 100644
index 4ba79509..00000000
--- a/samples/tasks/scenarios/ceilometer/list-meters.json
+++ /dev/null
@@ -1,41 +0,0 @@
-{
-    "CeilometerMeters.list_matched_meters": [
-        {
-            "runner": {
-                "type": "constant",
-                "times": 10,
-                "concurrency": 1
-            },
-            "context": {
-                "users": {
-                    "tenants": 2,
-                    "users_per_tenant": 2
-                },
-                "ceilometer": {
-                    "counter_name": "benchmark_meter",
-                    "counter_type": "gauge",
-                    "counter_unit": "%",
-                    "counter_volume": 100,
-                    "resources_per_tenant": 100,
-                    "samples_per_resource": 100,
-                    "timestamp_interval": 10,
-                    "metadata_list": [
-                        {"status": "active", "name": "rally benchmark on",
-                         "deleted": "false"},
-                        {"status": "terminated", "name": "rally benchmark off",
-                         "deleted": "true"}
-                    ]
-                }
-            },
-            "args": {
-                "filter_by_user_id": true,
-                "filter_by_project_id": true,
-                "filter_by_resource_id": true,
-                "limit": 50,
-                "metadata_query": {"status": "terminated"}
-            }
-        }
-    ]
-}
-
-
diff --git a/samples/tasks/scenarios/ceilometer/list-meters.yaml b/samples/tasks/scenarios/ceilometer/list-meters.yaml
deleted file mode 100644
index 763a7188..00000000
--- a/samples/tasks/scenarios/ceilometer/list-meters.yaml
+++ /dev/null
@@ -1,35 +0,0 @@
----
-  CeilometerMeters.list_matched_meters:
-    -
-      runner:
-        type: constant
-        times: 10
-        concurrency: 1
-      context:
-        users:
-          tenants: 2
-          users_per_tenant: 2
-        ceilometer:
-          counter_name: "benchmark_meter"
-          counter_type: "gauge"
-          counter_unit: "%"
-          counter_volume: 100
-          resources_per_tenant: 100
-          samples_per_resource: 100
-          timestamp_interval: 10
-          metadata_list:
-            -
-              status: "active"
-              name: "rally benchmark on"
-              deleted: "false"
-            -
-              status: "terminated"
-              name: "rally benchmark off"
-              deleted: "true"
-      args:
-        limit: 50
-        filter_by_user_id: true
-        filter_by_project_id: true
-        filter_by_resource_id: true
-        metadata_query:
-          status: "terminated"
diff --git a/samples/tasks/scenarios/ceilometer/list-resources.json b/samples/tasks/scenarios/ceilometer/list-resources.json
deleted file mode 100644
index d0d634e9..00000000
--- a/samples/tasks/scenarios/ceilometer/list-resources.json
+++ /dev/null
@@ -1,39 +0,0 @@
-{
-    "CeilometerResource.list_matched_resources": [
-        {
-            "runner": {
-                "type": "constant",
-                "times": 10,
-                "concurrency": 1
-            },
-            "context": {
-                "users": {
-                    "tenants": 2,
-                    "users_per_tenant": 2
-                },
-                "ceilometer": {
-                    "counter_name": "benchmark_meter",
-                    "counter_type": "gauge",
-                    "counter_unit": "%",
-                    "counter_volume": 100,
-                    "resources_per_tenant": 100,
-                    "samples_per_resource": 100,
-                    "timestamp_interval": 10,
-                    "metadata_list": [
-                        {"status": "active", "name": "rally benchmark on",
-                         "deleted": "false"},
-                        {"status": "terminated", "name": "rally benchmark off",
-                         "deleted": "true"}
-                    ]
-                }
-            },
-            "args": {
-                "limit":50,
-                "metadata_query": {"status": "terminated"},
-                "filter_by_user_id": true,
-                "filter_by_project_id": true
-            }
-        }
-    ]
-}
-
diff --git a/samples/tasks/scenarios/ceilometer/list-resources.yaml b/samples/tasks/scenarios/ceilometer/list-resources.yaml
deleted file mode 100644
index 964da69d..00000000
--- a/samples/tasks/scenarios/ceilometer/list-resources.yaml
+++ /dev/null
@@ -1,34 +0,0 @@
----
-  CeilometerResource.list_matched_resources:
-    -
-      runner:
-        type: "constant"
-        times: 10
-        concurrency: 1
-      context:
-        users:
-          tenants: 2
-          users_per_tenant: 2
-        ceilometer:
-          counter_name: "benchmark_meter"
-          counter_type: "gauge"
-          counter_unit: "%"
-          counter_volume: 100
-          resources_per_tenant: 100
-          samples_per_resource: 100
-          timestamp_interval: 10
-          metadata_list:
-            -
-              status: "active"
-              name: "rally benchmark on"
-              deleted: "false"
-            -
-              status: "terminated"
-              name: "rally benchmark off"
-              deleted: "true"
-      args:
-        limit: 50
-        filter_by_user_id: true
-        filter_by_project_id: true
-        metadata_query:
-          status: "terminated"
diff --git a/samples/tasks/scenarios/ceilometer/list-samples.json b/samples/tasks/scenarios/ceilometer/list-samples.json
deleted file mode 100644
index 72025bd5..00000000
--- a/samples/tasks/scenarios/ceilometer/list-samples.json
+++ /dev/null
@@ -1,39 +0,0 @@
-{
-    "CeilometerSamples.list_samples": [
-        {
-            "runner": {
-                "type": "constant",
-                "times": 10,
-                "concurrency": 2
-            },
-            "context": {
-                "users": {
-                    "tenants": 2,
-                    "users_per_tenant": 2
-                },
-                "ceilometer": {
-                    "counter_name": "cpu_util",
-                    "counter_type": "gauge",
-                    "counter_unit": "instance",
-                    "counter_volume": 1.0,
-                    "resources_per_tenant": 100,
-                    "samples_per_resource": 100,
-                    "timestamp_interval": 60,
-                    "metadata_list": [
-                        {"status": "active", "name": "fake_resource",
-                         "deleted": "False",
-                         "created_at": "2015-09-04T12:34:19.000000"},
-                        {"status": "not_active", "name": "fake_resource_1",
-                         "deleted": "False",
-                         "created_at": "2015-09-10T06:55:12.000000"}
-                    ],
-                    "batch_size": 5
-                }
-            },
-            "args":{
-                "limit": 50,
-                "metadata_query": {"status": "not_active"}
-            }
-        }
-    ]
-}
diff --git a/samples/tasks/scenarios/ceilometer/list-samples.yaml b/samples/tasks/scenarios/ceilometer/list-samples.yaml
deleted file mode 100644
index f1ea20b8..00000000
--- a/samples/tasks/scenarios/ceilometer/list-samples.yaml
+++ /dev/null
@@ -1,33 +0,0 @@
----
-  CeilometerSamples.list_samples:
-    -
-      runner:
-        type: "constant"
-        times: 10
-        concurrency: 2
-      context:
-        users:
-          tenants: 2
-          users_per_tenant: 2
-        ceilometer:
-          counter_name: "cpu_util"
-          counter_type: "gauge"
-          counter_unit: "instance"
-          counter_volume: 1.0
-          resources_per_tenant: 100
-          samples_per_resource: 100
-          timestamp_interval: 60
-          metadata_list:
-            - status: "active"
-              name: "fake_resource"
-              deleted: "False"
-              created_at: "2015-09-04T12:34:19.000000"
-            - status: "not_active"
-              name: "fake_resource_1"
-              deleted: "False"
-              created_at: "2015-09-10T06:55:12.000000"
-          batch_size: 5
-      args:
-        limit: 50
-        metadata_query:
-          status: "not_active"
diff --git a/samples/tasks/scenarios/cinder/create-and-accept-transfer.json b/samples/tasks/scenarios/cinder/create-and-accept-transfer.json
deleted file mode 100644
index 339fcf3b..00000000
--- a/samples/tasks/scenarios/cinder/create-and-accept-transfer.json
+++ /dev/null
@@ -1,25 +0,0 @@
-{
-    "CinderVolumes.create_and_accept_transfer": [
-        {
-            "args": {
-                "size": 1
-            },
-            "runner": {
-                "type": "constant",
-                "times": 3,
-                "concurrency": 2
-            },
-            "context": {
-                "users": {
-                    "tenants": 2,
-                    "users_per_tenant": 2
-                }
-            },
-            "sla": {
-                "failure_rate": {
-                    "max": 0
-                }
-            }
-        }
-    ]
-}
diff --git a/samples/tasks/scenarios/cinder/create-and-accept-transfer.yaml b/samples/tasks/scenarios/cinder/create-and-accept-transfer.yaml
deleted file mode 100644
index 32a15eaf..00000000
--- a/samples/tasks/scenarios/cinder/create-and-accept-transfer.yaml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-  CinderVolumes.create_and_accept_transfer:
-    -
-      args:
-        size: 1
-      runner:
-        type: "constant"
-        times: 3
-        concurrency: 2
-      context:
-        users:
-          tenants: 2
-          users_per_tenant: 2
-      sla:
-        failure_rate:
-          max: 0
diff --git a/samples/tasks/scenarios/cinder/create-and-attach-volume.json b/samples/tasks/scenarios/cinder/create-and-attach-volume.json
deleted file mode 100644
index 0d255db0..00000000
--- a/samples/tasks/scenarios/cinder/create-and-attach-volume.json
+++ /dev/null
@@ -1,59 +0,0 @@
-{% set flavor_name = flavor_name or "m1.tiny" %}
-{% set availability_zone = availability_zone or "nova" %}
-{
-    "CinderVolumes.create_and_attach_volume": [
-        {
-            "args": {
-                "size": 10,
-                "image": {
-                    "name": "^cirros.*-disk$"
-                },
-                "flavor": {
-                    "name": "{{flavor_name}}"
-                },
-                "create_volume_params": {
-                    "availability_zone": "{{availability_zone}}"
-                }
-            },
-            "runner": {
-                "type": "constant",
-                "times": 5,
-                "concurrency": 1
-            },
-            "context": {
-                "users": {
-                    "tenants": 2,
-                    "users_per_tenant": 2
-                }
-            }
-        },
-        {
-            "args": {
-                "size": {
-                    "min": 1,
-                    "max": 5
-                },
-                "flavor": {
-                    "name": "{{flavor_name}}"
-                },
-                "image": {
-                    "name": "^cirros.*-disk$"
-                },
-                "create_volume_params": {
-                    "availability_zone": "{{availability_zone}}"
-                }
-            },
-            "runner": {
-                "type": "constant",
-                "times": 5,
-                "concurrency": 1
-            },
-            "context": {
-                "users": {
-                    "tenants": 2,
-                    "users_per_tenant": 2
-                }
-            }
-        }
-    ]
-}
diff --git a/samples/tasks/scenarios/cinder/create-and-attach-volume.yaml b/samples/tasks/scenarios/cinder/create-and-attach-volume.yaml
deleted file mode 100644
index 1ffe3b4f..00000000
--- a/samples/tasks/scenarios/cinder/create-and-attach-volume.yaml
+++ /dev/null
@@ -1,40 +0,0 @@
-{% set flavor_name = flavor_name or "m1.tiny" %}
-{% set availability_zone = availability_zone or "nova" %}
----
-  CinderVolumes.create_and_attach_volume:
-    -
-      args:
-        size: 10
-        image:
-          name: "^cirros.*-disk$"
-        flavor:
-          name: "{{flavor_name}}"
-        create_volume_params:
-          availability_zone: "{{availability_zone}}"
-      runner:
-        type: "constant"
-        times: 5
-        concurrency: 1
-      context:
-        users:
-          tenants: 2
-          users_per_tenant: 2
-    -
-      args:
-        size:
-          min: 1
-          max: 5
-        flavor:
-          name: "{{flavor_name}}"
-        image:
-          name: "^cirros.*-disk$"
-        create_volume_params:
-          availability_zone: "{{availability_zone}}"
-      runner:
-        type: "constant"
-        times: 5
-        concurrency: 1
-      context:
-        users:
-          tenants: 2
-          users_per_tenant: 2
diff --git a/samples/tasks/scenarios/cinder/create-and-delete-encryption-type.json b/samples/tasks/scenarios/cinder/create-and-delete-encryption-type.json
deleted file mode 100644
index 46edc50a..00000000
--- a/samples/tasks/scenarios/cinder/create-and-delete-encryption-type.json
+++ /dev/null
@@ -1,34 +0,0 @@
-{
-    "CinderVolumeTypes.create_and_delete_encryption_type": [
-        {
-            "args": {
-                "provider": "LuksEncryptor",
-                "cipher": "aes-xts-plain64",
-                "key_size": 512,
-                "control_location": "front-end"
-            },
-            "runner": {
-                "type": "constant",
-                "times": 4,
-                "concurrency": 1
-            },
-            "context": {
-                "users": {
-                    "tenants": 2,
-                    "users_per_tenant": 2
-                },
-                "volume_types": [
-                    "test_type1",
-                    "test_type2",
-                    "test_type3",
-                    "test_type4"
-                ]
-            },
-            "sla": {
-                "failure_rate": {
-                    "max": 0
-                }
-            }
-        }
-    ]
-}
diff --git a/samples/tasks/scenarios/cinder/create-and-delete-encryption-type.yaml b/samples/tasks/scenarios/cinder/create-and-delete-encryption-type.yaml
deleted file mode 100644
index 1357880b..00000000
--- a/samples/tasks/scenarios/cinder/create-and-delete-encryption-type.yaml
+++ /dev/null
@@ -1,24 +0,0 @@
-  CinderVolumeTypes.create_and_delete_encryption_type:
-    -
-      args:
-        provider: "LuksEncryptor"
-        cipher: "aes-xts-plain64"
-        key_size: 512
-        control_location: "front-end"
-      runner:
-        type: "constant"
-        times: 4
-        concurrency: 1
-      context:
-        users:
-          tenants: 2
-          users_per_tenant: 2
-        volume_types: [
-          "test_type1",
-          "test_type2",
-          "test_type3",
-          "test_type4"
-        ]
-      sla:
-        failure_rate:
-          max: 0
diff --git a/samples/tasks/scenarios/cinder/create-and-delete-snapshot.json b/samples/tasks/scenarios/cinder/create-and-delete-snapshot.json
deleted file mode 100644
index fb128bae..00000000
--- a/samples/tasks/scenarios/cinder/create-and-delete-snapshot.json
+++ /dev/null
@@ -1,23 +0,0 @@
-{
-    "CinderVolumes.create_and_delete_snapshot": [
-        {
-            "args": {
-                "force": false
-            },
-            "runner": {
-                "type": "constant",
-                "times": 3,
-                "concurrency": 2
-            },
-            "context": {
-                "users": {
-                    "tenants": 2,
-                    "users_per_tenant": 2
-                },
-                "volumes": {
-                    "size": 1
-                }
-            }
-        }
-    ]
-}
diff --git a/samples/tasks/scenarios/cinder/create-and-delete-snapshot.yaml b/samples/tasks/scenarios/cinder/create-and-delete-snapshot.yaml
deleted file mode 100644
index 9d0974b3..00000000
--- a/samples/tasks/scenarios/cinder/create-and-delete-snapshot.yaml
+++ /dev/null
@@ -1,15 +0,0 @@
----
-  CinderVolumes.create_and_delete_snapshot:
-    -
-      args:
-        force: false
-      runner:
-        type: "constant"
-        times: 3
-        concurrency: 2
-      context:
-        users:
-          tenants: 2
-          users_per_tenant: 2
-        volumes:
-          size: 1
diff --git a/samples/tasks/scenarios/cinder/create-and-delete-volume-type.json b/samples/tasks/scenarios/cinder/create-and-delete-volume-type.json
deleted file mode 100644
index 7514b7c8..00000000
--- a/samples/tasks/scenarios/cinder/create-and-delete-volume-type.json
+++ /dev/null
@@ -1,25 +0,0 @@
-{
-    "CinderVolumeTypes.create_and_delete_volume_type": [
-        {
-            "args": {
-                "description": "rally tests creating types"
-            },
-            "runner": {
-                "type": "constant",
-                "times": 5,
-                "concurrency": 2
-            },
-            "context": {
-                "users": {
-                    "tenants": 2,
-                    "users_per_tenant": 2
-                }
-            },
-            "sla": {
-                "failure_rate": {
-                    "max": 0
-                }
-            }
-        }
-    ]
-}
diff --git a/samples/tasks/scenarios/cinder/create-and-delete-volume-type.yaml b/samples/tasks/scenarios/cinder/create-and-delete-volume-type.yaml
deleted file mode 100644
index 225a7c9d..00000000
--- a/samples/tasks/scenarios/cinder/create-and-delete-volume-type.yaml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-  CinderVolumeTypes.create_and_delete_volume_type:
-    -
-      args:
-        description: "rally tests creating types"
-      runner:
-        type: "constant"
-        times: 5
-        concurrency: 2
-      context:
-        users:
-          tenants: 2
-          users_per_tenant: 2
-      sla:
-        failure_rate:
-          max: 0
diff --git a/samples/tasks/scenarios/cinder/create-and-delete-volume.json b/samples/tasks/scenarios/cinder/create-and-delete-volume.json
deleted file mode 100644
index 9c096aac..00000000
--- a/samples/tasks/scenarios/cinder/create-and-delete-volume.json
+++ /dev/null
@@ -1,39 +0,0 @@
-{
-    "CinderVolumes.create_and_delete_volume": [
-        {
-            "args": {
-                "size": 1
-            },
-            "runner": {
-                "type": "constant",
-                "times": 3,
-                "concurrency": 2
-            },
-            "context": {
-                "users": {
-                    "tenants": 2,
-                    "users_per_tenant": 2
-                }
-            }
-        },
-        {
-            "args": {
-                "size": {
-                    "min": 1,
-                    "max": 5
-                }
-            },
-            "runner": {
-                "type": "constant",
-                "times": 3,
-                "concurrency": 2
-            },
-            "context": {
-                "users": {
-                    "tenants": 2,
-                    "users_per_tenant": 2
-                }
-            }
-        }
-    ]
-}
diff --git a/samples/tasks/scenarios/cinder/create-and-delete-volume.yaml b/samples/tasks/scenarios/cinder/create-and-delete-volume.yaml
deleted file mode 100644
index 957c86b5..00000000
--- a/samples/tasks/scenarios/cinder/create-and-delete-volume.yaml
+++ /dev/null
@@ -1,26 +0,0 @@
----
-  CinderVolumes.create_and_delete_volume:
-    -
-      args:
-        size: 1
-      runner:
-        type: "constant"
-        times: 3
-        concurrency: 2
-      context:
-        users:
-          tenants: 2
-          users_per_tenant: 2
-    -
-      args:
-        size:
-          min: 1
-          max: 5
-      runner:
-        type: "constant"
-        times: 3
-        concurrency: 2
-      context:
-        users:
-          tenants: 2
-          users_per_tenant: 2
\ No newline at end of file
diff --git a/samples/tasks/scenarios/cinder/create-and-extend-volume.json b/samples/tasks/scenarios/cinder/create-and-extend-volume.json
deleted file mode 100644
index 18231757..00000000
--- a/samples/tasks/scenarios/cinder/create-and-extend-volume.json
+++ /dev/null
@@ -1,44 +0,0 @@
-{
-    "CinderVolumes.create_and_extend_volume": [
-        {
-            "args": {
-                "size": 1,
-                "new_size": 2
-            },
-            "runner": {
-                "type": "constant",
-                "times": 2,
-                "concurrency": 2
-            },
-            "context": {
-                "users": {
-                    "tenants": 1,
-                    "users_per_tenant": 1
-                }
-            }
-        },
-        {
-            "args": {
-                "size": {
-                    "min": 1,
-                    "max": 5
-                },
-                "new_size": {
-                    "min": 6,
-                    "max": 10
-                }
-            },
-            "runner": {
-                "type": "constant",
-                "times": 2,
-                "concurrency": 2
-            },
-            "context": {
-                "users": {
-                    "tenants": 1,
-                    "users_per_tenant": 1
-                }
-            }
-        }
-    ]
-}
diff --git a/samples/tasks/scenarios/cinder/create-and-extend-volume.yaml b/samples/tasks/scenarios/cinder/create-and-extend-volume.yaml
deleted file mode 100644
index 1caa2a34..00000000
--- a/samples/tasks/scenarios/cinder/create-and-extend-volume.yaml
+++ /dev/null
@@ -1,30 +0,0 @@
----
-  CinderVolumes.create_and_extend_volume:
-    -
-      args:
-        size: 1
-        new_size: 2
-      runner:
-        type: "constant"
-        times: 2
-        concurrency: 2
-      context:
-        users:
-          tenants: 1
-          users_per_tenant: 1
-    -
-      args:
-        size:
-          min: 1
-          max: 5
-        new_size:
-          min: 6
-          max: 10
-      runner:
-        type: "constant"
-        times: 2
-        concurrency: 2
-      context:
-        users:
-          tenants: 1
-          users_per_tenant: 1
\ No newline at end of file
diff --git a/samples/tasks/scenarios/cinder/create-and-get-qos.json b/samples/tasks/scenarios/cinder/create-and-get-qos.json
deleted file mode 100644
index 6a10f111..00000000
--- a/samples/tasks/scenarios/cinder/create-and-get-qos.json
+++ /dev/null
@@ -1,27 +0,0 @@
-{
-    "CinderQos.create_and_get_qos": [
-        {
-            "args": {
-                "consumer": "both",
-                "write_iops_sec": "10",
-                "read_iops_sec": "1000"
-            },
-            "runner": {
-                "type": "constant",
-                "times": 5,
-                "concurrency": 2
-            },
-            "context": {
-                "users": {
-                    "tenants": 2,
-                    "users_per_tenant": 2
-                }
-            },
-            "sla": {
-                "failure_rate": {
-                    "max": 0
-                }
-            }
-        }
-    ]
-}
diff --git a/samples/tasks/scenarios/cinder/create-and-get-qos.yaml b/samples/tasks/scenarios/cinder/create-and-get-qos.yaml
deleted file mode 100644
index 1ba83b49..00000000
--- a/samples/tasks/scenarios/cinder/create-and-get-qos.yaml
+++ /dev/null
@@ -1,18 +0,0 @@
----
-  CinderQos.create_and_get_qos:
-    -
-      args:
-        consumer: "both"
-        write_iops_sec: "10"
-        read_iops_sec: "1000"
-      runner:
-        type: "constant"
-        times: 5
-        concurrency: 2
-      context:
-        users:
-          tenants: 2
-          users_per_tenant: 2
-      sla:
-        failure_rate:
-          max: 0
diff --git a/samples/tasks/scenarios/cinder/create-and-get-volume-type.json b/samples/tasks/scenarios/cinder/create-and-get-volume-type.json
deleted file mode 100644
index 269023f2..00000000
--- a/samples/tasks/scenarios/cinder/create-and-get-volume-type.json
+++ /dev/null
@@ -1,25 +0,0 @@
-{
-    "CinderVolumeTypes.create_and_get_volume_type": [
-        {
-            "args": {
-                "description": "rally tests creating types"
-            },
-            "runner": {
-                "type": "constant",
-                "times": 5,
-                "concurrency": 2
-            },
-            "context": {
-                "users": {
-                    "tenants": 2,
-                    "users_per_tenant": 2
-                }
-            },
-            "sla": {
-                "failure_rate": {
-                    "max": 0
-                }
-            }
-        }
-    ]
-}
diff --git a/samples/tasks/scenarios/cinder/create-and-get-volume-type.yaml b/samples/tasks/scenarios/cinder/create-and-get-volume-type.yaml
deleted file mode 100644
index 8aad5bcc..00000000
--- a/samples/tasks/scenarios/cinder/create-and-get-volume-type.yaml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-  CinderVolumeTypes.create_and_get_volume_type:
-    -
-      args:
-        description: "rally tests creating types"
-      runner:
-        type: "constant"
-        times: 5
-        concurrency: 2
-      context:
-        users:
-          tenants: 2
-          users_per_tenant: 2
-      sla:
-        failure_rate:
-          max: 0
diff --git a/samples/tasks/scenarios/cinder/create-and-get-volume.json b/samples/tasks/scenarios/cinder/create-and-get-volume.json
deleted file mode 100644
index 1825b876..00000000
--- a/samples/tasks/scenarios/cinder/create-and-get-volume.json
+++ /dev/null
@@ -1,49 +0,0 @@
-{
-    "CinderVolumes.create_and_get_volume": [
-        {
-            "args": {
-                "size": 1
-            },
-            "runner": {
-                "type": "constant",
-                "times": 5,
-                "concurrency": 2
-            },
-            "context": {
-                "users": {
-                    "tenants": 2,
-                    "users_per_tenant": 2
-                }
-            },
-            "sla": {
-                "failure_rate": {
-                    "max": 0
-                }
-            }
-        },
-        {
-            "args": {
-                "size": {
-                    "min": 1,
-                    "max": 5
-                }
-            },
-            "runner": {
-                "type": "constant",
-                "times": 5,
-                "concurrency": 2
-            },
-            "context": {
-                "users": {
-                    "tenants": 2,
-                    "users_per_tenant": 2
-                }
-            },
-            "sla": {
-                "failure_rate": {
-                    "max": 0
-                }
-            }
-        }
-    ]
-}
diff --git a/samples/tasks/scenarios/cinder/create-and-get-volume.yaml b/samples/tasks/scenarios/cinder/create-and-get-volume.yaml
deleted file mode 100644
index f320e958..00000000
--- a/samples/tasks/scenarios/cinder/create-and-get-volume.yaml
+++ /dev/null
@@ -1,32 +0,0 @@
----
-  CinderVolumes.create_and_get_volume:
-    -
-      args:
-        size: 1
-      runner:
-        type: "constant"
-        times: 5
-        concurrency: 2
-      context:
-        users:
-          tenants: 2
-          users_per_tenant: 2
-      sla:
-        failure_rate:
-          max: 0
-    -
-      args:
-        size:
-          min: 1
-          max: 5
-      runner:
-        type: "constant"
-        times: 5
-        concurrency: 2
-      context:
-        users:
-          tenants: 2
-          users_per_tenant: 2
-      sla:
-        failure_rate:
-          max: 0
diff --git a/samples/tasks/scenarios/cinder/create-and-list-encryption-type.json b/samples/tasks/scenarios/cinder/create-and-list-encryption-type.json
deleted file mode 100644
index 0233c4a2..00000000
--- a/samples/tasks/scenarios/cinder/create-and-list-encryption-type.json
+++ /dev/null
@@ -1,34 +0,0 @@
-{
-    "CinderVolumeTypes.create_and_list_encryption_type": [
-        {
-            "args": {
-                "provider": "LuksEncryptor",
-                "cipher": "aes-xts-plain64",
-                "key_size": 512,
-                "control_location": "front-end"
-            },
-            "runner": {
-                "type": "constant",
-                "times": 4,
-                "concurrency": 1
-            },
-            "context": {
-                "users": {
-                    "tenants": 2,
-                    "users_per_tenant": 2
-                },
-                "volume_types": [
-                    "test_type1",
-                    "test_type2",
-                    "test_type3",
-                    "test_type4"
-                ]
-            },
-            "sla": {
-                "failure_rate": {
-                    "max": 0
-                }
-            }
-        }
-    ]
-}
diff --git a/samples/tasks/scenarios/cinder/create-and-list-encryption-type.yaml b/samples/tasks/scenarios/cinder/create-and-list-encryption-type.yaml
deleted file mode 100644
index 193006e2..00000000
--- a/samples/tasks/scenarios/cinder/create-and-list-encryption-type.yaml
+++ /dev/null
@@ -1,24 +0,0 @@
-  CinderVolumeTypes.create_and_list_encryption_type:
-    -
-      args:
-        provider: "LuksEncryptor"
-        cipher: "aes-xts-plain64"
-        key_size: 512
-        control_location: "front-end"
-      runner:
-        type: "constant"
-        times: 4
-        concurrency: 1
-      context:
-        users:
-          tenants: 2
-          users_per_tenant: 2
-        volume_types: [
-          "test_type1",
-          "test_type2",
-          "test_type3",
-          "test_type4"
-        ]
-      sla:
-        failure_rate:
-          max: 0
diff --git a/samples/tasks/scenarios/cinder/create-and-list-qos.json b/samples/tasks/scenarios/cinder/create-and-list-qos.json
deleted file mode 100644
index e48d4a8c..00000000
--- a/samples/tasks/scenarios/cinder/create-and-list-qos.json
+++ /dev/null
@@ -1,27 +0,0 @@
-{
-    "CinderQos.create_and_list_qos": [
-        {
-            "args": {
-                "consumer": "both",
-                "write_iops_sec": "10",
-                "read_iops_sec": "1000"
-            },
-            "runner": {
-                "type": "constant",
-                "times": 5,
-                "concurrency": 2
-            },
-            "context": {
-                "users": {
-                    "tenants": 2,
-                    "users_per_tenant": 2
-                }
-            },
-            "sla": {
-                "failure_rate": {
-                    "max": 0
-                }
-            }
-        }
-    ]
-}
diff --git a/samples/tasks/scenarios/cinder/create-and-list-qos.yaml b/samples/tasks/scenarios/cinder/create-and-list-qos.yaml
deleted file mode 100644
index 294199a0..00000000
--- a/samples/tasks/scenarios/cinder/create-and-list-qos.yaml
+++ /dev/null
@@ -1,18 +0,0 @@
----
-  CinderQos.create_and_list_qos:
-    -
-      args:
-        consumer: "both"
-        write_iops_sec: "10"
-        read_iops_sec: "1000"
-      runner:
-        type: "constant"
-        times: 5
-        concurrency: 2
-      context:
-        users:
-          tenants: 2
-          users_per_tenant: 2
-      sla:
-        failure_rate:
-          max: 0
diff --git a/samples/tasks/scenarios/cinder/create-and-list-snapshots.json b/samples/tasks/scenarios/cinder/create-and-list-snapshots.json
deleted file mode 100644
index 6853645b..00000000
--- a/samples/tasks/scenarios/cinder/create-and-list-snapshots.json
+++ /dev/null
@@ -1,24 +0,0 @@
-{
-    "CinderVolumes.create_and_list_snapshots": [
-        {
-            "args": {
-                "force": false,
-                "detailed": true
-            },
-            "runner": {
-                "type": "constant",
-                "times": 2,
-                "concurrency": 2
-            },
-            "context": {
-                "users": {
-                    "tenants": 1,
-                    "users_per_tenant": 1
-                },
-                "volumes": {
-                    "size": 1
-                }
-            }
-        }
-    ]
-}
diff --git a/samples/tasks/scenarios/cinder/create-and-list-snapshots.yaml b/samples/tasks/scenarios/cinder/create-and-list-snapshots.yaml
deleted file mode 100644
index a5d1e975..00000000
--- a/samples/tasks/scenarios/cinder/create-and-list-snapshots.yaml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-  CinderVolumes.create_and_list_snapshots:
-    -
-      args:
-        force: False
-        detailed: True
-      runner:
-        type: "constant"
-        times: 2
-        concurrency: 2
-      context:
-        users:
-          tenants: 1
-          users_per_tenant: 1
-        volumes:
-          size: 1
diff --git a/samples/tasks/scenarios/cinder/create-and-list-volume-backups.json b/samples/tasks/scenarios/cinder/create-and-list-volume-backups.json
deleted file mode 100644
index e41680ef..00000000
--- a/samples/tasks/scenarios/cinder/create-and-list-volume-backups.json
+++ /dev/null
@@ -1,25 +0,0 @@
-{
-    "CinderVolumes.create_and_list_volume_backups": [
-        {
-            "args": {
-                "size": 1,
-                "detailed": true,
-                "do_delete": true,
-                "create_volume_kwargs": {},
-                "create_backup_kwargs": {}
-            },
-            "runner": {
-                "type": "constant",
-                "times": 2,
-                "concurrency": 2
-            },
-            "context": {
-                "users": {
-                    "tenants": 1,
-                    "users_per_tenant": 1
-                },
-                "roles": ["Member"]
-            }
-        }
-    ]
-}
diff --git a/samples/tasks/scenarios/cinder/create-and-list-volume-backups.yaml b/samples/tasks/scenarios/cinder/create-and-list-volume-backups.yaml
deleted file mode 100644
index 40370dff..00000000
--- a/samples/tasks/scenarios/cinder/create-and-list-volume-backups.yaml
+++ /dev/null
@@ -1,19 +0,0 @@
----
-  CinderVolumes.create_and_list_volume_backups:
-    -
-      args:
-        size: 1
-        detailed: True
-        do_delete: True
-        create_volume_kwargs: {}
-        create_backup_kwargs: {}
-      runner:
-        type: "constant"
-        times: 2
-        concurrency: 2
-      context:
-        users:
-          tenants: 1
-          users_per_tenant: 1
-        roles:
-          - "Member"
diff --git a/samples/tasks/scenarios/cinder/create-and-list-volume-types.json b/samples/tasks/scenarios/cinder/create-and-list-volume-types.json
deleted file mode 100644
index 80d70f2e..00000000
--- a/samples/tasks/scenarios/cinder/create-and-list-volume-types.json
+++ /dev/null
@@ -1,25 +0,0 @@
-{
-    "CinderVolumeTypes.create_and_list_volume_types": [
-        {
-            "args": {
-                "description": "rally tests creating types"
-            },
-            "runner": {
-                "type": "constant",
-                "times": 5,
-                "concurrency": 2
-            },
-            "context": {
-                "users": {
-                    "tenants": 2,
-                    "users_per_tenant": 2
-                }
-            },
-            "sla": {
-                "failure_rate": {
-                    "max": 0
-                }
-            }
-        }
-    ]
-}
diff --git a/samples/tasks/scenarios/cinder/create-and-list-volume-types.yaml b/samples/tasks/scenarios/cinder/create-and-list-volume-types.yaml
deleted file mode 100644
index 7c6edad5..00000000
--- a/samples/tasks/scenarios/cinder/create-and-list-volume-types.yaml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-  CinderVolumeTypes.create_and_list_volume_types:
-    -
-      args:
-        description: "rally tests creating types"
-      runner:
-        type: "constant"
-        times: 5
-        concurrency: 2
-      context:
-        users:
-          tenants: 2
-          users_per_tenant: 2
-      sla:
-        failure_rate:
-          max: 0
diff --git a/samples/tasks/scenarios/cinder/create-and-list-volume.json b/samples/tasks/scenarios/cinder/create-and-list-volume.json
deleted file mode 100644
index baacf7c2..00000000
--- a/samples/tasks/scenarios/cinder/create-and-list-volume.json
+++ /dev/null
@@ -1,41 +0,0 @@
-{
-    "CinderVolumes.create_and_list_volume": [
-        {
-            "args": {
-                "size": 1,
-                "detailed": true
-            },
-            "runner": {
-                "type": "constant",
-                "times": 3,
-                "concurrency": 1
-            },
-            "context": {
-                "users": {
-                    "tenants": 1,
-                    "users_per_tenant": 1
-                }
-            }
-        },
-        {
-            "args": {
-                "size": {
-                    "min": 1,
-                    "max": 5
-                },
-                "detailed": true
-            },
-            "runner": {
-                "type": "constant",
-                "times": 3,
-                "concurrency": 1
-            },
-            "context": {
-                "users": {
-                    "tenants": 1,
-                    "users_per_tenant": 1
-                }
-            }
-        }
-    ]
-}
diff --git a/samples/tasks/scenarios/cinder/create-and-list-volume.yaml b/samples/tasks/scenarios/cinder/create-and-list-volume.yaml
deleted file mode 100644
index e209baa3..00000000
--- a/samples/tasks/scenarios/cinder/create-and-list-volume.yaml
+++ /dev/null
@@ -1,28 +0,0 @@
----
-  CinderVolumes.create_and_list_volume:
-    -
-      args:
-        size: 1
-        detailed: True
-      runner:
-        type: "constant"
-        times: 3
-        concurrency: 1
-      context:
-        users:
-          tenants: 1
-          users_per_tenant: 1
-    -
-      args:
-        size:
-          min: 1
-          max: 5
-        detailed: True
-      runner:
-        type: "constant"
-        times: 3
-        concurrency: 1
-      context:
-        users:
-          tenants: 1
-          users_per_tenant: 1
\ No newline at end of file
diff --git a/samples/tasks/scenarios/cinder/create-and-restore-volume-backup.json b/samples/tasks/scenarios/cinder/create-and-restore-volume-backup.json
deleted file mode 100644
index 80c6596e..00000000
--- a/samples/tasks/scenarios/cinder/create-and-restore-volume-backup.json
+++ /dev/null
@@ -1,24 +0,0 @@
-{
-    "CinderVolumes.create_and_restore_volume_backup": [
-        {
-            "args": {
-                "size": 1,
-                "do_delete": true,
-                "create_volume_kwargs": {},
-                "create_backup_kwargs": {}
-            },
-            "runner": {
-                "type": "constant",
-                "times": 2,
-                "concurrency": 1
-            },
-            "context": {
-                "users": {
-                    "tenants": 1,
-                    "users_per_tenant": 1
-                },
-                "roles": ["Member"]
-            }
-        }
-    ]
-}
diff --git a/samples/tasks/scenarios/cinder/create-and-restore-volume-backup.yaml b/samples/tasks/scenarios/cinder/create-and-restore-volume-backup.yaml
deleted file mode 100644
index b5e0fa2b..00000000
--- a/samples/tasks/scenarios/cinder/create-and-restore-volume-backup.yaml
+++ /dev/null
@@ -1,18 +0,0 @@
----
-  CinderVolumes.create_and_restore_volume_backup:
-    -
-      args:
-        size: 1
-        do_delete: True
-        create_volume_kwargs: {}
-        create_backup_kwargs: {}
-      runner:
-        type: "constant"
-        times: 2
-        concurrency: 1
-      context:
-        users:
-          tenants: 1
-          users_per_tenant: 1
-        roles:
-          - "Member"
\ No newline at end of file
diff --git a/samples/tasks/scenarios/cinder/create-and-set-qos.json b/samples/tasks/scenarios/cinder/create-and-set-qos.json
deleted file mode 100644
index b206e133..00000000
--- a/samples/tasks/scenarios/cinder/create-and-set-qos.json
+++ /dev/null
@@ -1,30 +0,0 @@
-{
-    "CinderQos.create_and_set_qos": [
-        {
-            "args": {
-                "consumer": "back-end",
-                "write_iops_sec": "10",
-                "read_iops_sec": "1000",
-                "set_consumer": "both",
-                "set_write_iops_sec": "11",
-                "set_read_iops_sec": "1001"
-            },
-            "runner": {
-                "type": "constant",
-                "times": 5,
-                "concurrency": 2
-            },
-            "context": {
-                "users": {
-                    "tenants": 2,
-                    "users_per_tenant": 2
-                }
-            },
-            "sla": {
-                "failure_rate": {
-                    "max": 0
-                }
-            }
-        }
-    ]
-}
diff --git a/samples/tasks/scenarios/cinder/create-and-set-qos.yaml b/samples/tasks/scenarios/cinder/create-and-set-qos.yaml
deleted file mode 100644
index 285b6472..00000000
--- a/samples/tasks/scenarios/cinder/create-and-set-qos.yaml
+++ /dev/null
@@ -1,21 +0,0 @@
----
-  CinderQos.create_and_set_qos:
-    -
-      args:
-        consumer: "back-end"
-        write_iops_sec: "10"
-        read_iops_sec: "1000"
-        set_consumer: "both"
-        set_write_iops_sec: "11"
-        set_read_iops_sec: "1001"
-      runner:
-        type: "constant"
-        times: 5
-        concurrency: 2
-      context:
-        users:
-          tenants: 2
-          users_per_tenant: 2
-      sla:
-        failure_rate:
-          max: 0
diff --git a/samples/tasks/scenarios/cinder/create-and-set-volume-type-keys.json b/samples/tasks/scenarios/cinder/create-and-set-volume-type-keys.json
deleted file mode 100644
index b99b051b..00000000
--- a/samples/tasks/scenarios/cinder/create-and-set-volume-type-keys.json
+++ /dev/null
@@ -1,28 +0,0 @@
-{
-    "CinderVolumeTypes.create_and_set_volume_type_keys": [
-        {
-            "args": {
-                "description": "rally tests creating types",
-                "volume_type_key": {
-                    "volume_backend_name": "LVM_iSCSI"
-                }
-            },
-            "runner": {
-                "type": "constant",
-                "times": 5,
-                "concurrency": 2
-            },
-            "context": {
-                "users": {
-                    "tenants": 2,
-                    "users_per_tenant": 2
-                }
-            },
-            "sla": {
-                "failure_rate": {
-                    "max": 0
-                }
-            }
-        }
-    ]
-}
diff --git a/samples/tasks/scenarios/cinder/create-and-set-volume-type-keys.yaml b/samples/tasks/scenarios/cinder/create-and-set-volume-type-keys.yaml
deleted file mode 100644
index a95e3d62..00000000
--- a/samples/tasks/scenarios/cinder/create-and-set-volume-type-keys.yaml
+++ /dev/null
@@ -1,18 +0,0 @@
----
-  CinderVolumeTypes.create_and_set_volume_type_keys:
-    -
-      args:
-        description: "rally tests creating types"
-        volume_type_key:
-          volume_backend_name: "LVM_iSCSI"
-      runner:
-        type: "constant"
-        times: 5
-        concurrency: 2
-      context:
-        users:
-          tenants: 2
-          users_per_tenant: 2
-      sla:
-        failure_rate:
-          max: 0
diff --git a/samples/tasks/scenarios/cinder/create-and-update-encryption-type.json b/samples/tasks/scenarios/cinder/create-and-update-encryption-type.json
deleted file mode 100644
index 8f1368a4..00000000
--- a/samples/tasks/scenarios/cinder/create-and-update-encryption-type.json
+++ /dev/null
@@ -1,38 +0,0 @@
-{
-    "CinderVolumeTypes.create_and_update_encryption_type": [
-        {
-            "args": {
-                "create_provider": "LuksEncryptor",
-                "create_cipher": "aes-xts-plain64",
-                "create_key_size": 512,
-                "create_control_location": "front-end",
-                "update_provider": "CryptsetupEncryptor",
-                "update_cipher": "aes-xts-plain",
-                "update_key_size": 256,
-                "update_control_location": "back-end"
-            },
-            "runner": {
-                "type": "constant",
-                "times": 4,
-                "concurrency": 1
-            },
-            "context": {
-                "users": {
-                    "tenants": 2,
-                    "users_per_tenant": 2
-                },
-                "volume_types": [
-                    "test_type1",
-                    "test_type2",
-                    "test_type3",
-                    "test_type4"
-                ]
-            },
-            "sla": {
-                "failure_rate": {
-                    "max": 0
-                }
-            }
-        }
-    ]
-}
diff --git a/samples/tasks/scenarios/cinder/create-and-update-encryption-type.yaml b/samples/tasks/scenarios/cinder/create-and-update-encryption-type.yaml
deleted file mode 100644
index 161b71ed..00000000
--- a/samples/tasks/scenarios/cinder/create-and-update-encryption-type.yaml
+++ /dev/null
@@ -1,28 +0,0 @@
-  CinderVolumeTypes.create_and_update_encryption_type:
-    -
-      args:
-        create_provider: "LuksEncryptor"
-        create_cipher: "aes-xts-plain64"
-        create_key_size: 512
-        create_control_location: "front-end"
-        update_provider: "CryptsetupEncryptor"
-        update_cipher: "aes-xts-plain"
-        update_key_size: 256
-        update_control_location: "back-end"
-      runner:
-        type: "constant"
-        times: 4
-        concurrency: 1
-      context:
-        users:
-          tenants: 2
-          users_per_tenant: 2
-        volume_types: [
-          "test_type1",
-          "test_type2",
-          "test_type3",
-          "test_type4"
-        ]
-      sla:
-        failure_rate:
-          max: 0
diff --git a/samples/tasks/scenarios/cinder/create-and-update-readonly-flag.json b/samples/tasks/scenarios/cinder/create-and-update-readonly-flag.json
deleted file mode 100644
index 8ed6a5fa..00000000
--- a/samples/tasks/scenarios/cinder/create-and-update-readonly-flag.json
+++ /dev/null
@@ -1,26 +0,0 @@
-{
-    "CinderVolumes.create_volume_and_update_readonly_flag": [
-        {
-            "args": {
-                "size": 1,
-                "read_only": true
-            },
-            "runner": {
-                "type": "constant",
-                "times": 3,
-                "concurrency": 2
-            },
-            "context": {
-                "users": {
-                    "tenants": 2,
-                    "users_per_tenant": 2
-                }
-            },
-            "sla": {
-                "failure_rate": {
-                    "max": 0
-                }
-            }
-        }
-    ]
-}
diff --git a/samples/tasks/scenarios/cinder/create-and-update-readonly-flag.yaml b/samples/tasks/scenarios/cinder/create-and-update-readonly-flag.yaml
deleted file mode 100644
index f8edb26a..00000000
--- a/samples/tasks/scenarios/cinder/create-and-update-readonly-flag.yaml
+++ /dev/null
@@ -1,17 +0,0 @@
----
-  CinderVolumes.create_volume_and_update_readonly_flag:
-    -
-      args:
-        size: 1
-        read_only: true
-      runner:
-        type: "constant"
-        times: 3
-        concurrency: 2
-      context:
-        users:
-          tenants: 2
-          users_per_tenant: 2
-      sla:
-        failure_rate:
-          max: 0
diff --git a/samples/tasks/scenarios/cinder/create-and-update-volume-type.json b/samples/tasks/scenarios/cinder/create-and-update-volume-type.json
deleted file mode 100644
index f5a95e6d..00000000
--- a/samples/tasks/scenarios/cinder/create-and-update-volume-type.json
+++ /dev/null
@@ -1,26 +0,0 @@
-{
-    "CinderVolumeTypes.create_and_update_volume_type": [
-        {
-            "args": {
-                "description": "test",
-                "update_description": "test update"
-            },
-            "runner": {
-                "type": "constant",
-                "times": 5,
-                "concurrency": 2
-            },
-            "context": {
-                "users": {
-                    "tenants": 2,
-                    "users_per_tenant": 2
-                }
-            },
-            "sla": {
-                "failure_rate": {
-                    "max": 0
-                }
-            }
-        }
-    ]
-}
diff --git a/samples/tasks/scenarios/cinder/create-and-update-volume-type.yaml b/samples/tasks/scenarios/cinder/create-and-update-volume-type.yaml
deleted file mode 100644
index 8c09cbf9..00000000
--- a/samples/tasks/scenarios/cinder/create-and-update-volume-type.yaml
+++ /dev/null
@@ -1,17 +0,0 @@
----
-  CinderVolumeTypes.create_and_update_volume_type:
-    -
-      args:
-        description: "test"
-        update_description: "test update"
-      runner:
-        type: "constant"
-        times: 5
-        concurrency: 2
-      context:
-        users:
-          tenants: 2
-          users_per_tenant: 2
-      sla:
-        failure_rate:
-          max: 0
diff --git a/samples/tasks/scenarios/cinder/create-and-update-volume.json b/samples/tasks/scenarios/cinder/create-and-update-volume.json
deleted file mode 100644
index 0810d4b3..00000000
--- a/samples/tasks/scenarios/cinder/create-and-update-volume.json
+++ /dev/null
@@ -1,24 +0,0 @@
-{
-    "CinderVolumes.create_and_update_volume": [
-        {
-            "args": {
-                "create_volume_kwargs": {},
-                "update_volume_kwargs": {
-                    "description": "desc_updated"
-                },
-                "size": 1
-            },
-            "runner": {
-                "type": "constant",
-                "times": 3,
-                "concurrency": 1
-            },
-            "context": {
-                "users": {
-                    "tenants": 1,
-                    "users_per_tenant": 1
-                }
-            }
-        }
-    ]
-}
diff --git a/samples/tasks/scenarios/cinder/create-and-update-volume.yaml b/samples/tasks/scenarios/cinder/create-and-update-volume.yaml
deleted file mode 100644
index 409302fb..00000000
--- a/samples/tasks/scenarios/cinder/create-and-update-volume.yaml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-  CinderVolumes.create_and_update_volume:
-    -
-      args:
-        update_volume_kwargs:
-          description: "desc_updated"
-        create_volume_kwargs: {}
-        size: 1
-      runner:
-        type: "constant"
-        times: 3
-        concurrency: 1
-      context:
-        users:
-          tenants: 1
-          users_per_tenant: 1
diff --git a/samples/tasks/scenarios/cinder/create-and-upload-volume-to-image.json b/samples/tasks/scenarios/cinder/create-and-upload-volume-to-image.json
deleted file mode 100644
index ec97b9ad..00000000
--- a/samples/tasks/scenarios/cinder/create-and-upload-volume-to-image.json
+++ /dev/null
@@ -1,53 +0,0 @@
-{
-    "CinderVolumes.create_and_upload_volume_to_image": [
-        {
-            "args": {
-                "size": 1,
-                "force": false,
-                "container_format": "bare",
-                "disk_format": "raw",
-                "do_delete": true,
-                "image": {
-                    "name": "^cirros.*-disk$"
-                }
-            },
-            "runner": {
-                "type": "constant",
-                "times": 3,
-                "concurrency": 2
-            },
-            "context": {
-                "users": {
-                    "tenants": 2,
-                    "users_per_tenant": 2
-                }
-            }
-        },
-        {
-            "args": {
-                "size": {
-                    "min": 1,
-                    "max": 5
-                },
-                "force": false,
-                "container_format": "bare",
-                "disk_format": "raw",
-                "do_delete": true,
-                "image": {
-                    "name": "^cirros.*-disk$"
-                }
-            },
-            "runner": {
-                "type": "constant",
-                "times": 3,
-                "concurrency": 2
-            },
-            "context": {
-                "users": {
-                    "tenants": 2,
-                    "users_per_tenant": 2
-                }
-            }
-        }
-    ]
-}
diff --git a/samples/tasks/scenarios/cinder/create-and-upload-volume-to-image.yaml b/samples/tasks/scenarios/cinder/create-and-upload-volume-to-image.yaml
deleted file mode 100644
index 70078783..00000000
--- a/samples/tasks/scenarios/cinder/create-and-upload-volume-to-image.yaml
+++ /dev/null
@@ -1,38 +0,0 @@
----
-  CinderVolumes.create_and_upload_volume_to_image:
-    -
-      args:
-        size: 1
-        force: false
-        container_format: "bare"
-        disk_format: "raw"
-        do_delete: true
-        image:
-          name: "^cirros.*-disk$"
-      runner:
-        type: "constant"
-        times: 3
-        concurrency: 2
-      context:
-        users:
-          tenants: 2
-          users_per_tenant: 2
-    -
-      args:
-        size:
-          min: 1
-          max: 5
-        force: false
-        container_format: "bare"
-        disk_format: "raw"
-        do_delete: true
-        image:
-          name: "^cirros.*-disk$"
-      runner:
-        type: "constant"
-        times: 3
-        concurrency: 2
-      context:
-        users:
-          tenants: 2
-          users_per_tenant: 2
diff --git a/samples/tasks/scenarios/cinder/create-encryption-type.json b/samples/tasks/scenarios/cinder/create-encryption-type.json
deleted file mode 100644
index a32f7c7f..00000000
--- a/samples/tasks/scenarios/cinder/create-encryption-type.json
+++ /dev/null
@@ -1,29 +0,0 @@
-{
-    "CinderVolumeTypes.create_volume_type_and_encryption_type": [
-        {
-            "args": {
-                "description": "rally tests creating types",
-                "provider": "LuksEncryptor",
-                "cipher": "aes-xts-plain64",
-                "key_size": 512,
-                "control_location": "front-end"
-            },
-            "runner": {
-                "type": "constant",
-                "times": 4,
-                "concurrency": 2
-            },
-            "context": {
-                "users": {
-                    "tenants": 2,
-                    "users_per_tenant": 2
-                }
-            },
-            "sla": {
-                "failure_rate": {
-                    "max": 0
-                }
-            }
-        }
-    ]
-}
diff --git a/samples/tasks/scenarios/cinder/create-encryption-type.yaml b/samples/tasks/scenarios/cinder/create-encryption-type.yaml
deleted file mode 100644
index 5898817f..00000000
--- a/samples/tasks/scenarios/cinder/create-encryption-type.yaml
+++ /dev/null
@@ -1,20 +0,0 @@
----
-  CinderVolumeTypes.create_volume_type_and_encryption_type:
-    -
-      args:
-        description: "rally tests creating types"
-        provider: "LuksEncryptor"
-        cipher: "aes-xts-plain64"
-        key_size: 512
-        control_location: "front-end"
-      runner:
-        type: "constant"
-        times: 4
-        concurrency: 2
-      context:
-        users:
-          tenants: 2
-          users_per_tenant: 2
-      sla:
-        failure_rate:
-          max: 0
diff --git a/samples/tasks/scenarios/cinder/create-from-image-and-delete-volume.json b/samples/tasks/scenarios/cinder/create-from-image-and-delete-volume.json
deleted file mode 100644
index cae094ba..00000000
--- a/samples/tasks/scenarios/cinder/create-from-image-and-delete-volume.json
+++ /dev/null
@@ -1,23 +0,0 @@
-{
-    "CinderVolumes.create_and_delete_volume": [
-        {
-            "args": {
-                "size": 1,
-                "image": {
-                    "name": "^cirros.*-disk$"
-                }
-            },
-            "runner": {
-                "type": "constant",
-                "times": 2,
-                "concurrency": 2
-            },
-            "context": {
-                "users": {
-                    "tenants": 2,
-                    "users_per_tenant": 2
-                }
-            }
-        }
-    ]
-}
diff --git a/samples/tasks/scenarios/cinder/create-from-image-and-delete-volume.yaml b/samples/tasks/scenarios/cinder/create-from-image-and-delete-volume.yaml
deleted file mode 100644
index c1cd9582..00000000
--- a/samples/tasks/scenarios/cinder/create-from-image-and-delete-volume.yaml
+++ /dev/null
@@ -1,15 +0,0 @@
----
-  CinderVolumes.create_and_delete_volume:
-    -
-      args:
-        size: 1
-        image:
-          name: "^cirros.*-disk$"
-      runner:
-        type: "constant"
-        times: 2
-        concurrency: 2
-      context:
-        users:
-          tenants: 2
-          users_per_tenant: 2
\ No newline at end of file
diff --git a/samples/tasks/scenarios/cinder/create-from-volume-and-delete-volume.json b/samples/tasks/scenarios/cinder/create-from-volume-and-delete-volume.json
deleted file mode 100644
index 43a439c7..00000000
--- a/samples/tasks/scenarios/cinder/create-from-volume-and-delete-volume.json
+++ /dev/null
@@ -1,45 +0,0 @@
-{
-    "CinderVolumes.create_from_volume_and_delete_volume": [
-        {
-            "args": {
-                "size": 1
-            },
-            "runner": {
-                "type": "constant",
-                "times": 2,
-                "concurrency": 2
-            },
-            "context": {
-                "users": {
-                    "tenants": 1,
-                    "users_per_tenant": 1
-                },
-                "volumes": {
-                    "size": 1
-                }
-            }
-        },
-        {
-            "args": {
-                "size": {
-                    "min": 1,
-                    "max": 5
-                }
-            },
-            "runner": {
-                "type": "constant",
-                "times": 2,
-                "concurrency": 2
-            },
-            "context": {
-                "users": {
-                    "tenants": 1,
-                    "users_per_tenant": 1
-                },
-                "volumes": {
-                    "size": 1
-                }
-            }
-        }
-    ]
-}
\ No newline at end of file
diff --git a/samples/tasks/scenarios/cinder/create-from-volume-and-delete-volume.yaml b/samples/tasks/scenarios/cinder/create-from-volume-and-delete-volume.yaml
deleted file mode 100644
index 7af1f22e..00000000
--- a/samples/tasks/scenarios/cinder/create-from-volume-and-delete-volume.yaml
+++ /dev/null
@@ -1,30 +0,0 @@
----
-  CinderVolumes.create_from_volume_and_delete_volume:
-    -
-      args:
-        size: 1
-      runner:
-        type: "constant"
-        times: 2
-        concurrency: 2
-      context:
-        users:
-          tenants: 1
-          users_per_tenant: 1
-        volumes:
-          size: 1
-    -
-      args:
-        size:
-          min: 1
-          max: 5
-      runner:
-        type: "constant"
-        times: 2
-        concurrency: 2
-      context:
-        users:
-          tenants: 1
-          users_per_tenant: 1
-        volumes:
-          size: 1
\ No newline at end of file
diff --git a/samples/tasks/scenarios/cinder/create-get-and-delete-encryption-type.json b/samples/tasks/scenarios/cinder/create-get-and-delete-encryption-type.json
deleted file mode 100644
index e3e283a3..00000000
--- a/samples/tasks/scenarios/cinder/create-get-and-delete-encryption-type.json
+++ /dev/null
@@ -1,34 +0,0 @@
-{
-    "CinderVolumeTypes.create_get_and_delete_encryption_type": [
-        {
-            "args": {
-                "provider": "LuksEncryptor",
-                "cipher": "aes-xts-plain64",
-                "key_size": 512,
-                "control_location": "front-end"
-            },
-            "runner": {
-                "type": "constant",
-                "times": 4,
-                "concurrency": 1
-            },
-            "context": {
-                "users": {
-                    "tenants": 2,
-                    "users_per_tenant": 2
-                },
-                "volume_types": [
-                    "test_type1",
-                    "test_type2",
-                    "test_type3",
-                    "test_type4"
-                ]
-            },
-            "sla": {
-                "failure_rate": {
-                    "max": 0
-                }
-            }
-        }
-    ]
-}
diff --git a/samples/tasks/scenarios/cinder/create-get-and-delete-encryption-type.yaml b/samples/tasks/scenarios/cinder/create-get-and-delete-encryption-type.yaml
deleted file mode 100644
index 2e53a2b5..00000000
--- a/samples/tasks/scenarios/cinder/create-get-and-delete-encryption-type.yaml
+++ /dev/null
@@ -1,24 +0,0 @@
-  CinderVolumeTypes.create_get_and_delete_encryption_type:
-    -
-      args:
-        provider: "LuksEncryptor"
-        cipher: "aes-xts-plain64"
-        key_size: 512
-        control_location: "front-end"
-      runner:
-        type: "constant"
-        times: 4
-        concurrency: 1
-      context:
-        users:
-          tenants: 2
-          users_per_tenant: 2
-        volume_types: [
-          "test_type1",
-          "test_type2",
-          "test_type3",
-          "test_type4"
-        ]
-      sla:
-        failure_rate:
-          max: 0
diff --git a/samples/tasks/scenarios/cinder/create-incremental-volume-backup.json b/samples/tasks/scenarios/cinder/create-incremental-volume-backup.json
deleted file mode 100644
index 81f10747..00000000
--- a/samples/tasks/scenarios/cinder/create-incremental-volume-backup.json
+++ /dev/null
@@ -1,28 +0,0 @@
-{
-    "CinderVolumeBackups.create_incremental_volume_backup": [
-        {
-            "args": {
-                "size": 1,
-                "create_volume_kwargs": {},
-                "create_backup_kwargs": {}
-            },
-            "runner": {
-                "type": "constant",
-                "times": 5,
-                "concurrency": 2
-            },
-            "context": {
-                "users": {
-                    "tenants": 2,
-                    "users_per_tenant": 2
-                },
-                "roles": ["admin"]
-            },
-            "sla": {
-                "failure_rate": {
-                    "max": 0
-                }
-            }
-        }
-    ]
-}
diff --git a/samples/tasks/scenarios/cinder/create-incremental-volume-backup.yaml b/samples/tasks/scenarios/cinder/create-incremental-volume-backup.yaml
deleted file mode 100644
index 73b9a490..00000000
--- a/samples/tasks/scenarios/cinder/create-incremental-volume-backup.yaml
+++ /dev/null
@@ -1,21 +0,0 @@
----
-  CinderVolumeBackups.create_incremental_volume_backup:
-    -
-      args:
-        size: 1
-        create_volume_kwargs: {}
-        create_backup_kwargs: {}
-      runner:
-        type: "constant"
-        times: 5
-        concurrency: 2
-      context:
-        users:
-          tenants: 2
-          users_per_tenant: 2
-        roles:
-          - "admin"
-      sla:
-        failure_rate:
-          max: 0
-
diff --git a/samples/tasks/scenarios/cinder/create-nested-snapshots-and-attach-volume.json b/samples/tasks/scenarios/cinder/create-nested-snapshots-and-attach-volume.json
deleted file mode 100644
index fa238f13..00000000
--- a/samples/tasks/scenarios/cinder/create-nested-snapshots-and-attach-volume.json
+++ /dev/null
@@ -1,34 +0,0 @@
-{% set flavor_name = flavor_name or "m1.tiny" %}
-{
-    "CinderVolumes.create_nested_snapshots_and_attach_volume": [
-        {
-            "args": {
-                "size": {
-                    "min": 1,
-                    "max": 5
-                },
-                "nested_level": 5
-            },
-            "runner": {
-                "type": "constant",
-                "times": 1,
-                "concurrency": 1
-            },
-            "context": {
-                "users": {
-                    "tenants": 2,
-                    "users_per_tenant": 1
-                },
-                "servers": {
-                    "image": {
-                        "name": "^cirros.*-disk$"
-                    },
-                    "flavor": {
-                        "name": "{{flavor_name}}"
-                    },
-                    "servers_per_tenant": 2
-                }
-            }
-        }
-    ]
-}
diff --git a/samples/tasks/scenarios/cinder/create-nested-snapshots-and-attach-volume.yaml b/samples/tasks/scenarios/cinder/create-nested-snapshots-and-attach-volume.yaml
deleted file mode 100644
index bb4a5395..00000000
--- a/samples/tasks/scenarios/cinder/create-nested-snapshots-and-attach-volume.yaml
+++ /dev/null
@@ -1,23 +0,0 @@
-{% set flavor_name = flavor_name or "m1.tiny" %}
----
-  CinderVolumes.create_nested_snapshots_and_attach_volume:
-    -
-      args:
-        size:
-          min: 1
-          max: 5
-        nested_level: 5
-      runner:
-        type: "constant"
-        times: 1
-        concurrency: 1
-      context:
-        users:
-          tenants: 2
-          users_per_tenant: 1
-        servers:
-          image:
-            name: "^cirros.*-disk$"
-          flavor:
-            name: "{{flavor_name}}"
-          servers_per_tenant: 2
diff --git a/samples/tasks/scenarios/cinder/create-snapshot-and-attach-volume.json b/samples/tasks/scenarios/cinder/create-snapshot-and-attach-volume.json
deleted file mode 100644
index 4ada671e..00000000
--- a/samples/tasks/scenarios/cinder/create-snapshot-and-attach-volume.json
+++ /dev/null
@@ -1,65 +0,0 @@
-{% set flavor_name = flavor_name or "m1.tiny" %}
-{
-    "CinderVolumes.create_snapshot_and_attach_volume": [
-        {
-            "args": {
-                "volume_type": "lvmdriver-1",
-                "size": {
-                    "min": 1,
-                    "max": 5
-                }
-            },
-            "runner": {
-                "type": "constant",
-                "times": 4,
-                "concurrency": 2
-            },
-            "context": {
-                "users": {
-                    "tenants": 2,
-                    "users_per_tenant": 1
-                },
-                "servers": {
-                    "image": {
-                        "name": "^cirros.*-disk$"
-                    },
-                    "flavor": {
-                        "name": "{{flavor_name}}"
-                    },
-                    "servers_per_tenant": 2
-                }
-            }
-        },
-        {
-            "args": {
-                "volume_type": "test",
-                "size": {
-                    "min": 1,
-                    "max": 5
-                }
-            },
-            "runner": {
-                "type": "constant",
-                "times": 4,
-                "concurrency": 2
-            },
-            "context": {
-                "users": {
-                    "tenants": 2,
-                    "users_per_tenant": 1
-                },
-                "servers": {
-                    "image": {
-                        "name": "^cirros.*-disk$"
-                    },
-                    "flavor": {
-                        "name": "{{flavor_name}}"
-                    },
-                    "servers_per_tenant": 2
-                },
-                "volume_types": ["test"]
-            }
-        }
-
-    ]
-}
diff --git a/samples/tasks/scenarios/cinder/create-snapshot-and-attach-volume.yaml b/samples/tasks/scenarios/cinder/create-snapshot-and-attach-volume.yaml
deleted file mode 100644
index ed778887..00000000
--- a/samples/tasks/scenarios/cinder/create-snapshot-and-attach-volume.yaml
+++ /dev/null
@@ -1,45 +0,0 @@
-{% set flavor_name = flavor_name or "m1.tiny" %}
----
-  CinderVolumes.create_snapshot_and_attach_volume:
-    -
-      args:
-        volume_type: "lvmdriver-1"
-        size:
-          min: 1
-          max: 5
-      runner:
-        type: "constant"
-        times: 4
-        concurrency: 2
-      context:
-        users:
-          tenants: 2
-          users_per_tenant: 1
-        servers:
-          image:
-            name: "^cirros.*-disk$"
-          flavor:
-            name: "{{flavor_name}}"
-          servers_per_tenant: 2
-    -
-      args:
-        volume_type: "test"
-        size:
-          min: 1
-          max: 5
-      runner:
-        type: "constant"
-        times: 4
-        concurrency: 2
-      context:
-        users:
-          tenants: 2
-          users_per_tenant: 1
-        servers:
-          image:
-            name: "^cirros.*-disk$"
-          flavor:
-            name: "{{flavor_name}}"
-          servers_per_tenant: 2
-        volume_types:
-          - "test"
diff --git a/samples/tasks/scenarios/cinder/create-volume-and-clone.json b/samples/tasks/scenarios/cinder/create-volume-and-clone.json
deleted file mode 100755
index c35a4d51..00000000
--- a/samples/tasks/scenarios/cinder/create-volume-and-clone.json
+++ /dev/null
@@ -1,40 +0,0 @@
-{
-    "CinderVolumes.create_volume_and_clone": [
-        {
-            "args": {
-                "size": 1
-            },
-            "runner": {
-                "type": "constant",
-                "times": 3,
-                "concurrency": 2
-            },
-            "context": {
-                "users": {
-                    "tenants": 2,
-                    "users_per_tenant": 2
-                }
-            }
-        },
-        {
-            "args": {
- "size": { - "min": 1, - "max": 5 - }, - "nested_level": 3 - }, - "runner": { - "type": "constant", - "times": 3, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/cinder/create-volume-and-clone.yaml b/samples/tasks/scenarios/cinder/create-volume-and-clone.yaml deleted file mode 100755 index b88f3741..00000000 --- a/samples/tasks/scenarios/cinder/create-volume-and-clone.yaml +++ /dev/null @@ -1,27 +0,0 @@ ---- - CinderVolumes.create_volume_and_clone: - - - args: - size: 1 - runner: - type: "constant" - times: 3 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - - - args: - size: - min: 1 - max: 5 - nested_level: 3 - runner: - type: "constant" - times: 3 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 diff --git a/samples/tasks/scenarios/cinder/create-volume-backup.json b/samples/tasks/scenarios/cinder/create-volume-backup.json deleted file mode 100644 index eae48a8f..00000000 --- a/samples/tasks/scenarios/cinder/create-volume-backup.json +++ /dev/null @@ -1,24 +0,0 @@ -{ - "CinderVolumes.create_volume_backup": [ - { - "args": { - "size": 1, - "do_delete": true, - "create_volume_kwargs": {}, - "create_backup_kwargs": {} - }, - "runner": { - "type": "constant", - "times": 2, - "concurrency": 1 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - }, - "roles": ["Member"] - } - } - ] -} diff --git a/samples/tasks/scenarios/cinder/create-volume-backup.yaml b/samples/tasks/scenarios/cinder/create-volume-backup.yaml deleted file mode 100644 index f42b8087..00000000 --- a/samples/tasks/scenarios/cinder/create-volume-backup.yaml +++ /dev/null @@ -1,18 +0,0 @@ ---- - CinderVolumes.create_volume_backup: - - - args: - size: 1 - do_delete: True - create_volume_kwargs: {} - create_backup_kwargs: {} - runner: - type: "constant" - times: 2 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - roles: - - "Member" diff --git a/samples/tasks/scenarios/cinder/create-volume-from-snapshot.json b/samples/tasks/scenarios/cinder/create-volume-from-snapshot.json deleted file mode 100755 index 417b0133..00000000 --- a/samples/tasks/scenarios/cinder/create-volume-from-snapshot.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "CinderVolumes.create_volume_from_snapshot": [ - { - "args": { - "do_delete": true - }, - "runner": { - "type": "constant", - "times": 3, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - }, - "volumes": { - "size": 1 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/cinder/create-volume-from-snapshot.yaml b/samples/tasks/scenarios/cinder/create-volume-from-snapshot.yaml deleted file mode 100755 index 70d8b0e4..00000000 --- a/samples/tasks/scenarios/cinder/create-volume-from-snapshot.yaml +++ /dev/null @@ -1,15 +0,0 @@ ---- - CinderVolumes.create_volume_from_snapshot: - - - args: - do_delete: true - runner: - type: "constant" - times: 3 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - volumes: - size: 1 diff --git a/samples/tasks/scenarios/cinder/create-volume-type-add-and-list-type-access.json b/samples/tasks/scenarios/cinder/create-volume-type-add-and-list-type-access.json deleted file mode 100644 index a11de209..00000000 --- a/samples/tasks/scenarios/cinder/create-volume-type-add-and-list-type-access.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "CinderVolumeTypes.create_volume_type_add_and_list_type_access": [ - { - "args": { - "description": 
"rally tests creating types" - }, - "runner": { - "type": "constant", - "times": 4, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/cinder/create-volume-type-add-and-list-type-access.yaml b/samples/tasks/scenarios/cinder/create-volume-type-add-and-list-type-access.yaml deleted file mode 100644 index 8afb735b..00000000 --- a/samples/tasks/scenarios/cinder/create-volume-type-add-and-list-type-access.yaml +++ /dev/null @@ -1,16 +0,0 @@ ---- - CinderVolumeTypes.create_volume_type_add_and_list_type_access: - - - args: - description: "rally tests creating types" - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/cinder/create-volume.json b/samples/tasks/scenarios/cinder/create-volume.json deleted file mode 100644 index fb897d42..00000000 --- a/samples/tasks/scenarios/cinder/create-volume.json +++ /dev/null @@ -1,39 +0,0 @@ -{ - "CinderVolumes.create_volume": [ - { - "args": { - "size": 1 - }, - "runner": { - "type": "constant", - "times": 3, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - } - }, - { - "args": { - "size": { - "min": 1, - "max": 5 - } - }, - "runner": { - "type": "constant", - "times": 3, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/cinder/create-volume.yaml b/samples/tasks/scenarios/cinder/create-volume.yaml deleted file mode 100644 index e63c10fc..00000000 --- a/samples/tasks/scenarios/cinder/create-volume.yaml +++ /dev/null @@ -1,26 +0,0 @@ ---- - CinderVolumes.create_volume: - - - args: - size: 1 - runner: - type: "constant" - times: 3 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - - - args: - size: - min: 1 - max: 5 - runner: - type: "constant" - times: 3 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 \ No newline at end of file diff --git a/samples/tasks/scenarios/cinder/list-transfers.json b/samples/tasks/scenarios/cinder/list-transfers.json deleted file mode 100644 index 534fb2c5..00000000 --- a/samples/tasks/scenarios/cinder/list-transfers.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "CinderVolumes.list_transfers": [ - { - "args": { - "detailed": true - }, - "runner": { - "type": "constant", - "times": 3, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/cinder/list-transfers.yaml b/samples/tasks/scenarios/cinder/list-transfers.yaml deleted file mode 100644 index 566906ec..00000000 --- a/samples/tasks/scenarios/cinder/list-transfers.yaml +++ /dev/null @@ -1,16 +0,0 @@ ---- - CinderVolumes.list_transfers: - - - args: - detailed: true - runner: - type: "constant" - times: 3 - concurrency: 2 - context: - users: - tenants: 3 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/cinder/list-types.json b/samples/tasks/scenarios/cinder/list-types.json deleted file mode 100644 index f108862e..00000000 --- a/samples/tasks/scenarios/cinder/list-types.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "CinderVolumes.list_types": [ - { - "args": { - "is_public": true - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 
2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 3 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/cinder/list-types.yaml b/samples/tasks/scenarios/cinder/list-types.yaml deleted file mode 100644 index 939c6346..00000000 --- a/samples/tasks/scenarios/cinder/list-types.yaml +++ /dev/null @@ -1,16 +0,0 @@ ---- - CinderVolumes.list_types: - - - args: - is_public: true - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 3 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/cinder/list-volumes.json b/samples/tasks/scenarios/cinder/list-volumes.json deleted file mode 100644 index 40273db2..00000000 --- a/samples/tasks/scenarios/cinder/list-volumes.json +++ /dev/null @@ -1,24 +0,0 @@ -{ - "CinderVolumes.list_volumes": [ - { - "args": { - "detailed": true - }, - "runner": { - "type": "constant", - "times": 100, - "concurrency": 1 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - }, - "volumes": { - "size": 1, - "volumes_per_tenant": 4 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/cinder/list-volumes.yaml b/samples/tasks/scenarios/cinder/list-volumes.yaml deleted file mode 100644 index 6f95156a..00000000 --- a/samples/tasks/scenarios/cinder/list-volumes.yaml +++ /dev/null @@ -1,16 +0,0 @@ ---- - CinderVolumes.list_volumes: - - - args: - detailed: True - runner: - type: "constant" - times: 100 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - volumes: - size: 1 - volumes_per_tenant: 4 diff --git a/samples/tasks/scenarios/cinder/modify-volume-metadata.json b/samples/tasks/scenarios/cinder/modify-volume-metadata.json deleted file mode 100644 index dcfe1096..00000000 --- a/samples/tasks/scenarios/cinder/modify-volume-metadata.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "CinderVolumes.modify_volume_metadata": [ - { - "args": {}, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "volumes": { - "size": 1 - }, - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/cinder/modify-volume-metadata.yaml b/samples/tasks/scenarios/cinder/modify-volume-metadata.yaml deleted file mode 100644 index 4c42def2..00000000 --- a/samples/tasks/scenarios/cinder/modify-volume-metadata.yaml +++ /dev/null @@ -1,14 +0,0 @@ ---- - CinderVolumes.modify_volume_metadata: - - - args: {} - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - volumes: - size: 1 - users: - tenants: 2 - users_per_tenant: 2 diff --git a/samples/tasks/scenarios/designate/create-and-delete-domain.json b/samples/tasks/scenarios/designate/create-and-delete-domain.json deleted file mode 100644 index 8d79be7a..00000000 --- a/samples/tasks/scenarios/designate/create-and-delete-domain.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "DesignateBasic.create_and_delete_domain": [ - { - "runner": { - "type": "constant", - "times": 10, - "concurrency": 10 - }, - "context": { - "quotas": { - "designate": { - "domains": 100, - "domain_recordsets": 500, - "domain_records": 2000, - "recordset_records": 2000 - } - }, - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/designate/create-and-delete-domain.yaml b/samples/tasks/scenarios/designate/create-and-delete-domain.yaml deleted file mode 100644 index 80f31dcb..00000000 --- a/samples/tasks/scenarios/designate/create-and-delete-domain.yaml +++ /dev/null 
@@ -1,17 +0,0 @@ ---- - DesignateBasic.create_and_delete_domain: - - - runner: - type: "constant" - times: 10 - concurrency: 10 - context: - quotas: - designate: - domains: 100 - domain_recordsets: 500 - domain_records: 2000 - recordset_records: 2000 - users: - tenants: 2 - users_per_tenant: 2 diff --git a/samples/tasks/scenarios/designate/create-and-delete-records.json b/samples/tasks/scenarios/designate/create-and-delete-records.json deleted file mode 100644 index f789e754..00000000 --- a/samples/tasks/scenarios/designate/create-and-delete-records.json +++ /dev/null @@ -1,28 +0,0 @@ -{ - "DesignateBasic.create_and_delete_records": [ - { - "args": { - "records_per_domain": 10 - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 10 - }, - "context": { - "quotas": { - "designate": { - "domains": 100, - "domain_recordsets": 2000, - "domain_records": 2000, - "recordset_records": 2000 - } - }, - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/designate/create-and-delete-records.yaml b/samples/tasks/scenarios/designate/create-and-delete-records.yaml deleted file mode 100644 index d7e34224..00000000 --- a/samples/tasks/scenarios/designate/create-and-delete-records.yaml +++ /dev/null @@ -1,19 +0,0 @@ ---- - DesignateBasic.create_and_delete_records: - - - args: - records_per_domain: 10 - runner: - type: "constant" - times: 10 - concurrency: 10 - context: - quotas: - designate: - domains: 100 - domain_recordsets: 2000 - domain_records: 2000 - recordset_records: 2000 - users: - tenants: 2 - users_per_tenant: 2 diff --git a/samples/tasks/scenarios/designate/create-and-delete-recordsets.json b/samples/tasks/scenarios/designate/create-and-delete-recordsets.json deleted file mode 100644 index 742c475d..00000000 --- a/samples/tasks/scenarios/designate/create-and-delete-recordsets.json +++ /dev/null @@ -1,31 +0,0 @@ -{ - "DesignateBasic.create_and_delete_recordsets": [ - { - "args": { - "recordsets_per_zone": 10 - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 10 - }, - "context": { - "quotas": { - "designate": { - "domains": 100, - "domain_recordsets": 2000, - "domain_records": 2000, - "recordset_records": 2000 - } - }, - "users": { - "tenants": 2, - "users_per_tenant": 2 - }, - "zones": { - "zones_per_tenant": 1 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/designate/create-and-delete-recordsets.yaml b/samples/tasks/scenarios/designate/create-and-delete-recordsets.yaml deleted file mode 100644 index ac80f315..00000000 --- a/samples/tasks/scenarios/designate/create-and-delete-recordsets.yaml +++ /dev/null @@ -1,21 +0,0 @@ ---- - DesignateBasic.create_and_delete_recordsets: - - - args: - recordsets_per_zone: 10 - runner: - type: "constant" - times: 10 - concurrency: 10 - context: - quotas: - designate: - domains: 100 - domain_recordsets: 2000 - domain_records: 2000 - recordset_records: 2000 - users: - tenants: 2 - users_per_tenant: 2 - zones: - zones_per_tenant: 1 diff --git a/samples/tasks/scenarios/designate/create-and-delete-server.json b/samples/tasks/scenarios/designate/create-and-delete-server.json deleted file mode 100644 index ae24da1e..00000000 --- a/samples/tasks/scenarios/designate/create-and-delete-server.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "DesignateBasic.create_and_delete_server": [ - { - "runner": { - "type": "constant", - "times": 10, - "concurrency": 1 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - } - } - ] -} diff --git 
a/samples/tasks/scenarios/designate/create-and-delete-server.yaml b/samples/tasks/scenarios/designate/create-and-delete-server.yaml deleted file mode 100644 index 04845e6c..00000000 --- a/samples/tasks/scenarios/designate/create-and-delete-server.yaml +++ /dev/null @@ -1,11 +0,0 @@ ---- - DesignateBasic.create_and_delete_server: - - - runner: - type: "constant" - times: 10 - concurrency: 1 - context: - users: - tenants: 2 - users_per_tenant: 2 diff --git a/samples/tasks/scenarios/designate/create-and-delete-zone.json b/samples/tasks/scenarios/designate/create-and-delete-zone.json deleted file mode 100644 index 998d681e..00000000 --- a/samples/tasks/scenarios/designate/create-and-delete-zone.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "DesignateBasic.create_and_delete_zone": [ - { - "runner": { - "type": "constant", - "times": 10, - "concurrency": 10 - }, - "context": { - "quotas": { - "designate": { - "domains": 100, - "domain_recordsets": 500, - "domain_records": 2000, - "recordset_records": 2000 - } - }, - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/designate/create-and-delete-zone.yaml b/samples/tasks/scenarios/designate/create-and-delete-zone.yaml deleted file mode 100644 index 286a64a9..00000000 --- a/samples/tasks/scenarios/designate/create-and-delete-zone.yaml +++ /dev/null @@ -1,17 +0,0 @@ ---- - DesignateBasic.create_and_delete_zone: - - - runner: - type: "constant" - times: 10 - concurrency: 10 - context: - quotas: - designate: - domains: 100 - domain_recordsets: 500 - domain_records: 2000 - recordset_records: 2000 - users: - tenants: 2 - users_per_tenant: 2 diff --git a/samples/tasks/scenarios/designate/create-and-list-domain.json b/samples/tasks/scenarios/designate/create-and-list-domain.json deleted file mode 100644 index eec211b1..00000000 --- a/samples/tasks/scenarios/designate/create-and-list-domain.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "DesignateBasic.create_and_list_domains": [ - { - "runner": { - "type": "constant", - "times": 10, - "concurrency": 10 - }, - "context": { - "quotas": { - "designate": { - "domains": 100, - "domain_recordsets": 500, - "domain_records": 2000, - "recordset_records": 2000 - } - }, - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/designate/create-and-list-domain.yaml b/samples/tasks/scenarios/designate/create-and-list-domain.yaml deleted file mode 100644 index 8a7df1ce..00000000 --- a/samples/tasks/scenarios/designate/create-and-list-domain.yaml +++ /dev/null @@ -1,17 +0,0 @@ ---- - DesignateBasic.create_and_list_domains: - - - runner: - type: "constant" - times: 10 - concurrency: 10 - context: - quotas: - designate: - domains: 100 - domain_recordsets: 500 - domain_records: 2000 - recordset_records: 2000 - users: - tenants: 2 - users_per_tenant: 2 diff --git a/samples/tasks/scenarios/designate/create-and-list-records.json b/samples/tasks/scenarios/designate/create-and-list-records.json deleted file mode 100644 index 4b592bbf..00000000 --- a/samples/tasks/scenarios/designate/create-and-list-records.json +++ /dev/null @@ -1,28 +0,0 @@ -{ - "DesignateBasic.create_and_list_records": [ - { - "args": { - "records_per_domain": 10 - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 10 - }, - "context": { - "quotas": { - "designate": { - "domains": 100, - "domain_recordsets": 2000, - "domain_records": 2000, - "recordset_records": 2000 - } - }, - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - } - } - ] 
-} diff --git a/samples/tasks/scenarios/designate/create-and-list-records.yaml b/samples/tasks/scenarios/designate/create-and-list-records.yaml deleted file mode 100644 index 84949366..00000000 --- a/samples/tasks/scenarios/designate/create-and-list-records.yaml +++ /dev/null @@ -1,19 +0,0 @@ ---- - DesignateBasic.create_and_list_records: - - - args: - records_per_domain: 10 - runner: - type: "constant" - times: 10 - concurrency: 10 - context: - quotas: - designate: - domains: 100 - domain_recordsets: 2000 - domain_records: 2000 - recordset_records: 2000 - users: - tenants: 2 - users_per_tenant: 2 diff --git a/samples/tasks/scenarios/designate/create-and-list-recordsets.json b/samples/tasks/scenarios/designate/create-and-list-recordsets.json deleted file mode 100644 index 4dbba955..00000000 --- a/samples/tasks/scenarios/designate/create-and-list-recordsets.json +++ /dev/null @@ -1,31 +0,0 @@ -{ - "DesignateBasic.create_and_list_recordsets": [ - { - "args": { - "recordsets_per_zone": 10 - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 10 - }, - "context": { - "quotas": { - "designate": { - "domains": 100, - "domain_recordsets": 2000, - "domain_records": 2000, - "recordset_records": 2000 - } - }, - "users": { - "tenants": 2, - "users_per_tenant": 2 - }, - "zones": { - "zones_per_tenant": 1 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/designate/create-and-list-recordsets.yaml b/samples/tasks/scenarios/designate/create-and-list-recordsets.yaml deleted file mode 100644 index 7660f4ad..00000000 --- a/samples/tasks/scenarios/designate/create-and-list-recordsets.yaml +++ /dev/null @@ -1,21 +0,0 @@ ---- - DesignateBasic.create_and_list_recordsets: - - - args: - recordsets_per_zone: 10 - runner: - type: "constant" - times: 10 - concurrency: 10 - context: - quotas: - designate: - domains: 100 - domain_recordsets: 2000 - domain_records: 2000 - recordset_records: 2000 - users: - tenants: 2 - users_per_tenant: 2 - zones: - zones_per_tenant: 1 diff --git a/samples/tasks/scenarios/designate/create-and-list-servers.json b/samples/tasks/scenarios/designate/create-and-list-servers.json deleted file mode 100644 index 4400004c..00000000 --- a/samples/tasks/scenarios/designate/create-and-list-servers.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "DesignateBasic.create_and_list_servers": [ - { - "runner": { - "type": "constant", - "times": 10, - "concurrency": 1 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/designate/create-and-list-servers.yaml b/samples/tasks/scenarios/designate/create-and-list-servers.yaml deleted file mode 100644 index 9e934f91..00000000 --- a/samples/tasks/scenarios/designate/create-and-list-servers.yaml +++ /dev/null @@ -1,11 +0,0 @@ ---- - DesignateBasic.create_and_list_servers: - - - runner: - type: "constant" - times: 10 - concurrency: 1 - context: - users: - tenants: 2 - users_per_tenant: 2 diff --git a/samples/tasks/scenarios/designate/create-and-list-zones.json b/samples/tasks/scenarios/designate/create-and-list-zones.json deleted file mode 100644 index 4f366a42..00000000 --- a/samples/tasks/scenarios/designate/create-and-list-zones.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "DesignateBasic.create_and_list_zones": [ - { - "runner": { - "type": "constant", - "times": 10, - "concurrency": 10 - }, - "context": { - "quotas": { - "designate": { - "domains": 100, - "domain_recordsets": 500, - "domain_records": 2000, - "recordset_records": 2000 - } - }, - "users": { - "tenants": 2, 
- "users_per_tenant": 2 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/designate/create-and-list-zones.yaml b/samples/tasks/scenarios/designate/create-and-list-zones.yaml deleted file mode 100644 index bda61e6f..00000000 --- a/samples/tasks/scenarios/designate/create-and-list-zones.yaml +++ /dev/null @@ -1,17 +0,0 @@ ---- - DesignateBasic.create_and_list_zones: - - - runner: - type: "constant" - times: 10 - concurrency: 10 - context: - quotas: - designate: - domains: 100 - domain_recordsets: 500 - domain_records: 2000 - recordset_records: 2000 - users: - tenants: 2 - users_per_tenant: 2 diff --git a/samples/tasks/scenarios/designate/create-and-update-domain.json b/samples/tasks/scenarios/designate/create-and-update-domain.json deleted file mode 100644 index 13264857..00000000 --- a/samples/tasks/scenarios/designate/create-and-update-domain.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "DesignateBasic.create_and_update_domain": [ - { - "runner": { - "type": "constant", - "times": 10, - "concurrency": 10 - }, - "context": { - "quotas": { - "designate": { - "domains": 100, - "domain_recordsets": 500, - "domain_records": 2000, - "recordset_records": 2000 - } - }, - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/designate/create-and-update-domain.yaml b/samples/tasks/scenarios/designate/create-and-update-domain.yaml deleted file mode 100644 index 92293830..00000000 --- a/samples/tasks/scenarios/designate/create-and-update-domain.yaml +++ /dev/null @@ -1,17 +0,0 @@ ---- - DesignateBasic.create_and_update_domain: - - - runner: - type: "constant" - times: 10 - concurrency: 10 - context: - quotas: - designate: - domains: 100 - domain_recordsets: 500 - domain_records: 2000 - recordset_records: 2000 - users: - tenants: 2 - users_per_tenant: 2 diff --git a/samples/tasks/scenarios/designate/list-domains.json b/samples/tasks/scenarios/designate/list-domains.json deleted file mode 100644 index d440415e..00000000 --- a/samples/tasks/scenarios/designate/list-domains.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "DesignateBasic.list_domains": [ - { - "runner": { - "type": "constant", - "times": 3, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/designate/list-domains.yaml b/samples/tasks/scenarios/designate/list-domains.yaml deleted file mode 100644 index cf2f2824..00000000 --- a/samples/tasks/scenarios/designate/list-domains.yaml +++ /dev/null @@ -1,11 +0,0 @@ ---- - DesignateBasic.list_domains: - - - runner: - type: "constant" - times: 3 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 diff --git a/samples/tasks/scenarios/designate/list-records.json b/samples/tasks/scenarios/designate/list-records.json deleted file mode 100644 index 852f6868..00000000 --- a/samples/tasks/scenarios/designate/list-records.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "DesignateBasic.list_records": [ - { - "args": { - "domain_id": "" - }, - "runner": { - "type": "constant", - "times": 3, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/designate/list-records.yaml b/samples/tasks/scenarios/designate/list-records.yaml deleted file mode 100644 index 911d8d99..00000000 --- a/samples/tasks/scenarios/designate/list-records.yaml +++ /dev/null @@ -1,13 +0,0 @@ ---- - DesignateBasic.list_records: - - - args: - domain_id: - runner: - type: "constant" - times: 3 - 
concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 diff --git a/samples/tasks/scenarios/designate/list-recordsets.json b/samples/tasks/scenarios/designate/list-recordsets.json deleted file mode 100644 index 0c9a897b..00000000 --- a/samples/tasks/scenarios/designate/list-recordsets.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "DesignateBasic.list_recordsets": [ - { - "args": { - "zone_id": "" - }, - "runner": { - "type": "constant", - "times": 3, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/designate/list-recordsets.yaml b/samples/tasks/scenarios/designate/list-recordsets.yaml deleted file mode 100644 index 64a5e3b6..00000000 --- a/samples/tasks/scenarios/designate/list-recordsets.yaml +++ /dev/null @@ -1,13 +0,0 @@ ---- - DesignateBasic.list_recordsets: - - - args: - zone_id: - runner: - type: "constant" - times: 3 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 diff --git a/samples/tasks/scenarios/designate/list-servers.json b/samples/tasks/scenarios/designate/list-servers.json deleted file mode 100644 index 2298de77..00000000 --- a/samples/tasks/scenarios/designate/list-servers.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "DesignateBasic.list_servers": [ - { - "runner": { - "type": "constant", - "times": 3, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/designate/list-servers.yaml b/samples/tasks/scenarios/designate/list-servers.yaml deleted file mode 100644 index 2d7780fa..00000000 --- a/samples/tasks/scenarios/designate/list-servers.yaml +++ /dev/null @@ -1,11 +0,0 @@ ---- - DesignateBasic.list_servers: - - - runner: - type: "constant" - times: 3 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 diff --git a/samples/tasks/scenarios/designate/list-zones.json b/samples/tasks/scenarios/designate/list-zones.json deleted file mode 100644 index f7cdb03d..00000000 --- a/samples/tasks/scenarios/designate/list-zones.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "DesignateBasic.list_zones": [ - { - "runner": { - "type": "constant", - "times": 3, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/designate/list-zones.yaml b/samples/tasks/scenarios/designate/list-zones.yaml deleted file mode 100644 index 400d324a..00000000 --- a/samples/tasks/scenarios/designate/list-zones.yaml +++ /dev/null @@ -1,11 +0,0 @@ ---- - DesignateBasic.list_zones: - - - runner: - type: "constant" - times: 3 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 diff --git a/samples/tasks/scenarios/dummy/dummy-exception-probability.json b/samples/tasks/scenarios/dummy/dummy-exception-probability.json deleted file mode 100644 index 353c5367..00000000 --- a/samples/tasks/scenarios/dummy/dummy-exception-probability.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "Dummy.dummy_exception_probability": [ - { - "args": { - "exception_probability": 0.5 - }, - "runner": { - "type": "constant", - "times": 1000, - "concurrency": 1 - } - } - ] -} diff --git a/samples/tasks/scenarios/dummy/dummy-exception-probability.yaml b/samples/tasks/scenarios/dummy/dummy-exception-probability.yaml deleted file mode 100644 index db77b179..00000000 --- a/samples/tasks/scenarios/dummy/dummy-exception-probability.yaml +++ /dev/null @@ -1,9 +0,0 @@ ---- - Dummy.dummy_exception_probability: - - - 
args: - exception_probability: 0.5 - runner: - type: "constant" - times: 1000 - concurrency: 1 diff --git a/samples/tasks/scenarios/dummy/dummy-exception.json b/samples/tasks/scenarios/dummy/dummy-exception.json deleted file mode 100644 index fa115b5b..00000000 --- a/samples/tasks/scenarios/dummy/dummy-exception.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "Dummy.dummy_exception": [ - { - "args": { - "size_of_message": 5 - }, - "runner": { - "type": "constant", - "times": 20, - "concurrency": 5 - } - } - ] -} diff --git a/samples/tasks/scenarios/dummy/dummy-exception.yaml b/samples/tasks/scenarios/dummy/dummy-exception.yaml deleted file mode 100644 index 33e09069..00000000 --- a/samples/tasks/scenarios/dummy/dummy-exception.yaml +++ /dev/null @@ -1,9 +0,0 @@ ---- - Dummy.dummy_exception: - - - args: - size_of_message: 5 - runner: - type: "constant" - times: 20 - concurrency: 5 diff --git a/samples/tasks/scenarios/dummy/dummy-failure.json b/samples/tasks/scenarios/dummy/dummy-failure.json deleted file mode 100644 index a6129f85..00000000 --- a/samples/tasks/scenarios/dummy/dummy-failure.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "Dummy.failure": [ - { - "args": { - "sleep": 0.2, - "from_iteration": 5, - "to_iteration": 10, - "each": 2 - }, - "runner": { - "type": "constant", - "times": 20, - "concurrency": 5 - } - } - ] -} diff --git a/samples/tasks/scenarios/dummy/dummy-failure.yaml b/samples/tasks/scenarios/dummy/dummy-failure.yaml deleted file mode 100644 index f02ac041..00000000 --- a/samples/tasks/scenarios/dummy/dummy-failure.yaml +++ /dev/null @@ -1,12 +0,0 @@ ---- - Dummy.failure: - - - args: - sleep: 0.2 - from_iteration: 5 - to_iteration: 10 - each: 2 - runner: - type: "constant" - times: 20 - concurrency: 5 diff --git a/samples/tasks/scenarios/dummy/dummy-openstack.json b/samples/tasks/scenarios/dummy/dummy-openstack.json deleted file mode 100644 index adb5339c..00000000 --- a/samples/tasks/scenarios/dummy/dummy-openstack.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "Dummy.openstack": [ - { - "runner": { - "type": "constant", - "times": 1, - "concurrency": 1 - } - } - ] -} diff --git a/samples/tasks/scenarios/dummy/dummy-openstack.yaml b/samples/tasks/scenarios/dummy/dummy-openstack.yaml deleted file mode 100644 index c8efae85..00000000 --- a/samples/tasks/scenarios/dummy/dummy-openstack.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- - Dummy.openstack: - - - runner: - type: "constant" - times: 1 - concurrency: 1 diff --git a/samples/tasks/scenarios/dummy/dummy-output.json b/samples/tasks/scenarios/dummy/dummy-output.json deleted file mode 100644 index 08bcc719..00000000 --- a/samples/tasks/scenarios/dummy/dummy-output.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "Dummy.dummy_output": [ - { - "args": { - "random_range": 25 - }, - "runner": { - "type": "constant", - "times": 50, - "concurrency": 5 - } - } - ] -} diff --git a/samples/tasks/scenarios/dummy/dummy-output.yaml b/samples/tasks/scenarios/dummy/dummy-output.yaml deleted file mode 100644 index c56c7608..00000000 --- a/samples/tasks/scenarios/dummy/dummy-output.yaml +++ /dev/null @@ -1,9 +0,0 @@ ---- - Dummy.dummy_output: - - - args: - random_range: 25 - runner: - type: "constant" - times: 50 - concurrency: 5 diff --git a/samples/tasks/scenarios/dummy/dummy-random-action.json b/samples/tasks/scenarios/dummy/dummy-random-action.json deleted file mode 100644 index 6738a461..00000000 --- a/samples/tasks/scenarios/dummy/dummy-random-action.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "Dummy.dummy_random_action": [ - { - "args": { - "actions_num": 5, - 
"sleep_min": 0, - "sleep_max": 2 - }, - "runner": { - "type": "constant", - "times": 5, - "concurrency": 5 - } - } - ] -} diff --git a/samples/tasks/scenarios/dummy/dummy-random-action.yaml b/samples/tasks/scenarios/dummy/dummy-random-action.yaml deleted file mode 100644 index e844645c..00000000 --- a/samples/tasks/scenarios/dummy/dummy-random-action.yaml +++ /dev/null @@ -1,11 +0,0 @@ ---- - Dummy.dummy_random_action: - - - args: - actions_num: 5 - sleep_min: 0 - sleep_max: 2 - runner: - type: "constant" - times: 5 - concurrency: 5 diff --git a/samples/tasks/scenarios/dummy/dummy-random-fail-in-atomic.json b/samples/tasks/scenarios/dummy/dummy-random-fail-in-atomic.json deleted file mode 100644 index 5452b3c2..00000000 --- a/samples/tasks/scenarios/dummy/dummy-random-fail-in-atomic.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "Dummy.dummy_random_fail_in_atomic": [ - { - "args": { - "exception_probability": 0.5 - }, - "runner": { - "type": "constant", - "times": 100, - "concurrency": 10 - } - } - ] -} diff --git a/samples/tasks/scenarios/dummy/dummy-random-fail-in-atomic.yaml b/samples/tasks/scenarios/dummy/dummy-random-fail-in-atomic.yaml deleted file mode 100644 index 7b17bdb3..00000000 --- a/samples/tasks/scenarios/dummy/dummy-random-fail-in-atomic.yaml +++ /dev/null @@ -1,8 +0,0 @@ - Dummy.dummy_random_fail_in_atomic: - - - args: - exception_probability: 0.5 - runner: - type: "constant" - times: 100 - concurrency: 10 diff --git a/samples/tasks/scenarios/dummy/dummy-timed-atomic-actions.json b/samples/tasks/scenarios/dummy/dummy-timed-atomic-actions.json deleted file mode 100644 index 11516fb9..00000000 --- a/samples/tasks/scenarios/dummy/dummy-timed-atomic-actions.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "Dummy.dummy_timed_atomic_actions": [ - { - "args": { - "number_of_actions": 1, - "sleep_factor": 1 - }, - "runner": { - "type": "constant", - "times": 5, - "concurrency": 5 - }, - "sla": { - "max_avg_duration_per_atomic": { - "action_0": 1.0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/dummy/dummy-timed-atomic-actions.yaml b/samples/tasks/scenarios/dummy/dummy-timed-atomic-actions.yaml deleted file mode 100644 index f790ce04..00000000 --- a/samples/tasks/scenarios/dummy/dummy-timed-atomic-actions.yaml +++ /dev/null @@ -1,13 +0,0 @@ ---- - Dummy.dummy_timed_atomic_actions: - - - args: - number_of_actions: 1 - sleep_factor: 1 - runner: - type: "constant" - times: 5 - concurrency: 5 - sla: - max_avg_duration_per_atomic: - action_0: 1.0 diff --git a/samples/tasks/scenarios/dummy/dummy.json b/samples/tasks/scenarios/dummy/dummy.json deleted file mode 100644 index df5df068..00000000 --- a/samples/tasks/scenarios/dummy/dummy.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "Dummy.dummy": [ - { - "args": { - "sleep": 5 - }, - "runner": { - "type": "constant", - "times": 20, - "concurrency": 5 - } - } - ] -} diff --git a/samples/tasks/scenarios/dummy/dummy.yaml b/samples/tasks/scenarios/dummy/dummy.yaml deleted file mode 100644 index 1a3f80f4..00000000 --- a/samples/tasks/scenarios/dummy/dummy.yaml +++ /dev/null @@ -1,9 +0,0 @@ ---- - Dummy.dummy: - - - args: - sleep: 5 - runner: - type: "constant" - times: 20 - concurrency: 5 diff --git a/samples/tasks/scenarios/ec2/boot.json b/samples/tasks/scenarios/ec2/boot.json deleted file mode 100644 index ad418761..00000000 --- a/samples/tasks/scenarios/ec2/boot.json +++ /dev/null @@ -1,26 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} -{ - "EC2Servers.boot_server": [ - { - "args": { - "flavor": { - "name": "{{flavor_name}}" - }, - "image": { 
- "name": "^cirros.*-disk$" - } - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 2 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/ec2/boot.yaml b/samples/tasks/scenarios/ec2/boot.yaml deleted file mode 100644 index 1bc23344..00000000 --- a/samples/tasks/scenarios/ec2/boot.yaml +++ /dev/null @@ -1,17 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} ---- - EC2Servers.boot_server: - - - args: - flavor: - name: "{{flavor_name}}" - image: - name: "^cirros.*-disk$" - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 3 - users_per_tenant: 2 diff --git a/samples/tasks/scenarios/ec2/list-servers.json b/samples/tasks/scenarios/ec2/list-servers.json deleted file mode 100644 index 3b2cfae6..00000000 --- a/samples/tasks/scenarios/ec2/list-servers.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "EC2Servers.list_servers": [ - { - "runner": { - "type": "constant", - "times": 1, - "concurrency": 1 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - }, - "ec2_servers": { - "flavor": { - "name": "m1.tiny" - }, - "image": { - "name": "^cirros.*-disk$" - }, - "servers_per_tenant": 2 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/ec2/list-servers.yaml b/samples/tasks/scenarios/ec2/list-servers.yaml deleted file mode 100644 index 98e74171..00000000 --- a/samples/tasks/scenarios/ec2/list-servers.yaml +++ /dev/null @@ -1,17 +0,0 @@ ---- - EC2Servers.list_servers: - - - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - ec2_servers: - flavor: - name: "m1.tiny" - image: - name: "^cirros.*-disk$" - servers_per_tenant: 2 diff --git a/samples/tasks/scenarios/glance/create-and-delete-image.json b/samples/tasks/scenarios/glance/create-and-delete-image.json deleted file mode 100644 index 4bead2b7..00000000 --- a/samples/tasks/scenarios/glance/create-and-delete-image.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "GlanceImages.create_and_delete_image": [ - { - "args": { - "image_location": "http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img", - "container_format": "bare", - "disk_format": "qcow2" - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 3 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/glance/create-and-delete-image.yaml b/samples/tasks/scenarios/glance/create-and-delete-image.yaml deleted file mode 100644 index 8feb9735..00000000 --- a/samples/tasks/scenarios/glance/create-and-delete-image.yaml +++ /dev/null @@ -1,15 +0,0 @@ ---- - GlanceImages.create_and_delete_image: - - - args: - image_location: "http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img" - container_format: "bare" - disk_format: "qcow2" - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 3 diff --git a/samples/tasks/scenarios/glance/create-and-get-image.json b/samples/tasks/scenarios/glance/create-and-get-image.json deleted file mode 100644 index 76d3bcdd..00000000 --- a/samples/tasks/scenarios/glance/create-and-get-image.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "GlanceImages.create_and_get_image": [ - { - "args": { - "image_location": "http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img", - "container_format": "bare", - "disk_format": "qcow2" - }, - "runner": { - "type": "constant", - "times": 4, - "concurrency": 
2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/glance/create-and-get-image.yaml b/samples/tasks/scenarios/glance/create-and-get-image.yaml deleted file mode 100644 index 6d189db5..00000000 --- a/samples/tasks/scenarios/glance/create-and-get-image.yaml +++ /dev/null @@ -1,18 +0,0 @@ ---- - GlanceImages.create_and_get_image: - - - args: - image_location: "http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img" - container_format: "bare" - disk_format: "qcow2" - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/glance/create-and-list-image.json b/samples/tasks/scenarios/glance/create-and-list-image.json deleted file mode 100644 index 57049840..00000000 --- a/samples/tasks/scenarios/glance/create-and-list-image.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "GlanceImages.create_and_list_image": [ - { - "args": { - "image_location": "http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img", - "container_format": "bare", - "disk_format": "qcow2" - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 1 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/glance/create-and-list-image.yaml b/samples/tasks/scenarios/glance/create-and-list-image.yaml deleted file mode 100644 index df334e2f..00000000 --- a/samples/tasks/scenarios/glance/create-and-list-image.yaml +++ /dev/null @@ -1,15 +0,0 @@ ---- - GlanceImages.create_and_list_image: - - - args: - image_location: "http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img" - container_format: "bare" - disk_format: "qcow2" - runner: - type: "constant" - times: 10 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 diff --git a/samples/tasks/scenarios/glance/create-and-update-image.json b/samples/tasks/scenarios/glance/create-and-update-image.json deleted file mode 100644 index 8f5085f3..00000000 --- a/samples/tasks/scenarios/glance/create-and-update-image.json +++ /dev/null @@ -1,32 +0,0 @@ -{ - "GlanceImages.create_and_update_image": [ - { - "args": { - "image_location": "http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img", - "container_format": "bare", - "disk_format": "qcow2" - }, - "runner": { - "type": "constant", - "times": 4, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - }, - "api_versions": { - "glance": { - "version": 2 - } - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/glance/create-and-update-image.yaml b/samples/tasks/scenarios/glance/create-and-update-image.yaml deleted file mode 100644 index 5f0729e2..00000000 --- a/samples/tasks/scenarios/glance/create-and-update-image.yaml +++ /dev/null @@ -1,21 +0,0 @@ ---- - GlanceImages.create_and_update_image: - - - args: - image_location: "http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img" - container_format: "bare" - disk_format: "qcow2" - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - api_versions: - glance: - version: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/glance/create-image-and-boot-instances.json 
b/samples/tasks/scenarios/glance/create-image-and-boot-instances.json deleted file mode 100644 index e7417f08..00000000 --- a/samples/tasks/scenarios/glance/create-image-and-boot-instances.json +++ /dev/null @@ -1,27 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} -{ - "GlanceImages.create_image_and_boot_instances": [ - { - "args": { - "image_location": "http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img", - "container_format": "bare", - "disk_format": "qcow2", - "flavor": { - "name": "{{flavor_name}}" - }, - "number_instances": 2 - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 5 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/glance/create-image-and-boot-instances.yaml b/samples/tasks/scenarios/glance/create-image-and-boot-instances.yaml deleted file mode 100644 index 8a677e92..00000000 --- a/samples/tasks/scenarios/glance/create-image-and-boot-instances.yaml +++ /dev/null @@ -1,19 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} ---- - GlanceImages.create_image_and_boot_instances: - - - args: - image_location: "http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img" - container_format: "bare" - disk_format: "qcow2" - flavor: - name: "{{flavor_name}}" - number_instances: 2 - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 3 - users_per_tenant: 5 diff --git a/samples/tasks/scenarios/glance/list-images.json b/samples/tasks/scenarios/glance/list-images.json deleted file mode 100644 index 92cf65f2..00000000 --- a/samples/tasks/scenarios/glance/list-images.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "GlanceImages.list_images": [ - { - "runner": { - "type": "constant", - "times": 10, - "concurrency": 1 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - }, - "images": { - "image_url": "http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img", - "disk_format": "qcow2", - "container_format": "bare", - "images_per_tenant": 4 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/glance/list-images.yaml b/samples/tasks/scenarios/glance/list-images.yaml deleted file mode 100644 index de4acdec..00000000 --- a/samples/tasks/scenarios/glance/list-images.yaml +++ /dev/null @@ -1,16 +0,0 @@ ---- - GlanceImages.list_images: - - - runner: - type: "constant" - times: 10 - concurrency: 1 - context: - users: - tenants: 2 - users_per_tenant: 2 - images: - image_url: "http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img" - disk_format: "qcow2" - container_format: "bare" - images_per_tenant: 4 diff --git a/samples/tasks/scenarios/heat/create-and-delete-stack-resource-group.json b/samples/tasks/scenarios/heat/create-and-delete-stack-resource-group.json deleted file mode 100644 index d64b4753..00000000 --- a/samples/tasks/scenarios/heat/create-and-delete-stack-resource-group.json +++ /dev/null @@ -1,24 +0,0 @@ -{ - "HeatStacks.create_and_delete_stack": [ - { - "args": { - "template_path": "samples/tasks/scenarios/heat/templates/resource-group-server-with-volume.yaml.template", - "parameters": { - "num_instances": 2 - }, - "files": ["samples/tasks/scenarios/heat/templates/server-with-volume.yaml.template"] - }, - "runner": { - "type": "constant", - "times": 3, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 3 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/heat/create-and-delete-stack-resource-group.yaml 
b/samples/tasks/scenarios/heat/create-and-delete-stack-resource-group.yaml deleted file mode 100644 index f7704215..00000000 --- a/samples/tasks/scenarios/heat/create-and-delete-stack-resource-group.yaml +++ /dev/null @@ -1,16 +0,0 @@ ---- - HeatStacks.create_and_delete_stack: - - - args: - template_path: "samples/tasks/scenarios/heat/templates/resource-group-server-with-volume.yaml.template" - parameters: - num_instances: 2 - files: ["samples/tasks/scenarios/heat/templates/server-with-volume.yaml.template"] - runner: - type: "constant" - times: 3 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 3 diff --git a/samples/tasks/scenarios/heat/create-and-delete-stack-with-delay.json b/samples/tasks/scenarios/heat/create-and-delete-stack-with-delay.json deleted file mode 100644 index 3870f775..00000000 --- a/samples/tasks/scenarios/heat/create-and-delete-stack-with-delay.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "HeatStacks.create_and_delete_stack": [ - { - "args": { - "template_path": "samples/tasks/scenarios/heat/templates/resource-group-with-constraint.yaml.template" - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/heat/create-and-delete-stack-with-delay.yaml b/samples/tasks/scenarios/heat/create-and-delete-stack-with-delay.yaml deleted file mode 100644 index 2053a158..00000000 --- a/samples/tasks/scenarios/heat/create-and-delete-stack-with-delay.yaml +++ /dev/null @@ -1,13 +0,0 @@ ---- - HeatStacks.create_and_delete_stack: - - - args: - template_path: "samples/tasks/scenarios/heat/templates/resource-group-with-constraint.yaml.template" - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 \ No newline at end of file diff --git a/samples/tasks/scenarios/heat/create-and-delete-stack-with-neutron.json b/samples/tasks/scenarios/heat/create-and-delete-stack-with-neutron.json deleted file mode 100644 index 20059134..00000000 --- a/samples/tasks/scenarios/heat/create-and-delete-stack-with-neutron.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "HeatStacks.create_and_delete_stack": [ - { - "args": { - "template_path": "samples/tasks/scenarios/heat/templates/server-with-ports.yaml.template" - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 3 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/heat/create-and-delete-stack-with-neutron.yaml b/samples/tasks/scenarios/heat/create-and-delete-stack-with-neutron.yaml deleted file mode 100644 index 04a61b56..00000000 --- a/samples/tasks/scenarios/heat/create-and-delete-stack-with-neutron.yaml +++ /dev/null @@ -1,13 +0,0 @@ ---- - HeatStacks.create_and_delete_stack: - - - args: - template_path: "samples/tasks/scenarios/heat/templates/server-with-ports.yaml.template" - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 3 diff --git a/samples/tasks/scenarios/heat/create-and-delete-stack-with-volume.json b/samples/tasks/scenarios/heat/create-and-delete-stack-with-volume.json deleted file mode 100644 index 0085dc90..00000000 --- a/samples/tasks/scenarios/heat/create-and-delete-stack-with-volume.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "HeatStacks.create_and_delete_stack": [ - { - "args": { - "template_path": "samples/tasks/scenarios/heat/templates/server-with-volume.yaml.template" - }, 
- "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 3 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/heat/create-and-delete-stack-with-volume.yaml b/samples/tasks/scenarios/heat/create-and-delete-stack-with-volume.yaml deleted file mode 100644 index b2a88b24..00000000 --- a/samples/tasks/scenarios/heat/create-and-delete-stack-with-volume.yaml +++ /dev/null @@ -1,13 +0,0 @@ ---- - HeatStacks.create_and_delete_stack: - - - args: - template_path: "samples/tasks/scenarios/heat/templates/server-with-volume.yaml.template" - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 3 diff --git a/samples/tasks/scenarios/heat/create-and-delete-stack.json b/samples/tasks/scenarios/heat/create-and-delete-stack.json deleted file mode 100644 index 3b47b767..00000000 --- a/samples/tasks/scenarios/heat/create-and-delete-stack.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "HeatStacks.create_and_delete_stack": [ - { - "args": { - "template_path": "samples/tasks/scenarios/heat/templates/default.yaml.template" - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 3 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/heat/create-and-delete-stack.yaml b/samples/tasks/scenarios/heat/create-and-delete-stack.yaml deleted file mode 100644 index b665f668..00000000 --- a/samples/tasks/scenarios/heat/create-and-delete-stack.yaml +++ /dev/null @@ -1,13 +0,0 @@ ---- - HeatStacks.create_and_delete_stack: - - - args: - template_path: "samples/tasks/scenarios/heat/templates/default.yaml.template" - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 3 diff --git a/samples/tasks/scenarios/heat/create-and-list-stack.json b/samples/tasks/scenarios/heat/create-and-list-stack.json deleted file mode 100644 index 9208bd29..00000000 --- a/samples/tasks/scenarios/heat/create-and-list-stack.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "HeatStacks.create_and_list_stack": [ - { - "args": { - "template_path": "samples/tasks/scenarios/heat/templates/default.yaml.template" - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 1 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/heat/create-and-list-stack.yaml b/samples/tasks/scenarios/heat/create-and-list-stack.yaml deleted file mode 100644 index 7b037c74..00000000 --- a/samples/tasks/scenarios/heat/create-and-list-stack.yaml +++ /dev/null @@ -1,13 +0,0 @@ ---- - HeatStacks.create_and_list_stack: - - - args: - template_path: "samples/tasks/scenarios/heat/templates/default.yaml.template" - runner: - type: "constant" - times: 10 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 diff --git a/samples/tasks/scenarios/heat/create-check-delete-stack.json b/samples/tasks/scenarios/heat/create-check-delete-stack.json deleted file mode 100644 index 0726b84f..00000000 --- a/samples/tasks/scenarios/heat/create-check-delete-stack.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "HeatStacks.create_check_delete_stack": [ - { - "args": { - "template_path": "samples/tasks/scenarios/heat/templates/random-strings.yaml.template" - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 3 - } - } - } - ] -} diff --git 
a/samples/tasks/scenarios/heat/create-check-delete-stack.yaml b/samples/tasks/scenarios/heat/create-check-delete-stack.yaml deleted file mode 100644 index f66765c7..00000000 --- a/samples/tasks/scenarios/heat/create-check-delete-stack.yaml +++ /dev/null @@ -1,13 +0,0 @@ ---- - HeatStacks.create_check_delete_stack: - - - args: - template_path: "samples/tasks/scenarios/heat/templates/random-strings.yaml.template" - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 3 diff --git a/samples/tasks/scenarios/heat/create-snapshot-restore-delete-stack.json b/samples/tasks/scenarios/heat/create-snapshot-restore-delete-stack.json deleted file mode 100644 index 02157485..00000000 --- a/samples/tasks/scenarios/heat/create-snapshot-restore-delete-stack.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "HeatStacks.create_snapshot_restore_delete_stack": [ - { - "args": { - "template_path": "samples/tasks/scenarios/heat/templates/random-strings.yaml.template" - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 2 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/heat/create-snapshot-restore-delete-stack.yaml b/samples/tasks/scenarios/heat/create-snapshot-restore-delete-stack.yaml deleted file mode 100644 index 26f3ab8c..00000000 --- a/samples/tasks/scenarios/heat/create-snapshot-restore-delete-stack.yaml +++ /dev/null @@ -1,13 +0,0 @@ ---- - HeatStacks.create_snapshot_restore_delete_stack: - - - args: - template_path: "samples/tasks/scenarios/heat/templates/random-strings.yaml.template" - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 3 - users_per_tenant: 2 \ No newline at end of file diff --git a/samples/tasks/scenarios/heat/create-stack-and-list-output-resource-group.json b/samples/tasks/scenarios/heat/create-stack-and-list-output-resource-group.json deleted file mode 100644 index 501eb2d2..00000000 --- a/samples/tasks/scenarios/heat/create-stack-and-list-output-resource-group.json +++ /dev/null @@ -1,39 +0,0 @@ -{ - "HeatStacks.create_stack_and_list_output": [ - { - "args": { - "template_path": "samples/tasks/scenarios/heat/templates/resource-group-with-outputs.yaml.template" - }, - "runner": { - "type": "constant", - "times": 5, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - } - } - ], - - "HeatStacks.create_stack_and_list_output_via_API": [ - { - "args": { - "template_path": "samples/tasks/scenarios/heat/templates/resource-group-with-outputs.yaml.template" - }, - "runner": { - "type": "constant", - "times": 5, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/heat/create-stack-and-list-output-resource-group.yaml b/samples/tasks/scenarios/heat/create-stack-and-list-output-resource-group.yaml deleted file mode 100644 index 0e64846f..00000000 --- a/samples/tasks/scenarios/heat/create-stack-and-list-output-resource-group.yaml +++ /dev/null @@ -1,26 +0,0 @@ ---- - HeatStacks.create_stack_and_list_output: - - - args: - template_path: "samples/tasks/scenarios/heat/templates/resource-group-with-outputs.yaml.template" - runner: - type: "constant" - times: 5 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - - HeatStacks.create_stack_and_list_output_via_API: - - - args: - template_path: 
"samples/tasks/scenarios/heat/templates/resource-group-with-outputs.yaml.template" - runner: - type: "constant" - times: 5 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 \ No newline at end of file diff --git a/samples/tasks/scenarios/heat/create-stack-and-scale.json b/samples/tasks/scenarios/heat/create-stack-and-scale.json deleted file mode 100644 index c3fc5528..00000000 --- a/samples/tasks/scenarios/heat/create-stack-and-scale.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "HeatStacks.create_stack_and_scale": [ - { - "args": { - "template_path": "samples/tasks/scenarios/heat/templates/autoscaling-group.yaml.template", - "output_key": "scaling_url", - "delta": 1 - }, - "runner": { - "type": "constant", - "concurrency": 2, - "times": 3 - }, - "context": { - "users": { - "users_per_tenant": 1, - "tenants": 2 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/heat/create-stack-and-scale.yaml b/samples/tasks/scenarios/heat/create-stack-and-scale.yaml deleted file mode 100644 index b4c0e4ad..00000000 --- a/samples/tasks/scenarios/heat/create-stack-and-scale.yaml +++ /dev/null @@ -1,15 +0,0 @@ ---- - HeatStacks.create_stack_and_scale: - - - args: - template_path: "samples/tasks/scenarios/heat/templates/autoscaling-group.yaml.template" - output_key: "scaling_url" - delta: 1 - runner: - type: "constant" - times: 3 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 1 diff --git a/samples/tasks/scenarios/heat/create-stack-and-show-output-resource-group.json b/samples/tasks/scenarios/heat/create-stack-and-show-output-resource-group.json deleted file mode 100644 index 3dcd7706..00000000 --- a/samples/tasks/scenarios/heat/create-stack-and-show-output-resource-group.json +++ /dev/null @@ -1,41 +0,0 @@ -{ - "HeatStacks.create_stack_and_show_output": [ - { - "args": { - "template_path": "samples/tasks/scenarios/heat/templates/resource-group-with-outputs.yaml.template", - "output_key": "val1" - }, - "runner": { - "type": "constant", - "times": 5, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - } - } - ], - - "HeatStacks.create_stack_and_show_output_via_API": [ - { - "args": { - "template_path": "samples/tasks/scenarios/heat/templates/resource-group-with-outputs.yaml.template", - "output_key": "val1" - }, - "runner": { - "type": "constant", - "times": 5, - "concurrency": 1 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/heat/create-stack-and-show-output-resource-group.yaml b/samples/tasks/scenarios/heat/create-stack-and-show-output-resource-group.yaml deleted file mode 100644 index 03916a16..00000000 --- a/samples/tasks/scenarios/heat/create-stack-and-show-output-resource-group.yaml +++ /dev/null @@ -1,28 +0,0 @@ ---- - HeatStacks.create_stack_and_show_output: - - - args: - template_path: "samples/tasks/scenarios/heat/templates/resource-group-with-outputs.yaml.template" - output_key: "val1" - runner: - type: "constant" - times: 5 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - - HeatStacks.create_stack_and_show_output_via_API: - - - args: - template_path: "samples/tasks/scenarios/heat/templates/resource-group-with-outputs.yaml.template" - output_key: "val1" - runner: - type: "constant" - times: 5 - concurrency: 1 - context: - users: - tenants: 2 - users_per_tenant: 2 \ No newline at end of file diff --git a/samples/tasks/scenarios/heat/create-suspend-resume-delete-stack.json 
b/samples/tasks/scenarios/heat/create-suspend-resume-delete-stack.json deleted file mode 100644 index 667c9383..00000000 --- a/samples/tasks/scenarios/heat/create-suspend-resume-delete-stack.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "HeatStacks.create_suspend_resume_delete_stack": [ - { - "args": { - "template_path": "samples/tasks/scenarios/heat/templates/random-strings.yaml.template" - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 2 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/heat/create-suspend-resume-delete-stack.yaml b/samples/tasks/scenarios/heat/create-suspend-resume-delete-stack.yaml deleted file mode 100644 index 85c875e6..00000000 --- a/samples/tasks/scenarios/heat/create-suspend-resume-delete-stack.yaml +++ /dev/null @@ -1,13 +0,0 @@ ---- - HeatStacks.create_suspend_resume_delete_stack: - - - args: - template_path: "samples/tasks/scenarios/heat/templates/random-strings.yaml.template" - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 3 - users_per_tenant: 2 \ No newline at end of file diff --git a/samples/tasks/scenarios/heat/create-update-delete-stack-add-res.json b/samples/tasks/scenarios/heat/create-update-delete-stack-add-res.json deleted file mode 100644 index 0c22a3c5..00000000 --- a/samples/tasks/scenarios/heat/create-update-delete-stack-add-res.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "HeatStacks.create_update_delete_stack": [ - { - "args": { - "template_path": "samples/tasks/scenarios/heat/templates/random-strings.yaml.template", - "updated_template_path": "samples/tasks/scenarios/heat/templates/updated-random-strings-add.yaml.template" - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 3 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/heat/create-update-delete-stack-add-res.yaml b/samples/tasks/scenarios/heat/create-update-delete-stack-add-res.yaml deleted file mode 100644 index b2d10f50..00000000 --- a/samples/tasks/scenarios/heat/create-update-delete-stack-add-res.yaml +++ /dev/null @@ -1,14 +0,0 @@ ---- - HeatStacks.create_update_delete_stack: - - - args: - template_path: "samples/tasks/scenarios/heat/templates/random-strings.yaml.template" - updated_template_path: "samples/tasks/scenarios/heat/templates/updated-random-strings-add.yaml.template" - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 3 diff --git a/samples/tasks/scenarios/heat/create-update-delete-stack-del-res.json b/samples/tasks/scenarios/heat/create-update-delete-stack-del-res.json deleted file mode 100644 index 9748a8df..00000000 --- a/samples/tasks/scenarios/heat/create-update-delete-stack-del-res.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "HeatStacks.create_update_delete_stack": [ - { - "args": { - "template_path": "samples/tasks/scenarios/heat/templates/random-strings.yaml.template", - "updated_template_path": "samples/tasks/scenarios/heat/templates/updated-random-strings-delete.yaml.template" - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 3 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/heat/create-update-delete-stack-del-res.yaml b/samples/tasks/scenarios/heat/create-update-delete-stack-del-res.yaml deleted file mode 100644 index 738dcd4a..00000000 --- 
a/samples/tasks/scenarios/heat/create-update-delete-stack-del-res.yaml +++ /dev/null @@ -1,14 +0,0 @@ ---- - HeatStacks.create_update_delete_stack: - - - args: - template_path: "samples/tasks/scenarios/heat/templates/random-strings.yaml.template" - updated_template_path: "samples/tasks/scenarios/heat/templates/updated-random-strings-delete.yaml.template" - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 3 diff --git a/samples/tasks/scenarios/heat/create-update-delete-stack-increase.json b/samples/tasks/scenarios/heat/create-update-delete-stack-increase.json deleted file mode 100644 index 030791ba..00000000 --- a/samples/tasks/scenarios/heat/create-update-delete-stack-increase.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "HeatStacks.create_update_delete_stack": [ - { - "args": { - "template_path": "samples/tasks/scenarios/heat/templates/resource-group.yaml.template", - "updated_template_path": "samples/tasks/scenarios/heat/templates/updated-resource-group-increase.yaml.template" - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 3 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/heat/create-update-delete-stack-increase.yaml b/samples/tasks/scenarios/heat/create-update-delete-stack-increase.yaml deleted file mode 100644 index 6f923574..00000000 --- a/samples/tasks/scenarios/heat/create-update-delete-stack-increase.yaml +++ /dev/null @@ -1,14 +0,0 @@ ---- - HeatStacks.create_update_delete_stack: - - - args: - template_path: "samples/tasks/scenarios/heat/templates/resource-group.yaml.template" - updated_template_path: "samples/tasks/scenarios/heat/templates/updated-resource-group-increase.yaml.template" - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 3 diff --git a/samples/tasks/scenarios/heat/create-update-delete-stack-inplace.json b/samples/tasks/scenarios/heat/create-update-delete-stack-inplace.json deleted file mode 100644 index 31b08f8b..00000000 --- a/samples/tasks/scenarios/heat/create-update-delete-stack-inplace.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "HeatStacks.create_update_delete_stack": [ - { - "args": { - "template_path": "samples/tasks/scenarios/heat/templates/autoscaling-policy.yaml.template", - "updated_template_path": "samples/tasks/scenarios/heat/templates/updated-autoscaling-policy-inplace.yaml.template" - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 3 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/heat/create-update-delete-stack-inplace.yaml b/samples/tasks/scenarios/heat/create-update-delete-stack-inplace.yaml deleted file mode 100644 index 705946df..00000000 --- a/samples/tasks/scenarios/heat/create-update-delete-stack-inplace.yaml +++ /dev/null @@ -1,14 +0,0 @@ ---- - HeatStacks.create_update_delete_stack: - - - args: - template_path: "samples/tasks/scenarios/heat/templates/autoscaling-policy.yaml.template" - updated_template_path: "samples/tasks/scenarios/heat/templates/updated-autoscaling-policy-inplace.yaml.template" - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 3 diff --git a/samples/tasks/scenarios/heat/create-update-delete-stack-reduce.json b/samples/tasks/scenarios/heat/create-update-delete-stack-reduce.json deleted file mode 100644 index 1930b614..00000000 --- 
a/samples/tasks/scenarios/heat/create-update-delete-stack-reduce.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "HeatStacks.create_update_delete_stack": [ - { - "args": { - "template_path": "samples/tasks/scenarios/heat/templates/resource-group.yaml.template", - "updated_template_path": "samples/tasks/scenarios/heat/templates/updated-resource-group-reduce.yaml.template" - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 3 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/heat/create-update-delete-stack-reduce.yaml b/samples/tasks/scenarios/heat/create-update-delete-stack-reduce.yaml deleted file mode 100644 index 69c190e0..00000000 --- a/samples/tasks/scenarios/heat/create-update-delete-stack-reduce.yaml +++ /dev/null @@ -1,14 +0,0 @@ ---- - HeatStacks.create_update_delete_stack: - - - args: - template_path: "samples/tasks/scenarios/heat/templates/resource-group.yaml.template" - updated_template_path: "samples/tasks/scenarios/heat/templates/updated-resource-group-reduce.yaml.template" - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 3 diff --git a/samples/tasks/scenarios/heat/create-update-delete-stack-replace.json b/samples/tasks/scenarios/heat/create-update-delete-stack-replace.json deleted file mode 100644 index da82a781..00000000 --- a/samples/tasks/scenarios/heat/create-update-delete-stack-replace.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "HeatStacks.create_update_delete_stack": [ - { - "args": { - "template_path": "samples/tasks/scenarios/heat/templates/random-strings.yaml.template", - "updated_template_path": "samples/tasks/scenarios/heat/templates/updated-random-strings-replace.yaml.template" - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 3 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/heat/create-update-delete-stack-replace.yaml b/samples/tasks/scenarios/heat/create-update-delete-stack-replace.yaml deleted file mode 100644 index 10dff9b5..00000000 --- a/samples/tasks/scenarios/heat/create-update-delete-stack-replace.yaml +++ /dev/null @@ -1,14 +0,0 @@ ---- - HeatStacks.create_update_delete_stack: - - - args: - template_path: "samples/tasks/scenarios/heat/templates/random-strings.yaml.template" - updated_template_path: "samples/tasks/scenarios/heat/templates/updated-random-strings-replace.yaml.template" - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 3 diff --git a/samples/tasks/scenarios/heat/list-stack-and-event.json b/samples/tasks/scenarios/heat/list-stack-and-event.json deleted file mode 100644 index 2e33ec29..00000000 --- a/samples/tasks/scenarios/heat/list-stack-and-event.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "HeatStacks.list_stacks_and_events": [ - { - "runner": { - "type": "constant", - "times": 10, - "concurrency": 1 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - }, - "stacks": { - "stacks_per_tenant": 2, - "resources_per_stack": 10 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/heat/list-stack-and-event.yaml b/samples/tasks/scenarios/heat/list-stack-and-event.yaml deleted file mode 100644 index 3f4b3d06..00000000 --- a/samples/tasks/scenarios/heat/list-stack-and-event.yaml +++ /dev/null @@ -1,14 +0,0 @@ ---- - HeatStacks.list_stacks_and_events: - - - runner: - type: "constant" - times: 10 - concurrency: 1 - 
context: - users: - tenants: 1 - users_per_tenant: 1 - stacks: - stacks_per_tenant: 2 - resources_per_stack: 10 diff --git a/samples/tasks/scenarios/heat/list-stack-and-resources.json b/samples/tasks/scenarios/heat/list-stack-and-resources.json deleted file mode 100644 index a1d1485a..00000000 --- a/samples/tasks/scenarios/heat/list-stack-and-resources.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "HeatStacks.list_stacks_and_resources": [ - { - "runner": { - "type": "constant", - "times": 10, - "concurrency": 1 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - }, - "stacks": { - "stacks_per_tenant": 2, - "resources_per_stack": 10 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/heat/list-stack-and-resources.yaml b/samples/tasks/scenarios/heat/list-stack-and-resources.yaml deleted file mode 100644 index 1ff45c7d..00000000 --- a/samples/tasks/scenarios/heat/list-stack-and-resources.yaml +++ /dev/null @@ -1,14 +0,0 @@ ---- - HeatStacks.list_stacks_and_resources: - - - runner: - type: "constant" - times: 10 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - stacks: - stacks_per_tenant: 2 - resources_per_stack: 10 \ No newline at end of file diff --git a/samples/tasks/scenarios/heat/templates/autoscaling-group.yaml.template b/samples/tasks/scenarios/heat/templates/autoscaling-group.yaml.template deleted file mode 100644 index 6c9892b4..00000000 --- a/samples/tasks/scenarios/heat/templates/autoscaling-group.yaml.template +++ /dev/null @@ -1,46 +0,0 @@ -heat_template_version: 2013-05-23 - -parameters: - flavor: - type: string - default: m1.tiny - constraints: - - custom_constraint: nova.flavor - image: - type: string - default: cirros-0.3.5-x86_64-disk - constraints: - - custom_constraint: glance.image - scaling_adjustment: - type: number - default: 1 - max_size: - type: number - default: 5 - constraints: - - range: {min: 1} - - -resources: - asg: - type: OS::Heat::AutoScalingGroup - properties: - resource: - type: OS::Nova::Server - properties: - image: { get_param: image } - flavor: { get_param: flavor } - min_size: 1 - desired_capacity: 3 - max_size: { get_param: max_size } - - scaling_policy: - type: OS::Heat::ScalingPolicy - properties: - adjustment_type: change_in_capacity - auto_scaling_group_id: {get_resource: asg} - scaling_adjustment: { get_param: scaling_adjustment } - -outputs: - scaling_url: - value: {get_attr: [scaling_policy, alarm_url]} diff --git a/samples/tasks/scenarios/heat/templates/autoscaling-policy.yaml.template b/samples/tasks/scenarios/heat/templates/autoscaling-policy.yaml.template deleted file mode 100644 index a22487e3..00000000 --- a/samples/tasks/scenarios/heat/templates/autoscaling-policy.yaml.template +++ /dev/null @@ -1,17 +0,0 @@ -heat_template_version: 2013-05-23 - -resources: - test_group: - type: OS::Heat::AutoScalingGroup - properties: - desired_capacity: 0 - max_size: 0 - min_size: 0 - resource: - type: OS::Heat::RandomString - test_policy: - type: OS::Heat::ScalingPolicy - properties: - adjustment_type: change_in_capacity - auto_scaling_group_id: { get_resource: test_group } - scaling_adjustment: 1 \ No newline at end of file diff --git a/samples/tasks/scenarios/heat/templates/default.yaml.template b/samples/tasks/scenarios/heat/templates/default.yaml.template deleted file mode 100644 index eb4f2f2d..00000000 --- a/samples/tasks/scenarios/heat/templates/default.yaml.template +++ /dev/null @@ -1 +0,0 @@ -heat_template_version: 2014-10-16 \ No newline at end of file diff --git 
a/samples/tasks/scenarios/heat/templates/random-strings.yaml.template b/samples/tasks/scenarios/heat/templates/random-strings.yaml.template deleted file mode 100644 index 2dd676c1..00000000 --- a/samples/tasks/scenarios/heat/templates/random-strings.yaml.template +++ /dev/null @@ -1,13 +0,0 @@ -heat_template_version: 2014-10-16 - -description: Test template for rally create-update-delete scenario - -resources: - test_string_one: - type: OS::Heat::RandomString - properties: - length: 20 - test_string_two: - type: OS::Heat::RandomString - properties: - length: 20 \ No newline at end of file diff --git a/samples/tasks/scenarios/heat/templates/resource-group-server-with-volume.yaml.template b/samples/tasks/scenarios/heat/templates/resource-group-server-with-volume.yaml.template deleted file mode 100644 index 4a15ca89..00000000 --- a/samples/tasks/scenarios/heat/templates/resource-group-server-with-volume.yaml.template +++ /dev/null @@ -1,44 +0,0 @@ -heat_template_version: 2014-10-16 - -description: > - Test template that creates a resource group with servers and volumes. - The template allows to create a lot of nested stacks with standard - configuration: nova instance, cinder volume attached to that instance - -parameters: - - num_instances: - type: number - description: number of instances that should be created in resource group - constraints: - - range: {min: 1} - instance_image: - type: string - default: cirros-0.3.5-x86_64-disk - instance_volume_size: - type: number - description: Size of volume to attach to instance - default: 1 - constraints: - - range: {min: 1, max: 1024} - instance_flavor: - type: string - description: Type of the instance to be created. - default: m1.tiny - instance_availability_zone: - type: string - description: The Availability Zone to launch the instance. - default: nova - -resources: - group_of_volumes: - type: OS::Heat::ResourceGroup - properties: - count: {get_param: num_instances} - resource_def: - type: templates/server-with-volume.yaml.template - properties: - image: {get_param: instance_image} - volume_size: {get_param: instance_volume_size} - flavor: {get_param: instance_flavor} - availability_zone: {get_param: instance_availability_zone} diff --git a/samples/tasks/scenarios/heat/templates/resource-group-with-constraint.yaml.template b/samples/tasks/scenarios/heat/templates/resource-group-with-constraint.yaml.template deleted file mode 100644 index 234e4237..00000000 --- a/samples/tasks/scenarios/heat/templates/resource-group-with-constraint.yaml.template +++ /dev/null @@ -1,21 +0,0 @@ -heat_template_version: 2013-05-23 - -description: Template for testing caching. 
- -parameters: - count: - type: number - default: 40 - delay: - type: number - default: 0.1 - -resources: - rg: - type: OS::Heat::ResourceGroup - properties: - count: {get_param: count} - resource_def: - type: OS::Heat::TestResource - properties: - constraint_prop_secs: {get_param: delay} diff --git a/samples/tasks/scenarios/heat/templates/resource-group-with-outputs.yaml.template b/samples/tasks/scenarios/heat/templates/resource-group-with-outputs.yaml.template deleted file mode 100644 index f47d03cc..00000000 --- a/samples/tasks/scenarios/heat/templates/resource-group-with-outputs.yaml.template +++ /dev/null @@ -1,37 +0,0 @@ -heat_template_version: 2013-05-23 -parameters: - attr_wait_secs: - type: number - default: 0.5 - -resources: - rg: - type: OS::Heat::ResourceGroup - properties: - count: 10 - resource_def: - type: OS::Heat::TestResource - properties: - attr_wait_secs: {get_param: attr_wait_secs} - -outputs: - val1: - value: {get_attr: [rg, resource.0.output]} - val2: - value: {get_attr: [rg, resource.1.output]} - val3: - value: {get_attr: [rg, resource.2.output]} - val4: - value: {get_attr: [rg, resource.3.output]} - val5: - value: {get_attr: [rg, resource.4.output]} - val6: - value: {get_attr: [rg, resource.5.output]} - val7: - value: {get_attr: [rg, resource.6.output]} - val8: - value: {get_attr: [rg, resource.7.output]} - val9: - value: {get_attr: [rg, resource.8.output]} - val10: - value: {get_attr: [rg, resource.9.output]} \ No newline at end of file diff --git a/samples/tasks/scenarios/heat/templates/resource-group.yaml.template b/samples/tasks/scenarios/heat/templates/resource-group.yaml.template deleted file mode 100644 index b3f505fa..00000000 --- a/samples/tasks/scenarios/heat/templates/resource-group.yaml.template +++ /dev/null @@ -1,13 +0,0 @@ -heat_template_version: 2014-10-16 - -description: Test template for rally create-update-delete scenario - -resources: - test_group: - type: OS::Heat::ResourceGroup - properties: - count: 2 - resource_def: - type: OS::Heat::RandomString - properties: - length: 20 \ No newline at end of file diff --git a/samples/tasks/scenarios/heat/templates/server-with-ports.yaml.template b/samples/tasks/scenarios/heat/templates/server-with-ports.yaml.template deleted file mode 100644 index 0e344fc0..00000000 --- a/samples/tasks/scenarios/heat/templates/server-with-ports.yaml.template +++ /dev/null @@ -1,64 +0,0 @@ -heat_template_version: 2013-05-23 - -parameters: - # set all correct defaults for parameters before launch test - public_net: - type: string - default: public - image: - type: string - default: cirros-0.3.5-x86_64-disk - flavor: - type: string - default: m1.tiny - cidr: - type: string - default: 11.11.11.0/24 - -resources: - server: - type: OS::Nova::Server - properties: - image: {get_param: image} - flavor: {get_param: flavor} - networks: - - port: { get_resource: server_port } - - router: - type: OS::Neutron::Router - properties: - external_gateway_info: - network: {get_param: public_net} - - router_interface: - type: OS::Neutron::RouterInterface - properties: - router_id: { get_resource: router } - subnet_id: { get_resource: private_subnet } - - private_net: - type: OS::Neutron::Net - - private_subnet: - type: OS::Neutron::Subnet - properties: - network: { get_resource: private_net } - cidr: {get_param: cidr} - - port_security_group: - type: OS::Neutron::SecurityGroup - properties: - name: default_port_security_group - description: > - Default security group assigned to port. 
The neutron default group is not - used because neutron creates several groups with the same name=default and - nova cannot chooses which one should it use. - - server_port: - type: OS::Neutron::Port - properties: - network: {get_resource: private_net} - fixed_ips: - - subnet: { get_resource: private_subnet } - security_groups: - - { get_resource: port_security_group } diff --git a/samples/tasks/scenarios/heat/templates/server-with-volume.yaml.template b/samples/tasks/scenarios/heat/templates/server-with-volume.yaml.template deleted file mode 100644 index 6e65cec7..00000000 --- a/samples/tasks/scenarios/heat/templates/server-with-volume.yaml.template +++ /dev/null @@ -1,39 +0,0 @@ -heat_template_version: 2013-05-23 - -parameters: - # set all correct defaults for parameters before launch test - image: - type: string - default: cirros-0.3.5-x86_64-disk - flavor: - type: string - default: m1.tiny - availability_zone: - type: string - description: The Availability Zone to launch the instance. - default: nova - volume_size: - type: number - description: Size of the volume to be created. - default: 1 - constraints: - - range: { min: 1, max: 1024 } - description: must be between 1 and 1024 Gb. - -resources: - server: - type: OS::Nova::Server - properties: - image: {get_param: image} - flavor: {get_param: flavor} - cinder_volume: - type: OS::Cinder::Volume - properties: - size: { get_param: volume_size } - availability_zone: { get_param: availability_zone } - volume_attachment: - type: OS::Cinder::VolumeAttachment - properties: - volume_id: { get_resource: cinder_volume } - instance_uuid: { get_resource: server} - mountpoint: /dev/vdc diff --git a/samples/tasks/scenarios/heat/templates/updated-autoscaling-policy-inplace.yaml.template b/samples/tasks/scenarios/heat/templates/updated-autoscaling-policy-inplace.yaml.template deleted file mode 100644 index cf34879c..00000000 --- a/samples/tasks/scenarios/heat/templates/updated-autoscaling-policy-inplace.yaml.template +++ /dev/null @@ -1,23 +0,0 @@ -heat_template_version: 2013-05-23 - -description: > - Test template for create-update-delete-stack scenario in rally. - The template updates resource parameters without resource re-creation(replacement) - in the stack defined by autoscaling_policy.yaml.template. It allows to measure - performance of "pure" resource update operation only. - -resources: - test_group: - type: OS::Heat::AutoScalingGroup - properties: - desired_capacity: 0 - max_size: 0 - min_size: 0 - resource: - type: OS::Heat::RandomString - test_policy: - type: OS::Heat::ScalingPolicy - properties: - adjustment_type: change_in_capacity - auto_scaling_group_id: { get_resource: test_group } - scaling_adjustment: -1 \ No newline at end of file diff --git a/samples/tasks/scenarios/heat/templates/updated-random-strings-add.yaml.template b/samples/tasks/scenarios/heat/templates/updated-random-strings-add.yaml.template deleted file mode 100644 index 03f9a885..00000000 --- a/samples/tasks/scenarios/heat/templates/updated-random-strings-add.yaml.template +++ /dev/null @@ -1,19 +0,0 @@ -heat_template_version: 2014-10-16 - -description: > - Test template for create-update-delete-stack scenario in rally. - The template updates the stack defined by random-strings.yaml.template with additional resource. 
- -resources: - test_string_one: - type: OS::Heat::RandomString - properties: - length: 20 - test_string_two: - type: OS::Heat::RandomString - properties: - length: 20 - test_string_three: - type: OS::Heat::RandomString - properties: - length: 20 diff --git a/samples/tasks/scenarios/heat/templates/updated-random-strings-delete.yaml.template b/samples/tasks/scenarios/heat/templates/updated-random-strings-delete.yaml.template deleted file mode 100644 index 414d90d5..00000000 --- a/samples/tasks/scenarios/heat/templates/updated-random-strings-delete.yaml.template +++ /dev/null @@ -1,11 +0,0 @@ -heat_template_version: 2014-10-16 - -description: > - Test template for create-update-delete-stack scenario in rally. - The template deletes one resource from the stack defined by random-strings.yaml.template. - -resources: - test_string_one: - type: OS::Heat::RandomString - properties: - length: 20 diff --git a/samples/tasks/scenarios/heat/templates/updated-random-strings-replace.yaml.template b/samples/tasks/scenarios/heat/templates/updated-random-strings-replace.yaml.template deleted file mode 100644 index 780fcc16..00000000 --- a/samples/tasks/scenarios/heat/templates/updated-random-strings-replace.yaml.template +++ /dev/null @@ -1,19 +0,0 @@ -heat_template_version: 2014-10-16 - -description: > - Test template for create-update-delete-stack scenario in rally. - The template deletes one resource from the stack defined by - random-strings.yaml.template and re-creates it with the updated parameters - (so-called update-replace). That happens because some parameters cannot be - changed without resource re-creation. The template allows to measure performance - of update-replace operation. - -resources: - test_string_one: - type: OS::Heat::RandomString - properties: - length: 20 - test_string_two: - type: OS::Heat::RandomString - properties: - length: 40 diff --git a/samples/tasks/scenarios/heat/templates/updated-resource-group-increase.yaml.template b/samples/tasks/scenarios/heat/templates/updated-resource-group-increase.yaml.template deleted file mode 100644 index 94bc271f..00000000 --- a/samples/tasks/scenarios/heat/templates/updated-resource-group-increase.yaml.template +++ /dev/null @@ -1,16 +0,0 @@ -heat_template_version: 2014-10-16 - -description: > - Test template for create-update-delete-stack scenario in rally. - The template updates one resource from the stack defined by resource-group.yaml.template - and adds children resources to that resource. - -resources: - test_group: - type: OS::Heat::ResourceGroup - properties: - count: 3 - resource_def: - type: OS::Heat::RandomString - properties: - length: 20 diff --git a/samples/tasks/scenarios/heat/templates/updated-resource-group-reduce.yaml.template b/samples/tasks/scenarios/heat/templates/updated-resource-group-reduce.yaml.template deleted file mode 100644 index a076224a..00000000 --- a/samples/tasks/scenarios/heat/templates/updated-resource-group-reduce.yaml.template +++ /dev/null @@ -1,16 +0,0 @@ -heat_template_version: 2014-10-16 - -description: > - Test template for create-update-delete-stack scenario in rally. - The template updates one resource from the stack defined by resource-group.yaml.template - and deletes children resources from that resource. 
- -resources: - test_group: - type: OS::Heat::ResourceGroup - properties: - count: 1 - resource_def: - type: OS::Heat::RandomString - properties: - length: 20 diff --git a/samples/tasks/scenarios/ironic/create-and-delete-node.json b/samples/tasks/scenarios/ironic/create-and-delete-node.json deleted file mode 100644 index 615206b1..00000000 --- a/samples/tasks/scenarios/ironic/create-and-delete-node.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "IronicNodes.create_and_delete_node": [ - { - "args": { - "driver": "pxe_ssh", - "properties": { - "capabilities": "boot_option:local" - } - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 5, - "users_per_tenant": 1 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/ironic/create-and-delete-node.yaml b/samples/tasks/scenarios/ironic/create-and-delete-node.yaml deleted file mode 100644 index 3e9e5fa4..00000000 --- a/samples/tasks/scenarios/ironic/create-and-delete-node.yaml +++ /dev/null @@ -1,15 +0,0 @@ ---- - IronicNodes.create_and_delete_node: - - - args: - driver: "pxe_ssh" - properties: - capabilities: "boot_option:local" - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 5 - users_per_tenant: 1 diff --git a/samples/tasks/scenarios/ironic/create-and-list-node.json b/samples/tasks/scenarios/ironic/create-and-list-node.json deleted file mode 100644 index 683d3404..00000000 --- a/samples/tasks/scenarios/ironic/create-and-list-node.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "IronicNodes.create_and_list_node": [ - { - "args": { - "driver": "pxe_ssh" - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 5, - "users_per_tenant": 1 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/ironic/create-and-list-node.yaml b/samples/tasks/scenarios/ironic/create-and-list-node.yaml deleted file mode 100644 index 6f091b7d..00000000 --- a/samples/tasks/scenarios/ironic/create-and-list-node.yaml +++ /dev/null @@ -1,13 +0,0 @@ ---- - IronicNodes.create_and_list_node: - - - args: - driver: "pxe_ssh" - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 5 - users_per_tenant: 1 diff --git a/samples/tasks/scenarios/keystone/add-and-remove-user-role.json b/samples/tasks/scenarios/keystone/add-and-remove-user-role.json deleted file mode 100644 index 1e0c3e21..00000000 --- a/samples/tasks/scenarios/keystone/add-and-remove-user-role.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "KeystoneBasic.add_and_remove_user_role": [ - { - "runner": { - "type": "constant", - "times": 100, - "concurrency": 10 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 2 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/keystone/add-and-remove-user-role.yaml b/samples/tasks/scenarios/keystone/add-and-remove-user-role.yaml deleted file mode 100644 index cef6a1d7..00000000 --- a/samples/tasks/scenarios/keystone/add-and-remove-user-role.yaml +++ /dev/null @@ -1,11 +0,0 @@ ---- - KeystoneBasic.add_and_remove_user_role: - - - runner: - type: "constant" - times: 100 - concurrency: 10 - context: - users: - tenants: 3 - users_per_tenant: 2 diff --git a/samples/tasks/scenarios/keystone/authenticate-user-and-validate-token.json b/samples/tasks/scenarios/keystone/authenticate-user-and-validate-token.json deleted file mode 100644 index 564bc897..00000000 --- a/samples/tasks/scenarios/keystone/authenticate-user-and-validate-token.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - 
"KeystoneBasic.authenticate_user_and_validate_token": [ - { - "args": {}, - "runner": { - "type": "constant", - "times": 20, - "concurrency": 5 - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/keystone/authenticate-user-and-validate-token.yaml b/samples/tasks/scenarios/keystone/authenticate-user-and-validate-token.yaml deleted file mode 100644 index 86e86ca9..00000000 --- a/samples/tasks/scenarios/keystone/authenticate-user-and-validate-token.yaml +++ /dev/null @@ -1,11 +0,0 @@ ---- - KeystoneBasic.authenticate_user_and_validate_token: - - - args: {} - runner: - type: "constant" - times: 20 - concurrency: 5 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/keystone/create-add-and-list-user-roles.json b/samples/tasks/scenarios/keystone/create-add-and-list-user-roles.json deleted file mode 100644 index 065bff98..00000000 --- a/samples/tasks/scenarios/keystone/create-add-and-list-user-roles.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "KeystoneBasic.create_add_and_list_user_roles": [ - { - "runner": { - "type": "constant", - "times": 100, - "concurrency": 10 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 2 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/keystone/create-add-and-list-user-roles.yaml b/samples/tasks/scenarios/keystone/create-add-and-list-user-roles.yaml deleted file mode 100644 index e5e87325..00000000 --- a/samples/tasks/scenarios/keystone/create-add-and-list-user-roles.yaml +++ /dev/null @@ -1,11 +0,0 @@ ---- - KeystoneBasic.create_add_and_list_user_roles: - - - runner: - type: "constant" - times: 100 - concurrency: 10 - context: - users: - tenants: 3 - users_per_tenant: 2 diff --git a/samples/tasks/scenarios/keystone/create-and-delete-ec2credential.json b/samples/tasks/scenarios/keystone/create-and-delete-ec2credential.json deleted file mode 100644 index 1c4ba742..00000000 --- a/samples/tasks/scenarios/keystone/create-and-delete-ec2credential.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "KeystoneBasic.create_and_delete_ec2credential": [ - { - "runner": { - "type": "constant", - "times": 10, - "concurrency": 5 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/keystone/create-and-delete-ec2credential.yaml b/samples/tasks/scenarios/keystone/create-and-delete-ec2credential.yaml deleted file mode 100644 index 2bf29658..00000000 --- a/samples/tasks/scenarios/keystone/create-and-delete-ec2credential.yaml +++ /dev/null @@ -1,12 +0,0 @@ ---- - KeystoneBasic.create_and_delete_ec2credential: - - - - runner: - type: "constant" - times: 10 - concurrency: 5 - context: - users: - tenants: 2 - users_per_tenant: 2 diff --git a/samples/tasks/scenarios/keystone/create-and-delete-role.json b/samples/tasks/scenarios/keystone/create-and-delete-role.json deleted file mode 100644 index 25f95e6d..00000000 --- a/samples/tasks/scenarios/keystone/create-and-delete-role.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "KeystoneBasic.create_and_delete_role": [ - { - "runner": { - "type": "constant", - "times": 100, - "concurrency": 10 - } - } - ] -} \ No newline at end of file diff --git a/samples/tasks/scenarios/keystone/create-and-delete-role.yaml b/samples/tasks/scenarios/keystone/create-and-delete-role.yaml deleted file mode 100644 index 2e7d8083..00000000 --- a/samples/tasks/scenarios/keystone/create-and-delete-role.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- - KeystoneBasic.create_and_delete_role: - - - runner: - type: "constant" - times: 
100 - concurrency: 10 diff --git a/samples/tasks/scenarios/keystone/create-and-delete-service.json b/samples/tasks/scenarios/keystone/create-and-delete-service.json deleted file mode 100644 index b07e2208..00000000 --- a/samples/tasks/scenarios/keystone/create-and-delete-service.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "KeystoneBasic.create_and_delete_service": [ - { - "args": { - "service_type": "Rally_test_type", - "description": "test_description" - }, - "runner": { - "type": "constant", - "times": 100, - "concurrency": 10 - } - } - ] -} \ No newline at end of file diff --git a/samples/tasks/scenarios/keystone/create-and-delete-service.yaml b/samples/tasks/scenarios/keystone/create-and-delete-service.yaml deleted file mode 100644 index 9535fb7d..00000000 --- a/samples/tasks/scenarios/keystone/create-and-delete-service.yaml +++ /dev/null @@ -1,10 +0,0 @@ ---- - KeystoneBasic.create_and_delete_service: - - - args: - service_type: "Rally_test_type" - description: "test_description" - runner: - type: "constant" - times: 100 - concurrency: 10 \ No newline at end of file diff --git a/samples/tasks/scenarios/keystone/create-and-delete-user.json b/samples/tasks/scenarios/keystone/create-and-delete-user.json deleted file mode 100644 index c21b7327..00000000 --- a/samples/tasks/scenarios/keystone/create-and-delete-user.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "KeystoneBasic.create_delete_user": [ - { - "args": {}, - "runner": { - "type": "constant", - "times": 100, - "concurrency": 10 - } - } - ] -} diff --git a/samples/tasks/scenarios/keystone/create-and-delete-user.yaml b/samples/tasks/scenarios/keystone/create-and-delete-user.yaml deleted file mode 100644 index c518fc09..00000000 --- a/samples/tasks/scenarios/keystone/create-and-delete-user.yaml +++ /dev/null @@ -1,8 +0,0 @@ ---- - KeystoneBasic.create_delete_user: - - - args: {} - runner: - type: "constant" - times: 100 - concurrency: 10 diff --git a/samples/tasks/scenarios/keystone/create-and-get-role.json b/samples/tasks/scenarios/keystone/create-and-get-role.json deleted file mode 100644 index e2c7b3a5..00000000 --- a/samples/tasks/scenarios/keystone/create-and-get-role.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "KeystoneBasic.create_and_get_role": [ - { - "args": {}, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/keystone/create-and-get-role.yaml b/samples/tasks/scenarios/keystone/create-and-get-role.yaml deleted file mode 100644 index 88cd1226..00000000 --- a/samples/tasks/scenarios/keystone/create-and-get-role.yaml +++ /dev/null @@ -1,15 +0,0 @@ ---- - KeystoneBasic.create_and_get_role: - - - args: {} - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 3 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/keystone/create-and-list-ec2credentials.json b/samples/tasks/scenarios/keystone/create-and-list-ec2credentials.json deleted file mode 100644 index 930466ea..00000000 --- a/samples/tasks/scenarios/keystone/create-and-list-ec2credentials.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "KeystoneBasic.create_and_list_ec2credentials": [ - { - "runner": { - "type": "constant", - "times": 10, - "concurrency": 5 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - } - } - ] -} diff --git 
a/samples/tasks/scenarios/keystone/create-and-list-ec2credentials.yaml b/samples/tasks/scenarios/keystone/create-and-list-ec2credentials.yaml deleted file mode 100644 index f61e20f4..00000000 --- a/samples/tasks/scenarios/keystone/create-and-list-ec2credentials.yaml +++ /dev/null @@ -1,12 +0,0 @@ ---- - KeystoneBasic.create_and_list_ec2credentials: - - - - runner: - type: "constant" - times: 10 - concurrency: 5 - context: - users: - tenants: 2 - users_per_tenant: 2 diff --git a/samples/tasks/scenarios/keystone/create-and-list-roles.json b/samples/tasks/scenarios/keystone/create-and-list-roles.json deleted file mode 100644 index 6633a3ac..00000000 --- a/samples/tasks/scenarios/keystone/create-and-list-roles.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "KeystoneBasic.create_and_list_roles": [ - { - "args": { - "create_role_kwargs": {}, - "list_role_kwargs": {} - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/keystone/create-and-list-roles.yaml b/samples/tasks/scenarios/keystone/create-and-list-roles.yaml deleted file mode 100644 index 09dce1f8..00000000 --- a/samples/tasks/scenarios/keystone/create-and-list-roles.yaml +++ /dev/null @@ -1,17 +0,0 @@ ---- - KeystoneBasic.create_and_list_roles: - - - args: - create_role_kwargs: {} - list_role_kwargs: {} - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 3 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/keystone/create-and-list-services.json b/samples/tasks/scenarios/keystone/create-and-list-services.json deleted file mode 100644 index f8fe5178..00000000 --- a/samples/tasks/scenarios/keystone/create-and-list-services.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "KeystoneBasic.create_and_list_services": [ - { - "args": { - "service_type": "Rally_test_type", - "description": "test_description" - }, - "runner": { - "type": "constant", - "times": 100, - "concurrency": 10 - } - } - ] -} diff --git a/samples/tasks/scenarios/keystone/create-and-list-services.yaml b/samples/tasks/scenarios/keystone/create-and-list-services.yaml deleted file mode 100644 index 76ac4d46..00000000 --- a/samples/tasks/scenarios/keystone/create-and-list-services.yaml +++ /dev/null @@ -1,10 +0,0 @@ ---- - KeystoneBasic.create_and_list_services: - - - args: - service_type: "Rally_test_type" - description: "test_description" - runner: - type: "constant" - times: 100 - concurrency: 10 diff --git a/samples/tasks/scenarios/keystone/create-and-list-tenants.json b/samples/tasks/scenarios/keystone/create-and-list-tenants.json deleted file mode 100644 index b23fb49a..00000000 --- a/samples/tasks/scenarios/keystone/create-and-list-tenants.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "KeystoneBasic.create_and_list_tenants": [ - { - "args": {}, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 1 - } - } - ] -} diff --git a/samples/tasks/scenarios/keystone/create-and-list-tenants.yaml b/samples/tasks/scenarios/keystone/create-and-list-tenants.yaml deleted file mode 100644 index d18c9eb9..00000000 --- a/samples/tasks/scenarios/keystone/create-and-list-tenants.yaml +++ /dev/null @@ -1,8 +0,0 @@ ---- - KeystoneBasic.create_and_list_tenants: - - - args: {} - runner: - type: "constant" - times: 10 - concurrency: 1 diff --git a/samples/tasks/scenarios/keystone/create-and-list-users.json 
b/samples/tasks/scenarios/keystone/create-and-list-users.json deleted file mode 100644 index ed7532c9..00000000 --- a/samples/tasks/scenarios/keystone/create-and-list-users.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "KeystoneBasic.create_and_list_users": [ - { - "args": {}, - "runner": { - "type": "constant", - "times": 100, - "concurrency": 10 - } - } - ] -} diff --git a/samples/tasks/scenarios/keystone/create-and-list-users.yaml b/samples/tasks/scenarios/keystone/create-and-list-users.yaml deleted file mode 100644 index 6bfd8791..00000000 --- a/samples/tasks/scenarios/keystone/create-and-list-users.yaml +++ /dev/null @@ -1,8 +0,0 @@ ---- - KeystoneBasic.create_and_list_users: - - - args: {} - runner: - type: "constant" - times: 100 - concurrency: 10 diff --git a/samples/tasks/scenarios/keystone/create-and-update-user.json b/samples/tasks/scenarios/keystone/create-and-update-user.json deleted file mode 100644 index 04a74de4..00000000 --- a/samples/tasks/scenarios/keystone/create-and-update-user.json +++ /dev/null @@ -1,28 +0,0 @@ -{ - "KeystoneBasic.create_and_update_user": [ - { - "args": { - "create_user_kwargs": {}, - "update_user_kwargs": { - "enabled": false - } - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/keystone/create-and-update-user.yaml b/samples/tasks/scenarios/keystone/create-and-update-user.yaml deleted file mode 100644 index d9a7d1b9..00000000 --- a/samples/tasks/scenarios/keystone/create-and-update-user.yaml +++ /dev/null @@ -1,18 +0,0 @@ ---- - KeystoneBasic.create_and_update_user: - - - args: - create_user_kwargs: {} - update_user_kwargs: - enabled: false - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/keystone/create-tenant-with-users.json b/samples/tasks/scenarios/keystone/create-tenant-with-users.json deleted file mode 100644 index 2f63c29a..00000000 --- a/samples/tasks/scenarios/keystone/create-tenant-with-users.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "KeystoneBasic.create_tenant_with_users": [ - { - "args": { - "users_per_tenant": 10 - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 10 - } - } - ] -} diff --git a/samples/tasks/scenarios/keystone/create-tenant-with-users.yaml b/samples/tasks/scenarios/keystone/create-tenant-with-users.yaml deleted file mode 100644 index 402e7fd3..00000000 --- a/samples/tasks/scenarios/keystone/create-tenant-with-users.yaml +++ /dev/null @@ -1,9 +0,0 @@ ---- - KeystoneBasic.create_tenant_with_users: - - - args: - users_per_tenant: 10 - runner: - type: "constant" - times: 10 - concurrency: 10 diff --git a/samples/tasks/scenarios/keystone/create-tenant.json b/samples/tasks/scenarios/keystone/create-tenant.json deleted file mode 100644 index dc3690e1..00000000 --- a/samples/tasks/scenarios/keystone/create-tenant.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "KeystoneBasic.create_tenant": [ - { - "args": {}, - "runner": { - "type": "constant", - "times": 100, - "concurrency": 10 - } - } - ] -} diff --git a/samples/tasks/scenarios/keystone/create-tenant.yaml b/samples/tasks/scenarios/keystone/create-tenant.yaml deleted file mode 100644 index bd60e8a1..00000000 --- a/samples/tasks/scenarios/keystone/create-tenant.yaml +++ /dev/null @@ -1,8 +0,0 @@ ---- - KeystoneBasic.create_tenant: - - 
- args: {} - runner: - type: "constant" - times: 100 - concurrency: 10 diff --git a/samples/tasks/scenarios/keystone/create-update-and-delete-tenant.json b/samples/tasks/scenarios/keystone/create-update-and-delete-tenant.json deleted file mode 100644 index 9562faaa..00000000 --- a/samples/tasks/scenarios/keystone/create-update-and-delete-tenant.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "KeystoneBasic.create_update_and_delete_tenant": [ - { - "args": {}, - "runner": { - "type": "constant", - "times": 100, - "concurrency": 10 - } - } - ] -} diff --git a/samples/tasks/scenarios/keystone/create-update-and-delete-tenant.yaml b/samples/tasks/scenarios/keystone/create-update-and-delete-tenant.yaml deleted file mode 100644 index bac1f44c..00000000 --- a/samples/tasks/scenarios/keystone/create-update-and-delete-tenant.yaml +++ /dev/null @@ -1,8 +0,0 @@ ---- - KeystoneBasic.create_update_and_delete_tenant: - - - args: {} - runner: - type: "constant" - times: 100 - concurrency: 10 diff --git a/samples/tasks/scenarios/keystone/create-user-set-enabled-and-delete.json b/samples/tasks/scenarios/keystone/create-user-set-enabled-and-delete.json deleted file mode 100644 index cda0bd7c..00000000 --- a/samples/tasks/scenarios/keystone/create-user-set-enabled-and-delete.json +++ /dev/null @@ -1,24 +0,0 @@ -{ - "KeystoneBasic.create_user_set_enabled_and_delete": [ - { - "args": { - "enabled": true - }, - "runner": { - "type": "constant", - "concurrency": 10, - "times": 100 - } - }, - { - "args": { - "enabled": false - }, - "runner": { - "type": "constant", - "concurrency": 10, - "times": 100 - } - } - ] -} diff --git a/samples/tasks/scenarios/keystone/create-user-set-enabled-and-delete.yaml b/samples/tasks/scenarios/keystone/create-user-set-enabled-and-delete.yaml deleted file mode 100644 index 127957ec..00000000 --- a/samples/tasks/scenarios/keystone/create-user-set-enabled-and-delete.yaml +++ /dev/null @@ -1,16 +0,0 @@ ---- - KeystoneBasic.create_user_set_enabled_and_delete: - - - args: - enabled: true - runner: - type: "constant" - times: 100 - concurrency: 10 - - - args: - enabled: false - runner: - type: "constant" - times: 100 - concurrency: 10 diff --git a/samples/tasks/scenarios/keystone/create-user-update-password.json b/samples/tasks/scenarios/keystone/create-user-update-password.json deleted file mode 100644 index 0ae323b1..00000000 --- a/samples/tasks/scenarios/keystone/create-user-update-password.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "KeystoneBasic.create_user_update_password": [ - { - "args": {}, - "runner": { - "type": "constant", - "times": 100, - "concurrency": 10 - } - } - ] -} diff --git a/samples/tasks/scenarios/keystone/create-user-update-password.yaml b/samples/tasks/scenarios/keystone/create-user-update-password.yaml deleted file mode 100644 index 5c740979..00000000 --- a/samples/tasks/scenarios/keystone/create-user-update-password.yaml +++ /dev/null @@ -1,8 +0,0 @@ ---- - KeystoneBasic.create_user_update_password: - - - args: {} - runner: - type: "constant" - times: 100 - concurrency: 10 diff --git a/samples/tasks/scenarios/keystone/create-user.json b/samples/tasks/scenarios/keystone/create-user.json deleted file mode 100644 index 8aa6bca2..00000000 --- a/samples/tasks/scenarios/keystone/create-user.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "KeystoneBasic.create_user": [ - { - "args": {}, - "runner": { - "type": "constant", - "times": 100, - "concurrency": 10 - } - } - ] -} diff --git a/samples/tasks/scenarios/keystone/create-user.yaml b/samples/tasks/scenarios/keystone/create-user.yaml 
deleted file mode 100644 index 9ed26a24..00000000 --- a/samples/tasks/scenarios/keystone/create-user.yaml +++ /dev/null @@ -1,8 +0,0 @@ ---- - KeystoneBasic.create_user: - - - args: {} - runner: - type: "constant" - times: 100 - concurrency: 10 diff --git a/samples/tasks/scenarios/keystone/get-entities.json b/samples/tasks/scenarios/keystone/get-entities.json deleted file mode 100644 index 3441cc57..00000000 --- a/samples/tasks/scenarios/keystone/get-entities.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "KeystoneBasic.get_entities": [ - { - "runner": { - "type": "constant", - "times": 100, - "concurrency": 10 - } - } - ] -} \ No newline at end of file diff --git a/samples/tasks/scenarios/keystone/get-entities.yaml b/samples/tasks/scenarios/keystone/get-entities.yaml deleted file mode 100644 index 10d2dc17..00000000 --- a/samples/tasks/scenarios/keystone/get-entities.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- - KeystoneBasic.get_entities: - - - runner: - type: "constant" - times: 100 - concurrency: 10 diff --git a/samples/tasks/scenarios/magnum/artifacts/nginx.yaml.k8s b/samples/tasks/scenarios/magnum/artifacts/nginx.yaml.k8s deleted file mode 100644 index a26e23a7..00000000 --- a/samples/tasks/scenarios/magnum/artifacts/nginx.yaml.k8s +++ /dev/null @@ -1,12 +0,0 @@ -apiVersion: v1 -kind: Pod -metadata: - name: nginx-1 - labels: - app: nginx-1 -spec: - containers: - - name: nginx-1 - image: nginx - ports: - - containerPort: 80 diff --git a/samples/tasks/scenarios/magnum/artifacts/rc_nginx.yaml.k8s b/samples/tasks/scenarios/magnum/artifacts/rc_nginx.yaml.k8s deleted file mode 100644 index 013a7a04..00000000 --- a/samples/tasks/scenarios/magnum/artifacts/rc_nginx.yaml.k8s +++ /dev/null @@ -1,24 +0,0 @@ -apiVersion: v1 -kind: ReplicationController -metadata: - name: nginx-controller -spec: - replicas: 2 - # selector identifies the set of pods that this - # replication controller is responsible for managing - selector: - name: nginx - # template defines the 'cookie cutter' used for creating - # new pods when necessary - template: - metadata: - labels: - # Important: these labels need to match the selector above - # The api server enforces this constraint. 
- name: nginx - spec: - containers: - - name: nginx - image: nginx - ports: - - containerPort: 80 diff --git a/samples/tasks/scenarios/magnum/create-and-list-clusters.json b/samples/tasks/scenarios/magnum/create-and-list-clusters.json deleted file mode 100644 index 2f6078a9..00000000 --- a/samples/tasks/scenarios/magnum/create-and-list-clusters.json +++ /dev/null @@ -1,78 +0,0 @@ -{ - "MagnumClusters.create_and_list_clusters": [ - { - "runner": { - "type": "constant", - "concurrency": 1, - "times": 1 - }, - "args": { - "node_count": 1 - }, - "context": { - "cluster_templates": { - "dns_nameserver": "8.8.8.8", - "external_network_id": "public", - "flavor_id": "m1.small", - "docker_volume_size": 5, - "coe": "kubernetes", - "image_id": "fedora-atomic-latest", - "network_driver": "flannel" - }, - "users": { - "users_per_tenant": 1, - "tenants": 1 - } - } - }, - { - "runner": { - "type": "constant", - "concurrency": 1, - "times": 1 - }, - "args": { - "node_count": 1 - }, - "context": { - "cluster_templates": { - "dns_nameserver": "8.8.8.8", - "external_network_id": "public", - "flavor_id": "m1.small", - "docker_volume_size": 5, - "coe": "swarm", - "image_id": "fedora-atomic-latest", - "network_driver": "docker" - }, - "users": { - "users_per_tenant": 1, - "tenants": 1 - } - } - }, - { - "runner": { - "type": "constant", - "concurrency": 1, - "times": 1 - }, - "args": { - "node_count": 1 - }, - "context": { - "cluster_templates": { - "dns_nameserver": "8.8.8.8", - "external_network_id": "public", - "flavor_id": "m1.small", - "coe": "mesos", - "image_id": "ubuntu-mesos", - "network_driver": "docker" - }, - "users": { - "users_per_tenant": 1, - "tenants": 1 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/magnum/create-and-list-clusters.yaml b/samples/tasks/scenarios/magnum/create-and-list-clusters.yaml deleted file mode 100644 index 0ed7fbc3..00000000 --- a/samples/tasks/scenarios/magnum/create-and-list-clusters.yaml +++ /dev/null @@ -1,58 +0,0 @@ ---- - MagnumClusters.create_and_list_clusters: - - - args: - node_count: 1 - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - cluster_templates: - image_id: "fedora-atomic-latest" - external_network_id: "public" - dns_nameserver: "8.8.8.8" - flavor_id: "m1.small" - docker_volume_size: 5 - network_driver: "flannel" - coe: "kubernetes" - - - args: - node_count: 1 - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - cluster_templates: - image_id: "fedora-atomic-latest" - external_network_id: "public" - dns_nameserver: "8.8.8.8" - flavor_id: "m1.small" - docker_volume_size: 5 - network_driver: "docker" - coe: "swarm" - - - args: - node_count: 1 - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - cluster_templates: - image_id: "ubuntu-mesos" - external_network_id: "public" - dns_nameserver: "8.8.8.8" - flavor_id: "m1.small" - network_driver: "docker" - coe: "mesos" diff --git a/samples/tasks/scenarios/magnum/create-pods.json b/samples/tasks/scenarios/magnum/create-pods.json deleted file mode 100644 index 023f6865..00000000 --- a/samples/tasks/scenarios/magnum/create-pods.json +++ /dev/null @@ -1,64 +0,0 @@ -{ - "K8sPods.create_pods": [ - { - "runner": { - "type": "constant", - "concurrency": 1, - "times": 1 - }, - "args": { - "manifests": ["artifacts/nginx.yaml.k8s"] - }, - "context": { - "users": { - "users_per_tenant": 1, - "tenants": 1 - }, - "cluster_templates": { 
- "docker_volume_size": 5, - "coe": "kubernetes", - "image_id": "fedora-atomic-latest", - "dns_nameserver": "8.8.8.8", - "external_network_id": "public", - "flavor_id": "m1.small", - "network_driver": "flannel" - }, - "clusters": { - "node_count": 2 - }, - "ca_certs": { - "directory": "/home/stack" - } - } - }, - { - "runner": { - "type": "constant", - "concurrency": 1, - "times": 1 - }, - "args": { - "manifests": ["artifacts/nginx.yaml.k8s"] - }, - "context": { - "users": { - "users_per_tenant": 1, - "tenants": 1 - }, - "cluster_templates": { - "docker_volume_size": 5, - "coe": "kubernetes", - "image_id": "fedora-atomic-latest", - "dns_nameserver": "8.8.8.8", - "external_network_id": "public", - "flavor_id": "m1.small", - "network_driver": "flannel", - "tls_disabled": true - }, - "clusters": { - "node_count": 2 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/magnum/create-pods.yaml b/samples/tasks/scenarios/magnum/create-pods.yaml deleted file mode 100644 index eabd9535..00000000 --- a/samples/tasks/scenarios/magnum/create-pods.yaml +++ /dev/null @@ -1,47 +0,0 @@ ---- - K8sPods.create_pods: - - - args: - manifests: ["artifacts/nginx.yaml.k8s"] - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - cluster_templates: - image_id: "fedora-atomic-latest" - external_network_id: "public" - dns_nameserver: "8.8.8.8" - flavor_id: "m1.small" - docker_volume_size: 5 - network_driver: "flannel" - coe: "kubernetes" - clusters: - node_count: 2 - ca_certs: - directory: "/home/stack" - - - args: - manifests: ["artifacts/nginx.yaml.k8s"] - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - cluster_templates: - image_id: "fedora-atomic-latest" - external_network_id: "public" - dns_nameserver: "8.8.8.8" - flavor_id: "m1.small" - docker_volume_size: 5 - network_driver: "flannel" - coe: "kubernetes" - tls_disabled: True - clusters: - node_count: 2 diff --git a/samples/tasks/scenarios/magnum/create-rcs.json b/samples/tasks/scenarios/magnum/create-rcs.json deleted file mode 100644 index 2ac7a708..00000000 --- a/samples/tasks/scenarios/magnum/create-rcs.json +++ /dev/null @@ -1,64 +0,0 @@ -{ - "K8sPods.create_rcs": [ - { - "runner": { - "type": "constant", - "concurrency": 1, - "times": 1 - }, - "args": { - "manifests": ["artifacts/rc_nginx.yaml.k8s"] - }, - "context": { - "users": { - "users_per_tenant": 1, - "tenants": 1 - }, - "cluster_templates": { - "docker_volume_size": 5, - "coe": "kubernetes", - "image_id": "fedora-atomic-latest", - "dns_nameserver": "8.8.8.8", - "external_network_id": "public", - "flavor_id": "m1.small", - "network_driver": "flannel" - }, - "clusters": { - "node_count": 2 - }, - "ca_certs": { - "directory": "/home/stack" - } - } - }, - { - "runner": { - "type": "constant", - "concurrency": 1, - "times": 1 - }, - "args": { - "manifests": ["artifacts/rc_nginx.yaml.k8s"] - }, - "context": { - "users": { - "users_per_tenant": 1, - "tenants": 1 - }, - "cluster_templates": { - "docker_volume_size": 5, - "coe": "kubernetes", - "image_id": "fedora-atomic-latest", - "dns_nameserver": "8.8.8.8", - "external_network_id": "public", - "flavor_id": "m1.small", - "network_driver": "flannel", - "tls_disabled": true - }, - "clusters": { - "node_count": 2 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/magnum/create-rcs.yaml b/samples/tasks/scenarios/magnum/create-rcs.yaml deleted file mode 100644 index 43951888..00000000 --- 
a/samples/tasks/scenarios/magnum/create-rcs.yaml +++ /dev/null @@ -1,47 +0,0 @@ ---- - K8sPods.create_rcs: - - - args: - manifests: ["artifacts/rc_nginx.yaml.k8s"] - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - cluster_templates: - image_id: "fedora-atomic-latest" - external_network_id: "public" - dns_nameserver: "8.8.8.8" - flavor_id: "m1.small" - docker_volume_size: 5 - network_driver: "flannel" - coe: "kubernetes" - clusters: - node_count: 2 - ca_certs: - directory: "/home/stack" - - - args: - manifests: ["artifacts/rc_nginx.yaml.k8s"] - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - cluster_templates: - image_id: "fedora-atomic-latest" - external_network_id: "public" - dns_nameserver: "8.8.8.8" - flavor_id: "m1.small" - docker_volume_size: 5 - network_driver: "flannel" - coe: "kubernetes" - tls_disabled: True - clusters: - node_count: 2 diff --git a/samples/tasks/scenarios/magnum/list-cluster-templates.json b/samples/tasks/scenarios/magnum/list-cluster-templates.json deleted file mode 100644 index d2d0b703..00000000 --- a/samples/tasks/scenarios/magnum/list-cluster-templates.json +++ /dev/null @@ -1,69 +0,0 @@ -{ - "MagnumClusterTemplates.list_cluster_templates": [ - { - "runner": { - "type": "constant", - "concurrency": 1, - "times": 1 - }, - "context": { - "cluster_templates": { - "dns_nameserver": "8.8.8.8", - "external_network_id": "public", - "flavor_id": "m1.small", - "docker_volume_size": 5, - "coe": "kubernetes", - "image_id": "fedora-atomic-latest", - "network_driver": "flannel" - }, - "users": { - "users_per_tenant": 1, - "tenants": 1 - } - } - }, - { - "runner": { - "type": "constant", - "concurrency": 1, - "times": 1 - }, - "context": { - "cluster_templates": { - "dns_nameserver": "8.8.8.8", - "external_network_id": "public", - "flavor_id": "m1.small", - "docker_volume_size": 5, - "coe": "swarm", - "image_id": "fedora-atomic-latest", - "network_driver": "docker" - }, - "users": { - "users_per_tenant": 1, - "tenants": 1 - } - } - }, - { - "runner": { - "type": "constant", - "concurrency": 1, - "times": 1 - }, - "context": { - "cluster_templates": { - "dns_nameserver": "8.8.8.8", - "external_network_id": "public", - "flavor_id": "m1.small", - "coe": "mesos", - "image_id": "ubuntu-mesos", - "network_driver": "docker" - }, - "users": { - "users_per_tenant": 1, - "tenants": 1 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/magnum/list-cluster-templates.yaml b/samples/tasks/scenarios/magnum/list-cluster-templates.yaml deleted file mode 100644 index decbe756..00000000 --- a/samples/tasks/scenarios/magnum/list-cluster-templates.yaml +++ /dev/null @@ -1,52 +0,0 @@ ---- - MagnumClusterTemplates.list_cluster_templates: - - - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - cluster_templates: - image_id: "fedora-atomic-latest" - external_network_id: "public" - dns_nameserver: "8.8.8.8" - flavor_id: "m1.small" - docker_volume_size: 5 - network_driver: "flannel" - coe: "kubernetes" - - - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - cluster_templates: - image_id: "fedora-atomic-latest" - external_network_id: "public" - dns_nameserver: "8.8.8.8" - flavor_id: "m1.small" - docker_volume_size: 5 - network_driver: "docker" - coe: "swarm" - - - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 
1 - users_per_tenant: 1 - cluster_templates: - image_id: "ubuntu-mesos" - external_network_id: "public" - dns_nameserver: "8.8.8.8" - flavor_id: "m1.small" - network_driver: "docker" - coe: "mesos" diff --git a/samples/tasks/scenarios/magnum/list-clusters.json b/samples/tasks/scenarios/magnum/list-clusters.json deleted file mode 100644 index 950ab2ae..00000000 --- a/samples/tasks/scenarios/magnum/list-clusters.json +++ /dev/null @@ -1,78 +0,0 @@ -{ - "MagnumClusters.list_clusters": [ - { - "runner": { - "type": "constant", - "concurrency": 1, - "times": 1 - }, - "context": { - "clusters": { - "node_count": 2 - }, - "cluster_templates": { - "dns_nameserver": "8.8.8.8", - "external_network_id": "public", - "flavor_id": "m1.small", - "docker_volume_size": 5, - "coe": "kubernetes", - "image_id": "fedora-atomic-latest", - "network_driver": "flannel" - }, - "users": { - "users_per_tenant": 1, - "tenants": 1 - } - } - }, - { - "runner": { - "type": "constant", - "concurrency": 1, - "times": 1 - }, - "context": { - "clusters": { - "node_count": 2 - }, - "cluster_templates": { - "dns_nameserver": "8.8.8.8", - "external_network_id": "public", - "flavor_id": "m1.small", - "docker_volume_size": 5, - "coe": "swarm", - "image_id": "fedora-atomic-latest", - "network_driver": "docker" - }, - "users": { - "users_per_tenant": 1, - "tenants": 1 - } - } - }, - { - "runner": { - "type": "constant", - "concurrency": 1, - "times": 1 - }, - "context": { - "clusters": { - "node_count": 2 - }, - "cluster_templates": { - "dns_nameserver": "8.8.8.8", - "external_network_id": "public", - "flavor_id": "m1.small", - "coe": "mesos", - "image_id": "ubuntu-mesos", - "network_driver": "docker" - }, - "users": { - "users_per_tenant": 1, - "tenants": 1 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/magnum/list-clusters.yaml b/samples/tasks/scenarios/magnum/list-clusters.yaml deleted file mode 100644 index fd28a613..00000000 --- a/samples/tasks/scenarios/magnum/list-clusters.yaml +++ /dev/null @@ -1,58 +0,0 @@ ---- - MagnumClusters.list_clusters: - - - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - cluster_templates: - image_id: "fedora-atomic-latest" - external_network_id: "public" - dns_nameserver: "8.8.8.8" - flavor_id: "m1.small" - docker_volume_size: 5 - network_driver: "flannel" - coe: "kubernetes" - clusters: - node_count: 2 - - - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - cluster_templates: - image_id: "fedora-atomic-latest" - external_network_id: "public" - dns_nameserver: "8.8.8.8" - flavor_id: "m1.small" - docker_volume_size: 5 - network_driver: "docker" - coe: "swarm" - clusters: - node_count: 2 - - - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - cluster_templates: - image_id: "ubuntu-mesos" - external_network_id: "public" - dns_nameserver: "8.8.8.8" - flavor_id: "m1.small" - network_driver: "docker" - coe: "mesos" - clusters: - node_count: 2 diff --git a/samples/tasks/scenarios/magnum/list-pods.json b/samples/tasks/scenarios/magnum/list-pods.json deleted file mode 100644 index 1226e54c..00000000 --- a/samples/tasks/scenarios/magnum/list-pods.json +++ /dev/null @@ -1,58 +0,0 @@ -{ - "K8sPods.list_pods": [ - { - "runner": { - "type": "constant", - "concurrency": 1, - "times": 1 - }, - "context": { - "users": { - "users_per_tenant": 1, - "tenants": 1 - }, - "cluster_templates": { - "docker_volume_size": 5, - 
"coe": "kubernetes", - "image_id": "fedora-atomic-latest", - "dns_nameserver": "8.8.8.8", - "external_network_id": "public", - "flavor_id": "m1.small", - "network_driver": "flannel" - }, - "clusters": { - "node_count": 2 - }, - "ca_certs": { - "directory": "" - } - } - }, - { - "runner": { - "type": "constant", - "concurrency": 1, - "times": 1 - }, - "context": { - "users": { - "users_per_tenant": 1, - "tenants": 1 - }, - "cluster_templates": { - "docker_volume_size": 5, - "coe": "kubernetes", - "image_id": "fedora-atomic-latest", - "dns_nameserver": "8.8.8.8", - "external_network_id": "public", - "flavor_id": "m1.small", - "network_driver": "flannel", - "tls_disabled": true - }, - "clusters": { - "node_count": 2 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/magnum/list-pods.yaml b/samples/tasks/scenarios/magnum/list-pods.yaml deleted file mode 100644 index ca6631b0..00000000 --- a/samples/tasks/scenarios/magnum/list-pods.yaml +++ /dev/null @@ -1,43 +0,0 @@ ---- - K8sPods.list_pods: - - - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - cluster_templates: - image_id: "fedora-atomic-latest" - external_network_id: "public" - dns_nameserver: "8.8.8.8" - flavor_id: "m1.small" - docker_volume_size: 5 - network_driver: "flannel" - coe: "kubernetes" - clusters: - node_count: 2 - ca_certs: - directory: "" - - - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - cluster_templates: - image_id: "fedora-atomic-latest" - external_network_id: "public" - dns_nameserver: "8.8.8.8" - flavor_id: "m1.small" - docker_volume_size: 5 - network_driver: "flannel" - coe: "kubernetes" - tls_disabled: True - clusters: - node_count: 2 diff --git a/samples/tasks/scenarios/manila/attach-security-service-to-share-network.json b/samples/tasks/scenarios/manila/attach-security-service-to-share-network.json deleted file mode 100644 index 3b35181e..00000000 --- a/samples/tasks/scenarios/manila/attach-security-service-to-share-network.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "ManilaShares.attach_security_service_to_share_network": [ - { - "args": { - "security_service_type": "active_directory" - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 10 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - }, - "quotas": { - "manila": { - "share_networks": -1 - } - } - } - } - ] -} diff --git a/samples/tasks/scenarios/manila/attach-security-service-to-share-network.yaml b/samples/tasks/scenarios/manila/attach-security-service-to-share-network.yaml deleted file mode 100644 index bf16af73..00000000 --- a/samples/tasks/scenarios/manila/attach-security-service-to-share-network.yaml +++ /dev/null @@ -1,16 +0,0 @@ ---- - ManilaShares.attach_security_service_to_share_network: - - - args: - security_service_type: "active_directory" - runner: - type: "constant" - times: 10 - concurrency: 10 - context: - users: - tenants: 1 - users_per_tenant: 1 - quotas: - manila: - share_networks: -1 diff --git a/samples/tasks/scenarios/manila/create-security-service-and-delete.json b/samples/tasks/scenarios/manila/create-security-service-and-delete.json deleted file mode 100644 index 4d547e2b..00000000 --- a/samples/tasks/scenarios/manila/create-security-service-and-delete.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "ManilaShares.create_security_service_and_delete": [ - { - "args": { - "security_service_type": "active_directory", - "dns_ip": "fake_dns_ip", - "server": "fake-server", - 
"domain": "fake_domain", - "user": "fake_user", - "password": "fake_password", - "name": "fake_name", - "description": "fake_description" - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 10 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/manila/create-security-service-and-delete.yaml b/samples/tasks/scenarios/manila/create-security-service-and-delete.yaml deleted file mode 100644 index 09c3848a..00000000 --- a/samples/tasks/scenarios/manila/create-security-service-and-delete.yaml +++ /dev/null @@ -1,20 +0,0 @@ ---- - ManilaShares.create_security_service_and_delete: - - - args: - security_service_type: "active_directory" - dns_ip: "fake_dns_ip" - server: "fake-server" - domain: "fake_domain" - user: "fake_user" - password: "fake_password" - name: "fake_name" - description: "fake_description" - runner: - type: "constant" - times: 10 - concurrency: 10 - context: - users: - tenants: 1 - users_per_tenant: 1 diff --git a/samples/tasks/scenarios/manila/create-share-and-extend.json b/samples/tasks/scenarios/manila/create-share-and-extend.json deleted file mode 100644 index fa91e145..00000000 --- a/samples/tasks/scenarios/manila/create-share-and-extend.json +++ /dev/null @@ -1,28 +0,0 @@ -{ - "ManilaShares.create_and_extend_share": [ - { - "args": { - "share_proto": "nfs", - "size": 1, - "new_size": 2 - }, - "runner": { - "type": "constant", - "times": 2, - "concurrency": 2 - }, - "context": { - "quotas": { - "manila": { - "shares": -1, - "gigabytes": -1 - } - }, - "users": { - "tenants": 2, - "users_per_tenant": 1 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/manila/create-share-and-extend.yaml b/samples/tasks/scenarios/manila/create-share-and-extend.yaml deleted file mode 100644 index 46242ce9..00000000 --- a/samples/tasks/scenarios/manila/create-share-and-extend.yaml +++ /dev/null @@ -1,19 +0,0 @@ ---- - ManilaShares.create_and_extend_share: - - - args: - share_proto: "nfs" - size: 1 - new_size: 2 - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - quotas: - manila: - shares: -1 - gigabytes: -1 - users: - tenants: 2 - users_per_tenant: 1 diff --git a/samples/tasks/scenarios/manila/create-share-and-shrink.json b/samples/tasks/scenarios/manila/create-share-and-shrink.json deleted file mode 100644 index 773f9296..00000000 --- a/samples/tasks/scenarios/manila/create-share-and-shrink.json +++ /dev/null @@ -1,28 +0,0 @@ -{ - "ManilaShares.create_and_shrink_share": [ - { - "args": { - "share_proto": "nfs", - "size": 1, - "new_size": 2 - }, - "runner": { - "type": "constant", - "times": 2, - "concurrency": 2 - }, - "context": { - "quotas": { - "manila": { - "shares": -1, - "gigabytes": -1 - } - }, - "users": { - "tenants": 2, - "users_per_tenant": 1 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/manila/create-share-and-shrink.yaml b/samples/tasks/scenarios/manila/create-share-and-shrink.yaml deleted file mode 100644 index f0daf57a..00000000 --- a/samples/tasks/scenarios/manila/create-share-and-shrink.yaml +++ /dev/null @@ -1,19 +0,0 @@ ---- - ManilaShares.create_and_shrink_share: - - - args: - share_proto: "nfs" - size: 1 - new_size: 2 - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - quotas: - manila: - shares: -1 - gigabytes: -1 - users: - tenants: 2 - users_per_tenant: 1 diff --git a/samples/tasks/scenarios/manila/create-share-network-and-delete.json b/samples/tasks/scenarios/manila/create-share-network-and-delete.json deleted file mode 
100644 index 7ec3d857..00000000 --- a/samples/tasks/scenarios/manila/create-share-network-and-delete.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "ManilaShares.create_share_network_and_delete": [ - { - "args": { - "name": "rally" - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 10 - }, - "context": { - "quotas": { - "manila": { - "share_networks": -1 - } - }, - "users": { - "tenants": 2, - "users_per_tenant": 1 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/manila/create-share-network-and-delete.yaml b/samples/tasks/scenarios/manila/create-share-network-and-delete.yaml deleted file mode 100644 index 8693c878..00000000 --- a/samples/tasks/scenarios/manila/create-share-network-and-delete.yaml +++ /dev/null @@ -1,16 +0,0 @@ ---- - ManilaShares.create_share_network_and_delete: - - - args: - name: "rally" - runner: - type: "constant" - times: 10 - concurrency: 10 - context: - quotas: - manila: - share_networks: -1 - users: - tenants: 2 - users_per_tenant: 1 diff --git a/samples/tasks/scenarios/manila/create-share-network-and-list.json b/samples/tasks/scenarios/manila/create-share-network-and-list.json deleted file mode 100644 index 6d3a645e..00000000 --- a/samples/tasks/scenarios/manila/create-share-network-and-list.json +++ /dev/null @@ -1,29 +0,0 @@ -{ - "ManilaShares.create_share_network_and_list": [ - { - "args": { - "name": "rally", - "detailed": true, - "search_opts": { - "name": "rally" - } - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 10 - }, - "context": { - "quotas": { - "manila": { - "share_networks": -1 - } - }, - "users": { - "tenants": 2, - "users_per_tenant": 1 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/manila/create-share-network-and-list.yaml b/samples/tasks/scenarios/manila/create-share-network-and-list.yaml deleted file mode 100644 index d7cb8286..00000000 --- a/samples/tasks/scenarios/manila/create-share-network-and-list.yaml +++ /dev/null @@ -1,19 +0,0 @@ ---- - ManilaShares.create_share_network_and_list: - - - args: - name: "rally" - detailed: True - search_opts: - name: "rally" - runner: - type: "constant" - times: 10 - concurrency: 10 - context: - quotas: - manila: - share_networks: -1 - users: - tenants: 2 - users_per_tenant: 1 diff --git a/samples/tasks/scenarios/manila/create-share-with-autocreated-share-networks-and-delete.json b/samples/tasks/scenarios/manila/create-share-with-autocreated-share-networks-and-delete.json deleted file mode 100644 index 55de4e34..00000000 --- a/samples/tasks/scenarios/manila/create-share-with-autocreated-share-networks-and-delete.json +++ /dev/null @@ -1,61 +0,0 @@ -{% set use_security_services = use_security_services or False %} -{ - "ManilaShares.create_and_delete_share": [ - { - "args": { - "share_proto": "nfs", - "size": 1, - "min_sleep": 1, - "max_sleep": 2 - }, - "runner": { - "type": "constant", - "times": 2, - "concurrency": 2 - }, - "context": { - "quotas": { - "manila": { - "shares": -1, - "gigabytes": -1, - "share_networks": -1 - } - }, - "users": { - "tenants": 2, - "users_per_tenant": 1, - "user_choice_method": "round_robin" - }, - "network": { - "networks_per_tenant": 1, - "start_cidr": "99.0.0.0/24" - }, - "manila_share_networks": { - "use_share_networks": true - } - {% if use_security_services %} - , - "manila_security_services": { - "security_services": [ - {"security_service_type": "ldap", - "server": "LDAP server address", - "user": "User that will be used", - "password": "Password for specified user"}, - {"security_service_type": "kerberos", - 
"dns_ip": "IP address of DNS service to be used", - "server": "Kerberos server address", - "domain": "Kerberos realm", - "user": "User that will be used", - "password": "Password for specified user"}, - {"security_service_type": "active_directory", - "dns_ip": "IP address of DNS service to be used", - "domain": "Domain from 'Active Directory'", - "user": "User from 'Active Directory'", - "password": "password for specified user"} - ] - } - {% endif %} - } - } - ] -} diff --git a/samples/tasks/scenarios/manila/create-share-with-autocreated-share-networks-and-delete.yaml b/samples/tasks/scenarios/manila/create-share-with-autocreated-share-networks-and-delete.yaml deleted file mode 100644 index 2dc9756c..00000000 --- a/samples/tasks/scenarios/manila/create-share-with-autocreated-share-networks-and-delete.yaml +++ /dev/null @@ -1,48 +0,0 @@ -{% set use_security_services = use_security_services or False %} ---- - ManilaShares.create_and_delete_share: - - - args: - share_proto: "nfs" - size: 1 - min_sleep: 1 - max_sleep: 2 - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - quotas: - manila: - shares: -1 - gigabytes: -1 - share_networks: -1 - users: - tenants: 2 - users_per_tenant: 1 - user_choice_method: "round_robin" - network: - networks_per_tenant: 1 - start_cidr: "99.0.0.0/24" - manila_share_networks: - use_share_networks: True - {% if use_security_services %} - manila_security_services: - security_services: [ - {"security_service_type": "ldap", - "server": "LDAP server address", - "user": "User that will be used", - "password": "Password for specified user"}, - {"security_service_type": "kerberos", - "dns_ip": "IP address of DNS service to be used", - "server": "Kerberos server address", - "domain": "Kerberos realm", - "user": "User that will be used", - "password": "Password for specified user"}, - {"security_service_type": "active_directory", - "dns_ip": "IP address of DNS service to be used", - "domain": "Domain from 'Active Directory'", - "user": "User from 'Active Directory'", - "password": "password for specified user"}, - ] - {% endif %} diff --git a/samples/tasks/scenarios/manila/create-share-with-autocreated-share-networks-and-list.json b/samples/tasks/scenarios/manila/create-share-with-autocreated-share-networks-and-list.json deleted file mode 100644 index 9ea7f1a3..00000000 --- a/samples/tasks/scenarios/manila/create-share-with-autocreated-share-networks-and-list.json +++ /dev/null @@ -1,66 +0,0 @@ -{% set use_security_services = use_security_services or False %} -{ - "ManilaShares.create_and_list_share": [ - { - "args": { - "share_proto": "nfs", - "size": 1, - "min_sleep": 1, - "max_sleep": 2 - }, - "runner": { - "type": "constant", - "times": 2, - "concurrency": 2 - }, - "context": { - "quotas": { - "manila": { - "shares": -1, - "gigabytes": -1, - "share_networks": -1 - } - }, - "users": { - "tenants": 2, - "users_per_tenant": 1, - "user_choice_method": "round_robin" - }, - "network": { - "networks_per_tenant": 1, - "start_cidr": "99.0.0.0/24" - }, - "manila_share_networks": { - "use_share_networks": true - } - {% if use_security_services %} - , - "manila_security_services": { - "security_services": [ - {"security_service_type": "ldap", - "server": "LDAP server address", - "user": "User that will be used", - "password": "Password for specified user"}, - {"security_service_type": "kerberos", - "dns_ip": "IP address of DNS service to be used", - "server": "Kerberos server address", - "domain": "Kerberos realm", - "user": "User that will be used", - "password": 
"Password for specified user"}, - {"security_service_type": "active_directory", - "dns_ip": "IP address of DNS service to be used", - "domain": "Domain from 'Active Directory'", - "user": "User from 'Active Directory'", - "password": "password for specified user"} - ] - } - {% endif %} - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/manila/create-share-with-autocreated-share-networks-and-list.yaml b/samples/tasks/scenarios/manila/create-share-with-autocreated-share-networks-and-list.yaml deleted file mode 100644 index e87237ed..00000000 --- a/samples/tasks/scenarios/manila/create-share-with-autocreated-share-networks-and-list.yaml +++ /dev/null @@ -1,51 +0,0 @@ -{% set use_security_services = use_security_services or False %} ---- - ManilaShares.create_and_list_share: - - - args: - share_proto: "nfs" - size: 1 - min_sleep: 1 - max_sleep: 2 - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - quotas: - manila: - shares: -1 - gigabytes: -1 - share_networks: -1 - users: - tenants: 2 - users_per_tenant: 1 - user_choice_method: "round_robin" - network: - networks_per_tenant: 1 - start_cidr: "99.0.0.0/24" - manila_share_networks: - use_share_networks: True - {% if use_security_services %} - manila_security_services: - security_services: [ - {"security_service_type": "ldap", - "server": "LDAP server address", - "user": "User that will be used", - "password": "Password for specified user"}, - {"security_service_type": "kerberos", - "dns_ip": "IP address of DNS service to be used", - "server": "Kerberos server address", - "domain": "Kerberos realm", - "user": "User that will be used", - "password": "Password for specified user"}, - {"security_service_type": "active_directory", - "dns_ip": "IP address of DNS service to be used", - "domain": "Domain from 'Active Directory'", - "user": "User from 'Active Directory'", - "password": "password for specified user"}, - ] - {% endif %} - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/manila/create-share-with-autocreated-share-networks-and-set-metadata.json b/samples/tasks/scenarios/manila/create-share-with-autocreated-share-networks-and-set-metadata.json deleted file mode 100644 index ae9840e5..00000000 --- a/samples/tasks/scenarios/manila/create-share-with-autocreated-share-networks-and-set-metadata.json +++ /dev/null @@ -1,51 +0,0 @@ -{ - "ManilaShares.set_and_delete_metadata": [ - { - "args": { - "sets": 1, - "set_size": 3, - "delete_size": 3, - "key_min_length": 1, - "key_max_length": 256, - "value_min_length": 1, - "value_max_length": 1024 - }, - "runner": { - "type": "constant", - "times": 1, - "concurrency": 1 - }, - "context": { - "quotas": { - "manila": { - "shares": -1, - "gigabytes": -1, - "share_networks": -1 - } - }, - "users": { - "tenants": 1, - "users_per_tenant": 1, - "user_choice_method": "round_robin" - }, - "network": { - "networks_per_tenant": 1, - "start_cidr": "99.0.0.0/24" - }, - "manila_share_networks": { - "use_share_networks": true - }, - "manila_shares": { - "shares_per_tenant": 1, - "share_proto": "NFS", - "size": 1 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/manila/create-share-with-autocreated-share-networks-and-set-metadata.yaml b/samples/tasks/scenarios/manila/create-share-with-autocreated-share-networks-and-set-metadata.yaml deleted file mode 100644 index b632230b..00000000 --- 
a/samples/tasks/scenarios/manila/create-share-with-autocreated-share-networks-and-set-metadata.yaml +++ /dev/null @@ -1,37 +0,0 @@ ---- - ManilaShares.set_and_delete_metadata: - - - args: - sets: 1 - set_size: 3 - delete_size: 3 - key_min_length: 1 - key_max_length: 256 - value_min_length: 1 - value_max_length: 1024 - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - quotas: - manila: - shares: -1 - gigabytes: -1 - share_networks: -1 - users: - tenants: 1 - users_per_tenant: 1 - user_choice_method: "round_robin" - network: - networks_per_tenant: 1 - start_cidr: "99.0.0.0/24" - manila_share_networks: - use_share_networks: True - manila_shares: - shares_per_tenant: 1 - share_proto: "NFS" - size: 1 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/manila/create-share-without-share-networks-and-delete.json b/samples/tasks/scenarios/manila/create-share-without-share-networks-and-delete.json deleted file mode 100644 index b2ef639c..00000000 --- a/samples/tasks/scenarios/manila/create-share-without-share-networks-and-delete.json +++ /dev/null @@ -1,29 +0,0 @@ -{ - "ManilaShares.create_and_delete_share": [ - { - "args": { - "share_proto": "nfs", - "size": 1, - "min_sleep": 1, - "max_sleep": 2 - }, - "runner": { - "type": "constant", - "times": 2, - "concurrency": 2 - }, - "context": { - "quotas": { - "manila": { - "shares": -1, - "gigabytes": -1 - } - }, - "users": { - "tenants": 2, - "users_per_tenant": 1 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/manila/create-share-without-share-networks-and-delete.yaml b/samples/tasks/scenarios/manila/create-share-without-share-networks-and-delete.yaml deleted file mode 100644 index ad68069f..00000000 --- a/samples/tasks/scenarios/manila/create-share-without-share-networks-and-delete.yaml +++ /dev/null @@ -1,20 +0,0 @@ ---- - ManilaShares.create_and_delete_share: - - - args: - share_proto: "nfs" - size: 1 - min_sleep: 1 - max_sleep: 2 - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - quotas: - manila: - shares: -1 - gigabytes: -1 - users: - tenants: 2 - users_per_tenant: 1 diff --git a/samples/tasks/scenarios/manila/create-share-without-share-networks-and-list.json b/samples/tasks/scenarios/manila/create-share-without-share-networks-and-list.json deleted file mode 100644 index 18df4dee..00000000 --- a/samples/tasks/scenarios/manila/create-share-without-share-networks-and-list.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "ManilaShares.create_and_list_share": [ - { - "args": { - "share_proto": "nfs", - "size": 1, - "min_sleep": 1, - "max_sleep": 2 - }, - "runner": { - "type": "constant", - "times": 2, - "concurrency": 2 - }, - "context": { - "quotas": { - "manila": { - "shares": -1, - "gigabytes": -1 - } - }, - "users": { - "tenants": 2, - "users_per_tenant": 1, - "user_choice_method": "round_robin" - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/manila/create-share-without-share-networks-and-list.yaml b/samples/tasks/scenarios/manila/create-share-without-share-networks-and-list.yaml deleted file mode 100644 index 11abe549..00000000 --- a/samples/tasks/scenarios/manila/create-share-without-share-networks-and-list.yaml +++ /dev/null @@ -1,24 +0,0 @@ ---- - ManilaShares.create_and_list_share: - - - args: - share_proto: "nfs" - size: 1 - min_sleep: 1 - max_sleep: 2 - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - quotas: - manila: - shares: -1 - gigabytes: -1 - users: - tenants: 2 - users_per_tenant: 1 - 
user_choice_method: "round_robin" - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/manila/create-share-without-share-networks-and-set-metadata.json b/samples/tasks/scenarios/manila/create-share-without-share-networks-and-set-metadata.json deleted file mode 100644 index c80cb619..00000000 --- a/samples/tasks/scenarios/manila/create-share-without-share-networks-and-set-metadata.json +++ /dev/null @@ -1,44 +0,0 @@ -{ - "ManilaShares.set_and_delete_metadata": [ - { - "args": { - "sets": 1, - "set_size": 3, - "delete_size": 3, - "key_min_length": 1, - "key_max_length": 256, - "value_min_length": 1, - "value_max_length": 1024 - }, - "runner": { - "type": "constant", - "times": 1, - "concurrency": 1 - }, - "context": { - "quotas": { - "manila": { - "shares": -1, - "gigabytes": -1, - "share_networks": -1 - } - }, - "users": { - "tenants": 1, - "users_per_tenant": 1, - "user_choice_method": "round_robin" - }, - "manila_shares": { - "shares_per_tenant": 1, - "share_proto": "NFS", - "size": 1 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/manila/create-share-without-share-networks-and-set-metadata.yaml b/samples/tasks/scenarios/manila/create-share-without-share-networks-and-set-metadata.yaml deleted file mode 100644 index 254ee30d..00000000 --- a/samples/tasks/scenarios/manila/create-share-without-share-networks-and-set-metadata.yaml +++ /dev/null @@ -1,32 +0,0 @@ ---- - ManilaShares.set_and_delete_metadata: - - - args: - sets: 1 - set_size: 3 - delete_size: 3 - key_min_length: 1 - key_max_length: 256 - value_min_length: 1 - value_max_length: 1024 - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - quotas: - manila: - shares: -1 - gigabytes: -1 - share_networks: -1 - users: - tenants: 1 - users_per_tenant: 1 - user_choice_method: "round_robin" - manila_shares: - shares_per_tenant: 1 - share_proto: "NFS" - size: 1 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/manila/list-share-servers.json b/samples/tasks/scenarios/manila/list-share-servers.json deleted file mode 100644 index 9f2a9e5b..00000000 --- a/samples/tasks/scenarios/manila/list-share-servers.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "ManilaShares.list_share_servers": [ - { - "args": { - "search_opts": {} - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 10 - } - } - ] -} diff --git a/samples/tasks/scenarios/manila/list-share-servers.yaml b/samples/tasks/scenarios/manila/list-share-servers.yaml deleted file mode 100644 index b251e67d..00000000 --- a/samples/tasks/scenarios/manila/list-share-servers.yaml +++ /dev/null @@ -1,9 +0,0 @@ ---- - ManilaShares.list_share_servers: - - - args: - search_opts: {} - runner: - type: "constant" - times: 10 - concurrency: 10 diff --git a/samples/tasks/scenarios/manila/list-shares.json b/samples/tasks/scenarios/manila/list-shares.json deleted file mode 100644 index f87d8412..00000000 --- a/samples/tasks/scenarios/manila/list-shares.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "ManilaShares.list_shares": [ - { - "args": { - "detailed": true - }, - "runner": { - "type": "constant", - "times": 12, - "concurrency": 1 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 4, - "user_choice_method": "round_robin" - } - } - } - ] -} diff --git a/samples/tasks/scenarios/manila/list-shares.yaml b/samples/tasks/scenarios/manila/list-shares.yaml deleted file mode 100644 index 658da16d..00000000 --- a/samples/tasks/scenarios/manila/list-shares.yaml +++ /dev/null @@ -1,14 
+0,0 @@ ---- - ManilaShares.list_shares: - - - args: - detailed: True - runner: - type: "constant" - times: 12 - concurrency: 1 - context: - users: - tenants: 3 - users_per_tenant: 4 - user_choice_method: "round_robin" diff --git a/samples/tasks/scenarios/mistral/create-delete-execution-with-workflow-name.json b/samples/tasks/scenarios/mistral/create-delete-execution-with-workflow-name.json deleted file mode 100644 index e14fcb6d..00000000 --- a/samples/tasks/scenarios/mistral/create-delete-execution-with-workflow-name.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "MistralExecutions.create_execution_from_workbook": [ - { - "args": { - "definition": "rally-jobs/extra/mistral_wb.yaml", - "workflow_name": "wf1", - "do_delete": true - }, - "runner": { - "type": "constant", - "times": 20, - "concurrency": 5 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/mistral/create-delete-execution-with-workflow-name.yaml b/samples/tasks/scenarios/mistral/create-delete-execution-with-workflow-name.yaml deleted file mode 100644 index 8d2dc426..00000000 --- a/samples/tasks/scenarios/mistral/create-delete-execution-with-workflow-name.yaml +++ /dev/null @@ -1,20 +0,0 @@ ---- - MistralExecutions.create_execution_from_workbook: - - - args: - definition: rally-jobs/extra/mistral_wb.yaml - workflow_name: wf1 - do_delete: true - runner: - type: "constant" - times: 20 - concurrency: 5 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - - diff --git a/samples/tasks/scenarios/mistral/create-delete-execution.json b/samples/tasks/scenarios/mistral/create-delete-execution.json deleted file mode 100644 index c715b680..00000000 --- a/samples/tasks/scenarios/mistral/create-delete-execution.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "MistralExecutions.create_execution_from_workbook": [ - { - "args": { - "definition": "rally-jobs/extra/mistral_wb.yaml", - "do_delete": true - }, - "runner": { - "type": "constant", - "times": 20, - "concurrency": 5 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/mistral/create-delete-execution.yaml b/samples/tasks/scenarios/mistral/create-delete-execution.yaml deleted file mode 100644 index 5c8e20d2..00000000 --- a/samples/tasks/scenarios/mistral/create-delete-execution.yaml +++ /dev/null @@ -1,18 +0,0 @@ ---- - MistralExecutions.create_execution_from_workbook: - - - args: - definition: rally-jobs/extra/mistral_wb.yaml - do_delete: true - runner: - type: "constant" - times: 20 - concurrency: 5 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - diff --git a/samples/tasks/scenarios/mistral/create-delete-workbook.json b/samples/tasks/scenarios/mistral/create-delete-workbook.json deleted file mode 100644 index 1d2013cc..00000000 --- a/samples/tasks/scenarios/mistral/create-delete-workbook.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "MistralWorkbooks.create_workbook": [ - { - "args": { - "definition": "rally-jobs/extra/mistral_wb.yaml", - "do_delete": true - }, - "runner": { - "type": "constant", - "times": 50, - "concurrency": 10 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/mistral/create-delete-workbook.yaml b/samples/tasks/scenarios/mistral/create-delete-workbook.yaml deleted file mode 100644 
index 7dc4f1fb..00000000 --- a/samples/tasks/scenarios/mistral/create-delete-workbook.yaml +++ /dev/null @@ -1,15 +0,0 @@ ---- - MistralWorkbooks.create_workbook: - - - args: - definition: rally-jobs/extra/mistral_wb.yaml - do_delete: true - runner: - type: "constant" - times: 50 - concurrency: 10 - context: - users: - tenants: 1 - users_per_tenant: 1 - diff --git a/samples/tasks/scenarios/mistral/create-execution-with-inputs.json b/samples/tasks/scenarios/mistral/create-execution-with-inputs.json deleted file mode 100644 index ab341860..00000000 --- a/samples/tasks/scenarios/mistral/create-execution-with-inputs.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "MistralExecutions.create_execution_from_workbook": [ - { - "args": { - "definition": "rally-jobs/extra/mistral_wb.yaml", - "wf_input": "rally-jobs/extra/mistral_input.json" - }, - "runner": { - "type": "constant", - "times": 20, - "concurrency": 5 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/mistral/create-execution-with-inputs.yaml b/samples/tasks/scenarios/mistral/create-execution-with-inputs.yaml deleted file mode 100644 index bc97406b..00000000 --- a/samples/tasks/scenarios/mistral/create-execution-with-inputs.yaml +++ /dev/null @@ -1,18 +0,0 @@ ---- - MistralExecutions.create_execution_from_workbook: - - - args: - definition: rally-jobs/extra/mistral_wb.yaml - wf_input: rally-jobs/extra/mistral_input.json - runner: - type: "constant" - times: 20 - concurrency: 5 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - diff --git a/samples/tasks/scenarios/mistral/create-execution-with-params.json b/samples/tasks/scenarios/mistral/create-execution-with-params.json deleted file mode 100644 index 07c59a9d..00000000 --- a/samples/tasks/scenarios/mistral/create-execution-with-params.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "MistralExecutions.create_execution_from_workbook": [ - { - "args": { - "definition": "rally-jobs/extra/mistral_wb.yaml", - "params": "rally-jobs/extra/mistral_params.json" - }, - "runner": { - "type": "constant", - "times": 20, - "concurrency": 5 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/mistral/create-execution-with-params.yaml b/samples/tasks/scenarios/mistral/create-execution-with-params.yaml deleted file mode 100644 index 26bb3d1e..00000000 --- a/samples/tasks/scenarios/mistral/create-execution-with-params.yaml +++ /dev/null @@ -1,18 +0,0 @@ ---- - MistralExecutions.create_execution_from_workbook: - - - args: - definition: rally-jobs/extra/mistral_wb.yaml - params: rally-jobs/extra/mistral_params.json - runner: - type: "constant" - times: 20 - concurrency: 5 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - diff --git a/samples/tasks/scenarios/mistral/create-execution-with-workflow-name.json b/samples/tasks/scenarios/mistral/create-execution-with-workflow-name.json deleted file mode 100644 index f8714438..00000000 --- a/samples/tasks/scenarios/mistral/create-execution-with-workflow-name.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "MistralExecutions.create_execution_from_workbook": [ - { - "args": { - "definition": "rally-jobs/extra/mistral_wb.yaml", - "workflow_name": "wf1" - }, - "runner": { - "type": "constant", - "times": 20, - "concurrency": 5 - }, - "context": { - "users": { - 
"tenants": 2, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/mistral/create-execution-with-workflow-name.yaml b/samples/tasks/scenarios/mistral/create-execution-with-workflow-name.yaml deleted file mode 100644 index ec24be61..00000000 --- a/samples/tasks/scenarios/mistral/create-execution-with-workflow-name.yaml +++ /dev/null @@ -1,18 +0,0 @@ ---- - MistralExecutions.create_execution_from_workbook: - - - args: - definition: rally-jobs/extra/mistral_wb.yaml - workflow_name: wf1 - runner: - type: "constant" - times: 20 - concurrency: 5 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - diff --git a/samples/tasks/scenarios/mistral/create-execution.json b/samples/tasks/scenarios/mistral/create-execution.json deleted file mode 100644 index 4d3e5de4..00000000 --- a/samples/tasks/scenarios/mistral/create-execution.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "MistralExecutions.create_execution_from_workbook": [ - { - "args": { - "definition": "rally-jobs/extra/mistral_wb.yaml" - }, - "runner": { - "type": "constant", - "times": 20, - "concurrency": 5 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/mistral/create-execution.yaml b/samples/tasks/scenarios/mistral/create-execution.yaml deleted file mode 100644 index 2d8c01a7..00000000 --- a/samples/tasks/scenarios/mistral/create-execution.yaml +++ /dev/null @@ -1,16 +0,0 @@ ---- - MistralExecutions.create_execution_from_workbook: - - - args: - definition: rally-jobs/extra/mistral_wb.yaml - runner: - type: "constant" - times: 20 - concurrency: 5 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/mistral/create-workbook.json b/samples/tasks/scenarios/mistral/create-workbook.json deleted file mode 100644 index c9c2edad..00000000 --- a/samples/tasks/scenarios/mistral/create-workbook.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "MistralWorkbooks.create_workbook": [ - { - "args": { - "definition": "rally-jobs/extra/mistral_wb.yaml" - }, - "runner": { - "type": "constant", - "times": 50, - "concurrency": 10 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/mistral/create-workbook.yaml b/samples/tasks/scenarios/mistral/create-workbook.yaml deleted file mode 100644 index 243356ee..00000000 --- a/samples/tasks/scenarios/mistral/create-workbook.yaml +++ /dev/null @@ -1,14 +0,0 @@ ---- - MistralWorkbooks.create_workbook: - - - args: - definition: rally-jobs/extra/mistral_wb.yaml - runner: - type: "constant" - times: 50 - concurrency: 10 - context: - users: - tenants: 1 - users_per_tenant: 1 - diff --git a/samples/tasks/scenarios/mistral/list-executions.json b/samples/tasks/scenarios/mistral/list-executions.json deleted file mode 100644 index be342ced..00000000 --- a/samples/tasks/scenarios/mistral/list-executions.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "MistralExecutions.list_executions": [ - { - "runner": { - "type": "constant", - "times": 50, - "concurrency": 10 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/mistral/list-executions.yaml b/samples/tasks/scenarios/mistral/list-executions.yaml deleted file mode 100644 index 
5eae1824..00000000 --- a/samples/tasks/scenarios/mistral/list-executions.yaml +++ /dev/null @@ -1,14 +0,0 @@ ---- - MistralExecutions.list_executions: - - - runner: - type: "constant" - times: 50 - concurrency: 10 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/mistral/list-workbooks.json b/samples/tasks/scenarios/mistral/list-workbooks.json deleted file mode 100644 index 04c68d14..00000000 --- a/samples/tasks/scenarios/mistral/list-workbooks.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "MistralWorkbooks.list_workbooks": [ - { - "runner": { - "type": "constant", - "times": 50, - "concurrency": 10 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/mistral/list-workbooks.yaml b/samples/tasks/scenarios/mistral/list-workbooks.yaml deleted file mode 100644 index ab28301d..00000000 --- a/samples/tasks/scenarios/mistral/list-workbooks.yaml +++ /dev/null @@ -1,11 +0,0 @@ ---- - MistralWorkbooks.list_workbooks: - - - runner: - type: "constant" - times: 50 - concurrency: 10 - context: - users: - tenants: 1 - users_per_tenant: 1 diff --git a/samples/tasks/scenarios/monasca/list-metrics.json b/samples/tasks/scenarios/monasca/list-metrics.json deleted file mode 100644 index 7f09ca93..00000000 --- a/samples/tasks/scenarios/monasca/list-metrics.json +++ /dev/null @@ -1,33 +0,0 @@ -{ - "MonascaMetrics.list_metrics": [ - { - "runner": { - "type": "constant", - "times": 10, - "concurrency": 1 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - }, - "roles": [ - "monasca-user" - ], - "monasca_metrics": { - "dimensions": { - "region": "RegionOne", - "service": "identity", - "hostname": "fake_host", - "url": "http://fake_host:5000/v2.0" - }, - "metrics_per_tenant": 10 - } - }, - "args": { - "region": "RegionOne", - "limit": 5 - } - } - ] -} diff --git a/samples/tasks/scenarios/monasca/list-metrics.yaml b/samples/tasks/scenarios/monasca/list-metrics.yaml deleted file mode 100644 index 76a99e66..00000000 --- a/samples/tasks/scenarios/monasca/list-metrics.yaml +++ /dev/null @@ -1,23 +0,0 @@ ---- - MonascaMetrics.list_metrics: - - - runner: - type: "constant" - times: 10 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - roles: - - "monasca-user" - monasca_metrics: - "dimensions": - "region": "RegionOne" - "service": "identity" - "hostname": "fake_host" - "url": "http://fake_host:5000/v2.0" - "metrics_per_tenant": 10 - args: - "region": "RegionOne" - "limit": 5 \ No newline at end of file diff --git a/samples/tasks/scenarios/murano/create-and-delete-environment.json b/samples/tasks/scenarios/murano/create-and-delete-environment.json deleted file mode 100644 index d9fa7194..00000000 --- a/samples/tasks/scenarios/murano/create-and-delete-environment.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "MuranoEnvironments.create_and_delete_environment": [ - { - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/murano/create-and-delete-environment.yaml b/samples/tasks/scenarios/murano/create-and-delete-environment.yaml deleted file mode 100644 index 92e70138..00000000 --- a/samples/tasks/scenarios/murano/create-and-delete-environment.yaml +++ /dev/null @@ -1,11 +0,0 @@ ---- - MuranoEnvironments.create_and_delete_environment: - - - runner: - type: "constant" - times: 10 - concurrency: 2 - 
context: - users: - tenants: 2 - users_per_tenant: 2 diff --git a/samples/tasks/scenarios/murano/create-and-deploy-environment.json b/samples/tasks/scenarios/murano/create-and-deploy-environment.json deleted file mode 100644 index 52dc1335..00000000 --- a/samples/tasks/scenarios/murano/create-and-deploy-environment.json +++ /dev/null @@ -1,44 +0,0 @@ -{ - "MuranoEnvironments.create_and_deploy_environment": [ - { - "args": { - "packages_per_env": 2 - }, - "runner": { - "type": "constant", - "times": 2, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - }, - "murano_packages": { - "app_package": "rally-jobs/extra/murano/applications/HelloReporter/io.murano.apps.HelloReporter.zip" - }, - "roles": ["admin"] - } - }, - { - "args": { - "packages_per_env": 2 - }, - "runner": { - "type": "constant", - "times": 2, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - }, - "murano_packages": { - "app_package": "rally-jobs/extra/murano/applications/HelloReporter/io.murano.apps.HelloReporter/" - }, - "roles": ["admin"] - } - } - ] -} diff --git a/samples/tasks/scenarios/murano/create-and-deploy-environment.yaml b/samples/tasks/scenarios/murano/create-and-deploy-environment.yaml deleted file mode 100644 index ca1dc564..00000000 --- a/samples/tasks/scenarios/murano/create-and-deploy-environment.yaml +++ /dev/null @@ -1,32 +0,0 @@ ---- - MuranoEnvironments.create_and_deploy_environment: - - - args: - packages_per_env: 2 - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - murano_packages: - app_package: "rally-jobs/extra/murano/applications/HelloReporter/io.murano.apps.HelloReporter.zip" - roles: - - "admin" - - - args: - packages_per_env: 2 - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - murano_packages: - app_package: "rally-jobs/extra/murano/applications/HelloReporter/io.murano.apps.HelloReporter/" - roles: - - "admin" diff --git a/samples/tasks/scenarios/murano/import-and-delete-package.json b/samples/tasks/scenarios/murano/import-and-delete-package.json deleted file mode 100644 index 69731a17..00000000 --- a/samples/tasks/scenarios/murano/import-and-delete-package.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "MuranoPackages.import_and_delete_package": [ - { - "args": { - "package": "rally-jobs/extra/murano/applications/HelloReporter/io.murano.apps.HelloReporter/" - }, - "runner": { - "type": "constant", - "times": 5, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 1 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/murano/import-and-delete-package.yaml b/samples/tasks/scenarios/murano/import-and-delete-package.yaml deleted file mode 100644 index 926f3a46..00000000 --- a/samples/tasks/scenarios/murano/import-and-delete-package.yaml +++ /dev/null @@ -1,13 +0,0 @@ ---- - MuranoPackages.import_and_delete_package: - - - args: - package: "rally-jobs/extra/murano/applications/HelloReporter/io.murano.apps.HelloReporter/" - runner: - type: "constant" - times: 5 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 1 diff --git a/samples/tasks/scenarios/murano/import-and-filter-applications.json b/samples/tasks/scenarios/murano/import-and-filter-applications.json deleted file mode 100644 index 7be44741..00000000 --- a/samples/tasks/scenarios/murano/import-and-filter-applications.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - 
"MuranoPackages.import_and_filter_applications": [ - { - "args": { - "package": "rally-jobs/extra/murano/applications/HelloReporter/io.murano.apps.HelloReporter/", - "filter_query": {"category" : "Web"} - }, - "runner": { - "type": "constant", - "times": 5, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 1 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/murano/import-and-filter-applications.yaml b/samples/tasks/scenarios/murano/import-and-filter-applications.yaml deleted file mode 100644 index 07771bc8..00000000 --- a/samples/tasks/scenarios/murano/import-and-filter-applications.yaml +++ /dev/null @@ -1,14 +0,0 @@ ---- - MuranoPackages.import_and_filter_applications: - - - args: - package: "rally-jobs/extra/murano/applications/HelloReporter/io.murano.apps.HelloReporter/" - filter_query: {"category" : "Web"} - runner: - type: "constant" - times: 5 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 1 diff --git a/samples/tasks/scenarios/murano/import-and-list-packages.json b/samples/tasks/scenarios/murano/import-and-list-packages.json deleted file mode 100644 index 1def045a..00000000 --- a/samples/tasks/scenarios/murano/import-and-list-packages.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "MuranoPackages.import_and_list_packages": [ - { - "args": { - "package": "rally-jobs/extra/murano/applications/HelloReporter/io.murano.apps.HelloReporter/" - }, - "runner": { - "type": "constant", - "times": 5, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 1 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/murano/import-and-list-packages.yaml b/samples/tasks/scenarios/murano/import-and-list-packages.yaml deleted file mode 100644 index 1b16b718..00000000 --- a/samples/tasks/scenarios/murano/import-and-list-packages.yaml +++ /dev/null @@ -1,13 +0,0 @@ ---- - MuranoPackages.import_and_list_packages: - - - args: - package: "rally-jobs/extra/murano/applications/HelloReporter/io.murano.apps.HelloReporter/" - runner: - type: "constant" - times: 5 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 1 diff --git a/samples/tasks/scenarios/murano/list-environments.json b/samples/tasks/scenarios/murano/list-environments.json deleted file mode 100644 index 2494a42e..00000000 --- a/samples/tasks/scenarios/murano/list-environments.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "MuranoEnvironments.list_environments": [ - { - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - }, - "murano_environments": { - "environments_per_tenant": 2 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/murano/list-environments.yaml b/samples/tasks/scenarios/murano/list-environments.yaml deleted file mode 100644 index f7c43fe4..00000000 --- a/samples/tasks/scenarios/murano/list-environments.yaml +++ /dev/null @@ -1,13 +0,0 @@ ---- - MuranoEnvironments.list_environments: - - - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - murano_environments: - environments_per_tenant: 2 \ No newline at end of file diff --git a/samples/tasks/scenarios/murano/package-lifecycle.json b/samples/tasks/scenarios/murano/package-lifecycle.json deleted file mode 100644 index f58c4d06..00000000 --- a/samples/tasks/scenarios/murano/package-lifecycle.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "MuranoPackages.package_lifecycle": [ - { - "args": { - "package": 
"rally-jobs/extra/murano/applications/HelloReporter/io.murano.apps.HelloReporter/", - "body": {"categories": ["Web"]}, - "operation": "add" - }, - "runner": { - "type": "constant", - "times": 5, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 1 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/murano/package-lifecycle.yaml b/samples/tasks/scenarios/murano/package-lifecycle.yaml deleted file mode 100644 index 72f55a8b..00000000 --- a/samples/tasks/scenarios/murano/package-lifecycle.yaml +++ /dev/null @@ -1,15 +0,0 @@ ---- - MuranoPackages.package_lifecycle: - - - args: - package: "rally-jobs/extra/murano/applications/HelloReporter/io.murano.apps.HelloReporter/" - body: {"categories": ["Web"]} - operation: "add" - runner: - type: "constant" - times: 5 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 1 diff --git a/samples/tasks/scenarios/neutron/create-and-assoc-disassoc-networks-bgpvpns.json b/samples/tasks/scenarios/neutron/create-and-assoc-disassoc-networks-bgpvpns.json deleted file mode 100644 index 198b9378..00000000 --- a/samples/tasks/scenarios/neutron/create-and-assoc-disassoc-networks-bgpvpns.json +++ /dev/null @@ -1,24 +0,0 @@ -{ - "NeutronBGPVPN.create_bgpvpn_assoc_disassoc_networks": [ - { - "args":{}, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - }, - "network":{} - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/neutron/create-and-assoc-disassoc-networks-bgpvpns.yaml b/samples/tasks/scenarios/neutron/create-and-assoc-disassoc-networks-bgpvpns.yaml deleted file mode 100644 index 43daef03..00000000 --- a/samples/tasks/scenarios/neutron/create-and-assoc-disassoc-networks-bgpvpns.yaml +++ /dev/null @@ -1,16 +0,0 @@ ---- - NeutronBGPVPN.create_bgpvpn_assoc_disassoc_networks: - - - args: {} - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 1 - network: {} - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/neutron/create-and-assoc-disassoc-routers-bgpvpns.json b/samples/tasks/scenarios/neutron/create-and-assoc-disassoc-routers-bgpvpns.json deleted file mode 100644 index bd893383..00000000 --- a/samples/tasks/scenarios/neutron/create-and-assoc-disassoc-routers-bgpvpns.json +++ /dev/null @@ -1,24 +0,0 @@ -{ - "NeutronBGPVPN.create_bgpvpn_assoc_disassoc_routers": [ - { - "args":{}, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - }, - "router":{} - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/neutron/create-and-assoc-disassoc-routers-bgpvpns.yaml b/samples/tasks/scenarios/neutron/create-and-assoc-disassoc-routers-bgpvpns.yaml deleted file mode 100644 index c37851fd..00000000 --- a/samples/tasks/scenarios/neutron/create-and-assoc-disassoc-routers-bgpvpns.yaml +++ /dev/null @@ -1,16 +0,0 @@ ---- - NeutronBGPVPN.create_bgpvpn_assoc_disassoc_routers: - - - args: {} - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 1 - router: {} - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/neutron/create-and-delete-bgpvpns.json b/samples/tasks/scenarios/neutron/create-and-delete-bgpvpns.json deleted file mode 100644 index e46fa95b..00000000 --- 
a/samples/tasks/scenarios/neutron/create-and-delete-bgpvpns.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "NeutronBGPVPN.create_and_delete_bgpvpns": [ - { - "args":{}, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/neutron/create-and-delete-bgpvpns.yaml b/samples/tasks/scenarios/neutron/create-and-delete-bgpvpns.yaml deleted file mode 100644 index 174afe0f..00000000 --- a/samples/tasks/scenarios/neutron/create-and-delete-bgpvpns.yaml +++ /dev/null @@ -1,15 +0,0 @@ ---- - NeutronBGPVPN.create_and_delete_bgpvpns: - - - args: {} - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/neutron/create-and-delete-floating-ips.json b/samples/tasks/scenarios/neutron/create-and-delete-floating-ips.json deleted file mode 100644 index 42ab99c5..00000000 --- a/samples/tasks/scenarios/neutron/create-and-delete-floating-ips.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "NeutronNetworks.create_and_delete_floating_ips": [ - { - "args": { - "floating_network": "public", - "floating_ip_args": {} - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 5 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 3 - }, - "quotas": { - "neutron": { - "floatingip": -1 - } - } - } - } - ] -} diff --git a/samples/tasks/scenarios/neutron/create-and-delete-floating-ips.yaml b/samples/tasks/scenarios/neutron/create-and-delete-floating-ips.yaml deleted file mode 100644 index e5f2c6e7..00000000 --- a/samples/tasks/scenarios/neutron/create-and-delete-floating-ips.yaml +++ /dev/null @@ -1,17 +0,0 @@ ---- - NeutronNetworks.create_and_delete_floating_ips: - - - args: - floating_network: "public" - floating_ip_args: {} - runner: - type: "constant" - times: 10 - concurrency: 5 - context: - users: - tenants: 2 - users_per_tenant: 3 - quotas: - neutron: - floatingip: -1 diff --git a/samples/tasks/scenarios/neutron/create-and-delete-healthmonitors.json b/samples/tasks/scenarios/neutron/create-and-delete-healthmonitors.json deleted file mode 100644 index 8b8e7d8d..00000000 --- a/samples/tasks/scenarios/neutron/create-and-delete-healthmonitors.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "NeutronLoadbalancerV1.create_and_delete_healthmonitors": [ - { - "args": { - "healthmonitor_create_args": {} - }, - "runner": { - "type": "constant", - "times": 100, - "concurrency": 10 - }, - "context": { - "users": { - "tenants": 5, - "users_per_tenant": 2 - }, - "quotas": { - "neutron": { - "health_monitor": -1 - } - } - } - } - ] -} diff --git a/samples/tasks/scenarios/neutron/create-and-delete-healthmonitors.yaml b/samples/tasks/scenarios/neutron/create-and-delete-healthmonitors.yaml deleted file mode 100644 index 2b722c21..00000000 --- a/samples/tasks/scenarios/neutron/create-and-delete-healthmonitors.yaml +++ /dev/null @@ -1,16 +0,0 @@ ---- - NeutronLoadbalancerV1.create_and_delete_healthmonitors: - - - args: - healthmonitor_create_args: {} - runner: - type: "constant" - times: 100 - concurrency: 10 - context: - users: - tenants: 5 - users_per_tenant: 2 - quotas: - neutron: - health_monitor: -1 diff --git a/samples/tasks/scenarios/neutron/create-and-delete-networks.json b/samples/tasks/scenarios/neutron/create-and-delete-networks.json deleted file mode 100644 index 78488a0f..00000000 --- 
a/samples/tasks/scenarios/neutron/create-and-delete-networks.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "NeutronNetworks.create_and_delete_networks": [ - { - "args": { - "network_create_args": {} - }, - "runner": { - "type": "constant", - "times": 100, - "concurrency": 10 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 3 - }, - "quotas": { - "neutron": { - "network": -1 - } - } - } - } - ] -} diff --git a/samples/tasks/scenarios/neutron/create-and-delete-networks.yaml b/samples/tasks/scenarios/neutron/create-and-delete-networks.yaml deleted file mode 100644 index a4fb9d70..00000000 --- a/samples/tasks/scenarios/neutron/create-and-delete-networks.yaml +++ /dev/null @@ -1,16 +0,0 @@ ---- - NeutronNetworks.create_and_delete_networks: - - - args: - network_create_args: {} - runner: - type: "constant" - times: 100 - concurrency: 10 - context: - users: - tenants: 3 - users_per_tenant: 3 - quotas: - neutron: - network: -1 diff --git a/samples/tasks/scenarios/neutron/create-and-delete-pools.json b/samples/tasks/scenarios/neutron/create-and-delete-pools.json deleted file mode 100644 index eaf57e4b..00000000 --- a/samples/tasks/scenarios/neutron/create-and-delete-pools.json +++ /dev/null @@ -1,28 +0,0 @@ -{ - "NeutronLoadbalancerV1.create_and_delete_pools": [ - { - "args": { - "pool_create_args":{} - }, - "runner": { - "type": "constant", - "times": 100, - "concurrency": 10 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 3 - }, - "network":{}, - "quotas": { - "neutron": { - "network": -1, - "subnet": -1, - "pool": -1 - } - } - } - } - ] -} diff --git a/samples/tasks/scenarios/neutron/create-and-delete-pools.yaml b/samples/tasks/scenarios/neutron/create-and-delete-pools.yaml deleted file mode 100644 index c9cb5a32..00000000 --- a/samples/tasks/scenarios/neutron/create-and-delete-pools.yaml +++ /dev/null @@ -1,19 +0,0 @@ ---- - NeutronLoadbalancerV1.create_and_delete_pools: - - - args: - pool_create_args: {} - runner: - type: "constant" - times: 100 - concurrency: 10 - context: - users: - tenants: 3 - users_per_tenant: 3 - network: {} - quotas: - neutron: - network: -1 - subnet: -1 - pool: -1 diff --git a/samples/tasks/scenarios/neutron/create-and-delete-ports.json b/samples/tasks/scenarios/neutron/create-and-delete-ports.json deleted file mode 100644 index 406f393c..00000000 --- a/samples/tasks/scenarios/neutron/create-and-delete-ports.json +++ /dev/null @@ -1,29 +0,0 @@ -{ - "NeutronNetworks.create_and_delete_ports": [ - { - "args": { - "network_create_args": {}, - "port_create_args": {}, - "ports_per_network": 10 - }, - "runner": { - "type": "constant", - "times": 100, - "concurrency": 10 - }, - "context": { - "network": {}, - "users": { - "tenants": 3, - "users_per_tenant": 3 - }, - "quotas": { - "neutron": { - "network": -1, - "port": -1 - } - } - } - } - ] -} diff --git a/samples/tasks/scenarios/neutron/create-and-delete-ports.yaml b/samples/tasks/scenarios/neutron/create-and-delete-ports.yaml deleted file mode 100644 index c09bb85a..00000000 --- a/samples/tasks/scenarios/neutron/create-and-delete-ports.yaml +++ /dev/null @@ -1,20 +0,0 @@ ---- - NeutronNetworks.create_and_delete_ports: - - - args: - network_create_args: {} - port_create_args: {} - ports_per_network: 10 - runner: - type: "constant" - times: 100 - concurrency: 10 - context: - network: {} - users: - tenants: 3 - users_per_tenant: 3 - quotas: - neutron: - network: -1 - port: -1 diff --git a/samples/tasks/scenarios/neutron/create-and-delete-routers.json 
b/samples/tasks/scenarios/neutron/create-and-delete-routers.json deleted file mode 100644 index 951b7c80..00000000 --- a/samples/tasks/scenarios/neutron/create-and-delete-routers.json +++ /dev/null @@ -1,32 +0,0 @@ -{ - "NeutronNetworks.create_and_delete_routers": [ - { - "args": { - "network_create_args": {}, - "subnet_create_args": {}, - "subnet_cidr_start": "1.1.0.0/30", - "subnets_per_network": 2, - "router_create_args": {} - }, - "runner": { - "type": "constant", - "times": 30, - "concurrency": 10 - }, - "context": { - "network": {}, - "users": { - "tenants": 3, - "users_per_tenant": 3 - }, - "quotas": { - "neutron": { - "network": -1, - "subnet": -1, - "router": -1 - } - } - } - } - ] -} diff --git a/samples/tasks/scenarios/neutron/create-and-delete-routers.yaml b/samples/tasks/scenarios/neutron/create-and-delete-routers.yaml deleted file mode 100644 index 7d5f7f65..00000000 --- a/samples/tasks/scenarios/neutron/create-and-delete-routers.yaml +++ /dev/null @@ -1,23 +0,0 @@ ---- - NeutronNetworks.create_and_delete_routers: - - - args: - network_create_args: {} - subnet_create_args: {} - subnet_cidr_start: "1.1.0.0/30" - subnets_per_network: 2 - router_create_args: {} - runner: - type: "constant" - times: 30 - concurrency: 10 - context: - network: {} - users: - tenants: 3 - users_per_tenant: 3 - quotas: - neutron: - network: -1 - subnet: -1 - router: -1 diff --git a/samples/tasks/scenarios/neutron/create-and-delete-security-groups.json b/samples/tasks/scenarios/neutron/create-and-delete-security-groups.json deleted file mode 100644 index 1e1f667d..00000000 --- a/samples/tasks/scenarios/neutron/create-and-delete-security-groups.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "NeutronSecurityGroup.create_and_delete_security_groups": [ - { - "args": { - "security_group_create_args": {} - }, - "runner": { - "type": "constant", - "times": 100, - "concurrency": 10 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 3 - }, - "quotas": { - "neutron": { - "security_group": -1 - } - } - } - } - ] -} diff --git a/samples/tasks/scenarios/neutron/create-and-delete-security-groups.yaml b/samples/tasks/scenarios/neutron/create-and-delete-security-groups.yaml deleted file mode 100644 index f2905705..00000000 --- a/samples/tasks/scenarios/neutron/create-and-delete-security-groups.yaml +++ /dev/null @@ -1,16 +0,0 @@ ---- - NeutronSecurityGroup.create_and_delete_security_groups: - - - args: - security_group_create_args: {} - runner: - type: "constant" - times: 100 - concurrency: 10 - context: - users: - tenants: 3 - users_per_tenant: 3 - quotas: - neutron: - security_group: -1 diff --git a/samples/tasks/scenarios/neutron/create-and-delete-subnets.json b/samples/tasks/scenarios/neutron/create-and-delete-subnets.json deleted file mode 100644 index 25f424bf..00000000 --- a/samples/tasks/scenarios/neutron/create-and-delete-subnets.json +++ /dev/null @@ -1,30 +0,0 @@ -{ - "NeutronNetworks.create_and_delete_subnets": [ - { - "args": { - "network_create_args": {}, - "subnet_create_args": {}, - "subnet_cidr_start": "1.1.0.0/30", - "subnets_per_network": 2 - }, - "runner": { - "type": "constant", - "times": 100, - "concurrency": 10 - }, - "context": { - "network": {}, - "users": { - "tenants": 3, - "users_per_tenant": 3 - }, - "quotas": { - "neutron": { - "network": -1, - "subnet": -1 - } - } - } - } - ] -} diff --git a/samples/tasks/scenarios/neutron/create-and-delete-subnets.yaml b/samples/tasks/scenarios/neutron/create-and-delete-subnets.yaml deleted file mode 100644 index e97f3902..00000000 --- 
a/samples/tasks/scenarios/neutron/create-and-delete-subnets.yaml +++ /dev/null @@ -1,21 +0,0 @@ ---- - NeutronNetworks.create_and_delete_subnets: - - - args: - network_create_args: {} - subnet_create_args: {} - subnet_cidr_start: "1.1.0.0/30" - subnets_per_network: 2 - runner: - type: "constant" - times: 100 - concurrency: 10 - context: - network: {} - users: - tenants: 3 - users_per_tenant: 3 - quotas: - neutron: - network: -1 - subnet: -1 diff --git a/samples/tasks/scenarios/neutron/create-and-delete-vips.json b/samples/tasks/scenarios/neutron/create-and-delete-vips.json deleted file mode 100644 index bcbd9444..00000000 --- a/samples/tasks/scenarios/neutron/create-and-delete-vips.json +++ /dev/null @@ -1,32 +0,0 @@ -{ - "NeutronLoadbalancerV1.create_and_delete_vips": [ - { - "args": { - "vip_create_args":{} - }, - "runner": { - "type": "constant", - "times": 100, - "concurrency": 10 - }, - "context": { - "users": { - "tenants": 5, - "users_per_tenant": 1 - }, - "network": {}, - "lbaas": { - "pool": {} - }, - "quotas": { - "neutron": { - "network": -1, - "subnet": -1, - "pool": -1, - "vip": -1 - } - } - } - } - ] -} diff --git a/samples/tasks/scenarios/neutron/create-and-delete-vips.yaml b/samples/tasks/scenarios/neutron/create-and-delete-vips.yaml deleted file mode 100644 index 5ca7fdb0..00000000 --- a/samples/tasks/scenarios/neutron/create-and-delete-vips.yaml +++ /dev/null @@ -1,22 +0,0 @@ ---- - NeutronLoadbalancerV1.create_and_delete_vips: - - - args: - vip_create_args: {} - runner: - type: "constant" - times: 100 - concurrency: 10 - context: - users: - tenants: 5 - users_per_tenant: 1 - network: {} - lbaas: - pool: {} - quotas: - neutron: - network: -1 - subnet: -1 - pool: -1 - vip: -1 diff --git a/samples/tasks/scenarios/neutron/create-and-list-bgpvpns.json b/samples/tasks/scenarios/neutron/create-and-list-bgpvpns.json deleted file mode 100644 index b336553d..00000000 --- a/samples/tasks/scenarios/neutron/create-and-list-bgpvpns.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "NeutronBGPVPN.create_and_list_bgpvpns": [ - { - "args":{}, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/neutron/create-and-list-bgpvpns.yaml b/samples/tasks/scenarios/neutron/create-and-list-bgpvpns.yaml deleted file mode 100644 index cede19ba..00000000 --- a/samples/tasks/scenarios/neutron/create-and-list-bgpvpns.yaml +++ /dev/null @@ -1,15 +0,0 @@ ---- - NeutronBGPVPN.create_and_list_bgpvpns: - - - args: {} - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 \ No newline at end of file diff --git a/samples/tasks/scenarios/neutron/create-and-list-floating-ips.json b/samples/tasks/scenarios/neutron/create-and-list-floating-ips.json deleted file mode 100644 index 63b3a618..00000000 --- a/samples/tasks/scenarios/neutron/create-and-list-floating-ips.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "NeutronNetworks.create_and_list_floating_ips": [ - { - "args": { - "floating_network": "public", - "floating_ip_args": {} - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 5 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 3 - }, - "quotas": { - "neutron": { - "floatingip": -1 - } - } - } - } - ] -} diff --git a/samples/tasks/scenarios/neutron/create-and-list-floating-ips.yaml 
b/samples/tasks/scenarios/neutron/create-and-list-floating-ips.yaml deleted file mode 100644 index f54083a3..00000000 --- a/samples/tasks/scenarios/neutron/create-and-list-floating-ips.yaml +++ /dev/null @@ -1,17 +0,0 @@ ---- - NeutronNetworks.create_and_list_floating_ips: - - - args: - floating_network: "public" - floating_ip_args: {} - runner: - type: "constant" - times: 10 - concurrency: 5 - context: - users: - tenants: 2 - users_per_tenant: 3 - quotas: - neutron: - floatingip: -1 diff --git a/samples/tasks/scenarios/neutron/create-and-list-healthmonitors.json b/samples/tasks/scenarios/neutron/create-and-list-healthmonitors.json deleted file mode 100644 index 714d49cd..00000000 --- a/samples/tasks/scenarios/neutron/create-and-list-healthmonitors.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "NeutronLoadbalancerV1.create_and_list_healthmonitors": [ - { - "args": { - "healthmonitor_create_args": {} - }, - "runner": { - "type": "constant", - "times": 100, - "concurrency": 10 - }, - "context": { - "users": { - "tenants": 5, - "users_per_tenant": 2 - }, - "quotas": { - "neutron": { - "health_monitor": -1 - } - } - } - } - ] -} diff --git a/samples/tasks/scenarios/neutron/create-and-list-healthmonitors.yaml b/samples/tasks/scenarios/neutron/create-and-list-healthmonitors.yaml deleted file mode 100644 index b54c43c8..00000000 --- a/samples/tasks/scenarios/neutron/create-and-list-healthmonitors.yaml +++ /dev/null @@ -1,16 +0,0 @@ ---- - NeutronLoadbalancerV1.create_and_list_healthmonitors: - - - args: - healthmonitor_create_args: {} - runner: - type: "constant" - times: 100 - concurrency: 10 - context: - users: - tenants: 5 - users_per_tenant: 2 - quotas: - neutron: - health_monitor: -1 diff --git a/samples/tasks/scenarios/neutron/create-and-list-loadbalancers.json b/samples/tasks/scenarios/neutron/create-and-list-loadbalancers.json deleted file mode 100644 index 2bf67baa..00000000 --- a/samples/tasks/scenarios/neutron/create-and-list-loadbalancers.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "NeutronLoadbalancerV2.create_and_list_loadbalancers": [ - { - "args": { - "lb_create_args": {} - }, - "runner": { - "type": "constant", - "times": 5, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - }, - "network": {} - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/neutron/create-and-list-loadbalancers.yaml b/samples/tasks/scenarios/neutron/create-and-list-loadbalancers.yaml deleted file mode 100644 index e5b431cb..00000000 --- a/samples/tasks/scenarios/neutron/create-and-list-loadbalancers.yaml +++ /dev/null @@ -1,17 +0,0 @@ ---- - NeutronLoadbalancerV2.create_and_list_loadbalancers: - - - args: - lb_create_args: {} - runner: - type: "constant" - times: 5 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - network: {} - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/neutron/create-and-list-networks-associations.json b/samples/tasks/scenarios/neutron/create-and-list-networks-associations.json deleted file mode 100644 index d83c9bd7..00000000 --- a/samples/tasks/scenarios/neutron/create-and-list-networks-associations.json +++ /dev/null @@ -1,24 +0,0 @@ -{ - "NeutronBGPVPN.create_and_list_networks_associations": [ - { - "args":{}, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - }, - "network":{} - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git 
a/samples/tasks/scenarios/neutron/create-and-list-networks-associations.yaml b/samples/tasks/scenarios/neutron/create-and-list-networks-associations.yaml deleted file mode 100644 index 3a50a468..00000000 --- a/samples/tasks/scenarios/neutron/create-and-list-networks-associations.yaml +++ /dev/null @@ -1,16 +0,0 @@ ---- - NeutronBGPVPN.create_and_list_networks_associations: - - - args: {} - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 1 - network: {} - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/neutron/create-and-list-networks.json b/samples/tasks/scenarios/neutron/create-and-list-networks.json deleted file mode 100644 index 826278c8..00000000 --- a/samples/tasks/scenarios/neutron/create-and-list-networks.json +++ /dev/null @@ -1,59 +0,0 @@ -{ - "NeutronNetworks.create_and_list_networks": [ - { - "args": { - "network_create_args": {} - }, - "runner": { - "type": "constant", - "times": 100, - "concurrency": 10 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 3 - }, - "quotas": { - "neutron": { - "network": -1 - } - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - }, - { - "args": { - "network_create_args": { - "provider:network_type": "vxlan" - } - }, - "runner": { - "type": "constant", - "times": 100, - "concurrency": 10 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 3 - }, - "quotas": { - "neutron": { - "network": -1 - } - }, - "roles": ["admin"] - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/neutron/create-and-list-networks.yaml b/samples/tasks/scenarios/neutron/create-and-list-networks.yaml deleted file mode 100644 index 4c59fc85..00000000 --- a/samples/tasks/scenarios/neutron/create-and-list-networks.yaml +++ /dev/null @@ -1,39 +0,0 @@ ---- - NeutronNetworks.create_and_list_networks: - - - args: - network_create_args: {} - runner: - type: "constant" - times: 100 - concurrency: 10 - context: - users: - tenants: 3 - users_per_tenant: 3 - quotas: - neutron: - network: -1 - sla: - failure_rate: - max: 0 - - - args: - network_create_args: - provider:network_type: "vxlan" - runner: - type: "constant" - times: 100 - concurrency: 10 - context: - users: - tenants: 3 - users_per_tenant: 3 - quotas: - neutron: - network: -1 - roles: - - "admin" - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/neutron/create-and-list-pools.json b/samples/tasks/scenarios/neutron/create-and-list-pools.json deleted file mode 100644 index 6d5e0fc0..00000000 --- a/samples/tasks/scenarios/neutron/create-and-list-pools.json +++ /dev/null @@ -1,28 +0,0 @@ -{ - "NeutronLoadbalancerV1.create_and_list_pools": [ - { - "args": { - "pool_create_args":{} - }, - "runner": { - "type": "constant", - "times": 100, - "concurrency": 10 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 3 - }, - "network":{}, - "quotas": { - "neutron": { - "network": -1, - "subnet": -1, - "pool": -1 - } - } - } - } - ] -} diff --git a/samples/tasks/scenarios/neutron/create-and-list-pools.yaml b/samples/tasks/scenarios/neutron/create-and-list-pools.yaml deleted file mode 100644 index 49c25152..00000000 --- a/samples/tasks/scenarios/neutron/create-and-list-pools.yaml +++ /dev/null @@ -1,19 +0,0 @@ ---- - NeutronLoadbalancerV1.create_and_list_pools: - - - args: - pool_create_args: {} - runner: - type: "constant" - times: 100 - concurrency: 10 - context: - users: - tenants: 3 - users_per_tenant: 3 - network: {} - quotas: 
- neutron: - network: -1 - subnet: -1 - pool: -1 diff --git a/samples/tasks/scenarios/neutron/create-and-list-ports.json b/samples/tasks/scenarios/neutron/create-and-list-ports.json deleted file mode 100644 index c387f623..00000000 --- a/samples/tasks/scenarios/neutron/create-and-list-ports.json +++ /dev/null @@ -1,29 +0,0 @@ -{ - "NeutronNetworks.create_and_list_ports": [ - { - "args": { - "network_create_args": {}, - "port_create_args": {}, - "ports_per_network": 10 - }, - "runner": { - "type": "constant", - "times": 100, - "concurrency": 10 - }, - "context": { - "network": {}, - "users": { - "tenants": 3, - "users_per_tenant": 3 - }, - "quotas": { - "neutron": { - "network": -1, - "port": -1 - } - } - } - } - ] -} diff --git a/samples/tasks/scenarios/neutron/create-and-list-ports.yaml b/samples/tasks/scenarios/neutron/create-and-list-ports.yaml deleted file mode 100644 index b10c674e..00000000 --- a/samples/tasks/scenarios/neutron/create-and-list-ports.yaml +++ /dev/null @@ -1,20 +0,0 @@ ---- - NeutronNetworks.create_and_list_ports: - - - args: - network_create_args: {} - port_create_args: {} - ports_per_network: 10 - runner: - type: "constant" - times: 100 - concurrency: 10 - context: - network: {} - users: - tenants: 3 - users_per_tenant: 3 - quotas: - neutron: - network: -1 - port: -1 diff --git a/samples/tasks/scenarios/neutron/create-and-list-routers-associations.json b/samples/tasks/scenarios/neutron/create-and-list-routers-associations.json deleted file mode 100644 index a37db0ef..00000000 --- a/samples/tasks/scenarios/neutron/create-and-list-routers-associations.json +++ /dev/null @@ -1,24 +0,0 @@ -{ - "NeutronBGPVPN.create_and_list_routers_associations": [ - { - "args":{}, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - }, - "router":{} - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/neutron/create-and-list-routers-associations.yaml b/samples/tasks/scenarios/neutron/create-and-list-routers-associations.yaml deleted file mode 100644 index de209d6d..00000000 --- a/samples/tasks/scenarios/neutron/create-and-list-routers-associations.yaml +++ /dev/null @@ -1,16 +0,0 @@ ---- - NeutronBGPVPN.create_and_list_routers_associations: - - - args: {} - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 1 - router: {} - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/neutron/create-and-list-routers.json b/samples/tasks/scenarios/neutron/create-and-list-routers.json deleted file mode 100644 index 8f16e9be..00000000 --- a/samples/tasks/scenarios/neutron/create-and-list-routers.json +++ /dev/null @@ -1,32 +0,0 @@ -{ - "NeutronNetworks.create_and_list_routers": [ - { - "args": { - "network_create_args": {}, - "subnet_create_args": {}, - "subnet_cidr_start": "1.1.0.0/30", - "subnets_per_network": 2, - "router_create_args": {} - }, - "runner": { - "type": "constant", - "times": 100, - "concurrency": 10 - }, - "context": { - "network": {}, - "users": { - "tenants": 3, - "users_per_tenant": 3 - }, - "quotas": { - "neutron": { - "network": -1, - "subnet": -1, - "router": -1 - } - } - } - } - ] -} diff --git a/samples/tasks/scenarios/neutron/create-and-list-routers.yaml b/samples/tasks/scenarios/neutron/create-and-list-routers.yaml deleted file mode 100644 index 22cd13b5..00000000 --- a/samples/tasks/scenarios/neutron/create-and-list-routers.yaml +++ /dev/null @@ -1,23 +0,0 @@ 
---- - NeutronNetworks.create_and_list_routers: - - - args: - network_create_args: {} - subnet_create_args: {} - subnet_cidr_start: "1.1.0.0/30" - subnets_per_network: 2 - router_create_args: {} - runner: - type: "constant" - times: 100 - concurrency: 10 - context: - network: {} - users: - tenants: 3 - users_per_tenant: 3 - quotas: - neutron: - network: -1 - subnet: -1 - router: -1 diff --git a/samples/tasks/scenarios/neutron/create-and-list-security-group-rules.json b/samples/tasks/scenarios/neutron/create-and-list-security-group-rules.json deleted file mode 100644 index a7b0a7cd..00000000 --- a/samples/tasks/scenarios/neutron/create-and-list-security-group-rules.json +++ /dev/null @@ -1,31 +0,0 @@ -{ - "NeutronSecurityGroup.create_and_list_security_group_rules": [ - { - "args": { - "security_group_args": {}, - "security_group_rule_args":{} - }, - "runner": { - "type": "constant", - "times": 20, - "concurrency": 10 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 3 - }, - "quotas": { - "neutron": { - "security_group": -1 - } - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/neutron/create-and-list-security-group-rules.yaml b/samples/tasks/scenarios/neutron/create-and-list-security-group-rules.yaml deleted file mode 100644 index f39e46ae..00000000 --- a/samples/tasks/scenarios/neutron/create-and-list-security-group-rules.yaml +++ /dev/null @@ -1,20 +0,0 @@ ---- - NeutronSecurityGroup.create_and_list_security_group_rules: - - - args: - security_group_args: {} - security_group_rule_args: {} - runner: - type: "constant" - times: 20 - concurrency: 10 - context: - users: - tenants: 3 - users_per_tenant: 3 - quotas: - neutron: - security_group: -1 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/neutron/create-and-list-security-groups.json b/samples/tasks/scenarios/neutron/create-and-list-security-groups.json deleted file mode 100644 index 7a0b5c76..00000000 --- a/samples/tasks/scenarios/neutron/create-and-list-security-groups.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "NeutronSecurityGroup.create_and_list_security_groups": [ - { - "args": { - "security_group_create_args": {} - }, - "runner": { - "type": "constant", - "times": 100, - "concurrency": 10 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 3 - }, - "quotas": { - "neutron": { - "security_group": -1 - } - } - } - } - ] -} diff --git a/samples/tasks/scenarios/neutron/create-and-list-security-groups.yaml b/samples/tasks/scenarios/neutron/create-and-list-security-groups.yaml deleted file mode 100644 index 9f72cad4..00000000 --- a/samples/tasks/scenarios/neutron/create-and-list-security-groups.yaml +++ /dev/null @@ -1,16 +0,0 @@ ---- - NeutronSecurityGroup.create_and_list_security_groups: - - - args: - security_group_create_args: {} - runner: - type: "constant" - times: 100 - concurrency: 10 - context: - users: - tenants: 3 - users_per_tenant: 3 - quotas: - neutron: - security_group: -1 diff --git a/samples/tasks/scenarios/neutron/create-and-list-subnets.json b/samples/tasks/scenarios/neutron/create-and-list-subnets.json deleted file mode 100644 index 46956b8b..00000000 --- a/samples/tasks/scenarios/neutron/create-and-list-subnets.json +++ /dev/null @@ -1,30 +0,0 @@ -{ - "NeutronNetworks.create_and_list_subnets": [ - { - "args": { - "network_create_args": {}, - "subnet_create_args": {}, - "subnet_cidr_start": "1.1.0.0/30", - "subnets_per_network": 2 - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 5 - 
}, - "context": { - "network": {}, - "users": { - "tenants": 2, - "users_per_tenant": 3 - }, - "quotas": { - "neutron": { - "network": -1, - "subnet": -1 - } - } - } - } - ] -} diff --git a/samples/tasks/scenarios/neutron/create-and-list-subnets.yaml b/samples/tasks/scenarios/neutron/create-and-list-subnets.yaml deleted file mode 100644 index 0450e703..00000000 --- a/samples/tasks/scenarios/neutron/create-and-list-subnets.yaml +++ /dev/null @@ -1,21 +0,0 @@ ---- - NeutronNetworks.create_and_list_subnets: - - - args: - network_create_args: {} - subnet_create_args: {} - subnet_cidr_start: "1.1.0.0/30" - subnets_per_network: 2 - runner: - type: "constant" - times: 10 - concurrency: 5 - context: - network: {} - users: - tenants: 2 - users_per_tenant: 3 - quotas: - neutron: - network: -1 - subnet: -1 diff --git a/samples/tasks/scenarios/neutron/create-and-list-vips.json b/samples/tasks/scenarios/neutron/create-and-list-vips.json deleted file mode 100644 index 1751ad6d..00000000 --- a/samples/tasks/scenarios/neutron/create-and-list-vips.json +++ /dev/null @@ -1,29 +0,0 @@ -{ - "NeutronLoadbalancerV1.create_and_list_vips": [ - { - "args": { - "vip_create_args":{} - }, - "runner": { - "type": "constant", - "times": 100, - "concurrency": 10 - }, - "context": { - "users": { - "tenants": 5, - "users_per_tenant": 2 - }, - "network":{}, - "quotas": { - "neutron": { - "network": -1, - "subnet": -1, - "pool": -1, - "vip": -1 - } - } - } - } - ] -} diff --git a/samples/tasks/scenarios/neutron/create-and-list-vips.yaml b/samples/tasks/scenarios/neutron/create-and-list-vips.yaml deleted file mode 100644 index a0ea4bcf..00000000 --- a/samples/tasks/scenarios/neutron/create-and-list-vips.yaml +++ /dev/null @@ -1,20 +0,0 @@ ---- - NeutronLoadbalancerV1.create_and_list_vips: - - - args: - vip_create_args: {} - runner: - type: "constant" - times: 100 - concurrency: 10 - context: - users: - tenants: 5 - users_per_tenant: 2 - network: {} - quotas: - neutron: - network: -1 - subnet: -1 - pool: -1 - vip: -1 diff --git a/samples/tasks/scenarios/neutron/create-and-show-network.json b/samples/tasks/scenarios/neutron/create-and-show-network.json deleted file mode 100644 index 559fbbcc..00000000 --- a/samples/tasks/scenarios/neutron/create-and-show-network.json +++ /dev/null @@ -1,30 +0,0 @@ -{ - "NeutronNetworks.create_and_show_network": [ - { - "args": { - "network_create_args": {} - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 3 - }, - "quotas": { - "neutron": { - "network": -1 - } - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/neutron/create-and-show-network.yaml b/samples/tasks/scenarios/neutron/create-and-show-network.yaml deleted file mode 100644 index 11090023..00000000 --- a/samples/tasks/scenarios/neutron/create-and-show-network.yaml +++ /dev/null @@ -1,19 +0,0 @@ ---- - NeutronNetworks.create_and_show_network: - - - args: - network_create_args: {} - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 3 - users_per_tenant: 3 - quotas: - neutron: - network: -1 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/neutron/create-and-show-ports.json b/samples/tasks/scenarios/neutron/create-and-show-ports.json deleted file mode 100644 index 8fec44d2..00000000 --- a/samples/tasks/scenarios/neutron/create-and-show-ports.json +++ /dev/null @@ -1,34 +0,0 @@ -{ - "NeutronNetworks.create_and_show_ports": [ - { - 
"args": { - "network_create_args": {}, - "port_create_args": {}, - "ports_per_network": 2 - }, - "runner": { - "type": "constant", - "times": 5, - "concurrency": 2 - }, - "context": { - "network": {}, - "users": { - "tenants": 2, - "users_per_tenant": 2 - }, - "quotas": { - "neutron": { - "network": -1, - "port": -1 - } - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/neutron/create-and-show-ports.yaml b/samples/tasks/scenarios/neutron/create-and-show-ports.yaml deleted file mode 100644 index 13fb0308..00000000 --- a/samples/tasks/scenarios/neutron/create-and-show-ports.yaml +++ /dev/null @@ -1,23 +0,0 @@ ---- - NeutronNetworks.create_and_show_ports: - - - args: - network_create_args: {} - port_create_args: {} - ports_per_network: 2 - runner: - type: "constant" - times: 5 - concurrency: 2 - context: - network: {} - users: - tenants: 2 - users_per_tenant: 2 - quotas: - neutron: - network: -1 - port: -1 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/neutron/create-and-show-routers.json b/samples/tasks/scenarios/neutron/create-and-show-routers.json deleted file mode 100644 index 16cea002..00000000 --- a/samples/tasks/scenarios/neutron/create-and-show-routers.json +++ /dev/null @@ -1,29 +0,0 @@ -{ - "NeutronNetworks.create_and_show_routers": [ - { - "args": { - "subnet_cidr_start": "1.1.0.0/30", - "subnets_per_network": 2 - }, - "runner": { - "type": "constant", - "times": 4, - "concurrency": 2 - }, - "context": { - "network": {}, - "users": { - "tenants": 2, - "users_per_tenant": 2 - }, - "quotas": { - "neutron": { - "network": -1, - "subnet": -1, - "router": -1 - } - } - } - } - ] -} diff --git a/samples/tasks/scenarios/neutron/create-and-show-routers.yaml b/samples/tasks/scenarios/neutron/create-and-show-routers.yaml deleted file mode 100644 index 0b7a43ab..00000000 --- a/samples/tasks/scenarios/neutron/create-and-show-routers.yaml +++ /dev/null @@ -1,20 +0,0 @@ ---- - NeutronNetworks.create_and_show_routers: - - - args: - subnet_cidr_start: "1.1.0.0/30" - subnets_per_network: 2 - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - network: {} - users: - tenants: 2 - users_per_tenant: 2 - quotas: - neutron: - network: -1 - subnet: -1 - router: -1 diff --git a/samples/tasks/scenarios/neutron/create-and-show-security-group-rule.json b/samples/tasks/scenarios/neutron/create-and-show-security-group-rule.json deleted file mode 100644 index 6816e0c8..00000000 --- a/samples/tasks/scenarios/neutron/create-and-show-security-group-rule.json +++ /dev/null @@ -1,31 +0,0 @@ -{ - "NeutronSecurityGroup.create_and_show_security_group_rule": [ - { - "args": { - "security_group_args": {}, - "security_group_rule_args":{} - }, - "runner": { - "type": "constant", - "times": 20, - "concurrency": 10 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 3 - }, - "quotas": { - "neutron": { - "security_group": -1 - } - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/neutron/create-and-show-security-group-rule.yaml b/samples/tasks/scenarios/neutron/create-and-show-security-group-rule.yaml deleted file mode 100644 index 759ff95d..00000000 --- a/samples/tasks/scenarios/neutron/create-and-show-security-group-rule.yaml +++ /dev/null @@ -1,20 +0,0 @@ ---- - NeutronSecurityGroup.create_and_show_security_group_rule: - - - args: - security_group_args: {} - security_group_rule_args: {} - runner: - type: "constant" - times: 20 - concurrency: 10 - context: - 
users: - tenants: 3 - users_per_tenant: 3 - quotas: - neutron: - security_group: -1 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/neutron/create-and-show-security-group.json b/samples/tasks/scenarios/neutron/create-and-show-security-group.json deleted file mode 100644 index b4b54b7b..00000000 --- a/samples/tasks/scenarios/neutron/create-and-show-security-group.json +++ /dev/null @@ -1,30 +0,0 @@ -{ - "NeutronSecurityGroup.create_and_show_security_group": [ - { - "args": { - "security_group_create_args": {} - }, - "runner": { - "type": "constant", - "times": 20, - "concurrency": 10 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 3 - }, - "quotas": { - "neutron": { - "security_group": -1 - } - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/neutron/create-and-show-security-group.yaml b/samples/tasks/scenarios/neutron/create-and-show-security-group.yaml deleted file mode 100644 index 099274b4..00000000 --- a/samples/tasks/scenarios/neutron/create-and-show-security-group.yaml +++ /dev/null @@ -1,19 +0,0 @@ ---- - NeutronSecurityGroup.create_and_show_security_group: - - - args: - security_group_create_args: {} - runner: - type: "constant" - times: 20 - concurrency: 10 - context: - users: - tenants: 3 - users_per_tenant: 3 - quotas: - neutron: - security_group: -1 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/neutron/create-and-show-subnets.json b/samples/tasks/scenarios/neutron/create-and-show-subnets.json deleted file mode 100644 index d944b153..00000000 --- a/samples/tasks/scenarios/neutron/create-and-show-subnets.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "NeutronNetworks.create_and_show_subnets": [ - { - "args": { - "network_create_args": {}, - "subnet_create_args": {}, - "subnet_cidr_start": "1.1.0.0/30", - "subnets_per_network": 2 - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 5 - }, - "context": { - "network": {}, - "users": { - "tenants": 2, - "users_per_tenant": 3 - }, - "quotas": { - "neutron": { - "network": -1, - "subnet": -1 - } - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/neutron/create-and-show-subnets.yaml b/samples/tasks/scenarios/neutron/create-and-show-subnets.yaml deleted file mode 100644 index f1432ef1..00000000 --- a/samples/tasks/scenarios/neutron/create-and-show-subnets.yaml +++ /dev/null @@ -1,24 +0,0 @@ ---- - NeutronNetworks.create_and_show_subnets: - - - args: - network_create_args: {} - subnet_create_args: {} - subnet_cidr_start: "1.1.0.0/30" - subnets_per_network: 2 - runner: - type: "constant" - times: 10 - concurrency: 5 - context: - network: {} - users: - tenants: 2 - users_per_tenant: 3 - quotas: - neutron: - network: -1 - subnet: -1 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/neutron/create-and-update-bgpvpns.json b/samples/tasks/scenarios/neutron/create-and-update-bgpvpns.json deleted file mode 100644 index 411045e2..00000000 --- a/samples/tasks/scenarios/neutron/create-and-update-bgpvpns.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "NeutronBGPVPN.create_and_update_bgpvpns": [ - { - "args":{}, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/neutron/create-and-update-bgpvpns.yaml 
b/samples/tasks/scenarios/neutron/create-and-update-bgpvpns.yaml deleted file mode 100644 index e248ad10..00000000 --- a/samples/tasks/scenarios/neutron/create-and-update-bgpvpns.yaml +++ /dev/null @@ -1,15 +0,0 @@ ---- - NeutronBGPVPN.create_and_update_bgpvpns: - - - args: {} - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/neutron/create-and-update-healthmonitors.json b/samples/tasks/scenarios/neutron/create-and-update-healthmonitors.json deleted file mode 100644 index 93633211..00000000 --- a/samples/tasks/scenarios/neutron/create-and-update-healthmonitors.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "NeutronLoadbalancerV1.create_and_update_healthmonitors": [ - { - "args": { - "healthmonitor_create_args": {}, - "healthmonitor_update_args": {} - }, - "runner": { - "type": "constant", - "times": 100, - "concurrency": 10 - }, - "context": { - "users": { - "tenants": 5, - "users_per_tenant": 2 - }, - "quotas": { - "neutron": { - "health_monitor": -1 - } - } - } - } - ] -} diff --git a/samples/tasks/scenarios/neutron/create-and-update-healthmonitors.yaml b/samples/tasks/scenarios/neutron/create-and-update-healthmonitors.yaml deleted file mode 100644 index cf8a6bfb..00000000 --- a/samples/tasks/scenarios/neutron/create-and-update-healthmonitors.yaml +++ /dev/null @@ -1,17 +0,0 @@ ---- - NeutronLoadbalancerV1.create_and_update_healthmonitors: - - - args: - healthmonitor_create_args: {} - healthmonitor_update_args: {} - runner: - type: "constant" - times: 100 - concurrency: 10 - context: - users: - tenants: 5 - users_per_tenant: 2 - quotas: - neutron: - health_monitor: -1 diff --git a/samples/tasks/scenarios/neutron/create-and-update-networks.json b/samples/tasks/scenarios/neutron/create-and-update-networks.json deleted file mode 100644 index f3ab67d9..00000000 --- a/samples/tasks/scenarios/neutron/create-and-update-networks.json +++ /dev/null @@ -1,29 +0,0 @@ -{ - "NeutronNetworks.create_and_update_networks": [ - { - "args": { - "network_update_args": { - "admin_state_up": false, - "name": "_updated" - }, - "network_create_args": {} - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 5 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 3 - }, - "quotas": { - "neutron": { - "network": -1 - } - } - } - } - ] -} diff --git a/samples/tasks/scenarios/neutron/create-and-update-networks.yaml b/samples/tasks/scenarios/neutron/create-and-update-networks.yaml deleted file mode 100644 index 5a94b04e..00000000 --- a/samples/tasks/scenarios/neutron/create-and-update-networks.yaml +++ /dev/null @@ -1,19 +0,0 @@ ---- - NeutronNetworks.create_and_update_networks: - - - args: - network_create_args: {} - network_update_args: - admin_state_up: False - name: "_updated" - runner: - type: "constant" - times: 10 - concurrency: 5 - context: - users: - tenants: 2 - users_per_tenant: 3 - quotas: - neutron: - network: -1 diff --git a/samples/tasks/scenarios/neutron/create-and-update-pools.json b/samples/tasks/scenarios/neutron/create-and-update-pools.json deleted file mode 100644 index 0ccaa56b..00000000 --- a/samples/tasks/scenarios/neutron/create-and-update-pools.json +++ /dev/null @@ -1,29 +0,0 @@ -{ - "NeutronLoadbalancerV1.create_and_update_pools": [ - { - "args": { - "pool_create_args":{}, - "pool_update_args":{} - }, - "runner": { - "type": "constant", - "times": 100, - "concurrency": 10 - }, - "context": { - "users": { - "tenants": 3, - 
"users_per_tenant": 3 - }, - "network":{}, - "quotas": { - "neutron": { - "network": -1, - "subnet": -1, - "pool": -1 - } - } - } - } - ] -} diff --git a/samples/tasks/scenarios/neutron/create-and-update-pools.yaml b/samples/tasks/scenarios/neutron/create-and-update-pools.yaml deleted file mode 100644 index 8ef300b5..00000000 --- a/samples/tasks/scenarios/neutron/create-and-update-pools.yaml +++ /dev/null @@ -1,20 +0,0 @@ ---- - NeutronLoadbalancerV1.create_and_update_pools: - - - args: - pool_create_args: {} - pool_update_args: {} - runner: - type: "constant" - times: 100 - concurrency: 10 - context: - users: - tenants: 3 - users_per_tenant: 3 - network: {} - quotas: - neutron: - network: -1 - subnet: -1 - pool: -1 diff --git a/samples/tasks/scenarios/neutron/create-and-update-ports.json b/samples/tasks/scenarios/neutron/create-and-update-ports.json deleted file mode 100644 index 4c7dbdb6..00000000 --- a/samples/tasks/scenarios/neutron/create-and-update-ports.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "NeutronNetworks.create_and_update_ports": [ - { - "args": { - "network_create_args": {}, - "port_create_args": {}, - "port_update_args": { - "admin_state_up": false, - "device_id": "dummy_id", - "device_owner": "dummy_owner", - "name": "_port_updated" - }, - "ports_per_network": 5 - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 5 - }, - "context": { - "network": {}, - "users": { - "tenants": 2, - "users_per_tenant": 3 - }, - "quotas": { - "neutron": { - "network": -1, - "port": -1 - } - } - } - } - ] -} diff --git a/samples/tasks/scenarios/neutron/create-and-update-ports.yaml b/samples/tasks/scenarios/neutron/create-and-update-ports.yaml deleted file mode 100644 index eae30bb2..00000000 --- a/samples/tasks/scenarios/neutron/create-and-update-ports.yaml +++ /dev/null @@ -1,25 +0,0 @@ ---- - NeutronNetworks.create_and_update_ports: - - - args: - network_create_args: {} - port_create_args: {} - ports_per_network: 5 - port_update_args: - admin_state_up: False - device_id: "dummy_id" - device_owner: "dummy_owner" - name: "_port_updated" - runner: - type: "constant" - times: 10 - concurrency: 5 - context: - network: {} - users: - tenants: 2 - users_per_tenant: 3 - quotas: - neutron: - network: -1 - port: -1 diff --git a/samples/tasks/scenarios/neutron/create-and-update-routers.json b/samples/tasks/scenarios/neutron/create-and-update-routers.json deleted file mode 100644 index 8e299fab..00000000 --- a/samples/tasks/scenarios/neutron/create-and-update-routers.json +++ /dev/null @@ -1,36 +0,0 @@ -{ - "NeutronNetworks.create_and_update_routers": [ - { - "args": { - "network_create_args": {}, - "subnet_create_args": {}, - "subnet_cidr_start": "1.1.0.0/30", - "subnets_per_network": 2, - "router_create_args": {}, - "router_update_args": { - "admin_state_up": false, - "name": "_router_updated" - } - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 5 - }, - "context": { - "network": {}, - "users": { - "tenants": 2, - "users_per_tenant": 3 - }, - "quotas": { - "neutron": { - "network": -1, - "subnet": -1, - "router": -1 - } - } - } - } - ] -} diff --git a/samples/tasks/scenarios/neutron/create-and-update-routers.yaml b/samples/tasks/scenarios/neutron/create-and-update-routers.yaml deleted file mode 100644 index 98bb6669..00000000 --- a/samples/tasks/scenarios/neutron/create-and-update-routers.yaml +++ /dev/null @@ -1,26 +0,0 @@ ---- - NeutronNetworks.create_and_update_routers: - - - args: - network_create_args: {} - subnet_create_args: {} - subnet_cidr_start: 
"1.1.0.0/30" - subnets_per_network: 2 - router_create_args: {} - router_update_args: - admin_state_up: False - name: "_router_updated" - runner: - type: "constant" - times: 10 - concurrency: 5 - context: - network: {} - users: - tenants: 2 - users_per_tenant: 3 - quotas: - neutron: - network: -1 - subnet: -1 - router: -1 diff --git a/samples/tasks/scenarios/neutron/create-and-update-security-groups.json b/samples/tasks/scenarios/neutron/create-and-update-security-groups.json deleted file mode 100644 index 1ef8739f..00000000 --- a/samples/tasks/scenarios/neutron/create-and-update-security-groups.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "NeutronSecurityGroup.create_and_update_security_groups": [ - { - "args": { - "security_group_create_args": {}, - "security_group_update_args": {} - }, - "runner": { - "type": "constant", - "times": 100, - "concurrency": 10 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 3 - }, - "quotas": { - "neutron": { - "security_group": -1 - } - } - } - } - ] -} diff --git a/samples/tasks/scenarios/neutron/create-and-update-security-groups.yaml b/samples/tasks/scenarios/neutron/create-and-update-security-groups.yaml deleted file mode 100644 index da8031e1..00000000 --- a/samples/tasks/scenarios/neutron/create-and-update-security-groups.yaml +++ /dev/null @@ -1,17 +0,0 @@ ---- - NeutronSecurityGroup.create_and_update_security_groups: - - - args: - security_group_create_args: {} - security_group_update_args: {} - runner: - type: "constant" - times: 100 - concurrency: 10 - context: - users: - tenants: 3 - users_per_tenant: 3 - quotas: - neutron: - security_group: -1 diff --git a/samples/tasks/scenarios/neutron/create-and-update-subnets.json b/samples/tasks/scenarios/neutron/create-and-update-subnets.json deleted file mode 100644 index 8d9e8933..00000000 --- a/samples/tasks/scenarios/neutron/create-and-update-subnets.json +++ /dev/null @@ -1,34 +0,0 @@ -{ - "NeutronNetworks.create_and_update_subnets": [ - { - "args": { - "subnet_update_args": { - "enable_dhcp": false, - "name": "_subnet_updated" - }, - "network_create_args": {}, - "subnet_create_args": {}, - "subnet_cidr_start": "1.4.0.0/16", - "subnets_per_network": 2 - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 5 - }, - "context": { - "network": {}, - "users": { - "tenants": 2, - "users_per_tenant": 3 - }, - "quotas": { - "neutron": { - "network": -1, - "subnet": -1 - } - } - } - } - ] -} diff --git a/samples/tasks/scenarios/neutron/create-and-update-subnets.yaml b/samples/tasks/scenarios/neutron/create-and-update-subnets.yaml deleted file mode 100644 index c759c8bf..00000000 --- a/samples/tasks/scenarios/neutron/create-and-update-subnets.yaml +++ /dev/null @@ -1,24 +0,0 @@ ---- - NeutronNetworks.create_and_update_subnets: - - - args: - network_create_args: {} - subnet_create_args: {} - subnet_cidr_start: "1.4.0.0/16" - subnets_per_network: 2 - subnet_update_args: - enable_dhcp: False - name: "_subnet_updated" - runner: - type: "constant" - times: 10 - concurrency: 5 - context: - network: {} - users: - tenants: 2 - users_per_tenant: 3 - quotas: - neutron: - network: -1 - subnet: -1 diff --git a/samples/tasks/scenarios/neutron/create-and-update-vips.json b/samples/tasks/scenarios/neutron/create-and-update-vips.json deleted file mode 100644 index b734a1fb..00000000 --- a/samples/tasks/scenarios/neutron/create-and-update-vips.json +++ /dev/null @@ -1,30 +0,0 @@ -{ - "NeutronLoadbalancerV1.create_and_update_vips": [ - { - "args": { - "vip_create_args":{}, - 
"vip_update_args":{} - }, - "runner": { - "type": "constant", - "times": 100, - "concurrency": 10 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - }, - "network": {}, - "quotas": { - "neutron": { - "network": -1, - "subnet": -1, - "pool": -1, - "vip": -1 - } - } - } - } - ] -} diff --git a/samples/tasks/scenarios/neutron/create-and-update-vips.yaml b/samples/tasks/scenarios/neutron/create-and-update-vips.yaml deleted file mode 100644 index df649b4d..00000000 --- a/samples/tasks/scenarios/neutron/create-and-update-vips.yaml +++ /dev/null @@ -1,21 +0,0 @@ ---- - NeutronLoadbalancerV1.create_and_update_vips: - - - args: - vip_create_args: {} - vip_update_args: {} - runner: - type: "constant" - times: 100 - concurrency: 10 - context: - users: - tenants: 1 - users_per_tenant: 1 - network: {} - quotas: - neutron: - network: -1 - subnet: -1 - pool: -1 - vip: -1 diff --git a/samples/tasks/scenarios/neutron/list-agents.json b/samples/tasks/scenarios/neutron/list-agents.json deleted file mode 100644 index e9fbcb6b..00000000 --- a/samples/tasks/scenarios/neutron/list-agents.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "NeutronNetworks.list_agents": [ - { - "args": { - "agent_args": {} - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 3 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/neutron/list-agents.yaml b/samples/tasks/scenarios/neutron/list-agents.yaml deleted file mode 100644 index b1d15cb0..00000000 --- a/samples/tasks/scenarios/neutron/list-agents.yaml +++ /dev/null @@ -1,16 +0,0 @@ ---- - NeutronNetworks.list_agents: - - - args: - agent_args: {} - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 3 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/neutron/set-and-clear-router-gateway.json b/samples/tasks/scenarios/neutron/set-and-clear-router-gateway.json deleted file mode 100644 index 5a0cbfb9..00000000 --- a/samples/tasks/scenarios/neutron/set-and-clear-router-gateway.json +++ /dev/null @@ -1,36 +0,0 @@ -{ - "NeutronNetworks.set_and_clear_router_gateway": [ - { - "args": { - "network_create_args": { - "router:external": true - }, - "router_create_args": {} - }, - "runner": { - "type": "constant", - "times": 4, - "concurrency": 2 - }, - "context": { - "network": {}, - "users": { - "tenants": 2, - "users_per_tenant": 2 - }, - "quotas": { - "neutron": { - "network": -1, - "router": -1 - } - }, - "roles": ["admin"] - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/neutron/set-and-clear-router-gateway.yaml b/samples/tasks/scenarios/neutron/set-and-clear-router-gateway.yaml deleted file mode 100644 index 27af6873..00000000 --- a/samples/tasks/scenarios/neutron/set-and-clear-router-gateway.yaml +++ /dev/null @@ -1,25 +0,0 @@ ---- - NeutronNetworks.set_and_clear_router_gateway: - - - args: - network_create_args: - router:external: True - router_create_args: {} - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - network: {} - users: - tenants: 2 - users_per_tenant: 2 - quotas: - neutron: - network: -1 - router: -1 - roles: - - "admin" - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/nova/boot-and-associate-floating-ip.json b/samples/tasks/scenarios/nova/boot-and-associate-floating-ip.json deleted file mode 100644 index 87c0d66e..00000000 --- 
a/samples/tasks/scenarios/nova/boot-and-associate-floating-ip.json +++ /dev/null @@ -1,27 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} -{ - "NovaServers.boot_and_associate_floating_ip": [ - { - "runner": { - "type": "constant", - "concurrency": 1, - "times": 1 - }, - "args": { - "flavor": { - "name": "{{flavor_name}}" - }, - "image": { - "name": "^cirros.*-disk$" - } - }, - "context": { - "users": { - "users_per_tenant": 1, - "tenants": 1 - }, - "network": {} - } - } - ] -} \ No newline at end of file diff --git a/samples/tasks/scenarios/nova/boot-and-associate-floating-ip.yaml b/samples/tasks/scenarios/nova/boot-and-associate-floating-ip.yaml deleted file mode 100644 index 47f6ef4a..00000000 --- a/samples/tasks/scenarios/nova/boot-and-associate-floating-ip.yaml +++ /dev/null @@ -1,18 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} ---- - NovaServers.boot_and_associate_floating_ip: - - - args: - flavor: - name: "{{flavor_name}}" - image: - name: "^cirros.*-disk$" - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - network: {} diff --git a/samples/tasks/scenarios/nova/boot-and-block-migrate.json b/samples/tasks/scenarios/nova/boot-and-block-migrate.json deleted file mode 100644 index 82ba229c..00000000 --- a/samples/tasks/scenarios/nova/boot-and-block-migrate.json +++ /dev/null @@ -1,27 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} -{ - "NovaServers.boot_and_live_migrate_server": [ - { - "args": { - "flavor": { - "name": "{{flavor_name}}" - }, - "image": { - "name": "^cirros.*-disk$" - }, - "block_migration": true - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/nova/boot-and-block-migrate.yaml b/samples/tasks/scenarios/nova/boot-and-block-migrate.yaml deleted file mode 100644 index a46a9b07..00000000 --- a/samples/tasks/scenarios/nova/boot-and-block-migrate.yaml +++ /dev/null @@ -1,17 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} ---- - NovaServers.boot_and_live_migrate_server: - - args: - flavor: - name: "{{flavor_name}}" - image: - name: "^cirros.*-disk$" - block_migration: true - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 1 diff --git a/samples/tasks/scenarios/nova/boot-and-delete-multiple.json b/samples/tasks/scenarios/nova/boot-and-delete-multiple.json deleted file mode 100644 index bf086d49..00000000 --- a/samples/tasks/scenarios/nova/boot-and-delete-multiple.json +++ /dev/null @@ -1,27 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} -{ - "NovaServers.boot_and_delete_multiple_servers": [ - { - "runner": { - "type": "constant", - "concurrency": 1, - "times": 1 - }, - "args": { - "count": 5, - "image": { - "name": "^cirros.*-disk$" - }, - "flavor": { - "name": "{{flavor_name}}" - } - }, - "context": { - "users": { - "users_per_tenant": 1, - "tenants": 1 - } - } - } - ] -} \ No newline at end of file diff --git a/samples/tasks/scenarios/nova/boot-and-delete-multiple.yaml b/samples/tasks/scenarios/nova/boot-and-delete-multiple.yaml deleted file mode 100644 index 9662ccda..00000000 --- a/samples/tasks/scenarios/nova/boot-and-delete-multiple.yaml +++ /dev/null @@ -1,18 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} ---- - NovaServers.boot_and_delete_multiple_servers: - - - args: - image: - name: "^cirros.*-disk$" - flavor: - name: 
"{{flavor_name}}" - count: 5 - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 diff --git a/samples/tasks/scenarios/nova/boot-and-delete-server-with-keypairs.json b/samples/tasks/scenarios/nova/boot-and-delete-server-with-keypairs.json deleted file mode 100644 index b8b75e33..00000000 --- a/samples/tasks/scenarios/nova/boot-and-delete-server-with-keypairs.json +++ /dev/null @@ -1,30 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} -{ - "NovaKeypair.boot_and_delete_server_with_keypair": [ - { - "args": { - "flavor": { - "name": "{{flavor_name}}" - }, - "image": { - "name": "^cirros.*-disk$" - }, - "boot_server_kwargs": {} - }, - "runner": { - "type": "constant", - "times": 5, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 1 - }, - "network": { - "start_cidr": "100.1.0.0/26" - } - } - } - ] -} diff --git a/samples/tasks/scenarios/nova/boot-and-delete-server-with-keypairs.yaml b/samples/tasks/scenarios/nova/boot-and-delete-server-with-keypairs.yaml deleted file mode 100644 index bed258e9..00000000 --- a/samples/tasks/scenarios/nova/boot-and-delete-server-with-keypairs.yaml +++ /dev/null @@ -1,20 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} ---- - NovaKeypair.boot_and_delete_server_with_keypair: - - - args: - flavor: - name: "{{flavor_name}}" - image: - name: "^cirros.*-disk$" - boot_server_kwargs: {} - runner: - type: "constant" - times: 5 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 1 - network: - start_cidr: "100.1.0.0/26" diff --git a/samples/tasks/scenarios/nova/boot-and-delete.json b/samples/tasks/scenarios/nova/boot-and-delete.json deleted file mode 100644 index ceb3cccc..00000000 --- a/samples/tasks/scenarios/nova/boot-and-delete.json +++ /dev/null @@ -1,53 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} -{ - "NovaServers.boot_and_delete_server": [ - { - "args": { - "flavor": { - "name": "{{flavor_name}}" - }, - "image": { - "name": "^cirros.*-disk$" - }, - "force_delete": false - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 2 - } - } - }, - { - "args": { - "flavor": { - "name": "{{flavor_name}}" - }, - "image": { - "name": "^cirros.*-disk$" - }, - "auto_assign_nic": true - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 2 - }, - "network": { - "start_cidr": "10.2.0.0/24", - "networks_per_tenant": 2 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/nova/boot-and-delete.yaml b/samples/tasks/scenarios/nova/boot-and-delete.yaml deleted file mode 100644 index 0b201876..00000000 --- a/samples/tasks/scenarios/nova/boot-and-delete.yaml +++ /dev/null @@ -1,37 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} ---- - NovaServers.boot_and_delete_server: - - - args: - flavor: - name: "{{flavor_name}}" - image: - name: "^cirros.*-disk$" - force_delete: false - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 3 - users_per_tenant: 2 - - - - args: - flavor: - name: "{{flavor_name}}" - image: - name: "^cirros.*-disk$" - auto_assign_nic: true - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 3 - users_per_tenant: 2 - network: - start_cidr: "10.2.0.0/24" - networks_per_tenant: 2 diff --git a/samples/tasks/scenarios/nova/boot-and-get-console-server.json 
b/samples/tasks/scenarios/nova/boot-and-get-console-server.json deleted file mode 100644 index 92c76c0f..00000000 --- a/samples/tasks/scenarios/nova/boot-and-get-console-server.json +++ /dev/null @@ -1,27 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} -{% set image_name = image_name or "^cirros.*-disk$" %} -{ - "NovaServers.boot_and_get_console_output": [ - { - "args": { - "flavor": { - "name": "{{flavor_name}}" - }, - "image": { - "name": "{{image_name}}" - } - }, - "runner": { - "type": "constant", - "times": 1, - "concurrency": 1 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/nova/boot-and-get-console-server.yaml b/samples/tasks/scenarios/nova/boot-and-get-console-server.yaml deleted file mode 100644 index 01b81749..00000000 --- a/samples/tasks/scenarios/nova/boot-and-get-console-server.yaml +++ /dev/null @@ -1,18 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} -{% set image_name = image_name or "^cirros.*-disk$" %} ---- - NovaServers.boot_and_get_console_output: - - - args: - flavor: - name: "{{flavor_name}}" - image: - name: "{{image_name}}" - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 diff --git a/samples/tasks/scenarios/nova/boot-and-get-console-url.json b/samples/tasks/scenarios/nova/boot-and-get-console-url.json deleted file mode 100644 index 7f34131a..00000000 --- a/samples/tasks/scenarios/nova/boot-and-get-console-url.json +++ /dev/null @@ -1,32 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} -{ - "NovaServers.boot_and_get_console_url": [ - { - "args": { - "flavor": { - "name": "{{flavor_name}}" - }, - "image": { - "name": "^cirros.*-disk$" - }, - "console_type": "novnc" - }, - "runner": { - "type": "constant", - "times": 4, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/nova/boot-and-get-console-url.yaml b/samples/tasks/scenarios/nova/boot-and-get-console-url.yaml deleted file mode 100644 index b1c930a3..00000000 --- a/samples/tasks/scenarios/nova/boot-and-get-console-url.yaml +++ /dev/null @@ -1,21 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} ---- - NovaServers.boot_and_get_console_url: - - - args: - flavor: - name: "{{flavor_name}}" - image: - name: "^cirros.*-disk$" - console_type: "novnc" - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/nova/boot-and-list.json b/samples/tasks/scenarios/nova/boot-and-list.json deleted file mode 100644 index f633c4a0..00000000 --- a/samples/tasks/scenarios/nova/boot-and-list.json +++ /dev/null @@ -1,28 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} -{ - "NovaServers.boot_and_list_server": [ - { - "args": { - "flavor": { - "name": "{{flavor_name}}" - }, - "image": { - "name": "^cirros.*-disk$" - }, - "detailed": true - }, - "runner": { - "type": "constant", - "times": 1, - "concurrency": 1 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - } - } - } - ] -} - diff --git a/samples/tasks/scenarios/nova/boot-and-list.yaml b/samples/tasks/scenarios/nova/boot-and-list.yaml deleted file mode 100644 index 90d8694c..00000000 --- a/samples/tasks/scenarios/nova/boot-and-list.yaml +++ /dev/null @@ -1,19 +0,0 @@ -{% set flavor_name = flavor_name or 
"m1.tiny" %} ---- - NovaServers.boot_and_list_server: - - - args: - flavor: - name: "{{flavor_name}}" - image: - name: "^cirros.*-disk$" - detailed: True - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - diff --git a/samples/tasks/scenarios/nova/boot-and-live-migrate.json b/samples/tasks/scenarios/nova/boot-and-live-migrate.json deleted file mode 100644 index f93131db..00000000 --- a/samples/tasks/scenarios/nova/boot-and-live-migrate.json +++ /dev/null @@ -1,27 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} -{ - "NovaServers.boot_and_live_migrate_server": [ - { - "args": { - "flavor": { - "name": "{{flavor_name}}" - }, - "image": { - "name": "^cirros.*-disk$" - }, - "block_migration": false - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/nova/boot-and-live-migrate.yaml b/samples/tasks/scenarios/nova/boot-and-live-migrate.yaml deleted file mode 100644 index 92b84053..00000000 --- a/samples/tasks/scenarios/nova/boot-and-live-migrate.yaml +++ /dev/null @@ -1,17 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} ---- - NovaServers.boot_and_live_migrate_server: - - args: - flavor: - name: "{{flavor_name}}" - image: - name: "^cirros.*-disk$" - block_migration: false - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 1 diff --git a/samples/tasks/scenarios/nova/boot-and-migrate.json b/samples/tasks/scenarios/nova/boot-and-migrate.json deleted file mode 100644 index 64b53cdf..00000000 --- a/samples/tasks/scenarios/nova/boot-and-migrate.json +++ /dev/null @@ -1,26 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} -{ - "NovaServers.boot_and_migrate_server": [ - { - "args": { - "flavor": { - "name": "{{flavor_name}}" - }, - "image": { - "name": "^cirros.*-disk$" - } - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/nova/boot-and-migrate.yaml b/samples/tasks/scenarios/nova/boot-and-migrate.yaml deleted file mode 100644 index e15d32c9..00000000 --- a/samples/tasks/scenarios/nova/boot-and-migrate.yaml +++ /dev/null @@ -1,16 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} ---- - NovaServers.boot_and_migrate_server: - - args: - flavor: - name: "{{flavor_name}}" - image: - name: "^cirros.*-disk$" - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 1 diff --git a/samples/tasks/scenarios/nova/boot-and-rebuild.json b/samples/tasks/scenarios/nova/boot-and-rebuild.json deleted file mode 100644 index 98a5e8ef..00000000 --- a/samples/tasks/scenarios/nova/boot-and-rebuild.json +++ /dev/null @@ -1,29 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} -{ - "NovaServers.boot_and_rebuild_server": [ - { - "args": { - "flavor": { - "name": "{{flavor_name}}" - }, - "from_image": { - "name": "^cirros.*-disk$" - }, - "to_image": { - "name": "^cirros.*-disk$" - } - }, - "runner": { - "type": "constant", - "times": 5, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/nova/boot-and-rebuild.yaml b/samples/tasks/scenarios/nova/boot-and-rebuild.yaml deleted file mode 100644 index e540dfe5..00000000 --- 
a/samples/tasks/scenarios/nova/boot-and-rebuild.yaml +++ /dev/null @@ -1,18 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} ---- - NovaServers.boot_and_rebuild_server: - - args: - flavor: - name: "{{flavor_name}}" - from_image: - name: "^cirros.*-disk$" - to_image: - name: "^cirros.*-disk$" - runner: - type: "constant" - times: 5 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 1 diff --git a/samples/tasks/scenarios/nova/boot-and-show-server.json b/samples/tasks/scenarios/nova/boot-and-show-server.json deleted file mode 100644 index 17b6ff28..00000000 --- a/samples/tasks/scenarios/nova/boot-and-show-server.json +++ /dev/null @@ -1,27 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} -{% set image_name = image_name or "^cirros.*-disk$" %} -{ - "NovaServers.boot_and_show_server": [ - { - "args": { - "flavor": { - "name": "{{flavor_name}}" - }, - "image": { - "name": "{{image_name}}" - } - }, - "runner": { - "type": "constant", - "times": 1, - "concurrency": 1 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/nova/boot-and-show-server.yaml b/samples/tasks/scenarios/nova/boot-and-show-server.yaml deleted file mode 100644 index 0328df6b..00000000 --- a/samples/tasks/scenarios/nova/boot-and-show-server.yaml +++ /dev/null @@ -1,18 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} -{% set image_name = image_name or "^cirros.*-disk$" %} ---- - NovaServers.boot_and_show_server: - - - args: - flavor: - name: "{{flavor_name}}" - image: - name: "{{image_name}}" - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 diff --git a/samples/tasks/scenarios/nova/boot-and-update-server.json b/samples/tasks/scenarios/nova/boot-and-update-server.json deleted file mode 100644 index caba47ff..00000000 --- a/samples/tasks/scenarios/nova/boot-and-update-server.json +++ /dev/null @@ -1,26 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} -{ - "NovaServers.boot_and_update_server": [ - { - "args": { - "flavor": { - "name": "{{flavor_name}}" - }, - "image": { - "name": "^cirros.*-disk$" - } - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 2 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/nova/boot-and-update-server.yaml b/samples/tasks/scenarios/nova/boot-and-update-server.yaml deleted file mode 100644 index ab560856..00000000 --- a/samples/tasks/scenarios/nova/boot-and-update-server.yaml +++ /dev/null @@ -1,17 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} ---- - NovaServers.boot_and_update_server: - - - args: - flavor: - name: "{{flavor_name}}" - image: - name: "^cirros.*-disk$" - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 3 - users_per_tenant: 2 diff --git a/samples/tasks/scenarios/nova/boot-bounce-delete.json b/samples/tasks/scenarios/nova/boot-bounce-delete.json deleted file mode 100644 index 8491447c..00000000 --- a/samples/tasks/scenarios/nova/boot-bounce-delete.json +++ /dev/null @@ -1,33 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} -{ - "NovaServers.boot_and_bounce_server": [ - { - "args": { - "flavor": { - "name": "{{flavor_name}}" - }, - "image": { - "name": "^cirros.*-disk$" - }, - "force_delete": false, - "actions": [ - {"hard_reboot": 1}, - {"soft_reboot": 1}, - {"stop_start": 1}, - {"rescue_unrescue": 1} - ] - }, - "runner": { - "type": 
"constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 2 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/nova/boot-bounce-delete.yaml b/samples/tasks/scenarios/nova/boot-bounce-delete.yaml deleted file mode 100644 index da211aa9..00000000 --- a/samples/tasks/scenarios/nova/boot-bounce-delete.yaml +++ /dev/null @@ -1,27 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} ---- - NovaServers.boot_and_bounce_server: - - - args: - flavor: - name: "{{flavor_name}}" - image: - name: "^cirros.*-disk$" - force_delete: false - actions: - - - hard_reboot: 1 - - - soft_reboot: 1 - - - stop_start: 1 - - - rescue_unrescue: 1 - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 3 - users_per_tenant: 2 diff --git a/samples/tasks/scenarios/nova/boot-from-volume-and-delete.json b/samples/tasks/scenarios/nova/boot-from-volume-and-delete.json deleted file mode 100755 index 2aa71e12..00000000 --- a/samples/tasks/scenarios/nova/boot-from-volume-and-delete.json +++ /dev/null @@ -1,30 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} -{% set volume_type = volume_type or "" %} -{ - "NovaServers.boot_server_from_volume_and_delete": [ - { - "args": { - "flavor": { - "name": "{{flavor_name}}" - }, - "image": { - "name": "^cirros.*-disk$" - }, - "volume_size": 10, - "volume_type": "{{volume_type}}", - "force_delete": false - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 2 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/nova/boot-from-volume-and-delete.yaml b/samples/tasks/scenarios/nova/boot-from-volume-and-delete.yaml deleted file mode 100755 index 8e555550..00000000 --- a/samples/tasks/scenarios/nova/boot-from-volume-and-delete.yaml +++ /dev/null @@ -1,21 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} -{% set volume_type = volume_type or "" %} ---- - NovaServers.boot_server_from_volume_and_delete: - - - args: - flavor: - name: "{{flavor_name}}" - image: - name: "^cirros.*-disk$" - volume_size: 10 - volume_type: "{{volume_type}}" - force_delete: false - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 3 - users_per_tenant: 2 diff --git a/samples/tasks/scenarios/nova/boot-from-volume-and-resize.json b/samples/tasks/scenarios/nova/boot-from-volume-and-resize.json deleted file mode 100644 index 870dc092..00000000 --- a/samples/tasks/scenarios/nova/boot-from-volume-and-resize.json +++ /dev/null @@ -1,35 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} -{ - "NovaServers.boot_server_from_volume_and_resize": [ - { - "args": { - "flavor": { - "name": "{{flavor_name}}" - }, - "image": { - "name": "^cirros.*-disk$" - }, - "to_flavor": { - "name": "m1.small" - }, - "confirm": true, - "volume_size": 1, - "force_delete": false, - "do_delete": true, - "boot_server_kwargs": {}, - "create_volume_kwargs": {} - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 2 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/nova/boot-from-volume-and-resize.yaml b/samples/tasks/scenarios/nova/boot-from-volume-and-resize.yaml deleted file mode 100644 index a65e60d3..00000000 --- a/samples/tasks/scenarios/nova/boot-from-volume-and-resize.yaml +++ /dev/null @@ -1,25 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} ---- - 
NovaServers.boot_server_from_volume_and_resize: - - - args: - flavor: - name: "{{flavor_name}}" - image: - name: "^cirros.*-disk$" - to_flavor: - name: "m1.small" - confirm: true - volume_size: 1 - force_delete: false - do_delete: true - boot_server_kwargs: {} - create_volume_kwargs: {} - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 3 - users_per_tenant: 2 diff --git a/samples/tasks/scenarios/nova/boot-from-volume-snapshot.json b/samples/tasks/scenarios/nova/boot-from-volume-snapshot.json deleted file mode 100755 index 785003f3..00000000 --- a/samples/tasks/scenarios/nova/boot-from-volume-snapshot.json +++ /dev/null @@ -1,29 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} -{% set volume_type = volume_type or "" %} -{ - "NovaServers.boot_server_from_volume_snapshot": [ - { - "args": { - "flavor": { - "name": "{{flavor_name}}" - }, - "image": { - "name": "^cirros.*-disk$" - }, - "volume_size": 10, - "volume_type": "{{volume_type}}" - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 2 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/nova/boot-from-volume-snapshot.yaml b/samples/tasks/scenarios/nova/boot-from-volume-snapshot.yaml deleted file mode 100755 index 818cfe80..00000000 --- a/samples/tasks/scenarios/nova/boot-from-volume-snapshot.yaml +++ /dev/null @@ -1,20 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} -{% set volume_type = volume_type or "" %} ---- - NovaServers.boot_server_from_volume_snapshot: - - - args: - flavor: - name: "{{flavor_name}}" - image: - name: "^cirros.*-disk$" - volume_size: 10 - volume_type: "{{volume_type}}" - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 3 - users_per_tenant: 2 diff --git a/samples/tasks/scenarios/nova/boot-from-volume.json b/samples/tasks/scenarios/nova/boot-from-volume.json deleted file mode 100755 index 86418e3e..00000000 --- a/samples/tasks/scenarios/nova/boot-from-volume.json +++ /dev/null @@ -1,29 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} -{% set volume_type = volume_type or "" %} -{ - "NovaServers.boot_server_from_volume": [ - { - "args": { - "flavor": { - "name": "{{flavor_name}}" - }, - "image": { - "name": "^cirros.*-disk$" - }, - "volume_size": 10, - "volume_type": "{{volume_type}}" - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 2 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/nova/boot-from-volume.yaml b/samples/tasks/scenarios/nova/boot-from-volume.yaml deleted file mode 100755 index ae0e96be..00000000 --- a/samples/tasks/scenarios/nova/boot-from-volume.yaml +++ /dev/null @@ -1,20 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} -{% set volume_type = volume_type or "" %} ---- - NovaServers.boot_server_from_volume: - - - args: - flavor: - name: "{{flavor_name}}" - image: - name: "^cirros.*-disk$" - volume_size: 10 - volume_type: "{{volume_type}}" - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 3 - users_per_tenant: 2 diff --git a/samples/tasks/scenarios/nova/boot-lock-unlock-and-delete.json b/samples/tasks/scenarios/nova/boot-lock-unlock-and-delete.json deleted file mode 100644 index ab82e173..00000000 --- a/samples/tasks/scenarios/nova/boot-lock-unlock-and-delete.json +++ /dev/null @@ -1,26 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} -{ - 
"NovaServers.boot_lock_unlock_and_delete": [ - { - "args": { - "flavor": { - "name": "{{flavor_name}}" - }, - "image": { - "name": "^cirros.*-disk$" - } - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/nova/boot-lock-unlock-and-delete.yaml b/samples/tasks/scenarios/nova/boot-lock-unlock-and-delete.yaml deleted file mode 100644 index da76049f..00000000 --- a/samples/tasks/scenarios/nova/boot-lock-unlock-and-delete.yaml +++ /dev/null @@ -1,17 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} ---- - NovaServers.boot_lock_unlock_and_delete: - - - args: - flavor: - name: "{{flavor_name}}" - image: - name: "^cirros.*-disk$" - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 1 diff --git a/samples/tasks/scenarios/nova/boot-server-and-attach-interface.json b/samples/tasks/scenarios/nova/boot-server-and-attach-interface.json deleted file mode 100644 index 09a964f8..00000000 --- a/samples/tasks/scenarios/nova/boot-server-and-attach-interface.json +++ /dev/null @@ -1,43 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} -{% set image_name = "^(cirros.*-disk|TestVM)$" %} -{ - "NovaServers.boot_server_and_attach_interface": [ - { - "args": { - "flavor": { - "name": "{{flavor_name}}" - }, - "image": { - "name": "{{image_name}}" - }, - "network_create_args": {}, - "subnet_create_args": {}, - "subnet_cidr_start": "1.1.0.0/30", - "boot_server_args": {} - }, - "runner": { - "type": "constant", - "times": 5, - "concurrency": 2 - }, - "context": { - "network": {}, - "users": { - "tenants": 2, - "users_per_tenant": 2 - }, - "quotas": { - "neutron": { - "network": -1, - "subnet": -1 - } - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/nova/boot-server-and-attach-interface.yaml b/samples/tasks/scenarios/nova/boot-server-and-attach-interface.yaml deleted file mode 100644 index e8d3ddd8..00000000 --- a/samples/tasks/scenarios/nova/boot-server-and-attach-interface.yaml +++ /dev/null @@ -1,30 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} -{% set image_name = "^(cirros.*-disk|TestVM)$" %} ---- - NovaServers.boot_server_and_attach_interface: - - - args: - flavor: - name: "{{flavor_name}}" - image: - name: "{{image_name}}" - network_create_args: {} - subnet_create_args: {} - subnet_cidr_start: "1.1.0.0/30" - boot_server_args: {} - runner: - type: "constant" - times: 5 - concurrency: 2 - context: - network: {} - users: - tenants: 2 - users_per_tenant: 2 - quotas: - neutron: - network: -1 - subnet: -1 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/nova/boot-server-and-list-interfaces.json b/samples/tasks/scenarios/nova/boot-server-and-list-interfaces.json deleted file mode 100644 index 6a03128b..00000000 --- a/samples/tasks/scenarios/nova/boot-server-and-list-interfaces.json +++ /dev/null @@ -1,35 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} -{% set image_name = "^(cirros.*-disk|TestVM)$" %} -{ - "NovaServers.boot_server_and_list_interfaces": [ - { - "args": { - "flavor": { - "name": "{{flavor_name}}" - }, - "image": { - "name": "{{image_name}}" - } - }, - "runner": { - "type": "constant", - "times": 4, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - }, - "network": { - "start_cidr": "100.1.0.0/26" - } - }, - "sla": { - "failure_rate": { - "max": 
0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/nova/boot-server-and-list-interfaces.yaml b/samples/tasks/scenarios/nova/boot-server-and-list-interfaces.yaml deleted file mode 100644 index 7439252f..00000000 --- a/samples/tasks/scenarios/nova/boot-server-and-list-interfaces.yaml +++ /dev/null @@ -1,23 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} -{% set image_name = "^(cirros.*-disk|TestVM)$" %} ---- - NovaServers.boot_server_and_list_interfaces: - - - args: - flavor: - name: "{{flavor_name}}" - image: - name: "{{image_name}}" - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - network: - start_cidr: "100.1.0.0/26" - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/nova/boot-server-associate-and-dissociate-floating-ip.json b/samples/tasks/scenarios/nova/boot-server-associate-and-dissociate-floating-ip.json deleted file mode 100644 index 1f880767..00000000 --- a/samples/tasks/scenarios/nova/boot-server-associate-and-dissociate-floating-ip.json +++ /dev/null @@ -1,32 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} -{ - "NovaServers.boot_server_associate_and_dissociate_floating_ip": [ - { - "runner": { - "type": "constant", - "concurrency": 2, - "times": 5 - }, - "args": { - "flavor": { - "name": "{{flavor_name}}" - }, - "image": { - "name": "^cirros.*-disk$" - } - }, - "context": { - "users": { - "users_per_tenant": 2, - "tenants": 3 - }, - "network": {} - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/nova/boot-server-associate-and-dissociate-floating-ip.yaml b/samples/tasks/scenarios/nova/boot-server-associate-and-dissociate-floating-ip.yaml deleted file mode 100644 index 1e397d12..00000000 --- a/samples/tasks/scenarios/nova/boot-server-associate-and-dissociate-floating-ip.yaml +++ /dev/null @@ -1,22 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} ---- - NovaServers.boot_server_associate_and_dissociate_floating_ip: - - - args: - flavor: - name: "{{flavor_name}}" - image: - name: "^cirros.*-disk$" - runner: - type: "constant" - times: 5 - concurrency: 2 - context: - users: - tenants: 3 - users_per_tenant: 2 - network: {} - sla: - failure_rate: - max: 0 - diff --git a/samples/tasks/scenarios/nova/boot-server-attach-created-volume-and-live-migrate.json b/samples/tasks/scenarios/nova/boot-server-attach-created-volume-and-live-migrate.json deleted file mode 100644 index 2b6e69db..00000000 --- a/samples/tasks/scenarios/nova/boot-server-attach-created-volume-and-live-migrate.json +++ /dev/null @@ -1,30 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} -{ - "NovaServers.boot_server_attach_created_volume_and_live_migrate": [ - { - "args": { - "size": 10, - "block_migration": false, - "image": { - "name": "^cirros.*-disk$" - }, - "flavor": { - "name": "{{flavor_name}}" - }, - "boot_server_kwargs": {}, - "create_volume_kwargs": {} - }, - "runner": { - "type": "constant", - "times": 5, - "concurrency": 1 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/nova/boot-server-attach-created-volume-and-live-migrate.yaml b/samples/tasks/scenarios/nova/boot-server-attach-created-volume-and-live-migrate.yaml deleted file mode 100644 index eaf40726..00000000 --- a/samples/tasks/scenarios/nova/boot-server-attach-created-volume-and-live-migrate.yaml +++ /dev/null @@ -1,20 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} ---- - 
NovaServers.boot_server_attach_created_volume_and_live_migrate: - - args: - flavor: - name: "{{flavor_name}}" - image: - name: "^cirros.*-disk$" - size: 10 - block_migration: false - boot_server_kwargs: {} - create_volume_kwargs: {} - runner: - type: "constant" - times: 5 - concurrency: 1 - context: - users: - tenants: 2 - users_per_tenant: 2 diff --git a/samples/tasks/scenarios/nova/boot-server-attach-created-volume-and-resize.json b/samples/tasks/scenarios/nova/boot-server-attach-created-volume-and-resize.json deleted file mode 100644 index 093b0f43..00000000 --- a/samples/tasks/scenarios/nova/boot-server-attach-created-volume-and-resize.json +++ /dev/null @@ -1,35 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} -{ - "NovaServers.boot_server_attach_created_volume_and_resize": [ - { - "args": { - "flavor": { - "name": "{{flavor_name}}" - }, - "image": { - "name": "^cirros.*-disk$" - }, - "to_flavor": { - "name": "m1.small" - }, - "confirm": true, - "volume_size": 1, - "force_delete": false, - "do_delete": true, - "boot_server_kwargs": {}, - "create_volume_kwargs": {} - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 2 - } - } - } - ] -} \ No newline at end of file diff --git a/samples/tasks/scenarios/nova/boot-server-attach-created-volume-and-resize.yaml b/samples/tasks/scenarios/nova/boot-server-attach-created-volume-and-resize.yaml deleted file mode 100644 index 4a1a8047..00000000 --- a/samples/tasks/scenarios/nova/boot-server-attach-created-volume-and-resize.yaml +++ /dev/null @@ -1,25 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} ---- - NovaServers.boot_server_attach_created_volume_and_resize: - - - args: - flavor: - name: "{{flavor_name}}" - image: - name: "^cirros.*-disk$" - to_flavor: - name: "m1.small" - confirm: true - volume_size: 1 - force_delete: false - do_delete: true - boot_server_kwargs: {} - create_volume_kwargs: {} - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 3 - users_per_tenant: 2 \ No newline at end of file diff --git a/samples/tasks/scenarios/nova/boot-server-attach-volume-and-list-attachments.json b/samples/tasks/scenarios/nova/boot-server-attach-volume-and-list-attachments.json deleted file mode 100644 index 5cef165d..00000000 --- a/samples/tasks/scenarios/nova/boot-server-attach-volume-and-list-attachments.json +++ /dev/null @@ -1,36 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} -{% set image_name = "^(cirros.*-disk|TestVM)$" %} -{ - "NovaServers.boot_server_attach_volume_and_list_attachments": [ - { - "args": { - "flavor": { - "name": "{{flavor_name}}" - }, - "image": { - "name": "{{image_name}}" - }, - "volume_size": 1, - "volume_num": 2, - "boot_server_kwargs": {}, - "create_volume_kwargs": {} - }, - "runner": { - "type": "constant", - "times": 5, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/nova/boot-server-attach-volume-and-list-attachments.yaml b/samples/tasks/scenarios/nova/boot-server-attach-volume-and-list-attachments.yaml deleted file mode 100644 index a734ffa7..00000000 --- a/samples/tasks/scenarios/nova/boot-server-attach-volume-and-list-attachments.yaml +++ /dev/null @@ -1,25 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} -{% set image_name = "^(cirros.*-disk|TestVM)$" %} ---- - 
NovaServers.boot_server_attach_volume_and_list_attachments: - - - args: - flavor: - name: "{{flavor_name}}" - image: - name: "{{image_name}}" - volume_size: 1 - volume_num: 2 - boot_server_kwargs: {} - create_volume_kwargs: {} - runner: - type: "constant" - times: 5 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/nova/boot-server-from-volume-and-live-migrate.json b/samples/tasks/scenarios/nova/boot-server-from-volume-and-live-migrate.json deleted file mode 100755 index 5fe80a1a..00000000 --- a/samples/tasks/scenarios/nova/boot-server-from-volume-and-live-migrate.json +++ /dev/null @@ -1,31 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} -{% set volume_type = volume_type or "" %} -{ - "NovaServers.boot_server_from_volume_and_live_migrate": [ - { - "args": { - "flavor": { - "name": "{{flavor_name}}" - }, - "image": { - "name": "^cirros.*-disk$" - }, - "block_migration": false, - "volume_size": 10, - "volume_type": "{{volume_type}}", - "force_delete": false - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/nova/boot-server-from-volume-and-live-migrate.yaml b/samples/tasks/scenarios/nova/boot-server-from-volume-and-live-migrate.yaml deleted file mode 100755 index b5227e19..00000000 --- a/samples/tasks/scenarios/nova/boot-server-from-volume-and-live-migrate.yaml +++ /dev/null @@ -1,21 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} -{% set volume_type = volume_type or "" %} ---- - NovaServers.boot_server_from_volume_and_live_migrate: - - args: - flavor: - name: "{{flavor_name}}" - image: - name: "^cirros.*-disk$" - block_migration: false - volume_size: 10 - volume_type: "{{volume_type}}" - force_delete: false - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 1 diff --git a/samples/tasks/scenarios/nova/boot-snapshot-boot-delete.json b/samples/tasks/scenarios/nova/boot-snapshot-boot-delete.json deleted file mode 100644 index ba41ba0e..00000000 --- a/samples/tasks/scenarios/nova/boot-snapshot-boot-delete.json +++ /dev/null @@ -1,27 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} -{ - "NovaServers.snapshot_server": [ - { - "args": { - "flavor": { - "name": "{{flavor_name}}" - }, - "image": { - "name": "^cirros.*-disk$" - }, - "force_delete": false - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 2 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/nova/boot-snapshot-boot-delete.yaml b/samples/tasks/scenarios/nova/boot-snapshot-boot-delete.yaml deleted file mode 100644 index ffacd319..00000000 --- a/samples/tasks/scenarios/nova/boot-snapshot-boot-delete.yaml +++ /dev/null @@ -1,18 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} ---- - NovaServers.snapshot_server: - - - args: - flavor: - name: "{{flavor_name}}" - image: - name: "^cirros.*-disk$" - force_delete: false - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 3 - users_per_tenant: 2 diff --git a/samples/tasks/scenarios/nova/boot.json b/samples/tasks/scenarios/nova/boot.json deleted file mode 100644 index 5169ae56..00000000 --- a/samples/tasks/scenarios/nova/boot.json +++ /dev/null @@ -1,26 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} -{ - 
"NovaServers.boot_server": [ - { - "args": { - "flavor": { - "name": "{{flavor_name}}" - }, - "image": { - "name": "^cirros.*-disk$" - } - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 2 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/nova/boot.yaml b/samples/tasks/scenarios/nova/boot.yaml deleted file mode 100644 index 1567df79..00000000 --- a/samples/tasks/scenarios/nova/boot.yaml +++ /dev/null @@ -1,17 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} ---- - NovaServers.boot_server: - - - args: - flavor: - name: "{{flavor_name}}" - image: - name: "^cirros.*-disk$" - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 3 - users_per_tenant: 2 diff --git a/samples/tasks/scenarios/nova/create-aggregate-add-and-remove-host.json b/samples/tasks/scenarios/nova/create-aggregate-add-and-remove-host.json deleted file mode 100644 index 8f52fca2..00000000 --- a/samples/tasks/scenarios/nova/create-aggregate-add-and-remove-host.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "NovaAggregates.create_aggregate_add_and_remove_host": [ - { - "args": { - "availability_zone": "nova" - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/nova/create-aggregate-add-and-remove-host.yaml b/samples/tasks/scenarios/nova/create-aggregate-add-and-remove-host.yaml deleted file mode 100644 index ae3ccb06..00000000 --- a/samples/tasks/scenarios/nova/create-aggregate-add-and-remove-host.yaml +++ /dev/null @@ -1,16 +0,0 @@ ---- - NovaAggregates.create_aggregate_add_and_remove_host: - - - args: - availability_zone: "nova" - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 3 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/nova/create-aggregate-add-host-and-boot-server.json b/samples/tasks/scenarios/nova/create-aggregate-add-host-and-boot-server.json deleted file mode 100644 index 5d86a065..00000000 --- a/samples/tasks/scenarios/nova/create-aggregate-add-host-and-boot-server.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "NovaAggregates.create_aggregate_add_host_and_boot_server": [ - { - "args": { - "image": { - "name": "^cirros.*-disk$" - }, - "metadata": { - "test_metadata": "true" - }, - "availability_zone": "nova", - "ram": 512, - "vcpus": 1, - "disk": 1, - "boot_server_kwargs": {} - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/nova/create-aggregate-add-host-and-boot-server.yaml b/samples/tasks/scenarios/nova/create-aggregate-add-host-and-boot-server.yaml deleted file mode 100644 index b0d58865..00000000 --- a/samples/tasks/scenarios/nova/create-aggregate-add-host-and-boot-server.yaml +++ /dev/null @@ -1,24 +0,0 @@ ---- - NovaAggregates.create_aggregate_add_host_and_boot_server: - - - args: - image: - name: "^cirros.*-disk$" - metadata: - test_metadata: "true" - availability_zone: "nova" - ram: 512 - vcpus: 1 - disk: 1 - boot_server_kwargs: {} - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 3 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git 
a/samples/tasks/scenarios/nova/create-and-delete-aggregate.json b/samples/tasks/scenarios/nova/create-and-delete-aggregate.json deleted file mode 100644 index b02d58f9..00000000 --- a/samples/tasks/scenarios/nova/create-and-delete-aggregate.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "NovaAggregates.create_and_delete_aggregate": [ - { - "args": { - "availability_zone": "nova" - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/nova/create-and-delete-aggregate.yaml b/samples/tasks/scenarios/nova/create-and-delete-aggregate.yaml deleted file mode 100644 index e442639b..00000000 --- a/samples/tasks/scenarios/nova/create-and-delete-aggregate.yaml +++ /dev/null @@ -1,16 +0,0 @@ ---- - NovaAggregates.create_and_delete_aggregate: - - - args: - availability_zone: "nova" - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 3 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/nova/create-and-delete-flavor.json b/samples/tasks/scenarios/nova/create-and-delete-flavor.json deleted file mode 100644 index 33ce7826..00000000 --- a/samples/tasks/scenarios/nova/create-and-delete-flavor.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "NovaFlavors.create_and_delete_flavor": [ - { - "runner": { - "type": "constant", - "concurrency": 2, - "times": 10 - }, - "args": { - "ram": 500, - "vcpus" : 1, - "disk": 1 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/nova/create-and-delete-flavor.yaml b/samples/tasks/scenarios/nova/create-and-delete-flavor.yaml deleted file mode 100644 index 35d7e7b6..00000000 --- a/samples/tasks/scenarios/nova/create-and-delete-flavor.yaml +++ /dev/null @@ -1,18 +0,0 @@ ---- - NovaFlavors.create_and_delete_flavor: - - - runner: - type: "constant" - concurrency: 2 - times: 10 - args: - ram: 500 - vcpus : 1 - disk: 1 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/nova/create-and-delete-keypair.json b/samples/tasks/scenarios/nova/create-and-delete-keypair.json deleted file mode 100644 index 26391438..00000000 --- a/samples/tasks/scenarios/nova/create-and-delete-keypair.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "NovaKeypair.create_and_delete_keypair": [ - { - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 2 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/nova/create-and-delete-keypair.yaml b/samples/tasks/scenarios/nova/create-and-delete-keypair.yaml deleted file mode 100644 index eabd4b0e..00000000 --- a/samples/tasks/scenarios/nova/create-and-delete-keypair.yaml +++ /dev/null @@ -1,11 +0,0 @@ ---- - NovaKeypair.create_and_delete_keypair: - - - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 3 - users_per_tenant: 2 diff --git a/samples/tasks/scenarios/nova/create-and-get-aggregate-details.json b/samples/tasks/scenarios/nova/create-and-get-aggregate-details.json deleted file mode 100644 index 45efe31e..00000000 --- a/samples/tasks/scenarios/nova/create-and-get-aggregate-details.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "NovaAggregates.create_and_get_aggregate_details": [ - { - "args": { - 
"availability_zone": "nova" - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/nova/create-and-get-aggregate-details.yaml b/samples/tasks/scenarios/nova/create-and-get-aggregate-details.yaml deleted file mode 100644 index 1bf4fefd..00000000 --- a/samples/tasks/scenarios/nova/create-and-get-aggregate-details.yaml +++ /dev/null @@ -1,16 +0,0 @@ ---- - NovaAggregates.create_and_get_aggregate_details: - - - args: - availability_zone: "nova" - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 3 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/nova/create-and-get-flavor.json b/samples/tasks/scenarios/nova/create-and-get-flavor.json deleted file mode 100644 index e75e46f2..00000000 --- a/samples/tasks/scenarios/nova/create-and-get-flavor.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "NovaFlavors.create_and_get_flavor": [ - { - "runner": { - "type": "constant", - "concurrency": 2, - "times": 10 - }, - "args": { - "ram": 500, - "vcpus" : 1, - "disk": 1 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/nova/create-and-get-flavor.yaml b/samples/tasks/scenarios/nova/create-and-get-flavor.yaml deleted file mode 100644 index b31cec2b..00000000 --- a/samples/tasks/scenarios/nova/create-and-get-flavor.yaml +++ /dev/null @@ -1,19 +0,0 @@ - ---- - NovaFlavors.create_and_get_flavor: - - - runner: - type: "constant" - concurrency: 2 - times: 10 - args: - ram: 500 - vcpus : 1 - disk: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/nova/create-and-get-keypair.json b/samples/tasks/scenarios/nova/create-and-get-keypair.json deleted file mode 100644 index 0166998c..00000000 --- a/samples/tasks/scenarios/nova/create-and-get-keypair.json +++ /dev/null @@ -1,24 +0,0 @@ -{ - "NovaKeypair.create_and_get_keypair": [ - { - "args": {}, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - - } - ] -} diff --git a/samples/tasks/scenarios/nova/create-and-get-keypair.yaml b/samples/tasks/scenarios/nova/create-and-get-keypair.yaml deleted file mode 100644 index 22827d36..00000000 --- a/samples/tasks/scenarios/nova/create-and-get-keypair.yaml +++ /dev/null @@ -1,15 +0,0 @@ ---- - NovaKeypair.create_and_get_keypair: - - - args: {} - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 3 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/nova/create-and-get-server-group.json b/samples/tasks/scenarios/nova/create-and-get-server-group.json deleted file mode 100644 index 46fb5296..00000000 --- a/samples/tasks/scenarios/nova/create-and-get-server-group.json +++ /dev/null @@ -1,29 +0,0 @@ -{ - "NovaServerGroups.create_and_get_server_group": [ - { - "args": { - "kwargs": { - "policies": [ - "affinity" - ] - } - }, - "runner": { - "type": "constant", - "times": 4, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git 
a/samples/tasks/scenarios/nova/create-and-get-server-group.yaml b/samples/tasks/scenarios/nova/create-and-get-server-group.yaml deleted file mode 100644 index e1d2d2e1..00000000 --- a/samples/tasks/scenarios/nova/create-and-get-server-group.yaml +++ /dev/null @@ -1,16 +0,0 @@ - NovaServerGroups.create_and_get_server_group: - - - args: - kwargs: - policies: ["affinity"] - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/nova/create-and-list-aggregates.json b/samples/tasks/scenarios/nova/create-and-list-aggregates.json deleted file mode 100644 index 7e976223..00000000 --- a/samples/tasks/scenarios/nova/create-and-list-aggregates.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "NovaAggregates.create_and_list_aggregates": [ - { - "args": { - "availability_zone": "nova" - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/nova/create-and-list-aggregates.yaml b/samples/tasks/scenarios/nova/create-and-list-aggregates.yaml deleted file mode 100644 index 6ccd85ec..00000000 --- a/samples/tasks/scenarios/nova/create-and-list-aggregates.yaml +++ /dev/null @@ -1,16 +0,0 @@ ---- - NovaAggregates.create_and_list_aggregates: - - - args: - availability_zone: "nova" - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 3 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/nova/create-and-list-flavor-access.json b/samples/tasks/scenarios/nova/create-and-list-flavor-access.json deleted file mode 100644 index 680be246..00000000 --- a/samples/tasks/scenarios/nova/create-and-list-flavor-access.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "NovaFlavors.create_and_list_flavor_access": [ - { - "runner": { - "type": "constant", - "concurrency": 2, - "times": 10 - }, - "args": { - "ram": 500, - "vcpus" : 1, - "disk": 1 - } - } - ] -} diff --git a/samples/tasks/scenarios/nova/create-and-list-flavor-access.yaml b/samples/tasks/scenarios/nova/create-and-list-flavor-access.yaml deleted file mode 100644 index b16bbd50..00000000 --- a/samples/tasks/scenarios/nova/create-and-list-flavor-access.yaml +++ /dev/null @@ -1,11 +0,0 @@ ---- - NovaFlavors.create_and_list_flavor_access: - - - args: - ram: 500 - vcpus: 1 - disk: 1 - runner: - type: "constant" - times: 10 - concurrency: 2 diff --git a/samples/tasks/scenarios/nova/create-and-list-keypairs.json b/samples/tasks/scenarios/nova/create-and-list-keypairs.json deleted file mode 100644 index 878fcdbe..00000000 --- a/samples/tasks/scenarios/nova/create-and-list-keypairs.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "NovaKeypair.create_and_list_keypairs": [ - { - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 2 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/nova/create-and-list-keypairs.yaml b/samples/tasks/scenarios/nova/create-and-list-keypairs.yaml deleted file mode 100644 index edc04eee..00000000 --- a/samples/tasks/scenarios/nova/create-and-list-keypairs.yaml +++ /dev/null @@ -1,11 +0,0 @@ ---- - NovaKeypair.create_and_list_keypairs: - - - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 3 - users_per_tenant: 2 diff --git 
a/samples/tasks/scenarios/nova/create-and-list-server-groups.json b/samples/tasks/scenarios/nova/create-and-list-server-groups.json deleted file mode 100644 index bdf06335..00000000 --- a/samples/tasks/scenarios/nova/create-and-list-server-groups.json +++ /dev/null @@ -1,30 +0,0 @@ -{ - "NovaServerGroups.create_and_list_server_groups": [ - { - "args": { - "kwargs": { - "policies": [ - "affinity" - ] - }, - "all_projects": false - }, - "runner": { - "type": "constant", - "times": 4, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/nova/create-and-list-server-groups.yaml b/samples/tasks/scenarios/nova/create-and-list-server-groups.yaml deleted file mode 100644 index cc4dbe59..00000000 --- a/samples/tasks/scenarios/nova/create-and-list-server-groups.yaml +++ /dev/null @@ -1,17 +0,0 @@ - NovaServerGroups.create_and_list_server_groups: - - - args: - kwargs: - policies: ["affinity"] - all_projects: false - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/nova/create-and-update-aggregate.json b/samples/tasks/scenarios/nova/create-and-update-aggregate.json deleted file mode 100644 index 65b0c46f..00000000 --- a/samples/tasks/scenarios/nova/create-and-update-aggregate.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "NovaAggregates.create_and_update_aggregate": [ - { - "args": { - "availability_zone": "nova" - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/nova/create-and-update-aggregate.yaml b/samples/tasks/scenarios/nova/create-and-update-aggregate.yaml deleted file mode 100644 index 5fcf5c1a..00000000 --- a/samples/tasks/scenarios/nova/create-and-update-aggregate.yaml +++ /dev/null @@ -1,16 +0,0 @@ ---- - NovaAggregates.create_and_update_aggregate: - - - args: - availability_zone: "nova" - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 3 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/nova/create-flavor-and-add-tenant-access.json b/samples/tasks/scenarios/nova/create-flavor-and-add-tenant-access.json deleted file mode 100644 index 51435a54..00000000 --- a/samples/tasks/scenarios/nova/create-flavor-and-add-tenant-access.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "NovaFlavors.create_flavor_and_add_tenant_access": [ - { - "runner": { - "type": "constant", - "concurrency": 2, - "times": 10 - }, - "args": { - "ram": 500, - "vcpus" : 1, - "disk": 1 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/nova/create-flavor-and-add-tenant-access.yaml b/samples/tasks/scenarios/nova/create-flavor-and-add-tenant-access.yaml deleted file mode 100644 index b40a42a0..00000000 --- a/samples/tasks/scenarios/nova/create-flavor-and-add-tenant-access.yaml +++ /dev/null @@ -1,19 +0,0 @@ ---- - NovaFlavors.create_flavor_and_add_tenant_access: - - - runner: - type: "constant" - concurrency: 2 - times: 10 - args: - ram: 500 - vcpus : 1 - disk: 1 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - diff --git 
a/samples/tasks/scenarios/nova/create-flavor-and-set-keys.json b/samples/tasks/scenarios/nova/create-flavor-and-set-keys.json deleted file mode 100644 index e150a296..00000000 --- a/samples/tasks/scenarios/nova/create-flavor-and-set-keys.json +++ /dev/null @@ -1,30 +0,0 @@ -{ - "NovaFlavors.create_flavor_and_set_keys": [ - { - "runner": { - "type": "constant", - "concurrency": 2, - "times": 10 - }, - "args": { - "ram": 500, - "vcpus" : 1, - "disk": 1, - "extra_specs": { - "quota:disk_read_bytes_sec": 10240 - } - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/nova/create-flavor-and-set-keys.yaml b/samples/tasks/scenarios/nova/create-flavor-and-set-keys.yaml deleted file mode 100644 index c376cc00..00000000 --- a/samples/tasks/scenarios/nova/create-flavor-and-set-keys.yaml +++ /dev/null @@ -1,20 +0,0 @@ ---- - NovaFlavors.create_flavor_and_set_keys: - - - runner: - type: "constant" - concurrency: 2 - times: 10 - args: - ram: 500 - vcpus : 1 - disk: 1 - extra_specs: - quota:disk_read_bytes_sec: 10240 - context: - users: - tenants: 3 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/nova/create-flavor.json b/samples/tasks/scenarios/nova/create-flavor.json deleted file mode 100644 index ec65d493..00000000 --- a/samples/tasks/scenarios/nova/create-flavor.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "NovaFlavors.create_flavor": [ - { - "runner": { - "type": "constant", - "concurrency": 2, - "times": 10 - }, - "args": { - "ram": 500, - "vcpus" : 1, - "disk": 1 - } - } - ] -} diff --git a/samples/tasks/scenarios/nova/create-flavor.yaml b/samples/tasks/scenarios/nova/create-flavor.yaml deleted file mode 100644 index 988f3a41..00000000 --- a/samples/tasks/scenarios/nova/create-flavor.yaml +++ /dev/null @@ -1,11 +0,0 @@ ---- - NovaFlavors.create_flavor: - - - args: - ram: 500 - vcpus: 1 - disk: 1 - runner: - type: "constant" - times: 10 - concurrency: 2 diff --git a/samples/tasks/scenarios/nova/list-agents.json b/samples/tasks/scenarios/nova/list-agents.json deleted file mode 100644 index c724e8ae..00000000 --- a/samples/tasks/scenarios/nova/list-agents.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "NovaAgents.list_agents": [ - { - "runner": { - "type": "constant", - "concurrency": 2, - "times": 10 - } - } - ] -} diff --git a/samples/tasks/scenarios/nova/list-agents.yaml b/samples/tasks/scenarios/nova/list-agents.yaml deleted file mode 100644 index 54e9e4da..00000000 --- a/samples/tasks/scenarios/nova/list-agents.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- - NovaAgents.list_agents: - - - runner: - type: "constant" - concurrency: 2 - times: 10 diff --git a/samples/tasks/scenarios/nova/list-aggregates.json b/samples/tasks/scenarios/nova/list-aggregates.json deleted file mode 100644 index a0125145..00000000 --- a/samples/tasks/scenarios/nova/list-aggregates.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "NovaAggregates.list_aggregates": [ - { - "runner": { - "type": "constant", - "concurrency": 2, - "times": 10 - } - } - ] -} diff --git a/samples/tasks/scenarios/nova/list-aggregates.yaml b/samples/tasks/scenarios/nova/list-aggregates.yaml deleted file mode 100644 index 34bc95a5..00000000 --- a/samples/tasks/scenarios/nova/list-aggregates.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- - NovaAggregates.list_aggregates: - - - runner: - type: "constant" - concurrency: 2 - times: 10 diff --git a/samples/tasks/scenarios/nova/list-and-get-hosts.json 
b/samples/tasks/scenarios/nova/list-and-get-hosts.json deleted file mode 100644 index 32673c9d..00000000 --- a/samples/tasks/scenarios/nova/list-and-get-hosts.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "NovaHosts.list_and_get_hosts": [ - { - "runner": { - "type": "constant", - "concurrency": 2, - "times": 10 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/nova/list-and-get-hosts.yaml b/samples/tasks/scenarios/nova/list-and-get-hosts.yaml deleted file mode 100644 index 9319052a..00000000 --- a/samples/tasks/scenarios/nova/list-and-get-hosts.yaml +++ /dev/null @@ -1,14 +0,0 @@ ---- - NovaHosts.list_and_get_hosts: - - - runner: - type: "constant" - concurrency: 2 - times: 10 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/nova/list-and-get-hypervisors.json b/samples/tasks/scenarios/nova/list-and-get-hypervisors.json deleted file mode 100644 index 9deb4000..00000000 --- a/samples/tasks/scenarios/nova/list-and-get-hypervisors.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "NovaHypervisors.list_and_get_hypervisors": [ - { - "args": { - "detailed": true - }, - "runner": { - "type": "constant", - "concurrency": 2, - "times": 2 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/nova/list-and-get-hypervisors.yaml b/samples/tasks/scenarios/nova/list-and-get-hypervisors.yaml deleted file mode 100644 index 40497f12..00000000 --- a/samples/tasks/scenarios/nova/list-and-get-hypervisors.yaml +++ /dev/null @@ -1,16 +0,0 @@ ---- - NovaHypervisors.list_and_get_hypervisors: - - - args: - detailed: True - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - users: - tenants: 3 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/nova/list-and-get-uptime-hypervisors.json b/samples/tasks/scenarios/nova/list-and-get-uptime-hypervisors.json deleted file mode 100644 index 0f5396eb..00000000 --- a/samples/tasks/scenarios/nova/list-and-get-uptime-hypervisors.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "NovaHypervisors.list_and_get_uptime_hypervisors": [ - { - "args": { - "detailed": true - }, - "runner": { - "type": "constant", - "concurrency": 2, - "times": 2 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/nova/list-and-get-uptime-hypervisors.yaml b/samples/tasks/scenarios/nova/list-and-get-uptime-hypervisors.yaml deleted file mode 100644 index 51e0a2f3..00000000 --- a/samples/tasks/scenarios/nova/list-and-get-uptime-hypervisors.yaml +++ /dev/null @@ -1,16 +0,0 @@ ---- - NovaHypervisors.list_and_get_uptime_hypervisors: - - - args: - detailed: True - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - users: - tenants: 3 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/nova/list-and-search-hypervisor.json b/samples/tasks/scenarios/nova/list-and-search-hypervisor.json deleted file mode 100644 index 17e2f263..00000000 --- a/samples/tasks/scenarios/nova/list-and-search-hypervisor.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "NovaHypervisors.list_and_search_hypervisors": [ - { - "args": { - "detailed": true - }, - "runner": { - "type": "constant", - "concurrency": 2, 
- "times": 2 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/nova/list-and-search-hypervisor.yaml b/samples/tasks/scenarios/nova/list-and-search-hypervisor.yaml deleted file mode 100644 index d4fdb45f..00000000 --- a/samples/tasks/scenarios/nova/list-and-search-hypervisor.yaml +++ /dev/null @@ -1,16 +0,0 @@ ---- - NovaHypervisors.list_and_search_hypervisors: - - - args: - detailed: True - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - users: - tenants: 3 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/nova/list-availability-zones.json b/samples/tasks/scenarios/nova/list-availability-zones.json deleted file mode 100644 index a9fd1b78..00000000 --- a/samples/tasks/scenarios/nova/list-availability-zones.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "NovaAvailabilityZones.list_availability_zones": [ - { - "runner": { - "type": "constant", - "concurrency": 2, - "times": 10 - }, - "args": { - "detailed": true - } - } - ] -} diff --git a/samples/tasks/scenarios/nova/list-availability-zones.yaml b/samples/tasks/scenarios/nova/list-availability-zones.yaml deleted file mode 100644 index 995315a9..00000000 --- a/samples/tasks/scenarios/nova/list-availability-zones.yaml +++ /dev/null @@ -1,9 +0,0 @@ ---- - NovaAvailabilityZones.list_availability_zones: - - - args: - detailed: true - runner: - type: "constant" - concurrency: 2 - times: 10 diff --git a/samples/tasks/scenarios/nova/list-flavors.json b/samples/tasks/scenarios/nova/list-flavors.json deleted file mode 100644 index 80b91a37..00000000 --- a/samples/tasks/scenarios/nova/list-flavors.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "NovaFlavors.list_flavors": [ - { - "runner": { - "type": "constant", - "concurrency": 2, - "times": 10 - }, - "args": { - "detailed": true - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 2 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/nova/list-flavors.yaml b/samples/tasks/scenarios/nova/list-flavors.yaml deleted file mode 100644 index 0245f4a8..00000000 --- a/samples/tasks/scenarios/nova/list-flavors.yaml +++ /dev/null @@ -1,13 +0,0 @@ ---- - NovaFlavors.list_flavors: - - - args: - detailed: True - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 3 - users_per_tenant: 2 diff --git a/samples/tasks/scenarios/nova/list-hosts.json b/samples/tasks/scenarios/nova/list-hosts.json deleted file mode 100644 index a02de4a5..00000000 --- a/samples/tasks/scenarios/nova/list-hosts.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "NovaHosts.list_hosts": [ - { - "runner": { - "type": "constant", - "concurrency": 2, - "times": 10 - } - } - ] -} diff --git a/samples/tasks/scenarios/nova/list-hosts.yaml b/samples/tasks/scenarios/nova/list-hosts.yaml deleted file mode 100644 index f4e88fff..00000000 --- a/samples/tasks/scenarios/nova/list-hosts.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- - NovaHosts.list_hosts: - - - runner: - type: "constant" - concurrency: 2 - times: 10 diff --git a/samples/tasks/scenarios/nova/list-hypervisors.json b/samples/tasks/scenarios/nova/list-hypervisors.json deleted file mode 100644 index d4705f87..00000000 --- a/samples/tasks/scenarios/nova/list-hypervisors.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "NovaHypervisors.list_hypervisors": [ - { - "runner": { - "type": "constant", - "concurrency": 2, - "times": 10 - }, - "args": { - "detailed": true - } - } - ] -} \ No 
newline at end of file diff --git a/samples/tasks/scenarios/nova/list-hypervisors.yaml b/samples/tasks/scenarios/nova/list-hypervisors.yaml deleted file mode 100644 index 746ce92b..00000000 --- a/samples/tasks/scenarios/nova/list-hypervisors.yaml +++ /dev/null @@ -1,9 +0,0 @@ ---- - NovaHypervisors.list_hypervisors: - - - args: - detailed: True - runner: - type: "constant" - times: 10 - concurrency: 2 diff --git a/samples/tasks/scenarios/nova/list-images.json b/samples/tasks/scenarios/nova/list-images.json deleted file mode 100644 index 8620b5c6..00000000 --- a/samples/tasks/scenarios/nova/list-images.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "NovaImages.list_images": [ - { - "runner": { - "type": "constant", - "concurrency": 2, - "times": 10 - }, - "args": { - "detailed": true - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 2 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/nova/list-images.yaml b/samples/tasks/scenarios/nova/list-images.yaml deleted file mode 100644 index 94dd3b11..00000000 --- a/samples/tasks/scenarios/nova/list-images.yaml +++ /dev/null @@ -1,13 +0,0 @@ ---- - NovaImages.list_images: - - - args: - detailed: True - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 3 - users_per_tenant: 2 diff --git a/samples/tasks/scenarios/nova/list-servers.json b/samples/tasks/scenarios/nova/list-servers.json deleted file mode 100644 index 0eafd709..00000000 --- a/samples/tasks/scenarios/nova/list-servers.json +++ /dev/null @@ -1,30 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} -{ - "NovaServers.list_servers": [ - { - "args": { - "detailed": true - }, - "runner": { - "type": "constant", - "times": 1, - "concurrency": 1 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - }, - "servers": { - "flavor": { - "name": "{{flavor_name}}" - }, - "image": { - "name": "^cirros.*-disk$" - }, - "servers_per_tenant": 2 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/nova/list-servers.yaml b/samples/tasks/scenarios/nova/list-servers.yaml deleted file mode 100644 index d08e048f..00000000 --- a/samples/tasks/scenarios/nova/list-servers.yaml +++ /dev/null @@ -1,20 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} ---- - NovaServers.list_servers: - - - args: - detailed: True - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - servers: - flavor: - name: "{{flavor_name}}" - image: - name: "^cirros.*-disk$" - servers_per_tenant: 2 diff --git a/samples/tasks/scenarios/nova/list-services.json b/samples/tasks/scenarios/nova/list-services.json deleted file mode 100644 index 2a1a6e7b..00000000 --- a/samples/tasks/scenarios/nova/list-services.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "NovaServices.list_services": [ - { - "runner": { - "type": "constant", - "concurrency": 2, - "times": 10 - } - } - ] -} diff --git a/samples/tasks/scenarios/nova/list-services.yaml b/samples/tasks/scenarios/nova/list-services.yaml deleted file mode 100644 index 6aa0a4cd..00000000 --- a/samples/tasks/scenarios/nova/list-services.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- - NovaServices.list_services: - - - runner: - type: "constant" - times: 10 - concurrency: 2 diff --git a/samples/tasks/scenarios/nova/pause-and-unpause.json b/samples/tasks/scenarios/nova/pause-and-unpause.json deleted file mode 100644 index af24ecab..00000000 --- a/samples/tasks/scenarios/nova/pause-and-unpause.json +++ /dev/null @@ -1,27 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" 
%} -{ - "NovaServers.pause_and_unpause_server": [ - { - "args": { - "flavor": { - "name": "{{flavor_name}}" - }, - "image": { - "name": "^cirros.*-disk$" - }, - "force_delete": false - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 2 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/nova/pause-and-unpause.yaml b/samples/tasks/scenarios/nova/pause-and-unpause.yaml deleted file mode 100644 index d72417fc..00000000 --- a/samples/tasks/scenarios/nova/pause-and-unpause.yaml +++ /dev/null @@ -1,18 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} ---- - NovaServers.pause_and_unpause_server: - - - args: - flavor: - name: "{{flavor_name}}" - image: - name: "^cirros.*-disk$" - force_delete: false - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 3 - users_per_tenant: 2 diff --git a/samples/tasks/scenarios/nova/resize-server.json b/samples/tasks/scenarios/nova/resize-server.json deleted file mode 100644 index e5f3f98e..00000000 --- a/samples/tasks/scenarios/nova/resize-server.json +++ /dev/null @@ -1,32 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} -{ - "NovaServers.resize_server": [ - { - "args": { - "flavor": { - "name": "{{flavor_name}}" - }, - "image": { - "name": "^cirros.*-disk$" - }, - "to_flavor": { - "name": "m1.small" - }, - "confirm": true, - "force_delete": false - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 5 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - } - } - } - ] -} - diff --git a/samples/tasks/scenarios/nova/resize-server.yaml b/samples/tasks/scenarios/nova/resize-server.yaml deleted file mode 100644 index 8d41b988..00000000 --- a/samples/tasks/scenarios/nova/resize-server.yaml +++ /dev/null @@ -1,21 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} ---- - NovaServers.resize_server: - - - args: - flavor: - name: "{{flavor_name}}" - image: - name: "^cirros.*-disk$" - to_flavor: - name: "m1.small" - confirm: true - force_delete: false - runner: - type: "constant" - times: 10 - concurrency: 5 - context: - users: - tenants: 1 - users_per_tenant: 1 diff --git a/samples/tasks/scenarios/nova/resize-shutoff-server.json b/samples/tasks/scenarios/nova/resize-shutoff-server.json deleted file mode 100644 index 334c26eb..00000000 --- a/samples/tasks/scenarios/nova/resize-shutoff-server.json +++ /dev/null @@ -1,37 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} -{ - "NovaServers.resize_shutoff_server": [ - { - "args": { - "flavor": { - "name": "{{flavor_name}}" - }, - "image": { - "name": "^cirros.*-disk$" - }, - "to_flavor": { - "name": "m1.small" - }, - "confirm": true, - "force_delete": false - }, - "runner": { - "type": "constant", - "times": 5, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} - diff --git a/samples/tasks/scenarios/nova/resize-shutoff-server.yaml b/samples/tasks/scenarios/nova/resize-shutoff-server.yaml deleted file mode 100644 index 759b2f22..00000000 --- a/samples/tasks/scenarios/nova/resize-shutoff-server.yaml +++ /dev/null @@ -1,24 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} ---- - NovaServers.resize_shutoff_server: - - - args: - flavor: - name: "{{flavor_name}}" - image: - name: "^cirros.*-disk$" - to_flavor: - name: "m1.small" - confirm: true - force_delete: false - runner: - type: "constant" - times: 5 - 
concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/nova/shelve-and-unshelve.json b/samples/tasks/scenarios/nova/shelve-and-unshelve.json deleted file mode 100644 index d3986c14..00000000 --- a/samples/tasks/scenarios/nova/shelve-and-unshelve.json +++ /dev/null @@ -1,27 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} -{ - "NovaServers.shelve_and_unshelve_server": [ - { - "args": { - "flavor": { - "name": "{{flavor_name}}" - }, - "image": { - "name": "^cirros.*-disk$" - }, - "force_delete": false - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 2 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/nova/shelve-and-unshelve.yaml b/samples/tasks/scenarios/nova/shelve-and-unshelve.yaml deleted file mode 100644 index c02709ad..00000000 --- a/samples/tasks/scenarios/nova/shelve-and-unshelve.yaml +++ /dev/null @@ -1,18 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} ---- - NovaServers.shelve_and_unshelve_server: - - - args: - flavor: - name: "{{flavor_name}}" - image: - name: "^cirros.*-disk$" - force_delete: false - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 3 - users_per_tenant: 2 diff --git a/samples/tasks/scenarios/nova/statistics-hypervisors.json b/samples/tasks/scenarios/nova/statistics-hypervisors.json deleted file mode 100644 index f04dad62..00000000 --- a/samples/tasks/scenarios/nova/statistics-hypervisors.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "NovaHypervisors.statistics_hypervisors": [ - { - "args": {}, - "runner": { - "type": "constant", - "concurrency": 2, - "times": 2 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/nova/statistics-hypervisors.yaml b/samples/tasks/scenarios/nova/statistics-hypervisors.yaml deleted file mode 100644 index e3939a81..00000000 --- a/samples/tasks/scenarios/nova/statistics-hypervisors.yaml +++ /dev/null @@ -1,15 +0,0 @@ ---- - NovaHypervisors.statistics_hypervisors: - - - args: {} - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - users: - tenants: 3 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/nova/suspend-and-resume.json b/samples/tasks/scenarios/nova/suspend-and-resume.json deleted file mode 100644 index 04daf890..00000000 --- a/samples/tasks/scenarios/nova/suspend-and-resume.json +++ /dev/null @@ -1,27 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} -{ - "NovaServers.suspend_and_resume_server": [ - { - "args": { - "flavor": { - "name": "{{flavor_name}}" - }, - "image": { - "name": "^cirros.*-disk$" - }, - "force_delete": false - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 2 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/nova/suspend-and-resume.yaml b/samples/tasks/scenarios/nova/suspend-and-resume.yaml deleted file mode 100644 index 97914e57..00000000 --- a/samples/tasks/scenarios/nova/suspend-and-resume.yaml +++ /dev/null @@ -1,18 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} ---- - NovaServers.suspend_and_resume_server: - - - args: - flavor: - name: "{{flavor_name}}" - image: - name: "^cirros.*-disk$" - force_delete: false - runner: - type: "constant" - times: 10 - concurrency: 2 
- context: - users: - tenants: 3 - users_per_tenant: 2 diff --git a/samples/tasks/scenarios/quotas/cinder-get.json b/samples/tasks/scenarios/quotas/cinder-get.json deleted file mode 100644 index 627a0aa4..00000000 --- a/samples/tasks/scenarios/quotas/cinder-get.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "Quotas.cinder_get": [ - { - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - - } - ] -} diff --git a/samples/tasks/scenarios/quotas/cinder-get.yaml b/samples/tasks/scenarios/quotas/cinder-get.yaml deleted file mode 100644 index 0c6bcd22..00000000 --- a/samples/tasks/scenarios/quotas/cinder-get.yaml +++ /dev/null @@ -1,14 +0,0 @@ ---- - Quotas.cinder_get: - - - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/quotas/cinder-update-and-delete.json b/samples/tasks/scenarios/quotas/cinder-update-and-delete.json deleted file mode 100644 index 9c3842b8..00000000 --- a/samples/tasks/scenarios/quotas/cinder-update-and-delete.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "Quotas.cinder_update_and_delete": [ - { - "args": { - "max_quota": 1024 - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 2 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/quotas/cinder-update-and-delete.yaml b/samples/tasks/scenarios/quotas/cinder-update-and-delete.yaml deleted file mode 100644 index 03104b1b..00000000 --- a/samples/tasks/scenarios/quotas/cinder-update-and-delete.yaml +++ /dev/null @@ -1,13 +0,0 @@ ---- - Quotas.cinder_update_and_delete: - - - args: - max_quota: 1024 - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 3 - users_per_tenant: 2 diff --git a/samples/tasks/scenarios/quotas/cinder-update.json b/samples/tasks/scenarios/quotas/cinder-update.json deleted file mode 100644 index b70f31eb..00000000 --- a/samples/tasks/scenarios/quotas/cinder-update.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "Quotas.cinder_update": [ - { - "args": { - "max_quota": 1024 - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 2 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/quotas/cinder-update.yaml b/samples/tasks/scenarios/quotas/cinder-update.yaml deleted file mode 100644 index 95b5c226..00000000 --- a/samples/tasks/scenarios/quotas/cinder-update.yaml +++ /dev/null @@ -1,13 +0,0 @@ ---- - Quotas.cinder_update: - - - args: - max_quota: 1024 - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 3 - users_per_tenant: 2 diff --git a/samples/tasks/scenarios/quotas/neutron-update.json b/samples/tasks/scenarios/quotas/neutron-update.json deleted file mode 100644 index 8a0b4338..00000000 --- a/samples/tasks/scenarios/quotas/neutron-update.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "Quotas.neutron_update": [ - { - "args": { - "max_quota": 1024 - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 2 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/quotas/neutron-update.yaml b/samples/tasks/scenarios/quotas/neutron-update.yaml deleted file mode 100644 index 0650dc5a..00000000 --- 
a/samples/tasks/scenarios/quotas/neutron-update.yaml +++ /dev/null @@ -1,13 +0,0 @@ ---- - Quotas.neutron_update: - - - args: - max_quota: 1024 - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 3 - users_per_tenant: 2 diff --git a/samples/tasks/scenarios/quotas/nova-get.json b/samples/tasks/scenarios/quotas/nova-get.json deleted file mode 100644 index 86d99fbd..00000000 --- a/samples/tasks/scenarios/quotas/nova-get.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "Quotas.nova_get": [ - { - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - - } - ] -} diff --git a/samples/tasks/scenarios/quotas/nova-get.yaml b/samples/tasks/scenarios/quotas/nova-get.yaml deleted file mode 100644 index 15602ba4..00000000 --- a/samples/tasks/scenarios/quotas/nova-get.yaml +++ /dev/null @@ -1,14 +0,0 @@ ---- - Quotas.nova_get: - - - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/quotas/nova-update-and-delete.json b/samples/tasks/scenarios/quotas/nova-update-and-delete.json deleted file mode 100644 index 41aca426..00000000 --- a/samples/tasks/scenarios/quotas/nova-update-and-delete.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "Quotas.nova_update_and_delete": [ - { - "args": { - "max_quota": 1024 - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 2 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/quotas/nova-update-and-delete.yaml b/samples/tasks/scenarios/quotas/nova-update-and-delete.yaml deleted file mode 100644 index 0af74b6c..00000000 --- a/samples/tasks/scenarios/quotas/nova-update-and-delete.yaml +++ /dev/null @@ -1,13 +0,0 @@ ---- - Quotas.nova_update_and_delete: - - - args: - max_quota: 1024 - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 3 - users_per_tenant: 2 diff --git a/samples/tasks/scenarios/quotas/nova-update.json b/samples/tasks/scenarios/quotas/nova-update.json deleted file mode 100644 index 3412aa3a..00000000 --- a/samples/tasks/scenarios/quotas/nova-update.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "Quotas.nova_update": [ - { - "args": { - "max_quota": 1024 - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 2 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/quotas/nova-update.yaml b/samples/tasks/scenarios/quotas/nova-update.yaml deleted file mode 100644 index 9aa30dd1..00000000 --- a/samples/tasks/scenarios/quotas/nova-update.yaml +++ /dev/null @@ -1,13 +0,0 @@ ---- - Quotas.nova_update: - - - args: - max_quota: 1024 - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 3 - users_per_tenant: 2 diff --git a/samples/tasks/scenarios/requests/check-random-request.json b/samples/tasks/scenarios/requests/check-random-request.json deleted file mode 100644 index 5dcb7ead..00000000 --- a/samples/tasks/scenarios/requests/check-random-request.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "HttpRequests.check_random_request": [ - { - "args": { - "requests": [{"url": "http://www.example.com", "method": "GET", - "status_code": 200}, - {"url": "http://www.openstack.org", "method": "GET"}], - "status_code": 200 - }, - "runner": { - 
"type": "constant", - "times": 20, - "concurrency": 5 - } - } - ] -} diff --git a/samples/tasks/scenarios/requests/check-random-request.yaml b/samples/tasks/scenarios/requests/check-random-request.yaml deleted file mode 100644 index 6ccb826a..00000000 --- a/samples/tasks/scenarios/requests/check-random-request.yaml +++ /dev/null @@ -1,17 +0,0 @@ ---- - HttpRequests.check_random_request: - - - args: - requests: - - - url: "http://www.example.com" - method: "GET" - status_code: 200 - - - url: "http://www.openstack.org" - method: "GET" - status_code: 200 - runner: - type: "constant" - times: 20 - concurrency: 5 diff --git a/samples/tasks/scenarios/requests/check-request.json b/samples/tasks/scenarios/requests/check-request.json deleted file mode 100644 index 85196c3e..00000000 --- a/samples/tasks/scenarios/requests/check-request.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "HttpRequests.check_request": [ - { - "args": { - "url": "http://www.example.com", - "method": "GET", - "status_code": 200, - "allow_redirects": false - }, - "runner": { - "type": "constant", - "times": 20, - "concurrency": 5 - } - } - ] -} diff --git a/samples/tasks/scenarios/requests/check-request.yaml b/samples/tasks/scenarios/requests/check-request.yaml deleted file mode 100644 index 7ed1c047..00000000 --- a/samples/tasks/scenarios/requests/check-request.yaml +++ /dev/null @@ -1,12 +0,0 @@ ---- - HttpRequests.check_request: - - - args: - url: "http://www.example.com" - method: "GET" - status_code: 200 - allow_redirects: False - runner: - type: "constant" - times: 20 - concurrency: 5 diff --git a/samples/tasks/scenarios/sahara/create-and-delete-cluster.json b/samples/tasks/scenarios/sahara/create-and-delete-cluster.json deleted file mode 100644 index 0f96fd41..00000000 --- a/samples/tasks/scenarios/sahara/create-and-delete-cluster.json +++ /dev/null @@ -1,36 +0,0 @@ -{ - "SaharaClusters.create_and_delete_cluster": [ - { - "args": { - "master_flavor": { - "name": "m1.large" - }, - "worker_flavor": { - "name": "m1.medium" - }, - "workers_count": 3, - "plugin_name": "vanilla", - "hadoop_version": "2.3.0", - "auto_security_group": true - }, - "runner": { - "type": "constant", - "times": 4, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - }, - "sahara_image": { - "image_url": "http://sahara-files.mirantis.com/sahara-icehouse-vanilla-2.3.0-ubuntu-13.10.qcow2", - "username": "ubuntu", - "plugin_name": "vanilla", - "hadoop_version": "2.3.0" - }, - "network": {} - } - } - ] -} \ No newline at end of file diff --git a/samples/tasks/scenarios/sahara/create-and-delete-cluster.yaml b/samples/tasks/scenarios/sahara/create-and-delete-cluster.yaml deleted file mode 100644 index 43fa1246..00000000 --- a/samples/tasks/scenarios/sahara/create-and-delete-cluster.yaml +++ /dev/null @@ -1,26 +0,0 @@ ---- - SaharaClusters.create_and_delete_cluster: - - - args: - master_flavor: - name: "m1.large" - worker_flavor: - name: "m1.medium" - workers_count: 3 - plugin_name: "vanilla" - hadoop_version: "2.3.0" - auto_security_group: True - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 1 - sahara_image: - image_url: "http://sahara-files.mirantis.com/sahara-icehouse-vanilla-2.3.0-ubuntu-13.10.qcow2" - username: "ubuntu" - plugin_name: "vanilla" - hadoop_version: "2.3.0" - network: {} diff --git a/samples/tasks/scenarios/sahara/create-and-list-node-group-templates.json b/samples/tasks/scenarios/sahara/create-and-list-node-group-templates.json deleted file 
mode 100644 index fd5ebc34..00000000 --- a/samples/tasks/scenarios/sahara/create-and-list-node-group-templates.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "SaharaNodeGroupTemplates.create_and_list_node_group_templates": [ - { - "args": { - "flavor": { - "name": "m1.small" - } - }, - "runner": { - "type": "constant", - "times": 100, - "concurrency": 10 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - } - } - } - ] -} \ No newline at end of file diff --git a/samples/tasks/scenarios/sahara/create-and-list-node-group-templates.yaml b/samples/tasks/scenarios/sahara/create-and-list-node-group-templates.yaml deleted file mode 100644 index 2b69b8c8..00000000 --- a/samples/tasks/scenarios/sahara/create-and-list-node-group-templates.yaml +++ /dev/null @@ -1,14 +0,0 @@ ---- - SaharaNodeGroupTemplates.create_and_list_node_group_templates: - - - args: - flavor: - name: "m1.small" - runner: - type: "constant" - times: 100 - concurrency: 10 - context: - users: - tenants: 1 - users_per_tenant: 1 diff --git a/samples/tasks/scenarios/sahara/create-delete-node-group-templates.json b/samples/tasks/scenarios/sahara/create-delete-node-group-templates.json deleted file mode 100644 index 3af8c153..00000000 --- a/samples/tasks/scenarios/sahara/create-delete-node-group-templates.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "SaharaNodeGroupTemplates.create_delete_node_group_templates": [ - { - "args": { - "flavor": { - "name": "m1.small" - } - }, - "runner": { - "type": "constant", - "times": 100, - "concurrency": 10 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - } - } - } - ] -} \ No newline at end of file diff --git a/samples/tasks/scenarios/sahara/create-delete-node-group-templates.yaml b/samples/tasks/scenarios/sahara/create-delete-node-group-templates.yaml deleted file mode 100644 index 960bd74c..00000000 --- a/samples/tasks/scenarios/sahara/create-delete-node-group-templates.yaml +++ /dev/null @@ -1,14 +0,0 @@ ---- - SaharaNodeGroupTemplates.create_delete_node_group_templates: - - - args: - flavor: - name: "m1.small" - runner: - type: "constant" - times: 100 - concurrency: 10 - context: - users: - tenants: 1 - users_per_tenant: 1 diff --git a/samples/tasks/scenarios/sahara/create-scale-delete-cluster.json b/samples/tasks/scenarios/sahara/create-scale-delete-cluster.json deleted file mode 100644 index 401e9648..00000000 --- a/samples/tasks/scenarios/sahara/create-scale-delete-cluster.json +++ /dev/null @@ -1,37 +0,0 @@ -{ - "SaharaClusters.create_scale_delete_cluster": [ - { - "args": { - "master_flavor": { - "name": "m1.large" - }, - "worker_flavor": { - "name": "m1.medium" - }, - "workers_count": 3, - "deltas": [1, -1, 1, -1], - "plugin_name": "vanilla", - "hadoop_version": "2.3.0", - "auto_security_group": true - }, - "runner": { - "type": "constant", - "times": 4, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - }, - "sahara_image": { - "image_url": "http://sahara-files.mirantis.com/sahara-icehouse-vanilla-2.3.0-ubuntu-13.10.qcow2", - "username": "ubuntu", - "plugin_name": "vanilla", - "hadoop_version": "2.3.0" - }, - "network": {} - } - } - ] -} \ No newline at end of file diff --git a/samples/tasks/scenarios/sahara/create-scale-delete-cluster.yaml b/samples/tasks/scenarios/sahara/create-scale-delete-cluster.yaml deleted file mode 100644 index 0ae8fd85..00000000 --- a/samples/tasks/scenarios/sahara/create-scale-delete-cluster.yaml +++ /dev/null @@ -1,31 +0,0 @@ ---- - SaharaClusters.create_scale_delete_cluster: - - 
- args: - master_flavor: - name: "m1.large" - worker_flavor: - name: "m1.medium" - workers_count: 3 - deltas: - - 1 - - -1 - - 1 - - -1 - plugin_name: "vanilla" - hadoop_version: "2.3.0" - auto_security_group: True - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 1 - sahara_image: - image_url: "http://sahara-files.mirantis.com/sahara-icehouse-vanilla-2.3.0-ubuntu-13.10.qcow2" - username: "ubuntu" - plugin_name: "vanilla" - hadoop_version: "2.3.0" - network: {} diff --git a/samples/tasks/scenarios/sahara/jobs/dfsio-job-sequence-scaling.json b/samples/tasks/scenarios/sahara/jobs/dfsio-job-sequence-scaling.json deleted file mode 100644 index 2141682c..00000000 --- a/samples/tasks/scenarios/sahara/jobs/dfsio-job-sequence-scaling.json +++ /dev/null @@ -1,61 +0,0 @@ -{ - "SaharaJob.create_launch_job_sequence_with_scaling": [ - { - "args": { - "jobs": [ - { - "job_type": "Java", - "configs": { - "configs": { - "edp.java.main_class": "org.apache.hadoop.fs.TestDFSIO" - }, - "args": ["-write", "-nrFiles", "10", "-fileSize", "100"] - } - }, { - "job_type": "Java", - "configs": { - "configs": { - "edp.java.main_class": "org.apache.hadoop.fs.TestDFSIO" - }, - "args": ["-read", "-nrFiles", "10", "-fileSize", "100"] - } - } - ], - "deltas": [2, 2, 2] - }, - "runner": { - "type": "serial", - "times": 1 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - }, - "sahara_image": { - "image_url": "http://sahara-files.mirantis.com/mos70/sahara-kilo-vanilla-2.6.0-ubuntu-14.04.qcow2", - "username": "ubuntu", - "plugin_name": "vanilla", - "hadoop_version": "2.6.0" - }, - "sahara_job_binaries": { - "libs": [ - { - "name": "tests.jar", - "download_url": "http://repo1.maven.org/maven2/org/apache/hadoop/hadoop-hdfs/2.6.0/hadoop-hdfs-2.6.0-tests.jar" - } - ] - }, - "sahara_cluster": { - "master_flavor_id": "4", - "worker_flavor_id": "3", - "workers_count": 3, - "plugin_name": "vanilla", - "hadoop_version": "2.6.0", - "auto_security_group": true - }, - "network": {} - } - } - ] -} diff --git a/samples/tasks/scenarios/sahara/jobs/dfsio-job-sequence-scaling.yaml b/samples/tasks/scenarios/sahara/jobs/dfsio-job-sequence-scaling.yaml deleted file mode 100644 index 4b94f4bf..00000000 --- a/samples/tasks/scenarios/sahara/jobs/dfsio-job-sequence-scaling.yaml +++ /dev/null @@ -1,56 +0,0 @@ ---- - SaharaJob.create_launch_job_sequence_with_scaling: - - - args: - jobs: - - - job_type: "Java" - configs: - configs: - edp.java.main_class: "org.apache.hadoop.fs.TestDFSIO" - args: - - "-write" - - "-nrFiles" - - "10" - - "-fileSize" - - "100" - - - job_type: "Java" - configs: - configs: - edp.java.main_class: "org.apache.hadoop.fs.TestDFSIO" - args: - - "-read" - - "-nrFiles" - - "10" - - "-fileSize" - - "100" - deltas: - - 2 - - 2 - - 2 - runner: - type: "serial" - times: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - sahara_image: - image_url: "http://sahara-files.mirantis.com/mos70/sahara-kilo-vanilla-2.6.0-ubuntu-14.04.qcow2" - username: "ubuntu" - plugin_name: "vanilla" - hadoop_version: "2.6.0" - sahara_job_binaries: - libs: - - - name: "tests.jar" - download_url: "http://repo1.maven.org/maven2/org/apache/hadoop/hadoop-hdfs/2.6.0/hadoop-hdfs-2.6.0-tests.jar" - sahara_cluster: - master_flavor_id: "4" - worker_flavor_id: "3" - workers_count: 3 - plugin_name: "vanilla" - hadoop_version: "2.6.0" - auto_security_group: True - network: {} diff --git a/samples/tasks/scenarios/sahara/jobs/dfsio-job-sequence.json 
b/samples/tasks/scenarios/sahara/jobs/dfsio-job-sequence.json deleted file mode 100644 index 7447db6c..00000000 --- a/samples/tasks/scenarios/sahara/jobs/dfsio-job-sequence.json +++ /dev/null @@ -1,61 +0,0 @@ -{ - "SaharaJob.create_launch_job_sequence": [ - { - "args": { - "jobs": [ - { - "job_type": "Java", - "configs": { - "configs": { - "edp.java.main_class": "org.apache.hadoop.fs.TestDFSIO" - }, - "args": ["-write", "-nrFiles", "10", "-fileSize", "100"] - } - }, { - "job_type": "Java", - "configs": { - "configs": { - "edp.java.main_class": "org.apache.hadoop.fs.TestDFSIO" - }, - "args": ["-read", "-nrFiles", "10", "-fileSize", "100"] - } - } - ] - }, - "runner": { - "type": "constant", - "times": 4, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - }, - "sahara_image": { - "image_url": "http://sahara-files.mirantis.com/mos70/sahara-kilo-vanilla-2.6.0-ubuntu-14.04.qcow2", - "username": "ubuntu", - "plugin_name": "vanilla", - "hadoop_version": "2.6.0" - }, - "sahara_job_binaries": { - "libs": [ - { - "name": "tests.jar", - "download_url": "http://repo1.maven.org/maven2/org/apache/hadoop/hadoop-hdfs/2.6.0/hadoop-hdfs-2.6.0-tests.jar" - } - ] - }, - "sahara_cluster": { - "master_flavor_id": "4", - "worker_flavor_id": "3", - "workers_count": 3, - "plugin_name": "vanilla", - "hadoop_version": "2.6.0", - "auto_security_group": true - }, - "network": {} - } - } - ] -} diff --git a/samples/tasks/scenarios/sahara/jobs/dfsio-job-sequence.yaml b/samples/tasks/scenarios/sahara/jobs/dfsio-job-sequence.yaml deleted file mode 100644 index 5f1b4f2e..00000000 --- a/samples/tasks/scenarios/sahara/jobs/dfsio-job-sequence.yaml +++ /dev/null @@ -1,53 +0,0 @@ ---- - SaharaJob.create_launch_job_sequence: - - - args: - jobs: - - - job_type: "Java" - configs: - configs: - edp.java.main_class: "org.apache.hadoop.fs.TestDFSIO" - args: - - "-write" - - "-nrFiles" - - "10" - - "-fileSize" - - "100" - - - job_type: "Java" - configs: - configs: - edp.java.main_class: "org.apache.hadoop.fs.TestDFSIO" - args: - - "-read" - - "-nrFiles" - - "10" - - "-fileSize" - - "100" - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 1 - sahara_image: - image_url: "http://sahara-files.mirantis.com/mos70/sahara-kilo-vanilla-2.6.0-ubuntu-14.04.qcow2" - username: "ubuntu" - plugin_name: "vanilla" - hadoop_version: "2.6.0" - sahara_job_binaries: - libs: - - - name: "tests.jar" - download_url: "http://repo1.maven.org/maven2/org/apache/hadoop/hadoop-hdfs/2.6.0/hadoop-hdfs-2.6.0-tests.jar" - sahara_cluster: - master_flavor_id: "4" - worker_flavor_id: "3" - workers_count: 3 - plugin_name: "vanilla" - hadoop_version: "2.6.0" - auto_security_group: True - network: {} diff --git a/samples/tasks/scenarios/sahara/jobs/java-action-job.json b/samples/tasks/scenarios/sahara/jobs/java-action-job.json deleted file mode 100644 index bb17363b..00000000 --- a/samples/tasks/scenarios/sahara/jobs/java-action-job.json +++ /dev/null @@ -1,49 +0,0 @@ -{ - "SaharaJob.create_launch_job": [ - { - "args": { - "job_type": "Java", - "configs": { - "configs": { - "edp.java.main_class": "org.apache.hadoop.examples.PiEstimator" - }, - "args": ["10", "10"] - } - }, - "runner": { - "type": "constant", - "times": 4, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - }, - "sahara_image": { - "image_url": "http://sahara-files.mirantis.com/mos70/sahara-kilo-vanilla-2.6.0-ubuntu-14.04.qcow2", - "username": "ubuntu", - 
"plugin_name": "vanilla", - "hadoop_version": "2.6.0" - }, - "sahara_job_binaries": { - "libs": [ - { - "name": "examples.jar", - "download_url": "http://repo1.maven.org/maven2/org/apache/hadoop/hadoop-mapreduce-examples/2.6.0/hadoop-mapreduce-examples-2.6.0.jar" - } - ] - }, - "sahara_cluster": { - "master_flavor_id": "4", - "worker_flavor_id": "3", - "workers_count": 3, - "plugin_name": "vanilla", - "hadoop_version": "2.6.0", - "auto_security_group": true - }, - "network": {} - } - } - ] -} diff --git a/samples/tasks/scenarios/sahara/jobs/java-action-job.yaml b/samples/tasks/scenarios/sahara/jobs/java-action-job.yaml deleted file mode 100644 index f98cfdf3..00000000 --- a/samples/tasks/scenarios/sahara/jobs/java-action-job.yaml +++ /dev/null @@ -1,37 +0,0 @@ ---- - SaharaJob.create_launch_job: - - - args: - job_type: "Java" - configs: - configs: - edp.java.main_class: "org.apache.hadoop.examples.PiEstimator" - args: - - "10" - - "10" - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 1 - sahara_image: - image_url: "http://sahara-files.mirantis.com/mos70/sahara-kilo-vanilla-2.6.0-ubuntu-14.04.qcow2" - username: "ubuntu" - plugin_name: "vanilla" - hadoop_version: "2.6.0" - sahara_job_binaries: - libs: - - - name: "examples.jar" - download_url: "http://repo1.maven.org/maven2/org/apache/hadoop/hadoop-mapreduce-examples/2.6.0/hadoop-mapreduce-examples-2.6.0.jar" - sahara_cluster: - master_flavor_id: "4" - worker_flavor_id: "3" - workers_count: 3 - plugin_name: "vanilla" - hadoop_version: "2.6.0" - auto_security_group: True - network: {} diff --git a/samples/tasks/scenarios/sahara/jobs/pig-script-job.json b/samples/tasks/scenarios/sahara/jobs/pig-script-job.json deleted file mode 100644 index 69d12be6..00000000 --- a/samples/tasks/scenarios/sahara/jobs/pig-script-job.json +++ /dev/null @@ -1,58 +0,0 @@ -{ - "SaharaJob.create_launch_job": [ - { - "args": { - "job_type": "Pig", - "configs": {} - }, - "runner": { - "type": "constant", - "times": 4, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - }, - "sahara_image": { - "image_url": "http://sahara-files.mirantis.com/mos70/sahara-kilo-vanilla-2.6.0-ubuntu-14.04.qcow2", - "username": "ubuntu", - "plugin_name": "vanilla", - "hadoop_version": "2.6.0" - }, - "sahara_job_binaries": { - "mains": [ - { - "name": "example.pig", - "download_url": "https://raw.githubusercontent.com/openstack/sahara/master/etc/edp-examples/pig-job/example.pig" - } - ], - "libs": [ - { - "name": "udf.jar", - "download_url": "https://github.com/openstack/sahara/blob/master/etc/edp-examples/pig-job/udf.jar?raw=true" - } - ] - }, - "sahara_input_data_sources": { - "input_type": "hdfs", - "input_url": "/" - }, - "sahara_output_data_sources": { - "output_type": "hdfs", - "output_url_prefix": "/out_" - }, - "sahara_cluster": { - "master_flavor_id": "4", - "worker_flavor_id": "3", - "workers_count": 3, - "plugin_name": "vanilla", - "hadoop_version": "2.6.0", - "auto_security_group": true - }, - "network": {} - } - } - ] -} diff --git a/samples/tasks/scenarios/sahara/jobs/pig-script-job.yaml b/samples/tasks/scenarios/sahara/jobs/pig-script-job.yaml deleted file mode 100644 index f19a080c..00000000 --- a/samples/tasks/scenarios/sahara/jobs/pig-script-job.yaml +++ /dev/null @@ -1,42 +0,0 @@ ---- - SaharaJob.create_launch_job: - - - args: - job_type: "Pig" - configs: {} - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 1 - 
sahara_image: - image_url: "http://sahara-files.mirantis.com/mos70/sahara-kilo-vanilla-2.6.0-ubuntu-14.04.qcow2" - username: "ubuntu" - plugin_name: "vanilla" - hadoop_version: "2.6.0" - sahara_job_binaries: - mains: - - - name: "example.pig" - download_url: "https://raw.githubusercontent.com/openstack/sahara/master/etc/edp-examples/pig-job/example.pig" - libs: - - - name: "udf.jar" - download_url: "https://github.com/openstack/sahara/blob/master/etc/edp-examples/pig-job/udf.jar?raw=true" - sahara_input_data_sources: - input_type: "hdfs" - input_url: "/" - sahara_output_data_sources: - output_type: "hdfs" - output_url_prefix: "/out_" - sahara_cluster: - master_flavor_id: "4" - worker_flavor_id: "3" - workers_count: 3 - plugin_name: "vanilla" - hadoop_version: "2.6.0" - auto_security_group: True - network: {} diff --git a/samples/tasks/scenarios/senlin/create-and-delete-profile-cluster.json b/samples/tasks/scenarios/senlin/create-and-delete-profile-cluster.json deleted file mode 100644 index fc602523..00000000 --- a/samples/tasks/scenarios/senlin/create-and-delete-profile-cluster.json +++ /dev/null @@ -1,34 +0,0 @@ -{ - "SenlinClusters.create_and_delete_cluster": [ - { - "args": { - "desired_capacity": 3, - "min_size": 0, - "max_size": 5 - }, - "runner": { - "type": "constant", - "times": 3, - "concurrency": 1 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - }, - "profiles": { - "type": "os.nova.server", - "version": "1.0", - "properties": { - "name": "cirros_server", - "flavor": 1, - "image": "cirros-0.3.5-x86_64-disk", - "networks": [ - { "network": "private" } - ] - } - } - } - } - ] -} diff --git a/samples/tasks/scenarios/senlin/create-and-delete-profile-cluster.yaml b/samples/tasks/scenarios/senlin/create-and-delete-profile-cluster.yaml deleted file mode 100644 index cbf43beb..00000000 --- a/samples/tasks/scenarios/senlin/create-and-delete-profile-cluster.yaml +++ /dev/null @@ -1,24 +0,0 @@ ---- - SenlinClusters.create_and_delete_cluster: - - - args: - desired_capacity: 3 - min_size: 0 - max_size: 5 - runner: - type: "constant" - times: 3 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - profiles: - type: os.nova.server - version: "1.0" - properties: - name: cirros_server - flavor: 1 - image: "cirros-0.3.5-x86_64-disk" - networks: - - network: private diff --git a/samples/tasks/scenarios/swift/create-container-and-object-then-delete-all.json b/samples/tasks/scenarios/swift/create-container-and-object-then-delete-all.json deleted file mode 100644 index 02b67062..00000000 --- a/samples/tasks/scenarios/swift/create-container-and-object-then-delete-all.json +++ /dev/null @@ -1,24 +0,0 @@ -{ - "SwiftObjects.create_container_and_object_then_delete_all": [ - { - "args": { - "objects_per_container": 5, - "object_size": 102400 - }, - "runner": { - "type": "constant", - "times": 4, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - }, - "roles": [ - "admin" - ] - } - } - ] -} diff --git a/samples/tasks/scenarios/swift/create-container-and-object-then-delete-all.yaml b/samples/tasks/scenarios/swift/create-container-and-object-then-delete-all.yaml deleted file mode 100644 index 510af158..00000000 --- a/samples/tasks/scenarios/swift/create-container-and-object-then-delete-all.yaml +++ /dev/null @@ -1,16 +0,0 @@ ---- - SwiftObjects.create_container_and_object_then_delete_all: - - - args: - objects_per_container: 5 - object_size: 102400 - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - 
users: - tenants: 1 - users_per_tenant: 1 - roles: - - "admin" diff --git a/samples/tasks/scenarios/swift/create-container-and-object-then-download-object.json b/samples/tasks/scenarios/swift/create-container-and-object-then-download-object.json deleted file mode 100644 index 7b988e4c..00000000 --- a/samples/tasks/scenarios/swift/create-container-and-object-then-download-object.json +++ /dev/null @@ -1,24 +0,0 @@ -{ - "SwiftObjects.create_container_and_object_then_download_object": [ - { - "args": { - "objects_per_container": 5, - "object_size": 1024 - }, - "runner": { - "type": "constant", - "times": 6, - "concurrency": 3 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - }, - "roles": [ - "admin" - ] - } - } - ] -} diff --git a/samples/tasks/scenarios/swift/create-container-and-object-then-download-object.yaml b/samples/tasks/scenarios/swift/create-container-and-object-then-download-object.yaml deleted file mode 100644 index 6ea50cf6..00000000 --- a/samples/tasks/scenarios/swift/create-container-and-object-then-download-object.yaml +++ /dev/null @@ -1,16 +0,0 @@ ---- - SwiftObjects.create_container_and_object_then_download_object: - - - args: - objects_per_container: 5 - object_size: 1024 - runner: - type: "constant" - times: 6 - concurrency: 3 - context: - users: - tenants: 1 - users_per_tenant: 1 - roles: - - "admin" diff --git a/samples/tasks/scenarios/swift/create-container-and-object-then-list-objects.json b/samples/tasks/scenarios/swift/create-container-and-object-then-list-objects.json deleted file mode 100644 index f807d4b6..00000000 --- a/samples/tasks/scenarios/swift/create-container-and-object-then-list-objects.json +++ /dev/null @@ -1,24 +0,0 @@ -{ - "SwiftObjects.create_container_and_object_then_list_objects": [ - { - "args": { - "objects_per_container": 2, - "object_size": 5120 - }, - "runner": { - "type": "constant", - "times": 2, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - }, - "roles": [ - "admin" - ] - } - } - ] -} diff --git a/samples/tasks/scenarios/swift/create-container-and-object-then-list-objects.yaml b/samples/tasks/scenarios/swift/create-container-and-object-then-list-objects.yaml deleted file mode 100644 index bc01aa4b..00000000 --- a/samples/tasks/scenarios/swift/create-container-and-object-then-list-objects.yaml +++ /dev/null @@ -1,16 +0,0 @@ ---- - SwiftObjects.create_container_and_object_then_list_objects: - - - args: - objects_per_container: 2 - object_size: 5120 - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 1 - roles: - - "admin" diff --git a/samples/tasks/scenarios/swift/list-and-download-objects-in-containers.json b/samples/tasks/scenarios/swift/list-and-download-objects-in-containers.json deleted file mode 100644 index 8ed1b0ee..00000000 --- a/samples/tasks/scenarios/swift/list-and-download-objects-in-containers.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "SwiftObjects.list_and_download_objects_in_containers": [ - { - "runner": { - "type": "constant", - "times": 2, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - }, - "roles": [ - "admin" - ], - "swift_objects": { - "containers_per_tenant": 2, - "objects_per_container": 5, - "object_size": 10240 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/swift/list-and-download-objects-in-containers.yaml b/samples/tasks/scenarios/swift/list-and-download-objects-in-containers.yaml deleted file mode 100644 index a11aa6b8..00000000 
--- a/samples/tasks/scenarios/swift/list-and-download-objects-in-containers.yaml +++ /dev/null @@ -1,17 +0,0 @@ ---- - SwiftObjects.list_and_download_objects_in_containers: - - - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 1 - roles: - - "admin" - swift_objects: - containers_per_tenant: 2 - objects_per_container: 5 - object_size: 10240 diff --git a/samples/tasks/scenarios/swift/list-objects-in-containers.json b/samples/tasks/scenarios/swift/list-objects-in-containers.json deleted file mode 100644 index 576561dc..00000000 --- a/samples/tasks/scenarios/swift/list-objects-in-containers.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "SwiftObjects.list_objects_in_containers": [ - { - "runner": { - "type": "constant", - "times": 6, - "concurrency": 3 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - }, - "roles": [ - "admin" - ], - "swift_objects": { - "containers_per_tenant": 1, - "objects_per_container": 10, - "object_size": 1024 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/swift/list-objects-in-containers.yaml b/samples/tasks/scenarios/swift/list-objects-in-containers.yaml deleted file mode 100644 index 2f27638d..00000000 --- a/samples/tasks/scenarios/swift/list-objects-in-containers.yaml +++ /dev/null @@ -1,17 +0,0 @@ ---- - SwiftObjects.list_objects_in_containers: - - - runner: - type: "constant" - times: 6 - concurrency: 3 - context: - users: - tenants: 1 - users_per_tenant: 1 - roles: - - "admin" - swift_objects: - containers_per_tenant: 1 - objects_per_container: 10 - object_size: 1024 diff --git a/samples/tasks/scenarios/vm/boot-runcommand-delete-custom-image.json b/samples/tasks/scenarios/vm/boot-runcommand-delete-custom-image.json deleted file mode 100644 index c7670097..00000000 --- a/samples/tasks/scenarios/vm/boot-runcommand-delete-custom-image.json +++ /dev/null @@ -1,37 +0,0 @@ -{ - "VMTasks.boot_runcommand_delete": [ - { - "args": { - "flavor": {"name": "m1.small"}, - "command": { - "remote_path": "./instance_test.sh" - }, - "username": "root", - "userdata": "#cloud-config\ndisable_root: 0\nssh_pwauth: 1" - }, - "runner": { - "type": "constant", - "times": 1, - "concurrency": 1, - "timeout": 3000 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - }, - "image_command_customizer": { - "image": {"name": "Fedora-x86_64-20-20140618-sda"}, - "flavor": {"name": "m1.small"}, - "command": { - "local_path": "rally-jobs/extra/install_benchmark.sh", - "remote_path": "./install_benchmark.sh" - }, - "username": "root", - "userdata": "#cloud-config\ndisable_root: 0\nssh_pwauth: 1" - }, - "network": {} - } - } - ] -} diff --git a/samples/tasks/scenarios/vm/boot-runcommand-delete-custom-image.yaml b/samples/tasks/scenarios/vm/boot-runcommand-delete-custom-image.yaml deleted file mode 100644 index f8b17fa4..00000000 --- a/samples/tasks/scenarios/vm/boot-runcommand-delete-custom-image.yaml +++ /dev/null @@ -1,30 +0,0 @@ ---- - VMTasks.boot_runcommand_delete: - - - args: - command: - remote_path: "./instance_test.sh" - flavor: - name: m1.small - userdata: "#cloud-config\ndisable_root: 0\nssh_pwauth: 1" - username: root - runner: - concurrency: 1 - timeout: 3000 - times: 1 - type: "constant" - context: - image_command_customizer: - command: - local_path: "rally-jobs/extra/install_benchmark.sh" - remote_path: "./install_benchmark.sh" - flavor: - name: m1.small - image: - name: "Fedora-x86_64-20-20140618-sda" - userdata: "#cloud-config\ndisable_root: 0\nssh_pwauth: 1" - username: root 
- network: {} - users: - tenants: 1 - users_per_tenant: 1 diff --git a/samples/tasks/scenarios/vm/boot-runcommand-delete-script-inline.json b/samples/tasks/scenarios/vm/boot-runcommand-delete-script-inline.json deleted file mode 100644 index 1de2fd5c..00000000 --- a/samples/tasks/scenarios/vm/boot-runcommand-delete-script-inline.json +++ /dev/null @@ -1,35 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} -{ - "VMTasks.boot_runcommand_delete": [ - { - "args": { - "flavor": { - "name": "{{flavor_name}}" - }, - "image": { - "name": "^cirros.*-disk$" - }, - "floating_network": "public", - "force_delete": false, - "command": { - "interpreter": "/bin/sh", - "script_inline": "ls -la" - }, - "username": "cirros" - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 2 - }, - "network": { - } - } - } - ] -} diff --git a/samples/tasks/scenarios/vm/boot-runcommand-delete-script-inline.yaml b/samples/tasks/scenarios/vm/boot-runcommand-delete-script-inline.yaml deleted file mode 100644 index a660eb31..00000000 --- a/samples/tasks/scenarios/vm/boot-runcommand-delete-script-inline.yaml +++ /dev/null @@ -1,24 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} ---- - VMTasks.boot_runcommand_delete: - - - args: - flavor: - name: "{{flavor_name}}" - image: - name: "^cirros.*-disk$" - floating_network: "public" - force_delete: false - command: - interpreter: "/bin/sh" - script_inline: "ls -la" - username: "cirros" - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 3 - users_per_tenant: 2 - network: {} diff --git a/samples/tasks/scenarios/vm/boot-runcommand-delete-with-disk.json b/samples/tasks/scenarios/vm/boot-runcommand-delete-with-disk.json deleted file mode 100644 index 7f40604c..00000000 --- a/samples/tasks/scenarios/vm/boot-runcommand-delete-with-disk.json +++ /dev/null @@ -1,38 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} -{ - "VMTasks.boot_runcommand_delete": [ - { - "args": { - "flavor": { - "name": "{{flavor_name}}" - }, - "image": { - "name": "^cirros.*-disk$" - }, - "volume_args": { - "size": 2 - }, - "fixed_network": "private", - "floating_network": "public", - "use_floating_ip": true, - "force_delete": false, - "command": { - "interpreter": "/bin/sh", - "script_file": "samples/tasks/support/instance_test.sh" - }, - "username": "cirros" - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 2 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/vm/boot-runcommand-delete-with-disk.yaml b/samples/tasks/scenarios/vm/boot-runcommand-delete-with-disk.yaml deleted file mode 100644 index cd79cae1..00000000 --- a/samples/tasks/scenarios/vm/boot-runcommand-delete-with-disk.yaml +++ /dev/null @@ -1,27 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} ---- - VMTasks.boot_runcommand_delete: - - - args: - flavor: - name: "{{flavor_name}}" - image: - name: "^cirros.*-disk$" - volume_args: - size: 2 - fixed_network: "private" - floating_network: "public" - use_floating_ip: true - force_delete: false - command: - interpreter: "/bin/sh" - script_file: "samples/tasks/support/instance_test.sh" - username: "cirros" - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 3 - users_per_tenant: 2 diff --git a/samples/tasks/scenarios/vm/boot-runcommand-delete.json b/samples/tasks/scenarios/vm/boot-runcommand-delete.json deleted file 
mode 100644 index 54f773a1..00000000 --- a/samples/tasks/scenarios/vm/boot-runcommand-delete.json +++ /dev/null @@ -1,35 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} -{ - "VMTasks.boot_runcommand_delete": [ - { - "args": { - "flavor": { - "name": "{{flavor_name}}" - }, - "image": { - "name": "^cirros.*-disk$" - }, - "floating_network": "public", - "force_delete": false, - "command": { - "interpreter": "/bin/sh", - "script_file": "samples/tasks/support/instance_test.sh" - }, - "username": "cirros" - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 2 - }, - "network": { - } - } - } - ] -} diff --git a/samples/tasks/scenarios/vm/boot-runcommand-delete.yaml b/samples/tasks/scenarios/vm/boot-runcommand-delete.yaml deleted file mode 100644 index eaa79cd1..00000000 --- a/samples/tasks/scenarios/vm/boot-runcommand-delete.yaml +++ /dev/null @@ -1,24 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} ---- - VMTasks.boot_runcommand_delete: - - - args: - flavor: - name: "{{flavor_name}}" - image: - name: "^cirros.*-disk$" - floating_network: "public" - force_delete: false - command: - interpreter: "/bin/sh" - script_file: "samples/tasks/support/instance_test.sh" - username: "cirros" - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 3 - users_per_tenant: 2 - network: {} diff --git a/samples/tasks/scenarios/vm/dd-load-test.json b/samples/tasks/scenarios/vm/dd-load-test.json deleted file mode 100644 index 1374ca29..00000000 --- a/samples/tasks/scenarios/vm/dd-load-test.json +++ /dev/null @@ -1,34 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} -{ - "VMTasks.dd_load_test": [ - { - "args": { - "flavor": { - "name": "{{flavor_name}}" - }, - "image": { - "name": "^cirros.*-disk$" - }, - "floating_network": "public", - "force_delete": false, - "command": { - "interpreter": "/bin/sh" - }, - "username": "cirros" - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 2 - }, - "network": { - } - } - } - ] -} diff --git a/samples/tasks/scenarios/vm/dd-load-test.yaml b/samples/tasks/scenarios/vm/dd-load-test.yaml deleted file mode 100644 index 158eaf47..00000000 --- a/samples/tasks/scenarios/vm/dd-load-test.yaml +++ /dev/null @@ -1,23 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} ---- - VMTasks.dd_load_test: - - - args: - flavor: - name: "{{flavor_name}}" - image: - name: "^cirros.*-disk$" - floating_network: "public" - force_delete: false - command: - interpreter: "/bin/sh" - username: "cirros" - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 3 - users_per_tenant: 2 - network: {} \ No newline at end of file diff --git a/samples/tasks/scenarios/watcher/create-audit-and-delete.json b/samples/tasks/scenarios/watcher/create-audit-and-delete.json deleted file mode 100644 index c6814409..00000000 --- a/samples/tasks/scenarios/watcher/create-audit-and-delete.json +++ /dev/null @@ -1,31 +0,0 @@ -{ - "Watcher.create_audit_and_delete": [ - { - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - }, - "audit_templates": { - "audit_templates_per_admin": 5, - "fill_strategy": "round_robin", - "params": [ - { - "goal": { - "name": "dummy" - }, - "strategy": { - "name": "dummy" - } - } - ] - } - } - } - ] -} diff --git 
a/samples/tasks/scenarios/watcher/create-audit-and-delete.yaml b/samples/tasks/scenarios/watcher/create-audit-and-delete.yaml deleted file mode 100644 index 70fb65c7..00000000 --- a/samples/tasks/scenarios/watcher/create-audit-and-delete.yaml +++ /dev/null @@ -1,19 +0,0 @@ ---- - Watcher.create_audit_and_delete: - - - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - audit_templates: - audit_templates_per_admin: 5 - fill_strategy: "round_robin" - params: - - goal: - name: "dummy" - strategy: - name: "dummy" diff --git a/samples/tasks/scenarios/watcher/create-audit-template-and-delete.json b/samples/tasks/scenarios/watcher/create-audit-template-and-delete.json deleted file mode 100644 index 1af665d2..00000000 --- a/samples/tasks/scenarios/watcher/create-audit-template-and-delete.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "Watcher.create_audit_template_and_delete": [ - { - "args": { - "goal": { - "name": "dummy" - }, - "strategy": { - "name": "dummy" - } - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - } - } - ] -} diff --git a/samples/tasks/scenarios/watcher/create-audit-template-and-delete.yaml b/samples/tasks/scenarios/watcher/create-audit-template-and-delete.yaml deleted file mode 100644 index f51334ab..00000000 --- a/samples/tasks/scenarios/watcher/create-audit-template-and-delete.yaml +++ /dev/null @@ -1,12 +0,0 @@ ---- - Watcher.create_audit_template_and_delete: - - - args: - goal: - name: "dummy" - strategy: - name: "dummy" - runner: - type: "constant" - times: 10 - concurrency: 2 diff --git a/samples/tasks/scenarios/watcher/list-audit-templates.json b/samples/tasks/scenarios/watcher/list-audit-templates.json deleted file mode 100644 index 122b1da1..00000000 --- a/samples/tasks/scenarios/watcher/list-audit-templates.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "Watcher.list_audit_templates": [ - { - "runner": { - "type": "constant", - "times": 10, - "concurrency": 1 - }, - "context": { - "audit_templates": { - "audit_templates_per_admin": 5, - "fill_strategy": "random", - "params": [ - { - "goal": { - "name": "workload_balancing" - }, - "strategy": { - "name": "workload_stabilization" - } - }, - { - "goal": { - "name": "dummy" - }, - "strategy": { - "name": "dummy" - } - } - ] - } - } - } - ] -} diff --git a/samples/tasks/scenarios/watcher/list-audit-templates.yaml b/samples/tasks/scenarios/watcher/list-audit-templates.yaml deleted file mode 100644 index 9174b081..00000000 --- a/samples/tasks/scenarios/watcher/list-audit-templates.yaml +++ /dev/null @@ -1,20 +0,0 @@ ---- - Watcher.list_audit_templates: - - - runner: - type: "constant" - times: 10 - concurrency: 1 - context: - audit_templates: - audit_templates_per_admin: 5 - fill_strategy: "random" - params: - - goal: - name: "workload_balancing" - strategy: - name: "workload_stabilization" - - goal: - name: "dummy" - strategy: - name: "dummy" diff --git a/samples/tasks/scenarios/workload/wordpress.json b/samples/tasks/scenarios/workload/wordpress.json deleted file mode 100644 index 0476bd22..00000000 --- a/samples/tasks/scenarios/workload/wordpress.json +++ /dev/null @@ -1,48 +0,0 @@ -{ - "VMTasks.runcommand_heat": [ - { - "runner": { - "type": "constant", - "concurrency": 1, - "timeout": 3000, - "times": 1 - }, - "args": { - "files": { - "wp-instances.yaml": "rally-jobs/extra/workload/wp-instances.yaml" - }, - "workload": { - "username": "fedora", - "resource": [ - "rally.plugins.workload", - "siege.py" - ] - }, - "template": 
"rally-jobs/extra/workload/wordpress_heat_template.yaml", - "parameters": { - "router_id": "c497caa1-9d73-402b-bcd1-cc269e9af29e", - "instance_type": "gig", - "wp_image": "fedora", - "network_id": "9d477754-e9ba-4560-9b2b-9ce9d36638ce", - "image": "fedora", - "wp_instance_type": "gig", - "wp_instances_count": 2 - } - }, - "context": { - "flavors": [ - { - "vcpus": 1, - "disk": 4, - "ram": 1024, - "name": "gig" - } - ], - "users": { - "users_per_tenant": 1, - "tenants": 1 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/workload/wordpress.yaml b/samples/tasks/scenarios/workload/wordpress.yaml deleted file mode 100644 index 391a83c6..00000000 --- a/samples/tasks/scenarios/workload/wordpress.yaml +++ /dev/null @@ -1,35 +0,0 @@ ---- - - VMTasks.runcommand_heat: - - - args: - workload: - resource: ["rally.plugins.workload", "siege.py"] - username: "fedora" - template: rally-jobs/extra/workload/wordpress_heat_template.yaml - files: - wp-instances.yaml: rally-jobs/extra/workload/wp-instances.yaml - parameters: - wp_instances_count: 2 - wp_instance_type: gig - instance_type: gig - wp_image: fedora - image: fedora - network_id: 9d477754-e9ba-4560-9b2b-9ce9d36638ce - router_id: c497caa1-9d73-402b-bcd1-cc269e9af29e - - context: - users: - tenants: 1 - users_per_tenant: 1 - flavors: - - name: gig - ram: 1024 - disk: 4 - vcpus: 1 - - runner: - concurrency: 1 - timeout: 3000 - times: 1 - type: constant diff --git a/samples/tasks/scenarios/zaqar/create-queue.json b/samples/tasks/scenarios/zaqar/create-queue.json deleted file mode 100644 index 53e7ac94..00000000 --- a/samples/tasks/scenarios/zaqar/create-queue.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "ZaqarBasic.create_queue": [ - { - "args": {}, - "runner": { - "type": "constant", - "times": 100, - "concurrency": 10 - } - } - ] -} diff --git a/samples/tasks/scenarios/zaqar/create-queue.yaml b/samples/tasks/scenarios/zaqar/create-queue.yaml deleted file mode 100644 index 332d068e..00000000 --- a/samples/tasks/scenarios/zaqar/create-queue.yaml +++ /dev/null @@ -1,8 +0,0 @@ ---- - ZaqarBasic.create_queue: - - - args: {} - runner: - type: "constant" - times: 100 - concurrency: 10 diff --git a/samples/tasks/scenarios/zaqar/producer-consumer.json b/samples/tasks/scenarios/zaqar/producer-consumer.json deleted file mode 100644 index ef60740b..00000000 --- a/samples/tasks/scenarios/zaqar/producer-consumer.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "ZaqarBasic.producer_consumer": [ - { - "args": { - "min_msg_count": 50, - "max_msg_count": 200 - }, - "runner": { - "type": "constant", - "times": 100, - "concurrency": 10 - } - } - ] -} diff --git a/samples/tasks/scenarios/zaqar/producer-consumer.yaml b/samples/tasks/scenarios/zaqar/producer-consumer.yaml deleted file mode 100644 index 59f23138..00000000 --- a/samples/tasks/scenarios/zaqar/producer-consumer.yaml +++ /dev/null @@ -1,10 +0,0 @@ ---- - ZaqarBasic.producer_consumer: - - - args: - min_msg_count: 50 - max_msg_count: 200 - runner: - type: "constant" - times: 100 - concurrency: 10 diff --git a/samples/tasks/sla/README.rst b/samples/tasks/sla/README.rst deleted file mode 100644 index 81dd3618..00000000 --- a/samples/tasks/sla/README.rst +++ /dev/null @@ -1,21 +0,0 @@ -SLA Configuration Samples -========================= - -This directory contains SLA configuration samples. - -SLA (Service-level agreement) is set of details for determining compliance -with contracted values such as maximum error rate or minimum response time. 
- -Currently supported criteria: - - -failure_rate ------------------- - -The maximum and/or minimum allowed failure rate, expressed via the max and min sub-keys. - - -max_seconds_per_iteration ------------------------- - -The maximum time, in seconds, allowed for a single iteration. diff --git a/samples/tasks/sla/create-and-delete-user.json b/samples/tasks/sla/create-and-delete-user.json deleted file mode 100644 index 534a097d..00000000 --- a/samples/tasks/sla/create-and-delete-user.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "KeystoneBasic.create_delete_user": [ - { - "args": {}, - "runner": { - "type": "constant", - "times": 100, - "concurrency": 10 - }, - "sla": { - "max_seconds_per_iteration": 4.0, - "failure_rate": {"max": 1}, - "max_avg_duration": 3.0, - "outliers": { - "max": 1, - "min_iterations": 10, - "sigmas": 10 - } - } - } - ] -} diff --git a/samples/tasks/sla/create-and-delete-user.yaml b/samples/tasks/sla/create-and-delete-user.yaml deleted file mode 100644 index e15bd8fb..00000000 --- a/samples/tasks/sla/create-and-delete-user.yaml +++ /dev/null @@ -1,17 +0,0 @@ ---- - KeystoneBasic.create_delete_user: - - - args: {} - runner: - type: "constant" - times: 100 - concurrency: 10 - sla: - max_seconds_per_iteration: 4.0 - failure_rate: - max: 1 - max_avg_duration: 3.0 - outliers: - max: 1 - min_iterations: 10 - sigmas: 10 diff --git a/samples/tasks/support/README.rst b/samples/tasks/support/README.rst deleted file mode 100644 index eef482dc..00000000 --- a/samples/tasks/support/README.rst +++ /dev/null @@ -1,12 +0,0 @@ -instance_linpack.sh -=================== - -instance_linpack.sh kicks off a CPU-intensive workload within an OpenStack instance. -The script returns the average and maximum GFLOPS reported by Linpack, in JSON format. -To run this workload, the VM must have Linpack installed beforehand. - -instance_test.sh -================ - -instance_test.sh loads the server by spawning processes and then outputs -JSON data for HTML report charts, with CPU, memory and disk usage values. diff --git a/samples/tasks/support/instance_linpack.sh b/samples/tasks/support/instance_linpack.sh deleted file mode 100755 index 330180b3..00000000 --- a/samples/tasks/support/instance_linpack.sh +++ /dev/null @@ -1,25 +0,0 @@ -#!/bin/bash -# Location of Linpack binary -LINPACK='/opt/linpack/xlinpack_xeon64' -type -P $LINPACK &>/dev/null || { echo "Linpack Not Found"; exit 1; } - -# Location to create linpack dat file -LINPACK_DAT="$HOME/linpack.dat" - -NUM_CPU=`cat /proc/cpuinfo | grep processor | wc -l` -export OMP_NUM_THREADS=$NUM_CPU -echo "Sample Intel(R) LINPACK data file (from lininput_xeon64)" > ${LINPACK_DAT} -echo "Intel(R) LINPACK data" >> ${LINPACK_DAT} -echo "1 # number of tests" >> ${LINPACK_DAT} -echo "10514 # problem sizes" >> ${LINPACK_DAT} -echo "20016 # leading dimensions" >> ${LINPACK_DAT} -echo "2 # times to run a test " >> ${LINPACK_DAT} -echo "4 # alignment values (in KBytes)" >> ${LINPACK_DAT} -OUTPUT=$(${LINPACK} < ${LINPACK_DAT} | grep -A 1 Average | grep 20016) -AVERAGE=$(echo $OUTPUT | awk '{print $4}') -MAX=$(echo $OUTPUT | awk '{print $5}') - -echo "{ - \"average_gflops\": $AVERAGE, - \"max_gflops\": $MAX - }" diff --git a/samples/tasks/support/instance_test.sh b/samples/tasks/support/instance_test.sh deleted file mode 100755 index e15bd045..00000000 --- a/samples/tasks/support/instance_test.sh +++ /dev/null @@ -1,107 +0,0 @@ -#!/bin/sh -# Load server and output JSON results ready to be processed -# by a Rally scenario - -for ex in awk top grep free tr df dc dd gzip -do - if ! 
type ${ex} >/dev/null - then - echo "Executable required by this script is not available on the server: ${ex}" >&2 - exit 1 - fi -done - -get_used_cpu_percent() { - echo 100 $(top -b -n 1 | grep -i CPU | head -n 1 | awk '{print $8}' | tr -d %) - p | dc -} - -get_used_ram_percent() { - local total=$(free | grep Mem: | awk '{print $2}') - local used=$(free | grep -- -/+\ buffers | awk '{print $3}') - echo ${used} 100 \* ${total} / p | dc -} - -get_used_disk_percent() { - df -P / | grep -v Filesystem | awk '{print $5}' | tr -d % -} - -get_seconds() { - (time -p ${1}) 2>&1 | awk '/real/{print $2}' -} - -complete_load() { - local script_file=${LOAD_SCRIPT_FILE:-/tmp/load.sh} - local stop_file=${LOAD_STOP_FILE:-/tmp/load.stop} - local processes_num=${LOAD_PROCESSES_COUNT:-20} - local size=${LOAD_SIZE_MB:-5} - - cat << EOF > ${script_file} -until test -e ${stop_file} -do dd if=/dev/urandom bs=1M count=${size} 2>/dev/null | gzip >/dev/null ; done -EOF - - local sep - local cpu - local ram - local dis - rm -f ${stop_file} - for i in $(seq ${processes_num}) - do - i=$((i-1)) - sh ${script_file} & - cpu="${cpu}${sep}[${i}, $(get_used_cpu_percent)]" - ram="${ram}${sep}[${i}, $(get_used_ram_percent)]" - dis="${dis}${sep}[${i}, $(get_used_disk_percent)]" - sep=", " - done - > ${stop_file} - cat << EOF - { - "title": "Generate load by spawning processes", - "description": "Each process runs gzip for ${size}M urandom data in a loop", - "chart_plugin": "Lines", - "axis_label": "Number of processes", - "label": "Usage, %", - "data": [ - ["CPU", [${cpu}]], - ["Memory", [${ram}]], - ["Disk", [${dis}]]] - } -EOF -} - -additive_dd() { - local c=${1:-50} # Megabytes - local file=/tmp/dd_test.img - local write=$(get_seconds "dd if=/dev/urandom of=${file} bs=1M count=${c}") - local read=$(get_seconds "dd if=${file} of=/dev/null bs=1M count=${c}") - local gzip=$(get_seconds "gzip ${file}") - rm ${file}.gz - cat << EOF - { - "title": "Write, read and gzip file", - "description": "Using file '${file}', size ${c}Mb.", - "chart_plugin": "StackedArea", - "data": [ - ["write_${c}M", ${write}], - ["read_${c}M", ${read}], - ["gzip_${c}M", ${gzip}]] - }, - { - "title": "Statistics for write/read/gzip", - "chart_plugin": "StatsTable", - "data": [ - ["write_${c}M", ${write}], - ["read_${c}M", ${read}], - ["gzip_${c}M", ${gzip}]] - } - -EOF -} - -cat << EOF -{ - "additive": [$(additive_dd)], - "complete": [$(complete_load)] -} -EOF diff --git a/setup.cfg b/setup.cfg deleted file mode 100644 index 812d3565..00000000 --- a/setup.cfg +++ /dev/null @@ -1,51 +0,0 @@ -[metadata] -name = rally -summary = Benchmark System for OpenStack -description-file = - README.rst -author = OpenStack -author-email = openstack-dev@lists.openstack.org -home-page = http://docs.openstack.org/rally/latest/ -license = Apache License, Version 2.0 -classifier = - Environment :: OpenStack - Intended Audience :: Developers - Intended Audience :: Information Technology - License :: OSI Approved :: Apache Software License - Operating System :: POSIX :: Linux - Programming Language :: Python - Programming Language :: Python :: 2 - Programming Language :: Python :: 2.7 - Programming Language :: Python :: 3 - Programming Language :: Python :: 3.4 - Programming Language :: Python :: 3.5 - -[files] -packages = - rally - -data_files = - etc/bash_completion.d = - etc/rally.bash_completion - -[entry_points] -console_scripts = - rally = rally.cli.main:main - rally-manage = rally.cli.manage:main -oslo.config.opts = - rally = rally.common.opts:list_opts - -[global] 
-setup-hooks = - pbr.hooks.setup_hook - -[extras] -mysql = - PyMySQL>=0.7.6 # MIT -postgres = - psycopg2>=2.5 # LGPL/ZPL - -[build_sphinx] -all_files = 1 -build-dir = doc/build -source-dir = doc/source diff --git a/setup.py b/setup.py deleted file mode 100644 index 782bb21f..00000000 --- a/setup.py +++ /dev/null @@ -1,29 +0,0 @@ -# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT -import setuptools - -# In python < 2.7.4, a lazy loading of package `pbr` will break -# setuptools if some other modules registered functions in `atexit`. -# solution from: http://bugs.python.org/issue15881#msg170215 -try: - import multiprocessing # noqa -except ImportError: - pass - -setuptools.setup( - setup_requires=['pbr>=1.8'], - pbr=True) diff --git a/test-requirements.txt b/test-requirements.txt deleted file mode 100644 index 6670ec97..00000000 --- a/test-requirements.txt +++ /dev/null @@ -1,26 +0,0 @@ -# The order of packages is significant, because pip processes them in the order -# of appearance. Changing the order has an impact on the overall integration -# process, which may cause wedges in the gate later. - -hacking>=0.12.0,!=0.13.0,<=0.13.0 # Apache Software License - -pytest>=2.7,<=3.1.3 # MIT -# py.test plugin for measuring coverage. -pytest-cov>=2.2.1,<=2.5.1 # MIT -# py.test plugin for generating HTML reports -pytest-html>=1.10.0,<=1.15.1 # Mozilla Public License 2.0 (MPL 2.0) -# py.test xdist plugin for distributed testing and loop-on-failing modes -pytest-xdist<=1.15.0 # MIT - -coverage>=4.0,!=4.4,<=4.4.1 # Apache License, Version 2.0 -ddt>=1.0.1,<=1.1.1 -mock>=2.0,<=2.0.0 -python-dateutil>=2.4.2,<=2.6.0 # Simplified BSD -testtools>=1.4.0,<=2.3.0 - -sphinx>=1.6.2,<=1.6.3 # BSD -oslosphinx>=4.7.0,<=4.15.0 # Apache Software License -oslotest>=1.10.0,<=2.16.0 # Apache Software License - -testresources>=0.2.4,<=2.0.1 -testscenarios>=0.4,<=0.5.0 diff --git a/tests/README.rst b/tests/README.rst deleted file mode 100644 index 0a88662d..00000000 --- a/tests/README.rst +++ /dev/null @@ -1,94 +0,0 @@ -Testing -======= - -Please don't hesitate to write tests ;) - - -Unit tests ----------- - -*Files: /tests/unit/** - -The goal of unit tests is to ensure that internal parts of the code work properly. -All internal methods should be fully covered by unit tests with reasonable use of mocks. 
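For illustration, here is a minimal sketch of such a test, written with the *testtools* and *mock* libs that Rally's tests use (the class and method names below are hypothetical, not taken from the Rally tree)::

    import mock
    import testtools


    class DummyScenarioTestCase(testtools.TestCase):

        def test_list_servers_returns_empty(self):
            # Replace the OpenStack clients with a mock so that the test
            # never touches a real endpoint.
            clients = mock.Mock()
            clients.nova.return_value.servers.list.return_value = []

            # A real test would call the scenario method under test here;
            # this sketch only demonstrates the testtools/mock pattern.
            self.assertEqual([], clients.nova().servers.list())
            clients.nova.return_value.servers.list.assert_called_once_with()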
- - -About Rally unit tests: - -- All `unit tests `_ are located inside /tests/unit/* -- Tests are written on top of the *testtools* and *mock* libs -- `Tox `_ is used to run unit tests - - -To run unit tests locally:: - - $ pip install tox - $ tox - -To run py27, py34, py35 or pep8 only:: - - $ tox -e <name> - - # NOTE: <name> is one of py27, py34, py35 or pep8 - -To run py27/py34/py35 against mysql or psql:: - - $ export RALLY_UNITTEST_DB_URL="mysql://user:secret@localhost/rally" - $ tox -epy27 - -To run a specific test on py27/py34/py35:: - - $ tox -e py27 -- tests.unit.test_osclients - -To get test coverage:: - - $ tox -e cover - - # NOTE: Results will be in ./cover/index.html - -To generate docs:: - - $ tox -e docs - - # NOTE: Documentation will be in doc/source/_build/html/index.html - -Functional tests ---------------- - -*Files: /tests/functional/** - -The goal of `functional tests `_ is to check that everything works well together. -Functional tests use only the Rally API and check responses without touching internal parts. - -To run functional tests locally:: - - $ source openrc - $ rally deployment create --fromenv --name testing - $ tox -e cli - - # NOTE: openrc file with OpenStack admin credentials - -The output of every Rally execution is collected under a reports root in a -directory structure like reports_root/ClassName/MethodName_suffix.extension -This functionality is implemented in the tests.functional.utils.Rally.__call__ method. -Use the 'gen_report_path' method of the 'Rally' class if you need an automatically -generated file path and name; this is handy for publishing HTML reports generated -during tests. -The reports root can be passed through the 'REPORTS_ROOT' environment variable; -the default is 'rally-cli-output-files'. - - -Rally CI scripts ----------------- - -*Files: /tests/ci/** - -This directory contains scripts and files related to the Rally CI system. - -Rally Style Commandments ------------------------- - -*File: /tests/hacking/checks.py* - -This module contains Rally-specific hacking rules for checking commandments. - diff --git a/tests/__init__.py b/tests/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/tests/ci/README.rst b/tests/ci/README.rst deleted file mode 100644 index 2cbde87e..00000000 --- a/tests/ci/README.rst +++ /dev/null @@ -1,91 +0,0 @@ -=============== -Rally Gate Jobs -=============== - -For each patch submitted for review on Gerrit, there is a set of tests called -**gate jobs** to be run against it. These tests check whether the Rally code -works correctly after applying the patch and provide additional guarantees that -it won't break the software when it gets merged. Rally gate jobs contain tests -checking the codestyle (via *pep8*), unit test suites, functional tests and a -set of Rally benchmark tasks that are executed against a real *devstack* -deployment. - - -rally-gate.sh ------------- -This script runs a set of real Rally benchmark tasks and fetches their results -in textual / visualized form (available via a special html page by clicking the -corresponding job title in Gerrit). It checks that scenarios don't fail while -being executed against a devstack deployment and also tests SLA criteria to -ensure that benchmark tasks have completed successfully. -Jenkins uses this script by running the 'gate-rally-dsvm-rally' job, -'gate-rally-dsvm-manila-multibackend' job and 'gate-rally-dsvm-neutron-rally' -job. - - -rally_verify.py --------------- -This script runs various "rally verify" commands. 
This set of commands allows us -to run Tempest tests against a live OpenStack cloud and display the verification -results. -The results obtained by running various "rally verify <command>" -commands (including "start", "show" and "list") are compared using the "rally verify -results" command and then saved in csv, html and json formats in the -"rally-verify" directory. -Jenkins uses this script by running the 'gate-rally-dsvm-verify' job. - - -test_install.sh --------------- -This script tests that install_rally.sh, which is used to install Rally, works -correctly. Jenkins tests this script by running it against CentOS 7 -and Ubuntu 16.04 in the corresponding jobs 'gate-rally-install-centos-7' and -'gate-rally-install-ubuntu-xenial'. - - -Jenkins ------- -Jenkins is a Continuous Integration system which works as the scheduler. It -receives events related to proposed changes, triggers tests based on those -events, and reports back. -For each patch that is uploaded for review on Gerrit, Jenkins runs it against -the various Rally gate jobs listed below, along with their functions and local -equivalents: - -* gate-rally-pep8 : code style check - (equal to tox -epep8) -* gate-rally-docs : documentation generation - (equal to tox -edocs) -* gate-rally-python27 : unit tests against python27 - (equal to tox -epy27) -* gate-rally-python35 : unit tests against python35 - (equal to tox -epy35) -* rally-coverage : generates unit test - coverage - (equal to tox -ecover) -* gate-rally-install-centos-7 : testing of test_install.sh - (described above) against - CentOS 7 -* gate-rally-install-ubuntu-xenial : testing of test_install.sh - (described above) against - Ubuntu 16.04 -* gate-rally-dsvm-rally : runs rally-gate.sh - (described above) against - OpenStack deployed by - devstack with nova-network - (It is a standard dsvm job) -* gate-rally-dsvm-manila-multibackend (non-voting) : runs rally-gate.sh against - manila -* gate-rally-dsvm-neutron-rally : runs rally-gate.sh against - OpenStack deployed by - devstack with neutron -* gate-rally-dsvm-cli : runs tests/functional/* - (equal to tox -ecli) -* gate-rally-dsvm-verify (non-voting) : runs rally_verify.py and - tests Rally and Tempest - integration in all possible - ways -* gate-rally-tox-self (non-voting) : not yet used - - A success in these tests (except the non-voting ones) means that the patch is - approved by Jenkins. diff --git a/tests/ci/__init__.py b/tests/ci/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/tests/ci/cover.sh b/tests/ci/cover.sh deleted file mode 100755 index ba693778..00000000 --- a/tests/ci/cover.sh +++ /dev/null @@ -1,74 +0,0 @@ -#!/bin/bash -# -# Copyright 2015: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -ALLOWED_EXTRA_MISSING=4 - -show_diff () { - head -1 $1 - diff -U 0 $1 $2 | sed 1,2d -} - -if ! git diff --exit-code || ! git diff --cached --exit-code -then - echo "There are uncommitted changes!" 
- echo "Please clean git working directory and try again" - exit 1 -fi - -# Checkout master and save coverage report -git checkout HEAD^ - -baseline_report=$(mktemp -t rally_coverageXXXXXXX) -py.test --cov=rally tests/unit/ --cov-report=html -n auto -coverage report > $baseline_report -mv cover cover-master -cat $baseline_report -baseline_missing=$(awk 'END { print $3 }' $baseline_report) - -# Checkout back and save coverage report -git checkout - - -current_report=$(mktemp -t rally_coverageXXXXXXX) -py.test --cov=rally tests/unit/ --cov-report=html -n auto -coverage report > $current_report -current_missing=$(awk 'END { print $3 }' $current_report) - -# Show coverage details -allowed_missing=$((baseline_missing+ALLOWED_EXTRA_MISSING)) - -echo "Allowed to introduce missing lines : ${ALLOWED_EXTRA_MISSING}" -echo "Missing lines in master : ${baseline_missing}" -echo "Missing lines in proposed change : ${current_missing}" - -if [ $allowed_missing -gt $current_missing ]; -then - if [ $baseline_missing -lt $current_missing ]; - then - show_diff $baseline_report $current_report - echo "I believe you can cover all your code with 100% coverage!" - else - echo "Thank you! You are awesome! Keep writing unit tests! :)" - fi - exit_code=0 -else - show_diff $baseline_report $current_report - echo "Please write more unit tests, we should keep our test coverage :( " - exit_code=1 -fi - -rm $baseline_report $current_report -exit $exit_code diff --git a/tests/ci/hooks/certification_post_test_hook.sh b/tests/ci/hooks/certification_post_test_hook.sh deleted file mode 100755 index b2ff9720..00000000 --- a/tests/ci/hooks/certification_post_test_hook.sh +++ /dev/null @@ -1,13 +0,0 @@ -#!/usr/bin/env bash - -SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" -source $SCRIPT_DIR/../rally_gate_functions.sh - -setUp - -TASK=$RALLY_DIR/certification/openstack/task.yaml -TASK_ARGS=$RALLY_DIR/rally-jobs/certifcation_task_args.yaml - -TASK_ARGS="--task-args-file $TASK_ARGS" - -run $TASK $TASK_ARGS diff --git a/tests/ci/osresources.py b/tests/ci/osresources.py deleted file mode 100755 index 2be0a537..00000000 --- a/tests/ci/osresources.py +++ /dev/null @@ -1,587 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- - -"""List and compare most used OpenStack cloud resources.""" - - -import argparse -import json -import os -import subprocess -import sys - -import six - -from rally.cli import cliutils -from rally.common.plugin import discover -from rally import consts -from rally.plugins.openstack import credential - - -def skip_if_service(service): - def wrapper(func): - def inner(self): - if service in self.clients.services().values(): - return [] - return func(self) - return inner - return wrapper - - -class ResourceManager(object): - - REQUIRED_SERVICE = None - STR_ATTRS = ("id", "name") - - def __init__(self, clients): - self.clients = clients - - def is_available(self): - if self.REQUIRED_SERVICE: - return self.REQUIRED_SERVICE in self.clients.services().values() - return True - - @property - def client(self): - return getattr(self.clients, self.__class__.__name__.lower())() - - def get_resources(self): - all_resources = [] - cls = self.__class__.__name__.lower() - for prop in dir(self): - if not prop.startswith("list_"): - continue - f = getattr(self, prop) - resources = f() or [] - resource_name = prop[5:][:-1] - for raw_res in resources: - res = {"cls": cls, "resource_name": resource_name, - "id": {}, "props": {}} - if not isinstance(raw_res, dict): - raw_res = {k: getattr(raw_res, k) for k in dir(raw_res) - if not k.startswith("_") - if not callable(getattr(raw_res, k))} - for key, value in raw_res.items(): - if key.startswith("_"): - continue - if key in self.STR_ATTRS: - res["id"][key] = value - else: - try: - res["props"][key] = json.dumps(value, indent=2) - except TypeError: - res["props"][key] = str(value) - if not res["id"] and not res["props"]: - print("1: %s" % raw_res) - print("2: %s" % cls) - print("3: %s" % resource_name) - raise ValueError("Failed to represent resource %r" % - raw_res) - all_resources.append(res) - return all_resources - - -class Keystone(ResourceManager): - - REQUIRED_SERVICE = consts.Service.KEYSTONE - - def list_users(self): - return self.client.users.list() - - def list_tenants(self): - if hasattr(self.client, "projects"): - return self.client.projects.list() # V3 - return self.client.tenants.list() # V2 - - def list_roles(self): - return self.client.roles.list() - - def list_ec2credentials(self): - users = self.list_users() - ec2_list = [] - for user in users: - ec2_list.extend( - self.client.ec2.list(user.id)) - return ec2_list - - -class Magnum(ResourceManager): - - REQUIRED_SERVICE = consts.Service.MAGNUM - - def list_cluster_templates(self): - result = [] - marker = None - while True: - ct_list = self.client.cluster_templates.list(marker=marker) - if not ct_list: - break - result.extend(ct_list) - marker = ct_list[-1].uuid - return result - - def list_clusters(self): - result = [] - marker = None - while True: - clusters = self.client.clusters.list(marker=marker) - if not clusters: - break - result.extend(clusters) - marker = clusters[-1].uuid - return result - - -class Mistral(ResourceManager): - - REQUIRED_SERVICE = consts.Service.MISTRAL - - def list_workbooks(self): - return self.client.workbooks.list() - - def list_workflows(self): - return self.client.workflows.list() - - def list_executions(self): - return self.client.executions.list() - - -class Nova(ResourceManager): - - REQUIRED_SERVICE = consts.Service.NOVA - - def list_flavors(self): - return self.client.flavors.list() - - def list_aggregates(self): - return self.client.aggregates.list() - - def list_hosts(self): - return self.client.hosts.list() - - def list_hypervisors(self): - return 
self.client.hypervisors.list() - - def list_agents(self): - return self.client.agents.list() - - def list_keypairs(self): - return self.client.keypairs.list() - - def list_servers(self): - return self.client.servers.list( - search_opts={"all_tenants": True}) - - def list_server_groups(self): - return self.client.server_groups.list(all_projects=True) - - def list_services(self): - return self.client.services.list() - - def list_availability_zones(self): - return self.client.availability_zones.list() - - -class Neutron(ResourceManager): - - REQUIRED_SERVICE = consts.Service.NEUTRON - - def has_extension(self, name): - extensions = self.client.list_extensions().get("extensions", []) - return any(ext.get("alias") == name for ext in extensions) - - def list_networks(self): - return self.client.list_networks()["networks"] - - def list_subnets(self): - return self.client.list_subnets()["subnets"] - - def list_routers(self): - return self.client.list_routers()["routers"] - - def list_ports(self): - return self.client.list_ports()["ports"] - - def list_floatingips(self): - return self.client.list_floatingips()["floatingips"] - - def list_security_groups(self): - return self.client.list_security_groups()["security_groups"] - - def list_health_monitors(self): - if self.has_extension("lbaas"): - return self.client.list_health_monitors()["health_monitors"] - - def list_pools(self): - if self.has_extension("lbaas"): - return self.client.list_pools()["pools"] - - def list_vips(self): - if self.has_extension("lbaas"): - return self.client.list_vips()["vips"] - - def list_bgpvpns(self): - if self.has_extension("bgpvpn"): - return self.client.list_bgpvpns()["bgpvpns"] - - -class Glance(ResourceManager): - - REQUIRED_SERVICE = consts.Service.GLANCE - - def list_images(self): - return self.client.images.list() - - -class Heat(ResourceManager): - - REQUIRED_SERVICE = consts.Service.HEAT - - def list_resource_types(self): - return self.client.resource_types.list() - - def list_stacks(self): - return self.client.stacks.list() - - -class Cinder(ResourceManager): - - REQUIRED_SERVICE = consts.Service.CINDER - - def list_availability_zones(self): - return self.client.availability_zones.list() - - def list_backups(self): - return self.client.backups.list() - - def list_volume_snapshots(self): - return self.client.volume_snapshots.list() - - def list_volume_types(self): - return self.client.volume_types.list() - - def list_encryption_types(self): - return self.client.volume_encryption_types.list() - - def list_transfers(self): - return self.client.transfers.list() - - def list_volumes(self): - return self.client.volumes.list(search_opts={"all_tenants": True}) - - def list_qos(self): - return self.client.qos_specs.list() - - -class Senlin(ResourceManager): - - REQUIRED_SERVICE = consts.Service.SENLIN - - def list_clusters(self): - return self.client.clusters() - - def list_profiles(self): - return self.client.profiles() - - -class Manila(ResourceManager): - - REQUIRED_SERVICE = consts.Service.MANILA - - def list_shares(self): - return self.client.shares.list(detailed=False, - search_opts={"all_tenants": True}) - - def list_share_networks(self): - return self.client.share_networks.list( - detailed=False, search_opts={"all_tenants": True}) - - def list_share_servers(self): - return self.client.share_servers.list( - search_opts={"all_tenants": True}) - - -class Gnocchi(ResourceManager): - - REQUIRED_SERVICE = consts.Service.GNOCCHI - - def list_resources(self): - return self.client.resource.list() - - -class 
Ironic(ResourceManager): - - REQUIRED_SERVICE = consts.Service.IRONIC - - def list_nodes(self): - return self.client.node.list() - - -class Sahara(ResourceManager): - - REQUIRED_SERVICE = consts.Service.SAHARA - - def list_node_group_templates(self): - return self.client.node_group_templates.list() - - -class Murano(ResourceManager): - - REQUIRED_SERVICE = consts.Service.MURANO - - def list_environments(self): - return self.client.environments.list() - - def list_packages(self): - return self.client.packages.list(include_disabled=True) - - -class Designate(ResourceManager): - - REQUIRED_SERVICE = consts.Service.DESIGNATE - - def list_domains(self): - return self.client.domains.list() - - def list_records(self): - result = [] - result.extend(self.client.records.list(domain_id) - for domain_id in self.client.domains.list()) - return result - - def list_servers(self): - return self.client.servers.list() - - def list_zones(self): - return self.clients.designate("2").zones.list() - - def list_recordset(self): - client = self.clients.designate("2") - results = [] - results.extend(client.recordsets.list(zone_id) - for zone_id in client.zones.list()) - return results - - -class Trove(ResourceManager): - - REQUIRED_SERVICE = consts.Service.TROVE - - def list_backups(self): - return self.client.backup.list() - - def list_clusters(self): - return self.client.cluster.list() - - def list_configurations(self): - return self.client.configuration.list() - - def list_databases(self): - return self.client.database.list() - - def list_datastore(self): - return self.client.datastore.list() - - def list_instances(self): - return self.client.list(include_clustered=True) - - def list_modules(self): - return self.client.module.list(datastore="all") - - -class Monasca(ResourceManager): - - REQUIRED_SERVICE = consts.Service.MONASCA - - def list_metrics(self): - return self.client.metrics.list() - - -class Watcher(ResourceManager): - - REQUIRED_SERVICE = consts.Service.WATCHER - - REPR_KEYS = ("uuid", "name") - - def list_audits(self): - return self.client.audit.list() - - def list_audit_templates(self): - return self.client.audit_template.list() - - def list_goals(self): - return self.client.goal.list() - - def list_strategies(self): - return self.client.strategy.list() - - def list_action_plans(self): - return self.client.action_plan.list() - - -class CloudResources(object): - """List and compare cloud resources. - - resources = CloudResources(auth_url=..., ...) - saved_list = resources.list() - - # Do something with the cloud ... 
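# (e.g. run a Rally task that is expected to clean up every resource it creates)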
- - changes = resources.compare(saved_list) - has_changed = any(changes) - removed, added = changes - """ - - def __init__(self, **kwargs): - self.clients = credential.OpenStackCredential(**kwargs).clients() - - def list(self): - managers_classes = discover.itersubclasses(ResourceManager) - resources = [] - for cls in managers_classes: - manager = cls(self.clients) - if manager.is_available(): - resources.extend(manager.get_resources()) - return resources - - def compare(self, with_list): - def make_uuid(res): - return"%s.%s:%s" % ( - res["cls"], res["resource_name"], - ";".join(["%s=%s" % (k, v) - for k, v in sorted(res["id"].items())])) - - current_resources = dict((make_uuid(r), r) for r in self.list()) - saved_resources = dict((make_uuid(r), r) for r in with_list) - - removed = set(saved_resources.keys()) - set(current_resources.keys()) - removed = [saved_resources[k] for k in sorted(removed)] - added = set(current_resources.keys()) - set(saved_resources.keys()) - added = [current_resources[k] for k in sorted(added)] - - return removed, added - - -def _print_tabular_resources(resources, table_label): - def dict_formatter(d): - return "\n".join("%s:%s" % (k, v) for k, v in d.items()) - - cliutils.print_list( - objs=[dict(r) for r in resources], - fields=("cls", "resource_name", "id", "fields"), - field_labels=("service", "resource type", "id", "fields"), - table_label=table_label, - formatters={"id": lambda d: dict_formatter(d["id"]), - "fields": lambda d: dict_formatter(d["props"])} - ) - print("") - - -def main(): - - parser = argparse.ArgumentParser( - description=("Save list of OpenStack cloud resources or compare " - "with previously saved list.")) - parser.add_argument("--credentials", - type=argparse.FileType("r"), - metavar="", - help="cloud credentials in JSON format") - group = parser.add_mutually_exclusive_group(required=True) - group.add_argument("--dump-list", - type=argparse.FileType("w"), - metavar="", - help="dump resources to given file in JSON format") - group.add_argument("--compare-with-list", - type=argparse.FileType("r"), - metavar="", - help=("compare current resources with a list from " - "given JSON file")) - args = parser.parse_args() - - if args.credentials: - config = json.load(args.credentials) - else: - out = subprocess.check_output(["rally", "deployment", "config", - "--deployment", "devstack"]) - config = json.loads(out if six.PY2 else out.decode("utf-8")) - config = config["creds"]["openstack"] - config.update(config.pop("admin")) - if "users" in config: - del config["users"] - - resources = CloudResources(**config) - - if args.dump_list: - resources_list = resources.list() - json.dump(resources_list, args.dump_list) - elif args.compare_with_list: - given_list = json.load(args.compare_with_list) - changes = resources.compare(with_list=given_list) - removed, added = changes - - # Cinder has a feature - cache images for speeding-up time of creating - # volumes from images. 
let's put such cache volumes into the expected list - volume_names = [ - "image-%s" % i["id"]["id"] for i in given_list - if i["cls"] == "glance" and i["resource_name"] == "image"] - - # filter out expected additions - expected = [] - for resource in added: - if ( - (resource["cls"] == "keystone" and - resource["resource_name"] == "role" and - resource["id"].get("name") == "_member_") or - - (resource["cls"] == "neutron" and - resource["resource_name"] == "security_group" and - resource["id"].get("name") == "default") or - - (resource["cls"] == "cinder" and - resource["resource_name"] == "volume" and - resource["id"].get("name") in volume_names) or - - resource["cls"] == "murano" or - - # Glance has issues with uWSGI integration... - resource["cls"] == "glance"): - expected.append(resource) - - for resource in expected: - added.remove(resource) - - if removed: - _print_tabular_resources(removed, "Removed resources") - - if added: - _print_tabular_resources(added, "Added resources (unexpected)") - - if expected: - _print_tabular_resources(expected, "Added resources (expected)") - - if any(changes): - # NOTE(andreykurilin): the '1' return value will fail the gate job. - # It is ok for changes to the Rally project, but changes to other - # projects, which have a rally job, should not be affected by - # this check, since in most cases resources are left over due - # to wrong cleanup of a particular scenario. - if os.environ.get("ZUUL_PROJECT") == "openstack/rally": - return 1 - return 0 - return 0 - - -if __name__ == "__main__": - sys.exit(main()) diff --git a/tests/ci/pytest_launcher.py b/tests/ci/pytest_launcher.py deleted file mode 100755 index d1e76d10..00000000 --- a/tests/ci/pytest_launcher.py +++ /dev/null @@ -1,120 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import argparse -import os -import subprocess -import sys - - -PYTEST_REPORT = os.environ.get("PYTEST_REPORT", - ".test_results/pytest_results.html") -TESTR_REPORT = "testr_results.html" -PYTEST_ARGUMENTS = ("py.test" # base command - " -vv" # show test names in logs - " --html=%(html_report)s" # html report - " --self-contained-html" # embedded css - " --durations=10" # get a list of the slowest 10 tests - " -n auto" # launch tests in parallel - " %(path)s" - ) - - -def error(msg): - print(msg) - exit(1) - - -def main(args): - parser = argparse.ArgumentParser(args[0]) - parser.add_argument("discovery_path", metavar="", type=str, - help="Path to location of all tests.") - parser.add_argument("--posargs", metavar="", type=str, default="", - help="TOX posargs. Currently, only a string pointing " - "to a partial test or tests group to launch is " - "supported.") - parser.add_argument("--timeout", metavar="", type=int, default=60, - help="Timeout for individual test execution. " - "Defaults to 60") - args = parser.parse_args(args[1:]) - - # We allow only one parameter - a path to a partial test or tests group - path = args.posargs - if len(path.split(" ")) > 1: - error("Wrong value of posargs. 
It should include only a path to a single " - "test or tests group to launch.") - # NOTE(andreykurilin): Previously, the following format was supported: - # tests.unit.test_osclients.SomeTestCase.some_method - # It is simpler and more pythonic than the native pytest way: - # tests/unit/test_osclients.py::SomeTestCase::some_method - # Let's return this support - if path: - if "/" not in path: - path = path.split(".") - module = "" - for i in range(0, len(path)): - part = os.path.join(module, path[i]) - if os.path.exists(part): - module = part - continue - if os.path.exists("%s.py" % part): - if i != (len(path) - 1): - module = "%s.py::%s" % (part, "::".join(path[i + 1:])) - else: - module = "%s.py" % part - break - - error("Non-existent path to the single test or tests group to " - "launch. %s %s" % (module, part)) - path = module - - path = os.path.abspath(os.path.expanduser(path)) - if not path.startswith(os.path.abspath(args.discovery_path)): - # Prevent launching functional tests from the unit tests launcher. - error("Wrong path to the single test or tests group to launch. It " - "should be inside %s." % args.discovery_path) - else: - path = args.discovery_path - - print("Test(s) to launch (pytest format): %s" % path) - - # NOTE(andreykurilin): we cannot publish pytest reports at gates, but we - # can mask them as testr reports. It looks like a dirty hack and I - # would prefer to avoid it, but I see no other solution at this point. - - # Apply the dirty hack only in gates. - if os.environ.get("ZUUL_PROJECT"): - pytest_report = TESTR_REPORT - else: - pytest_report = PYTEST_REPORT - - args = PYTEST_ARGUMENTS % {"html_report": pytest_report, - "path": path, - "timeout": args.timeout} - try: - subprocess.check_call(args.split(" "), - stderr=subprocess.STDOUT) - except subprocess.CalledProcessError: - # NOTE(andreykurilin): it is ok, since tests can fail. - exit_code = 1 - else: - exit_code = 0 - - if os.path.exists(pytest_report) and os.environ.get("ZUUL_PROJECT"): - subprocess.check_call(["gzip", "-9", "-f", pytest_report], - stderr=subprocess.STDOUT) - - if exit_code == 1: - error("") - -if __name__ == "__main__": - sys.exit(main(sys.argv)) diff --git a/tests/ci/rally-gate.sh b/tests/ci/rally-gate.sh deleted file mode 100755 index 8fadb540..00000000 --- a/tests/ci/rally-gate.sh +++ /dev/null @@ -1,46 +0,0 @@ -#!/bin/bash -ex -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -# This script is executed by the post_test_hook function in the devstack gate. - -SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" -source $SCRIPT_DIR/rally_gate_functions.sh - -PROJECT=`echo $ZUUL_PROJECT | cut -d \/ -f 2` - -RALLY_JOB_DIR=$BASE/new/$PROJECT/rally-scenarios -if [ ! -d $RALLY_JOB_DIR ]; then - RALLY_JOB_DIR=$BASE/new/$PROJECT/rally-jobs -fi - -echo $RALLY_JOB_DIR -echo $RALLY_DIR -ls $BASE/new/$PROJECT - -if [ "$RALLY_SCENARIO" == "rally-keystone-api-v2" ]; then - echo "WARNING: RALLY TASK WILL NOT BE LAUNCHED." 
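# This scenario is skipped on purpose; report success without running a task.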
- exit 0 -fi - -setUp $RALLY_JOB_DIR - -BASE_FOR_TASK=${RALLY_JOB_DIR}/${RALLY_SCENARIO} - -TASK=${BASE_FOR_TASK}.yaml -TASK_ARGS="" -if [ -f ${BASE_FOR_TASK}_args.yaml ]; then - TASK_ARGS="--task-args-file ${BASE_FOR_TASK}_args.yaml" -fi - -run $TASK $TASK_ARGS diff --git a/tests/ci/rally_app.py b/tests/ci/rally_app.py deleted file mode 100644 index 4a412b71..00000000 --- a/tests/ci/rally_app.py +++ /dev/null @@ -1,30 +0,0 @@ -# Copyright 2016: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Simple app based on rally api for testing purposes""" - -import sys - -from rally import api as rapi - - -def main(): - api = rapi.API(config_args=sys.argv[1:]) - print(len(api.task.list())) - return 0 - - -if __name__ == "__main__": - sys.exit(main()) diff --git a/tests/ci/rally_gate_functions.sh b/tests/ci/rally_gate_functions.sh deleted file mode 100644 index 6a050e49..00000000 --- a/tests/ci/rally_gate_functions.sh +++ /dev/null @@ -1,156 +0,0 @@ -#!/usr/bin/env bash - -RALLY_DIR=$BASE/new/rally -RALLY_PLUGINS_DIR=~/.rally/plugins -RALLY_EXTRA_DIR=~/.rally/extra - -function setUp () { - set -x - - JOB_DIR=$1 - - mkdir -p $RALLY_PLUGINS_DIR - mkdir -p $RALLY_EXTRA_DIR - - if [ -n "$JOB_DIR" ]; then - PLUGINS_DIR=${JOB_DIR}/plugins - EXTRA_DIR=${JOB_DIR}/extra - - if [ -d $PLUGINS_DIR ]; then - cp -r $PLUGINS_DIR/ $RALLY_PLUGINS_DIR - fi - - if [ -d $EXTRA_DIR ]; then - cp -r $EXTRA_DIR/* ~/.rally/extra/ - fi - fi - - touch ~/.rally/extra/fake-image.img - - env - set -o pipefail - - rally deployment use --deployment devstack - - source ~/.rally/openrc admin admin - - OPENSTACK_SERVICES=$(openstack service list) - if [[ $OPENSTACK_SERVICES == *"glance"* ]]; then - openstack image list - fi - if [[ $OPENSTACK_SERVICES == *"cinder"* ]]; then - openstack volume list --all-projects - fi - if [[ $OPENSTACK_SERVICES == *"neutron"* ]]; then - openstack network list - fi - - # NOTE(ikhudoshyn): Create additional users and register a new env - # so that we can run scenarios using the 'existing_users' context - if [ "$DEVSTACK_GATE_PREPOPULATE_USERS" = "1" ]; then - openstack --version - - openstack project create rally-test-project-1 - openstack user create --project rally-test-project-1 --password rally-test-password-1 rally-test-user-1 - openstack role add --project rally-test-project-1 --user rally-test-user-1 Member - - openstack project create rally-test-project-2 - openstack user create --project rally-test-project-2 --password rally-test-password-2 rally-test-user-2 - openstack role add --project rally-test-project-2 --user rally-test-user-2 Member - - set +e - NEUTRON_EXISTS=$(openstack --os-interface admin service list | grep neutron) - set -e - if [ "$NEUTRON_EXISTS" ]; then - OS_QUOTA_STR="--networks -1 --subnets -1 --routers -1 --floating-ips -1 --subnetpools -1 --secgroups -1 --secgroup-rules -1 --ports -1" - openstack --debug quota set $OS_QUOTA_STR rally-test-project-1 - openstack --debug quota show rally-test-project-1 - openstack --debug quota set 
$OS_QUOTA_STR rally-test-project-2 - openstack --debug quota show rally-test-project-2 - fi - - DEPLOYMENT_CONFIG_FILE=~/.rally/with-existing-users-config - - echo ' -{ - "type": "ExistingCloud", - "creds": { - "openstack": { - "users": [ - {"username": "rally-test-user-1", - "password": "rally-test-password-1", - "project_name": "rally-test-project-1", - "user_domain_name": "Default", - "project_domain_name": "Default" - }, - {"username": "rally-test-user-2", - "password": "rally-test-password-2", - "project_name": "rally-test-project-2", - "user_domain_name": "Default", - "project_domain_name": "Default" - }], - "auth_url": "'$OS_AUTH_URL'", - "region_name": "RegionOne" - } - } -} -' > $DEPLOYMENT_CONFIG_FILE - - rally deployment create --name devstack-with-users --filename $DEPLOYMENT_CONFIG_FILE - fi - - rally deployment config - rally --debug deployment check - - if rally deployment check | grep 'nova' | grep 'Available' > /dev/null; - then - nova flavor-create m1.nano 42 64 0 1 - fi -} - -function run () { - set -x - - TASK=$1 - TASK_ARGS="$2 $3" - - if [ "$DEVSTACK_GATE_USE_PYTHON3" = "True" ]; then - PYTHON=python3 - else - PYTHON=python - fi - - $PYTHON $RALLY_DIR/tests/ci/osresources.py --dump-list resources_at_start.txt - - rally --rally-debug task start --task $TASK $TASK_ARGS - - mkdir -p rally-plot/extra - $PYTHON $RALLY_DIR/tests/ci/render.py ci/index.html > rally-plot/extra/index.html - cp $TASK rally-plot/task.txt - tar -czf rally-plot/plugins.tar.gz -C $RALLY_PLUGINS_DIR . - rally task results | python -m json.tool > rally-plot/results.json - rally task import --file rally-plot/results.json - gzip -9 rally-plot/results.json - rally task detailed > rally-plot/detailed.txt - gzip -9 rally-plot/detailed.txt - rally task detailed --iterations-data > rally-plot/detailed_with_iterations.txt - gzip -9 rally-plot/detailed_with_iterations.txt - rally task report --html --out rally-plot/results.html - gzip -9 rally-plot/results.html - rally task export --type junit-xml --to rally-plot/junit.xml - gzip -9 rally-plot/junit.xml - - # NOTE(stpierre): if the sla check fails, we still want osresources.py - # to run, so we turn off -e and save the return value - set +e - rally task sla-check | tee rally-plot/sla.txt - retval=$? - set -e - - cp resources_at_start.txt rally-plot/ - $PYTHON $RALLY_DIR/tests/ci/osresources.py\ - --compare-with-list resources_at_start.txt\ - | gzip > rally-plot/resources_diff.txt.gz - - exit $retval -} diff --git a/tests/ci/rally_self_job.sh b/tests/ci/rally_self_job.sh deleted file mode 100755 index 6ee43f1b..00000000 --- a/tests/ci/rally_self_job.sh +++ /dev/null @@ -1,47 +0,0 @@ -#!/bin/bash -ex -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
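# NOTE: the commands below exercise Rally against itself: they build a throwaway
# config file and sqlite database under /tmp and register an empty "ExistingCloud"
# deployment named "self", so the run never touches the system-wide Rally setup.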
- -TASK_FILE=$1 -PLUGIN_PATHS=rally-jobs/plugins -if [ -n "$ZUUL_PROJECT" ]; then - HTML_REPORT=testr_results.html -else - HTML_REPORT=rally_self_results.html -fi -RND=$(head /dev/urandom | tr -dc a-z0-9 | head -c 5) -TMP_RALLY_CONF="/tmp/self-rally-$RND.conf" -TMP_RALLY_DB="/tmp/self-rally-$RND.sqlite" -TMP_RALLY_DEPLOYMENT="/tmp/self-rally-dep-$RND.json" -DBCONNSTRING="sqlite:///$TMP_RALLY_DB" -RALLY="rally --config-file $TMP_RALLY_CONF" - -# Create temp db -cp etc/rally/rally.conf.sample $TMP_RALLY_CONF -sed -i.bak "s|#connection =.*|connection = \"$DBCONNSTRING\"|" $TMP_RALLY_CONF -rally-manage --config-file $TMP_RALLY_CONF db create - -# Create self deployment -echo '{"type": "ExistingCloud", "creds": {}}' > $TMP_RALLY_DEPLOYMENT -$RALLY -d deployment create --file=$TMP_RALLY_DEPLOYMENT --name=self - -# Run task -$RALLY -d --plugin-paths=$PLUGIN_PATHS task start $TASK_FILE -$RALLY task report --html-static --out $HTML_REPORT - -if [ -n "$ZUUL_PROJECT" ]; then - gzip -9 -f $HTML_REPORT -fi - -# Check sla (this may fail the job) -$RALLY task sla-check diff --git a/tests/ci/rally_verify.py b/tests/ci/rally_verify.py deleted file mode 100755 index dda1421c..00000000 --- a/tests/ci/rally_verify.py +++ /dev/null @@ -1,528 +0,0 @@ -#!/usr/bin/env python -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import argparse -import collections -import gzip -import json -import logging -import os -import re -import subprocess -import sys -import uuid - -from rally import api -from rally.ui import utils - -LOG = logging.getLogger("verify-job") -LOG.setLevel(logging.DEBUG) - -# NOTE(andreykurilin): this variable is used to generate output file names -# with the prefix ${CALL_COUNT}_ . -_call_count = 0 - - -class Status(object): - PASS = "success" - ERROR = "error" - SKIPPED = "skip" - FAILURE = "fail" - - -class Step(object): - COMMAND = None - DEPENDS_ON = None - CALL_ARGS = {} - - BASE_DIR = "rally-verify" - HTML_TEMPLATE = ("[%(status)s]\n" - "%(doc)s\n" - "$ %(cmd)s") - - def __init__(self, args, rapi): - self.args = args - self.rapi = rapi - self.result = {"status": Status.PASS, - "doc": self.__doc__} - - @property - def name(self): - return " ".join(re.findall("[A-Z][^A-Z]*", - self.__class__.__name__)).lower() - - def check(self, results): - """Check whether this step should be executed or skipped.""" - if self.DEPENDS_ON is not None: - if results[self.DEPENDS_ON].result["status"] in ( - Status.PASS, Status.FAILURE): - return True - else: - self.result["status"] = Status.SKIPPED - msg = ("Step '%s' is skipped, since the step it depends on, " - "'%s', was skipped or finished with an error." % - (self.name, results[self.DEPENDS_ON].name)) - stdout_file = self._generate_path( - "%s.txt.gz" % self.__class__.__name__) - - self.result["output_file"] = self._write_file( - stdout_file, msg, compress=True) - return False - return True - - def setUp(self): - """Obtain variables required for execution""" - pass - - def run(self): - """Execute step. 
The default action is to execute the command.""" - self.setUp() - - cmd = "rally --rally-debug %s" % (self.COMMAND % self.CALL_ARGS) - self.result["cmd"] = cmd - self.result["status"], self.result["output"] = self.call_rally(cmd) - - stdout_file = self._generate_path("%s.txt.gz" % cmd) - self.result["output_file"] = self._write_file( - stdout_file, self.result["output"], compress=True) - - @classmethod - def _generate_path(cls, root): - global _call_count - _call_count += 1 - - root = root.replace("<", "").replace(">", "").replace("/", "_") - parts = ["%s" % _call_count] - for path in root.split(" "): - if path.startswith(cls.BASE_DIR): - path = path[len(cls.BASE_DIR) + 1:] - parts.append(path) - return os.path.join(cls.BASE_DIR, "_".join(parts)) - - @classmethod - def _write_file(cls, path, data, compress=False): - """Create a file and write some data to it.""" - if compress: - with gzip.open(path, "wb") as f: - f.write(data) - else: - with open(path, "wb") as f: - f.write(data) - return path - - @staticmethod - def call_rally(command): - """Execute a Rally verify command.""" - try: - LOG.info("Start `%s` command." % command) - stdout = subprocess.check_output(command.split(), - stderr=subprocess.STDOUT) - except subprocess.CalledProcessError as e: - LOG.error("Command `%s` failed." % command) - return Status.ERROR, e.output - else: - return Status.PASS, stdout - - def to_html(self): - return self.HTML_TEMPLATE % self.result - - -class SetUpStep(Step): - """Validate deployment, create required resources and directories.""" - - DEPLOYMENT_NAME = "devstack" - - def run(self): - if not os.path.exists("%s/extra" % self.BASE_DIR): - os.makedirs("%s/extra" % self.BASE_DIR) - - # ensure that the deployment exists - deployment = self.rapi.deployment._get(self.DEPLOYMENT_NAME) - # check it - result = self.rapi.deployment.check( - deployment=self.DEPLOYMENT_NAME)["openstack"] - if "admin_error" in result[0] or "user_error" in result[0]: - self.result["status"] = Status.ERROR - return - - try: - subprocess.check_call(["rally", "deployment", "use", - "--deployment", self.DEPLOYMENT_NAME], - stdout=sys.stdout) - except subprocess.CalledProcessError: - self.result["status"] = Status.ERROR - return - - credentials = deployment.get_credentials_for("openstack")["admin"] - clients = credentials.clients() - - if self.args.ctx_create_resources: - # If the 'ctx-create-resources' arg is provided, delete images and - # flavors, and also create a shared network to make Tempest context - # create needed resources. - LOG.info("The 'ctx-create-resources' arg is provided. Deleting " - "images and flavors, and also creating a shared network " - "to make Tempest context create needed resources.") - - LOG.info("Deleting images.") - for image in clients.glance().images.list(): - clients.glance().images.delete(image.id) - - LOG.info("Deleting flavors.") - for flavor in clients.nova().flavors.list(): - clients.nova().flavors.delete(flavor.id) - - LOG.info("Creating a shared network.") - net_body = { - "network": { - "name": "shared-net-%s" % str(uuid.uuid4()), - "tenant_id": clients.keystone.auth_ref.project_id, - "shared": True - } - } - clients.neutron().create_network(net_body) - else: - # Otherwise, create only flavors with the following properties: - # RAM = 64MB and 128MB, VCPUs = 1, disk = 0GB, so that the - # Tempest context can discover them. - LOG.info("The 'ctx-create-resources' arg is not provided. 
" - "Creating flavors to make Tempest context discover them.") - for flv_ram in [64, 128]: - params = { - "name": "flavor-%s" % str(uuid.uuid4()), - "ram": flv_ram, - "vcpus": 1, - "disk": 0 - } - LOG.info("Creating flavor '%s' with the following properties: " - "RAM = %dMB, VCPUs = 1, disk = 0GB" % - (params["name"], flv_ram)) - clients.nova().flavors.create(**params) - - def to_html(self): - return "" - - -class ListPlugins(Step): - """List plugins for verifiers management.""" - - COMMAND = "verify list-plugins" - DEPENDS_ON = SetUpStep - - -class CreateVerifier(Step): - """Create a Tempest verifier.""" - - COMMAND = ("verify create-verifier --type %(type)s --name %(name)s " - "--source %(source)s") - DEPENDS_ON = ListPlugins - CALL_ARGS = {"type": "tempest", - "name": "my-verifier", - "source": "https://git.openstack.org/openstack/tempest"} - - -class ShowVerifier(Step): - """Show information about the created verifier.""" - - COMMAND = "verify show-verifier" - DEPENDS_ON = CreateVerifier - - -class ListVerifiers(Step): - """List all installed verifiers.""" - - COMMAND = "verify list-verifiers" - DEPENDS_ON = CreateVerifier - - -class UpdateVerifier(Step): - """Switch the verifier to the penultimate version.""" - - COMMAND = "verify update-verifier --version %(version)s --update-venv" - DEPENDS_ON = CreateVerifier - - def setUp(self): - """Obtain penultimate verifier commit for downgrading to it""" - verifier_id = self.rapi.verifier.list()[0]["uuid"] - verifications_dir = os.path.join( - os.path.expanduser("~"), - ".rally/verification/verifier-%s/repo" % verifier_id) - # Get the penultimate verifier commit ID - p_commit_id = subprocess.check_output( - ["git", "log", "-n", "1", "--pretty=format:%H"], - cwd=verifications_dir).strip() - self.CALL_ARGS = {"version": p_commit_id} - - -class ConfigureVerifier(Step): - """Generate and show the verifier config file.""" - - COMMAND = "verify configure-verifier --show" - DEPENDS_ON = CreateVerifier - - -class ExtendVerifier(Step): - """Extend verifier with keystone integration tests.""" - - COMMAND = "verify add-verifier-ext --source %(source)s" - DEPENDS_ON = CreateVerifier - CALL_ARGS = {"source": "https://git.openstack.org/openstack/" - "keystone-tempest-plugin"} - - -class ListVerifierExtensions(Step): - """List all extensions of verifier.""" - - COMMAND = "verify list-verifier-exts" - DEPENDS_ON = ExtendVerifier - - -class ListVerifierTests(Step): - """List all tests of specific verifier.""" - - COMMAND = "verify list-verifier-tests" - DEPENDS_ON = CreateVerifier - - -class RunVerification(Step): - """Run a verification.""" - - DEPENDS_ON = ConfigureVerifier - COMMAND = ("verify start --pattern set=%(set)s --skip-list %(skip_tests)s " - "--xfail-list %(xfail_tests)s --tag %(tag)s %(set)s-set " - "--detailed") - SKIP_TESTS = { - "tempest.api.compute.flavors.test_flavors.FlavorsV2TestJSON." - "test_get_flavor[id-1f12046b-753d-40d2-abb6-d8eb8b30cb2f,smoke]": - "This test was skipped intentionally"} - XFAIL_TESTS = { - "tempest.api.compute.servers.test_server_actions." 
- "ServerActionsTestJSON.test_get_vnc_console" - "[id-c6bc11bf-592e-4015-9319-1c98dc64daf5]": - "This test fails because 'novnc' console type is unavailable"} - - def setUp(self): - self.CALL_ARGS["tag"] = "tag-1 tag-2" - self.CALL_ARGS["set"] = "full" if self.args.mode == "full" else "smoke" - # Start a verification, show results and generate reports - skip_tests = json.dumps(self.SKIP_TESTS) - xfail_tests = json.dumps(self.XFAIL_TESTS) - self.CALL_ARGS["skip_tests"] = self._write_file( - self._generate_path("skip-list.json"), skip_tests) - self.CALL_ARGS["xfail_tests"] = self._write_file( - self._generate_path("xfail-list.json"), xfail_tests) - - def run(self): - super(RunVerification, self).run() - if "Success: 0" in self.result["output"]: - self.result["status"] = Status.FAILURE - - -class ReRunVerification(RunVerification): - """Re-Run previous verification.""" - - COMMAND = "verify rerun --tag one-more-attempt" - - def run(self): - super(RunVerification, self).run() - if "Success: 0" in self.result["output"]: - self.result["status"] = Status.FAILURE - - -class ShowVerification(Step): - """Show results of verification.""" - - COMMAND = "verify show" - DEPENDS_ON = RunVerification - - -class ShowSecondVerification(ShowVerification): - """Show results of verification.""" - - DEPENDS_ON = ReRunVerification - - -class ShowDetailedVerification(Step): - """Show detailed results of verification.""" - - COMMAND = "verify show --detailed" - DEPENDS_ON = RunVerification - - -class ShowDetailedSecondVerification(ShowDetailedVerification): - """Show detailed results of verification.""" - - DEPENDS_ON = ReRunVerification - - -class ReportVerificationMixin(Step): - """Mixin for obtaining reports of verifications.""" - - COMMAND = "verify report --uuid %(uuids)s --type %(type)s --to %(out)s" - - HTML_TEMPLATE = ("[%(status)s]\n" - "%(doc)s " - "[Output from CLI]\n" - "$ %(cmd)s") - - def setUp(self): - self.CALL_ARGS["out"] = "" - self.CALL_ARGS["uuids"] = " " - cmd = self.COMMAND % self.CALL_ARGS - report = "%s.%s" % (cmd.replace("/", "_").replace(" ", "_"), - self.CALL_ARGS["type"]) - print(report) - self.CALL_ARGS["out"] = self._generate_path(report) - self.CALL_ARGS["uuids"] = " ".join( - [v["uuid"] for v in self.rapi.verification.list()]) - print(self.COMMAND % self.CALL_ARGS) - - def run(self): - super(ReportVerificationMixin, self).run() - creport = "%s.gz" % self.CALL_ARGS["out"] - with open(self.CALL_ARGS["out"], "rb") as f_in: - with gzip.open(creport, "wb") as f_out: - f_out.writelines(f_in) - self.result["out"] = creport - - -class HtmlVerificationReport(ReportVerificationMixin): - """Generate HTML report for verification(s).""" - - CALL_ARGS = {"type": "html"} - DEPENDS_ON = RunVerification - - -class JsonVerificationReport(ReportVerificationMixin): - """Generate JSON report for verification(s).""" - - CALL_ARGS = {"type": "json"} - DEPENDS_ON = RunVerification - - -class JunitVerificationReport(ReportVerificationMixin): - """Generate JUNIT report for verification(s).""" - - CALL_ARGS = {"type": "junit-xml"} - DEPENDS_ON = RunVerification - - -class ListVerifications(Step): - """List all verifications.""" - - COMMAND = "verify list" - DEPENDS_ON = CreateVerifier - - -class DeleteVerifierExtension(Step): - """Delete keystone extension.""" - - COMMAND = "verify delete-verifier-ext --name %(name)s" - CALL_ARGS = {"name": "keystone_tests"} - DEPENDS_ON = ExtendVerifier - - -class DeleteVerifier(Step): - """Delete only Tempest verifier. 
-
-    All verifications will be deleted when the deployment is destroyed.
-
-    """
-    COMMAND = "verify delete-verifier --id %(id)s --force"
-    CALL_ARGS = {"id": CreateVerifier.CALL_ARGS["name"]}
-    DEPENDS_ON = CreateVerifier
-
-
-class DestroyDeployment(Step):
-    """Delete the deployment and all verifications of this deployment."""
-
-    COMMAND = "deployment destroy --deployment %(id)s"
-    CALL_ARGS = {"id": SetUpStep.DEPLOYMENT_NAME}
-    DEPENDS_ON = SetUpStep
-
-
-def run(args):
-
-    steps = [SetUpStep,
-             ListPlugins,
-             CreateVerifier,
-             ShowVerifier,
-             ListVerifiers,
-             UpdateVerifier,
-             ConfigureVerifier,
-             ExtendVerifier,
-             ListVerifierExtensions,
-             ListVerifierTests,
-             RunVerification,
-             ShowVerification,
-             ShowDetailedVerification,
-             HtmlVerificationReport,
-             JsonVerificationReport,
-             JunitVerificationReport,
-             ListVerifications,
-             DeleteVerifierExtension,
-             DestroyDeployment,
-             DeleteVerifier]
-
-    if args.compare:
-        # need to launch one more verification
-        place_to_insert = steps.index(ShowDetailedVerification) + 1
-        # insert steps in reverse order to be able to use the same index
-        steps.insert(place_to_insert, ShowDetailedSecondVerification)
-        steps.insert(place_to_insert, ShowSecondVerification)
-        steps.insert(place_to_insert, ReRunVerification)
-
-    results = collections.OrderedDict()
-    rapi = api.API()
-    for step_cls in steps:
-        step = step_cls(args, rapi=rapi)
-        if step.check(results):
-            step.run()
-        results[step_cls] = step
-
-    return results.values()
-
-
-def main():
-    parser = argparse.ArgumentParser(description="Launch rally-verify job.")
-    parser.add_argument("--mode", type=str, default="light",
-                        help="Mode of job. The 'full' mode corresponds to the "
-                             "full set of verifier tests. The 'light' mode "
-                             "corresponds to the smoke set of verifier tests.",
-                        choices=["light", "full"])
-    parser.add_argument("--compare", action="store_true",
-                        help="Start the second verification to generate a "
-                             "trends report for two verifications.")
-    # TODO(ylobankov): Remove hard-coded Tempest related things and make it
-    # configurable.
-    parser.add_argument("--ctx-create-resources", action="store_true",
-                        help="Make Tempest context create needed resources "
-                             "for the tests.")
-
-    args = parser.parse_args()
-
-    steps = run(args)
-    results = [step.to_html() for step in steps]
-
-    template = utils.get_template("ci/index_verify.html")
-    with open(os.path.join(Step.BASE_DIR, "extra/index.html"), "w") as f:
-        f.write(template.render(steps=results))
-
-    if all(step.result["status"] == Status.PASS
-           for step in steps):
-        return 0
-    return 1
-
-if __name__ == "__main__":
-    sys.exit(main())
diff --git a/tests/ci/render.py b/tests/ci/render.py
deleted file mode 100644
index 2c4419c1..00000000
--- a/tests/ci/render.py
+++ /dev/null
@@ -1,37 +0,0 @@
-# Copyright 2015: Mirantis Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
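-
-# NOTE(editor): usage sketch, not part of the original file. This helper
-# renders a Jinja2 template bundled with rally.ui, filling in key=value
-# pairs given on the command line, e.g. (template name and keys are
-# illustrative):
-#
-#     $ python tests/ci/render.py ci/index.html subtitle=demo > index.html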
-
-from __future__ import print_function
-import re
-import sys
-
-from rally.ui import utils
-
-
-HELP_MESSAGE = (
    "Usage:\n\t"
    "render.py ci/template.html "
    "[<key-1>=<value-1> <key-2>=<value-2> ...]\n\n\t"
    "Where key-1,value-1 and key-2,value-2 are key pairs of template.")
-
-
-if __name__ == "__main__":
-    args = sys.argv
-    if (len(args) < 2 or not all(re.match("^[^=]+=[^=]+$",
-                                          arg) for arg in args[2:])):
-        print(HELP_MESSAGE, file=sys.stderr)
-        sys.exit(1)
-    render_kwargs = dict([arg.split("=") for arg in args[2:]])
-    print(utils.get_template(args[1]).render(**render_kwargs))
diff --git a/tests/ci/sync_requirements.py b/tests/ci/sync_requirements.py
deleted file mode 100644
index 641cebc9..00000000
--- a/tests/ci/sync_requirements.py
+++ /dev/null
@@ -1,340 +0,0 @@
-#!/usr/bin/env python
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-Synchronizes, formats and prepares requirements for release (obtains and adds
-the maximum allowed version).
-"""
-
-import argparse
-import logging
-import re
-import sys
-import textwrap
-
-import requests
-
-
-LOG = logging.getLogger(__name__)
-if not LOG.handlers:
-    LOG.addHandler(logging.StreamHandler())
-    LOG.setLevel(logging.INFO)
-
-
-GLOBAL_REQUIREMENTS_LOCATIONS = (
-    "https://raw.githubusercontent.com/openstack/requirements/master/",
-    "http://git.openstack.org/cgit/openstack/requirements/plain/"
-)
-GLOBAL_REQUIREMENTS_FILENAME = "global-requirements.txt"
-RALLY_REQUIREMENTS_FILES = (
-    "requirements.txt",
-    "test-requirements.txt"
-)
-DO_NOT_TOUCH_TAG = "[do-not-touch]"
-
-
-class Comment(object):
-    def __init__(self, s=None, finished=False):
-        self._comments = []
-        self.is_finished = finished
-        if s:
-            self.append(s)
-
-    def finish_him(self):
-        self.is_finished = True
-
-    def append(self, s):
-        self._comments.append(s[1:].strip())
-
-    def __str__(self):
-        return textwrap.fill("\n".join(self._comments), width=80,
-                             initial_indent="# ", subsequent_indent="# ")
-
-
-class Requirement(object):
-    RE_NAME = re.compile(r"[a-zA-Z0-9-._]+")
-    RE_CONST_VERSION = re.compile(r"==[a-zA-Z0-9.]+")
-    RE_MIN_VERSION = re.compile(r">=?[a-zA-Z0-9.]+")
-    RE_MAX_VERSION = re.compile(r"<=?[a-zA-Z0-9.]+")
-    RE_NE_VERSIONS = re.compile(r"!=[a-zA-Z0-9.]+")
-    # NOTE(andreykurilin): one license can have different labels. Let's use
-    # a unified variant.
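-    # NOTE(editor): for example, both "MIT license" and "MIT License" as
-    # reported by PyPI collapse into the single "MIT" label below.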
- LICENSE_MAP = {"MIT license": "MIT", - "MIT License": "MIT", - "BSD License": "BSD", - "Apache 2.0": "Apache License, Version 2.0"} - - def __init__(self, package_name, version): - self.package_name = package_name - self.version = version - self._license = None - self._pypy_info = None - self.do_not_touch = False - - def sync_max_version_with_pypy(self): - if isinstance(self.version, dict) and not self.do_not_touch: - self.version["max"] = "<=%s" % self.pypy_info["info"]["version"] - - @property - def pypy_info(self): - if self._pypy_info is None: - resp = requests.get("https://pypi.python.org/pypi/%s/json" % - self.package_name) - if resp.status_code != 200: - raise Exception(resp.text) - self._pypy_info = resp.json() - return self._pypy_info - - @property - def license(self): - if self._license is None: - if self.pypy_info["info"]["license"]: - self._license = self.pypy_info["info"]["license"] - else: - # try to parse classifiers - prefix = "License :: OSI Approved :: " - classifiers = [c[len(prefix):] - for c in self.pypy_info["info"]["classifiers"] - if c.startswith(prefix)] - self._license = "/".join(classifiers) - self._license = self.LICENSE_MAP.get(self._license, self._license) - if self._license == "UNKNOWN": - self._license = None - return self._license - - @classmethod - def parse_line(cls, line): - match = cls.RE_NAME.match(line) - if match: - name = match.group() - # remove name - versions = line.replace(name, "") - # remove comments - versions = versions.split("#")[0] - # remove python classifiers - versions = versions.split(";")[0].strip() - if not cls.RE_CONST_VERSION.match(versions): - versions = versions.strip().split(",") - min_version = None - max_version = None - ne_versions = [] - for version in versions: - if cls.RE_MIN_VERSION.match(version): - if min_version: - raise Exception("Found several min versions for " - "%s package." % name) - min_version = version - elif cls.RE_MAX_VERSION.match(version): - if max_version: - raise Exception("Found several max versions for " - "%s package." % name) - max_version = version - elif cls.RE_NE_VERSIONS.match(version): - ne_versions.append(version) - versions = {"min": min_version, - "max": max_version, - "ne": ne_versions} - return cls(name, versions) - - def __str__(self): - if isinstance(self.version, dict): - version = [] - - min_equal_to_max = False - if self.version["min"] and self.version["max"]: - if (self.version["min"].startswith(">=") and - self.version["max"].startswith("<=") and - self.version["min"][2:] == self.version["max"][2:]): - # min and max versions are equal there is no need to write - # both of them - min_equal_to_max = True - version.append("==%s" % self.version["min"][2:]) - - if not min_equal_to_max and self.version["min"]: - version.append(self.version["min"]) - - if not min_equal_to_max and self.version["ne"]: - version.extend(self.version["ne"]) - - if not min_equal_to_max and self.version["max"]: - version.append(self.version["max"]) - - version = ",".join(version) - else: - if self.do_not_touch: - version = self.version - else: - # remove const version - version = ">=%s" % self.version[2:] - - string = "%s%s" % (self.package_name, version) - if self.license: - # NOTE(andreykurilin): When I start implementation of this script, - # python-keystoneclient dependency string took around ~45-55 - # chars, so let's use this length as indent. Feel free to modify - # it to lower or greater value. 
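-            # NOTE(editor): e.g. a short entry such as "six>=1.9.0" is
-            # padded to 55 characters before its license comment; entries
-            # already longer than that get a two-space gap instead.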
- magic_number = 55 - if len(string) < magic_number: - indent = magic_number - len(string) - else: - indent = 2 - string += " " * indent + "# " + self.license - return string - - def __eq__(self, other): - return (isinstance(other, self.__class__) and - self.package_name == other.package_name) - - def __ne__(self, other): - return not self.__eq__(other) - - -def parse_data(raw_data, include_comments=True): - # first elem is None to simplify checks of last elem in requirements - requirements = [None] - for line in raw_data.split("\n"): - if line.startswith("#"): - if not include_comments: - continue - - if getattr(requirements[-1], "is_finished", True): - requirements.append(Comment()) - - requirements[-1].append(line) - elif line == "": - # just empty line - if isinstance(requirements[-1], Comment): - requirements[-1].finish_him() - requirements.append(Comment(finished=True)) - else: - if (isinstance(requirements[-1], Comment) and - not requirements[-1].is_finished): - requirements[-1].finish_him() - # parse_line - req = Requirement.parse_line(line) - if req: - if (isinstance(requirements[-1], Comment) and - DO_NOT_TOUCH_TAG in str(requirements[-1])): - req.do_not_touch = True - requirements.append(req) - for i in range(len(requirements) - 1, 0, -1): - # remove empty lines at the end of file - if isinstance(requirements[i], Comment): - if str(requirements[i]) == "": - requirements.pop(i) - else: - break - return requirements[1:] - - -def _read_requirements(): - """Read all rally requirements.""" - LOG.info("Reading rally requirements...") - for file_name in RALLY_REQUIREMENTS_FILES: - LOG.debug("Try to read '%s'." % file_name) - with open(file_name) as f: - data = f.read() - LOG.info("Parsing requirements from %s." % file_name) - yield file_name, parse_data(data) - - -def _write_requirements(filename, requirements): - """Saves requirements to file.""" - LOG.info("Saving requirements to %s." % filename) - with open(filename, "w") as f: - for entity in requirements: - f.write(str(entity)) - f.write("\n") - - -def _sync(): - LOG.info("Obtaining global-requirements...") - for i in range(0, len(GLOBAL_REQUIREMENTS_LOCATIONS)): - url = GLOBAL_REQUIREMENTS_LOCATIONS[i] + GLOBAL_REQUIREMENTS_FILENAME - LOG.debug("Try to obtain global-requirements from %s" % url) - try: - raw_gr = requests.get(url).text - except requests.ConnectionError as e: - LOG.exception(e) - if i == len(GLOBAL_REQUIREMENTS_LOCATIONS) - 1: - # there are no more urls to try - raise Exception("Unable to obtain %s" % - GLOBAL_REQUIREMENTS_FILENAME) - else: - break - - LOG.info("Parsing global-requirements...") - # NOTE(andreykurilin): global-requirements includes comments which can be - # unrelated to Rally project. 
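-    # NOTE(editor): hence include_comments=False below -- the parsed
-    # global-requirements list then contains Requirement objects only.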
-    gr = parse_data(raw_gr, include_comments=False)
-    for filename, requirements in _read_requirements():
-        for i in range(0, len(requirements)):
-            if (isinstance(requirements[i], Requirement) and
-                    not requirements[i].do_not_touch):
-                try:
-                    gr_item = gr[gr.index(requirements[i])]
-                except ValueError:
-                    # the requirement is not in global-requirements
-                    if isinstance(requirements[i].version, dict):
-                        requirements[i].version["max"] = None
-                else:
-                    requirements[i].version = gr_item.version
-        yield filename, requirements
-
-
-def sync():
-    """Synchronizes Rally requirements with OpenStack global-requirements."""
-    for filename, requirements in _sync():
-        _write_requirements(filename, requirements)
-
-
-def format_requirements():
-    """Obtain package licenses from PyPI and write requirements to file."""
-    for filename, requirements in _read_requirements():
-        _write_requirements(filename, requirements)
-
-
-def add_uppers():
-    """Obtain the latest versions of packages and put them to requirements."""
-    for filename, requirements in _sync():
-        LOG.info("Obtaining latest versions of packages for %s." % filename)
-        for req in requirements:
-            if isinstance(req, Requirement):
-                if isinstance(req.version, dict) and not req.version["max"]:
-                    req.sync_max_version_with_pypy()
-        _write_requirements(filename, requirements)
-
-
-def main():
-    parser = argparse.ArgumentParser(
-        prog="Python Requirement Manager for Rally",
-        description=__doc__.strip(),
-        add_help=True
-    )
-
-    action_groups = parser.add_mutually_exclusive_group()
-    action_groups.add_argument("--format",
-                               action="store_const",
-                               const=format_requirements,
-                               dest="action")
-    action_groups.add_argument("--add-upper",
-                               action="store_const",
-                               const=add_uppers,
-                               dest="action")
-    action_groups.set_defaults(action=sync)
-    parser.parse_args(sys.argv[1:]).action()
-
-if __name__ == "__main__":
-    sys.exit(main())
diff --git a/tests/ci/test_install.sh b/tests/ci/test_install.sh
deleted file mode 100755
index ea889734..00000000
--- a/tests/ci/test_install.sh
+++ /dev/null
@@ -1,42 +0,0 @@
-#!/bin/sh -ex
-#
-# Copyright 2013: Mirantis Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-env
-
-sudo yum remove -y python-crypto || true
-
-# NOTE(pabelanger): We run apt-get update to ensure we don't have a stale
-# package cache in the gate.
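-# NOTE(editor): the "|| true" guards below (and on "yum remove" above) keep
-# this "sh -ex" script from aborting on distros where the respective package
-# manager is unavailable.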
-sudo apt-get update || true - -sudo ./install_rally.sh --system --yes -rally deployment list -[ -d /etc/bash_completion.d ] && cat /etc/bash_completion.d/rally.bash_completion || true - -sudo ./install_rally.sh --system --yes -rally deployment list - -sudo ./install_rally.sh --yes -d /tmp/rallytest_root/ -/tmp/rallytest_root/bin/rally deployment list -cat /tmp/rallytest_root/etc/bash_completion.d/rally.bash_completion - -sudo rm -fr ~/.rally - -./install_rally.sh --yes -d /tmp/rallytest_user -/tmp/rallytest_user/bin/rally deployment list - -./install_rally.sh --overwrite --dbtype sqlite diff --git a/tests/ci/wip-rally-gate.py b/tests/ci/wip-rally-gate.py deleted file mode 100755 index 4baee40a..00000000 --- a/tests/ci/wip-rally-gate.py +++ /dev/null @@ -1,190 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2015: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import errno -import json -import os -import pwd -import re -import shutil -import subprocess -import sys -import tempfile - -from six.moves.urllib import parse - -from rally.ui import utils - - -def use_keystone_v3(): - """Alter deployment to use keystone v3.""" - print("Changing deployment to v3") - config = json.loads(subprocess.check_output(["rally", "deployment", - "config"])) - v3_url = parse.urlsplit(config["auth_url"])._replace(path="v3").geturl() - config["auth_url"] = v3_url - endpoint = config.get("endpoint") - if endpoint: - v3_enpoint = parse.urlsplit(endpoint)._replace(path="v3").geturl() - config["endpoint"] = v3_enpoint - config["project_name"] = config["tenant"] - config["project_domain_name"] = config["tenant"] - cfg_file = tempfile.NamedTemporaryFile() - json.dump(config, cfg_file) - print("New config for keystone v3:") - print(json.dumps(config, indent=2)) - cfg_file.flush() - subprocess.call(["rally", "deployment", "create", - "--name", "V3", "--file", cfg_file.name]) - print(subprocess.check_output(["rally", "deployment", "check"])) - -TAG_HANDLERS = {"v3": use_keystone_v3} - - -def perror(s): - sys.stderr.write(s + "\n") - sys.stderr.flush() - - -def run(cmd, stdout=None, gzip=True, check=False): - """Run shell command. - - Save output to file, and gzip-compress if needed. - If exit status is non-zero and check is True then raise exception. - Return exit status otherwise. - """ - print("Starting %s" % " ".join(cmd)) - status = subprocess.call(cmd, stdout=open(stdout, "w") if stdout else None) - if stdout and gzip: - subprocess.call(["gzip", "-9", stdout]) - if check and status: - raise Exception("Failed with status %d" % status) - return status - - -def run_task(task, tags=None): - new_home_dir = tempfile.mkdtemp(prefix="rally_gate_") - shutil.copytree(os.path.join(pwd.getpwuid(os.getuid()).pw_dir, ".rally"), - os.path.join(new_home_dir, ".rally")) - print("Setting $HOME to %s" % new_home_dir) - os.environ["HOME"] = new_home_dir - for tag in tags or []: - if tag == "args": - continue - if tag not in TAG_HANDLERS: - perror("Warning! 
Unknown tag '%s'" % tag) - continue - try: - TAG_HANDLERS[tag]() - except Exception as e: - perror("Error processing tag '%s': %s" % (tag, e)) - - run(["rally", "task", "validate", "--task", task], check=True) - cmd = ["rally", "task", "start", "--task", task] - args_file, ext = task.rsplit(".", 1) - args_file = args_file + "_args." + ext - if os.path.isfile(args_file): - cmd += ["--task-args-file", args_file] - run(cmd, check=True) - task_name = os.path.split(task)[-1] - pub_dir = os.environ.get("RCI_PUB_DIR", "rally-plot") - try: - os.makedirs(os.path.join(pub_dir, "extra")) - except Exception as e: - if e.errno != errno.EEXIST: - raise - run(["rally", "task", "report", "--out", - "%s/%s.html" % (pub_dir, task_name)]) - run(["rally", "task", "results"], - stdout="%s/results-%s.json" % (pub_dir, task_name)) - status = run(["rally", "task", "sla-check"], - stdout="%s/%s.sla.txt" % (pub_dir, task_name)) - run(["rally", "task", "detailed"], - stdout="rally-plot/detailed-%s.txt" % task_name) - run(["rally", "task", "detailed", "--iterations-data"], - stdout="rally-plot/detailed_with_iterations-%s.txt" % task_name) - - return status - - -def get_name_from_git(): - """Determine org/project name from git.""" - r = re.compile(".*/(.*?)/(.*?).git$") - for l in open(".git/config"): - m = r.match(l.strip()) - if m: - return m.groups() - raise Exception("Unable to get project name from git") - - -def get_project_name(): - for var in ("ZUUL_PROJECT", "GERRIT_PROJECT"): - if var in os.environ: - return os.environ[var].split("/") - return get_name_from_git() - - -def main(): - statuses = [] - org, project = get_project_name() - - base = os.environ.get("BASE") - if base: - base_jobs_dir = os.path.join(base, "new", project) - else: - base_jobs_dir = os.path.realpath(".") - - rally_root = "/home/rally/rally/" - if not os.path.exists(rally_root): - rally_root = os.environ["BASE"] + "/new/rally/" - - jobs_dir = os.path.join(base_jobs_dir, "rally-jobs") - if not os.path.exists(jobs_dir): - # fallback to legacy path - jobs_dir = os.path.join(base_jobs_dir, "rally-scenarios") - if not os.path.exists(jobs_dir): - raise Exception("Rally jobs directory does not exist.") - - for directory in ("plugins", "extra"): - dst = os.path.expanduser("~/.rally/%s" % directory) - try: - shutil.copytree(os.path.join(jobs_dir, directory), dst) - except OSError as e: - if e.errno != errno.EEXIST: - raise - - scenario = os.environ.get("RALLY_SCENARIO", project).rsplit(".", 1) - scenario_name = scenario.pop(0) - scenario_ext = scenario.pop() if scenario else "yaml" - print("Processing scenario %s" % scenario_name) - - for fname in os.listdir(jobs_dir): - print("Processing %s" % fname) - if fname.startswith(scenario_name): - tags = fname[len(scenario_name):-len(scenario_ext) - 1].split("_") - statuses.append(run_task(os.path.join(jobs_dir, fname), tags)) - else: - print("Ignoring file %s" % fname) - print("Exit statuses: %r" % statuses) - template = utils.get_template("ci/index.html") - with open("rally-plot/extra/index.html", "w") as output: - output.write(template.render()) - return any(statuses) - - -if __name__ == "__main__": - sys.exit(main()) diff --git a/tests/functional/__init__.py b/tests/functional/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/tests/functional/extra/fake_dir1/fake_plugin1.py b/tests/functional/extra/fake_dir1/fake_plugin1.py deleted file mode 100644 index 4d2c1a49..00000000 --- a/tests/functional/extra/fake_dir1/fake_plugin1.py +++ /dev/null @@ -1,24 +0,0 @@ -# Copyright 2015: 
Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally.task import scenario - - -@scenario.configure(name="FakeScenarioPlugin1.list") -class FakeScenarioPlugin1(scenario.Scenario): - - def run(self): - """Sample fake scenario.""" - pass diff --git a/tests/functional/extra/fake_dir2/fake_plugin2.py b/tests/functional/extra/fake_dir2/fake_plugin2.py deleted file mode 100644 index e380842c..00000000 --- a/tests/functional/extra/fake_dir2/fake_plugin2.py +++ /dev/null @@ -1,24 +0,0 @@ -# Copyright 2015: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally.task import scenario - - -@scenario.configure(name="FakeScenarioPlugin2.list") -class FakeScenarioPlugin2(scenario.Scenario): - - def run(self): - """Sample fake scenario.""" - pass diff --git a/tests/functional/extra/test_fake_scenario.json b/tests/functional/extra/test_fake_scenario.json deleted file mode 100644 index e831ae7e..00000000 --- a/tests/functional/extra/test_fake_scenario.json +++ /dev/null @@ -1,18 +0,0 @@ -{ - "FakeScenarioPlugin1.list": [ - { - "runner": { - "type": "constant", - "times": 5 - } - } - ], - "FakeScenarioPlugin2.list": [ - { - "runner": { - "type": "constant", - "times": 5 - } - } - ] -} \ No newline at end of file diff --git a/tests/functional/test_certification_task.py b/tests/functional/test_certification_task.py deleted file mode 100644 index 5386f649..00000000 --- a/tests/functional/test_certification_task.py +++ /dev/null @@ -1,39 +0,0 @@ -# Copyright 2014: Mirantis Inc. -# Copyright 2014: Catalyst IT Ltd. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
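-
-# NOTE(editor): this test only validates the certification task definition
-# ("rally task validate"); it does not run the task against a live cloud.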
- -import os -import traceback -import unittest - -from tests.functional import utils - - -class TestCertificationTask(unittest.TestCase): - - def test_task_samples_is_valid(self): - rally = utils.Rally() - full_path = os.path.join( - os.path.dirname(__file__), os.pardir, os.pardir, - "certification", "openstack") - task_path = os.path.join(full_path, "task.yaml") - args_path = os.path.join(full_path, "task_arguments.yaml") - - try: - rally("task validate --task %s --task-args-file %s" % (task_path, - args_path)) - except Exception: - print(traceback.format_exc()) - self.assertTrue(False, "Wrong task config %s" % full_path) diff --git a/tests/functional/test_cli_deployment.py b/tests/functional/test_cli_deployment.py deleted file mode 100644 index d8c27289..00000000 --- a/tests/functional/test_cli_deployment.py +++ /dev/null @@ -1,153 +0,0 @@ -# Copyright 2013: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import json -import re -import unittest - -from tests.functional import utils - - -class DeploymentTestCase(unittest.TestCase): - - def setUp(self): - super(DeploymentTestCase, self).setUp() - - def test_create_fromenv_list_show(self): - rally = utils.Rally() - rally.env.update(utils.TEST_ENV) - rally("deployment create --name t_create_env --fromenv") - self.assertIn("t_create_env", rally("deployment list")) - self.assertIn(utils.TEST_ENV["OS_AUTH_URL"], - rally("deployment show")) - - def test_create_fromfile(self): - rally = utils.Rally() - rally.env.update(utils.TEST_ENV) - rally("deployment create --name t_create_env --fromenv") - with open("/tmp/.tmp.deployment", "w") as f: - f.write(rally("deployment config")) - rally("deployment create --name t_create_file " - "--filename /tmp/.tmp.deployment") - self.assertIn("t_create_file", rally("deployment list")) - - def test_config(self): - rally = utils.Rally() - rally.env.update(utils.TEST_ENV) - rally("deployment create --name t_create_env --fromenv") - config = json.loads(rally("deployment config")) - self.assertIn("creds", config) - self.assertIn("openstack", config["creds"]) - oscreds = config["creds"]["openstack"] - self.assertEqual(utils.TEST_ENV["OS_USERNAME"], - oscreds["admin"]["username"]) - self.assertEqual(utils.TEST_ENV["OS_PASSWORD"], - oscreds["admin"]["password"]) - if "project_name" in oscreds["admin"]: - # keystone v3 - self.assertEqual(utils.TEST_ENV["OS_TENANT_NAME"], - oscreds["admin"]["project_name"]) - else: - # keystone v2 - self.assertEqual(utils.TEST_ENV["OS_TENANT_NAME"], - oscreds["admin"]["tenant_name"]) - self.assertEqual(utils.TEST_ENV["OS_AUTH_URL"], - oscreds["auth_url"]) - - def test_destroy(self): - rally = utils.Rally() - rally.env.update(utils.TEST_ENV) - rally("deployment create --name t_create_env --fromenv") - self.assertIn("t_create_env", rally("deployment list")) - rally("deployment destroy") - self.assertNotIn("t_create_env", rally("deployment list")) - - def test_check_success(self): - rally = utils.Rally() - self.assertTrue(rally("deployment check")) - - 
def test_check_fail(self): - rally = utils.Rally() - rally.env.update(utils.TEST_ENV) - rally("deployment create --name t_create_env --fromenv") - self.assertRaises(utils.RallyCliError, rally, "deployment check") - - def test_check_debug(self): - rally = utils.Rally() - rally.env.update(utils.TEST_ENV) - rally("deployment create --name t_create_env --fromenv") - config = json.loads(rally("deployment config")) - config["creds"]["openstack"]["admin"]["password"] = "fakepassword" - file = utils.JsonTempFile(config) - rally("deployment create --name t_create_file_debug " - "--filename %s" % file.filename) - self.assertIn("t_create_file_debug", rally("deployment list")) - self.assertEqual(config, - json.loads(rally("deployment config"))) - self.assertRaises(utils.RallyCliError, rally, "deployment check") - - try: - rally("--debug deployment check") - except utils.RallyCliError as e: - self.assertIn( - "[-] Unable to authenticate for user %(username)s in" - " project %(tenant_name)s" % - {"username": utils.TEST_ENV["OS_USERNAME"], - "tenant_name": utils.TEST_ENV["OS_TENANT_NAME"]}, - str(e)) - self.assertIn( - "AuthenticationFailed: Failed to authenticate to %(auth_url)s" - " for user '%(username)s' in project '%(tenant_name)s'" % - {"auth_url": utils.TEST_ENV["OS_AUTH_URL"], - "username": utils.TEST_ENV["OS_USERNAME"], - "tenant_name": utils.TEST_ENV["OS_TENANT_NAME"]}, - str(e)) - else: - self.fail("rally deployment fails to raise error for wrong" - " authentication info") - - def test_recreate(self): - rally = utils.Rally() - rally.env.update(utils.TEST_ENV) - rally("deployment create --name t_create_env --fromenv") - rally("deployment recreate --deployment t_create_env") - self.assertIn("t_create_env", rally("deployment list")) - - def test_recreate_from_file(self): - rally = utils.Rally() - rally.env.update(utils.TEST_ENV) - rally("deployment create --name t_create_env --fromenv") - config = json.loads(rally("deployment config")) - config["creds"]["openstack"]["auth_url"] = "http://foo/" - file = utils.JsonTempFile(config) - rally("deployment recreate --deployment t_create_env " - "--filename %s" % file.filename) - self.assertIn("t_create_env", rally("deployment list")) - self.assertEqual(config, - json.loads(rally("deployment config"))) - self.assertIn("http://foo/", rally("deployment show")) - - def test_use(self): - rally = utils.Rally() - rally.env.update(utils.TEST_ENV) - output = rally( - "deployment create --name t_create_env1 --fromenv") - uuid = re.search(r"Using deployment: (?P[0-9a-f\-]{36})", - output).group("uuid") - rally("deployment create --name t_create_env2 --fromenv") - rally("deployment use --deployment %s" % uuid) - current_deployment = utils.get_global("RALLY_DEPLOYMENT", - rally.env) - self.assertEqual(uuid, current_deployment) diff --git a/tests/functional/test_cli_plugin.py b/tests/functional/test_cli_plugin.py deleted file mode 100644 index e71f8254..00000000 --- a/tests/functional/test_cli_plugin.py +++ /dev/null @@ -1,75 +0,0 @@ -# Copyright 2015: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -import unittest - -from tests.functional import utils - - -class PluginTestCase(unittest.TestCase): - - def setUp(self): - super(PluginTestCase, self).setUp() - - def test_show_one(self): - rally = utils.Rally() - result = rally("plugin show Dummy.dummy") - self.assertIn("NAME", result) - self.assertIn("NAMESPACE", result) - self.assertIn("Dummy.dummy", result) - self.assertIn("MODULE", result) - - def test_show_multiple(self): - rally = utils.Rally() - result = rally("plugin show Dummy") - self.assertIn("Multiple plugins found:", result) - self.assertIn("Dummy.dummy", result) - self.assertIn("Dummy.dummy_exception", result) - self.assertIn("Dummy.dummy_random_fail_in_atomic", result) - - def test_show_not_found(self): - rally = utils.Rally() - name = "Dummy666666" - result = rally("plugin show %s" % name) - self.assertIn("There is no plugin: %s" % name, result) - - def test_show_not_found_in_specific_namespace(self): - rally = utils.Rally() - name = "Dummy" - namespace = "non_existing" - result = rally( - "plugin show --name %(name)s --namespace %(namespace)s" - % {"name": name, "namespace": namespace}) - self.assertIn( - "There is no plugin: %(name)s in %(namespace)s namespace" - % {"name": name, "namespace": namespace}, - result) - - def test_list(self): - rally = utils.Rally() - result = rally("plugin list Dummy") - self.assertIn("Dummy.dummy", result) - self.assertIn("Dummy.dummy_exception", result) - self.assertIn("Dummy.dummy_random_fail_in_atomic", result) - - def test_list_not_found_namespace(self): - rally = utils.Rally() - result = rally("plugin list --namespace some") - self.assertIn("There is no plugin namespace: some", result) - - def test_list_not_found_name(self): - rally = utils.Rally() - result = rally("plugin list Dummy2222") - self.assertIn("There is no plugin: Dummy2222", result) diff --git a/tests/functional/test_cli_task.py b/tests/functional/test_cli_task.py deleted file mode 100644 index 587192f9..00000000 --- a/tests/functional/test_cli_task.py +++ /dev/null @@ -1,1322 +0,0 @@ -# Copyright 2013: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
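-
-# NOTE(editor): the sample-config helpers below produce the two supported
-# task formats: the flat v1 mapping of scenario name to workloads, and the
-# "version: 2" format with explicit subtasks/workloads sections.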
- -import json -import os -import re -import threading -import time -import unittest - -import mock - -from rally import api -from tests.functional import utils - - -FAKE_TASK_UUID = "87ab639d-4968-4638-b9a1-07774c32484a" - - -class TaskTestCase(unittest.TestCase): - - def _get_sample_task_config(self): - return { - "Dummy.dummy_random_fail_in_atomic": [ - { - "runner": { - "type": "constant", - "times": 100, - "concurrency": 5 - } - } - ] - } - - def _get_sample_task_config_v2(self): - return { - "version": 2, - "title": "Dummy task", - "tags": ["dummy", "functional_test"], - "subtasks": [ - { - "title": "first-subtask", - "group": "Dummy group", - "description": "The first subtask in dummy task", - "tags": ["dummy", "functional_test"], - "run_in_parallel": False, - "workloads": [{ - "name": "Dummy.dummy", - "args": { - "sleep": 0 - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - }] - }, - { - "title": "second-subtask", - "group": "Dummy group", - "description": "The second subtask in dummy task", - "tags": ["dummy", "functional_test"], - "run_in_parallel": False, - "workloads": [{ - "name": "Dummy.dummy", - "args": { - "sleep": 1 - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - }] - } - ] - } - - def _get_deployment_uuid(self, output): - return re.search( - r"Using deployment: (?P[0-9a-f\-]{36})", - output).group("uuid") - - def _get_task_uuid(self, output): - return re.search( - r"\trally task results (?P[0-9a-f\-]{36})", - output).group("uuid") - - def test_status(self): - rally = utils.Rally() - cfg = self._get_sample_task_config() - config = utils.TaskConfig(cfg) - rally("task start --task %s" % config.filename) - self.assertIn("finished", rally("task status")) - - def test_detailed(self): - rally = utils.Rally() - cfg = self._get_sample_task_config() - config = utils.TaskConfig(cfg) - rally("task start --task %s" % config.filename) - detailed = rally("task detailed") - self.assertIn("Dummy.dummy_random_fail_in_atomic", detailed) - self.assertIn("dummy_fail_test (x2)", detailed) - detailed_iterations_data = rally("task detailed --iterations-data") - self.assertIn(". 
dummy_fail_test (x2)", detailed_iterations_data) - self.assertNotIn("n/a", detailed_iterations_data) - - def test_detailed_with_errors(self): - rally = utils.Rally() - cfg = { - "Dummy.dummy_exception": [ - { - "runner": { - "type": "constant", - "times": 1, - "concurrency": 1 - } - } - ] - } - config = utils.TaskConfig(cfg) - output = rally("task start --task %s" % config.filename) - uuid = re.search( - r"(?P[0-9a-f\-]{36}): started", output).group("uuid") - output = rally("task detailed") - self.assertIn("Task %s has 1 error(s)" % uuid, output) - - def test_detailed_no_atomic_actions(self): - rally = utils.Rally() - cfg = { - "Dummy.dummy": [ - { - "runner": { - "type": "constant", - "times": 100, - "concurrency": 5 - } - } - ] - } - config = utils.TaskConfig(cfg) - rally("task start --task %s" % config.filename) - detailed = rally("task detailed") - self.assertIn("Dummy.dummy", detailed) - detailed_iterations_data = rally("task detailed --iterations-data") - self.assertNotIn("n/a", detailed_iterations_data) - - def test_start_with_empty_config(self): - rally = utils.Rally() - config = utils.TaskConfig(None) - with self.assertRaises(utils.RallyCliError) as err: - rally("task start --task %s" % config.filename) - self.assertIn("Input task is empty", err.exception.output) - - def test_results(self): - rally = utils.Rally() - cfg = self._get_sample_task_config() - config = utils.TaskConfig(cfg) - rally("task start --task %s" % config.filename) - self.assertIn("result", rally("task results")) - - def test_results_with_wrong_task_id(self): - rally = utils.Rally() - self.assertRaises(utils.RallyCliError, - rally, "task results --uuid %s" % FAKE_TASK_UUID) - - def test_import_results(self): - rally = utils.Rally() - cfg = self._get_sample_task_config() - config = utils.TaskConfig(cfg) - rally("task start --task %s" % config.filename) - json_report = rally.gen_report_path(extension="json") - with open(json_report, "w+") as f: - f.write(rally("task results")) - import_print = rally("task import --file %s" % json_report) - self.assertIn("successfully", import_print) - task_uuid = re.search("UUID:\s([a-z0-9\-]+)", import_print).group(1) - self.assertIn("Dummy.dummy_random_fail_in_atomic", - rally("task results --uuid %s" % task_uuid)) - - def test_abort_with_wrong_task_id(self): - rally = utils.Rally() - self.assertRaises(utils.RallyCliError, - rally, "task abort --uuid %s" % FAKE_TASK_UUID) - - def test_delete_with_wrong_task_id(self): - rally = utils.Rally() - self.assertRaises(utils.RallyCliError, - rally, "task delete --uuid %s" % FAKE_TASK_UUID) - - def test_detailed_with_wrong_task_id(self): - rally = utils.Rally() - self.assertRaises(utils.RallyCliError, - rally, "task detailed --uuid %s" % FAKE_TASK_UUID) - - def test_report_with_wrong_task_id(self): - rally = utils.Rally() - self.assertRaises(utils.RallyCliError, - rally, "task report --tasks %s" % FAKE_TASK_UUID) - self.assertRaises(utils.RallyCliError, - rally, "task report --uuid %s" % FAKE_TASK_UUID) - - def test_sla_check_with_wrong_task_id(self): - rally = utils.Rally() - self.assertRaises(utils.RallyCliError, - rally, "task sla-check --uuid %s" % FAKE_TASK_UUID) - - def test_status_with_wrong_task_id(self): - rally = utils.Rally() - self.assertRaises(utils.RallyCliError, - rally, "task status --uuid %s" % FAKE_TASK_UUID) - - def _assert_html_report_libs_are_embedded(self, file_path, expected=True): - - embedded_signatures = ["Copyright (c) 2011-2014 Novus Partners, Inc.", - "AngularJS v1.3.3", - "Copyright (c) 2010-2015, Michael 
Bostock"] - external_signatures = ["