Use ruff
Change-Id: I4f707a2f016aa039930f8a64179562edbeb6170a
Signed-off-by: Stephen Finucane <stephenfin@redhat.com>
@@ -18,13 +18,9 @@ repos:
    hooks:
      - id: hacking
        additional_dependencies: []
  - repo: https://github.com/PyCQA/bandit
    rev: 1.8.6
  - repo: https://github.com/astral-sh/ruff-pre-commit
    rev: v0.14.0
    hooks:
      - id: bandit
        args: ['-x', 'tests']
  - repo: https://github.com/asottile/pyupgrade
    rev: v3.20.0
    hooks:
      - id: pyupgrade
        args: [--py310-plus]
      - id: ruff-check
        args: ['--fix', '--unsafe-fixes']
      - id: ruff-format

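The hunk above swaps the hacking and pyupgrade hooks for ruff's two hooks: `ruff-check` applies lint autofixes (here with `--fix --unsafe-fixes`) and `ruff-format` reformats the tree, which is what produces the mechanical rewrites in the rest of this diff. As a hedged illustration (editorial, not part of the commit), a typical rewrite of the kind pyupgrade handled and ruff's pyupgrade-style (UP) rules now cover:

    # Hedged sketch; the before/after pair mirrors the Sphinx conf.py change
    # in the next section.
    project = 'oslo.cache'
    htmlhelp_basename = '%sdoc' % project   # before: printf-style formatting
    htmlhelp_basename = f'{project}doc'     # after: an f-string
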
@@ -34,9 +34,7 @@ openstackdocs_bug_tag = ''
# sphinxcontrib.apidoc options
apidoc_module_dir = '../../oslo_cache'
apidoc_output_dir = 'reference/api'
apidoc_excluded_paths = [
    'tests'
]
apidoc_excluded_paths = ['tests']

# The suffix of source filenames.
source_suffix = '.rst'
@@ -70,16 +68,19 @@ html_theme = 'openstackdocs'
# html_static_path = ['static']

# Output file base name for HTML help builder.
htmlhelp_basename = '%sdoc' % project
htmlhelp_basename = f'{project}doc'

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
    ('index',
     '%s.tex' % project,
     '%s Documentation' % project,
     'OpenStack Foundation', 'manual'),
    (
        'index',
        f'{project}.tex',
        f'{project} Documentation',
        'OpenStack Foundation',
        'manual',
    ),
]

intersphinx_mapping = {

@@ -11,8 +11,13 @@
# under the License.


from oslo_cache.core import *  # noqa

from oslo_cache.core import (
    configure,
    configure_cache_region,
    create_region,
    get_memoization_decorator,
    NO_VALUE,
)

__all__ = [
    'configure',

@@ -14,7 +14,9 @@
# under the License.

"""Thread-safe connection pool for python-binary-memcached."""

import debtcollector

try:
    import eventlet
except ImportError:
@@ -28,7 +30,8 @@ LOG = log.getLogger(__name__)

if eventlet and eventlet.patcher.is_monkey_patched('thread'):
    debtcollector.deprecate(
        "Eventlet support is deprecated and will be removed.")
        "Eventlet support is deprecated and will be removed."
    )


class _BMemcacheClient(bmemcached.Client):
@@ -38,6 +41,7 @@ class _BMemcacheClient(bmemcached.Client):
    methods overloaded by threading.local so we can reuse clients in
    different threads
    """

    __delattr__ = object.__delattr__
    __getattribute__ = object.__getattribute__
    __setattr__ = object.__setattr__

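For orientation (a hedged sketch, not code from this commit): the memcache client classes involved here inherit from threading.local, which would give every thread its own private view of the client's attributes. Rebinding the attribute hooks back to object's implementations restores ordinary shared-instance semantics, so one client can travel through the pool across threads:

    import threading

    class _LocalClient(threading.local):
        """Stand-in for a client class that subclasses threading.local."""

    class _SharedClient(_LocalClient):
        # Restore the default attribute behaviour that threading.local
        # replaces, so state set in one thread is visible from all threads.
        __delattr__ = object.__delattr__
        __getattribute__ = object.__getattribute__
        __setattr__ = object.__setattr__
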
@@ -23,6 +23,7 @@ import threading
import time

import debtcollector

try:
    import eventlet
except ImportError:
@@ -39,7 +40,8 @@ LOG = log.getLogger(__name__)

if eventlet and eventlet.patcher.is_monkey_patched('thread'):
    debtcollector.deprecate(
        "Eventlet support is deprecated and will be removed.")
        "Eventlet support is deprecated and will be removed."
    )


class _MemcacheClient(memcache.Client):
@@ -49,6 +51,7 @@ class _MemcacheClient(memcache.Client):
    methods overloaded by threading.local so we can reuse clients in
    different threads
    """

    __delattr__ = object.__delattr__
    __getattribute__ = object.__getattribute__
    __setattr__ = object.__setattr__
@@ -75,6 +78,7 @@ class ConnectionPool(queue.Queue):
    This class implements the basic connection pool logic as an abstract base
    class.
    """

    def __init__(self, maxsize, unused_timeout, conn_get_timeout=None):
        """Initialize the connection pool.

@@ -118,7 +122,8 @@ class ConnectionPool(queue.Queue):
                break
            except Exception as e:
                self._do_log(
                    LOG.warning, "Unable to cleanup a connection: %s", e)
                    LOG.warning, "Unable to cleanup a connection: %s", e
                )

    def _create_connection(self):
        """Returns a connection instance.
@@ -163,9 +168,12 @@ class ConnectionPool(queue.Queue):
            conn = self.get(timeout=self._connection_get_timeout)
        except queue.Empty:
            raise exception.QueueEmpty(
                _('Unable to get a connection from pool id %(id)s after '
                  '%(seconds)s seconds.') %
                {'id': id(self), 'seconds': self._connection_get_timeout})
                _(
                    'Unable to get a connection from pool id %(id)s after '
                    '%(seconds)s seconds.'
                )
                % {'id': id(self), 'seconds': self._connection_get_timeout}
            )
        self._trace_logger('Acquired connection %s', id(conn))
        try:
            yield conn
@@ -218,10 +226,12 @@ class ConnectionPool(queue.Queue):
            pass

    def _put(self, conn):
        self.queue.append(_PoolItem(
            ttl=time.time() + self._unused_timeout,
            connection=conn,
        ))
        self.queue.append(
            _PoolItem(
                ttl=time.time() + self._unused_timeout,
                connection=conn,
            )
        )
        self._acquired -= 1


@@ -234,10 +244,12 @@ class MemcacheClientPool(ConnectionPool):
        self._arguments = {
            'dead_retry': arguments.get('dead_retry', 5 * 60),
            'socket_timeout': arguments.get('socket_timeout', 3.0),
            'server_max_value_length':
                arguments.get('server_max_value_length'),
            'server_max_value_length': arguments.get(
                'server_max_value_length'
            ),
            'flush_on_reconnect': arguments.get(
                'pool_flush_on_reconnect', False),
                'pool_flush_on_reconnect', False
            ),
        }
        # NOTE(morganfainberg): The host objects expect an int for the
        # deaduntil value. Initialize this at 0 for each host with 0 indicating
@@ -284,7 +296,9 @@ class MemcacheClientPool(ConnectionPool):
                    self._hosts_deaduntil[i] = host.deaduntil
                    self._debug_logger(
                        'Marked host %s dead until %s',
                        self.urls[i], host.deaduntil)
                        self.urls[i],
                        host.deaduntil,
                    )
                else:
                    self._hosts_deaduntil[i] = 0
        finally:

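For readers skimming the reformatted acquire/_put pair above, here is a hedged simplification (assumed names, not the pool's real API) of the underlying idea: a queue-backed pool where every returned connection is stamped with a fresh time-to-live so idle connections can later be expired:

    import contextlib
    import queue
    import time

    class SketchPool:
        """Minimal sketch of the TTL-stamped pool logic, using a plain Queue."""

        def __init__(self, unused_timeout, conn_get_timeout):
            self._queue = queue.Queue()
            self._unused_timeout = unused_timeout
            self._conn_get_timeout = conn_get_timeout

        @contextlib.contextmanager
        def acquire(self):
            try:
                ttl, conn = self._queue.get(timeout=self._conn_get_timeout)
            except queue.Empty:
                # Stands in for oslo.cache's exception.QueueEmpty.
                raise TimeoutError('no connection available')
            try:
                yield conn
            finally:
                # Mirror _put(): return the connection with a new TTL.
                self._queue.put((time.time() + self._unused_timeout, conn))
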
@@ -19,253 +19,385 @@ _DEFAULT_BACKEND = 'dogpile.cache.null'

FILE_OPTIONS = {
    'cache': [
        cfg.StrOpt('config_prefix', default='cache.oslo',
                   help='Prefix for building the configuration dictionary '
                        'for the cache region. This should not need to be '
                        'changed unless there is another dogpile.cache '
                        'region with the same configuration name.'),
        cfg.IntOpt('expiration_time', default=600,
                   min=1,
                   help='Default TTL, in seconds, for any cached item in '
                        'the dogpile.cache region. This applies to any '
                        'cached method that doesn\'t have an explicit '
                        'cache expiration time defined for it.'),
        cfg.IntOpt('backend_expiration_time',
                   min=1,
                   help='Expiration time in cache backend to purge '
                        'expired records automatically. This should be '
                        'greater than expiration_time and all cache_time '
                        'options'),
        cfg.StrOpt(
            'config_prefix',
            default='cache.oslo',
            help=(
                'Prefix for building the configuration dictionary '
                'for the cache region. This should not need to be '
                'changed unless there is another dogpile.cache '
                'region with the same configuration name.'
            ),
        ),
        cfg.IntOpt(
            'expiration_time',
            default=600,
            min=1,
            help=(
                'Default TTL, in seconds, for any cached item in '
                'the dogpile.cache region. This applies to any '
                'cached method that doesn\'t have an explicit '
                'cache expiration time defined for it.'
            ),
        ),
        cfg.IntOpt(
            'backend_expiration_time',
            min=1,
            help=(
                'Expiration time in cache backend to purge '
                'expired records automatically. This should be '
                'greater than expiration_time and all cache_time '
                'options'
            ),
        ),
        # NOTE(morganfainberg): It is recommended that either Redis or
        # Memcached are used as the dogpile backend for real workloads. To
        # prevent issues with the memory cache ending up in "production"
        # unintentionally, we register a no-op as the default caching backend.
        cfg.StrOpt('backend', default=_DEFAULT_BACKEND,
                   choices=['oslo_cache.memcache_pool',
                            'oslo_cache.dict',
                            'oslo_cache.etcd3gw',
                            'dogpile.cache.pymemcache',
                            'dogpile.cache.memcached',
                            'dogpile.cache.pylibmc',
                            'dogpile.cache.bmemcached',
                            'dogpile.cache.dbm',
                            'dogpile.cache.redis',
                            'dogpile.cache.redis_sentinel',
                            'dogpile.cache.memory',
                            'dogpile.cache.memory_pickle',
                            'dogpile.cache.null'],
                   help='Cache backend module. For eventlet-based or '
                        'environments with hundreds of threaded servers, Memcache '
                        'with pooling (oslo_cache.memcache_pool) is recommended. '
                        'For environments with less than 100 threaded servers, '
                        'Memcached (dogpile.cache.memcached) or Redis '
                        '(dogpile.cache.redis) is recommended. Test environments '
                        'with a single instance of the server can use the '
                        'dogpile.cache.memory backend.'),
        cfg.MultiStrOpt('backend_argument', default=[], secret=True,
                        help='Arguments supplied to the backend module. '
                             'Specify this option once per argument to be '
                             'passed to the dogpile.cache backend. Example '
                             'format: "<argname>:<value>".'),
        cfg.ListOpt('proxies', default=[],
                    help='Proxy classes to import that will affect the way '
                         'the dogpile.cache backend functions. See the '
                         'dogpile.cache documentation on '
                         'changing-backend-behavior.'),
        cfg.BoolOpt('enabled', default=False,
                    help='Global toggle for caching.'),
        cfg.BoolOpt('debug_cache_backend', default=False,
                    help='Extra debugging from the cache backend (cache '
                         'keys, get/set/delete/etc calls). This is only '
                         'really useful if you need to see the specific '
                         'cache-backend get/set/delete calls with the '
                         'keys/values. Typically this should be left set '
                         'to false.'),
        cfg.ListOpt('memcache_servers', default=['localhost:11211'],
                    help='Memcache servers in the format of "host:port". '
                         'This is used by backends dependent on Memcached.'
                         'If ``dogpile.cache.memcached`` or '
                         '``oslo_cache.memcache_pool`` is used and a given '
                         'host refer to an IPv6 or a given domain refer to '
                         'IPv6 then you should prefix the given address with '
                         'the address family (``inet6``) '
                         '(e.g ``inet6:[::1]:11211``, '
                         '``inet6:[fd12:3456:789a:1::1]:11211``, '
                         '``inet6:[controller-0.internalapi]:11211``). '
                         'If the address family is not given then these '
                         'backends will use the default ``inet`` address '
                         'family which corresponds to IPv4'),
        cfg.IntOpt('memcache_dead_retry',
                   default=5 * 60,
                   help='Number of seconds memcached server is considered dead'
                        ' before it is tried again. (dogpile.cache.memcache and'
                        ' oslo_cache.memcache_pool backends only).'),
        cfg.FloatOpt('memcache_socket_timeout',
                     default=1.0,
                     help='Timeout in seconds for every call to a server.'
                          ' (dogpile.cache.memcache and oslo_cache.memcache_pool'
                          ' backends only).'),
        cfg.IntOpt('memcache_pool_maxsize',
                   default=10,
                   help='Max total number of open connections to every'
                        ' memcached server. (oslo_cache.memcache_pool backend'
                        ' only).'),
        cfg.IntOpt('memcache_pool_unused_timeout',
                   default=60,
                   help='Number of seconds a connection to memcached is held'
                        ' unused in the pool before it is closed.'
                        ' (oslo_cache.memcache_pool backend only).'),
        cfg.IntOpt('memcache_pool_connection_get_timeout',
                   default=10,
                   help='Number of seconds that an operation will wait to get '
                        'a memcache client connection.'),
        cfg.BoolOpt('memcache_pool_flush_on_reconnect',
                    default=False,
                    help='Global toggle if memcache will be flushed'
                         ' on reconnect.'
                         ' (oslo_cache.memcache_pool backend only).'),
        cfg.BoolOpt('memcache_sasl_enabled',
                    default=False,
                    help='Enable the SASL(Simple Authentication and Security'
                         'Layer) if the SASL_enable is true, else disable.'),
        cfg.StrOpt('memcache_username',
                   help='the user name for the memcached which SASL enabled'),
        cfg.StrOpt('memcache_password',
                   secret=True,
                   help='the password for the memcached which SASL enabled'),
        cfg.StrOpt('redis_server',
                   default='localhost:6379',
                   help='Redis server in the format of "host:port"'),
        cfg.IntOpt('redis_db',
                   default=0,
                   min=0,
                   help='Database id in Redis server'),
        cfg.StrOpt('redis_username',
                   help='the user name for redis'),
        cfg.StrOpt('redis_password',
                   secret=True,
                   help='the password for redis'),
        cfg.ListOpt('redis_sentinels',
                    default=['localhost:26379'],
                    help='Redis sentinel servers in the format of '
                         '"host:port"'),
        cfg.FloatOpt('redis_socket_timeout',
                     default=1.0,
                     help='Timeout in seconds for every call to a server.'
                          ' (dogpile.cache.redis and dogpile.cache.redis_sentinel '
                          'backends only).'),
        cfg.StrOpt('redis_sentinel_service_name',
                   default='mymaster',
                   help='Service name of the redis sentinel cluster.'),
        cfg.BoolOpt('tls_enabled',
                    default=False,
                    help='Global toggle for TLS usage when communicating with'
                         ' the caching servers. Currently supported by '
                         '``dogpile.cache.bmemcache``, '
                         '``dogpile.cache.pymemcache``, '
                         '``oslo_cache.memcache_pool``, '
                         '``dogpile.cache.redis`` and '
                         '``dogpile.cache.redis_sentinel``.'),
        cfg.StrOpt('tls_cafile',
                   default=None,
                   help='Path to a file of concatenated CA certificates in PEM'
                        ' format necessary to establish the caching servers\''
                        ' authenticity. If tls_enabled is False, this option is'
                        ' ignored.'),
        cfg.StrOpt('tls_certfile',
                   default=None,
                   help='Path to a single file in PEM format containing the'
                        ' client\'s certificate as well as any number of CA'
                        ' certificates needed to establish the certificate\'s'
                        ' authenticity. This file is only required when client side'
                        ' authentication is necessary. If tls_enabled is False,'
                        ' this option is ignored.'),
        cfg.StrOpt('tls_keyfile',
                   default=None,
                   help='Path to a single file containing the client\'s'
                        ' private key in. Otherwise the private key will be taken'
                        ' from the file specified in tls_certfile. If tls_enabled'
                        ' is False, this option is ignored.'),
        cfg.StrOpt('tls_allowed_ciphers',
                   default=None,
                   help='Set the available ciphers for sockets created with'
                        ' the TLS context. It should be a string in the OpenSSL'
                        ' cipher list format. If not specified, all OpenSSL enabled'
                        ' ciphers will be available. Currently supported by '
                        '``dogpile.cache.bmemcache``, '
                        '``dogpile.cache.pymemcache`` and '
                        '``oslo_cache.memcache_pool``.'),
        cfg.StrOpt(
            'backend',
            default=_DEFAULT_BACKEND,
            choices=[
                'oslo_cache.memcache_pool',
                'oslo_cache.dict',
                'oslo_cache.etcd3gw',
                'dogpile.cache.pymemcache',
                'dogpile.cache.memcached',
                'dogpile.cache.pylibmc',
                'dogpile.cache.bmemcached',
                'dogpile.cache.dbm',
                'dogpile.cache.redis',
                'dogpile.cache.redis_sentinel',
                'dogpile.cache.memory',
                'dogpile.cache.memory_pickle',
                'dogpile.cache.null',
            ],
            help=(
                'Cache backend module. For eventlet-based or '
                'environments with hundreds of threaded servers, Memcache '
                'with pooling (oslo_cache.memcache_pool) is recommended. '
                'For environments with less than 100 threaded servers, '
                'Memcached (dogpile.cache.memcached) or Redis '
                '(dogpile.cache.redis) is recommended. Test environments '
                'with a single instance of the server can use the '
                'dogpile.cache.memory backend.'
            ),
        ),
        cfg.MultiStrOpt(
            'backend_argument',
            default=[],
            secret=True,
            help=(
                'Arguments supplied to the backend module. '
                'Specify this option once per argument to be '
                'passed to the dogpile.cache backend. Example '
                'format: "<argname>:<value>".'
            ),
        ),
        cfg.ListOpt(
            'proxies',
            default=[],
            help=(
                'Proxy classes to import that will affect the way '
                'the dogpile.cache backend functions. See the '
                'dogpile.cache documentation on '
                'changing-backend-behavior.'
            ),
        ),
        cfg.BoolOpt(
            'enabled', default=False, help='Global toggle for caching.'
        ),
        cfg.BoolOpt(
            'debug_cache_backend',
            default=False,
            help=(
                'Extra debugging from the cache backend (cache '
                'keys, get/set/delete/etc calls). This is only '
                'really useful if you need to see the specific '
                'cache-backend get/set/delete calls with the '
                'keys/values. Typically this should be left set '
                'to false.'
            ),
        ),
        cfg.ListOpt(
            'memcache_servers',
            default=['localhost:11211'],
            help=(
                'Memcache servers in the format of "host:port". '
                'This is used by backends dependent on Memcached.'
                'If ``dogpile.cache.memcached`` or '
                '``oslo_cache.memcache_pool`` is used and a given '
                'host refer to an IPv6 or a given domain refer to '
                'IPv6 then you should prefix the given address with '
                'the address family (``inet6``) '
                '(e.g ``inet6:[::1]:11211``, '
                '``inet6:[fd12:3456:789a:1::1]:11211``, '
                '``inet6:[controller-0.internalapi]:11211``). '
                'If the address family is not given then these '
                'backends will use the default ``inet`` address '
                'family which corresponds to IPv4'
            ),
        ),
        cfg.IntOpt(
            'memcache_dead_retry',
            default=5 * 60,
            help=(
                'Number of seconds memcached server is considered dead '
                'before it is tried again. (dogpile.cache.memcache and '
                'oslo_cache.memcache_pool backends only).'
            ),
        ),
        cfg.FloatOpt(
            'memcache_socket_timeout',
            default=1.0,
            help=(
                'Timeout in seconds for every call to a server. '
                '(dogpile.cache.memcache and oslo_cache.memcache_pool '
                'backends only).'
            ),
        ),
        cfg.IntOpt(
            'memcache_pool_maxsize',
            default=10,
            help=(
                'Max total number of open connections to every '
                'memcached server. (oslo_cache.memcache_pool backend '
                'only).'
            ),
        ),
        cfg.IntOpt(
            'memcache_pool_unused_timeout',
            default=60,
            help=(
                'Number of seconds a connection to memcached is held '
                'unused in the pool before it is closed. '
                '(oslo_cache.memcache_pool backend only).'
            ),
        ),
        cfg.IntOpt(
            'memcache_pool_connection_get_timeout',
            default=10,
            help=(
                'Number of seconds that an operation will wait to get '
                'a memcache client connection.'
            ),
        ),
        cfg.BoolOpt(
            'memcache_pool_flush_on_reconnect',
            default=False,
            help=(
                'Global toggle if memcache will be flushed '
                'on reconnect. '
                '(oslo_cache.memcache_pool backend only).'
            ),
        ),
        cfg.BoolOpt(
            'memcache_sasl_enabled',
            default=False,
            help=(
                'Enable the SASL(Simple Authentication and Security'
                'Layer) if the SASL_enable is true, else disable.'
            ),
        ),
        cfg.StrOpt(
            'memcache_username',
            help='the user name for the memcached which SASL enabled',
        ),
        cfg.StrOpt(
            'memcache_password',
            secret=True,
            help='the password for the memcached which SASL enabled',
        ),
        cfg.StrOpt(
            'redis_server',
            default='localhost:6379',
            help='Redis server in the format of "host:port"',
        ),
        cfg.IntOpt(
            'redis_db', default=0, min=0, help='Database id in Redis server'
        ),
        cfg.StrOpt('redis_username', help='the user name for redis'),
        cfg.StrOpt(
            'redis_password', secret=True, help='the password for redis'
        ),
        cfg.ListOpt(
            'redis_sentinels',
            default=['localhost:26379'],
            help='Redis sentinel servers in the format of "host:port"',
        ),
        cfg.FloatOpt(
            'redis_socket_timeout',
            default=1.0,
            help=(
                'Timeout in seconds for every call to a server. '
                '(dogpile.cache.redis and dogpile.cache.redis_sentinel '
                'backends only).'
            ),
        ),
        cfg.StrOpt(
            'redis_sentinel_service_name',
            default='mymaster',
            help='Service name of the redis sentinel cluster.',
        ),
        cfg.BoolOpt(
            'tls_enabled',
            default=False,
            help=(
                'Global toggle for TLS usage when communicating with'
                ' the caching servers. Currently supported by '
                '``dogpile.cache.bmemcache``, '
                '``dogpile.cache.pymemcache``, '
                '``oslo_cache.memcache_pool``, '
                '``dogpile.cache.redis`` and '
                '``dogpile.cache.redis_sentinel``.'
            ),
        ),
        cfg.StrOpt(
            'tls_cafile',
            default=None,
            help=(
                'Path to a file of concatenated CA certificates in PEM '
                'format necessary to establish the caching servers\' '
                'authenticity. If tls_enabled is False, this option is '
                'ignored.'
            ),
        ),
        cfg.StrOpt(
            'tls_certfile',
            default=None,
            help=(
                'Path to a single file in PEM format containing the '
                'client\'s certificate as well as any number of CA '
                'certificates needed to establish the certificate\'s '
                'authenticity. This file is only required when client side '
                'authentication is necessary. If tls_enabled is False, '
                'this option is ignored.'
            ),
        ),
        cfg.StrOpt(
            'tls_keyfile',
            default=None,
            help=(
                'Path to a single file containing the client\'s '
                'private key in. Otherwise the private key will be taken '
                'from the file specified in tls_certfile. If tls_enabled '
                'is False, this option is ignored.'
            ),
        ),
        cfg.StrOpt(
            'tls_allowed_ciphers',
            default=None,
            help=(
                'Set the available ciphers for sockets created with '
                'the TLS context. It should be a string in the OpenSSL '
                'cipher list format. If not specified, all OpenSSL enabled '
                'ciphers will be available. Currently supported by '
                '``dogpile.cache.bmemcache``, '
                '``dogpile.cache.pymemcache`` and '
                '``oslo_cache.memcache_pool``.'
            ),
        ),
        cfg.BoolOpt(
            'enable_socket_keepalive',
            default=False,
            help="Global toggle for the socket keepalive of "
                 "dogpile's pymemcache backend"),
            help=(
                "Global toggle for the socket keepalive of "
                "dogpile's pymemcache backend"
            ),
        ),
        cfg.IntOpt(
            'socket_keepalive_idle',
            default=1,
            min=0,
            help='The time (in seconds) the connection needs to '
                 'remain idle before TCP starts sending keepalive probes. '
                 'Should be a positive integer most greater than zero.'),
            help=(
                'The time (in seconds) the connection needs to '
                'remain idle before TCP starts sending keepalive probes. '
                'Should be a positive integer most greater than zero.'
            ),
        ),
        cfg.IntOpt(
            'socket_keepalive_interval',
            default=1,
            min=0,
            help='The time (in seconds) between individual keepalive '
                 'probes. Should be a positive integer greater '
                 'than zero.'),
            help=(
                'The time (in seconds) between individual keepalive '
                'probes. Should be a positive integer greater '
                'than zero.'
            ),
        ),
        cfg.IntOpt(
            'socket_keepalive_count',
            default=1,
            min=0,
            help='The maximum number of keepalive probes TCP should '
                 'send before dropping the connection. Should be a '
                 'positive integer greater than zero.'),
            help=(
                'The maximum number of keepalive probes TCP should '
                'send before dropping the connection. Should be a '
                'positive integer greater than zero.'
            ),
        ),
        cfg.BoolOpt(
            'enable_retry_client',
            default=False,
            help='Enable retry client mechanisms to handle failure. '
                 'Those mechanisms can be used to wrap all kind of pymemcache '
                 'clients. The wrapper allows you to define how many attempts '
                 'to make and how long to wait between attempts.'),
            help=(
                'Enable retry client mechanisms to handle failure. '
                'Those mechanisms can be used to wrap all kind of pymemcache '
                'clients. The wrapper allows you to define how many attempts '
                'to make and how long to wait between attempts.'
            ),
        ),
        cfg.IntOpt(
            'retry_attempts',
            min=1,
            default=2,
            help='Number of times to attempt an action before failing.'),
            help='Number of times to attempt an action before failing.',
        ),
        cfg.FloatOpt(
            'retry_delay',
            default=0,
            help='Number of seconds to sleep between each attempt.'),
            help='Number of seconds to sleep between each attempt.',
        ),
        cfg.IntOpt(
            'hashclient_retry_attempts',
            min=1,
            default=2,
            help='Amount of times a client should be tried '
                 'before it is marked dead and removed from the pool in '
                 'the HashClient\'s internal mechanisms.'),
            help=(
                'Amount of times a client should be tried '
                'before it is marked dead and removed from the pool in '
                'the HashClient\'s internal mechanisms.'
            ),
        ),
        cfg.FloatOpt(
            'hashclient_retry_delay',
            default=1,
            help='Time in seconds that should pass between '
                 'retry attempts in the HashClient\'s internal mechanisms.'),
            help=(
                'Time in seconds that should pass between '
                'retry attempts in the HashClient\'s internal mechanisms.'
            ),
        ),
        cfg.FloatOpt(
            'dead_timeout',
            default=60,
            help='Time in seconds before attempting to add a node '
                 'back in the pool in the HashClient\'s internal mechanisms.'),
        cfg.BoolOpt('enforce_fips_mode',
                    default=False,
                    help='Global toggle for enforcing the OpenSSL FIPS mode. '
                         'This feature requires Python support. '
                         'This is available in Python 3.9 in all '
                         'environments and may have been backported to older '
                         'Python versions on select environments. If the Python '
                         'executable used does not support OpenSSL FIPS mode, '
                         'an exception will be raised. Currently supported by '
                         '``dogpile.cache.bmemcache``, '
                         '``dogpile.cache.pymemcache`` and '
                         '``oslo_cache.memcache_pool``.'),
            help=(
                'Time in seconds before attempting to add a node '
                'back in the pool in the HashClient\'s internal mechanisms.'
            ),
        ),
        cfg.BoolOpt(
            'enforce_fips_mode',
            default=False,
            help=(
                'Global toggle for enforcing the OpenSSL FIPS mode. '
                'This feature requires Python support. '
                'This is available in Python 3.9 in all '
                'environments and may have been backported to older '
                'Python versions on select environments. If the Python '
                'executable used does not support OpenSSL FIPS mode, '
                'an exception will be raised. Currently supported by '
                '``dogpile.cache.bmemcache``, '
                '``dogpile.cache.pymemcache`` and '
                '``oslo_cache.memcache_pool``.'
            ),
        ),
    ],
}

@@ -292,7 +424,8 @@ def set_defaults(conf, memcache_pool_flush_on_reconnect=False):

    cfg.set_defaults(
        FILE_OPTIONS,
        memcache_pool_flush_on_reconnect=memcache_pool_flush_on_reconnect)
        memcache_pool_flush_on_reconnect=memcache_pool_flush_on_reconnect,
    )


def list_opts():

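A hedged usage sketch tying the [cache] options above to the public entry points re-exported in oslo_cache/__init__.py earlier in this diff (the service wiring itself is assumed, not shown in the commit):

    from oslo_cache import core as cache
    from oslo_config import cfg

    CONF = cfg.CONF
    cache.configure(CONF)                       # registers the [cache] options
    region = cache.create_region()
    cache.configure_cache_region(CONF, region)  # builds the dogpile region
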
@@ -18,9 +18,7 @@ from dogpile.cache import api
from oslo_cache import core
from oslo_utils import timeutils

__all__ = [
    'DictCacheBackend'
]
__all__ = ['DictCacheBackend']

_NO_VALUE = core.NO_VALUE

@@ -13,14 +13,13 @@
# under the License.

"""dogpile.cache backend that uses etcd 3.x for storage"""

from dogpile.cache import api

from oslo_cache import core
from oslo_serialization import jsonutils

__all__ = [
    'Etcd3gwCacheBackend'
]
__all__ = ['Etcd3gwCacheBackend']

_NO_VALUE = core.NO_VALUE

@@ -41,9 +40,10 @@ class Etcd3gwCacheBackend(api.CacheBackend):
        self.timeout = int(arguments.get('timeout', self.DEFAULT_TIMEOUT))
        # module etcd3gw is only required when etcd3gw backend is used
        import etcd3gw
        self._client = etcd3gw.client(host=self.host,
                                      port=self.port,
                                      timeout=self.timeout)

        self._client = etcd3gw.client(
            host=self.host, port=self.port, timeout=self.timeout
        )

    def get(self, key):
        values = self._client.get(key, False)

@@ -63,14 +63,17 @@ class PooledMemcachedBackend(memcached_backend.MemcachedBackend):
    # Composed from GenericMemcachedBackend's and MemcacheArgs's __init__
    def __init__(self, arguments):
        super().__init__(arguments)
        if (arguments.get('tls_enabled', False) or
                arguments.get('sasl_enabled', False)):
            if (arguments.get('sasl_enabled', False) and
                    (arguments.get('username') is None or
                     arguments.get('password') is None)):
        if arguments.get('tls_enabled', False) or arguments.get(
            'sasl_enabled', False
        ):
            if arguments.get('sasl_enabled', False) and (
                arguments.get('username') is None
                or arguments.get('password') is None
            ):
                raise exception.ConfigurationError(
                    'username and password should be configured to use SASL '
                    'authentication.')
                    'authentication.'
                )
            if not _bmemcache_pool:
                raise ImportError("python-binary-memcached package is missing")
            self.client_pool = _bmemcache_pool.BMemcacheClientPool(
@@ -78,8 +81,9 @@ class PooledMemcachedBackend(memcached_backend.MemcachedBackend):
                arguments,
                maxsize=arguments.get('pool_maxsize', 10),
                unused_timeout=arguments.get('pool_unused_timeout', 60),
                conn_get_timeout=arguments.get('pool_connection_get_timeout',
                                               10),
                conn_get_timeout=arguments.get(
                    'pool_connection_get_timeout', 10
                ),
            )
        else:
            self.client_pool = _memcache_pool.MemcacheClientPool(
@@ -87,8 +91,9 @@ class PooledMemcachedBackend(memcached_backend.MemcachedBackend):
                arguments,
                maxsize=arguments.get('pool_maxsize', 10),
                unused_timeout=arguments.get('pool_unused_timeout', 60),
                conn_get_timeout=arguments.get('pool_connection_get_timeout',
                                               10),
                conn_get_timeout=arguments.get(
                    'pool_connection_get_timeout', 10
                ),
            )

    # Since all methods in backend just call one of methods of client, this

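A hedged configuration sketch of the SASL requirement this check enforces (option names come from _opts.py above; values are assumed, and it presumes the [cache] options have already been registered via oslo_cache.core.configure()):

    from oslo_config import cfg

    conf = cfg.CONF
    conf.set_override('backend', 'oslo_cache.memcache_pool', group='cache')
    conf.set_override('memcache_sasl_enabled', True, group='cache')
    # With SASL enabled, both credentials must be set, or the backend raises
    # ConfigurationError at pool construction time, per the check above.
    conf.set_override('memcache_username', 'cache_user', group='cache')
    conf.set_override('memcache_password', 'cache_secret', group='cache')
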
@@ -32,8 +32,10 @@ The library has special public value for nonexistent or expired keys called
:data:`NO_VALUE`. To use this value you should import it from oslo_cache.core::

    from oslo_cache import core

    NO_VALUE = core.NO_VALUE
"""

import socket
import ssl
import urllib.parse
@@ -67,6 +69,7 @@ _LOG = log.getLogger(__name__)

class _DebugProxy(proxy.ProxyBackend):
    """Extra Logging ProxyBackend."""

    # NOTE(morganfainberg): Pass all key/values through repr to ensure we have
    # a clean description of the information. Without use of repr, it might
    # be possible to run into encode/decode error(s). For logging/debugging
@@ -75,19 +78,25 @@ class _DebugProxy(proxy.ProxyBackend):

    def get(self, key):
        value = self.proxied.get(key)
        _LOG.debug('CACHE_GET: Key: "%(key)r" Value: "%(value)r"',
                   {'key': key, 'value': value})
        _LOG.debug(
            'CACHE_GET: Key: "%(key)r" Value: "%(value)r"',
            {'key': key, 'value': value},
        )
        return value

    def get_multi(self, keys):
        values = self.proxied.get_multi(keys)
        _LOG.debug('CACHE_GET_MULTI: "%(keys)r" Values: "%(values)r"',
                   {'keys': keys, 'values': values})
        _LOG.debug(
            'CACHE_GET_MULTI: "%(keys)r" Values: "%(values)r"',
            {'keys': keys, 'values': values},
        )
        return values

    def set(self, key, value):
        _LOG.debug('CACHE_SET: Key: "%(key)r" Value: "%(value)r"',
                   {'key': key, 'value': value})
        _LOG.debug(
            'CACHE_SET: Key: "%(key)r" Value: "%(value)r"',
            {'key': key, 'value': value},
        )
        return self.proxied.set(key, value)

    def set_multi(self, keys):
@@ -125,8 +134,10 @@ def _build_cache_config(conf):
        try:
            (argname, argvalue) = argument.split(':', 1)
        except ValueError:
            msg = ('Unable to build cache config-key. Expected format '
                   '"<argname>:<value>". Skipping unknown format: %s')
            msg = (
                'Unable to build cache config-key. Expected format '
                '"<argname>:<value>". Skipping unknown format: %s'
            )
            _LOG.error(msg, argument)
            continue

@@ -139,9 +150,11 @@ def _build_cache_config(conf):
        # take data and do not handle processing/validation as expected
        # directly makes for odd behaviors when wrapping dogpile.cache in
        # a library like oslo.cache
        if (conf.cache.backend
                in ('dogpile.cache.memcached', 'oslo_cache.memcache_pool') and
                argname == 'url'):
        if (
            conf.cache.backend
            in ('dogpile.cache.memcached', 'oslo_cache.memcache_pool')
            and argname == 'url'
        ):
            argvalue = argvalue.split(',')
        conf_dict[arg_key] = argvalue

@@ -152,21 +165,23 @@ def _build_cache_config(conf):
            netloc = conf.cache.redis_server
        else:
            if conf.cache.redis_username:
                netloc = '{}:{}@{}'.format(conf.cache.redis_username,
                                           conf.cache.redis_password,
                                           conf.cache.redis_server)
                netloc = f'{conf.cache.redis_username}:{conf.cache.redis_password}@{conf.cache.redis_server}'
            else:
                netloc = ':{}@{}'.format(conf.cache.redis_password,
                                         conf.cache.redis_server)
                netloc = (
                    f':{conf.cache.redis_password}@{conf.cache.redis_server}'
                )

        parts = urllib.parse.ParseResult(
            scheme=('rediss' if conf.cache.tls_enabled else 'redis'),
            netloc=netloc, path=str(conf.cache.redis_db), params='', query='',
            fragment='')
            netloc=netloc,
            path=str(conf.cache.redis_db),
            params='',
            query='',
            fragment='',
        )

        conf_dict.setdefault(
            f'{prefix}.arguments.url',
            urllib.parse.urlunparse(parts)
            f'{prefix}.arguments.url', urllib.parse.urlunparse(parts)
        )
        for arg in ('socket_timeout',):
            value = getattr(conf.cache, 'redis_' + arg)
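A hedged worked example of the URL this hunk builds (sample values assumed, mirroring the netloc/ParseResult logic above):

    import urllib.parse

    # Assumed sample values for redis_username/password/server/db.
    username, password, server, db = 'user', 'secret', 'localhost:6379', 0
    netloc = f'{username}:{password}@{server}'
    parts = urllib.parse.ParseResult(
        scheme='redis', netloc=netloc, path=str(db),
        params='', query='', fragment='',
    )
    print(urllib.parse.urlunparse(parts))  # redis://user:secret@localhost:6379/0
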
@@ -175,11 +190,13 @@ def _build_cache_config(conf):
        for arg in ('username', 'password', 'socket_timeout', 'db'):
            value = getattr(conf.cache, 'redis_' + arg)
            conf_dict[f'{prefix}.arguments.{arg}'] = value
        conf_dict[f'{prefix}.arguments.service_name'] = \
        conf_dict[f'{prefix}.arguments.service_name'] = (
            conf.cache.redis_sentinel_service_name
        )
        if conf.cache.redis_sentinels:
            conf_dict[f'{prefix}.arguments.sentinels'] = [
                _parse_sentinel(s) for s in conf.cache.redis_sentinels]
                _parse_sentinel(s) for s in conf.cache.redis_sentinels
            ]
    else:
        # NOTE(yorik-sar): these arguments will be used for memcache-related
        # backends. Use setdefault for url to support old-style setting through
@@ -204,13 +221,21 @@ def _build_cache_config(conf):
        #
        # The normal non-pooled clients connect explicitly on each use and
        # does not need the explicit flush_on_reconnect
        conf_dict.setdefault(f'{prefix}.arguments.url',
                             conf.cache.memcache_servers)
        conf_dict.setdefault(
            f'{prefix}.arguments.url', conf.cache.memcache_servers
        )

        for arg in ('dead_retry', 'socket_timeout', 'pool_maxsize',
                    'pool_unused_timeout', 'pool_connection_get_timeout',
                    'pool_flush_on_reconnect', 'sasl_enabled', 'username',
                    'password'):
        for arg in (
            'dead_retry',
            'socket_timeout',
            'pool_maxsize',
            'pool_unused_timeout',
            'pool_connection_get_timeout',
            'pool_flush_on_reconnect',
            'sasl_enabled',
            'username',
            'password',
        ):
            value = getattr(conf.cache, 'memcache_' + arg)
            conf_dict[f'{prefix}.arguments.{arg}'] = value

@@ -218,29 +243,40 @@ def _build_cache_config(conf):
        if conf.cache.expiration_time > conf.cache.backend_expiration_time:
            raise exception.ConfigurationError(
                "backend_expiration_time should not be smaller than "
                "expiration_time.")
        if conf.cache.backend in ('dogpile.cache.pymemcache',
                                  'dogpile.cache.memcached',
                                  'dogpile.cache.pylibmc',
                                  'oslo_cache.memcache_pool'):
            conf_dict[f'{prefix}.arguments.memcached_expire_time'] = \
                "expiration_time."
            )
        if conf.cache.backend in (
            'dogpile.cache.pymemcache',
            'dogpile.cache.memcached',
            'dogpile.cache.pylibmc',
            'oslo_cache.memcache_pool',
        ):
            conf_dict[f'{prefix}.arguments.memcached_expire_time'] = (
                conf.cache.backend_expiration_time
        elif conf.cache.backend in ('dogpile.cache.redis',
                                    'dogpile.cache.redis_sentinel'):
            conf_dict[f'{prefix}.arguments.redis_expiration_time'] = \
            )
        elif conf.cache.backend in (
            'dogpile.cache.redis',
            'dogpile.cache.redis_sentinel',
        ):
            conf_dict[f'{prefix}.arguments.redis_expiration_time'] = (
                conf.cache.backend_expiration_time
            )
        else:
            raise exception.ConfigurationError(
                "Enabling backend expiration is not supported by"
                "the %s driver", conf.cache.backend)
                "Enabling backend expiration is not supported bythe %s driver",
                conf.cache.backend,
            )

    if conf.cache.tls_enabled:
        if conf.cache.backend in ('dogpile.cache.bmemcache',
                                  'dogpile.cache.pymemcache',
                                  'oslo_cache.memcache_pool'):
        if conf.cache.backend in (
            'dogpile.cache.bmemcache',
            'dogpile.cache.pymemcache',
            'oslo_cache.memcache_pool',
        ):
            _LOG.debug('Oslo Cache TLS - CA: %s', conf.cache.tls_cafile)
            tls_context = ssl.create_default_context(
                cafile=conf.cache.tls_cafile)
                cafile=conf.cache.tls_cafile
            )

            if conf.cache.enforce_fips_mode:
                if hasattr(ssl, 'FIPS_mode'):
@@ -253,11 +289,13 @@ def _build_cache_config(conf):
                        "executable used to a version with FIPS mode support "
                        "or disable FIPS mode by setting "
                        "the '[cache] enforce_fips_mode' configuration option "
                        "to 'False'.")
                        "to 'False'."
                    )

            if conf.cache.tls_certfile is not None:
                _LOG.debug('Oslo Cache TLS - cert: %s',
                           conf.cache.tls_certfile)
                _LOG.debug(
                    'Oslo Cache TLS - cert: %s', conf.cache.tls_certfile
                )
                _LOG.debug('Oslo Cache TLS - key: %s', conf.cache.tls_keyfile)
                tls_context.load_cert_chain(
                    conf.cache.tls_certfile,
@@ -274,45 +312,53 @@ def _build_cache_config(conf):
            conf_dict[f'{prefix}.arguments.tls_context'] = tls_context

            # pass the value of tls_enabled to the backend
            conf_dict[f'{prefix}.arguments.tls_enabled'] = \
            conf_dict[f'{prefix}.arguments.tls_enabled'] = (
                conf.cache.tls_enabled
        elif conf.cache.backend in ('dogpile.cache.redis',
                                    'dogpile.cache.redis_sentinel'):
            )
        elif conf.cache.backend in (
            'dogpile.cache.redis',
            'dogpile.cache.redis_sentinel',
        ):
            if conf.cache.tls_allowed_ciphers is not None:
                raise exception.ConfigurationError(
                    "Limiting allowed ciphers is not supported by "
                    "the %s backend" % conf.cache.backend)
                    f"the {conf.cache.backend} backend"
                )
            if conf.cache.enforce_fips_mode:
                raise exception.ConfigurationError(
                    "FIPS mode is not supported by the %s backend" %
                    conf.cache.backend)
                    f"FIPS mode is not supported by the {conf.cache.backend} backend"
                )

            conn_kwargs = {}
            if conf.cache.tls_cafile is not None:
                _LOG.debug('Oslo Cache TLS - CA: %s', conf.cache.tls_cafile)
                conn_kwargs['ssl_ca_certs'] = conf.cache.tls_cafile
            if conf.cache.tls_certfile is not None:
                _LOG.debug('Oslo Cache TLS - cert: %s',
                           conf.cache.tls_certfile)
                _LOG.debug(
                    'Oslo Cache TLS - cert: %s', conf.cache.tls_certfile
                )
                _LOG.debug('Oslo Cache TLS - key: %s', conf.cache.tls_keyfile)
                conn_kwargs.update({
                    'ssl_certfile': conf.cache.tls_certfile,
                    'ssl_keyfile': conf.cache.tls_keyfile
                })
                conn_kwargs.update(
                    {
                        'ssl_certfile': conf.cache.tls_certfile,
                        'ssl_keyfile': conf.cache.tls_keyfile,
                    }
                )
            if conf.cache.backend == 'dogpile.cache.redis_sentinel':
                conn_kwargs.update({'ssl': True})
                conf_dict[f'{prefix}.arguments.connection_kwargs'] = \
                    conn_kwargs
                conf_dict[f'{prefix}.arguments.sentinel_kwargs'] = \
                conf_dict[f'{prefix}.arguments.connection_kwargs'] = (
                    conn_kwargs
                )
                conf_dict[f'{prefix}.arguments.sentinel_kwargs'] = conn_kwargs
            else:
                conf_dict[f'{prefix}.arguments.connection_kwargs'] = \
                conf_dict[f'{prefix}.arguments.connection_kwargs'] = (
                    conn_kwargs
                )
        else:
            raise exception.ConfigurationError(
                "TLS setting via [cache] tls_enabled is not supported by the "
                "%s backend. Set [cache] tls_enabled=False or use a different "
                "backend." % conf.cache.backend
                f"{conf.cache.backend} backend. Set [cache] tls_enabled=False or use a different "
                "backend."
            )

    # NOTE(hberaud): Pymemcache backend and redis backends support socket
@@ -325,32 +371,38 @@ def _build_cache_config(conf):
    if conf.cache.enable_socket_keepalive:
        if conf.cache.backend == 'dogpile.cache.pymemcache':
            import pymemcache

            socket_keepalive = pymemcache.KeepaliveOpts(
                idle=conf.cache.socket_keepalive_idle,
                intvl=conf.cache.socket_keepalive_interval,
                cnt=conf.cache.socket_keepalive_count)
                cnt=conf.cache.socket_keepalive_count,
            )
            # As with the TLS context above, the config dict below will be
            # consumed by dogpile.cache that will be used as a proxy between
            # oslo.cache and pymemcache.
            conf_dict[f'{prefix}.arguments.socket_keepalive'] = \
            conf_dict[f'{prefix}.arguments.socket_keepalive'] = (
                socket_keepalive
        elif conf.cache.backend in ('dogpile.cache.redis',
                                    'dogpile.cache.redis_sentinel'):
            )
        elif conf.cache.backend in (
            'dogpile.cache.redis',
            'dogpile.cache.redis_sentinel',
        ):
            socket_keepalive_options = {
                socket.TCP_KEEPIDLE: conf.cache.socket_keepalive_idle,
                socket.TCP_KEEPINTVL: conf.cache.socket_keepalive_interval,
                socket.TCP_KEEPCNT: conf.cache.socket_keepalive_count
                socket.TCP_KEEPCNT: conf.cache.socket_keepalive_count,
            }
            conf_dict.setdefault(
                f'{prefix}.arguments.connection_kwargs', {}
            ).update({
                'socket_keepalive': True,
                'socket_keepalive_options': socket_keepalive_options
            })
            ).update(
                {
                    'socket_keepalive': True,
                    'socket_keepalive_options': socket_keepalive_options,
                }
            )
        else:
            raise exception.ConfigurationError(
                "Socket keepalive is not supported by the %s backend"
                % conf.cache.backend
                f"Socket keepalive is not supported by the {conf.cache.backend} backend"
            )

    # NOTE(hberaud): The pymemcache library comes with retry mechanisms that
@@ -366,17 +418,19 @@ def _build_cache_config(conf):
            )
            raise exception.ConfigurationError(msg)
        import pymemcache

        conf_dict[f'{prefix}.arguments.enable_retry_client'] = True
        conf_dict[f'{prefix}.arguments.retry_attempts'] = \
        conf_dict[f'{prefix}.arguments.retry_attempts'] = (
            conf.cache.retry_attempts
        conf_dict[f'{prefix}.arguments.retry_delay'] = \
            conf.cache.retry_delay
        conf_dict[f'{prefix}.arguments.hashclient_retry_attempts'] = \
        )
        conf_dict[f'{prefix}.arguments.retry_delay'] = conf.cache.retry_delay
        conf_dict[f'{prefix}.arguments.hashclient_retry_attempts'] = (
            conf.cache.hashclient_retry_attempts
        conf_dict[f'{prefix}.arguments.hashclient_retry_delay'] = \
        )
        conf_dict[f'{prefix}.arguments.hashclient_retry_delay'] = (
            conf.cache.hashclient_retry_delay
        conf_dict[f'{prefix}.arguments.dead_timeout'] = \
            conf.cache.dead_timeout
        )
        conf_dict[f'{prefix}.arguments.dead_timeout'] = conf.cache.dead_timeout

    return conf_dict

@@ -456,7 +510,8 @@ def configure_cache_region(conf, region):
    """
    if not isinstance(region, dogpile.cache.CacheRegion):
        raise exception.ConfigurationError(
            _('region not type dogpile.cache.CacheRegion'))
            _('region not type dogpile.cache.CacheRegion')
        )

    if not region.is_configured:
        # NOTE(morganfainberg): this is how you tell if a region is configured.
@@ -464,8 +519,9 @@ def configure_cache_region(conf, region):
        # easier / less ugly.

        config_dict = _build_cache_config(conf)
        region.configure_from_config(config_dict,
                                     f'{conf.cache.config_prefix}.')
        region.configure_from_config(
            config_dict, f'{conf.cache.config_prefix}.'
        )

        if conf.cache.debug_cache_backend:
            region.wrap(_DebugProxy)
@@ -509,11 +565,13 @@ def _get_should_cache_fn(conf, group):
    :type group: string
    :returns: function reference
    """

    def should_cache(value):
        if not conf.cache.enabled:
            return False
        conf_group = getattr(conf, group)
        return getattr(conf_group, 'caching', True)

    return should_cache


@@ -538,9 +596,11 @@ def _get_expiration_time_fn(conf, group):
    :type group: string
    :rtype: function reference
    """

    def get_expiration_time():
        conf_group = getattr(conf, group)
        return getattr(conf_group, 'cache_time', None)

    return get_expiration_time


@@ -566,19 +626,21 @@ def get_memoization_decorator(conf, region, group, expiration_group=None):
        import oslo_cache.core

        MEMOIZE = oslo_cache.core.get_memoization_decorator(
            conf, region, group='group1')
            conf, region, group='group1'
        )


        @MEMOIZE
        def function(arg1, arg2):
            ...
        def function(arg1, arg2): ...


        ALTERNATE_MEMOIZE = oslo_cache.core.get_memoization_decorator(
            conf, region, group='group2', expiration_group='group3')
            conf, region, group='group2', expiration_group='group3'
        )


        @ALTERNATE_MEMOIZE
        def function2(arg1, arg2):
            ...
        def function2(arg1, arg2): ...

    :param conf: config object, must have had :func:`configure` called on it.
    :type conf: oslo_config.cfg.ConfigOpts
@@ -598,8 +660,9 @@ def get_memoization_decorator(conf, region, group, expiration_group=None):
    should_cache = _get_should_cache_fn(conf, group)
    expiration_time = _get_expiration_time_fn(conf, expiration_group)

    memoize = region.cache_on_arguments(should_cache_fn=should_cache,
                                        expiration_time=expiration_time)
    memoize = region.cache_on_arguments(
        should_cache_fn=should_cache, expiration_time=expiration_time
    )

    # Make sure the actual "should_cache" and "expiration_time" methods are
    # available. This is potentially interesting/useful to pre-seed cache

@@ -14,7 +14,6 @@

"""Items useful for external testing."""


import copy

from dogpile.cache import proxy
@@ -60,9 +59,11 @@ class CacheIsolatingProxy(proxy.ProxyBackend):
            group='cache',
            backend='dogpile.cache.memory',
            enabled=True,
            proxies=['oslo_cache.testing.CacheIsolatingProxy'])
            proxies=['oslo_cache.testing.CacheIsolatingProxy'],
        )

    """

    def get(self, key):
        return _copy_value(self.proxied.get(key))

@@ -25,7 +25,7 @@ class TestDogpileCachePyMemcacheBackend(test_base.BaseTestCaseCacheBackend):
        self.config_fixture.config(
            group="cache",
            backend="dogpile.cache.pymemcache",
            memcache_servers=[f'localhost:{MEMCACHED_PORT}']
            memcache_servers=[f'localhost:{MEMCACHED_PORT}'],
        )

        # NOTE(hberaud): super must be called after all to ensure that

@@ -26,7 +26,7 @@ class TestRedisSentinelCacheBackend(test_base.BaseTestCaseCacheBackend):
            group='cache',
            backend='dogpile.cache.redis_sentinel',
            redis_sentinels=[f"127.0.0.1:{REDIS_SENTINEL_PORT}"],
            redis_sentinel_service_name='pifpaf'
            redis_sentinel_service_name='pifpaf',
        )

        # NOTE(hberaud): super must be called after all to ensure that

@@ -25,7 +25,7 @@ class TestEtcdCacheBackend(test_base.BaseTestCaseCacheBackend):
        self.config_fixture.config(
            group='cache',
            backend='oslo_cache.etcd3gw',
            backend_argument=['host:127.0.0.1', f'port:{ETCD_PORT}']
            backend_argument=['host:127.0.0.1', f'port:{ETCD_PORT}'],
        )

        # NOTE(hberaud): super must be called after all to ensure that

@@ -26,7 +26,7 @@ class TestMemcachePoolCacheBackend(test_base.BaseTestCaseCacheBackend):
            group='cache',
            backend='oslo_cache.memcache_pool',
            enabled=True,
            memcache_servers=[f'localhost:{MEMCACHED_PORT}']
            memcache_servers=[f'localhost:{MEMCACHED_PORT}'],
        )
        # NOTE(hberaud): super must be called after all to ensure that
        # config fixture is properly initialized with value related to
@@ -45,6 +45,6 @@ class TestBMemcachePoolCacheBackend(test_base.BaseTestCaseCacheBackend):
            memcache_servers=[f'localhost:{MEMCACHED_PORT}'],
            memcache_sasl_enabled=False,
            memcache_username='sasl_name',
            memcache_password='sasl_pswd'
            memcache_password='sasl_pswd',
        )
        super().setUp()

@@ -86,9 +86,11 @@ class BaseTestCaseCacheBackend(base.BaseTestCase):
        random_key1 = uuidutils.generate_uuid(dashed=False)
        random_key2 = uuidutils.generate_uuid(dashed=False)
        random_key3 = uuidutils.generate_uuid(dashed=False)
        mapping = {random_key1: 'dummyValue1',
                   random_key2: 'dummyValue2',
                   random_key3: 'dummyValue3'}
        mapping = {
            random_key1: 'dummyValue1',
            random_key2: 'dummyValue2',
            random_key3: 'dummyValue3',
        }
        self.region.set_multi(mapping)
        # should return NO_VALUE as key does not exist in cache
        self.assertEqual(NO_VALUE, self.region.get(random_key))
@@ -102,9 +104,11 @@ class BaseTestCaseCacheBackend(base.BaseTestCase):
        random_key1 = uuidutils.generate_uuid(dashed=False)
        random_key2 = uuidutils.generate_uuid(dashed=False)
        random_key3 = uuidutils.generate_uuid(dashed=False)
        mapping = {random_key1: 'dummyValue1',
                   random_key2: '',
                   random_key3: 'dummyValue3'}
        mapping = {
            random_key1: 'dummyValue1',
            random_key2: '',
            random_key3: 'dummyValue3',
        }
        self.region.set_multi(mapping)

        keys = [random_key, random_key1, random_key2, random_key3]
@@ -120,9 +124,11 @@ class BaseTestCaseCacheBackend(base.BaseTestCase):
        random_key1 = uuidutils.generate_uuid(dashed=False)
        random_key2 = uuidutils.generate_uuid(dashed=False)
        random_key3 = uuidutils.generate_uuid(dashed=False)
        mapping = {random_key1: 'dummyValue1',
                   random_key2: 'dummyValue2',
                   random_key3: 'dummyValue3'}
        mapping = {
            random_key1: 'dummyValue1',
            random_key2: 'dummyValue2',
            random_key3: 'dummyValue3',
        }
        self.region.set_multi(mapping)
        # should return NO_VALUE as key does not exist in cache
        self.assertEqual(NO_VALUE, self.region.get(random_key))
@@ -130,8 +136,7 @@ class BaseTestCaseCacheBackend(base.BaseTestCase):
        self.assertEqual("dummyValue2", self.region.get(random_key2))
        self.assertEqual("dummyValue3", self.region.get(random_key3))

        mapping = {random_key1: 'dummyValue4',
                   random_key2: 'dummyValue5'}
        mapping = {random_key1: 'dummyValue4', random_key2: 'dummyValue5'}
        self.region.set_multi(mapping)
        self.assertEqual(NO_VALUE, self.region.get(random_key))
        self.assertEqual("dummyValue4", self.region.get(random_key1))
@@ -144,10 +149,12 @@ class BaseTestCaseCacheBackend(base.BaseTestCase):
        random_key2 = uuidutils.generate_uuid(dashed=False)
        random_key3 = uuidutils.generate_uuid(dashed=False)
        random_key4 = uuidutils.generate_uuid(dashed=False)
        mapping = {random_key1: 'dummyValue1',
                   random_key2: None,
                   random_key3: '',
                   random_key4: 'dummyValue4'}
        mapping = {
            random_key1: 'dummyValue1',
            random_key2: None,
            random_key3: '',
            random_key4: 'dummyValue4',
        }
        self.region.set_multi(mapping)
        # should return NO_VALUE as key does not exist in cache
        self.assertEqual(NO_VALUE, self.region.get(random_key))
@@ -166,8 +173,7 @@ class BaseTestCaseCacheBackend(base.BaseTestCase):
        self.assertEqual("", results[3])
        self.assertEqual("dummyValue4", results[4])

        mapping = {random_key1: 'dummyValue5',
                   random_key2: 'dummyValue6'}
        mapping = {random_key1: 'dummyValue5', random_key2: 'dummyValue6'}
        self.region.set_multi(mapping)
        self.assertEqual(NO_VALUE, self.region.get(random_key))
        self.assertEqual("dummyValue5", self.region.get(random_key1))
@@ -188,9 +194,11 @@ class BaseTestCaseCacheBackend(base.BaseTestCase):
        random_key1 = uuidutils.generate_uuid(dashed=False)
        random_key2 = uuidutils.generate_uuid(dashed=False)
        random_key3 = uuidutils.generate_uuid(dashed=False)
        mapping = {random_key1: 'dummyValue1',
                   random_key2: 'dummyValue2',
                   random_key3: 'dummyValue3'}
        mapping = {
            random_key1: 'dummyValue1',
            random_key2: 'dummyValue2',
            random_key3: 'dummyValue3',
        }
        self.region.set_multi(mapping)
        # should return NO_VALUE as key does not exist in cache
        self.assertEqual(NO_VALUE, self.region.get(random_key))

@@ -26,4 +26,5 @@ class BaseTestCase(base.BaseTestCase):
            group='cache',
            backend='dogpile.cache.memory',
            enabled=True,
            proxies=['oslo_cache.testing.CacheIsolatingProxy'])
            proxies=['oslo_cache.testing.CacheIsolatingProxy'],
        )

(File diff suppressed because it is too large.)

@@ -40,8 +40,8 @@ class TestConnectionPool(test_cache.BaseTestCase):
         self.unused_timeout = 10
         self.maxsize = 2
         self.connection_pool = _TestConnectionPool(
-            maxsize=self.maxsize,
-            unused_timeout=self.unused_timeout)
+            maxsize=self.maxsize, unused_timeout=self.unused_timeout
+        )
         self.addCleanup(self.cleanup_instance('connection_pool'))

     def cleanup_instance(self, *names):
@@ -54,6 +54,7 @@ class TestConnectionPool(test_cache.BaseTestCase):
             for name in names:
                 if hasattr(self, name):
                     delattr(self, name)
+
         return cleanup

     def test_get_context_manager(self):
@@ -68,8 +69,8 @@ class TestConnectionPool(test_cache.BaseTestCase):
         self.test_get_context_manager()
         newtime = time.time() + self.unused_timeout * 2
         non_expired_connection = _memcache_pool._PoolItem(
-            ttl=(newtime * 2),
-            connection=mock.MagicMock())
+            ttl=(newtime * 2), connection=mock.MagicMock()
+        )
         self.connection_pool.queue.append(non_expired_connection)
         self.assertThat(self.connection_pool.queue, matchers.HasLength(2))
         with mock.patch.object(time, 'time', return_value=newtime):
@@ -77,7 +78,8 @@ class TestConnectionPool(test_cache.BaseTestCase):
             with self.connection_pool.acquire():
                 pass
             conn.assert_has_calls(
-                [mock.call(self.connection_pool.destroyed_value)])
+                [mock.call(self.connection_pool.destroyed_value)]
+            )
         self.assertThat(self.connection_pool.queue, matchers.HasLength(1))
         self.assertEqual(0, non_expired_connection.connection.call_count)

@@ -85,13 +87,15 @@ class TestConnectionPool(test_cache.BaseTestCase):
         class TestException(Exception):
             pass

-        with mock.patch.object(_TestConnectionPool, '_create_connection',
-                               side_effect=TestException):
+        with mock.patch.object(
+            _TestConnectionPool,
+            '_create_connection',
+            side_effect=TestException,
+        ):
             with testtools.ExpectedException(TestException):
                 with self.connection_pool.acquire():
                     pass
-        self.assertThat(self.connection_pool.queue,
-                        matchers.HasLength(0))
+        self.assertThat(self.connection_pool.queue, matchers.HasLength(0))
         self.assertEqual(0, self.connection_pool._acquired)

     def test_connection_pool_limits_maximum_connections(self):
@@ -114,9 +118,8 @@ class TestConnectionPool(test_cache.BaseTestCase):

     def test_connection_pool_maximum_connection_get_timeout(self):
         connection_pool = _TestConnectionPool(
-            maxsize=1,
-            unused_timeout=self.unused_timeout,
-            conn_get_timeout=0)
+            maxsize=1, unused_timeout=self.unused_timeout, conn_get_timeout=0
+        )

         def _acquire_connection():
             with connection_pool.acquire():
@@ -134,7 +137,6 @@ class TestConnectionPool(test_cache.BaseTestCase):


 class TestMemcacheClientOverrides(test_cache.BaseTestCase):
-
     def test_client_stripped_of_threading_local(self):
         """threading.local overrides are restored for _MemcacheClient"""
         client_class = _memcache_pool._MemcacheClient
@@ -143,8 +145,10 @@ class TestMemcacheClientOverrides(test_cache.BaseTestCase):
         self.assertTrue(thread_local is threading.local)
         for field in thread_local.__dict__.keys():
             if field not in ('__dict__', '__weakref__'):
-                self.assertNotEqual(id(getattr(thread_local, field, None)),
-                                    id(getattr(client_class, field, None)))
+                self.assertNotEqual(
+                    id(getattr(thread_local, field, None)),
+                    id(getattr(client_class, field, None)),
+                )

     def test_can_create_with_kwargs(self):
         """Test for lp 1812935
@@ -153,6 +157,7 @@ class TestMemcacheClientOverrides(test_cache.BaseTestCase):
     following to the top of oslo_cache/tests/__init__.py::

         import eventlet
+
         eventlet.monkey_patch()

     This should happen before any other imports in that file.
@@ -165,7 +170,6 @@ class TestMemcacheClientOverrides(test_cache.BaseTestCase):


 class TestBMemcacheClient(test_cache.BaseTestCase):
-
     def test_can_create_with_kwargs(self):
         client = _bmemcache_pool._BMemcacheClient('foo', password='123456')
         # Make sure kwargs are properly processed by the client

@@ -26,7 +26,6 @@ VALUE = 'test_value'


 class CacheDictBackendTest(test_cache.BaseTestCase):
-
     def setUp(self):
         super().setUp()
         self.config_fixture = self.useFixture(config_fixture.Config())
@@ -34,7 +33,8 @@ class CacheDictBackendTest(test_cache.BaseTestCase):
         self.time_fixture = self.useFixture(time_fixture.TimeFixture())
         self.region = dp_region.make_region()
         self.region.configure(
-            'oslo_cache.dict', arguments={'expiration_time': 0.5})
+            'oslo_cache.dict', arguments={'expiration_time': 0.5}
+        )

     def test_dict_backend(self):
         self.assertIs(NO_VALUE, self.region.get(KEY))
@@ -64,7 +64,8 @@ class CacheDictBackendTest(test_cache.BaseTestCase):
     def test_dict_backend_zero_expiration_time(self):
         self.region = dp_region.make_region()
         self.region.configure(
-            'oslo_cache.dict', arguments={'expiration_time': 0})
+            'oslo_cache.dict', arguments={'expiration_time': 0}
+        )

         self.region.set(KEY, VALUE)
         self.time_fixture.advance_time_seconds(1)
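Note: these tests drive the oslo_cache.dict backend's time-based expiry. A minimal usage sketch, assuming oslo.cache is installed so the backend is registered with dogpile (the 0.5-second expiry mirrors the fixture above; this sketch is illustrative, not part of the patch):

    import time

    from dogpile.cache import region as dp_region
    from oslo_cache.core import NO_VALUE

    region = dp_region.make_region()
    region.configure('oslo_cache.dict', arguments={'expiration_time': 0.5})

    region.set('key', 'value')
    assert region.get('key') == 'value'
    time.sleep(1)  # wait past the expiration window
    assert region.get('key') is NO_VALUE  # expired entries read as misses
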
@@ -53,3 +53,16 @@ etcd3gw = [
 packages = [
     "oslo_cache"
 ]
+
+[tool.ruff]
+line-length = 79
+
+[tool.ruff.format]
+quote-style = "preserve"
+docstring-code-format = true
+
+[tool.ruff.lint]
+select = ["E4", "E7", "E9", "F", "S", "UP"]
+
+[tool.ruff.lint.per-file-ignores]
+"oslo_cache/tests/*" = ["S"]
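Note: with this [tool.ruff] table in pyproject.toml, the same checks the pre-commit hooks perform can be reproduced locally; a minimal sketch, assuming ruff is available on PATH (the commands below are standard ruff CLI, not taken from this change):

    ruff check .     # lint with the selected E4/E7/E9/F/S/UP rules
    ruff format .    # Black-style formatting, preserving existing quote style

The per-file-ignores entry disables the bandit-derived "S" rules under oslo_cache/tests/, matching the previous bandit invocation that excluded the test tree.
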
@@ -194,10 +194,8 @@ htmlhelp_basename = 'oslo.cacheReleaseNotesDoc'
 latex_elements = {
     # The paper size ('letterpaper' or 'a4paper').
     # 'papersize': 'letterpaper',
-
     # The font size ('10pt', '11pt' or '12pt').
     # 'pointsize': '10pt',
-
     # Additional stuff for the LaTeX preamble.
     # 'preamble': '',
 }
@@ -206,9 +204,13 @@ latex_elements = {
 # (source start file, target name, title,
 # author, documentclass [howto, manual, or own class]).
 latex_documents = [
-    ('index', 'oslo.cacheReleaseNotes.tex',
-     'oslo.cache Release Notes Documentation',
-     'oslo.cache Developers', 'manual'),
+    (
+        'index',
+        'oslo.cacheReleaseNotes.tex',
+        'oslo.cache Release Notes Documentation',
+        'oslo.cache Developers',
+        'manual',
+    ),
 ]

 # The name of an image file (relative to this directory) to place at the top of
@@ -236,9 +238,13 @@ latex_documents = [
 # One entry per manual page. List of tuples
 # (source start file, name, description, authors, manual section).
 man_pages = [
-    ('index', 'oslo.cacheReleaseNotes',
-     'oslo.cache Release Notes Documentation',
-     ['oslo.cache Developers'], 1)
+    (
+        'index',
+        'oslo.cacheReleaseNotes',
+        'oslo.cache Release Notes Documentation',
+        ['oslo.cache Developers'],
+        1,
+    )
 ]

 # If true, show URL addresses after external links.
@@ -250,11 +256,15 @@ man_pages = [
 # (source start file, target name, title, author,
 # dir menu entry, description, category)
 texinfo_documents = [
-    ('index', 'oslo.cacheReleaseNotes',
-     'oslo.cache Release Notes Documentation',
-     'oslo.cache Developers', 'oslo.cacheReleaseNotes',
-     'One line description of project.',
-     'Miscellaneous'),
+    (
+        'index',
+        'oslo.cacheReleaseNotes',
+        'oslo.cache Release Notes Documentation',
+        'oslo.cache Developers',
+        'oslo.cacheReleaseNotes',
+        'One line description of project.',
+        'Miscellaneous',
+    ),
 ]

 # Documents to append as an appendix to all manuals.

setup.py (4 lines changed)
@@ -15,6 +15,4 @@

 import setuptools

-setuptools.setup(
-    setup_requires=['pbr>=2.0.0'],
-    pbr=True)
+setuptools.setup(setup_requires=['pbr>=2.0.0'], pbr=True)

tox.ini (32 lines changed)
@@ -31,7 +31,8 @@ commands =
 commands = {posargs}

 [testenv:docs]
-allowlist_externals = rm
+allowlist_externals =
+  rm
 deps =
   -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master}
   -r{toxinidir}/doc/requirements.txt
@@ -39,6 +40,14 @@ commands =
   rm -fr doc/build
   sphinx-build -W --keep-going -b html doc/source doc/build/html

+[testenv:releasenotes]
+allowlist_externals =
+  rm
+deps = {[testenv:docs]deps}
+commands =
+  rm -rf releasenotes/build
+  sphinx-build -a -E -W -d releasenotes/build/doctrees --keep-going -b html releasenotes/source releasenotes/build/html
+
 [testenv:cover]
 setenv =
   PYTHON=coverage run --source oslo_cache --parallel-mode
@@ -51,18 +60,13 @@ commands =
   coverage report --show-missing

 [flake8]
-show-source = True
-ignore = H405,W504,F405
-builtins = _
-exclude=.venv,.git,.tox,dist,doc,*lib/python*,*egg,build
+show-source = true
+exclude = .venv,.git,.tox,dist,doc,*lib/python*,*egg,build,__init__.py
+# We only enable the hacking (H) checks
+select = H
+# H301 Black will put commas after imports that can't fit on one line
+# H405 Multi-line docstrings are fine
+ignore = H301,H405

 [hacking]
-import_exceptions =
-
-[testenv:releasenotes]
-allowlist_externals =
-  rm
-deps = {[testenv:docs]deps}
-commands =
-  rm -rf releasenotes/build
-  sphinx-build -a -E -W -d releasenotes/build/doctrees --keep-going -b html releasenotes/source releasenotes/build/html
+import_exceptions = oslo_cache._i18n
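Note: after this change flake8 remains only for the OpenStack hacking (H) checks, while ruff owns general linting and formatting; H301 and H405 are ignored because they conflict with the formatter's output. A minimal sketch for running just the hacking checks locally (command assumed, not part of the patch):

    flake8 --select H oslo_cache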