Move search functions to the research context file
To keep the `resource_provider.py` file a pure module for the resource provider object, and to avoid the circular imports that could occur in the refactoring that follows, this patch moves the functions that search resource providers for specific conditions from `resource_provider.py` to the new file, `research_context.py`. No functional change or optimization is included in this patch.

Change-Id: I7b217cae6db967b1cc7f1885fff67e4148893fc6
Story: 2005712
Task: 31038
committed by Eric Fried
parent daf7285a74
commit fb71a6ab71
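As a rough sketch of the intended call-site change (not part of the patch itself), callers that previously reached these search helpers through the resource provider module now import the new module instead. The names `ctx`, `trait_map` and `rg_ctx` below are assumed to be an existing placement request context, a trait name-to-ID map, and a populated `RequestGroupSearchContext`; the helper signatures themselves are unchanged by the move:

    # Before: the search helpers were reached via resource_provider.py
    # from placement.objects import resource_provider as rp_obj
    # trait_rps = rp_obj.get_provider_ids_having_any_trait(ctx, trait_map)

    # After: the same helpers live in research_context.py
    from placement.objects import research_context as res_ctx

    trait_rps = res_ctx.get_provider_ids_having_any_trait(ctx, trait_map)
    rp_candidates = res_ctx.get_trees_matching_all(rg_ctx)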
@@ -116,11 +116,11 @@ class AllocationCandidates(object):
                 # it should be possible to further optimize this attempt at
                 # a quick return, but we leave that to future patches for
                 # now.
-                trait_rps = rp_obj.get_provider_ids_having_any_trait(
+                trait_rps = res_ctx.get_provider_ids_having_any_trait(
                     rg_ctx.context, rg_ctx.required_trait_map)
                 if not trait_rps:
                     return [], []
-            rp_candidates = rp_obj.get_trees_matching_all(rg_ctx)
+            rp_candidates = res_ctx.get_trees_matching_all(rg_ctx)
             return _alloc_candidates_multiple_providers(rg_ctx, rp_candidates)

         # Either we are processing a single-RP request group, or there are no
@@ -128,14 +128,14 @@ class AllocationCandidates(object):
         # tuples of (internal provider ID, root provider ID) that have ALL
         # the requested resources and more efficiently construct the
         # allocation requests.
-        rp_tuples = rp_obj.get_provider_ids_matching(rg_ctx)
+        rp_tuples = res_ctx.get_provider_ids_matching(rg_ctx)
         return _alloc_candidates_single_provider(rg_ctx, rp_tuples)

     @classmethod
     @db_api.placement_context_manager.reader
     def _get_by_requests(cls, context, requests, limit=None,
                          group_policy=None, nested_aware=True):
-        has_trees = rp_obj.has_provider_trees(context)
+        has_trees = res_ctx.has_provider_trees(context)

         candidates = {}
         for suffix, request in requests.items():
@@ -428,7 +428,7 @@ def _alloc_candidates_single_provider(rg_ctx, rp_tuples):
         # AllocationRequest for every possible anchor.
         traits = rp_summary.traits
         if os_traits.MISC_SHARES_VIA_AGGREGATE in traits:
-            anchors = set([p[1] for p in rp_obj.anchors_for_sharing_providers(
+            anchors = set([p[1] for p in res_ctx.anchors_for_sharing_providers(
                 rg_ctx.context, [rp_summary.resource_provider.id])])
             for anchor in anchors:
                 # We already added self
@@ -489,7 +489,7 @@ def _build_provider_summaries(context, usages, prov_traits):
    # provider information (including root, parent and UUID information) for
    # all providers involved in our operation
    rp_ids = set(usage['resource_provider_id'] for usage in usages)
-   provider_ids = rp_obj.provider_ids_from_rp_ids(context, rp_ids)
+   provider_ids = res_ctx.provider_ids_from_rp_ids(context, rp_ids)

    # Build up a dict, keyed by internal resource provider ID, of
    # ProviderSummary objects containing one or more ProviderSummaryResource
@@ -545,7 +545,7 @@ def _check_traits_for_alloc_request(res_requests, summaries, required_traits,
    resource provider internal IDs in play, else return an empty list.

    TODO(tetsuro): For optimization, we should move this logic to SQL in
-   rp_obj.get_trees_matching_all().
+   res_ctx.get_trees_matching_all().

    :param res_requests: a list of AllocationRequestResource objects that have
                         resource providers to be checked if they collectively
@@ -11,17 +11,38 @@
 # under the License.
 """Utility methods for getting allocation candidates."""

+import collections
+import os_traits
 from oslo_log import log as logging
+import six
+import sqlalchemy as sa
+from sqlalchemy import sql

+from placement.db.sqlalchemy import models
+from placement import db_api
 from placement import exception
-from placement.objects import resource_provider as rp_obj
+from placement.objects import rp_candidates
 from placement.objects import trait as trait_obj
 from placement import resource_class_cache as rc_cache


+# TODO(tetsuro): Move these public symbols in a central place.
+_TRAIT_TBL = models.Trait.__table__
+_ALLOC_TBL = models.Allocation.__table__
+_INV_TBL = models.Inventory.__table__
+_RP_TBL = models.ResourceProvider.__table__
+_AGG_TBL = models.PlacementAggregate.__table__
+_RP_AGG_TBL = models.ResourceProviderAggregate.__table__
+_RP_TRAIT_TBL = models.ResourceProviderTrait.__table__
+
+
 LOG = logging.getLogger(__name__)


+ProviderIds = collections.namedtuple(
+    'ProviderIds', 'id uuid parent_id parent_uuid root_id root_uuid')
+
+
 class RequestGroupSearchContext(object):
     """An adapter object that represents the search for allocation candidates
     for a single request group.
@@ -70,7 +91,7 @@ class RequestGroupSearchContext(object):
         # be satisfied by resource provider(s) under the root provider.
         self.tree_root_id = None
         if request.in_tree:
-            tree_ids = rp_obj.provider_ids_from_uuid(context, request.in_tree)
+            tree_ids = provider_ids_from_uuid(context, request.in_tree)
             if tree_ids is None:
                 raise exception.ResourceProviderNotFound()
             self.tree_root_id = tree_ids.root_id
@@ -89,7 +110,7 @@ class RequestGroupSearchContext(object):
            #     if not rc_id in (sharable_rc_ids):
            #         continue
            self._sharing_providers[rc_id] = \
-               rp_obj.get_providers_with_shared_capacity(
+               get_providers_with_shared_capacity(
                    context, rc_id, amount, self.member_of)

        # bool indicating there is some level of nesting in the environment
@@ -113,3 +134,882 @@ class RequestGroupSearchContext(object):

     def get_rps_with_shared_capacity(self, rc_id):
         return self._sharing_providers.get(rc_id)
+
+
+def provider_ids_from_rp_ids(context, rp_ids):
+    """Given an iterable of internal resource provider IDs, returns a dict,
+    keyed by internal provider Id, of ProviderIds namedtuples describing those
+    providers.
+
+    :returns: dict, keyed by internal provider Id, of ProviderIds namedtuples
+    :param rp_ids: iterable of internal provider IDs to look up
+    """
+    # SELECT
+    #   rp.id, rp.uuid,
+    #   parent.id AS parent_id, parent.uuid AS parent_uuid,
+    #   root.id AS root_id, root.uuid AS root_uuid
+    # FROM resource_providers AS rp
+    # INNER JOIN resource_providers AS root
+    #   ON rp.root_provider_id = root.id
+    # LEFT JOIN resource_providers AS parent
+    #   ON rp.parent_provider_id = parent.id
+    # WHERE rp.id IN ($rp_ids)
+    me = sa.alias(_RP_TBL, name="me")
+    parent = sa.alias(_RP_TBL, name="parent")
+    root = sa.alias(_RP_TBL, name="root")
+    cols = [
+        me.c.id,
+        me.c.uuid,
+        parent.c.id.label('parent_id'),
+        parent.c.uuid.label('parent_uuid'),
+        root.c.id.label('root_id'),
+        root.c.uuid.label('root_uuid'),
+    ]
+    me_to_root = sa.join(me, root, me.c.root_provider_id == root.c.id)
+    me_to_parent = sa.outerjoin(
+        me_to_root, parent,
+        me.c.parent_provider_id == parent.c.id)
+    sel = sa.select(cols).select_from(me_to_parent)
+    sel = sel.where(me.c.id.in_(rp_ids))
+
+    ret = {}
+    for r in context.session.execute(sel):
+        ret[r['id']] = ProviderIds(**r)
+    return ret
+
+
+@db_api.placement_context_manager.reader
+def provider_ids_from_uuid(context, uuid):
+    """Given the UUID of a resource provider, returns a namedtuple
+    (ProviderIds) with the internal ID, the UUID, the parent provider's
+    internal ID, parent provider's UUID, the root provider's internal ID and
+    the root provider UUID.
+
+    :returns: ProviderIds object containing the internal IDs and UUIDs of the
+              provider identified by the supplied UUID
+    :param uuid: The UUID of the provider to look up
+    """
+    # SELECT
+    #   rp.id, rp.uuid,
+    #   parent.id AS parent_id, parent.uuid AS parent_uuid,
+    #   root.id AS root_id, root.uuid AS root_uuid
+    # FROM resource_providers AS rp
+    # INNER JOIN resource_providers AS root
+    #   ON rp.root_provider_id = root.id
+    # LEFT JOIN resource_providers AS parent
+    #   ON rp.parent_provider_id = parent.id
+    me = sa.alias(_RP_TBL, name="me")
+    parent = sa.alias(_RP_TBL, name="parent")
+    root = sa.alias(_RP_TBL, name="root")
+    cols = [
+        me.c.id,
+        me.c.uuid,
+        parent.c.id.label('parent_id'),
+        parent.c.uuid.label('parent_uuid'),
+        root.c.id.label('root_id'),
+        root.c.uuid.label('root_uuid'),
+    ]
+    me_to_root = sa.join(me, root, me.c.root_provider_id == root.c.id)
+    me_to_parent = sa.outerjoin(
+        me_to_root, parent,
+        me.c.parent_provider_id == parent.c.id)
+    sel = sa.select(cols).select_from(me_to_parent)
+    sel = sel.where(me.c.uuid == uuid)
+    res = context.session.execute(sel).fetchone()
+    if not res:
+        return None
+    return ProviderIds(**dict(res))
|
def _usage_select(rc_ids):
|
||||||
|
usage = sa.select([_ALLOC_TBL.c.resource_provider_id,
|
||||||
|
_ALLOC_TBL.c.resource_class_id,
|
||||||
|
sql.func.sum(_ALLOC_TBL.c.used).label('used')])
|
||||||
|
usage = usage.where(_ALLOC_TBL.c.resource_class_id.in_(rc_ids))
|
||||||
|
usage = usage.group_by(_ALLOC_TBL.c.resource_provider_id,
|
||||||
|
_ALLOC_TBL.c.resource_class_id)
|
||||||
|
return sa.alias(usage, name='usage')
|
||||||
|
|
||||||
|
|
||||||
|
def _capacity_check_clause(amount, usage, inv_tbl=_INV_TBL):
|
||||||
|
return sa.and_(
|
||||||
|
sql.func.coalesce(usage.c.used, 0) + amount <= (
|
||||||
|
(inv_tbl.c.total - inv_tbl.c.reserved) *
|
||||||
|
inv_tbl.c.allocation_ratio),
|
||||||
|
inv_tbl.c.min_unit <= amount,
|
||||||
|
inv_tbl.c.max_unit >= amount,
|
||||||
|
amount % inv_tbl.c.step_size == 0,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
@db_api.placement_context_manager.reader
|
||||||
|
def get_providers_with_resource(ctx, rc_id, amount, tree_root_id=None):
|
||||||
|
"""Returns a set of tuples of (provider ID, root provider ID) of providers
|
||||||
|
that satisfy the request for a single resource class.
|
||||||
|
|
||||||
|
:param ctx: Session context to use
|
||||||
|
:param rc_id: Internal ID of resource class to check inventory for
|
||||||
|
:param amount: Amount of resource being requested
|
||||||
|
:param tree_root_id: An optional root provider ID. If provided, the results
|
||||||
|
are limited to the resource providers under the given
|
||||||
|
root resource provider.
|
||||||
|
"""
|
||||||
|
# SELECT rp.id, rp.root_provider_id
|
||||||
|
# FROM resource_providers AS rp
|
||||||
|
# JOIN inventories AS inv
|
||||||
|
# ON rp.id = inv.resource_provider_id
|
||||||
|
# AND inv.resource_class_id = $RC_ID
|
||||||
|
# LEFT JOIN (
|
||||||
|
# SELECT
|
||||||
|
# alloc.resource_provider_id,
|
||||||
|
# SUM(allocs.used) AS used
|
||||||
|
# FROM allocations AS alloc
|
||||||
|
# WHERE allocs.resource_class_id = $RC_ID
|
||||||
|
# GROUP BY allocs.resource_provider_id
|
||||||
|
# ) AS usage
|
||||||
|
# ON inv.resource_provider_id = usage.resource_provider_id
|
||||||
|
# WHERE
|
||||||
|
# used + $AMOUNT <= ((total - reserved) * inv.allocation_ratio)
|
||||||
|
# AND inv.min_unit <= $AMOUNT
|
||||||
|
# AND inv.max_unit >= $AMOUNT
|
||||||
|
# AND $AMOUNT % inv.step_size == 0
|
||||||
|
rpt = sa.alias(_RP_TBL, name="rp")
|
||||||
|
inv = sa.alias(_INV_TBL, name="inv")
|
||||||
|
usage = _usage_select([rc_id])
|
||||||
|
rp_to_inv = sa.join(
|
||||||
|
rpt, inv, sa.and_(
|
||||||
|
rpt.c.id == inv.c.resource_provider_id,
|
||||||
|
inv.c.resource_class_id == rc_id))
|
||||||
|
inv_to_usage = sa.outerjoin(
|
||||||
|
rp_to_inv, usage,
|
||||||
|
inv.c.resource_provider_id == usage.c.resource_provider_id)
|
||||||
|
sel = sa.select([rpt.c.id, rpt.c.root_provider_id])
|
||||||
|
sel = sel.select_from(inv_to_usage)
|
||||||
|
where_conds = _capacity_check_clause(amount, usage, inv_tbl=inv)
|
||||||
|
if tree_root_id is not None:
|
||||||
|
where_conds = sa.and_(
|
||||||
|
rpt.c.root_provider_id == tree_root_id,
|
||||||
|
where_conds)
|
||||||
|
sel = sel.where(where_conds)
|
||||||
|
res = ctx.session.execute(sel).fetchall()
|
||||||
|
res = set((r[0], r[1]) for r in res)
|
||||||
|
return res
|
||||||
|
|
||||||
|
|
||||||
|
@db_api.placement_context_manager.reader
|
||||||
|
def get_provider_ids_matching(rg_ctx):
|
||||||
|
"""Returns a list of tuples of (internal provider ID, root provider ID)
|
||||||
|
that have available inventory to satisfy all the supplied requests for
|
||||||
|
resources. If no providers match, the empty list is returned.
|
||||||
|
|
||||||
|
:note: This function is used to get results for (a) a RequestGroup with
|
||||||
|
use_same_provider=True in a granular request, or (b) a short cut
|
||||||
|
path for scenarios that do NOT involve sharing or nested providers.
|
||||||
|
Each `internal provider ID` represents a *single* provider that
|
||||||
|
can satisfy *all* of the resource/trait/aggregate criteria. This is
|
||||||
|
in contrast with get_trees_matching_all(), where each provider
|
||||||
|
might only satisfy *some* of the resources, the rest of which are
|
||||||
|
satisfied by other providers in the same tree or shared via
|
||||||
|
aggregate.
|
||||||
|
|
||||||
|
:param rg_ctx: RequestGroupSearchContext
|
||||||
|
"""
|
||||||
|
# TODO(tetsuro): refactor this to have only the rg_ctx argument
|
||||||
|
filtered_rps, forbidden_rp_ids = get_provider_ids_for_traits_and_aggs(
|
||||||
|
rg_ctx.context, rg_ctx.required_trait_map, rg_ctx.forbidden_trait_map,
|
||||||
|
rg_ctx.member_of, rg_ctx.forbidden_aggs)
|
||||||
|
if filtered_rps is None:
|
||||||
|
# If no providers match the traits/aggs, we can short out
|
||||||
|
return []
|
||||||
|
|
||||||
|
# Instead of constructing a giant complex SQL statement that joins multiple
|
||||||
|
# copies of derived usage tables and inventory tables to each other, we do
|
||||||
|
# one query for each requested resource class. This allows us to log a
|
||||||
|
# rough idea of which resource class query returned no results (for
|
||||||
|
# purposes of rough debugging of a single allocation candidates request) as
|
||||||
|
# well as reduce the necessary knowledge of SQL in order to understand the
|
||||||
|
# queries being executed here.
|
||||||
|
#
|
||||||
|
# NOTE(jaypipes): The efficiency of this operation may be improved by
|
||||||
|
# passing the trait_rps and/or forbidden_ip_ids iterables to the
|
||||||
|
# get_providers_with_resource() function so that we don't have to process
|
||||||
|
# as many records inside the loop below to remove providers from the
|
||||||
|
# eventual results list
|
||||||
|
provs_with_resource = set()
|
||||||
|
first = True
|
||||||
|
for rc_id, amount in rg_ctx.resources.items():
|
||||||
|
rc_name = rc_cache.RC_CACHE.string_from_id(rc_id)
|
||||||
|
provs_with_resource = get_providers_with_resource(
|
||||||
|
rg_ctx.context, rc_id, amount, tree_root_id=rg_ctx.tree_root_id)
|
||||||
|
LOG.debug("found %d providers with available %d %s",
|
||||||
|
len(provs_with_resource), amount, rc_name)
|
||||||
|
if not provs_with_resource:
|
||||||
|
return []
|
||||||
|
|
||||||
|
rc_rp_ids = set(p[0] for p in provs_with_resource)
|
||||||
|
# The branching below could be collapsed code-wise, but is in place to
|
||||||
|
# make the debug logging clearer.
|
||||||
|
if first:
|
||||||
|
first = False
|
||||||
|
if filtered_rps:
|
||||||
|
filtered_rps &= rc_rp_ids
|
||||||
|
LOG.debug("found %d providers after applying initial "
|
||||||
|
"aggregate and trait filters", len(filtered_rps))
|
||||||
|
else:
|
||||||
|
filtered_rps = rc_rp_ids
|
||||||
|
# The following condition is not necessary for the logic; just
|
||||||
|
# prevents the message from being logged unnecessarily.
|
||||||
|
if forbidden_rp_ids:
|
||||||
|
# Forbidden trait/aggregate filters only need to be applied
|
||||||
|
# a) on the first iteration; and
|
||||||
|
# b) if not already set up before the loop
|
||||||
|
# ...since any providers in the resulting set are the basis
|
||||||
|
# for intersections, and providers with forbidden traits
|
||||||
|
# are already absent from that set after we've filtered
|
||||||
|
# them once.
|
||||||
|
filtered_rps -= forbidden_rp_ids
|
||||||
|
LOG.debug("found %d providers after applying forbidden "
|
||||||
|
"traits/aggregates", len(filtered_rps))
|
||||||
|
else:
|
||||||
|
filtered_rps &= rc_rp_ids
|
||||||
|
LOG.debug("found %d providers after filtering by previous result",
|
||||||
|
len(filtered_rps))
|
||||||
|
|
||||||
|
if not filtered_rps:
|
||||||
|
return []
|
||||||
|
|
||||||
|
# provs_with_resource will contain a superset of providers with IDs still
|
||||||
|
# in our filtered_rps set. We return the list of tuples of
|
||||||
|
# (internal provider ID, root internal provider ID)
|
||||||
|
return [rpids for rpids in provs_with_resource if rpids[0] in filtered_rps]
|
||||||
|
|
||||||
|
|
||||||
|
@db_api.placement_context_manager.reader
|
||||||
|
def get_trees_matching_all(rg_ctx):
|
||||||
|
"""Returns a RPCandidates object representing the providers that satisfy
|
||||||
|
the request for resources.
|
||||||
|
|
||||||
|
If traits are also required, this function only returns results where the
|
||||||
|
set of providers within a tree that satisfy the resource request
|
||||||
|
collectively have all the required traits associated with them. This means
|
||||||
|
that given the following provider tree:
|
||||||
|
|
||||||
|
cn1
|
||||||
|
|
|
||||||
|
--> pf1 (SRIOV_NET_VF:2)
|
||||||
|
|
|
||||||
|
--> pf2 (SRIOV_NET_VF:1, HW_NIC_OFFLOAD_GENEVE)
|
||||||
|
|
||||||
|
If a user requests 1 SRIOV_NET_VF resource and no required traits will
|
||||||
|
return both pf1 and pf2. However, a request for 2 SRIOV_NET_VF and required
|
||||||
|
trait of HW_NIC_OFFLOAD_GENEVE will return no results (since pf1 is the
|
||||||
|
only provider with enough inventory of SRIOV_NET_VF but it does not have
|
||||||
|
the required HW_NIC_OFFLOAD_GENEVE trait).
|
||||||
|
|
||||||
|
:note: This function is used for scenarios to get results for a
|
||||||
|
RequestGroup with use_same_provider=False. In this scenario, we are able
|
||||||
|
to use multiple providers within the same provider tree including sharing
|
||||||
|
providers to satisfy different resources involved in a single RequestGroup.
|
||||||
|
|
||||||
|
:param rg_ctx: RequestGroupSearchContext
|
||||||
|
"""
|
||||||
|
# If 'member_of' has values, do a separate lookup to identify the
|
||||||
|
# resource providers that meet the member_of constraints.
|
||||||
|
if rg_ctx.member_of:
|
||||||
|
rps_in_aggs = provider_ids_matching_aggregates(
|
||||||
|
rg_ctx.context, rg_ctx.member_of)
|
||||||
|
if not rps_in_aggs:
|
||||||
|
# Short-circuit. The user either asked for a non-existing
|
||||||
|
# aggregate or there were no resource providers that matched
|
||||||
|
# the requirements...
|
||||||
|
return rp_candidates.RPCandidateList()
|
||||||
|
|
||||||
|
if rg_ctx.forbidden_aggs:
|
||||||
|
rps_bad_aggs = provider_ids_matching_aggregates(
|
||||||
|
rg_ctx.context, [rg_ctx.forbidden_aggs])
|
||||||
|
|
||||||
|
# To get all trees that collectively have all required resource,
|
||||||
|
# aggregates and traits, we use `RPCandidateList` which has a list of
|
||||||
|
# three-tuples with the first element being resource provider ID, the
|
||||||
|
# second element being the root provider ID and the third being resource
|
||||||
|
# class ID.
|
||||||
|
provs_with_inv = rp_candidates.RPCandidateList()
|
||||||
|
|
||||||
|
for rc_id, amount in rg_ctx.resources.items():
|
||||||
|
rc_name = rc_cache.RC_CACHE.string_from_id(rc_id)
|
||||||
|
|
||||||
|
provs_with_inv_rc = rp_candidates.RPCandidateList()
|
||||||
|
rc_provs_with_inv = get_providers_with_resource(
|
||||||
|
rg_ctx.context, rc_id, amount, tree_root_id=rg_ctx.tree_root_id)
|
||||||
|
provs_with_inv_rc.add_rps(rc_provs_with_inv, rc_id)
|
||||||
|
LOG.debug("found %d providers under %d trees with available %d %s",
|
||||||
|
len(provs_with_inv_rc), len(provs_with_inv_rc.trees),
|
||||||
|
amount, rc_name)
|
||||||
|
if not provs_with_inv_rc:
|
||||||
|
# If there's no providers that have one of the resource classes,
|
||||||
|
# then we can short-circuit returning an empty RPCandidateList
|
||||||
|
return rp_candidates.RPCandidateList()
|
||||||
|
|
||||||
|
sharing_providers = rg_ctx.get_rps_with_shared_capacity(rc_id)
|
||||||
|
if sharing_providers and rg_ctx.tree_root_id is None:
|
||||||
|
# There are sharing providers for this resource class, so we
|
||||||
|
# should also get combinations of (sharing provider, anchor root)
|
||||||
|
# in addition to (non-sharing provider, anchor root) we've just
|
||||||
|
# got via get_providers_with_resource() above. We must skip this
|
||||||
|
# process if tree_root_id is provided via the ?in_tree=<rp_uuid>
|
||||||
|
# queryparam, because it restricts resources from another tree.
|
||||||
|
rc_provs_with_inv = anchors_for_sharing_providers(
|
||||||
|
rg_ctx.context, sharing_providers, get_id=True)
|
||||||
|
provs_with_inv_rc.add_rps(rc_provs_with_inv, rc_id)
|
||||||
|
LOG.debug(
|
||||||
|
"considering %d sharing providers with %d %s, "
|
||||||
|
"now we've got %d provider trees",
|
||||||
|
len(sharing_providers), amount, rc_name,
|
||||||
|
len(provs_with_inv_rc.trees))
|
||||||
|
|
||||||
|
if rg_ctx.member_of:
|
||||||
|
# Aggregate on root spans the whole tree, so the rp itself
|
||||||
|
# *or its root* should be in the aggregate
|
||||||
|
provs_with_inv_rc.filter_by_rp_or_tree(rps_in_aggs)
|
||||||
|
LOG.debug("found %d providers under %d trees after applying "
|
||||||
|
"aggregate filter %s",
|
||||||
|
len(provs_with_inv_rc.rps), len(provs_with_inv_rc.trees),
|
||||||
|
rg_ctx.member_of)
|
||||||
|
if not provs_with_inv_rc:
|
||||||
|
# Short-circuit returning an empty RPCandidateList
|
||||||
|
return rp_candidates.RPCandidateList()
|
||||||
|
if rg_ctx.forbidden_aggs:
|
||||||
|
# Aggregate on root spans the whole tree, so the rp itself
|
||||||
|
# *and its root* should be outside the aggregate
|
||||||
|
provs_with_inv_rc.filter_by_rp_nor_tree(rps_bad_aggs)
|
||||||
|
LOG.debug("found %d providers under %d trees after applying "
|
||||||
|
"negative aggregate filter %s",
|
||||||
|
len(provs_with_inv_rc.rps), len(provs_with_inv_rc.trees),
|
||||||
|
rg_ctx.forbidden_aggs)
|
||||||
|
if not provs_with_inv_rc:
|
||||||
|
# Short-circuit returning an empty RPCandidateList
|
||||||
|
return rp_candidates.RPCandidateList()
|
||||||
|
|
||||||
|
# Adding the resource providers we've got for this resource class,
|
||||||
|
# filter provs_with_inv to have only trees with enough inventories
|
||||||
|
# for this resource class. Here "tree" includes sharing providers
|
||||||
|
# in its terminology
|
||||||
|
provs_with_inv.merge_common_trees(provs_with_inv_rc)
|
||||||
|
LOG.debug(
|
||||||
|
"found %d providers under %d trees after filtering by "
|
||||||
|
"previous result",
|
||||||
|
len(provs_with_inv.rps), len(provs_with_inv.trees))
|
||||||
|
if not provs_with_inv:
|
||||||
|
return rp_candidates.RPCandidateList()
|
||||||
|
|
||||||
|
if (not rg_ctx.required_trait_map and not rg_ctx.forbidden_trait_map) or (
|
||||||
|
rg_ctx.exists_sharing):
|
||||||
|
# If there were no traits required, there's no difference in how we
|
||||||
|
# calculate allocation requests between nested and non-nested
|
||||||
|
# environments, so just short-circuit and return. Or if sharing
|
||||||
|
# providers are in play, we check the trait constraints later
|
||||||
|
# in _alloc_candidates_multiple_providers(), so skip.
|
||||||
|
return provs_with_inv
|
||||||
|
|
||||||
|
# Return the providers where the providers have the available inventory
|
||||||
|
# capacity and that set of providers (grouped by their tree) have all
|
||||||
|
# of the required traits and none of the forbidden traits
|
||||||
|
rp_tuples_with_trait = _get_trees_with_traits(
|
||||||
|
rg_ctx.context, provs_with_inv.rps, rg_ctx.required_trait_map,
|
||||||
|
rg_ctx.forbidden_trait_map)
|
||||||
|
provs_with_inv.filter_by_rp(rp_tuples_with_trait)
|
||||||
|
LOG.debug("found %d providers under %d trees after applying "
|
||||||
|
"traits filter - required: %s, forbidden: %s",
|
||||||
|
len(provs_with_inv.rps), len(provs_with_inv.trees),
|
||||||
|
list(rg_ctx.required_trait_map),
|
||||||
|
list(rg_ctx.forbidden_trait_map))
|
||||||
|
|
||||||
|
return provs_with_inv
|
||||||
|
|
||||||
|
|
||||||
|
@db_api.placement_context_manager.reader
|
||||||
|
def _get_trees_with_traits(ctx, rp_ids, required_traits, forbidden_traits):
|
||||||
|
"""Given a list of provider IDs, filter them to return a set of tuples of
|
||||||
|
(provider ID, root provider ID) of providers which belong to a tree that
|
||||||
|
can satisfy trait requirements.
|
||||||
|
|
||||||
|
:param ctx: Session context to use
|
||||||
|
:param rp_ids: a set of resource provider IDs
|
||||||
|
:param required_traits: A map, keyed by trait string name, of required
|
||||||
|
trait internal IDs that each provider TREE must
|
||||||
|
COLLECTIVELY have associated with it
|
||||||
|
:param forbidden_traits: A map, keyed by trait string name, of trait
|
||||||
|
internal IDs that a resource provider must
|
||||||
|
not have.
|
||||||
|
"""
|
||||||
|
# We now want to restrict the returned providers to only those provider
|
||||||
|
# trees that have all our required traits.
|
||||||
|
#
|
||||||
|
# The SQL we want looks like this:
|
||||||
|
#
|
||||||
|
# SELECT outer_rp.id, outer_rp.root_provider_id
|
||||||
|
# FROM resource_providers AS outer_rp
|
||||||
|
# JOIN (
|
||||||
|
# SELECT rp.root_provider_id
|
||||||
|
# FROM resource_providers AS rp
|
||||||
|
# # Only if we have required traits...
|
||||||
|
# INNER JOIN resource_provider_traits AS rptt
|
||||||
|
# ON rp.id = rptt.resource_provider_id
|
||||||
|
# AND rptt.trait_id IN ($REQUIRED_TRAIT_IDS)
|
||||||
|
# # Only if we have forbidden_traits...
|
||||||
|
# LEFT JOIN resource_provider_traits AS rptt_forbid
|
||||||
|
# ON rp.id = rptt_forbid.resource_provider_id
|
||||||
|
# AND rptt_forbid.trait_id IN ($FORBIDDEN_TRAIT_IDS)
|
||||||
|
# WHERE rp.id IN ($RP_IDS)
|
||||||
|
# # Only if we have forbidden traits...
|
||||||
|
# AND rptt_forbid.resource_provider_id IS NULL
|
||||||
|
# GROUP BY rp.root_provider_id
|
||||||
|
# # Only if have required traits...
|
||||||
|
# HAVING COUNT(DISTINCT rptt.trait_id) == $NUM_REQUIRED_TRAITS
|
||||||
|
# ) AS trees_with_traits
|
||||||
|
# ON outer_rp.root_provider_id = trees_with_traits.root_provider_id
|
||||||
|
rpt = sa.alias(_RP_TBL, name="rp")
|
||||||
|
cond = [rpt.c.id.in_(rp_ids)]
|
||||||
|
subq = sa.select([rpt.c.root_provider_id])
|
||||||
|
subq_join = None
|
||||||
|
if required_traits:
|
||||||
|
rptt = sa.alias(_RP_TRAIT_TBL, name="rptt")
|
||||||
|
rpt_to_rptt = sa.join(
|
||||||
|
rpt, rptt, sa.and_(
|
||||||
|
rpt.c.id == rptt.c.resource_provider_id,
|
||||||
|
rptt.c.trait_id.in_(required_traits.values())))
|
||||||
|
subq_join = rpt_to_rptt
|
||||||
|
# Only get the resource providers that have ALL the required traits,
|
||||||
|
# so we need to GROUP BY the root provider and ensure that the
|
||||||
|
# COUNT(trait_id) is equal to the number of traits we are requiring
|
||||||
|
num_traits = len(required_traits)
|
||||||
|
having_cond = sa.func.count(sa.distinct(rptt.c.trait_id)) == num_traits
|
||||||
|
subq = subq.having(having_cond)
|
||||||
|
|
||||||
|
# Tack on an additional LEFT JOIN clause inside the derived table if we've
|
||||||
|
# got forbidden traits in the mix.
|
||||||
|
if forbidden_traits:
|
||||||
|
rptt_forbid = sa.alias(_RP_TRAIT_TBL, name="rptt_forbid")
|
||||||
|
join_to = rpt
|
||||||
|
if subq_join is not None:
|
||||||
|
join_to = subq_join
|
||||||
|
rpt_to_rptt_forbid = sa.outerjoin(
|
||||||
|
join_to, rptt_forbid, sa.and_(
|
||||||
|
rpt.c.id == rptt_forbid.c.resource_provider_id,
|
||||||
|
rptt_forbid.c.trait_id.in_(forbidden_traits.values())))
|
||||||
|
cond.append(rptt_forbid.c.resource_provider_id == sa.null())
|
||||||
|
subq_join = rpt_to_rptt_forbid
|
||||||
|
|
||||||
|
subq = subq.select_from(subq_join)
|
||||||
|
subq = subq.where(sa.and_(*cond))
|
||||||
|
subq = subq.group_by(rpt.c.root_provider_id)
|
||||||
|
trees_with_traits = sa.alias(subq, name="trees_with_traits")
|
||||||
|
|
||||||
|
outer_rps = sa.alias(_RP_TBL, name="outer_rps")
|
||||||
|
outer_to_subq = sa.join(
|
||||||
|
outer_rps, trees_with_traits,
|
||||||
|
outer_rps.c.root_provider_id == trees_with_traits.c.root_provider_id)
|
||||||
|
sel = sa.select([outer_rps.c.id, outer_rps.c.root_provider_id])
|
||||||
|
sel = sel.select_from(outer_to_subq)
|
||||||
|
res = ctx.session.execute(sel).fetchall()
|
||||||
|
|
||||||
|
return [(rp_id, root_id) for rp_id, root_id in res]
|
||||||
|
|
||||||
|
|
||||||
|
@db_api.placement_context_manager.reader
|
||||||
|
def provider_ids_matching_aggregates(context, member_of, rp_ids=None):
|
||||||
|
"""Given a list of lists of aggregate UUIDs, return the internal IDs of all
|
||||||
|
resource providers associated with the aggregates.
|
||||||
|
|
||||||
|
:param member_of: A list containing lists of aggregate UUIDs. Each item in
|
||||||
|
the outer list is to be AND'd together. If that item contains multiple
|
||||||
|
values, they are OR'd together.
|
||||||
|
|
||||||
|
For example, if member_of is::
|
||||||
|
|
||||||
|
[
|
||||||
|
['agg1'],
|
||||||
|
['agg2', 'agg3'],
|
||||||
|
]
|
||||||
|
|
||||||
|
we will return all the resource providers that are
|
||||||
|
associated with agg1 as well as either (agg2 or agg3)
|
||||||
|
:param rp_ids: When present, returned resource providers are limited
|
||||||
|
to only those in this value
|
||||||
|
|
||||||
|
:returns: A set of internal resource provider IDs having all required
|
||||||
|
aggregate associations
|
||||||
|
"""
|
||||||
|
# Given a request for the following:
|
||||||
|
#
|
||||||
|
# member_of = [
|
||||||
|
# [agg1],
|
||||||
|
# [agg2],
|
||||||
|
# [agg3, agg4]
|
||||||
|
# ]
|
||||||
|
#
|
||||||
|
# we need to produce the following SQL expression:
|
||||||
|
#
|
||||||
|
# SELECT
|
||||||
|
# rp.id
|
||||||
|
# FROM resource_providers AS rp
|
||||||
|
# JOIN resource_provider_aggregates AS rpa1
|
||||||
|
# ON rp.id = rpa1.resource_provider_id
|
||||||
|
# AND rpa1.aggregate_id IN ($AGG1_ID)
|
||||||
|
# JOIN resource_provider_aggregates AS rpa2
|
||||||
|
# ON rp.id = rpa2.resource_provider_id
|
||||||
|
# AND rpa2.aggregate_id IN ($AGG2_ID)
|
||||||
|
# JOIN resource_provider_aggregates AS rpa3
|
||||||
|
# ON rp.id = rpa3.resource_provider_id
|
||||||
|
# AND rpa3.aggregate_id IN ($AGG3_ID, $AGG4_ID)
|
||||||
|
# # Only if we have rp_ids...
|
||||||
|
# WHERE rp.id IN ($RP_IDs)
|
||||||
|
|
||||||
|
# First things first, get a map of all the aggregate UUID to internal
|
||||||
|
# aggregate IDs
|
||||||
|
agg_uuids = set()
|
||||||
|
for members in member_of:
|
||||||
|
for member in members:
|
||||||
|
agg_uuids.add(member)
|
||||||
|
agg_tbl = sa.alias(_AGG_TBL, name='aggs')
|
||||||
|
agg_sel = sa.select([agg_tbl.c.uuid, agg_tbl.c.id])
|
||||||
|
agg_sel = agg_sel.where(agg_tbl.c.uuid.in_(agg_uuids))
|
||||||
|
agg_uuid_map = {
|
||||||
|
r[0]: r[1] for r in context.session.execute(agg_sel).fetchall()
|
||||||
|
}
|
||||||
|
|
||||||
|
rp_tbl = sa.alias(_RP_TBL, name='rp')
|
||||||
|
join_chain = rp_tbl
|
||||||
|
|
||||||
|
for x, members in enumerate(member_of):
|
||||||
|
rpa_tbl = sa.alias(_RP_AGG_TBL, name='rpa%d' % x)
|
||||||
|
|
||||||
|
agg_ids = [agg_uuid_map[member] for member in members
|
||||||
|
if member in agg_uuid_map]
|
||||||
|
if not agg_ids:
|
||||||
|
# This member_of list contains only non-existent aggregate UUIDs
|
||||||
|
# and therefore we will always return 0 results, so short-circuit
|
||||||
|
return set()
|
||||||
|
|
||||||
|
join_cond = sa.and_(
|
||||||
|
rp_tbl.c.id == rpa_tbl.c.resource_provider_id,
|
||||||
|
rpa_tbl.c.aggregate_id.in_(agg_ids))
|
||||||
|
join_chain = sa.join(join_chain, rpa_tbl, join_cond)
|
||||||
|
sel = sa.select([rp_tbl.c.id]).select_from(join_chain)
|
||||||
|
if rp_ids:
|
||||||
|
sel = sel.where(rp_tbl.c.id.in_(rp_ids))
|
||||||
|
return set(r[0] for r in context.session.execute(sel))
|
||||||
|
|
||||||
|
|
||||||
|
@db_api.placement_context_manager.reader
|
||||||
|
def get_provider_ids_having_any_trait(ctx, traits):
|
||||||
|
"""Returns a set of resource provider internal IDs that have ANY of the
|
||||||
|
supplied traits.
|
||||||
|
|
||||||
|
:param ctx: Session context to use
|
||||||
|
:param traits: A map, keyed by trait string name, of trait internal IDs, at
|
||||||
|
least one of which each provider must have associated with
|
||||||
|
it.
|
||||||
|
:raise ValueError: If traits is empty or None.
|
||||||
|
"""
|
||||||
|
if not traits:
|
||||||
|
raise ValueError('traits must not be empty')
|
||||||
|
|
||||||
|
rptt = sa.alias(_RP_TRAIT_TBL, name="rpt")
|
||||||
|
sel = sa.select([rptt.c.resource_provider_id])
|
||||||
|
sel = sel.where(rptt.c.trait_id.in_(traits.values()))
|
||||||
|
sel = sel.group_by(rptt.c.resource_provider_id)
|
||||||
|
return set(r[0] for r in ctx.session.execute(sel))
|
||||||
|
|
||||||
|
|
||||||
|
@db_api.placement_context_manager.reader
|
||||||
|
def _get_provider_ids_having_all_traits(ctx, required_traits):
|
||||||
|
"""Returns a set of resource provider internal IDs that have ALL of the
|
||||||
|
required traits.
|
||||||
|
|
||||||
|
NOTE: Don't call this method with no required_traits.
|
||||||
|
|
||||||
|
:param ctx: Session context to use
|
||||||
|
:param required_traits: A map, keyed by trait string name, of required
|
||||||
|
trait internal IDs that each provider must have
|
||||||
|
associated with it
|
||||||
|
:raise ValueError: If required_traits is empty or None.
|
||||||
|
"""
|
||||||
|
if not required_traits:
|
||||||
|
raise ValueError('required_traits must not be empty')
|
||||||
|
|
||||||
|
rptt = sa.alias(_RP_TRAIT_TBL, name="rpt")
|
||||||
|
sel = sa.select([rptt.c.resource_provider_id])
|
||||||
|
sel = sel.where(rptt.c.trait_id.in_(required_traits.values()))
|
||||||
|
sel = sel.group_by(rptt.c.resource_provider_id)
|
||||||
|
# Only get the resource providers that have ALL the required traits, so we
|
||||||
|
# need to GROUP BY the resource provider and ensure that the
|
||||||
|
# COUNT(trait_id) is equal to the number of traits we are requiring
|
||||||
|
num_traits = len(required_traits)
|
||||||
|
cond = sa.func.count(rptt.c.trait_id) == num_traits
|
||||||
|
sel = sel.having(cond)
|
||||||
|
return set(r[0] for r in ctx.session.execute(sel))
|
||||||
|
|
||||||
|
|
||||||
|
def get_provider_ids_for_traits_and_aggs(ctx, required_traits,
|
||||||
|
forbidden_traits, member_of,
|
||||||
|
forbidden_aggs):
|
||||||
|
"""Get internal IDs for all providers matching the specified traits/aggs.
|
||||||
|
|
||||||
|
:return: A tuple of:
|
||||||
|
filtered_rp_ids: A set of internal provider IDs matching the specified
|
||||||
|
criteria. If None, work was done and resulted in no matching
|
||||||
|
providers. This is in contrast to the empty set, which indicates
|
||||||
|
that no filtering was performed.
|
||||||
|
forbidden_rp_ids: A set of internal IDs of providers having any of the
|
||||||
|
specified forbidden_traits.
|
||||||
|
"""
|
||||||
|
filtered_rps = set()
|
||||||
|
if required_traits:
|
||||||
|
trait_map = _normalize_trait_map(ctx, required_traits)
|
||||||
|
trait_rps = _get_provider_ids_having_all_traits(ctx, trait_map)
|
||||||
|
filtered_rps = trait_rps
|
||||||
|
LOG.debug("found %d providers after applying required traits filter "
|
||||||
|
"(%s)",
|
||||||
|
len(filtered_rps), list(required_traits))
|
||||||
|
if not filtered_rps:
|
||||||
|
return None, []
|
||||||
|
|
||||||
|
# If 'member_of' has values, do a separate lookup to identify the
|
||||||
|
# resource providers that meet the member_of constraints.
|
||||||
|
if member_of:
|
||||||
|
rps_in_aggs = provider_ids_matching_aggregates(ctx, member_of)
|
||||||
|
if filtered_rps:
|
||||||
|
filtered_rps &= rps_in_aggs
|
||||||
|
else:
|
||||||
|
filtered_rps = rps_in_aggs
|
||||||
|
LOG.debug("found %d providers after applying required aggregates "
|
||||||
|
"filter (%s)", len(filtered_rps), member_of)
|
||||||
|
if not filtered_rps:
|
||||||
|
return None, []
|
||||||
|
|
||||||
|
forbidden_rp_ids = set()
|
||||||
|
if forbidden_aggs:
|
||||||
|
rps_bad_aggs = provider_ids_matching_aggregates(ctx, [forbidden_aggs])
|
||||||
|
forbidden_rp_ids |= rps_bad_aggs
|
||||||
|
if filtered_rps:
|
||||||
|
filtered_rps -= rps_bad_aggs
|
||||||
|
LOG.debug("found %d providers after applying forbidden aggregates "
|
||||||
|
"filter (%s)", len(filtered_rps), forbidden_aggs)
|
||||||
|
if not filtered_rps:
|
||||||
|
return None, []
|
||||||
|
|
||||||
|
if forbidden_traits:
|
||||||
|
trait_map = _normalize_trait_map(ctx, forbidden_traits)
|
||||||
|
rps_bad_traits = get_provider_ids_having_any_trait(ctx, trait_map)
|
||||||
|
forbidden_rp_ids |= rps_bad_traits
|
||||||
|
if filtered_rps:
|
||||||
|
filtered_rps -= rps_bad_traits
|
||||||
|
LOG.debug("found %d providers after applying forbidden traits "
|
||||||
|
"filter (%s)", len(filtered_rps), list(forbidden_traits))
|
||||||
|
if not filtered_rps:
|
||||||
|
return None, []
|
||||||
|
|
||||||
|
return filtered_rps, forbidden_rp_ids
|
||||||
|
|
||||||
|
|
||||||
|
@db_api.placement_context_manager.reader
|
||||||
|
def get_providers_with_shared_capacity(ctx, rc_id, amount, member_of=None):
|
||||||
|
"""Returns a list of resource provider IDs (internal IDs, not UUIDs)
|
||||||
|
that have capacity for a requested amount of a resource and indicate that
|
||||||
|
they share resource via an aggregate association.
|
||||||
|
|
||||||
|
Shared resource providers are marked with a standard trait called
|
||||||
|
MISC_SHARES_VIA_AGGREGATE. This indicates that the provider allows its
|
||||||
|
inventory to be consumed by other resource providers associated via an
|
||||||
|
aggregate link.
|
||||||
|
|
||||||
|
For example, assume we have two compute nodes, CN_1 and CN_2, each with
|
||||||
|
inventory of VCPU and MEMORY_MB but not DISK_GB (in other words, these are
|
||||||
|
compute nodes with no local disk). There is a resource provider called
|
||||||
|
"NFS_SHARE" that has an inventory of DISK_GB and has the
|
||||||
|
MISC_SHARES_VIA_AGGREGATE trait. Both the "CN_1" and "CN_2" compute node
|
||||||
|
resource providers and the "NFS_SHARE" resource provider are associated
|
||||||
|
with an aggregate called "AGG_1".
|
||||||
|
|
||||||
|
The scheduler needs to determine the resource providers that can fulfill a
|
||||||
|
request for 2 VCPU, 1024 MEMORY_MB and 100 DISK_GB.
|
||||||
|
|
||||||
|
Clearly, no single provider can satisfy the request for all three
|
||||||
|
resources, since neither compute node has DISK_GB inventory and the
|
||||||
|
NFS_SHARE provider has no VCPU or MEMORY_MB inventories.
|
||||||
|
|
||||||
|
However, if we consider the NFS_SHARE resource provider as providing
|
||||||
|
inventory of DISK_GB for both CN_1 and CN_2, we can include CN_1 and CN_2
|
||||||
|
as potential fits for the requested set of resources.
|
||||||
|
|
||||||
|
To facilitate that matching query, this function returns all providers that
|
||||||
|
indicate they share their inventory with providers in some aggregate and
|
||||||
|
have enough capacity for the requested amount of a resource.
|
||||||
|
|
||||||
|
To follow the example above, if we were to call
|
||||||
|
get_providers_with_shared_capacity(ctx, "DISK_GB", 100), we would want to
|
||||||
|
get back the ID for the NFS_SHARE resource provider.
|
||||||
|
|
||||||
|
:param rc_id: Internal ID of the requested resource class.
|
||||||
|
:param amount: Amount of the requested resource.
|
||||||
|
:param member_of: When present, contains a list of lists of aggregate
|
||||||
|
uuids that are used to filter the returned list of
|
||||||
|
resource providers that *directly* belong to the
|
||||||
|
aggregates referenced.
|
||||||
|
"""
|
||||||
|
# The SQL we need to generate here looks like this:
|
||||||
|
#
|
||||||
|
# SELECT rp.id
|
||||||
|
# FROM resource_providers AS rp
|
||||||
|
# INNER JOIN resource_provider_traits AS rpt
|
||||||
|
# ON rp.id = rpt.resource_provider_id
|
||||||
|
# INNER JOIN traits AS t
|
||||||
|
# ON rpt.trait_id = t.id
|
||||||
|
# AND t.name = "MISC_SHARES_VIA_AGGREGATE"
|
||||||
|
# INNER JOIN inventories AS inv
|
||||||
|
# ON rp.id = inv.resource_provider_id
|
||||||
|
# AND inv.resource_class_id = $rc_id
|
||||||
|
# LEFT JOIN (
|
||||||
|
# SELECT resource_provider_id, SUM(used) as used
|
||||||
|
# FROM allocations
|
||||||
|
# WHERE resource_class_id = $rc_id
|
||||||
|
# GROUP BY resource_provider_id
|
||||||
|
# ) AS usage
|
||||||
|
# ON rp.id = usage.resource_provider_id
|
||||||
|
# WHERE COALESCE(usage.used, 0) + $amount <= (
|
||||||
|
# inv.total - inv.reserved) * inv.allocation_ratio
|
||||||
|
# ) AND
|
||||||
|
# inv.min_unit <= $amount AND
|
||||||
|
# inv.max_unit >= $amount AND
|
||||||
|
# $amount % inv.step_size = 0
|
||||||
|
# GROUP BY rp.id
|
||||||
|
|
||||||
|
rp_tbl = sa.alias(_RP_TBL, name='rp')
|
||||||
|
inv_tbl = sa.alias(_INV_TBL, name='inv')
|
||||||
|
t_tbl = sa.alias(_TRAIT_TBL, name='t')
|
||||||
|
rpt_tbl = sa.alias(_RP_TRAIT_TBL, name='rpt')
|
||||||
|
|
||||||
|
rp_to_rpt_join = sa.join(
|
||||||
|
rp_tbl, rpt_tbl,
|
||||||
|
rp_tbl.c.id == rpt_tbl.c.resource_provider_id,
|
||||||
|
)
|
||||||
|
|
||||||
|
rpt_to_t_join = sa.join(
|
||||||
|
rp_to_rpt_join, t_tbl,
|
||||||
|
sa.and_(
|
||||||
|
rpt_tbl.c.trait_id == t_tbl.c.id,
|
||||||
|
# The traits table wants unicode trait names, but os_traits
|
||||||
|
# presents native str, so we need to cast.
|
||||||
|
t_tbl.c.name == six.text_type(os_traits.MISC_SHARES_VIA_AGGREGATE),
|
||||||
|
),
|
||||||
|
)
|
||||||
|
|
||||||
|
rp_to_inv_join = sa.join(
|
||||||
|
rpt_to_t_join, inv_tbl,
|
||||||
|
sa.and_(
|
||||||
|
rpt_tbl.c.resource_provider_id == inv_tbl.c.resource_provider_id,
|
||||||
|
inv_tbl.c.resource_class_id == rc_id,
|
||||||
|
),
|
||||||
|
)
|
||||||
|
|
||||||
|
usage = _usage_select([rc_id])
|
||||||
|
|
||||||
|
inv_to_usage_join = sa.outerjoin(
|
||||||
|
rp_to_inv_join, usage,
|
||||||
|
inv_tbl.c.resource_provider_id == usage.c.resource_provider_id,
|
||||||
|
)
|
||||||
|
|
||||||
|
where_conds = _capacity_check_clause(amount, usage, inv_tbl=inv_tbl)
|
||||||
|
|
||||||
|
# If 'member_of' has values, do a separate lookup to identify the
|
||||||
|
# resource providers that meet the member_of constraints.
|
||||||
|
if member_of:
|
||||||
|
rps_in_aggs = provider_ids_matching_aggregates(ctx, member_of)
|
||||||
|
if not rps_in_aggs:
|
||||||
|
# Short-circuit. The user either asked for a non-existing
|
||||||
|
# aggregate or there were no resource providers that matched
|
||||||
|
# the requirements...
|
||||||
|
return []
|
||||||
|
where_conds.append(rp_tbl.c.id.in_(rps_in_aggs))
|
||||||
|
|
||||||
|
sel = sa.select([rp_tbl.c.id]).select_from(inv_to_usage_join)
|
||||||
|
sel = sel.where(where_conds)
|
||||||
|
sel = sel.group_by(rp_tbl.c.id)
|
||||||
|
|
||||||
|
return [r[0] for r in ctx.session.execute(sel)]
|
||||||
|
|
||||||
|
|
||||||
|
@db_api.placement_context_manager.reader
|
||||||
|
def anchors_for_sharing_providers(context, rp_ids, get_id=False):
|
||||||
|
"""Given a list of internal IDs of sharing providers, returns a set of
|
||||||
|
tuples of (sharing provider UUID, anchor provider UUID), where each of
|
||||||
|
anchor is the unique root provider of a tree associated with the same
|
||||||
|
aggregate as the sharing provider. (These are the providers that can
|
||||||
|
"anchor" a single AllocationRequest.)
|
||||||
|
|
||||||
|
The sharing provider may or may not itself be part of a tree; in either
|
||||||
|
case, an entry for this root provider is included in the result.
|
||||||
|
|
||||||
|
If the sharing provider is not part of any aggregate, the empty list is
|
||||||
|
returned.
|
||||||
|
|
||||||
|
If get_id is True, it returns a set of tuples of (sharing provider ID,
|
||||||
|
anchor provider ID) instead.
|
||||||
|
"""
|
||||||
|
# SELECT sps.uuid, COALESCE(rps.uuid, shr_with_sps.uuid)
|
||||||
|
# FROM resource_providers AS sps
|
||||||
|
# INNER JOIN resource_provider_aggregates AS shr_aggs
|
||||||
|
# ON sps.id = shr_aggs.resource_provider_id
|
||||||
|
# INNER JOIN resource_provider_aggregates AS shr_with_sps_aggs
|
||||||
|
# ON shr_aggs.aggregate_id = shr_with_sps_aggs.aggregate_id
|
||||||
|
# INNER JOIN resource_providers AS shr_with_sps
|
||||||
|
# ON shr_with_sps_aggs.resource_provider_id = shr_with_sps.id
|
||||||
|
# LEFT JOIN resource_providers AS rps
|
||||||
|
# ON shr_with_sps.root_provider_id = rps.id
|
||||||
|
# WHERE sps.id IN $(RP_IDs)
|
||||||
|
rps = sa.alias(_RP_TBL, name='rps')
|
||||||
|
sps = sa.alias(_RP_TBL, name='sps')
|
||||||
|
shr_aggs = sa.alias(_RP_AGG_TBL, name='shr_aggs')
|
||||||
|
shr_with_sps_aggs = sa.alias(_RP_AGG_TBL, name='shr_with_sps_aggs')
|
||||||
|
shr_with_sps = sa.alias(_RP_TBL, name='shr_with_sps')
|
||||||
|
join_chain = sa.join(
|
||||||
|
sps, shr_aggs, sps.c.id == shr_aggs.c.resource_provider_id)
|
||||||
|
join_chain = sa.join(
|
||||||
|
join_chain, shr_with_sps_aggs,
|
||||||
|
shr_aggs.c.aggregate_id == shr_with_sps_aggs.c.aggregate_id)
|
||||||
|
join_chain = sa.join(
|
||||||
|
join_chain, shr_with_sps,
|
||||||
|
shr_with_sps_aggs.c.resource_provider_id == shr_with_sps.c.id)
|
||||||
|
if get_id:
|
||||||
|
sel = sa.select([sps.c.id, shr_with_sps.c.root_provider_id])
|
||||||
|
else:
|
||||||
|
join_chain = sa.join(
|
||||||
|
join_chain, rps, shr_with_sps.c.root_provider_id == rps.c.id)
|
||||||
|
sel = sa.select([sps.c.uuid, rps.c.uuid])
|
||||||
|
sel = sel.select_from(join_chain)
|
||||||
|
sel = sel.where(sps.c.id.in_(rp_ids))
|
||||||
|
return set([(r[0], r[1]) for r in context.session.execute(sel).fetchall()])
|
||||||
|
|
||||||
|
|
||||||
|
def _normalize_trait_map(ctx, traits):
|
||||||
|
if not isinstance(traits, dict):
|
||||||
|
return trait_obj.ids_from_names(ctx, traits)
|
||||||
|
return traits
|
||||||
|
|
||||||
|
|
||||||
|
@db_api.placement_context_manager.reader
|
||||||
|
def has_provider_trees(ctx):
|
||||||
|
"""Simple method that returns whether provider trees (i.e. nested resource
|
||||||
|
providers) are in use in the deployment at all. This information is used to
|
||||||
|
switch code paths when attempting to retrieve allocation candidate
|
||||||
|
information. The code paths are eminently easier to execute and follow for
|
||||||
|
non-nested scenarios...
|
||||||
|
|
||||||
|
NOTE(jaypipes): The result of this function can be cached extensively.
|
||||||
|
"""
|
||||||
|
sel = sa.select([_RP_TBL.c.id])
|
||||||
|
sel = sel.where(_RP_TBL.c.parent_provider_id.isnot(None))
|
||||||
|
sel = sel.limit(1)
|
||||||
|
res = ctx.session.execute(sel).fetchall()
|
||||||
|
return len(res) > 0
|
||||||
|
@@ -10,7 +10,6 @@
 # License for the specific language governing permissions and limitations
 # under the License.

-import collections
 import copy

 # NOTE(cdent): The resource provider objects are designed to never be
@@ -19,22 +18,19 @@ import copy
 # not be registered and there is no need to express VERSIONs nor handle
 # obj_make_compatible.

-import os_traits
 from oslo_db import api as oslo_db_api
 from oslo_db import exception as db_exc
 from oslo_log import log as logging
 from oslo_utils import excutils
-import six
 import sqlalchemy as sa
 from sqlalchemy import exc as sqla_exc
 from sqlalchemy import func
-from sqlalchemy import sql

 from placement.db.sqlalchemy import models
 from placement import db_api
 from placement import exception
 from placement.objects import inventory as inv_obj
-from placement.objects import rp_candidates
+from placement.objects import research_context as res_ctx
 from placement.objects import trait as trait_obj
 from placement import resource_class_cache as rc_cache

@@ -49,27 +45,6 @@ _RP_TRAIT_TBL = models.ResourceProviderTrait.__table__
 LOG = logging.getLogger(__name__)


-def _usage_select(rc_ids):
-    usage = sa.select([_ALLOC_TBL.c.resource_provider_id,
-                       _ALLOC_TBL.c.resource_class_id,
-                       sql.func.sum(_ALLOC_TBL.c.used).label('used')])
-    usage = usage.where(_ALLOC_TBL.c.resource_class_id.in_(rc_ids))
-    usage = usage.group_by(_ALLOC_TBL.c.resource_provider_id,
-                           _ALLOC_TBL.c.resource_class_id)
-    return sa.alias(usage, name='usage')
-
-
-def _capacity_check_clause(amount, usage, inv_tbl=_INV_TBL):
-    return sa.and_(
-        sql.func.coalesce(usage.c.used, 0) + amount <= (
-            (inv_tbl.c.total - inv_tbl.c.reserved) *
-            inv_tbl.c.allocation_ratio),
-        inv_tbl.c.min_unit <= amount,
-        inv_tbl.c.max_unit >= amount,
-        amount % inv_tbl.c.step_size == 0,
-    )
-
-
 def _get_current_inventory_resources(ctx, rp):
     """Returns a set() containing the resource class IDs for all resources
     currently having an inventory record for the supplied resource provider.
@@ -331,58 +306,6 @@ def _get_aggregates_by_provider_id(context, rp_id):
     return {r[0]: r[1] for r in context.session.execute(sel).fetchall()}


-@db_api.placement_context_manager.reader
-def anchors_for_sharing_providers(context, rp_ids, get_id=False):
-    """Given a list of internal IDs of sharing providers, returns a set of
-    tuples of (sharing provider UUID, anchor provider UUID), where each of
-    anchor is the unique root provider of a tree associated with the same
-    aggregate as the sharing provider. (These are the providers that can
-    "anchor" a single AllocationRequest.)
-
-    The sharing provider may or may not itself be part of a tree; in either
-    case, an entry for this root provider is included in the result.
-
-    If the sharing provider is not part of any aggregate, the empty list is
-    returned.
-
-    If get_id is True, it returns a set of tuples of (sharing provider ID,
-    anchor provider ID) instead.
-    """
-    # SELECT sps.uuid, COALESCE(rps.uuid, shr_with_sps.uuid)
-    # FROM resource_providers AS sps
-    # INNER JOIN resource_provider_aggregates AS shr_aggs
-    #   ON sps.id = shr_aggs.resource_provider_id
-    # INNER JOIN resource_provider_aggregates AS shr_with_sps_aggs
-    #   ON shr_aggs.aggregate_id = shr_with_sps_aggs.aggregate_id
-    # INNER JOIN resource_providers AS shr_with_sps
-    #   ON shr_with_sps_aggs.resource_provider_id = shr_with_sps.id
-    # LEFT JOIN resource_providers AS rps
-    #   ON shr_with_sps.root_provider_id = rps.id
-    # WHERE sps.id IN $(RP_IDs)
-    rps = sa.alias(_RP_TBL, name='rps')
-    sps = sa.alias(_RP_TBL, name='sps')
-    shr_aggs = sa.alias(_RP_AGG_TBL, name='shr_aggs')
-    shr_with_sps_aggs = sa.alias(_RP_AGG_TBL, name='shr_with_sps_aggs')
-    shr_with_sps = sa.alias(_RP_TBL, name='shr_with_sps')
-    join_chain = sa.join(
-        sps, shr_aggs, sps.c.id == shr_aggs.c.resource_provider_id)
-    join_chain = sa.join(
-        join_chain, shr_with_sps_aggs,
-        shr_aggs.c.aggregate_id == shr_with_sps_aggs.c.aggregate_id)
-    join_chain = sa.join(
-        join_chain, shr_with_sps,
-        shr_with_sps_aggs.c.resource_provider_id == shr_with_sps.c.id)
-    if get_id:
-        sel = sa.select([sps.c.id, shr_with_sps.c.root_provider_id])
-    else:
-        join_chain = sa.join(
-            join_chain, rps, shr_with_sps.c.root_provider_id == rps.c.id)
-        sel = sa.select([sps.c.uuid, rps.c.uuid, ])
-    sel = sel.select_from(join_chain)
-    sel = sel.where(sps.c.id.in_(rp_ids))
-    return set([(r[0], r[1]) for r in context.session.execute(sel).fetchall()])
-
-
 def _ensure_aggregate(ctx, agg_uuid):
     """Finds an aggregate and returns its internal ID. If not found, creates
     the aggregate and returns the new aggregate's internal ID.
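To make the anchoring rule in the removed helper concrete: a sharing provider is anchored to the root of every tree with which it shares an aggregate, including its own root. A small in-memory sketch with invented provider names and aggregate layout (not data from this change):

provider_aggs = {        # provider -> aggregates it is directly a member of
    'nfs_share': {'agg1'},
    'cn1': {'agg1'},
    'cn1_numa0': set(),  # child providers need not be in the aggregate
}
provider_root = {        # provider -> root of its tree
    'nfs_share': 'nfs_share',
    'cn1': 'cn1',
    'cn1_numa0': 'cn1',
}

def anchors_for(sharing):
    aggs = provider_aggs[sharing]
    return {(sharing, provider_root[other])
            for other, other_aggs in provider_aggs.items()
            if other_aggs & aggs}

# Both the share's own root and the cn1 tree anchor the sharing provider.
print(anchors_for('nfs_share'))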
@@ -600,180 +523,6 @@ def set_root_provider_ids(context, batch_size):
         return res.rowcount, res.rowcount


-ProviderIds = collections.namedtuple(
-    'ProviderIds', 'id uuid parent_id parent_uuid root_id root_uuid')
-
-
-def provider_ids_from_rp_ids(context, rp_ids):
-    """Given an iterable of internal resource provider IDs, returns a dict,
-    keyed by internal provider Id, of ProviderIds namedtuples describing those
-    providers.
-
-    :returns: dict, keyed by internal provider Id, of ProviderIds namedtuples
-    :param rp_ids: iterable of internal provider IDs to look up
-    """
-    # SELECT
-    #   rp.id, rp.uuid,
-    #   parent.id AS parent_id, parent.uuid AS parent_uuid,
-    #   root.id AS root_id, root.uuid AS root_uuid
-    # FROM resource_providers AS rp
-    # INNER JOIN resource_providers AS root
-    #   ON rp.root_provider_id = root.id
-    # LEFT JOIN resource_providers AS parent
-    #   ON rp.parent_provider_id = parent.id
-    # WHERE rp.id IN ($rp_ids)
-    me = sa.alias(_RP_TBL, name="me")
-    parent = sa.alias(_RP_TBL, name="parent")
-    root = sa.alias(_RP_TBL, name="root")
-    cols = [
-        me.c.id,
-        me.c.uuid,
-        parent.c.id.label('parent_id'),
-        parent.c.uuid.label('parent_uuid'),
-        root.c.id.label('root_id'),
-        root.c.uuid.label('root_uuid'),
-    ]
-    me_to_root = sa.join(me, root, me.c.root_provider_id == root.c.id)
-    me_to_parent = sa.outerjoin(
-        me_to_root, parent,
-        me.c.parent_provider_id == parent.c.id)
-    sel = sa.select(cols).select_from(me_to_parent)
-    sel = sel.where(me.c.id.in_(rp_ids))
-
-    ret = {}
-    for r in context.session.execute(sel):
-        ret[r['id']] = ProviderIds(**r)
-    return ret
-
-
-@db_api.placement_context_manager.reader
-def provider_ids_from_uuid(context, uuid):
-    """Given the UUID of a resource provider, returns a namedtuple
-    (ProviderIds) with the internal ID, the UUID, the parent provider's
-    internal ID, parent provider's UUID, the root provider's internal ID and
-    the root provider UUID.
-
-    :returns: ProviderIds object containing the internal IDs and UUIDs of the
-              provider identified by the supplied UUID
-    :param uuid: The UUID of the provider to look up
-    """
-    # SELECT
-    #   rp.id, rp.uuid,
-    #   parent.id AS parent_id, parent.uuid AS parent_uuid,
-    #   root.id AS root_id, root.uuid AS root_uuid
-    # FROM resource_providers AS rp
-    # INNER JOIN resource_providers AS root
-    #   ON rp.root_provider_id = root.id
-    # LEFT JOIN resource_providers AS parent
-    #   ON rp.parent_provider_id = parent.id
-    me = sa.alias(_RP_TBL, name="me")
-    parent = sa.alias(_RP_TBL, name="parent")
-    root = sa.alias(_RP_TBL, name="root")
-    cols = [
-        me.c.id,
-        me.c.uuid,
-        parent.c.id.label('parent_id'),
-        parent.c.uuid.label('parent_uuid'),
-        root.c.id.label('root_id'),
-        root.c.uuid.label('root_uuid'),
-    ]
-    me_to_root = sa.join(me, root, me.c.root_provider_id == root.c.id)
-    me_to_parent = sa.outerjoin(
-        me_to_root, parent,
-        me.c.parent_provider_id == parent.c.id)
-    sel = sa.select(cols).select_from(me_to_parent)
-    sel = sel.where(me.c.uuid == uuid)
-    res = context.session.execute(sel).fetchone()
-    if not res:
-        return None
-    return ProviderIds(**dict(res))
-
-
-@db_api.placement_context_manager.reader
-def provider_ids_matching_aggregates(context, member_of, rp_ids=None):
-    """Given a list of lists of aggregate UUIDs, return the internal IDs of all
-    resource providers associated with the aggregates.
-
-    :param member_of: A list containing lists of aggregate UUIDs. Each item in
-        the outer list is to be AND'd together. If that item contains multiple
-        values, they are OR'd together.
-
-        For example, if member_of is::
-
-            [
-                ['agg1'],
-                ['agg2', 'agg3'],
-            ]
-
-        we will return all the resource providers that are
-        associated with agg1 as well as either (agg2 or agg3)
-    :param rp_ids: When present, returned resource providers are limited
-        to only those in this value
-
-    :returns: A set of internal resource provider IDs having all required
-        aggregate associations
-    """
-    # Given a request for the following:
-    #
-    # member_of = [
-    #   [agg1],
-    #   [agg2],
-    #   [agg3, agg4]
-    # ]
-    #
-    # we need to produce the following SQL expression:
-    #
-    # SELECT
-    #   rp.id
-    # FROM resource_providers AS rp
-    # JOIN resource_provider_aggregates AS rpa1
-    #   ON rp.id = rpa1.resource_provider_id
-    #   AND rpa1.aggregate_id IN ($AGG1_ID)
-    # JOIN resource_provider_aggregates AS rpa2
-    #   ON rp.id = rpa2.resource_provider_id
-    #   AND rpa2.aggregate_id IN ($AGG2_ID)
-    # JOIN resource_provider_aggregates AS rpa3
-    #   ON rp.id = rpa3.resource_provider_id
-    #   AND rpa3.aggregate_id IN ($AGG3_ID, $AGG4_ID)
-    # # Only if we have rp_ids...
-    # WHERE rp.id IN ($RP_IDs)
-
-    # First things first, get a map of all the aggregate UUID to internal
-    # aggregate IDs
-    agg_uuids = set()
-    for members in member_of:
-        for member in members:
-            agg_uuids.add(member)
-    agg_tbl = sa.alias(_AGG_TBL, name='aggs')
-    agg_sel = sa.select([agg_tbl.c.uuid, agg_tbl.c.id])
-    agg_sel = agg_sel.where(agg_tbl.c.uuid.in_(agg_uuids))
-    agg_uuid_map = {
-        r[0]: r[1] for r in context.session.execute(agg_sel).fetchall()
-    }
-
-    rp_tbl = sa.alias(_RP_TBL, name='rp')
-    join_chain = rp_tbl
-
-    for x, members in enumerate(member_of):
-        rpa_tbl = sa.alias(_RP_AGG_TBL, name='rpa%d' % x)
-
-        agg_ids = [agg_uuid_map[member] for member in members
-                   if member in agg_uuid_map]
-        if not agg_ids:
-            # This member_of list contains only non-existent aggregate UUIDs
-            # and therefore we will always return 0 results, so short-circuit
-            return set()
-
-        join_cond = sa.and_(
-            rp_tbl.c.id == rpa_tbl.c.resource_provider_id,
-            rpa_tbl.c.aggregate_id.in_(agg_ids))
-        join_chain = sa.join(join_chain, rpa_tbl, join_cond)
-    sel = sa.select([rp_tbl.c.id]).select_from(join_chain)
-    if rp_ids:
-        sel = sel.where(rp_tbl.c.id.in_(rp_ids))
-    return set(r[0] for r in context.session.execute(sel))
-
-
 @db_api.placement_context_manager.writer
 def _delete_rp_record(context, _id):
     query = context.session.query(models.ResourceProvider)
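A toy version of the member_of semantics documented in the removed provider_ids_matching_aggregates(): the outer list is ANDed and each inner list is ORed. The aggregate membership data here is invented, purely for illustration:

rp_aggs = {
    'rp1': {'agg1', 'agg2'},
    'rp2': {'agg1'},
    'rp3': {'agg3'},
}
member_of = [['agg1'], ['agg2', 'agg3']]  # agg1 AND (agg2 OR agg3)

matching = {
    rp for rp, aggs in rp_aggs.items()
    if all(aggs & set(any_of) for any_of in member_of)
}
print(matching)  # {'rp1'}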
@@ -942,7 +691,7 @@ class ResourceProvider(object):
                         'Please set parent provider UUID to None if '
                         'there is no parent.')

-            parent_ids = provider_ids_from_uuid(context, parent_uuid)
+            parent_ids = res_ctx.provider_ids_from_uuid(context, parent_uuid)
             if parent_ids is None:
                 raise exception.ObjectActionError(
                     action='create',
@@ -1033,10 +782,11 @@ class ResourceProvider(object):
         # * potentially orphaning heretofore-descendants
         #
         # So, for now, let's just prevent re-parenting...
-        my_ids = provider_ids_from_uuid(context, self.uuid)
+        my_ids = res_ctx.provider_ids_from_uuid(context, self.uuid)
         parent_uuid = updates.pop('parent_provider_uuid')
         if parent_uuid is not None:
-            parent_ids = provider_ids_from_uuid(context, parent_uuid)
+            parent_ids = res_ctx.provider_ids_from_uuid(
+                context, parent_uuid)
             # User supplied a parent, let's make sure it exists
             if parent_ids is None:
                 raise exception.ObjectActionError(
@@ -1114,133 +864,6 @@ class ResourceProvider(object):
         return resource_provider


-@db_api.placement_context_manager.reader
-def get_providers_with_shared_capacity(ctx, rc_id, amount, member_of=None):
-    """Returns a list of resource provider IDs (internal IDs, not UUIDs)
-    that have capacity for a requested amount of a resource and indicate that
-    they share resource via an aggregate association.
-
-    Shared resource providers are marked with a standard trait called
-    MISC_SHARES_VIA_AGGREGATE. This indicates that the provider allows its
-    inventory to be consumed by other resource providers associated via an
-    aggregate link.
-
-    For example, assume we have two compute nodes, CN_1 and CN_2, each with
-    inventory of VCPU and MEMORY_MB but not DISK_GB (in other words, these are
-    compute nodes with no local disk). There is a resource provider called
-    "NFS_SHARE" that has an inventory of DISK_GB and has the
-    MISC_SHARES_VIA_AGGREGATE trait. Both the "CN_1" and "CN_2" compute node
-    resource providers and the "NFS_SHARE" resource provider are associated
-    with an aggregate called "AGG_1".
-
-    The scheduler needs to determine the resource providers that can fulfill a
-    request for 2 VCPU, 1024 MEMORY_MB and 100 DISK_GB.
-
-    Clearly, no single provider can satisfy the request for all three
-    resources, since neither compute node has DISK_GB inventory and the
-    NFS_SHARE provider has no VCPU or MEMORY_MB inventories.
-
-    However, if we consider the NFS_SHARE resource provider as providing
-    inventory of DISK_GB for both CN_1 and CN_2, we can include CN_1 and CN_2
-    as potential fits for the requested set of resources.
-
-    To facilitate that matching query, this function returns all providers that
-    indicate they share their inventory with providers in some aggregate and
-    have enough capacity for the requested amount of a resource.
-
-    To follow the example above, if we were to call
-    get_providers_with_shared_capacity(ctx, "DISK_GB", 100), we would want to
-    get back the ID for the NFS_SHARE resource provider.
-
-    :param rc_id: Internal ID of the requested resource class.
-    :param amount: Amount of the requested resource.
-    :param member_of: When present, contains a list of lists of aggregate
-                      uuids that are used to filter the returned list of
-                      resource providers that *directly* belong to the
-                      aggregates referenced.
-    """
-    # The SQL we need to generate here looks like this:
-    #
-    # SELECT rp.id
-    # FROM resource_providers AS rp
-    #   INNER JOIN resource_provider_traits AS rpt
-    #     ON rp.id = rpt.resource_provider_id
-    #   INNER JOIN traits AS t
-    #     ON rpt.trait_id = t.id
-    #     AND t.name = "MISC_SHARES_VIA_AGGREGATE"
-    #   INNER JOIN inventories AS inv
-    #     ON rp.id = inv.resource_provider_id
-    #     AND inv.resource_class_id = $rc_id
-    #   LEFT JOIN (
-    #     SELECT resource_provider_id, SUM(used) as used
-    #     FROM allocations
-    #     WHERE resource_class_id = $rc_id
-    #     GROUP BY resource_provider_id
-    #   ) AS usage
-    #     ON rp.id = usage.resource_provider_id
-    # WHERE COALESCE(usage.used, 0) + $amount <= (
-    #   inv.total - inv.reserved) * inv.allocation_ratio
-    # ) AND
-    #   inv.min_unit <= $amount AND
-    #   inv.max_unit >= $amount AND
-    #   $amount % inv.step_size = 0
-    # GROUP BY rp.id
-
-    rp_tbl = sa.alias(_RP_TBL, name='rp')
-    inv_tbl = sa.alias(_INV_TBL, name='inv')
-    t_tbl = sa.alias(_TRAIT_TBL, name='t')
-    rpt_tbl = sa.alias(_RP_TRAIT_TBL, name='rpt')
-
-    rp_to_rpt_join = sa.join(
-        rp_tbl, rpt_tbl,
-        rp_tbl.c.id == rpt_tbl.c.resource_provider_id,
-    )
-
-    rpt_to_t_join = sa.join(
-        rp_to_rpt_join, t_tbl,
-        sa.and_(
-            rpt_tbl.c.trait_id == t_tbl.c.id,
-            # The traits table wants unicode trait names, but os_traits
-            # presents native str, so we need to cast.
-            t_tbl.c.name == six.text_type(os_traits.MISC_SHARES_VIA_AGGREGATE),
-        ),
-    )
-
-    rp_to_inv_join = sa.join(
-        rpt_to_t_join, inv_tbl,
-        sa.and_(
-            rpt_tbl.c.resource_provider_id == inv_tbl.c.resource_provider_id,
-            inv_tbl.c.resource_class_id == rc_id,
-        ),
-    )
-
-    usage = _usage_select([rc_id])
-
-    inv_to_usage_join = sa.outerjoin(
-        rp_to_inv_join, usage,
-        inv_tbl.c.resource_provider_id == usage.c.resource_provider_id,
-    )
-
-    where_conds = _capacity_check_clause(amount, usage, inv_tbl=inv_tbl)
-
-    # If 'member_of' has values, do a separate lookup to identify the
-    # resource providers that meet the member_of constraints.
-    if member_of:
-        rps_in_aggs = provider_ids_matching_aggregates(ctx, member_of)
-        if not rps_in_aggs:
-            # Short-circuit. The user either asked for a non-existing
-            # aggregate or there were no resource providers that matched
-            # the requirements...
-            return []
-        where_conds.append(rp_tbl.c.id.in_(rps_in_aggs))
-
-    sel = sa.select([rp_tbl.c.id]).select_from(inv_to_usage_join)
-    sel = sel.where(where_conds)
-    sel = sel.group_by(rp_tbl.c.id)
-
-    return [r[0] for r in ctx.session.execute(sel)]
-
-
 @db_api.placement_context_manager.reader
 def _get_all_by_filters_from_db(context, filters):
     # Eg. filters can be:
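The CN_1 / CN_2 / NFS_SHARE scenario from the docstring above, reduced to a standalone sketch: only the sharing provider carries DISK_GB, so it is the one a DISK_GB request should surface. All values and names below are invented for illustration:

inventories = {                     # provider -> {resource class: capacity}
    'CN_1': {'VCPU': 8, 'MEMORY_MB': 4096},
    'CN_2': {'VCPU': 8, 'MEMORY_MB': 4096},
    'NFS_SHARE': {'DISK_GB': 2000},
}
sharers = {'NFS_SHARE'}             # providers with MISC_SHARES_VIA_AGGREGATE

def sharing_providers_with_capacity(rc, amount):
    return [rp for rp in sharers if inventories[rp].get(rc, 0) >= amount]

print(sharing_providers_with_capacity('DISK_GB', 100))  # ['NFS_SHARE']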
@@ -1310,7 +933,7 @@ def _get_all_by_filters_from_db(context, filters):
         # root_provider_id value of that record. We can then ask for only
         # those resource providers having a root_provider_id of that value.
         tree_uuid = filters.pop('in_tree')
-        tree_ids = provider_ids_from_uuid(context, tree_uuid)
+        tree_ids = res_ctx.provider_ids_from_uuid(context, tree_uuid)
         if tree_ids is None:
             # List operations should simply return an empty list when a
             # non-existing resource provider UUID is given.
@@ -1319,7 +942,7 @@ def _get_all_by_filters_from_db(context, filters):
         query = query.where(rp.c.root_provider_id == root_id)

     # Get the provider IDs matching any specified traits and/or aggregates
-    rp_ids, forbidden_rp_ids = get_provider_ids_for_traits_and_aggs(
+    rp_ids, forbidden_rp_ids = res_ctx.get_provider_ids_for_traits_and_aggs(
         context, required, forbidden, member_of, forbidden_aggs)
     if rp_ids is None:
         # If no providers match the traits/aggs, we can short out
@@ -1334,7 +957,8 @@ def _get_all_by_filters_from_db(context, filters):

     for rc_name, amount in resources.items():
         rc_id = rc_cache.RC_CACHE.id_from_string(rc_name)
-        rps_with_resource = get_providers_with_resource(context, rc_id, amount)
+        rps_with_resource = res_ctx.get_providers_with_resource(
+            context, rc_id, amount)
         rps_with_resource = (rp[0] for rp in rps_with_resource)
         query = query.where(rp.c.id.in_(rps_with_resource))

@@ -1363,512 +987,3 @@ def get_all_by_filters(context, filters=None):
     """
     resource_providers = _get_all_by_filters_from_db(context, filters)
     return [ResourceProvider(context, **rp) for rp in resource_providers]
-
-
-@db_api.placement_context_manager.reader
-def get_provider_ids_having_any_trait(ctx, traits):
-    """Returns a set of resource provider internal IDs that have ANY of the
-    supplied traits.
-
-    :param ctx: Session context to use
-    :param traits: A map, keyed by trait string name, of trait internal IDs, at
-                   least one of which each provider must have associated with
-                   it.
-    :raise ValueError: If traits is empty or None.
-    """
-    if not traits:
-        raise ValueError('traits must not be empty')
-
-    rptt = sa.alias(_RP_TRAIT_TBL, name="rpt")
-    sel = sa.select([rptt.c.resource_provider_id])
-    sel = sel.where(rptt.c.trait_id.in_(traits.values()))
-    sel = sel.group_by(rptt.c.resource_provider_id)
-    return set(r[0] for r in ctx.session.execute(sel))
-
-
-@db_api.placement_context_manager.reader
-def _get_provider_ids_having_all_traits(ctx, required_traits):
-    """Returns a set of resource provider internal IDs that have ALL of the
-    required traits.
-
-    NOTE: Don't call this method with no required_traits.
-
-    :param ctx: Session context to use
-    :param required_traits: A map, keyed by trait string name, of required
-                            trait internal IDs that each provider must have
-                            associated with it
-    :raise ValueError: If required_traits is empty or None.
-    """
-    if not required_traits:
-        raise ValueError('required_traits must not be empty')
-
-    rptt = sa.alias(_RP_TRAIT_TBL, name="rpt")
-    sel = sa.select([rptt.c.resource_provider_id])
-    sel = sel.where(rptt.c.trait_id.in_(required_traits.values()))
-    sel = sel.group_by(rptt.c.resource_provider_id)
-    # Only get the resource providers that have ALL the required traits, so we
-    # need to GROUP BY the resource provider and ensure that the
-    # COUNT(trait_id) is equal to the number of traits we are requiring
-    num_traits = len(required_traits)
-    cond = sa.func.count(rptt.c.trait_id) == num_traits
-    sel = sel.having(cond)
-    return set(r[0] for r in ctx.session.execute(sel))
-
-
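The two helpers above implement "any trait" and "all traits" selection, the latter via GROUP BY / HAVING COUNT. The same distinction in plain Python over invented provider/trait data, for illustration only:

provider_traits = {
    1: {'HW_CPU_X86_AVX2', 'STORAGE_DISK_SSD'},
    2: {'HW_CPU_X86_AVX2'},
    3: {'STORAGE_DISK_SSD'},
}
required = {'HW_CPU_X86_AVX2', 'STORAGE_DISK_SSD'}

any_trait = {rp for rp, traits in provider_traits.items() if traits & required}
all_traits = {rp for rp, traits in provider_traits.items()
              if len(traits & required) == len(required)}
print(any_trait)   # {1, 2, 3}
print(all_traits)  # {1}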
-@db_api.placement_context_manager.reader
-def has_provider_trees(ctx):
-    """Simple method that returns whether provider trees (i.e. nested resource
-    providers) are in use in the deployment at all. This information is used to
-    switch code paths when attempting to retrieve allocation candidate
-    information. The code paths are eminently easier to execute and follow for
-    non-nested scenarios...
-
-    NOTE(jaypipes): The result of this function can be cached extensively.
-    """
-    sel = sa.select([_RP_TBL.c.id])
-    sel = sel.where(_RP_TBL.c.parent_provider_id.isnot(None))
-    sel = sel.limit(1)
-    res = ctx.session.execute(sel).fetchall()
-    return len(res) > 0
-
-
-def get_provider_ids_for_traits_and_aggs(ctx, required_traits,
-                                         forbidden_traits, member_of,
-                                         forbidden_aggs):
-    """Get internal IDs for all providers matching the specified traits/aggs.
-
-    :return: A tuple of:
-        filtered_rp_ids: A set of internal provider IDs matching the specified
-            criteria. If None, work was done and resulted in no matching
-            providers. This is in contrast to the empty set, which indicates
-            that no filtering was performed.
-        forbidden_rp_ids: A set of internal IDs of providers having any of the
-            specified forbidden_traits.
-    """
-    filtered_rps = set()
-    if required_traits:
-        trait_map = _normalize_trait_map(ctx, required_traits)
-        trait_rps = _get_provider_ids_having_all_traits(ctx, trait_map)
-        filtered_rps = trait_rps
-        LOG.debug("found %d providers after applying required traits filter "
-                  "(%s)",
-                  len(filtered_rps), list(required_traits))
-        if not filtered_rps:
-            return None, []
-
-    # If 'member_of' has values, do a separate lookup to identify the
-    # resource providers that meet the member_of constraints.
-    if member_of:
-        rps_in_aggs = provider_ids_matching_aggregates(ctx, member_of)
-        if filtered_rps:
-            filtered_rps &= rps_in_aggs
-        else:
-            filtered_rps = rps_in_aggs
-        LOG.debug("found %d providers after applying required aggregates "
-                  "filter (%s)", len(filtered_rps), member_of)
-        if not filtered_rps:
-            return None, []
-
-    forbidden_rp_ids = set()
-    if forbidden_aggs:
-        rps_bad_aggs = provider_ids_matching_aggregates(ctx, [forbidden_aggs])
-        forbidden_rp_ids |= rps_bad_aggs
-        if filtered_rps:
-            filtered_rps -= rps_bad_aggs
-            LOG.debug("found %d providers after applying forbidden aggregates "
-                      "filter (%s)", len(filtered_rps), forbidden_aggs)
-            if not filtered_rps:
-                return None, []
-
-    if forbidden_traits:
-        trait_map = _normalize_trait_map(ctx, forbidden_traits)
-        rps_bad_traits = get_provider_ids_having_any_trait(ctx, trait_map)
-        forbidden_rp_ids |= rps_bad_traits
-        if filtered_rps:
-            filtered_rps -= rps_bad_traits
-            LOG.debug("found %d providers after applying forbidden traits "
-                      "filter (%s)", len(filtered_rps), list(forbidden_traits))
-            if not filtered_rps:
-                return None, []
-
-    return filtered_rps, forbidden_rp_ids
-
-
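A rough sketch of the filtering order described by get_provider_ids_for_traits_and_aggs: start from providers with all required traits, intersect with aggregate members, then track and subtract providers in forbidden aggregates or with forbidden traits. The sets below are invented:

with_required_traits = {1, 2, 3, 4}
in_required_aggs = {2, 3, 4, 5}
in_forbidden_aggs = {4}
with_forbidden_traits = {3}

filtered = with_required_traits & in_required_aggs
forbidden = in_forbidden_aggs | with_forbidden_traits
filtered -= forbidden
# The real helper returns None instead of a set to signal "filters were
# applied and nothing survived".
print(filtered, forbidden)  # {2} {3, 4}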
-def _normalize_trait_map(ctx, traits):
-    if not isinstance(traits, dict):
-        return trait_obj.ids_from_names(ctx, traits)
-    return traits
-
-
-@db_api.placement_context_manager.reader
-def get_provider_ids_matching(rg_ctx):
-    """Returns a list of tuples of (internal provider ID, root provider ID)
-    that have available inventory to satisfy all the supplied requests for
-    resources. If no providers match, the empty list is returned.
-
-    :note: This function is used to get results for (a) a RequestGroup with
-           use_same_provider=True in a granular request, or (b) a short cut
-           path for scenarios that do NOT involve sharing or nested providers.
-           Each `internal provider ID` represents a *single* provider that
-           can satisfy *all* of the resource/trait/aggregate criteria. This is
-           in contrast with get_trees_matching_all(), where each provider
-           might only satisfy *some* of the resources, the rest of which are
-           satisfied by other providers in the same tree or shared via
-           aggregate.
-
-    :param rg_ctx: RequestGroupSearchContext
-    """
-    # TODO(tetsuro): refactor this to have only the rg_ctx argument
-    filtered_rps, forbidden_rp_ids = get_provider_ids_for_traits_and_aggs(
-        rg_ctx.context, rg_ctx.required_trait_map, rg_ctx.forbidden_trait_map,
-        rg_ctx.member_of, rg_ctx.forbidden_aggs)
-    if filtered_rps is None:
-        # If no providers match the traits/aggs, we can short out
-        return []
-
-    # Instead of constructing a giant complex SQL statement that joins multiple
-    # copies of derived usage tables and inventory tables to each other, we do
-    # one query for each requested resource class. This allows us to log a
-    # rough idea of which resource class query returned no results (for
-    # purposes of rough debugging of a single allocation candidates request) as
-    # well as reduce the necessary knowledge of SQL in order to understand the
-    # queries being executed here.
-    #
-    # NOTE(jaypipes): The efficiency of this operation may be improved by
-    # passing the trait_rps and/or forbidden_ip_ids iterables to the
-    # get_providers_with_resource() function so that we don't have to process
-    # as many records inside the loop below to remove providers from the
-    # eventual results list
-    provs_with_resource = set()
-    first = True
-    for rc_id, amount in rg_ctx.resources.items():
-        rc_name = rc_cache.RC_CACHE.string_from_id(rc_id)
-        provs_with_resource = get_providers_with_resource(
-            rg_ctx.context, rc_id, amount, tree_root_id=rg_ctx.tree_root_id)
-        LOG.debug("found %d providers with available %d %s",
-                  len(provs_with_resource), amount, rc_name)
-        if not provs_with_resource:
-            return []
-
-        rc_rp_ids = set(p[0] for p in provs_with_resource)
-        # The branching below could be collapsed code-wise, but is in place to
-        # make the debug logging clearer.
-        if first:
-            first = False
-            if filtered_rps:
-                filtered_rps &= rc_rp_ids
-                LOG.debug("found %d providers after applying initial "
-                          "aggregate and trait filters", len(filtered_rps))
-            else:
-                filtered_rps = rc_rp_ids
-                # The following condition is not necessary for the logic; just
-                # prevents the message from being logged unnecessarily.
-                if forbidden_rp_ids:
-                    # Forbidden trait/aggregate filters only need to be applied
-                    # a) on the first iteration; and
-                    # b) if not already set up before the loop
-                    # ...since any providers in the resulting set are the basis
-                    # for intersections, and providers with forbidden traits
-                    # are already absent from that set after we've filtered
-                    # them once.
-                    filtered_rps -= forbidden_rp_ids
-                    LOG.debug("found %d providers after applying forbidden "
-                              "traits/aggregates", len(filtered_rps))
-        else:
-            filtered_rps &= rc_rp_ids
-            LOG.debug("found %d providers after filtering by previous result",
-                      len(filtered_rps))
-
-        if not filtered_rps:
-            return []
-
-    # provs_with_resource will contain a superset of providers with IDs still
-    # in our filtered_rps set. We return the list of tuples of
-    # (internal provider ID, root internal provider ID)
-    return [rpids for rpids in provs_with_resource if rpids[0] in filtered_rps]
-
-
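The per-resource-class loop above narrows the candidate set by intersection, one class at a time, short-circuiting when nothing is left. A minimal standalone sketch of that narrowing with invented provider IDs:

providers_for = {
    'VCPU': {1, 2, 3},
    'MEMORY_MB': {1, 3},
    'DISK_GB': {3, 4},
}

candidates = None
for rc, rps in providers_for.items():
    candidates = rps if candidates is None else candidates & rps
    if not candidates:
        break  # short-circuit, exactly as the real query loop does
print(candidates)  # {3}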
-@db_api.placement_context_manager.reader
-def get_providers_with_resource(ctx, rc_id, amount, tree_root_id=None):
-    """Returns a set of tuples of (provider ID, root provider ID) of providers
-    that satisfy the request for a single resource class.
-
-    :param ctx: Session context to use
-    :param rc_id: Internal ID of resource class to check inventory for
-    :param amount: Amount of resource being requested
-    :param tree_root_id: An optional root provider ID. If provided, the results
-                         are limited to the resource providers under the given
-                         root resource provider.
-    """
-    # SELECT rp.id, rp.root_provider_id
-    # FROM resource_providers AS rp
-    # JOIN inventories AS inv
-    #   ON rp.id = inv.resource_provider_id
-    #   AND inv.resource_class_id = $RC_ID
-    # LEFT JOIN (
-    #   SELECT
-    #     alloc.resource_provider_id,
-    #     SUM(allocs.used) AS used
-    #   FROM allocations AS alloc
-    #   WHERE allocs.resource_class_id = $RC_ID
-    #   GROUP BY allocs.resource_provider_id
-    # ) AS usage
-    #   ON inv.resource_provider_id = usage.resource_provider_id
-    # WHERE
-    #   used + $AMOUNT <= ((total - reserved) * inv.allocation_ratio)
-    #   AND inv.min_unit <= $AMOUNT
-    #   AND inv.max_unit >= $AMOUNT
-    #   AND $AMOUNT % inv.step_size == 0
-    rpt = sa.alias(_RP_TBL, name="rp")
-    inv = sa.alias(_INV_TBL, name="inv")
-    usage = _usage_select([rc_id])
-    rp_to_inv = sa.join(
-        rpt, inv, sa.and_(
-            rpt.c.id == inv.c.resource_provider_id,
-            inv.c.resource_class_id == rc_id))
-    inv_to_usage = sa.outerjoin(
-        rp_to_inv, usage,
-        inv.c.resource_provider_id == usage.c.resource_provider_id)
-    sel = sa.select([rpt.c.id, rpt.c.root_provider_id])
-    sel = sel.select_from(inv_to_usage)
-    where_conds = _capacity_check_clause(amount, usage, inv_tbl=inv)
-    if tree_root_id is not None:
-        where_conds = sa.and_(
-            rpt.c.root_provider_id == tree_root_id,
-            where_conds)
-    sel = sel.where(where_conds)
-    res = ctx.session.execute(sel).fetchall()
-    res = set((r[0], r[1]) for r in res)
-    return res
-
-
-@db_api.placement_context_manager.reader
-def _get_trees_with_traits(ctx, rp_ids, required_traits, forbidden_traits):
-    """Given a list of provider IDs, filter them to return a set of tuples of
-    (provider ID, root provider ID) of providers which belong to a tree that
-    can satisfy trait requirements.
-
-    :param ctx: Session context to use
-    :param rp_ids: a set of resource provider IDs
-    :param required_traits: A map, keyed by trait string name, of required
-                            trait internal IDs that each provider TREE must
-                            COLLECTIVELY have associated with it
-    :param forbidden_traits: A map, keyed by trait string name, of trait
-                             internal IDs that a resource provider must
-                             not have.
-    """
-    # We now want to restrict the returned providers to only those provider
-    # trees that have all our required traits.
-    #
-    # The SQL we want looks like this:
-    #
-    # SELECT outer_rp.id, outer_rp.root_provider_id
-    # FROM resource_providers AS outer_rp
-    # JOIN (
-    #   SELECT rp.root_provider_id
-    #   FROM resource_providers AS rp
-    #   # Only if we have required traits...
-    #   INNER JOIN resource_provider_traits AS rptt
-    #   ON rp.id = rptt.resource_provider_id
-    #   AND rptt.trait_id IN ($REQUIRED_TRAIT_IDS)
-    #   # Only if we have forbidden_traits...
-    #   LEFT JOIN resource_provider_traits AS rptt_forbid
-    #   ON rp.id = rptt_forbid.resource_provider_id
-    #   AND rptt_forbid.trait_id IN ($FORBIDDEN_TRAIT_IDS)
-    #   WHERE rp.id IN ($RP_IDS)
-    #   # Only if we have forbidden traits...
-    #   AND rptt_forbid.resource_provider_id IS NULL
-    #   GROUP BY rp.root_provider_id
-    #   # Only if have required traits...
-    #   HAVING COUNT(DISTINCT rptt.trait_id) == $NUM_REQUIRED_TRAITS
-    # ) AS trees_with_traits
-    # ON outer_rp.root_provider_id = trees_with_traits.root_provider_id
-    rpt = sa.alias(_RP_TBL, name="rp")
-    cond = [rpt.c.id.in_(rp_ids)]
-    subq = sa.select([rpt.c.root_provider_id])
-    subq_join = None
-    if required_traits:
-        rptt = sa.alias(_RP_TRAIT_TBL, name="rptt")
-        rpt_to_rptt = sa.join(
-            rpt, rptt, sa.and_(
-                rpt.c.id == rptt.c.resource_provider_id,
-                rptt.c.trait_id.in_(required_traits.values())))
-        subq_join = rpt_to_rptt
-        # Only get the resource providers that have ALL the required traits,
-        # so we need to GROUP BY the root provider and ensure that the
-        # COUNT(trait_id) is equal to the number of traits we are requiring
-        num_traits = len(required_traits)
-        having_cond = sa.func.count(sa.distinct(rptt.c.trait_id)) == num_traits
-        subq = subq.having(having_cond)
-
-    # Tack on an additional LEFT JOIN clause inside the derived table if we've
-    # got forbidden traits in the mix.
-    if forbidden_traits:
-        rptt_forbid = sa.alias(_RP_TRAIT_TBL, name="rptt_forbid")
-        join_to = rpt
-        if subq_join is not None:
-            join_to = subq_join
-        rpt_to_rptt_forbid = sa.outerjoin(
-            join_to, rptt_forbid, sa.and_(
-                rpt.c.id == rptt_forbid.c.resource_provider_id,
-                rptt_forbid.c.trait_id.in_(forbidden_traits.values())))
-        cond.append(rptt_forbid.c.resource_provider_id == sa.null())
-        subq_join = rpt_to_rptt_forbid
-
-    subq = subq.select_from(subq_join)
-    subq = subq.where(sa.and_(*cond))
-    subq = subq.group_by(rpt.c.root_provider_id)
-    trees_with_traits = sa.alias(subq, name="trees_with_traits")
-
-    outer_rps = sa.alias(_RP_TBL, name="outer_rps")
-    outer_to_subq = sa.join(
-        outer_rps, trees_with_traits,
-        outer_rps.c.root_provider_id == trees_with_traits.c.root_provider_id)
-    sel = sa.select([outer_rps.c.id, outer_rps.c.root_provider_id])
-    sel = sel.select_from(outer_to_subq)
-    res = ctx.session.execute(sel).fetchall()
-
-    return [(rp_id, root_id) for rp_id, root_id in res]
-
-
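The key rule in _get_trees_with_traits is that required traits only have to be present collectively across a tree, not on any single provider. A toy version of that pooling-by-root check, with an invented tree layout:

root_of = {'cn1': 'cn1', 'pf1': 'cn1', 'pf2': 'cn1', 'cn2': 'cn2'}
traits_of = {
    'cn1': set(),
    'pf1': {'HW_NIC_SRIOV'},
    'pf2': {'HW_NIC_OFFLOAD_GENEVE'},
    'cn2': {'HW_NIC_SRIOV'},
}
required = {'HW_NIC_SRIOV', 'HW_NIC_OFFLOAD_GENEVE'}

pooled = {}
for rp, root in root_of.items():
    pooled.setdefault(root, set()).update(traits_of[rp])
matching_trees = {root for root, traits in pooled.items() if required <= traits}
print(matching_trees)  # {'cn1'}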
-@db_api.placement_context_manager.reader
-def get_trees_matching_all(rg_ctx):
-    """Returns a RPCandidates object representing the providers that satisfy
-    the request for resources.
-
-    If traits are also required, this function only returns results where the
-    set of providers within a tree that satisfy the resource request
-    collectively have all the required traits associated with them. This means
-    that given the following provider tree:
-
-    cn1
-     |
-     --> pf1 (SRIOV_NET_VF:2)
-     |
-     --> pf2 (SRIOV_NET_VF:1, HW_NIC_OFFLOAD_GENEVE)
-
-    If a user requests 1 SRIOV_NET_VF resource and no required traits will
-    return both pf1 and pf2. However, a request for 2 SRIOV_NET_VF and required
-    trait of HW_NIC_OFFLOAD_GENEVE will return no results (since pf1 is the
-    only provider with enough inventory of SRIOV_NET_VF but it does not have
-    the required HW_NIC_OFFLOAD_GENEVE trait).
-
-    :note: This function is used for scenarios to get results for a
-    RequestGroup with use_same_provider=False. In this scenario, we are able
-    to use multiple providers within the same provider tree including sharing
-    providers to satisfy different resources involved in a single RequestGroup.
-
-    :param rg_ctx: RequestGroupSearchContext
-    """
-    # If 'member_of' has values, do a separate lookup to identify the
-    # resource providers that meet the member_of constraints.
-    if rg_ctx.member_of:
-        rps_in_aggs = provider_ids_matching_aggregates(
-            rg_ctx.context, rg_ctx.member_of)
-        if not rps_in_aggs:
-            # Short-circuit. The user either asked for a non-existing
-            # aggregate or there were no resource providers that matched
-            # the requirements...
-            return rp_candidates.RPCandidateList()
-
-    if rg_ctx.forbidden_aggs:
-        rps_bad_aggs = provider_ids_matching_aggregates(
-            rg_ctx.context, [rg_ctx.forbidden_aggs])
-
-    # To get all trees that collectively have all required resource,
-    # aggregates and traits, we use `RPCandidateList` which has a list of
-    # three-tuples with the first element being resource provider ID, the
-    # second element being the root provider ID and the third being resource
-    # class ID.
-    provs_with_inv = rp_candidates.RPCandidateList()
-
-    for rc_id, amount in rg_ctx.resources.items():
-        rc_name = rc_cache.RC_CACHE.string_from_id(rc_id)
-
-        provs_with_inv_rc = rp_candidates.RPCandidateList()
-        rc_provs_with_inv = get_providers_with_resource(
-            rg_ctx.context, rc_id, amount, tree_root_id=rg_ctx.tree_root_id)
-        provs_with_inv_rc.add_rps(rc_provs_with_inv, rc_id)
-        LOG.debug("found %d providers under %d trees with available %d %s",
-                  len(provs_with_inv_rc), len(provs_with_inv_rc.trees),
-                  amount, rc_name)
-        if not provs_with_inv_rc:
-            # If there's no providers that have one of the resource classes,
-            # then we can short-circuit returning an empty RPCandidateList
-            return rp_candidates.RPCandidateList()
-
-        sharing_providers = rg_ctx.get_rps_with_shared_capacity(rc_id)
-        if sharing_providers and rg_ctx.tree_root_id is None:
-            # There are sharing providers for this resource class, so we
-            # should also get combinations of (sharing provider, anchor root)
-            # in addition to (non-sharing provider, anchor root) we've just
-            # got via get_providers_with_resource() above. We must skip this
-            # process if tree_root_id is provided via the ?in_tree=<rp_uuid>
-            # queryparam, because it restricts resources from another tree.
-            rc_provs_with_inv = anchors_for_sharing_providers(
-                rg_ctx.context, sharing_providers, get_id=True)
-            provs_with_inv_rc.add_rps(rc_provs_with_inv, rc_id)
-            LOG.debug(
-                "considering %d sharing providers with %d %s, "
-                "now we've got %d provider trees",
-                len(sharing_providers), amount, rc_name,
-                len(provs_with_inv_rc.trees))
-
-        if rg_ctx.member_of:
-            # Aggregate on root spans the whole tree, so the rp itself
-            # *or its root* should be in the aggregate
-            provs_with_inv_rc.filter_by_rp_or_tree(rps_in_aggs)
-            LOG.debug("found %d providers under %d trees after applying "
-                      "aggregate filter %s",
-                      len(provs_with_inv_rc.rps), len(provs_with_inv_rc.trees),
-                      rg_ctx.member_of)
-            if not provs_with_inv_rc:
-                # Short-circuit returning an empty RPCandidateList
-                return rp_candidates.RPCandidateList()
-        if rg_ctx.forbidden_aggs:
-            # Aggregate on root spans the whole tree, so the rp itself
-            # *and its root* should be outside the aggregate
-            provs_with_inv_rc.filter_by_rp_nor_tree(rps_bad_aggs)
-            LOG.debug("found %d providers under %d trees after applying "
-                      "negative aggregate filter %s",
-                      len(provs_with_inv_rc.rps), len(provs_with_inv_rc.trees),
-                      rg_ctx.forbidden_aggs)
-            if not provs_with_inv_rc:
-                # Short-circuit returning an empty RPCandidateList
-                return rp_candidates.RPCandidateList()
-
-        # Adding the resource providers we've got for this resource class,
-        # filter provs_with_inv to have only trees with enough inventories
-        # for this resource class. Here "tree" includes sharing providers
-        # in its terminology
-        provs_with_inv.merge_common_trees(provs_with_inv_rc)
-        LOG.debug(
-            "found %d providers under %d trees after filtering by "
-            "previous result",
-            len(provs_with_inv.rps), len(provs_with_inv.trees))
-        if not provs_with_inv:
-            return rp_candidates.RPCandidateList()
-
-    if (not rg_ctx.required_trait_map and not rg_ctx.forbidden_trait_map) or (
-            rg_ctx.exists_sharing):
-        # If there were no traits required, there's no difference in how we
-        # calculate allocation requests between nested and non-nested
-        # environments, so just short-circuit and return. Or if sharing
-        # providers are in play, we check the trait constraints later
-        # in _alloc_candidates_multiple_providers(), so skip.
-        return provs_with_inv
-
-    # Return the providers where the providers have the available inventory
-    # capacity and that set of providers (grouped by their tree) have all
-    # of the required traits and none of the forbidden traits
-    rp_tuples_with_trait = _get_trees_with_traits(
-        rg_ctx.context, provs_with_inv.rps, rg_ctx.required_trait_map,
-        rg_ctx.forbidden_trait_map)
-    provs_with_inv.filter_by_rp(rp_tuples_with_trait)
-    LOG.debug("found %d providers under %d trees after applying "
-              "traits filter - required: %s, forbidden: %s",
-              len(provs_with_inv.rps), len(provs_with_inv.trees),
-              list(rg_ctx.required_trait_map),
-              list(rg_ctx.forbidden_trait_map))
-
-    return provs_with_inv
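Condensed sketch of how get_trees_matching_all accumulates candidates: for each resource class it records (provider, root) pairs, folds sharing providers in under their anchor roots, and then keeps only the trees that appear for every requested class. The providers and pairs below are invented for illustration:

per_class = {
    'VCPU': {('cn1', 'cn1'), ('cn2', 'cn2')},
    'DISK_GB': {('nfs_share', 'cn1'), ('cn2', 'cn2')},  # share anchored to cn1
}

surviving_trees = None
for rc, pairs in per_class.items():
    trees = {root for _, root in pairs}
    surviving_trees = trees if surviving_trees is None else surviving_trees & trees
# Both the cn1 and cn2 trees survive, because the NFS share supplies DISK_GB
# on behalf of the diskless cn1 tree.
print(surviving_trees)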
@@ -41,7 +41,7 @@ def _req_group_search_context(context, **kwargs):
|
|||||||
forbidden_aggs=kwargs.get('forbidden_aggs', []),
|
forbidden_aggs=kwargs.get('forbidden_aggs', []),
|
||||||
in_tree=kwargs.get('in_tree', None),
|
in_tree=kwargs.get('in_tree', None),
|
||||||
)
|
)
|
||||||
has_trees = rp_obj.has_provider_trees(context)
|
has_trees = res_ctx.has_provider_trees(context)
|
||||||
rg_ctx = res_ctx.RequestGroupSearchContext(
|
rg_ctx = res_ctx.RequestGroupSearchContext(
|
||||||
context, request, has_trees)
|
context, request, has_trees)
|
||||||
|
|
||||||
@@ -172,7 +172,7 @@ class ProviderDBHelperTestCase(tb.PlacementDbBaseTestCase):
|
|||||||
|
|
||||||
# Run it!
|
# Run it!
|
||||||
rg_ctx = _req_group_search_context(self.ctx, resources=resources)
|
rg_ctx = _req_group_search_context(self.ctx, resources=resources)
|
||||||
res = rp_obj.get_provider_ids_matching(rg_ctx)
|
res = res_ctx.get_provider_ids_matching(rg_ctx)
|
||||||
|
|
||||||
# We should get all the incl_* RPs
|
# We should get all the incl_* RPs
|
||||||
expected = [incl_biginv_noalloc, incl_extra_full]
|
expected = [incl_biginv_noalloc, incl_extra_full]
|
||||||
@@ -192,20 +192,20 @@ class ProviderDBHelperTestCase(tb.PlacementDbBaseTestCase):
|
|||||||
resources=resources,
|
resources=resources,
|
||||||
required_traits=req_traits,
|
required_traits=req_traits,
|
||||||
)
|
)
|
||||||
res = rp_obj.get_provider_ids_matching(rg_ctx)
|
res = res_ctx.get_provider_ids_matching(rg_ctx)
|
||||||
|
|
||||||
self.assertEqual([], res)
|
self.assertEqual([], res)
|
||||||
|
|
||||||
# Next let's set the required trait to an excl_* RPs.
|
# Next let's set the required trait to an excl_* RPs.
|
||||||
# This should result in no results returned as well.
|
# This should result in no results returned as well.
|
||||||
excl_big_md_noalloc.set_traits([avx2_t])
|
excl_big_md_noalloc.set_traits([avx2_t])
|
||||||
res = rp_obj.get_provider_ids_matching(rg_ctx)
|
res = res_ctx.get_provider_ids_matching(rg_ctx)
|
||||||
self.assertEqual([], res)
|
self.assertEqual([], res)
|
||||||
|
|
||||||
# OK, now add the trait to one of the incl_* providers and verify that
|
# OK, now add the trait to one of the incl_* providers and verify that
|
||||||
# provider now shows up in our results
|
# provider now shows up in our results
|
||||||
incl_biginv_noalloc.set_traits([avx2_t])
|
incl_biginv_noalloc.set_traits([avx2_t])
|
||||||
res = rp_obj.get_provider_ids_matching(rg_ctx)
|
res = res_ctx.get_provider_ids_matching(rg_ctx)
|
||||||
|
|
||||||
rp_ids = [r[0] for r in res]
|
rp_ids = [r[0] for r in res]
|
||||||
self.assertEqual([incl_biginv_noalloc.id], rp_ids)
|
self.assertEqual([incl_biginv_noalloc.id], rp_ids)
|
||||||
@@ -216,7 +216,7 @@ class ProviderDBHelperTestCase(tb.PlacementDbBaseTestCase):
|
|||||||
resources=resources,
|
resources=resources,
|
||||||
in_tree=uuids.biginv_noalloc,
|
in_tree=uuids.biginv_noalloc,
|
||||||
)
|
)
|
||||||
res = rp_obj.get_provider_ids_matching(rg_ctx)
|
res = res_ctx.get_provider_ids_matching(rg_ctx)
|
||||||
rp_ids = [r[0] for r in res]
|
rp_ids = [r[0] for r in res]
|
||||||
self.assertEqual([incl_biginv_noalloc.id], rp_ids)
|
self.assertEqual([incl_biginv_noalloc.id], rp_ids)
|
||||||
|
|
||||||
@@ -227,7 +227,7 @@ class ProviderDBHelperTestCase(tb.PlacementDbBaseTestCase):
|
|||||||
resources=resources,
|
resources=resources,
|
||||||
in_tree=uuids.allused,
|
in_tree=uuids.allused,
|
||||||
)
|
)
|
||||||
res = rp_obj.get_provider_ids_matching(rg_ctx)
|
res = res_ctx.get_provider_ids_matching(rg_ctx)
|
||||||
self.assertEqual([], res)
|
self.assertEqual([], res)
|
||||||
|
|
||||||
def test_get_provider_ids_matching_with_multiple_forbidden(self):
|
def test_get_provider_ids_matching_with_multiple_forbidden(self):
|
||||||
@@ -252,7 +252,7 @@ class ProviderDBHelperTestCase(tb.PlacementDbBaseTestCase):
|
|||||||
resources=resources,
|
resources=resources,
|
||||||
forbidden_traits=forbidden_traits,
|
forbidden_traits=forbidden_traits,
|
||||||
member_of=member_of)
|
member_of=member_of)
|
||||||
res = rp_obj.get_provider_ids_matching(rg_ctx)
|
res = res_ctx.get_provider_ids_matching(rg_ctx)
|
||||||
self.assertEqual({(rp1.id, rp1.id)}, set(res))
|
self.assertEqual({(rp1.id, rp1.id)}, set(res))
|
||||||
|
|
||||||
def test_get_provider_ids_matching_with_aggregates(self):
|
def test_get_provider_ids_matching_with_aggregates(self):
|
||||||
@@ -276,7 +276,7 @@ class ProviderDBHelperTestCase(tb.PlacementDbBaseTestCase):
|
|||||||
)
|
)
|
||||||
expected_rp = [rp1, rp4]
|
expected_rp = [rp1, rp4]
|
||||||
|
|
||||||
res = rp_obj.get_provider_ids_matching(rg_ctx)
|
res = res_ctx.get_provider_ids_matching(rg_ctx)
|
||||||
self.assertEqual(set((rp.id, rp.id) for rp in expected_rp), set(res))
|
self.assertEqual(set((rp.id, rp.id) for rp in expected_rp), set(res))
|
||||||
|
|
||||||
rg_ctx = _req_group_search_context(
|
rg_ctx = _req_group_search_context(
|
||||||
@@ -286,7 +286,7 @@ class ProviderDBHelperTestCase(tb.PlacementDbBaseTestCase):
|
|||||||
)
|
)
|
||||||
expected_rp = [rp1, rp2, rp4]
|
expected_rp = [rp1, rp2, rp4]
|
||||||
|
|
||||||
res = rp_obj.get_provider_ids_matching(rg_ctx)
|
res = res_ctx.get_provider_ids_matching(rg_ctx)
|
||||||
self.assertEqual(set((rp.id, rp.id) for rp in expected_rp), set(res))
|
self.assertEqual(set((rp.id, rp.id) for rp in expected_rp), set(res))
|
||||||
|
|
||||||
rg_ctx = _req_group_search_context(
|
rg_ctx = _req_group_search_context(
|
||||||
@@ -296,7 +296,7 @@ class ProviderDBHelperTestCase(tb.PlacementDbBaseTestCase):
|
|||||||
)
|
)
|
||||||
expected_rp = [rp4]
|
expected_rp = [rp4]
|
||||||
|
|
||||||
res = rp_obj.get_provider_ids_matching(rg_ctx)
|
res = res_ctx.get_provider_ids_matching(rg_ctx)
|
||||||
self.assertEqual(set((rp.id, rp.id) for rp in expected_rp), set(res))
|
self.assertEqual(set((rp.id, rp.id) for rp in expected_rp), set(res))
|
||||||
|
|
||||||
rg_ctx = _req_group_search_context(
|
rg_ctx = _req_group_search_context(
|
||||||
@@ -306,7 +306,7 @@ class ProviderDBHelperTestCase(tb.PlacementDbBaseTestCase):
|
|||||||
)
|
)
|
||||||
expected_rp = [rp2, rp3, rp5]
|
expected_rp = [rp2, rp3, rp5]
|
||||||
|
|
||||||
res = rp_obj.get_provider_ids_matching(rg_ctx)
|
res = res_ctx.get_provider_ids_matching(rg_ctx)
|
||||||
self.assertEqual(set((rp.id, rp.id) for rp in expected_rp), set(res))
|
self.assertEqual(set((rp.id, rp.id) for rp in expected_rp), set(res))
|
||||||
|
|
||||||
rg_ctx = _req_group_search_context(
|
rg_ctx = _req_group_search_context(
|
||||||
@@ -316,7 +316,7 @@ class ProviderDBHelperTestCase(tb.PlacementDbBaseTestCase):
|
|||||||
)
|
)
|
||||||
expected_rp = [rp3, rp5]
|
expected_rp = [rp3, rp5]
|
||||||
|
|
||||||
res = rp_obj.get_provider_ids_matching(rg_ctx)
|
res = res_ctx.get_provider_ids_matching(rg_ctx)
|
||||||
self.assertEqual(set((rp.id, rp.id) for rp in expected_rp), set(res))
|
self.assertEqual(set((rp.id, rp.id) for rp in expected_rp), set(res))
|
||||||
|
|
||||||
rg_ctx = _req_group_search_context(
|
rg_ctx = _req_group_search_context(
|
||||||
@@ -327,7 +327,7 @@ class ProviderDBHelperTestCase(tb.PlacementDbBaseTestCase):
|
|||||||
)
|
)
|
||||||
expected_rp = [rp1]
|
expected_rp = [rp1]
|
||||||
|
|
||||||
res = rp_obj.get_provider_ids_matching(rg_ctx)
|
res = res_ctx.get_provider_ids_matching(rg_ctx)
|
||||||
self.assertEqual(set((rp.id, rp.id) for rp in expected_rp), set(res))
|
self.assertEqual(set((rp.id, rp.id) for rp in expected_rp), set(res))
|
||||||
|
|
||||||
rg_ctx = _req_group_search_context(
|
rg_ctx = _req_group_search_context(
|
||||||
@@ -338,7 +338,7 @@ class ProviderDBHelperTestCase(tb.PlacementDbBaseTestCase):
|
|||||||
)
|
)
|
||||||
expected_rp = []
|
expected_rp = []
|
||||||
|
|
||||||
res = rp_obj.get_provider_ids_matching(rg_ctx)
|
res = res_ctx.get_provider_ids_matching(rg_ctx)
|
||||||
self.assertEqual(set((rp.id, rp.id) for rp in expected_rp), set(res))
|
self.assertEqual(set((rp.id, rp.id) for rp in expected_rp), set(res))
|
||||||
|
|
||||||
def test_get_provider_ids_having_all_traits(self):
|
def test_get_provider_ids_having_all_traits(self):
|
||||||
@@ -346,7 +346,7 @@ class ProviderDBHelperTestCase(tb.PlacementDbBaseTestCase):
|
|||||||
tmap = {}
|
tmap = {}
|
||||||
if traitnames:
|
if traitnames:
|
||||||
tmap = trait_obj.ids_from_names(self.ctx, traitnames)
|
tmap = trait_obj.ids_from_names(self.ctx, traitnames)
|
||||||
obs = rp_obj._get_provider_ids_having_all_traits(self.ctx, tmap)
|
obs = res_ctx._get_provider_ids_having_all_traits(self.ctx, tmap)
|
||||||
self.assertEqual(sorted(expected_ids), sorted(obs))
|
self.assertEqual(sorted(expected_ids), sorted(obs))
|
||||||
|
|
||||||
         # No traits. This will never be returned, because it's illegal to
@@ -369,10 +369,10 @@ class ProviderDBHelperTestCase(tb.PlacementDbBaseTestCase):
         # Request with no traits not allowed
         self.assertRaises(
             ValueError,
-            rp_obj._get_provider_ids_having_all_traits, self.ctx, None)
+            res_ctx._get_provider_ids_having_all_traits, self.ctx, None)
         self.assertRaises(
             ValueError,
-            rp_obj._get_provider_ids_having_all_traits, self.ctx, {})
+            res_ctx._get_provider_ids_having_all_traits, self.ctx, {})

         # Common trait returns both RPs having it
         run(['HW_CPU_X86_TBM'], [cn2.id, cn3.id])
@@ -418,7 +418,7 @@ class ProviderTreeDBHelperTestCase(tb.PlacementDbBaseTestCase):
         # NOTE(jaypipes): get_trees_matching_all() expects a dict of
         # resource class internal identifiers, not string names
         rg_ctx = _req_group_search_context(self.ctx, **kwargs)
-        results = rp_obj.get_trees_matching_all(rg_ctx)
+        results = res_ctx.get_trees_matching_all(rg_ctx)

         tree_ids = self._get_rp_ids_matching_names(expected_trees)
         rp_ids = self._get_rp_ids_matching_names(expected_rps)
@@ -742,7 +742,7 @@ class ProviderTreeDBHelperTestCase(tb.PlacementDbBaseTestCase):
         }
         forbidden_traits = {}

-        rp_tuples_with_trait = rp_obj._get_trees_with_traits(
+        rp_tuples_with_trait = res_ctx._get_trees_with_traits(
             self.ctx, rp_ids, required_traits, forbidden_traits)

         tree_root_ids = set([p[1] for p in rp_tuples_with_trait])
@@ -760,7 +760,7 @@ class ProviderTreeDBHelperTestCase(tb.PlacementDbBaseTestCase):
             ssd_t.name: ssd_t.id,
         }

-        rp_tuples_with_trait = rp_obj._get_trees_with_traits(
+        rp_tuples_with_trait = res_ctx._get_trees_with_traits(
             self.ctx, rp_ids, required_traits, forbidden_traits)

         tree_root_ids = set([p[1] for p in rp_tuples_with_trait])
@@ -776,7 +776,7 @@ class ProviderTreeDBHelperTestCase(tb.PlacementDbBaseTestCase):
         }
         forbidden_traits = {}

-        rp_tuples_with_trait = rp_obj._get_trees_with_traits(
+        rp_tuples_with_trait = res_ctx._get_trees_with_traits(
             self.ctx, rp_ids, required_traits, forbidden_traits)

         tree_root_ids = set([p[1] for p in rp_tuples_with_trait])
@@ -791,7 +791,7 @@ class ProviderTreeDBHelperTestCase(tb.PlacementDbBaseTestCase):
         }
         forbidden_traits = {}

-        rp_tuples_with_trait = rp_obj._get_trees_with_traits(
+        rp_tuples_with_trait = res_ctx._get_trees_with_traits(
             self.ctx, rp_ids, required_traits, forbidden_traits)

         tree_root_ids = set([p[1] for p in rp_tuples_with_trait])
@@ -809,7 +809,7 @@ class ProviderTreeDBHelperTestCase(tb.PlacementDbBaseTestCase):
             ssl_t.name: ssl_t.id
         }

-        rp_tuples_with_trait = rp_obj._get_trees_with_traits(
+        rp_tuples_with_trait = res_ctx._get_trees_with_traits(
             self.ctx, rp_ids, required_traits, forbidden_traits)

         tree_root_ids = set([p[1] for p in rp_tuples_with_trait])
@@ -825,7 +825,7 @@ class ProviderTreeDBHelperTestCase(tb.PlacementDbBaseTestCase):
         }
         forbidden_traits = {}

-        rp_tuples_with_trait = rp_obj._get_trees_with_traits(
+        rp_tuples_with_trait = res_ctx._get_trees_with_traits(
             self.ctx, rp_ids, required_traits, forbidden_traits)

         tree_root_ids = set([p[1] for p in rp_tuples_with_trait])
@@ -841,7 +841,7 @@ class ProviderTreeDBHelperTestCase(tb.PlacementDbBaseTestCase):
         }
         forbidden_traits = {}

-        rp_tuples_with_trait = rp_obj._get_trees_with_traits(
+        rp_tuples_with_trait = res_ctx._get_trees_with_traits(
             self.ctx, rp_ids, required_traits, forbidden_traits)

         tree_root_ids = set([p[1] for p in rp_tuples_with_trait])

@@ -20,6 +20,7 @@ from placement.db.sqlalchemy import models
 from placement import exception
 from placement.objects import allocation as alloc_obj
 from placement.objects import inventory as inv_obj
+from placement.objects import research_context as res_ctx
 from placement.objects import resource_provider as rp_obj
 from placement.objects import trait as trait_obj
 from placement.objects import usage as usage_obj
@@ -315,16 +316,16 @@ class ResourceProviderTestCase(tb.PlacementDbBaseTestCase):
         """The has_provider_trees() helper method should return False unless
         there is a resource provider that is a parent.
         """
-        self.assertFalse(rp_obj.has_provider_trees(self.ctx))
+        self.assertFalse(res_ctx.has_provider_trees(self.ctx))
         self._create_provider('cn')

         # No parents yet. Should still be False.
-        self.assertFalse(rp_obj.has_provider_trees(self.ctx))
+        self.assertFalse(res_ctx.has_provider_trees(self.ctx))

         self._create_provider('numa0', parent=uuidsentinel.cn)

         # OK, now we've got a parent, so should be True
-        self.assertTrue(rp_obj.has_provider_trees(self.ctx))
+        self.assertTrue(res_ctx.has_provider_trees(self.ctx))

     def test_destroy_resource_provider(self):
         created_resource_provider = self._create_provider(
@@ -1002,27 +1003,27 @@ class TestResourceProviderAggregates(tb.PlacementDbBaseTestCase):
         # s5 via agg1 and agg2
         expected = set([(s1.uuid, rp.uuid) for rp in (s1, r1, r2, r3, s5)])
         self.assertItemsEqual(
-            expected, rp_obj.anchors_for_sharing_providers(self.ctx, [s1.id]))
+            expected, res_ctx.anchors_for_sharing_providers(self.ctx, [s1.id]))

         # Get same result (id format) when we set get_id=True
         expected = set([(s1.id, rp.id) for rp in (s1, r1, r2, r3, s5)])
         self.assertItemsEqual(
-            expected, rp_obj.anchors_for_sharing_providers(
+            expected, res_ctx.anchors_for_sharing_providers(
                 self.ctx, [s1.id], get_id=True))

         # s2 gets s2 (self) and r3 via agg4
         expected = set([(s2.uuid, rp.uuid) for rp in (s2, r3)])
         self.assertItemsEqual(
-            expected, rp_obj.anchors_for_sharing_providers(self.ctx, [s2.id]))
+            expected, res_ctx.anchors_for_sharing_providers(self.ctx, [s2.id]))

         # s3 gets self
         self.assertEqual(
-            set([(s3.uuid, s3.uuid)]), rp_obj.anchors_for_sharing_providers(
+            set([(s3.uuid, s3.uuid)]), res_ctx.anchors_for_sharing_providers(
                 self.ctx, [s3.id]))

         # s4 isn't really a sharing provider - gets nothing
         self.assertEqual(
-            set([]), rp_obj.anchors_for_sharing_providers(self.ctx, [s4.id]))
+            set([]), res_ctx.anchors_for_sharing_providers(self.ctx, [s4.id]))

         # s5 gets s5 (self),
         # r1 via agg1 through c1,
@@ -1030,7 +1031,7 @@ class TestResourceProviderAggregates(tb.PlacementDbBaseTestCase):
         # s1 via agg1 and agg2
         expected = set([(s5.uuid, rp.uuid) for rp in (s5, r1, r2, s1)])
         self.assertItemsEqual(
-            expected, rp_obj.anchors_for_sharing_providers(self.ctx, [s5.id]))
+            expected, res_ctx.anchors_for_sharing_providers(self.ctx, [s5.id]))

         # validate that we can get them all at once
         expected = set(
@@ -1041,7 +1042,7 @@ class TestResourceProviderAggregates(tb.PlacementDbBaseTestCase):
         )
         self.assertItemsEqual(
             expected,
-            rp_obj.anchors_for_sharing_providers(
+            res_ctx.anchors_for_sharing_providers(
                 self.ctx, [s1.id, s2.id, s3.id, s4.id, s5.id], get_id=True))


@@ -1106,7 +1107,7 @@ class SharedProviderTestCase(tb.PlacementDbBaseTestCase):

         # OK, now that has all been set up, let's verify that we get the ID of
         # the shared storage pool when we ask for DISK_GB
-        got_ids = rp_obj.get_providers_with_shared_capacity(
+        got_ids = res_ctx.get_providers_with_shared_capacity(
             self.ctx,
             orc.STANDARDS.index(orc.DISK_GB),
             100,
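As a quick illustration (not part of the patch itself), here is a minimal sketch of how calling code picks up the relocated helpers after this change. The function and variable names (`check_relocated_helpers`, `ctx`, `sharing_rp_id`) are placeholders; the two calls mirror ones exercised in the updated tests above.

```python
# Minimal sketch, assuming the import path shown in the hunk above.
# The helpers keep their call shapes; only their home module changes
# from resource_provider to research_context.
from placement.objects import research_context as res_ctx


def check_relocated_helpers(ctx, sharing_rp_id):
    # Same calls as before the move, now through the res_ctx alias.
    has_trees = res_ctx.has_provider_trees(ctx)
    anchors = res_ctx.anchors_for_sharing_providers(ctx, [sharing_rp_id])
    return has_trees, anchors
```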