Fix volume provider pool capacity metrics
Turns out there are a few small issues with the new volume metrics when
used with Ceph as the storage backend. This change fixes the issues
encountered with Ceph. It also adds a new storage pool to the tests,
which triggers all of the issues.

List of issues this fixes:

- The max_over_subscription_ratio is returned as a string, which caused
  an exception.
- pool_name is missing for some pools.
- provisioned_capacity_gb is missing for some pools.

Closes-Bug: #2113903
Change-Id: I4fd2fd89bd666c2466be5af2c15eb5134de0ce48
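For reference, a minimal sketch of the max_over_subscription_ratio
failure and the cast that fixes it, modeled on the values in the new
Ceph pool fixture. The pool dict is a stand-in for the real pool
object, and the provisioned_capacity_gb value is assumed here so the
arithmetic has something to work with:

import math

# Illustrative Ceph pool stats; max_over_subscription_ratio arrives
# as a string rather than a float.
pool = {
    'total_capacity_gb': 85.0,
    'reserved_percentage': 0,
    'thin_provisioning_support': True,
    'max_over_subscription_ratio': '20.0',
    'provisioned_capacity_gb': 1.0,  # assumed for this sketch
}

reserved_size = math.floor(
    (pool['reserved_percentage'] / 100) * pool['total_capacity_gb'])

max_over_subscription_ratio = 1.0
if pool['thin_provisioning_support']:
    # Before the fix, multiplying the raw string value raised
    # TypeError: can't multiply sequence by non-int of type 'float'.
    # Casting to float first avoids the exception.
    max_over_subscription_ratio = float(
        pool['max_over_subscription_ratio'])

virtual_free = (
    max_over_subscription_ratio *
    (pool['total_capacity_gb'] - reserved_size) -
    pool['provisioned_capacity_gb'])
print(virtual_free)  # 20.0 * (85.0 - 0) - 1.0 = 1699.0

In the actual Ceph fixture, provisioned_capacity_gb is absent
altogether, which is why the provisioned and virtual_free pollsters
below also gain a guard before emitting a sample.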
@@ -159,7 +159,28 @@ POOL_LIST = [
           'storage_protocol': 'iSCSI',
           'vendor_name': 'Open Source',
           'driver_version': '3.0.0',
-          'timestamp': '2025-03-21T14:19:02.901750'})
+          'timestamp': '2025-03-21T14:19:02.901750'}),
+    type('VolumePool', (object,),
+         {'name': 'cinder-3ceee-volume-ceph-0@ceph#ceph',
+          'vendor_name': 'Open Source',
+          'driver_version': '1.3.0',
+          'storage_protocol': 'ceph',
+          'total_capacity_gb': 85.0,
+          'free_capacity_gb': 85.0,
+          'reserved_percentage': 0,
+          'multiattach': True,
+          'thin_provisioning_support': True,
+          'max_over_subscription_ratio': '20.0',
+          'location_info':
+              'ceph:/etc/ceph/ceph.conf:a94b63c4e:openstack:volumes',
+          'backend_state': 'up',
+          'qos_support': True,
+          'volume_backend_name': 'ceph',
+          'replication_enabled': False,
+          'allocated_capacity_gb': 1,
+          'filter_function': None,
+          'goodness_function': None,
+          'timestamp': '2025-06-09T13:29:43.286226'})
 ]
 
 
@@ -243,13 +264,20 @@ class TestVolumeProviderPoolCapacityTotalPollster(base.BaseTestCase):
     def test_volume_provider_pool_capacity_total_pollster(self):
         volume_pool_size_total_samples = list(
             self.pollster.get_samples(self.manager, {}, resources=POOL_LIST))
-        self.assertEqual(1, len(volume_pool_size_total_samples))
+        self.assertEqual(2, len(volume_pool_size_total_samples))
         self.assertEqual('volume.provider.pool.capacity.total',
                          volume_pool_size_total_samples[0].name)
         self.assertEqual(28.5, volume_pool_size_total_samples[0].volume)
         self.assertEqual('localhost.localdomain@lvmdriver-1#lvmdriver-1',
                          volume_pool_size_total_samples[0].resource_id)
+
+        self.assertEqual('volume.provider.pool.capacity.total',
+                         volume_pool_size_total_samples[1].name)
+        self.assertEqual(85.0, volume_pool_size_total_samples[1].volume)
+        self.assertEqual('cinder-3ceee-volume-ceph-0@ceph#ceph',
+                         volume_pool_size_total_samples[1].resource_id)
 
 
 class TestVolumeProviderPoolCapacityFreePollster(base.BaseTestCase):
     def setUp(self):
@@ -261,13 +289,20 @@ class TestVolumeProviderPoolCapacityFreePollster(base.BaseTestCase):
     def test_volume_provider_pool_capacity_free_pollster(self):
         volume_pool_size_free_samples = list(
             self.pollster.get_samples(self.manager, {}, resources=POOL_LIST))
-        self.assertEqual(1, len(volume_pool_size_free_samples))
+        self.assertEqual(2, len(volume_pool_size_free_samples))
         self.assertEqual('volume.provider.pool.capacity.free',
                          volume_pool_size_free_samples[0].name)
         self.assertEqual(28.39, volume_pool_size_free_samples[0].volume)
         self.assertEqual('localhost.localdomain@lvmdriver-1#lvmdriver-1',
                          volume_pool_size_free_samples[0].resource_id)
+
+        self.assertEqual('volume.provider.pool.capacity.free',
+                         volume_pool_size_free_samples[1].name)
+        self.assertEqual(85.0, volume_pool_size_free_samples[1].volume)
+        self.assertEqual('cinder-3ceee-volume-ceph-0@ceph#ceph',
+                         volume_pool_size_free_samples[1].resource_id)
 
 
 class TestVolumeProviderPoolCapacityProvisionedPollster(base.BaseTestCase):
     def setUp(self):
@@ -316,9 +351,16 @@ class TestVolumeProviderPoolCapacityAllocatedPollster(base.BaseTestCase):
     def test_volume_provider_pool_capacity_allocated_pollster(self):
         volume_pool_size_allocated_samples = list(
             self.pollster.get_samples(self.manager, {}, resources=POOL_LIST))
-        self.assertEqual(1, len(volume_pool_size_allocated_samples))
+        self.assertEqual(2, len(volume_pool_size_allocated_samples))
         self.assertEqual('volume.provider.pool.capacity.allocated',
                          volume_pool_size_allocated_samples[0].name)
         self.assertEqual(4, volume_pool_size_allocated_samples[0].volume)
         self.assertEqual('localhost.localdomain@lvmdriver-1#lvmdriver-1',
                          volume_pool_size_allocated_samples[0].resource_id)
+
+        self.assertEqual('volume.provider.pool.capacity.allocated',
+                         volume_pool_size_allocated_samples[1].name)
+        self.assertEqual(1, volume_pool_size_allocated_samples[1].volume)
+        self.assertEqual('cinder-3ceee-volume-ceph-0@ceph#ceph',
+                         volume_pool_size_allocated_samples[1].resource_id)
@@ -125,8 +125,6 @@ class VolumeProviderPoolCapacityTotal(_Base):
     def default_discovery(self):
         return 'volume_pools'
 
-    FIELDS = ['pool_name']
-
     def get_samples(self, manager, cache, resources):
         for pool in resources:
             yield sample.Sample(
@@ -137,7 +135,9 @@ class VolumeProviderPoolCapacityTotal(_Base):
                 user_id=None,
                 project_id=None,
                 resource_id=pool.name,
-                resource_metadata=self.extract_metadata(pool),
+                resource_metadata={
+                    "pool_name": getattr(pool, "pool_name", None)
+                }
             )
 
 
@@ -146,8 +146,6 @@ class VolumeProviderPoolCapacityFree(_Base):
     def default_discovery(self):
         return 'volume_pools'
 
-    FIELDS = ['pool_name']
-
     def get_samples(self, manager, cache, resources):
         for pool in resources:
             yield sample.Sample(
@@ -158,7 +156,9 @@ class VolumeProviderPoolCapacityFree(_Base):
                 user_id=None,
                 project_id=None,
                 resource_id=pool.name,
-                resource_metadata=self.extract_metadata(pool),
+                resource_metadata={
+                    "pool_name": getattr(pool, "pool_name", None)
+                }
            )
 
 
@@ -167,20 +167,21 @@ class VolumeProviderPoolCapacityProvisioned(_Base):
     def default_discovery(self):
         return 'volume_pools'
 
-    FIELDS = ['pool_name']
-
     def get_samples(self, manager, cache, resources):
         for pool in resources:
-            yield sample.Sample(
-                name='volume.provider.pool.capacity.provisioned',
-                type=sample.TYPE_GAUGE,
-                unit='GB',
-                volume=pool.provisioned_capacity_gb,
-                user_id=None,
-                project_id=None,
-                resource_id=pool.name,
-                resource_metadata=self.extract_metadata(pool),
-            )
+            if getattr(pool, 'provisioned_capacity_gb', None):
+                yield sample.Sample(
+                    name='volume.provider.pool.capacity.provisioned',
+                    type=sample.TYPE_GAUGE,
+                    unit='GB',
+                    volume=pool.provisioned_capacity_gb,
+                    user_id=None,
+                    project_id=None,
+                    resource_id=pool.name,
+                    resource_metadata={
+                        "pool_name": getattr(pool, "pool_name", None)
+                    }
+                )
 
 
 class VolumeProviderPoolCapacityVirtualFree(_Base):
@@ -188,31 +189,34 @@ class VolumeProviderPoolCapacityVirtualFree(_Base):
     def default_discovery(self):
         return 'volume_pools'
 
-    FIELDS = ['pool_name']
-
     def get_samples(self, manager, cache, resources):
         for pool in resources:
-            reserved_size = math.floor(
-                (pool.reserved_percentage / 100) * pool.total_capacity_gb
-            )
-            max_over_subscription_ratio = 1.0
-            if pool.thin_provisioning_support:
-                max_over_subscription_ratio = pool.max_over_subscription_ratio
-            value = (
-                max_over_subscription_ratio *
-                (pool.total_capacity_gb - reserved_size) -
-                pool.provisioned_capacity_gb
-            )
-            yield sample.Sample(
-                name='volume.provider.pool.capacity.virtual_free',
-                type=sample.TYPE_GAUGE,
-                unit='GB',
-                volume=value,
-                user_id=None,
-                project_id=None,
-                resource_id=pool.name,
-                resource_metadata=self.extract_metadata(pool),
-            )
+            if getattr(pool, 'provisioned_capacity_gb', None):
+                reserved_size = math.floor(
+                    (pool.reserved_percentage / 100) * pool.total_capacity_gb
+                )
+                max_over_subscription_ratio = 1.0
+                if pool.thin_provisioning_support:
+                    max_over_subscription_ratio = float(
+                        pool.max_over_subscription_ratio
+                    )
+                value = (
+                    max_over_subscription_ratio *
+                    (pool.total_capacity_gb - reserved_size) -
+                    pool.provisioned_capacity_gb
+                )
+                yield sample.Sample(
+                    name='volume.provider.pool.capacity.virtual_free',
+                    type=sample.TYPE_GAUGE,
+                    unit='GB',
+                    volume=value,
+                    user_id=None,
+                    project_id=None,
+                    resource_id=pool.name,
+                    resource_metadata={
+                        "pool_name": getattr(pool, "pool_name", None)
+                    }
+                )
 
 
 class VolumeProviderPoolCapacityAllocated(_Base):
@@ -220,8 +224,6 @@ class VolumeProviderPoolCapacityAllocated(_Base):
     def default_discovery(self):
         return 'volume_pools'
 
-    FIELDS = ['pool_name']
-
     def get_samples(self, manager, cache, resources):
         for pool in resources:
             yield sample.Sample(
@@ -232,5 +234,7 @@ class VolumeProviderPoolCapacityAllocated(_Base):
                 user_id=None,
                 project_id=None,
                 resource_id=pool.name,
-                resource_metadata=self.extract_metadata(pool),
+                resource_metadata={
+                    "pool_name": getattr(pool, "pool_name", None)
+                }
             )
@@ -0,0 +1,5 @@
+---
+fixes:
+  - |
+    [`bug 2113903 <https://bugs.launchpad.net/ceilometer/+bug/2113903>`_]
+    Fix volume provider pool capacity metrics for ceph backend.
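For completeness, a rough sketch of how the other two fixes behave with
a Ceph-style pool that reports neither pool_name nor
provisioned_capacity_gb. The pool object is illustrative, built the
same way as the POOL_LIST fixtures; this is not the pollster code
itself:

# A Ceph-style pool missing the optional fields, constructed like
# the POOL_LIST fixtures in the tests.
ceph_pool = type('VolumePool', (object,),
                 {'name': 'cinder-3ceee-volume-ceph-0@ceph#ceph'})

# provisioned_capacity_gb guard: the provisioned and virtual_free
# pollsters now emit no sample instead of failing on the missing
# attribute.
if getattr(ceph_pool, 'provisioned_capacity_gb', None):
    print('would emit a provisioned-capacity sample')
else:
    print('skipped: pool reports no provisioned_capacity_gb')

# pool_name guard: metadata is built with a getattr default, so a
# missing attribute becomes None instead of breaking the sample.
metadata = {'pool_name': getattr(ceph_pool, 'pool_name', None)}
print(metadata)  # {'pool_name': None}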