Merge "Add system storage resource support"
releasenotes/notes/add-storage-da766d3dbf9fb385.yaml (new file)
@@ -0,0 +1,6 @@
+---
+features:
+  - |
+    Adds the Storage resource to the library. It also provides the
+    max size available (in bytes) of drives and volumes that can be
+    accessed from storage.
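A minimal usage sketch of the new interfaces may help orient review (hedged: ``conn`` stands in for an authenticated sushy connector, which the unit tests below replace with a mock; the path and Redfish version mirror those tests):

    from sushy.resources.system.storage import storage

    # 'conn' is assumed: any sushy connector works; the tests use mock.Mock().
    stor = storage.Storage(conn, '/redfish/v1/Systems/437XR1138R2/Storage/1',
                           redfish_version='1.0.2')

    stor.drives_max_size_bytes   # largest drive capacity, in bytes
    stor.volumes.max_size_bytes  # largest volume capacity, in bytes
    for drv in stor.drives:      # lazily built, cached list of Drive objects
        print(drv.capacity_bytes)

    stor.refresh()               # invalidates the caches; next access re-fetches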
@@ -17,13 +17,15 @@ import logging
 
 from sushy.resources import base
+from sushy.resources.system.storage import drive
+from sushy.resources.system.storage import volume
 from sushy import utils
 
 
 LOG = logging.getLogger(__name__)
 
 
 class Storage(base.ResourceBase):
-    """This class represents resources that represent a storage subsystem.
+    """This class represents the storage subsystem resources.
 
     A storage subsystem represents a set of storage controllers (physical or
     virtual) and the resources such as drives and volumes that can be accessed
@@ -41,6 +43,8 @@ class Storage(base.ResourceBase):
     """A tuple with the drive identities"""
 
     _drives_max_size_bytes = None
+    _drives = None
+    _volumes = None  # reference to VolumeCollection instance
 
     def get_drive(self, drive_identity):
         """Given the drive identity, return a ``Drive`` object
@@ -52,20 +56,89 @@ class Storage(base.ResourceBase):
         return drive.Drive(self._conn, drive_identity,
                            redfish_version=self.redfish_version)
 
+    @property
+    def drives(self):
+        """Return a list of `Drive` objects present in the storage resource.
+
+        It is set once, the first time it is queried. On subsequent
+        invocations, it returns a cached list of `Drive` objects until it is
+        marked stale.
+
+        :returns: A list of `Drive` objects
+        :raises: ResourceNotFoundError
+        """
+        if self._drives is None:
+            self._drives = [
+                self.get_drive(id_) for id_ in self.drives_identities]
+        return self._drives
+
     @property
     def drives_max_size_bytes(self):
         """Max size available in bytes among all Drives of this collection."""
         if self._drives_max_size_bytes is None:
             self._drives_max_size_bytes = (
-                utils.max_safe(self.get_drive(drive_id).capacity_bytes
-                               for drive_id in self.drives_identities))
+                utils.max_safe(drv.capacity_bytes for drv in self.drives))
         return self._drives_max_size_bytes
 
-    def _do_refresh(self, force=False):
-        """Do resource specific refresh activities
+    @property
+    def volumes(self):
+        """Property to reference `VolumeCollection` instance
 
-        On refresh, all sub-resources are marked as stale, i.e.
-        greedy-refresh not done for them unless forced by ``force``
-        argument.
+        It is set once, the first time it is queried. On refresh, this
+        property is marked as stale (greedy-refresh not done at that
+        point). Here only the actual refresh of the sub-resource happens,
+        if the resource is stale.
         """
+        if self._volumes is None:
+            self._volumes = volume.VolumeCollection(
+                self._conn, utils.get_sub_resource_path_by(self, 'Volumes'),
+                redfish_version=self.redfish_version)
+
+        self._volumes.refresh(force=False)
+        return self._volumes
+
+    def _do_refresh(self, force=False):
+        """Do resource specific refresh activities."""
+        # Note(deray): undefine the attributes here for fresh evaluation in
+        # subsequent calls to their exposed properties.
         self._drives_max_size_bytes = None
+        self._drives = None
+        # invalidate the nested resource
+        if self._volumes is not None:
+            self._volumes.invalidate(force)
+
+
+class StorageCollection(base.ResourceCollectionBase):
+    """This class represents the collection of Storage resources"""
+
+    _max_drive_size_bytes = None
+    _max_volume_size_bytes = None
+
+    @property
+    def _resource_type(self):
+        return Storage
+
+    @property
+    def max_drive_size_bytes(self):
+        """Max size available (in bytes) among all Drive resources."""
+        if self._max_drive_size_bytes is None:
+            self._max_drive_size_bytes = max(
+                storage_.drives_max_size_bytes
+                for storage_ in self.get_members())
+        return self._max_drive_size_bytes
+
+    @property
+    def max_volume_size_bytes(self):
+        """Max size available (in bytes) among all Volumes under this."""
+        if self._max_volume_size_bytes is None:
+            self._max_volume_size_bytes = max(
+                storage_.volumes.max_size_bytes
+                for storage_ in self.get_members())
+        return self._max_volume_size_bytes
+
+    def _do_refresh(self, force=False):
+        """Do resource specific refresh activities."""
+        # Note(deray): undefine the attributes here for fresh evaluation in
+        # subsequent calls to their exposed properties.
+        self._max_drive_size_bytes = None
+        self._max_volume_size_bytes = None
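Two helpers from ``sushy.utils`` appear above without their definitions: judging by usage, ``max_safe`` behaves like ``max()`` but tolerates missing capacity values, and ``get_sub_resource_path_by`` pulls the ``Volumes`` URI out of the storage document; both implementations live outside this diff. The memoize-then-invalidate pattern itself is the heart of the change, so here is a stripped-down, self-contained sketch of it (illustrative names, not sushy API):

    class CachedResource(object):
        _items = None  # memoized on first property access

        @property
        def items(self):
            # Compute once; later reads return the cache until refresh().
            if self._items is None:
                self._items = self._load_items()
            return self._items

        def _load_items(self):
            # Stands in for the HTTP fetches of sub-resources.
            return ['drive1', 'drive2']

        def refresh(self, force=False):
            # Undefine the cache so the next property access re-evaluates.
            self._items = None

Storage._do_refresh() above does exactly this for ``_drives`` and ``_drives_max_size_bytes``, and additionally marks the nested ``VolumeCollection`` stale rather than discarding it.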
sushy/tests/unit/json_samples/storage_collection.json (new file)
@@ -0,0 +1,13 @@
+{
+    "@odata.type": "#StorageCollection.StorageCollection",
+    "Name": "Storage Collection",
+    "Members@odata.count": 1,
+    "Members": [
+        {
+            "@odata.id": "/redfish/v1/Systems/437XR1138R2/Storage/1"
+        }
+    ],
+    "@odata.context": "/redfish/v1/$metadata#StorageCollection.StorageCollection",
+    "@odata.id": "/redfish/v1/Systems/437XR1138R2/Storage",
+    "@Redfish.Copyright": "Copyright 2014-2017 Distributed Management Task Force, Inc. (DMTF). For the full DMTF copyright policy, see http://www.dmtf.org/about/policies/copyright."
+}
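This sample is what the collection tests below feed to the mocked connector; ``members_identities`` is derived from the ``@odata.id`` of each entry under ``Members``. A quick sketch of that derivation, assuming the sample sits at the path the tests use:

    import json

    with open('sushy/tests/unit/json_samples/storage_collection.json') as f:
        payload = json.load(f)

    # Each member's @odata.id becomes one entry in members_identities.
    identities = tuple(member['@odata.id'] for member in payload['Members'])
    assert identities == ('/redfish/v1/Systems/437XR1138R2/Storage/1',)
    assert payload['Members@odata.count'] == len(identities)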
@@ -16,6 +16,7 @@ import mock
 
 from sushy.resources.system.storage import drive
+from sushy.resources.system.storage import storage
 from sushy.resources.system.storage import volume
 from sushy.tests.unit import base
 
 
@@ -25,6 +26,13 @@ STORAGE_DRIVE_FILE_NAMES = [
     'sushy/tests/unit/json_samples/drive3.json'
 ]
 
+STORAGE_VOLUME_FILE_NAMES = [
+    'sushy/tests/unit/json_samples/volume_collection.json',
+    'sushy/tests/unit/json_samples/volume.json',
+    'sushy/tests/unit/json_samples/volume2.json',
+    'sushy/tests/unit/json_samples/volume3.json'
+]
+
 
 class StorageTestCase(base.TestCase):
 
@@ -59,6 +67,58 @@ class StorageTestCase(base.TestCase):
         self.assertIsInstance(actual_drive, drive.Drive)
         self.assertTrue(self.conn.get.return_value.json.called)
 
+    @mock.patch.object(drive, 'Drive', autospec=True)
+    def test_drives(self, Drive_mock):
+        # | WHEN |
+        all_drives = self.storage.drives
+        # | THEN |
+        calls = [
+            mock.call(self.storage._conn,
+                      '/redfish/v1/Systems/437XR1138R2/Storage/1/Drives/35D38F11ACEF7BD3',  # noqa
+                      redfish_version=self.storage.redfish_version),
+            mock.call(self.storage._conn,
+                      '/redfish/v1/Systems/437XR1138R2/Storage/1/Drives/3F5A8C54207B7233',  # noqa
+                      redfish_version=self.storage.redfish_version),
+            mock.call(self.storage._conn,
+                      '/redfish/v1/Systems/437XR1138R2/Storage/1/Drives/32ADF365C6C1B7BD',  # noqa
+                      redfish_version=self.storage.redfish_version),
+            mock.call(self.storage._conn,
+                      '/redfish/v1/Systems/437XR1138R2/Storage/1/Drives/3D58ECBC375FD9F2',  # noqa
+                      redfish_version=self.storage.redfish_version)
+        ]
+        Drive_mock.assert_has_calls(calls)
+        self.assertIsInstance(all_drives, list)
+        self.assertEqual(4, len(all_drives))
+        self.assertIsInstance(all_drives[0], drive.Drive.__class__)
+
+        # returning cached value
+        Drive_mock.reset_mock()
+        # | WHEN |
+        all_drives = self.storage.drives
+        # | THEN |
+        self.assertFalse(Drive_mock.called)
+        self.assertIsInstance(all_drives, list)
+        self.assertEqual(4, len(all_drives))
+        self.assertIsInstance(all_drives[0], drive.Drive.__class__)
+
+    def test_drives_after_refresh(self):
+        self.storage.refresh()
+        self.assertIsNone(self.storage._drives)
+        self.conn.get.return_value.json.reset_mock()
+
+        successive_return_values = []
+        # repeating the 3rd sample to provide mock data for the 4th iteration.
+        for fname in STORAGE_DRIVE_FILE_NAMES + [STORAGE_DRIVE_FILE_NAMES[-1]]:
+            with open(fname) as f:
+                successive_return_values.append(json.load(f))
+        self.conn.get.return_value.json.side_effect = successive_return_values
+
+        all_drives = self.storage.drives
+        self.assertIsInstance(all_drives, list)
+        self.assertEqual(4, len(all_drives))
+        for drv in all_drives:
+            self.assertIsInstance(drv, drive.Drive)
+
     def test_drives_max_size_bytes(self):
         self.assertIsNone(self.storage._drives_max_size_bytes)
         self.conn.get.return_value.json.reset_mock()
@@ -66,7 +126,7 @@ class StorageTestCase(base.TestCase):
         successive_return_values = []
         # repeating the 3rd sample to provide mock data for the 4th iteration.
         for fname in STORAGE_DRIVE_FILE_NAMES + [STORAGE_DRIVE_FILE_NAMES[-1]]:
-            with open(fname, 'r') as f:
+            with open(fname) as f:
                 successive_return_values.append(json.load(f))
         self.conn.get.return_value.json.side_effect = successive_return_values
 
@@ -85,8 +145,173 @@ class StorageTestCase(base.TestCase):
         successive_return_values = []
         # repeating the 3rd sample to provide mock data for the 4th iteration.
         for fname in STORAGE_DRIVE_FILE_NAMES + [STORAGE_DRIVE_FILE_NAMES[-1]]:
-            with open(fname, 'r') as f:
+            with open(fname) as f:
                 successive_return_values.append(json.load(f))
         self.conn.get.return_value.json.side_effect = successive_return_values
 
         self.assertEqual(899527000000, self.storage.drives_max_size_bytes)
+
+    def test_volumes(self):
+        # check the underlying variable value
+        self.assertIsNone(self.storage._volumes)
+        # | GIVEN |
+        self.conn.get.return_value.json.reset_mock()
+        with open('sushy/tests/unit/json_samples/volume_collection.json') as f:
+            self.conn.get.return_value.json.return_value = json.load(f)
+        # | WHEN |
+        actual_volumes = self.storage.volumes
+        # | THEN |
+        self.assertIsInstance(actual_volumes,
+                              volume.VolumeCollection)
+        self.conn.get.return_value.json.assert_called_once_with()
+
+    def test_volumes_cached(self):
+        # | GIVEN |
+        self.conn.get.return_value.json.reset_mock()
+        with open('sushy/tests/unit/json_samples/volume_collection.json') as f:
+            self.conn.get.return_value.json.return_value = json.load(f)
+        # invoke it once
+        actual_volumes = self.storage.volumes
+        self.conn.get.return_value.json.reset_mock()
+        # | WHEN & THEN |
+        # tests that subsequent invocations return the same object
+        self.assertIs(actual_volumes,
+                      self.storage.volumes)
+        self.conn.get.return_value.json.assert_not_called()
+
+    def test_volumes_on_refresh(self):
+        # | GIVEN |
+        with open('sushy/tests/unit/json_samples/volume_collection.json') as f:
+            self.conn.get.return_value.json.return_value = json.load(f)
+        # | WHEN & THEN |
+        self.assertIsInstance(self.storage.volumes,
+                              volume.VolumeCollection)
+
+        # On refreshing the storage instance...
+        with open('sushy/tests/unit/json_samples/storage.json') as f:
+            self.conn.get.return_value.json.return_value = json.load(f)
+
+        self.storage.invalidate()
+        self.storage.refresh(force=False)
+
+        # | WHEN & THEN |
+        self.assertIsNotNone(self.storage._volumes)
+        self.assertTrue(self.storage._volumes._is_stale)
+
+        # | GIVEN |
+        with open('sushy/tests/unit/json_samples/volume_collection.json') as f:
+            self.conn.get.return_value.json.return_value = json.load(f)
+        # | WHEN & THEN |
+        self.assertIsInstance(self.storage.volumes,
+                              volume.VolumeCollection)
+        self.assertFalse(self.storage._volumes._is_stale)
+
+
+class StorageCollectionTestCase(base.TestCase):
+
+    def setUp(self):
+        super(StorageCollectionTestCase, self).setUp()
+        self.conn = mock.Mock()
+        with open('sushy/tests/unit/json_samples/'
+                  'storage_collection.json') as f:
+            self.conn.get.return_value.json.return_value = json.load(f)
+        self.stor_col = storage.StorageCollection(
+            self.conn, '/redfish/v1/Systems/437XR1138R2/Storage',
+            redfish_version='1.0.2')
+
+    def test__parse_attributes(self):
+        self.stor_col._parse_attributes()
+        self.assertEqual((
+            '/redfish/v1/Systems/437XR1138R2/Storage/1',),
+            self.stor_col.members_identities)
+
+    @mock.patch.object(storage, 'Storage', autospec=True)
+    def test_get_member(self, Storage_mock):
+        self.stor_col.get_member(
+            '/redfish/v1/Systems/437XR1138R2/Storage/1')
+        Storage_mock.assert_called_once_with(
+            self.stor_col._conn,
+            '/redfish/v1/Systems/437XR1138R2/Storage/1',
+            redfish_version=self.stor_col.redfish_version)
+
+    @mock.patch.object(storage, 'Storage', autospec=True)
+    def test_get_members(self, Storage_mock):
+        members = self.stor_col.get_members()
+        Storage_mock.assert_called_once_with(
+            self.stor_col._conn,
+            '/redfish/v1/Systems/437XR1138R2/Storage/1',
+            redfish_version=self.stor_col.redfish_version)
+        self.assertIsInstance(members, list)
+        self.assertEqual(1, len(members))
+
+    def test_max_drive_size_bytes(self):
+        self.assertIsNone(self.stor_col._max_drive_size_bytes)
+        self.conn.get.return_value.json.reset_mock()
+
+        successive_return_values = []
+        with open('sushy/tests/unit/json_samples/storage.json') as f:
+            successive_return_values.append(json.load(f))
+        # repeating the 3rd sample to provide mock data for the 4th iteration.
+        for fname in STORAGE_DRIVE_FILE_NAMES + [STORAGE_DRIVE_FILE_NAMES[-1]]:
+            with open(fname) as f:
+                successive_return_values.append(json.load(f))
+        self.conn.get.return_value.json.side_effect = successive_return_values
+
+        self.assertEqual(899527000000, self.stor_col.max_drive_size_bytes)
+
+        # any subsequent fetch is served from the cached value
+        self.conn.get.return_value.json.reset_mock()
+        self.assertEqual(899527000000, self.stor_col.max_drive_size_bytes)
+        self.conn.get.return_value.json.assert_not_called()
+
+    def test_max_drive_size_bytes_after_refresh(self):
+        self.stor_col.refresh(force=False)
+        self.assertIsNone(self.stor_col._max_drive_size_bytes)
+        self.conn.get.return_value.json.reset_mock()
+
+        successive_return_values = []
+        with open('sushy/tests/unit/json_samples/storage.json') as f:
+            successive_return_values.append(json.load(f))
+        # repeating the 3rd sample to provide mock data for the 4th iteration.
+        for fname in STORAGE_DRIVE_FILE_NAMES + [STORAGE_DRIVE_FILE_NAMES[-1]]:
+            with open(fname) as f:
+                successive_return_values.append(json.load(f))
+        self.conn.get.return_value.json.side_effect = successive_return_values
+
+        self.assertEqual(899527000000, self.stor_col.max_drive_size_bytes)
+
+    def test_max_volume_size_bytes(self):
+        self.assertIsNone(self.stor_col._max_volume_size_bytes)
+        self.conn.get.return_value.json.reset_mock()
+
+        successive_return_values = []
+        with open('sushy/tests/unit/json_samples/storage.json') as f:
+            successive_return_values.append(json.load(f))
+        # mock data for the volume collection and its member volumes.
+        for fname in STORAGE_VOLUME_FILE_NAMES:
+            with open(fname) as f:
+                successive_return_values.append(json.load(f))
+        self.conn.get.return_value.json.side_effect = successive_return_values
+
+        self.assertEqual(1073741824000, self.stor_col.max_volume_size_bytes)
+
+        # any subsequent fetch is served from the cached value
+        self.conn.get.return_value.json.reset_mock()
+        self.assertEqual(1073741824000, self.stor_col.max_volume_size_bytes)
+        self.conn.get.return_value.json.assert_not_called()
+
+    def test_max_volume_size_bytes_after_refresh(self):
+        self.stor_col.refresh(force=False)
+        self.assertIsNone(self.stor_col._max_volume_size_bytes)
+        self.conn.get.return_value.json.reset_mock()
+
+        successive_return_values = []
+        with open('sushy/tests/unit/json_samples/storage.json') as f:
+            successive_return_values.append(json.load(f))
+        # mock data for the volume collection and its member volumes.
+        for fname in STORAGE_VOLUME_FILE_NAMES:
+            with open(fname) as f:
+                successive_return_values.append(json.load(f))
+        self.conn.get.return_value.json.side_effect = successive_return_values
+
+        self.assertEqual(1073741824000, self.stor_col.max_volume_size_bytes)
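One mocking idiom recurs in all of the size tests above: queue several JSON payloads on the connector so that each ``conn.get(...).json()`` call returns the next document in order. A condensed sketch of the idiom (the file names are illustrative):

    import json

    import mock  # these tests use the standalone mock library

    conn = mock.Mock()
    payloads = []
    for fname in ['storage.json', 'drive.json', 'drive2.json']:  # illustrative
        with open('sushy/tests/unit/json_samples/' + fname) as f:
            payloads.append(json.load(f))
    # side_effect hands out one payload per call: the storage document
    # first, then one drive document per subsequent GET.
    conn.get.return_value.json.side_effect = payloads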
@@ -79,12 +79,10 @@ class SimpleStorageCollectionTestCase(base.TestCase):
     @mock.patch.object(simple_storage, 'SimpleStorage', autospec=True)
     def test_get_members(self, SimpleStorage_mock):
         members = self.simpl_stor_col.get_members()
-        calls = [
-            mock.call(self.simpl_stor_col._conn,
-                      '/redfish/v1/Systems/437XR1138R2/SimpleStorage/1',
-                      redfish_version=self.simpl_stor_col.redfish_version),
-        ]
-        SimpleStorage_mock.assert_has_calls(calls)
+        SimpleStorage_mock.assert_called_once_with(
+            self.simpl_stor_col._conn,
+            '/redfish/v1/Systems/437XR1138R2/SimpleStorage/1',
+            redfish_version=self.simpl_stor_col.redfish_version)
         self.assertIsInstance(members, list)
         self.assertEqual(1, len(members))
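The swap in this last hunk is from a loose assertion to a strict one, and the distinction is easy to miss. A short note on standard ``mock`` behavior:

    import mock

    m = mock.Mock()
    m('a')
    m('b')

    # Passes: the expected calls merely have to appear in the call sequence.
    m.assert_has_calls([mock.call('a')])

    # Fails: the mock was called twice, not exactly once.
    try:
        m.assert_called_once_with('a')
    except AssertionError:
        pass  # the stricter check catches the extra call

With a single expected member, ``assert_called_once_with`` states the intent exactly, which is why the ``calls`` list scaffolding could be dropped.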