Add TC WRCPPV-1015 rook-ceph capabilities testing
- Added test_ceph_rook_capabilities_testing_open_model()
- Added system_storage_backend_modify()
- Added system_storage_backend_modify_with_error()
- Removed unnecessary checking from wait_for_ceph_health_status()
- Modified wait_for_ceph_osd_pool_replicated_size_update()
- Log: /folk/cgts_logs/logs/WRCPPV-1015

Change-Id: I11263d2cf97088fcb495bce6ae7f1551ab1c9ad3
Signed-off-by: ppeng <peng.peng@windriver.com>
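For orientation before the diffs, a minimal usage sketch of the two new keyword methods described above (the import path for SystemStorageBackendKeywords is an assumption; it is not shown in this diff):

# Sketch only: driving the new keyword methods added by this change.
from keywords.cloud_platform.ssh.lab_connection_keywords import LabConnectionKeywords
from keywords.cloud_platform.system.storage.system_storage_backend_keywords import SystemStorageBackendKeywords  # path assumed, not shown in this diff

ssh_connection = LabConnectionKeywords().get_active_controller_ssh()
backend_keywords = SystemStorageBackendKeywords(ssh_connection)

# Sends "system storage-backend-modify ceph-rook-store replication=2 min_replication=1" and asserts a zero return code.
backend_keywords.system_storage_backend_modify(backend="ceph-rook", replication=2, min_replication=1)

# The *_with_error variant returns the CLI output (the expected error messages) instead of asserting success.
error_lines = backend_keywords.system_storage_backend_modify_with_error(backend="ceph-rook", min_replication=99)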
@@ -48,11 +48,10 @@ class CephOsdPoolLsDetailKeywords(BaseKeyword):
             bool: True if the pool replicated size is updated as expected

         """
-        output = self.ssh_connection.send("ceph osd pool ls detail")
-        pool_object = CephOsdPoolLsDetailOutput(output).get_ceph_osd_pool(pool_name)
         replicated_update_timeout = time.time() + timeout

         while time.time() < replicated_update_timeout:
+            output = self.ssh_connection.send("ceph osd pool ls detail")
+            pool_object = CephOsdPoolLsDetailOutput(output).get_ceph_osd_pool(pool_name)
             ceph_osd_pool_replication_size = pool_object.get_replicated_size()
             if ceph_osd_pool_replication_size == expected_replicated_size:
                 return True
@@ -73,11 +72,10 @@ class CephOsdPoolLsDetailKeywords(BaseKeyword):
             bool: True if the pool min_size is updated as expected

         """
-        output = self.ssh_connection.send("ceph osd pool ls detail")
-        pool_object = CephOsdPoolLsDetailOutput(output).get_ceph_osd_pool(pool_name)
-        mini_update_timeout = time.time() + timeout
-
-        while time.time() < mini_update_timeout:
+        min_replicated_update_timeout = time.time() + timeout
+
+        while time.time() < min_replicated_update_timeout:
+            output = self.ssh_connection.send("ceph osd pool ls detail")
+            pool_object = CephOsdPoolLsDetailOutput(output).get_ceph_osd_pool(pool_name)
             ceph_osd_pool_min_size = pool_object.get_min_size()
             if ceph_osd_pool_min_size == expected_min_size:
                 return True
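Both hunks above move the "ceph osd pool ls detail" query inside the retry loop, so each pass re-reads the pool state instead of repeatedly inspecting a pool object fetched once before the loop. A generic sketch of that polling pattern (illustrative names, not the framework's classes):

import time

def wait_for_value(read_current_value, expected, timeout=120, poll_interval=5):
    """Re-query the current value on every iteration until it matches or the timeout expires."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        if read_current_value() == expected:  # fresh read each pass
            return True
        time.sleep(poll_interval)
    return False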
@@ -40,8 +40,6 @@ class CephStatusKeywords(BaseKeyword):
             False: ceph health status not match expect status

         """
-        if expect_health_status not in (True, False):
-            raise ValueError(f"expect_health_status:{expect_health_status} is not valid.")

         def get_ceph_health_status():
             output = self.ssh_connection.send("ceph -s")
@@ -49,3 +49,54 @@ class SystemStorageBackendKeywords(BaseKeyword):

         self.ssh_connection.send(source_openrc(f"system storage-backend-add {backend} {extra_args}"))
         self.validate_success_return_code(self.ssh_connection)
+
+    def system_storage_backend_modify(self, backend: str, services: str = None, deployment_model: str = None, replication: int = 0, min_replication: int = 0):
+        """
+        Modify the storage backend
+
+        Args:
+            backend (str): the backend name ceph or ceph-rook
+            services (str): new services value
+            deployment_model (str): new deployment_model
+            replication (int): new replication value
+            min_replication (int): new min_replication value
+
+        """
+        extra_args = ""
+        if deployment_model:
+            extra_args += f"--deployment {deployment_model} "
+        if services:
+            extra_args += f"--services {services} "
+        if replication > 0:
+            extra_args += f"replication={replication} "
+        if min_replication > 0:
+            extra_args += f"min_replication={min_replication} "
+
+        self.ssh_connection.send(source_openrc(f"system storage-backend-modify {backend}-store {extra_args}"))
+        self.validate_success_return_code(self.ssh_connection)
+
+    def system_storage_backend_modify_with_error(self, backend: str, services: str = None, deployment_model: str = None, replication: int = 0, min_replication: int = 0) -> list[str]:
+        """
+        Run the "system storage-backend-modify" command with invalid arguments
+
+        Args:
+            backend (str): the backend name ceph or ceph-rook
+            services (str): new services value
+            deployment_model (str): new deployment_model
+            replication (int): new replication value
+            min_replication (int): new min_replication value
+
+        Returns:
+            list[str]: a list of error messages
+
+        """
+        extra_args = ""
+        if deployment_model:
+            extra_args += f"--deployment {deployment_model} "
+        if services:
+            extra_args += f"--services {services} "
+        if replication > 0:
+            extra_args += f"replication={replication} "
+        if min_replication > 0:
+            extra_args += f"min_replication={min_replication} "
+
+        msg = self.ssh_connection.send(source_openrc(f"system storage-backend-modify {backend}-store {extra_args}"))
+        return msg
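To make the argument handling concrete, here is a small standalone illustration of how the extra_args string in the new methods composes into the final CLI call; it mirrors the f-strings in the hunk above and is not part of the change itself:

# Illustration only: mirrors how system_storage_backend_modify() assembles its command string.
def build_modify_command(backend="ceph-rook", deployment_model=None, services=None, replication=0, min_replication=0):
    extra_args = ""
    if deployment_model:
        extra_args += f"--deployment {deployment_model} "
    if services:
        extra_args += f"--services {services} "
    if replication > 0:
        extra_args += f"replication={replication} "
    if min_replication > 0:
        extra_args += f"min_replication={min_replication} "
    return f"system storage-backend-modify {backend}-store {extra_args}".strip()

print(build_modify_command(deployment_model="open", replication=2))
# -> system storage-backend-modify ceph-rook-store --deployment open replication=2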
@@ -2,6 +2,8 @@ from pytest import mark

 from framework.logging.automation_logger import get_logger
 from framework.validation.validation import validate_equals, validate_equals_with_retry, validate_str_contains
+from keywords.ceph.ceph_osd_pool_ls_detail_keywords import CephOsdPoolLsDetailKeywords
+from keywords.ceph.ceph_status_keywords import CephStatusKeywords
 from keywords.cloud_platform.ssh.lab_connection_keywords import LabConnectionKeywords
 from keywords.cloud_platform.system.host.system_host_fs_keywords import SystemHostFSKeywords
 from keywords.cloud_platform.system.host.system_host_list_keywords import SystemHostListKeywords
@@ -84,3 +86,132 @@ def test_ceph_rook_host_fs_operation():
     get_logger().log_test_case_step(f"\n\nDelete {fs_name} file system on {host}.\n")
     system_host_fs_keywords.system_host_fs_delete(hostname=host, fs_name=fs_name)
     validate_equals_with_retry(is_filesystem_exist, False, f"{fs_name} should be deleted in 2 mins", 120, 5)
+
+
+@mark.p2
+@mark.lab_ceph_rook
+def test_ceph_rook_capabilities_testing_open_model(request):
+    """
+    Test case: [TC_34918] WRCPPV-1015 ceph-rook backend capabilities testing for open model
+
+    Test Steps:
+        - Make sure the storage backend is ceph-rook.
+        - Get original capabilities value
+        - Test "open" model
+        - Modify replication between 1:Max OSD number
+        - Modify min_replication value
+        - Check whether "ceph osd pool ls detail" is aligned with the current replication
+
+    Teardown:
+        - Restore storage backend capabilities value to original value
+        - Check whether "ceph osd pool ls detail" is aligned with the current replication
+    """
+    ssh_connection = LabConnectionKeywords().get_active_controller_ssh()
+    system_storage_backend_keywords = SystemStorageBackendKeywords(ssh_connection)
+
+    get_logger().log_test_case_step("\n\nCheck whether rook-ceph is configured as storage backend.\n")
+
+    if not system_storage_backend_keywords.get_system_storage_backend_list().is_backend_configured("ceph-rook"):
+        get_logger().log_test_case_step("\n\nAdd rook-ceph as storage backend.\n")
+        system_storage_backend_keywords.system_storage_backend_add(backend="ceph-rook", confirmed=True)
+
+    backend_name = system_storage_backend_keywords.get_system_storage_backend_list().get_system_storage_backend("ceph-rook").get_backend()
+    get_logger().log_info(f"Backend is: {backend_name}")
+
+    get_logger().log_test_case_step("\n\nGet original backend capabilities value.\n")
+    original_capa_obj = system_storage_backend_keywords.get_system_storage_backend_list().get_system_storage_backend("ceph-rook").get_capabilities()
+    original_deployment_model = original_capa_obj.get_deployment_model()
+    original_replication = original_capa_obj.get_replication()
+    original_min_replication = original_capa_obj.get_min_replication()
+    get_logger().log_info(f"\nOriginal Capabilities deployment model is: {original_deployment_model}" f"\nOriginal Capabilities replication value is: {original_replication}" f"\nOriginal Capabilities min_replication value is: {original_min_replication}\n")
+
+    def teardown_restore_original_capabilities():
+        get_logger().log_test_case_step("\n\nRestore capabilities values to original if the original value was changed.\n")
+        curr_capa_obj = system_storage_backend_keywords.get_system_storage_backend_list().get_system_storage_backend("ceph-rook").get_capabilities()
+        curr_deployment_model = curr_capa_obj.get_deployment_model()
+        curr_replication = curr_capa_obj.get_replication()
+        curr_min_replication = curr_capa_obj.get_min_replication()
+        get_logger().log_info(f"\nCurrent Capabilities deployment model is: {curr_deployment_model}" f"\nCurrent Capabilities replication value is: {curr_replication}" f"\nCurrent Capabilities min_replication value is: {curr_min_replication}\n" f"\nOriginal Capabilities deployment model is: {original_deployment_model}" f"\nOriginal Capabilities replication value is: {original_replication}" f"\nOriginal Capabilities min_replication value is: {original_min_replication}\n")
+
+        if curr_replication != original_replication or curr_min_replication != original_min_replication:
+            get_logger().log_info(f"\n\nModify replication value to {original_replication} and min_replication value to {original_min_replication}.\n")
+            system_storage_backend_keywords.system_storage_backend_modify(backend="ceph-rook", replication=original_replication, min_replication=original_min_replication)
+
+        if curr_deployment_model != original_deployment_model:
+            get_logger().log_info(f"\n\nModify deployment model to {original_deployment_model}.\n")
+            system_storage_backend_keywords.system_storage_backend_modify(backend="ceph-rook", deployment_model=original_deployment_model)
+
+        curr_capa_obj = system_storage_backend_keywords.get_system_storage_backend_list().get_system_storage_backend("ceph-rook").get_capabilities()
+        curr_deployment_model = curr_capa_obj.get_deployment_model()
+        curr_replication = curr_capa_obj.get_replication()
+        curr_min_replication = curr_capa_obj.get_min_replication()
+        get_logger().log_info(f"\nCurrent Capabilities deployment model is: {curr_deployment_model}" f"\nCurrent Capabilities replication value is: {curr_replication}" f"\nCurrent Capabilities min_replication value is: {curr_min_replication}\n")
+        validate_equals(curr_deployment_model, original_deployment_model, f"deployment model should be {original_deployment_model}")
+        validate_equals(curr_replication, original_replication, f"replication value should be {original_replication}")
+        validate_equals(curr_min_replication, original_min_replication, f"min_replication value should be {original_min_replication}")
+
+        get_logger().log_test_case_step("\n\nCheck whether 'ceph osd pool ls detail' is aligned with replication.\n")
+        ceph_pool_keywords = CephOsdPoolLsDetailKeywords(ssh_connection)
+        pool_update = ceph_pool_keywords.wait_for_ceph_osd_pool_replicated_size_update(pool_name=".mgr", expected_replicated_size=original_replication)
+        validate_equals(pool_update, True, "Replicated value is updated.")
+
+        pool_update = ceph_pool_keywords.wait_for_ceph_osd_pool_min_size_update(pool_name=".mgr", expected_min_size=original_min_replication)
+        validate_equals(pool_update, True, "min_size value is updated.")
+
+    request.addfinalizer(teardown_restore_original_capabilities)
+
+    get_logger().log_test_case_step("\n\nMake sure the deployment model is open.\n")
+    curr_deployment_model = original_deployment_model
+    if original_deployment_model != "open":
+        get_logger().log_test_case_step(f"\n\nModify deployment_model from '{original_deployment_model}' to 'open'.\n")
+        system_storage_backend_keywords.system_storage_backend_modify(backend="ceph-rook", deployment_model="open")
+        curr_deployment_model = system_storage_backend_keywords.get_system_storage_backend_list().get_system_storage_backend("ceph-rook").get_capabilities().get_deployment_model()
+        validate_equals(curr_deployment_model, "open", "Current deployment model should be 'open'.")
+
+    get_logger().log_info(f"\nCurrent Capabilities deployment model is: {curr_deployment_model};")
+
+    get_logger().log_test_case_step("\n\nGet OSD number.\n")
+    ceph_status_keywords = CephStatusKeywords(ssh_connection)
+    ceph_status_output = ceph_status_keywords.ceph_status()
+    osd_number = ceph_status_output.get_ceph_osd_count()
+    get_logger().log_info(f"\nOSD number is: {osd_number};")
+
+    # replication value should not be greater than the osd number, and should not be zero
+    if original_replication < osd_number:
+        new_replication_value = osd_number
+    elif original_replication == osd_number and original_replication != 1:
+        new_replication_value = osd_number - 1
+    else:
+        raise ValueError(f"System has {osd_number} osd, but replication is {original_replication}, TC should not run.")
+
+    get_logger().log_test_case_step(f"\n\nModify replication from {original_replication} to {new_replication_value}.\n")
+    system_storage_backend_keywords.system_storage_backend_modify(backend="ceph-rook", replication=new_replication_value)
+    curr_replication = system_storage_backend_keywords.get_system_storage_backend_list().get_system_storage_backend("ceph-rook").get_capabilities().get_replication()
+    get_logger().log_info(f"\nCurrent Capabilities replication value is: {curr_replication};")
+    validate_equals(curr_replication, new_replication_value, f"replication value should be {new_replication_value}")
+
+    get_logger().log_test_case_step("\n\nCheck whether 'ceph osd pool ls detail' is aligned with replication.\n")
+    ceph_pool_keywords = CephOsdPoolLsDetailKeywords(ssh_connection)
+    pool_update = ceph_pool_keywords.wait_for_ceph_osd_pool_replicated_size_update(pool_name=".mgr", expected_replicated_size=curr_replication)
+    validate_equals(pool_update, True, "Replicated value should be updated.")
+
+    get_logger().log_test_case_step("\n\nModifying min_replication to a value greater than the replication value should be rejected.\n")
+    new_min_replication_value = curr_replication + 1
+    msg = system_storage_backend_keywords.system_storage_backend_modify_with_error(backend="ceph-rook", min_replication=new_min_replication_value)
+    validate_str_contains(msg[0], "must be greater than", f"system backend modify should fail: {msg[0]}")
+
+    curr_min_replication = system_storage_backend_keywords.get_system_storage_backend_list().get_system_storage_backend("ceph-rook").get_capabilities().get_min_replication()
+
+    if curr_min_replication > 1:
+        new_min_replication_value = 1
+        get_logger().log_test_case_step(f"\n\nModify min_replication from {curr_min_replication} to {new_min_replication_value}.\n")
+        system_storage_backend_keywords.system_storage_backend_modify(backend="ceph-rook", min_replication=new_min_replication_value)
+        curr_min_replication = system_storage_backend_keywords.get_system_storage_backend_list().get_system_storage_backend("ceph-rook").get_capabilities().get_min_replication()
+        get_logger().log_info(f"\nCurrent Capabilities min_replication value is: {curr_min_replication};")
+        validate_equals(curr_min_replication, new_min_replication_value, f"min_replication value should be {new_min_replication_value}")
+
+    get_logger().log_test_case_step("\n\nCheck whether 'ceph osd pool ls detail' is aligned with min_replication.\n")
+    ceph_pool_keywords = CephOsdPoolLsDetailKeywords(ssh_connection)
+    pool_update = ceph_pool_keywords.wait_for_ceph_osd_pool_min_size_update(pool_name=".mgr", expected_min_size=curr_min_replication)
+    validate_equals(pool_update, True, "Replicated min_size is updated.")