Add virt/libvirt error test cases
This patch adds libvirt functional tests covering error cases. It also changes the libvirt fixture to handle libvirt filesystems that will be used by shares.

Manila is the OpenStack Shared Filesystems service. This series of patches implements the changes required in Nova to allow shares provided by Manila to be associated with and attached to instances using virtiofs.

Implements: blueprint libvirt-virtiofs-attach-manila-shares
Change-Id: I5e64cb56585654bc04d0fc3ba86d76a19f9636aa
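For context on what `_assert_filesystem_tag` verifies in the tests below: with virtiofs, libvirt exposes the share to the guest as a `<filesystem>` device whose `<target dir>` attribute carries the mount tag (the share id, or the user-supplied tag). The snippet below is only an illustrative sketch of that check, not the actual helper from the functional test base class; the function names and signatures are assumptions made for the example.

# Illustrative sketch only (assumption): how a test could check that the guest
# domain XML contains a virtiofs <filesystem> device whose <target dir>
# matches the expected tag. The real _assert_filesystem_tag helper lives in
# the functional test base class and may differ.
from lxml import etree


def find_filesystem_tags(domain_xml: str) -> list:
    """Return the <target dir> values of all <filesystem> devices."""
    root = etree.fromstring(domain_xml)
    return [
        target.get('dir')
        for target in root.findall('./devices/filesystem/target')
    ]


def assert_filesystem_tag(domain_xml: str, tag: str) -> None:
    tags = find_filesystem_tags(domain_xml)
    assert tag in tags, 'virtiofs tag %r not found, got %r' % (tag, tags)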
@@ -12,20 +12,24 @@
import fixtures
from lxml import etree
import os
from requests import request

from nova import context as nova_context
from nova import exception
from nova.objects import instance
from nova.objects import share_mapping
from nova.tests import fixtures as nova_fixtures
from nova.tests.functional.api import client
from nova.tests.functional.libvirt import base

from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils

from unittest import mock


CONF = cfg.CONF
LOG = logging.getLogger(__name__)
@@ -136,6 +140,76 @@ class ServerSharesTest(ServerSharesTestBase):
        self._get_metadata_url(server), share_id, share_id)
        return (server, share_id)

    def test_server_share_metadata_with_tag(self):
        """Verify that share metadata are available with the provided tag"""
        with mock.patch(
            'nova.virt.libvirt.volume.nfs.LibvirtNFSVolumeDriver.'
            'disconnect_volume'
        ), mock.patch(
            'nova.virt.libvirt.volume.nfs.LibvirtNFSVolumeDriver.'
            'connect_volume'
        ):
            traits = self._get_provider_traits(
                self.compute_rp_uuids[self.compute])
            for trait in (
                    'COMPUTE_STORAGE_VIRTIO_FS', 'COMPUTE_MEM_BACKING_FILE'):
                self.assertIn(trait, traits)
            server = self._create_server(networks='auto')
            self._stop_server(server)

            share_id = '4b021746-d0eb-4031-92aa-23c3bec182cd'
            tag = 'mytag'
            self._attach_share(server, share_id, tag=tag)
            self._start_server(server)

            # tag is the filesystem target directory.
            # if post /server/{server_id}/share was called without a specific
            # tag then the tag is the share id.
            self._assert_filesystem_tag(self._get_xml(server), tag)

            self._assert_share_in_metadata(
                self._get_metadata_url(server), share_id, tag)
            return (server, share_id)

    def test_server_share_fails_with_tag_already_used(self):
        """Verify that share create fails if we use an already assigned tag"""
        with mock.patch(
            'nova.virt.libvirt.volume.nfs.LibvirtNFSVolumeDriver.'
            'disconnect_volume'
        ), mock.patch(
            'nova.virt.libvirt.volume.nfs.LibvirtNFSVolumeDriver.'
            'connect_volume'
        ):
            traits = self._get_provider_traits(
                self.compute_rp_uuids[self.compute])
            for trait in (
                    'COMPUTE_STORAGE_VIRTIO_FS', 'COMPUTE_MEM_BACKING_FILE'):
                self.assertIn(trait, traits)
            server = self._create_server(networks='auto')
            self._stop_server(server)

            share_id = '4b021746-d0eb-4031-92aa-23c3bec182cd'
            tag = 'mytag'
            self._attach_share(server, share_id, tag=tag)

            share_id = '1457bd85-7e7f-4835-92de-47834e1516b5'
            tag = 'mytag'

            exc = self.assertRaises(
                client.OpenStackApiException,
                self._attach_share,
                server,
                share_id,
                tag=tag
            )

            self.assertEqual(409, exc.response.status_code)
            self.assertIn(
                "Share '1457bd85-7e7f-4835-92de-47834e1516b5' or "
                "tag 'mytag' already associated to this server.",
                str(exc.response.text),
            )

    def test_server_cephfs_share_metadata(self):
        """Verify that cephfs share metadata are available"""
        with mock.patch(
@@ -173,3 +247,268 @@ class ServerSharesTest(ServerSharesTestBase):
        self._assert_share_in_metadata(
            self._get_metadata_url(server), share_id, share_id)
        return (server, share_id)

    def test_server_share_after_hard_reboot(self):
        """Verify that share is still available after a reboot"""
        server, share_id = self.test_server_share_metadata()
        self._reboot_server(server, hard=True)

        self._assert_filesystem_tag(self._get_xml(server), share_id)

        self._assert_share_in_metadata(
            self._get_metadata_url(server), share_id, share_id)

    def test_server_share_mount_failure(self):
        os.environ['OS_DEBUG'] = "true"
        traits = self._get_provider_traits(self.compute_rp_uuids[self.compute])
        for trait in ('COMPUTE_STORAGE_VIRTIO_FS', 'COMPUTE_MEM_BACKING_FILE'):
            self.assertIn(trait, traits)
        with mock.patch(
            'nova.virt.libvirt.volume.nfs.LibvirtNFSVolumeDriver'
            '.connect_volume',
            side_effect=processutils.ProcessExecutionError
        ):
            server = self._create_server(networks='auto')
            self._stop_server(server)

            share_id = '4b021746-d0eb-4031-92aa-23c3bec182cd'

            self._attach_share(server, share_id)
            response = self._get_share(server, share_id)

            self.assertEqual(
                response["share_id"], "4b021746-d0eb-4031-92aa-23c3bec182cd"
            )
            self.assertEqual(response["status"], "inactive")

            # Here we are using CastAsCallFixture so we get an exception from
            # nova compute. This should not happen without the fixture and
            # the api should answer with a 201 status code.
            exc = self.assertRaises(
                client.OpenStackApiException,
                self._start_server,
                server
            )

            self.assertIn("nova.exception.ShareMountError", str(exc))

            log_out = self.stdlog.logger.output

            self.assertIn(
                "Share id 4b021746-d0eb-4031-92aa-23c3bec182cd mount error "
                "from server",
                log_out)

            sm = share_mapping.ShareMapping.get_by_instance_uuid_and_share_id(
                self.context, server['id'], share_id)
            self.assertEqual(sm.status, 'error')
            self.instance = instance.Instance.get_by_uuid(
                self.context, server['id'])
            self.assertEqual(self.instance.vm_state, 'error')
            return (server, share_id)

    def test_server_start_fails_share_in_error(self):
        """Ensure a server cannot start if its attached share is in error
        status, and that hard reboot attempts allow the server to restart as
        soon as the share issue is fixed.
        """
        server, share_id = self.test_server_share_mount_failure()
        self._verify_start_fails_share_in_error(server, share_id)

        server, share_id = self.test_server_share_umount_failure()
        self._verify_start_fails_share_in_error(server, share_id)

    def _verify_start_fails_share_in_error(self, server, share_id):
        exc = self.assertRaises(
            client.OpenStackApiException,
            self._start_server,
            server
        )

        # Try to start a vm in error state.
        self.assertEqual(exc.response.status_code, 409)
        self.assertIn("Cannot \'start\' instance", exc.response.text)
        self.assertIn("while it is in vm_state error", exc.response.text)

        # Reboot to do another mount attempt and fix the error.
        # But the error is not fixed.
        with mock.patch(
            'nova.virt.libvirt.volume.nfs.LibvirtNFSVolumeDriver'
            '.connect_volume',
            side_effect=processutils.ProcessExecutionError
        ):

            # Here we are using CastAsCallFixture so we get an exception from
            # nova api. This should not happen without the fixture.
            exc = self.assertRaises(
                client.OpenStackApiException,
                self._reboot_server,
                server,
                hard=True
            )

            log_out = self.stdlog.logger.output

            self.assertIn(
                "Share id 4b021746-d0eb-4031-92aa-23c3bec182cd mount error "
                "from server",
                log_out)

            sm = share_mapping.ShareMapping.get_by_instance_uuid_and_share_id(
                self.context, server['id'], share_id)
            self.assertEqual(sm.status, 'error')
            self.instance = instance.Instance.get_by_uuid(
                self.context, server['id'])
            self.assertEqual(self.instance.vm_state, 'error')

        # Reboot to do another mount attempt and fix the error.
        # But the error is fixed now.
        with mock.patch(
            'nova.virt.libvirt.volume.nfs.LibvirtNFSVolumeDriver'
            '.connect_volume',
        ):
            self._reboot_server(server, hard=True)

            sm = share_mapping.ShareMapping.get_by_instance_uuid_and_share_id(
                self.context, server['id'], share_id)
            self.assertEqual(sm.status, 'active')
            self.instance = instance.Instance.get_by_uuid(
                self.context, server['id'])
            self.assertEqual(self.instance.vm_state, 'active')

    def test_detach_server_share_in_error(self):
        """Ensure a share can still be detached even if
        the share is in an error state.
        """
        os.environ['OS_DEBUG'] = "true"
        server, share_id = self.test_server_share_umount_failure()

        # Simulate a detach attempt that fails due to an umount error.
        # In that case we should have an umount error.
        with mock.patch(
            'nova.virt.libvirt.volume.nfs.LibvirtNFSVolumeDriver'
            '.disconnect_volume',
            side_effect=processutils.ProcessExecutionError
        ):

            # Ensure we failed to umount the share
            log_out = self.stdlog.logger.output

            self.assertIn(
                "Share id 4b021746-d0eb-4031-92aa-23c3bec182cd umount error",
                log_out)

            # We detach the share. As a consequence, we are leaking the share
            # mounted on the compute.
            self._detach_share(server, share_id)

            # Share is removed, so it is no longer in the DB.
            self.assertRaises(
                exception.ShareNotFound,
                share_mapping.ShareMapping.get_by_instance_uuid_and_share_id,
                self.context,
                server['id'],
                share_id
            )
            self.instance = instance.Instance.get_by_uuid(
                self.context, server['id'])
            self.assertEqual(self.instance.vm_state, 'error')

        # Reboot the server to restart it without the share.
        self._reboot_server(server, hard=True)

        self.instance = instance.Instance.get_by_uuid(
            self.context, server['id'])
        self.assertEqual(self.instance.vm_state, 'active')

    @mock.patch('nova.virt.libvirt.volume.nfs.LibvirtNFSVolumeDriver'
                '.disconnect_volume',
                side_effect=processutils.ProcessExecutionError)
    @mock.patch('nova.virt.libvirt.volume.nfs.LibvirtNFSVolumeDriver'
                '.connect_volume')
    def test_server_share_umount_failure(self, mock_mount, mock_umount):
        os.environ['OS_DEBUG'] = "true"
        traits = self._get_provider_traits(self.compute_rp_uuids[self.compute])
        for trait in ('COMPUTE_STORAGE_VIRTIO_FS', 'COMPUTE_MEM_BACKING_FILE'):
            self.assertIn(trait, traits)
        server = self._create_server(networks='auto')
        self._stop_server(server)

        share_id = '4b021746-d0eb-4031-92aa-23c3bec182cd'
        self._attach_share(server, share_id)
        self._start_server(server)

        # Here we are using CastAsCallFixture so we get an exception from
        # nova compute. This should not happen without the fixture and
        # the api should answer with a 202 status code.
        exc = self.assertRaises(
            client.OpenStackApiException,
            self._stop_server,
            server,
        )

        self.assertIn("nova.exception.ShareUmountError", str(exc))

        log_out = self.stdlog.logger.output

        self.assertIn(
            "Share id 4b021746-d0eb-4031-92aa-23c3bec182cd umount error "
            "from server",
            log_out)

        sm = share_mapping.ShareMapping.get_by_instance_uuid_and_share_id(
            self.context, server['id'], share_id)
        self.assertEqual(sm.status, 'error')
        self.instance = instance.Instance.get_by_uuid(
            self.context, server['id'])
        self.assertEqual(self.instance.vm_state, 'error')
        return (server, share_id)

    def test_server_resume_with_shares(self):
        with mock.patch(
            'nova.virt.libvirt.volume.nfs.LibvirtNFSVolumeDriver.'
            'disconnect_volume'
        ), mock.patch(
            'nova.virt.libvirt.volume.nfs.LibvirtNFSVolumeDriver.'
            'connect_volume'
        ):
            server = self._create_server(networks='auto')
            self._stop_server(server)

            share_id = '4b021746-d0eb-4031-92aa-23c3bec182cd'
            self._attach_share(server, share_id)
            self._start_server(server)
            self.assertRaises(
                client.OpenStackApiException,
                self._suspend_server,
                server,
            )

    def test_server_rescue_with_shares(self):
        with mock.patch(
            'nova.virt.libvirt.volume.nfs.LibvirtNFSVolumeDriver.'
            'disconnect_volume'
        ), mock.patch(
            'nova.virt.libvirt.volume.nfs.LibvirtNFSVolumeDriver.'
            'connect_volume'
        ):
            server = self._create_server(networks='auto')
            self._stop_server(server)

            share_id = '4b021746-d0eb-4031-92aa-23c3bec182cd'
            self._attach_share(server, share_id)
            self._start_server(server)
            self._rescue_server(server)

            self._assert_filesystem_tag(self._get_xml(server), share_id)

            self._assert_share_in_metadata(
                self._get_metadata_url(server), share_id, share_id)

            self._unrescue_server(server)

            self._assert_filesystem_tag(self._get_xml(server), share_id)

            self._assert_share_in_metadata(
                self._get_metadata_url(server), share_id, share_id)
            return (server, share_id)