Merge "Handle swapping to a multiattach volume"

This commit is contained in:
Zuul
2018-01-18 19:57:08 +00:00
committed by Gerrit Code Review
2 changed files with 93 additions and 16 deletions

View File

@@ -5373,10 +5373,10 @@ class ComputeManager(manager.Manager):
self._detach_volume(context, bdm, instance,
attachment_id=attachment_id)
def _init_volume_connection(self, context, new_volume_id,
def _init_volume_connection(self, context, new_volume,
old_volume_id, connector, bdm,
new_attachment_id, mountpoint):
new_volume_id = new_volume['id']
if new_attachment_id is None:
# We're dealing with an old-style attachment so initialize the
# connection so we can get the connection_info.
@@ -5384,6 +5384,16 @@ class ComputeManager(manager.Manager):
new_volume_id,
connector)
else:
# Check for multiattach on the new volume and if True, check to
# see if the virt driver supports multiattach.
# TODO(mriedem): This is copied from DriverVolumeBlockDevice
# and should be consolidated into some common code at some point.
vol_multiattach = new_volume.get('multiattach', False)
virt_multiattach = self.driver.capabilities['supports_multiattach']
if vol_multiattach and not virt_multiattach:
raise exception.MultiattachNotSupportedByVirtDriver(
volume_id=new_volume_id)
# This is a new style attachment and the API created the new
# volume attachment and passed the id to the compute over RPC.
# At this point we need to update the new volume attachment with
@@ -5393,6 +5403,11 @@ class ComputeManager(manager.Manager):
context, new_attachment_id, connector,
mountpoint)['connection_info']
if vol_multiattach:
# This will be used by the volume driver to determine the
# proper disk configuration.
new_cinfo['multiattach'] = True
old_cinfo = jsonutils.loads(bdm['connection_info'])
if old_cinfo and 'serial' not in old_cinfo:
old_cinfo['serial'] = old_volume_id
@@ -5404,14 +5419,15 @@ class ComputeManager(manager.Manager):
return (old_cinfo, new_cinfo)
def _swap_volume(self, context, instance, bdm, connector,
old_volume_id, new_volume_id, resize_to,
old_volume_id, new_volume, resize_to,
new_attachment_id, is_cinder_migration):
new_volume_id = new_volume['id']
mountpoint = bdm['device_name']
failed = False
new_cinfo = None
try:
old_cinfo, new_cinfo = self._init_volume_connection(
context, new_volume_id, old_volume_id, connector,
context, new_volume, old_volume_id, connector,
bdm, new_attachment_id, mountpoint)
# NOTE(lyarwood): The Libvirt driver, the only virt driver
# currently implementing swap_volume, will modify the contents of
@@ -5552,7 +5568,8 @@ class ComputeManager(manager.Manager):
True if old_volume['status'] in ('retyping',
'migrating') else False)
old_vol_size = old_volume['size']
new_vol_size = self.volume_api.get(context, new_volume_id)['size']
new_volume = self.volume_api.get(context, new_volume_id)
new_vol_size = new_volume['size']
if new_vol_size > old_vol_size:
resize_to = new_vol_size
@@ -5564,7 +5581,7 @@ class ComputeManager(manager.Manager):
bdm,
connector,
old_volume_id,
new_volume_id,
new_volume,
resize_to,
new_attachment_id,
is_cinder_migration)

View File

@@ -2067,10 +2067,12 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
attachment_id=uuids.old_attachment_id,
connection_info='{"data": {}}', volume_size=1)
old_volume = {
'id': uuids.old_volume_id, 'size': 1, 'status': 'retyping'
'id': uuids.old_volume_id, 'size': 1, 'status': 'retyping',
'multiattach': False
}
new_volume = {
'id': uuids.new_volume_id, 'size': 1, 'status': 'reserved'
'id': uuids.new_volume_id, 'size': 1, 'status': 'reserved',
'multiattach': False
}
attachment_update.return_value = {"connection_info": {"data": {}}}
get_bdm.return_value = bdm
@@ -2138,10 +2140,12 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
attachment_id=uuids.old_attachment_id,
connection_info='{"data": {}}')
old_volume = {
'id': uuids.old_volume_id, 'size': 1, 'status': 'detaching'
'id': uuids.old_volume_id, 'size': 1, 'status': 'detaching',
'multiattach': False
}
new_volume = {
'id': uuids.new_volume_id, 'size': 2, 'status': 'reserved'
'id': uuids.new_volume_id, 'size': 2, 'status': 'reserved',
'multiattach': False
}
attachment_update.return_value = {"connection_info": {"data": {}}}
get_bdm.return_value = bdm
@@ -2181,8 +2185,9 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
self.assertEqual(uuids.new_volume_id, bdm.volume_id)
self.assertEqual(uuids.new_attachment_id, bdm.attachment_id)
self.assertEqual(2, bdm.volume_size)
self.assertEqual(uuids.new_volume_id,
jsonutils.loads(bdm.connection_info)['serial'])
new_conn_info = jsonutils.loads(bdm.connection_info)
self.assertEqual(uuids.new_volume_id, new_conn_info['serial'])
self.assertNotIn('multiattach', new_conn_info)
@mock.patch.object(compute_utils, 'add_instance_fault_from_exc')
@mock.patch.object(compute_utils, 'notify_about_volume_swap')
@@ -2209,10 +2214,12 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
attachment_id=uuids.old_attachment_id,
connection_info='{"data": {}}')
old_volume = {
'id': uuids.old_volume_id, 'size': 1, 'status': 'migrating'
'id': uuids.old_volume_id, 'size': 1, 'status': 'migrating',
'multiattach': False
}
new_volume = {
'id': uuids.new_volume_id, 'size': 1, 'status': 'reserved'
'id': uuids.new_volume_id, 'size': 1, 'status': 'reserved',
'multiattach': False
}
get_bdm.return_value = bdm
get_volume.side_effect = (old_volume, new_volume)
@@ -2272,10 +2279,12 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
attachment_id=uuids.old_attachment_id,
connection_info='{"data": {}}')
old_volume = {
'id': uuids.old_volume_id, 'size': 1, 'status': 'detaching'
'id': uuids.old_volume_id, 'size': 1, 'status': 'detaching',
'multiattach': False
}
new_volume = {
'id': uuids.new_volume_id, 'size': 2, 'status': 'reserved'
'id': uuids.new_volume_id, 'size': 2, 'status': 'reserved',
'multiattach': False
}
attachment_update.return_value = {"connection_info": {"data": {}}}
get_bdm.return_value = bdm
@@ -2317,6 +2326,57 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
# Cinder-initiated call, we don't call migrate_volume_completion.
migrate_volume_completion.assert_not_called()
@mock.patch('nova.volume.cinder.API.attachment_update')
def test_swap_volume_with_multiattach(self, attachment_update):
    """Swap to a multiattach volume when the virt driver supports it.

    Both the new volume and the compute driver report multiattach
    support, so ``_init_volume_connection`` must update the new
    attachment (created in the API) with the host connector and set
    the ``multiattach`` flag in the returned new connection_info.
    """
    # Host connector: only its identity matters for the mock assertion.
    connector = mock.sentinel.connector
    # BDM for the volume currently attached to the instance.
    bdm = objects.BlockDeviceMapping(
        volume_id=uuids.old_volume_id, device_name='/dev/vda',
        attachment_id=uuids.old_attachment_id,
        connection_info='{"data": {}}')
    # The swap target advertises multiattach support.
    new_volume = {
        'id': uuids.new_volume_id, 'size': 2, 'status': 'reserved',
        'multiattach': True
    }
    attachment_update.return_value = {"connection_info": {"data": {}}}
    # Patch the driver capability so the multiattach check passes.
    with mock.patch.dict(self.compute.driver.capabilities,
                         {'supports_multiattach': True}):
        _, new_conn_info = self.compute._init_volume_connection(
            self.context, new_volume, uuids.old_volume_id,
            connector, bdm, uuids.new_attachment_id, bdm.device_name)
    # The serial comes from the new volume and the multiattach flag
    # must be present and True in the new connection_info.
    self.assertEqual(uuids.new_volume_id, new_conn_info['serial'])
    self.assertIn('multiattach', new_conn_info)
    self.assertTrue(new_conn_info['multiattach'])
    # The new-style attachment was updated with our connector.
    attachment_update.assert_called_once_with(
        self.context, uuids.new_attachment_id, connector,
        bdm.device_name)
def test_swap_volume_with_multiattach_no_driver_support(self):
    """Swap to a multiattach volume when the virt driver lacks support.

    The volume being swapped-to reports multiattach support but the
    compute driver does not, so ``_init_volume_connection`` must raise
    ``MultiattachNotSupportedByVirtDriver`` and the swap fails.
    """
    connector = {'host': 'localhost'}
    # BDM for the volume currently attached to the instance.
    bdm = objects.BlockDeviceMapping(
        volume_id=uuids.old_volume_id, device_name='/dev/vda',
        attachment_id=uuids.old_attachment_id,
        connection_info='{"data": {}}')
    # The swap target advertises multiattach support.
    new_volume = {
        'id': uuids.new_volume_id, 'size': 2, 'status': 'reserved',
        'multiattach': True
    }
    # Force the driver capability off so the multiattach check fails.
    with mock.patch.dict(self.compute.driver.capabilities,
                         {'supports_multiattach': False}):
        self.assertRaises(exception.MultiattachNotSupportedByVirtDriver,
                          self.compute._init_volume_connection,
                          self.context, new_volume, uuids.old_volume_id,
                          connector, bdm, uuids.new_attachment_id,
                          bdm.device_name)
@mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid')
@mock.patch.object(fake_driver.FakeDriver,
'check_can_live_migrate_source')