iSCSI Volume support
* Rewrite of Volume code to make VolumeManager more generic
* AoE vs. iSCSI moved to driver layer
* Added db support for target ids
* Added code to re-export volumes on restart of VolumeManager
* Includes a script to create /dev/iscsi volumes on remote hosts
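In outline: the volume service now has a single generic VolumeManager, and everything transport-specific (AoE's shelves and blades vs. iSCSI's target ids) lives in a driver selected by the volume_driver flag via utils.import_object(). A minimal sketch of that dotted-path loading mechanism (hypothetical stand-in, not the nova implementation; demonstrated with a stdlib class):

    # Stand-in for nova's utils.import_object(): resolve a dotted path to a
    # class and instantiate it. This is what lets FLAGS.volume_driver pick
    # AOEDriver vs. ISCSIDriver without the manager knowing the difference.
    import importlib

    def import_object(dotted_path):
        module_name, _, class_name = dotted_path.rpartition(".")
        return getattr(importlib.import_module(module_name), class_name)()

    print(type(import_object("collections.OrderedDict")).__name__)  # OrderedDict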
@@ -164,10 +164,18 @@ class ComputeManager(manager.Manager):
         instance_ref = self.db.instance_get(context, instance_id)
         dev_path = yield self.volume_manager.setup_compute_volume(context,
                                                                   volume_id)
-        yield self.driver.attach_volume(instance_ref['ec2_id'],
-                                        dev_path,
-                                        mountpoint)
-        self.db.volume_attached(context, volume_id, instance_id, mountpoint)
+        try:
+            yield self.driver.attach_volume(instance_ref['name'],
+                                            dev_path,
+                                            mountpoint)
+            self.db.volume_attached(context,
+                                    volume_id,
+                                    instance_id,
+                                    mountpoint)
+        except Exception:
+            yield self.volume_manager.remove_compute_volume(context,
+                                                            volume_id)
+            raise
         defer.returnValue(True)

     @defer.inlineCallbacks
@@ -180,7 +188,12 @@ class ComputeManager(manager.Manager):
                       volume_id)
         instance_ref = self.db.instance_get(context, instance_id)
         volume_ref = self.db.volume_get(context, volume_id)
-        yield self.driver.detach_volume(instance_ref['ec2_id'],
-                                        volume_ref['mountpoint'])
+        if instance_ref['name'] not in self.driver.list_instances():
+            logging.warn("Detaching volume from instance %s that isn't running",
+                         instance_ref['name'])
+        else:
+            yield self.driver.detach_volume(instance_ref['name'],
+                                            volume_ref['mountpoint'])
+        yield self.volume_manager.remove_compute_volume(context, volume_id)
         self.db.volume_detached(context, volume_id)
         defer.returnValue(True)
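A note on the two compute hunks above: the new try/except pairs with remove_compute_volume() in the volume manager — if the hypervisor attach fails, the compute host undiscovers (logs out of) the volume it just set up instead of leaving a dangling session, then re-raises. Detach likewise warns rather than failing when the instance is no longer running, so the volume can still be cleaned up.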
@@ -48,6 +48,11 @@ class NoMoreNetworks(exception.Error):
     pass


+class NoMoreTargets(exception.Error):
+    """No more available targets"""
+    pass
+
+
 ###################
@@ -481,6 +486,23 @@ def export_device_create_safe(context, values):
 ###################


+def target_id_count_by_host(context, host):
+    """Return count of target ids for a host."""
+    return IMPL.target_id_count_by_host(context, host)
+
+
+def target_id_create_safe(context, values):
+    """Create a target_id from the values dictionary.
+
+    The device is not returned. If the create violates the unique
+    constraints because the target_id and host already exist,
+    no exception is raised."""
+    return IMPL.target_id_create_safe(context, values)
+
+
+###################
+
+
 def auth_destroy_token(context, token):
     """Destroy an auth token"""
     return IMPL.auth_destroy_token(context, token)
@@ -527,6 +549,11 @@ def volume_allocate_shelf_and_blade(context, volume_id):
     return IMPL.volume_allocate_shelf_and_blade(context, volume_id)


+def volume_allocate_target_id(context, volume_id, host):
+    """Atomically allocate a free target_id from the pool."""
+    return IMPL.volume_allocate_target_id(context, volume_id, host)
+
+
 def volume_attached(context, volume_id, instance_id, mountpoint):
     """Ensure that a volume is set as attached."""
     return IMPL.volume_attached(context, volume_id, instance_id, mountpoint)
@@ -562,9 +589,9 @@ def volume_get_all(context):
     return IMPL.volume_get_all(context)


-def volume_get_instance(context, volume_id):
-    """Get the instance that a volume is attached to."""
-    return IMPL.volume_get_instance(context, volume_id)
+def volume_get_all_by_host(context, host):
+    """Get all volumes belonging to a host."""
+    return IMPL.volume_get_all_by_host(context, host)


 def volume_get_all_by_project(context, project_id):
@@ -577,11 +604,21 @@ def volume_get_by_ec2_id(context, ec2_id):
     return IMPL.volume_get_by_ec2_id(context, ec2_id)


+def volume_get_instance(context, volume_id):
+    """Get the instance that a volume is attached to."""
+    return IMPL.volume_get_instance(context, volume_id)
+
+
 def volume_get_shelf_and_blade(context, volume_id):
     """Get the shelf and blade allocated to the volume."""
     return IMPL.volume_get_shelf_and_blade(context, volume_id)


+def volume_get_target_id(context, volume_id):
+    """Get the target id allocated to the volume."""
+    return IMPL.volume_get_target_id(context, volume_id)
+
+
 def volume_update(context, volume_id, values):
     """Set the given properties on a volume and update it.
@@ -1041,6 +1041,30 @@ def export_device_create_safe(context, values):
 ###################


+@require_admin_context
+def target_id_count_by_host(context, host):
+    session = get_session()
+    return session.query(models.TargetId).\
+                   filter_by(deleted=can_read_deleted(context)).\
+                   filter_by(host=host).\
+                   count()
+
+
+@require_admin_context
+def target_id_create_safe(context, values):
+    target_id_ref = models.TargetId()
+    for (key, value) in values.iteritems():
+        target_id_ref[key] = value
+    try:
+        target_id_ref.save()
+        return target_id_ref
+    except IntegrityError:
+        return None
+
+
+###################
+
+
 def auth_destroy_token(_context, token):
     session = get_session()
     session.delete(token)
@@ -1130,6 +1154,25 @@ def volume_allocate_shelf_and_blade(context, volume_id):
     return (export_device.shelf_id, export_device.blade_id)


+@require_admin_context
+def volume_allocate_target_id(context, volume_id, host):
+    session = get_session()
+    with session.begin():
+        target_id_ref = session.query(models.TargetId).\
+                                filter_by(volume=None).\
+                                filter_by(host=host).\
+                                filter_by(deleted=False).\
+                                with_lockmode('update').\
+                                first()
+        # NOTE(vish): if with_lockmode isn't supported, as in sqlite,
+        #             then this has concurrency issues
+        if not target_id_ref:
+            raise db.NoMoreTargets()
+        target_id_ref.volume_id = volume_id
+        session.add(target_id_ref)
+        return target_id_ref.target_id
+
+
 @require_admin_context
 def volume_attached(context, volume_id, instance_id, mountpoint):
     session = get_session()
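The allocation above is the heart of the "db support for target ids" bullet: with_lockmode('update') emits SELECT ... FOR UPDATE on engines that support it, so two concurrent create_volume calls cannot claim the same free row; the NOTE(vish) comment flags that sqlite ignores the lock. A standalone sketch of the same claim-a-free-row pattern, minus the locking (plain sqlite3, hypothetical table contents):

    import sqlite3

    conn = sqlite3.connect(":memory:")
    conn.execute("CREATE TABLE target_ids (id INTEGER PRIMARY KEY, "
                 "target_id INTEGER, host TEXT, volume_id INTEGER)")
    conn.executemany("INSERT INTO target_ids (target_id, host) VALUES (?, ?)",
                     [(i, "host1") for i in range(1, 9)])

    def allocate_target_id(conn, volume_id, host):
        # Equivalent of the filter_by(volume=None)...first() query above;
        # without FOR UPDATE two callers could fetch the same row here.
        row = conn.execute("SELECT id, target_id FROM target_ids WHERE "
                           "volume_id IS NULL AND host = ? LIMIT 1",
                           (host,)).fetchone()
        if row is None:
            raise LookupError("NoMoreTargets")
        conn.execute("UPDATE target_ids SET volume_id = ? WHERE id = ?",
                     (volume_id, row[0]))
        conn.commit()
        return row[1]

    print(allocate_target_id(conn, 42, "host1"))  # 1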
@@ -1181,6 +1224,9 @@ def volume_destroy(context, volume_id):
         session.execute('update export_devices set volume_id=NULL '
                         'where volume_id=:id',
                         {'id': volume_id})
+        session.execute('update target_ids set volume_id=NULL '
+                        'where volume_id=:id',
+                        {'id': volume_id})


 @require_admin_context
@@ -1222,6 +1268,17 @@ def volume_get(context, volume_id, session=None):
 def volume_get_all(context):
     session = get_session()
     return session.query(models.Volume).\
                    options(joinedload('instance')).\
                    filter_by(deleted=can_read_deleted(context)).\
                    all()
+
+
+@require_admin_context
+def volume_get_all_by_host(context, host):
+    session = get_session()
+    return session.query(models.Volume).\
+                   options(joinedload('instance')).\
+                   filter_by(host=host).\
+                   filter_by(deleted=can_read_deleted(context)).\
+                   all()
@@ -1232,6 +1289,7 @@ def volume_get_all_by_project(context, project_id):

     session = get_session()
     return session.query(models.Volume).\
+                   options(joinedload('instance')).\
                    filter_by(project_id=project_id).\
                    filter_by(deleted=can_read_deleted(context)).\
                    all()
@@ -1299,6 +1357,19 @@ def volume_get_shelf_and_blade(context, volume_id):
     return (result.shelf_id, result.blade_id)


+@require_admin_context
+def volume_get_target_id(context, volume_id):
+    session = get_session()
+    result = session.query(models.TargetId).\
+                     filter_by(volume_id=volume_id).\
+                     first()
+    if not result:
+        raise exception.NotFound('No target id found for volume %s' %
+                                 volume_id)
+
+    return result.target_id
+
+
 @require_context
 def volume_update(context, volume_id, values):
     session = get_session()
@@ -255,6 +255,11 @@ class Volume(BASE, NovaBase):
     display_name = Column(String(255))
     display_description = Column(String(255))

+    @property
+    def name(self):
+        return self.ec2_id
+
+
 class Quota(BASE, NovaBase):
     """Represents quota overrides for a project"""
@@ -290,6 +295,22 @@ class ExportDevice(BASE, NovaBase):
                               'ExportDevice.deleted==False)')


+class TargetId(BASE, NovaBase):
+    """Represents an iscsi target_id for a given host"""
+    __tablename__ = 'target_ids'
+    __table_args__ = (schema.UniqueConstraint("target_id", "host"),
+                      {'mysql_engine': 'InnoDB'})
+    id = Column(Integer, primary_key=True)
+    target_id = Column(Integer)
+    host = Column(String(255))
+    volume_id = Column(Integer, ForeignKey('volumes.id'), nullable=True)
+    volume = relationship(Volume,
+                          backref=backref('target_id', uselist=False),
+                          foreign_keys=volume_id,
+                          primaryjoin='and_(TargetId.volume_id==Volume.id,'
+                                      'TargetId.deleted==False)')
+
+
 class SecurityGroupInstanceAssociation(BASE, NovaBase):
     __tablename__ = 'security_group_instance_association'
     id = Column(Integer, primary_key=True)
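The UniqueConstraint on (target_id, host) above is what makes target_id_create_safe() in the db layer safe to call repeatedly when pre-seeding the pool: a duplicate insert fails with IntegrityError and is deliberately swallowed. A rough illustration of the same idea (plain sqlite3, not the SQLAlchemy code path):

    import sqlite3

    conn = sqlite3.connect(":memory:")
    conn.execute("CREATE TABLE target_ids (id INTEGER PRIMARY KEY, "
                 "target_id INTEGER, host TEXT, UNIQUE (target_id, host))")

    def target_id_create_safe(conn, target_id, host):
        try:
            conn.execute("INSERT INTO target_ids (target_id, host) "
                         "VALUES (?, ?)", (target_id, host))
            return True
        except sqlite3.IntegrityError:
            return None  # row already seeded; silently ignore

    target_id_create_safe(conn, 1, "host1")
    print(target_id_create_safe(conn, 1, "host1"))  # None -- duplicate ignored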
@@ -510,7 +531,7 @@ class FloatingIp(BASE, NovaBase):
 def register_models():
     """Register Models and create metadata"""
     from sqlalchemy import create_engine
-    models = (Service, Instance, Volume, ExportDevice, FixedIp,
+    models = (Service, Instance, Volume, ExportDevice, TargetId, FixedIp,
               FloatingIp, Network, SecurityGroup,
               SecurityGroupIngressRule, SecurityGroupInstanceAssociation,
               AuthToken, User, Project)  # , Image, Host
@@ -224,7 +224,7 @@ DEFINE_string('compute_manager', 'nova.compute.manager.ComputeManager',
               'Manager for compute')
 DEFINE_string('network_manager', 'nova.network.manager.VlanManager',
               'Manager for network')
-DEFINE_string('volume_manager', 'nova.volume.manager.AOEManager',
+DEFINE_string('volume_manager', 'nova.volume.manager.VolumeManager',
               'Manager for volume')
 DEFINE_string('scheduler_manager', 'nova.scheduler.manager.SchedulerManager',
               'Manager for scheduler')
@@ -21,7 +21,7 @@ from nova import flags
 FLAGS = flags.FLAGS

 flags.DECLARE('volume_driver', 'nova.volume.manager')
-FLAGS.volume_driver = 'nova.volume.driver.FakeAOEDriver'
+FLAGS.volume_driver = 'nova.volume.driver.FakeISCSIDriver'
 FLAGS.connection_type = 'fake'
 FLAGS.fake_rabbit = True
 FLAGS.auth_driver = 'nova.auth.dbdriver.DbDriver'
@@ -31,9 +31,11 @@ flags.DECLARE('fake_network', 'nova.network.manager')
 FLAGS.network_size = 16
 FLAGS.num_networks = 5
 FLAGS.fake_network = True
-flags.DECLARE('num_shelves', 'nova.volume.manager')
-flags.DECLARE('blades_per_shelf', 'nova.volume.manager')
+flags.DECLARE('num_shelves', 'nova.volume.driver')
+flags.DECLARE('blades_per_shelf', 'nova.volume.driver')
+flags.DECLARE('iscsi_target_ids', 'nova.volume.driver')
 FLAGS.num_shelves = 2
 FLAGS.blades_per_shelf = 4
+FLAGS.iscsi_target_ids = 8
 FLAGS.verbose = True
 FLAGS.sql_connection = 'sqlite:///nova.sqlite'
@@ -83,9 +83,9 @@ class VolumeTestCase(test.TrialTestCase):

     @defer.inlineCallbacks
     def test_too_many_volumes(self):
-        """Ensure that NoMoreBlades is raised when we run out of volumes"""
+        """Ensure that NoMoreTargets is raised when we run out of volumes"""
         vols = []
-        total_slots = FLAGS.num_shelves * FLAGS.blades_per_shelf
+        total_slots = FLAGS.iscsi_target_ids
         for _index in xrange(total_slots):
             volume_id = self._create_volume()
             yield self.volume.create_volume(self.context, volume_id)
@@ -93,7 +93,7 @@ class VolumeTestCase(test.TrialTestCase):
         volume_id = self._create_volume()
         self.assertFailure(self.volume.create_volume(self.context,
                                                      volume_id),
-                           db.NoMoreBlades)
+                           db.NoMoreTargets)
         db.volume_destroy(context.get_admin_context(), volume_id)
         for volume_id in vols:
             yield self.volume.delete_volume(self.context, volume_id)
@@ -148,23 +148,21 @@ class VolumeTestCase(test.TrialTestCase):
         db.instance_destroy(self.context, instance_id)

     @defer.inlineCallbacks
-    def test_concurrent_volumes_get_different_blades(self):
-        """Ensure multiple concurrent volumes get different blades"""
+    def test_concurrent_volumes_get_different_targets(self):
+        """Ensure multiple concurrent volumes get different targets"""
         volume_ids = []
-        shelf_blades = []
+        targets = []

         def _check(volume_id):
-            """Make sure blades aren't duplicated"""
+            """Make sure targets aren't duplicated"""
             volume_ids.append(volume_id)
             admin_context = context.get_admin_context()
-            (shelf_id, blade_id) = db.volume_get_shelf_and_blade(admin_context,
-                                                                 volume_id)
-            shelf_blade = '%s.%s' % (shelf_id, blade_id)
-            self.assert_(shelf_blade not in shelf_blades)
-            shelf_blades.append(shelf_blade)
-            logging.debug("Blade %s allocated", shelf_blade)
+            target_id = db.volume_get_target_id(admin_context, volume_id)
+            self.assert_(target_id not in targets)
+            targets.append(target_id)
+            logging.debug("Target %s allocated", target_id)
         deferreds = []
-        total_slots = FLAGS.num_shelves * FLAGS.blades_per_shelf
+        total_slots = FLAGS.iscsi_target_ids
         for _index in xrange(total_slots):
             volume_id = self._create_volume()
             d = self.volume.create_volume(self.context, volume_id)
@@ -27,6 +27,7 @@ from twisted.internet import defer
 from nova import exception
 from nova import flags
 from nova import process
+from nova import utils


 FLAGS = flags.FLAGS
@@ -36,12 +37,29 @@ flags.DEFINE_string('aoe_eth_dev', 'eth0',
                     'Which device to export the volumes on')
 flags.DEFINE_string('num_shell_tries', 3,
                     'number of times to attempt to run flakey shell commands')
+flags.DEFINE_integer('num_shelves',
+                     100,
+                     'Number of vblade shelves')
+flags.DEFINE_integer('blades_per_shelf',
+                     16,
+                     'Number of vblade blades per shelf')
+flags.DEFINE_integer('iscsi_target_ids',
+                     100,
+                     'Number of iscsi target ids per host')
+flags.DEFINE_string('iscsi_target_prefix', 'iqn.2010-10.org.openstack:',
+                    'prefix for iscsi volumes')
+flags.DEFINE_string('iscsi_ip_prefix', '127.0.0',
+                    'only connect to the specified ip')


-class AOEDriver(object):
-    """Executes commands relating to AOE volumes"""
-    def __init__(self, execute=process.simple_execute, *args, **kwargs):
+class VolumeDriver(object):
+    """Executes commands relating to Volumes"""
+    def __init__(self, execute=process.simple_execute,
+                 sync_exec=utils.execute, *args, **kwargs):
+        # NOTE(vish): db is set by Manager
+        self.db = None
         self._execute = execute
+        self._sync_exec = sync_exec

     @defer.inlineCallbacks
     def _try_execute(self, command):
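One detail worth calling out in the constructor above: the driver now carries two executors. _execute (nova.process.simple_execute) returns a Deferred and serves the normal asynchronous paths, while _sync_exec (nova.utils.execute) blocks, and is used for calls such as check_for_setup_error() and ensure_export() that run while the service is initializing and cannot conveniently wait on a Deferred.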
@@ -61,55 +79,93 @@ class AOEDriver(object):
                                   "Try number %s", tries)
                 yield self._execute("sleep %s" % tries ** 2)

-    @defer.inlineCallbacks
-    def create_volume(self, volume_name, size):
-        """Creates a logical volume"""
+    def check_for_setup_error(self):
+        """Returns an error if prerequisites aren't met"""
         # NOTE(vish): makes sure that the volume group exists
-        yield self._execute("vgs %s" % FLAGS.volume_group)
-        if int(size) == 0:
+        (_out, err) = self._sync_exec("vgs %s" % FLAGS.volume_group,
+                                      check_exit_code=False)
+        if err:
+            raise exception.Error(err)
+
+    @defer.inlineCallbacks
+    def create_volume(self, volume):
+        """Creates a logical volume"""
+        if int(volume['size']) == 0:
             sizestr = '100M'
         else:
-            sizestr = '%sG' % size
+            sizestr = '%sG' % volume['size']
         yield self._try_execute("sudo lvcreate -L %s -n %s %s" %
                                 (sizestr,
-                                 volume_name,
+                                 volume['name'],
                                  FLAGS.volume_group))

     @defer.inlineCallbacks
-    def delete_volume(self, volume_name):
+    def delete_volume(self, volume):
         """Deletes a logical volume"""
         yield self._try_execute("sudo lvremove -f %s/%s" %
                                 (FLAGS.volume_group,
-                                 volume_name))
+                                 volume['name']))

     @defer.inlineCallbacks
-    def create_export(self, volume_name, shelf_id, blade_id):
+    def local_path(self, volume):
+        defer.returnValue("/dev/%s/%s" % (FLAGS.volume_group, volume['name']))
+
+    def ensure_export(self, context, volume):
+        """Safely and synchronously recreates an export for a logical volume"""
+        raise NotImplementedError()
+
+    @defer.inlineCallbacks
+    def create_export(self, context, volume):
+        """Exports the volume"""
+        raise NotImplementedError()
+
+    @defer.inlineCallbacks
+    def remove_export(self, context, volume):
+        """Removes an export for a logical volume"""
+        raise NotImplementedError()
+
+    @defer.inlineCallbacks
+    def discover_volume(self, volume):
+        """Discover volume on a remote host"""
+        raise NotImplementedError()
+
+    @defer.inlineCallbacks
+    def undiscover_volume(self, volume):
+        """Undiscover volume on a remote host"""
+        raise NotImplementedError()
+
+
+class AOEDriver(VolumeDriver):
+    """Implements AOE specific volume commands"""
+
+    def ensure_export(self, context, volume):
+        # NOTE(vish): we depend on vblade-persist for recreating exports
+        pass
+
+    def _ensure_blades(self, context):
+        """Ensure that blades have been created in datastore"""
+        total_blades = FLAGS.num_shelves * FLAGS.blades_per_shelf
+        if self.db.export_device_count(context) >= total_blades:
+            return
+        for shelf_id in xrange(FLAGS.num_shelves):
+            for blade_id in xrange(FLAGS.blades_per_shelf):
+                dev = {'shelf_id': shelf_id, 'blade_id': blade_id}
+                self.db.export_device_create_safe(context, dev)
+
+    @defer.inlineCallbacks
+    def create_export(self, context, volume):
         """Creates an export for a logical volume"""
+        self._ensure_blades(context)
+        (shelf_id,
+         blade_id) = self.db.volume_allocate_shelf_and_blade(context,
+                                                             volume['id'])
         yield self._try_execute(
             "sudo vblade-persist setup %s %s %s /dev/%s/%s" %
             (shelf_id,
              blade_id,
              FLAGS.aoe_eth_dev,
              FLAGS.volume_group,
-             volume_name))
+             volume['name']))

-    @defer.inlineCallbacks
-    def discover_volume(self, _volume_name):
-        """Discover volume on a remote host"""
-        yield self._execute("sudo aoe-discover")
-        yield self._execute("sudo aoe-stat")
-
-    @defer.inlineCallbacks
-    def remove_export(self, _volume_name, shelf_id, blade_id):
-        """Removes an export for a logical volume"""
-        yield self._try_execute("sudo vblade-persist stop %s %s" %
-                                (shelf_id, blade_id))
-        yield self._try_execute("sudo vblade-persist destroy %s %s" %
-                                (shelf_id, blade_id))
-
     @defer.inlineCallbacks
     def ensure_exports(self):
         """Runs all existing exports"""
         # NOTE(vish): The standard _try_execute does not work here
         #             because these methods throw errors if other
         #             volumes on this host are in the process of
@@ -123,13 +179,143 @@ class AOEDriver(object):
         yield self._execute("sudo vblade-persist start all",
                             check_exit_code=False)

+    @defer.inlineCallbacks
+    def remove_export(self, context, volume):
+        """Removes an export for a logical volume"""
+        (shelf_id,
+         blade_id) = self.db.volume_get_shelf_and_blade(context,
+                                                        volume['id'])
+        yield self._try_execute("sudo vblade-persist stop %s %s" %
+                                (shelf_id, blade_id))
+        yield self._try_execute("sudo vblade-persist destroy %s %s" %
+                                (shelf_id, blade_id))
+
+    @defer.inlineCallbacks
+    def discover_volume(self, _volume):
+        """Discover volume on a remote host"""
+        yield self._execute("sudo aoe-discover")
+        yield self._execute("sudo aoe-stat", check_exit_code=False)
+
+    @defer.inlineCallbacks
+    def undiscover_volume(self, _volume):
+        """Undiscover volume on a remote host"""
+        yield
+
+
 class FakeAOEDriver(AOEDriver):
     """Logs calls instead of executing"""
     def __init__(self, *args, **kwargs):
-        super(FakeAOEDriver, self).__init__(self.fake_execute)
+        super(FakeAOEDriver, self).__init__(execute=self.fake_execute,
+                                            sync_exec=self.fake_execute,
+                                            *args, **kwargs)

     @staticmethod
     def fake_execute(cmd, *_args, **_kwargs):
         """Execute that simply logs the command"""
         logging.debug("FAKE AOE: %s", cmd)
         return (None, None)
+
+
+class ISCSIDriver(VolumeDriver):
+    """Executes commands relating to ISCSI volumes"""
+
+    def ensure_export(self, context, volume):
+        """Safely and synchronously recreates an export for a logical volume"""
+        target_id = self.db.volume_get_target_id(context, volume['id'])
+        iscsi_name = "%s%s" % (FLAGS.iscsi_target_prefix, volume['name'])
+        volume_path = "/dev/%s/%s" % (FLAGS.volume_group, volume['name'])
+        self._sync_exec("sudo ietadm --op new "
+                        "--tid=%s --params Name=%s" %
+                        (target_id, iscsi_name),
+                        check_exit_code=False)
+        self._sync_exec("sudo ietadm --op new --tid=%s "
+                        "--lun=0 --params Path=%s,Type=fileio" %
+                        (target_id, volume_path),
+                        check_exit_code=False)
+
+    def _ensure_target_ids(self, context, host):
+        """Ensure that target ids have been created in datastore"""
+        host_target_ids = self.db.target_id_count_by_host(context, host)
+        if host_target_ids >= FLAGS.iscsi_target_ids:
+            return
+        # NOTE(vish): Target ids start at 1, not 0.
+        for target_id in xrange(1, FLAGS.iscsi_target_ids + 1):
+            target = {'host': host, 'target_id': target_id}
+            self.db.target_id_create_safe(context, target)
+
+    @defer.inlineCallbacks
+    def create_export(self, context, volume):
+        """Creates an export for a logical volume"""
+        self._ensure_target_ids(context, volume['host'])
+        target_id = self.db.volume_allocate_target_id(context,
+                                                      volume['id'],
+                                                      volume['host'])
+        iscsi_name = "%s%s" % (FLAGS.iscsi_target_prefix, volume['name'])
+        volume_path = "/dev/%s/%s" % (FLAGS.volume_group, volume['name'])
+        yield self._execute("sudo ietadm --op new "
+                            "--tid=%s --params Name=%s" %
+                            (target_id, iscsi_name))
+        yield self._execute("sudo ietadm --op new --tid=%s "
+                            "--lun=0 --params Path=%s,Type=fileio" %
+                            (target_id, volume_path))
+
+    @defer.inlineCallbacks
+    def remove_export(self, context, volume):
+        """Removes an export for a logical volume"""
+        target_id = self.db.volume_get_target_id(context, volume['id'])
+        yield self._execute("sudo ietadm --op delete --tid=%s "
+                            "--lun=0" % target_id)
+        yield self._execute("sudo ietadm --op delete --tid=%s" %
+                            target_id)
+
+    @defer.inlineCallbacks
+    def _get_name_and_portal(self, volume_name, host):
+        (out, _err) = yield self._execute("sudo iscsiadm -m discovery -t "
+                                          "sendtargets -p %s" % host)
+        for target in out.splitlines():
+            if FLAGS.iscsi_ip_prefix in target and volume_name in target:
+                (location, _sep, iscsi_name) = target.partition(" ")
+                break
+        iscsi_portal = location.split(",")[0]
+        defer.returnValue((iscsi_name, iscsi_portal))
+
+    @defer.inlineCallbacks
+    def discover_volume(self, volume):
+        """Discover volume on a remote host"""
+        (iscsi_name,
+         iscsi_portal) = yield self._get_name_and_portal(volume['name'],
+                                                         volume['host'])
+        yield self._execute("sudo iscsiadm -m node -T %s -p %s --login" %
+                            (iscsi_name, iscsi_portal))
+        yield self._execute("sudo iscsiadm -m node -T %s -p %s --op update "
+                            "-n node.startup -v automatic" %
+                            (iscsi_name, iscsi_portal))
+        defer.returnValue("/dev/iscsi/%s" % volume['name'])
+
+    @defer.inlineCallbacks
+    def undiscover_volume(self, volume):
+        """Undiscover volume on a remote host"""
+        (iscsi_name,
+         iscsi_portal) = yield self._get_name_and_portal(volume['name'],
+                                                         volume['host'])
+        yield self._execute("sudo iscsiadm -m node -T %s -p %s --op update "
+                            "-n node.startup -v manual" %
+                            (iscsi_name, iscsi_portal))
+        yield self._execute("sudo iscsiadm -m node -T %s -p %s --logout " %
+                            (iscsi_name, iscsi_portal))
+        yield self._execute("sudo iscsiadm -m node --op delete "
+                            "--targetname %s" % iscsi_name)
+
+
+class FakeISCSIDriver(ISCSIDriver):
+    """Logs calls instead of executing"""
+    def __init__(self, *args, **kwargs):
+        super(FakeISCSIDriver, self).__init__(execute=self.fake_execute,
+                                              sync_exec=self.fake_execute,
+                                              *args, **kwargs)
+
+    @staticmethod
+    def fake_execute(cmd, *_args, **_kwargs):
+        """Execute that simply logs the command"""
+        logging.debug("FAKE ISCSI: %s", cmd)
+        return (None, None)
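_get_name_and_portal() above leans on the standard iscsiadm sendtargets output, one "<ip>:<port>,<tpgt> <iqn>" record per line, and picks the record matching both the iscsi_ip_prefix flag and the volume name. The parsing itself, extracted into a runnable snippet (the sample line is made up but follows that format):

    # Sample sendtargets record (hypothetical volume, default target prefix):
    line = "127.0.0.1:3260,1 iqn.2010-10.org.openstack:vol-00000001"
    (location, _sep, iscsi_name) = line.partition(" ")
    iscsi_portal = location.split(",")[0]
    print(iscsi_name)    # iqn.2010-10.org.openstack:vol-00000001
    print(iscsi_portal)  # 127.0.0.1:3260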
@@ -26,6 +26,7 @@ import datetime

 from twisted.internet import defer

+from nova import context
 from nova import exception
 from nova import flags
 from nova import manager
@@ -36,70 +37,58 @@ FLAGS = flags.FLAGS
 flags.DEFINE_string('storage_availability_zone',
                     'nova',
                     'availability zone of this service')
-flags.DEFINE_string('volume_driver', 'nova.volume.driver.AOEDriver',
+flags.DEFINE_string('volume_driver', 'nova.volume.driver.ISCSIDriver',
                     'Driver to use for volume creation')
-flags.DEFINE_integer('num_shelves',
-                     100,
-                     'Number of vblade shelves')
-flags.DEFINE_integer('blades_per_shelf',
-                     16,
-                     'Number of vblade blades per shelf')


-class AOEManager(manager.Manager):
-    """Manages Ata-Over_Ethernet volumes"""
+class VolumeManager(manager.Manager):
+    """Manages attachable block storage devices"""
     def __init__(self, volume_driver=None, *args, **kwargs):
         if not volume_driver:
             volume_driver = FLAGS.volume_driver
         self.driver = utils.import_object(volume_driver)
-        super(AOEManager, self).__init__(*args, **kwargs)
+        super(VolumeManager, self).__init__(*args, **kwargs)
+        # NOTE(vish): Implementation specific db handling is done
+        #             by the driver.
         self.driver.db = self.db

-    def _ensure_blades(self, context):
-        """Ensure that blades have been created in datastore"""
-        total_blades = FLAGS.num_shelves * FLAGS.blades_per_shelf
-        if self.db.export_device_count(context) >= total_blades:
-            return
-        for shelf_id in xrange(FLAGS.num_shelves):
-            for blade_id in xrange(FLAGS.blades_per_shelf):
-                dev = {'shelf_id': shelf_id, 'blade_id': blade_id}
-                self.db.export_device_create_safe(context, dev)
+    def init_host(self):
+        """Do any initialization that needs to be run if this is a
+           standalone service.
+        """
+        self.driver.check_for_setup_error()
+        ctxt = context.get_admin_context()
+        volumes = self.db.volume_get_all_by_host(ctxt, self.host)
+        logging.debug("Re-exporting %s volumes", len(volumes))
+        for volume in volumes:
+            self.driver.ensure_export(ctxt, volume)

     @defer.inlineCallbacks
     def create_volume(self, context, volume_id):
         """Creates and exports the volume"""
         context = context.elevated()
-        logging.info("volume %s: creating", volume_id)
-
         volume_ref = self.db.volume_get(context, volume_id)
+        logging.info("volume %s: creating", volume_ref['name'])
+
         self.db.volume_update(context,
                               volume_id,
                               {'host': self.host})
+        # NOTE(vish): so we don't have to get volume from db again
+        #             before passing it to the driver.
+        volume_ref['host'] = self.host

-        size = volume_ref['size']
-        logging.debug("volume %s: creating lv of size %sG", volume_id, size)
-        yield self.driver.create_volume(volume_ref['ec2_id'], size)
+        logging.debug("volume %s: creating lv of size %sG",
+                      volume_ref['name'], volume_ref['size'])
+        yield self.driver.create_volume(volume_ref)

-        logging.debug("volume %s: allocating shelf & blade", volume_id)
-        self._ensure_blades(context)
-        rval = self.db.volume_allocate_shelf_and_blade(context, volume_id)
-        (shelf_id, blade_id) = rval
-
-        logging.debug("volume %s: exporting shelf %s & blade %s", volume_id,
-                      shelf_id, blade_id)
-
-        yield self.driver.create_export(volume_ref['ec2_id'],
-                                        shelf_id,
-                                        blade_id)
-
-        logging.debug("volume %s: re-exporting all values", volume_id)
-        yield self.driver.ensure_exports()
+        logging.debug("volume %s: creating export", volume_ref['name'])
+        yield self.driver.create_export(context, volume_ref)

         now = datetime.datetime.utcnow()
         self.db.volume_update(context,
                               volume_ref['id'], {'status': 'available',
                                                  'launched_at': now})
-        logging.debug("volume %s: created successfully", volume_id)
+        logging.debug("volume %s: created successfully", volume_ref['name'])
         defer.returnValue(volume_id)

     @defer.inlineCallbacks
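init_host() above is the "re-export volumes on restart" bullet from the commit message: every volume the database says belongs to this host gets ensure_export() replayed against it before the service takes traffic. A toy mirror of that flow with stand-in classes (hypothetical, just to show the shape; not nova code):

    class FakeDB(object):
        def __init__(self, volumes):
            self._volumes = volumes

        def volume_get_all_by_host(self, host):
            return [v for v in self._volumes if v['host'] == host]

    class FakeDriver(object):
        def ensure_export(self, volume):
            print("re-exporting %s" % volume['name'])

    class ToyVolumeManager(object):
        def __init__(self, host, db, driver):
            self.host, self.db, self.driver = host, db, driver

        def init_host(self):
            for volume in self.db.volume_get_all_by_host(self.host):
                self.driver.ensure_export(volume)

    db = FakeDB([{'name': 'vol-00000001', 'host': 'host1'},
                 {'name': 'vol-00000002', 'host': 'host2'}])
    ToyVolumeManager('host1', db, FakeDriver()).init_host()  # vol-00000001 only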
@@ -111,14 +100,10 @@ class AOEManager(manager.Manager):
             raise exception.Error("Volume is still attached")
         if volume_ref['host'] != self.host:
             raise exception.Error("Volume is not local to this node")
-        logging.debug("Deleting volume with id of: %s", volume_id)
-        shelf_id, blade_id = self.db.volume_get_shelf_and_blade(context,
-                                                                volume_id)
-        yield self.driver.remove_export(volume_ref['ec2_id'],
-                                        shelf_id,
-                                        blade_id)
-        yield self.driver.delete_volume(volume_ref['ec2_id'])
+        logging.debug("volume %s: deleting", volume_ref['name'])
+        yield self.driver.delete_volume(volume_ref)
         self.db.volume_destroy(context, volume_id)
+        logging.debug("volume %s: deleted successfully", volume_ref['name'])
         defer.returnValue(True)

     @defer.inlineCallbacks
@@ -127,9 +112,23 @@ class AOEManager(manager.Manager):

         Returns path to device.
         """
-        context = context.elevated()
+        context = context.admin()
         volume_ref = self.db.volume_get(context, volume_id)
-        yield self.driver.discover_volume(volume_ref['ec2_id'])
-        shelf_id, blade_id = self.db.volume_get_shelf_and_blade(context,
-                                                                volume_id)
-        defer.returnValue("/dev/etherd/e%s.%s" % (shelf_id, blade_id))
+        if volume_ref['host'] == self.host:
+            # NOTE(vish): No need to discover local volumes.
+            path = yield self.driver.local_path(volume_ref)
+        else:
+            path = yield self.driver.discover_volume(volume_ref)
+        defer.returnValue(path)
+
+    @defer.inlineCallbacks
+    def remove_compute_volume(self, context, volume_id):
+        """Remove remote volume on compute host"""
+        context = context.admin()
+        volume_ref = self.db.volume_get(context, volume_id)
+        if volume_ref['host'] == self.host:
+            # NOTE(vish): No need to undiscover local volumes.
+            defer.returnValue(True)
+        else:
+            yield self.driver.undiscover_volume(volume_ref)
tools/iscsidev.sh (new executable file, 41 lines)
@@ -0,0 +1,41 @@
+#!/bin/sh
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# NOTE(vish): This script helps udev create common names for discovered iscsi
+#             volumes under /dev/iscsi. To use it, create /dev/iscsi and add
+#             a file to /etc/udev/rules.d like so:
+#             mkdir /dev/iscsi
+#             echo 'KERNEL=="sd*", BUS=="scsi", PROGRAM="/path/to/iscsidev.sh
+#             %b",SYMLINK+="iscsi/%c%n"' > /etc/udev/rules.d/55-openiscsi.rules
+
+BUS=${1}
+HOST=${BUS%%:*}
+
+if [ ! -e /sys/class/iscsi_host ]; then
+    exit 1
+fi
+
+file="/sys/class/iscsi_host/host${HOST}/device/session*/iscsi_session*/session*/targetname"
+
+target_name=$(cat ${file})
+
+if [ -z "${target_name}" ]; then
+    exit 1
+fi
+
+echo "${target_name##*:}"
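For reference on how the script's output gets used: ${target_name##*:} strips everything up to and including the last colon, so a targetname of iqn.2010-10.org.openstack:vol-00000001 prints vol-00000001, and the SYMLINK+="iscsi/%c%n" udev rule then creates /dev/iscsi/vol-00000001 for the whole disk (partitions get their number appended). That is exactly the path ISCSIDriver.discover_volume() returns.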