Merge "db: Compact Kilo database migrations"

This commit is contained in:
Zuul
2021-01-30 20:24:57 +00:00
committed by Gerrit Code Review
30 changed files with 111 additions and 1416 deletions

View File

@@ -1,22 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# This is a placeholder for Juno backports.
# Do not use this number for new Kilo work. New Kilo work starts after
# all the placeholders.
#
# See blueprint backportable-db-migrations-kilo
# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
def upgrade(migrate_engine):
    """No-op placeholder reserving this migration number for Juno backports.

    New Kilo work must start after all placeholder numbers.
    """
    pass

View File

@@ -1,22 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# This is a placeholder for Juno backports.
# Do not use this number for new Kilo work. New Kilo work starts after
# all the placeholders.
#
# See blueprint backportable-db-migrations-kilo
# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
def upgrade(migrate_engine):
    """No-op placeholder reserving this migration number for Juno backports.

    New Kilo work must start after all placeholder numbers.
    """
    pass

View File

@@ -1,22 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# This is a placeholder for Juno backports.
# Do not use this number for new Kilo work. New Kilo work starts after
# all the placeholders.
#
# See blueprint backportable-db-migrations-kilo
# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
def upgrade(migrate_engine):
    """No-op placeholder reserving this migration number for Juno backports.

    New Kilo work must start after all placeholder numbers.
    """
    pass

View File

@@ -1,22 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# This is a placeholder for Juno backports.
# Do not use this number for new Kilo work. New Kilo work starts after
# all the placeholders.
#
# See blueprint backportable-db-migrations-kilo
# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
def upgrade(migrate_engine):
    """No-op placeholder reserving this migration number for Juno backports.

    New Kilo work must start after all placeholder numbers.
    """
    pass

View File

@@ -1,22 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# This is a placeholder for Juno backports.
# Do not use this number for new Kilo work. New Kilo work starts after
# all the placeholders.
#
# See blueprint backportable-db-migrations-kilo
# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
def upgrade(migrate_engine):
    """No-op placeholder reserving this migration number for Juno backports.

    New Kilo work must start after all placeholder numbers.
    """
    pass

View File

@@ -1,22 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# This is a placeholder for Juno backports.
# Do not use this number for new Kilo work. New Kilo work starts after
# all the placeholders.
#
# See blueprint backportable-db-migrations-kilo
# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
def upgrade(migrate_engine):
    """No-op placeholder reserving this migration number for Juno backports.

    New Kilo work must start after all placeholder numbers.
    """
    pass

View File

@@ -1,22 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# This is a placeholder for Juno backports.
# Do not use this number for new Kilo work. New Kilo work starts after
# all the placeholders.
#
# See blueprint backportable-db-migrations-kilo
# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
def upgrade(migrate_engine):
    """No-op placeholder reserving this migration number for Juno backports.

    New Kilo work must start after all placeholder numbers.
    """
    pass

View File

@@ -1,22 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# This is a placeholder for Juno backports.
# Do not use this number for new Kilo work. New Kilo work starts after
# all the placeholders.
#
# See blueprint backportable-db-migrations-kilo
# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
def upgrade(migrate_engine):
    """No-op placeholder reserving this migration number for Juno backports.

    New Kilo work must start after all placeholder numbers.
    """
    pass

View File

@@ -1,22 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# This is a placeholder for Juno backports.
# Do not use this number for new Kilo work. New Kilo work starts after
# all the placeholders.
#
# See blueprint backportable-db-migrations-kilo
# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
def upgrade(migrate_engine):
    """No-op placeholder reserving this migration number for Juno backports.

    New Kilo work must start after all placeholder numbers.
    """
    pass

View File

@@ -1,22 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# This is a placeholder for Juno backports.
# Do not use this number for new Kilo work. New Kilo work starts after
# all the placeholders.
#
# See blueprint backportable-db-migrations-kilo
# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
def upgrade(migrate_engine):
    """No-op placeholder reserving this migration number for Juno backports.

    New Kilo work must start after all placeholder numbers.
    """
    pass

View File

@@ -1,37 +0,0 @@
# Copyright 2014 OpenStack Foundation
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import MetaData, Table
# (table_name, index_name) pairs for single-column indexes that are fully
# covered by a wider composite index on the same table, and are therefore
# redundant and safe to drop.
INDEXES = [
    # subset of instances_host_deleted_cleaned_idx
    ('instances', 'instances_host_deleted_idx'),
    # subset of iscsi_targets_host_volume_id_deleted_idx
    ('iscsi_targets', 'iscsi_targets_host_idx'),
]
def upgrade(migrate_engine):
    """Drop indexes that are subsets of other (composite) indexes."""
    meta = MetaData(bind=migrate_engine)
    for tbl_name, idx_name in INDEXES:
        tbl = Table(tbl_name, meta, autoload=True)
        # Reflected indexes are inspected by name; drop only the redundant one.
        doomed = [idx for idx in tbl.indexes if idx.name == idx_name]
        for idx in doomed:
            idx.drop()

View File

@@ -1,27 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy as sa
def upgrade(migrate_engine):
    """Create the ``tags`` table keyed on (resource_id, tag)."""
    meta = sa.MetaData(bind=migrate_engine)
    table = sa.Table(
        'tags', meta,
        sa.Column('resource_id', sa.String(36), primary_key=True,
                  nullable=False),
        sa.Column('tag', sa.Unicode(80), primary_key=True,
                  nullable=False),
        sa.Index('tags_tag_idx', 'tag'),
        mysql_engine='InnoDB',
        mysql_charset='utf8')
    table.create()

View File

@@ -1,110 +0,0 @@
# Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from migrate import UniqueConstraint
from sqlalchemy import MetaData
from sqlalchemy.sql import null
from nova import exception
from nova.i18n import _
# Name of the unique constraint this migration adds on instances.uuid.
UC_NAME = 'uniq_instances0uuid'


def scan_for_null_records(table, col_name, check_fkeys):
    """Queries the table looking for NULL instances of the given column.

    :param table: The reflected table to scan.
    :param col_name: The name of the column to look for in the table.
    :param check_fkeys: If True, check the table for foreign keys back to the
        instances table and if not found, return.
    :raises: exception.ValidationError: If any records are found.
    """
    # Tables that do not have the column at all are skipped silently.
    if col_name in table.columns:
        # NOTE(mriedem): filter out tables that don't have a foreign key back
        # to the instances table since they could have stale data even if
        # instances.uuid wasn't NULL.
        if check_fkeys:
            fkey_found = False
            fkeys = table.c[col_name].foreign_keys or []
            for fkey in fkeys:
                if fkey.column.table.name == 'instances':
                    fkey_found = True
            if not fkey_found:
                return
        # Count the rows where the column is NULL; any hit fails the
        # migration so the operator can clean the data up first.
        records = len(list(
            table.select().where(table.c[col_name] == null()).execute()
        ))
        if records:
            msg = _("There are %(records)d records in the "
                    "'%(table_name)s' table where the uuid or "
                    "instance_uuid column is NULL. These must be "
                    "manually cleaned up before the migration will pass. "
                    "Consider running the "
                    "'nova-manage db null_instance_uuid_scan' command.") % (
                {'records': records, 'table_name': table.name})
            raise exception.ValidationError(detail=msg)
def process_null_records(meta, scan=True):
    """Scans the database for null instance_uuid records for processing.

    :param meta: sqlalchemy.MetaData object, assumes tables are reflected.
    :param scan: If True, does a query and fails the migration if NULL
        instance uuid entries found. If False, makes instances.uuid
        non-nullable.
    """
    if scan:
        # Walk dependent tables first (reverse of dependency-sorted order).
        for table in reversed(meta.sorted_tables):
            # NOTE(mriedem): There is a periodic task in the network manager
            # that calls nova.db.api.fixed_ip_disassociate_all_by_timeout which
            # will set fixed_ips.instance_uuid to None by design, so we have to
            # skip the fixed_ips table otherwise we'll wipeout the pool of
            # fixed IPs.
            if table.name not in ('fixed_ips', 'shadow_fixed_ips'):
                scan_for_null_records(table, 'instance_uuid', check_fkeys=True)

    for table_name in ('instances', 'shadow_instances'):
        table = meta.tables[table_name]
        if scan:
            scan_for_null_records(table, 'uuid', check_fkeys=False)
        else:
            # The record is gone so make the uuid column non-nullable.
            table.columns.uuid.alter(nullable=False)
def upgrade(migrate_engine):
    """Fail on NULL instance uuids, then make instances.uuid non-nullable
    and add a unique constraint on it.
    """
    # NOTE(mriedem): We're going to load up all of the tables so we can find
    # any with an instance_uuid column since those may be foreign keys back
    # to the instances table and we want to cleanup those records first. We
    # have to do this explicitly because the foreign keys in nova aren't
    # defined with cascading deletes.
    meta = MetaData(migrate_engine)
    meta.reflect(migrate_engine)
    # Scan the database first and fail if any NULL records found.
    process_null_records(meta, scan=True)
    # Now run the alter statements.
    process_null_records(meta, scan=False)
    # Create a unique constraint on instances.uuid for foreign keys.
    instances = meta.tables['instances']
    UniqueConstraint('uuid', table=instances, name=UC_NAME).create()
    # NOTE(mriedem): We now have a unique index on instances.uuid from the
    # 234_icehouse migration and a unique constraint on the same column, which
    # is redundant but should not be a big performance penalty. We should
    # clean this up in a later (separate) migration since it involves dropping
    # any ForeignKeys on the instances.uuid column due to some index rename
    # issues in older versions of MySQL. That is beyond the scope of this
    # migration.

View File

@@ -1,42 +0,0 @@
# Copyright (c) 2014 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from migrate import UniqueConstraint
from sqlalchemy import MetaData, Table, Column, String
def upgrade(migrate_engine):
    """Add a nullable ``host`` column to compute_nodes (and its shadow
    table) plus a unique constraint on (host, hypervisor_hostname).
    """
    meta = MetaData(bind=migrate_engine)

    compute_nodes = Table('compute_nodes', meta, autoload=True)
    shadow_compute_nodes = Table('shadow_compute_nodes', meta, autoload=True)

    # NOTE(sbauza): Old compute nodes can report stats without this field,
    # so it has to remain nullable.
    host_col = Column('host', String(255), nullable=True)
    if not hasattr(compute_nodes.c, 'host'):
        compute_nodes.create_column(host_col)
    if not hasattr(shadow_compute_nodes.c, 'host'):
        shadow_compute_nodes.create_column(host_col.copy())

    # NOTE(sbauza): Populating the host field from the services table is
    # handled at the ComputeNode object level on save().
    ukey = UniqueConstraint('host', 'hypervisor_hostname', table=compute_nodes,
                            name="uniq_compute_nodes0host0hypervisor_hostname")
    ukey.create()

View File

@@ -1,32 +0,0 @@
# Copyright 2014 Intel Corporation
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# See blueprint backportable-db-migrations-icehouse
# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
from sqlalchemy import MetaData, Table, Column, Integer
def upgrade(migrate_engine):
    """Add a ``numa_node`` column to pci_devices and shadow_pci_devices."""
    meta = MetaData(bind=migrate_engine)

    # Column storing the NUMA node a PCI device is attached to; NULL when
    # the hypervisor does not report it.
    numa_col = Column('numa_node', Integer, default=None)

    for tbl_name in ('pci_devices', 'shadow_pci_devices'):
        tbl = Table(tbl_name, meta, autoload=True)
        if not hasattr(tbl.c, 'numa_node'):
            tbl.create_column(numa_col if tbl_name == 'pci_devices'
                              else numa_col.copy())

View File

@@ -1,32 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Column
from sqlalchemy import MetaData
from sqlalchemy import Table
from sqlalchemy import Text
# Table the new column is added to; the shadow_ variant is updated too.
BASE_TABLE_NAME = 'instance_extra'
# Serialized flavor data, stored as text.
NEW_COLUMN_NAME = 'flavor'


def upgrade(migrate_engine):
    """Add a nullable Text ``flavor`` column to instance_extra and its
    shadow table, when not already present.
    """
    meta = MetaData(bind=migrate_engine)
    for tbl_name in (BASE_TABLE_NAME, 'shadow_' + BASE_TABLE_NAME):
        tbl = Table(tbl_name, meta, autoload=True)
        if not hasattr(tbl.c, NEW_COLUMN_NAME):
            tbl.create_column(Column(NEW_COLUMN_NAME, Text, nullable=True))

View File

@@ -1,69 +0,0 @@
# Copyright 2014 Rackspace Hosting
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_db.sqlalchemy import utils
# (table_name, index_name, column_names) triples for indexes that exist on
# MySQL but were historically missing on SQLite and PostgreSQL.
INDEXES = [
    ('block_device_mapping', 'snapshot_id', ['snapshot_id']),
    ('block_device_mapping', 'volume_id', ['volume_id']),
    ('dns_domains', 'dns_domains_project_id_idx', ['project_id']),
    ('fixed_ips', 'network_id', ['network_id']),
    ('fixed_ips', 'fixed_ips_instance_uuid_fkey', ['instance_uuid']),
    ('fixed_ips', 'fixed_ips_virtual_interface_id_fkey',
     ['virtual_interface_id']),
    ('floating_ips', 'fixed_ip_id', ['fixed_ip_id']),
    ('iscsi_targets', 'iscsi_targets_volume_id_fkey', ['volume_id']),
    ('virtual_interfaces', 'virtual_interfaces_network_id_idx',
     ['network_id']),
    ('virtual_interfaces', 'virtual_interfaces_instance_uuid_fkey',
     ['instance_uuid']),
]
def ensure_index_exists(migrate_engine, table_name, index_name, column_names):
    """Create the named index on ``column_names`` unless it already exists."""
    if utils.index_exists(migrate_engine, table_name, index_name):
        return
    utils.add_index(migrate_engine, table_name, index_name, column_names)
def ensure_index_removed(migrate_engine, table_name, index_name):
    """Drop the named index when it is present; no-op otherwise."""
    if not utils.index_exists(migrate_engine, table_name, index_name):
        return
    utils.drop_index(migrate_engine, table_name, index_name)
def upgrade(migrate_engine):
    """Add indexes missing on SQLite and PostgreSQL."""
    # PostgreSQL and SQLite namespace indexes at the database level, whereas
    # MySQL namespaces indexes at the table level. Unfortunately, some of
    # the missing indexes in PostgreSQL and SQLite have conflicting names
    # that MySQL allowed.
    dialect = migrate_engine.name
    if dialect in ('sqlite', 'postgresql'):
        for tbl, idx, cols in INDEXES:
            ensure_index_exists(migrate_engine, tbl, idx, cols)
    elif dialect == 'mysql':
        # Rename some indexes with conflicting names
        ensure_index_removed(migrate_engine, 'dns_domains', 'project_id')
        ensure_index_exists(migrate_engine, 'dns_domains',
                            'dns_domains_project_id_idx', ['project_id'])
        ensure_index_removed(migrate_engine, 'virtual_interfaces',
                             'network_id')
        ensure_index_exists(migrate_engine, 'virtual_interfaces',
                            'virtual_interfaces_network_id_idx',
                            ['network_id'])

View File

@@ -1,28 +0,0 @@
# Copyright (c) 2015 Cloudbase Solutions SRL
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# NOTE(mikal): this migration number exists like this because change
# I506dd1c8d0f0a877fdfc1a4ed11a8830d9600b98 needs to revert the hyper-v
# keypair change, but we promise that we will never remove a schema migration
# version. Instead, we replace this migration with a noop.
#
# It is hypothetically possible that a hyper-v continuous deployer exists who
# will have a poor experience because of this code revert, if that deployer
# is you, please contact the nova team at openstack-discuss@lists.openstack.org
# and we will walk you through the manual fix required for this problem.
def upgrade(migrate_engine):
    """Deliberate no-op: this migration replaced the reverted hyper-v
    keypair change (see the NOTE above) since schema migration versions
    are never removed.
    """
    pass

View File

@@ -1,108 +0,0 @@
# Copyright 2014 Rackspace Hosting
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from migrate import ForeignKeyConstraint, UniqueConstraint
from oslo_db.sqlalchemy import utils
from sqlalchemy import MetaData, schema, Table
# (src_table, src_column, dst_table, dst_column, fkey_name) tuples for
# foreign keys that may be missing and should be (re-)created.
FKEYS = [
    ('fixed_ips', 'instance_uuid', 'instances', 'uuid',
     'fixed_ips_instance_uuid_fkey'),
    ('block_device_mapping', 'instance_uuid', 'instances', 'uuid',
     'block_device_mapping_instance_uuid_fkey'),
    ('instance_info_caches', 'instance_uuid', 'instances', 'uuid',
     'instance_info_caches_instance_uuid_fkey'),
    ('instance_metadata', 'instance_uuid', 'instances', 'uuid',
     'instance_metadata_instance_uuid_fkey'),
    ('instance_system_metadata', 'instance_uuid', 'instances', 'uuid',
     'instance_system_metadata_ibfk_1'),
    ('instance_type_projects', 'instance_type_id', 'instance_types', 'id',
     'instance_type_projects_ibfk_1'),
    ('iscsi_targets', 'volume_id', 'volumes', 'id',
     'iscsi_targets_volume_id_fkey'),
    ('reservations', 'usage_id', 'quota_usages', 'id',
     'reservations_ibfk_1'),
    ('security_group_instance_association', 'instance_uuid',
     'instances', 'uuid',
     'security_group_instance_association_instance_uuid_fkey'),
    ('security_group_instance_association', 'security_group_id',
     'security_groups', 'id',
     'security_group_instance_association_ibfk_1'),
    ('virtual_interfaces', 'instance_uuid', 'instances', 'uuid',
     'virtual_interfaces_instance_uuid_fkey'),
    ('compute_nodes', 'service_id', 'services', 'id',
     'fk_compute_nodes_service_id'),
    ('instance_actions', 'instance_uuid', 'instances', 'uuid',
     'fk_instance_actions_instance_uuid'),
    ('instance_faults', 'instance_uuid', 'instances', 'uuid',
     'fk_instance_faults_instance_uuid'),
    ('migrations', 'instance_uuid', 'instances', 'uuid',
     'fk_migrations_instance_uuid'),
]
# (table_name, constraint_name, column_names) tuples for unique constraints
# that may be missing and should be (re-)created.
UNIQUES = [
    ('compute_nodes', 'uniq_compute_nodes0host0hypervisor_hostname',
     ['host', 'hypervisor_hostname']),
    ('fixed_ips', 'uniq_fixed_ips0address0deleted',
     ['address', 'deleted']),
    ('instance_info_caches', 'uniq_instance_info_caches0instance_uuid',
     ['instance_uuid']),
    ('instance_type_projects',
     'uniq_instance_type_projects0instance_type_id0project_id0deleted',
     ['instance_type_id', 'project_id', 'deleted']),
    ('pci_devices', 'uniq_pci_devices0compute_node_id0address0deleted',
     ['compute_node_id', 'address', 'deleted']),
    ('virtual_interfaces', 'uniq_virtual_interfaces0address0deleted',
     ['address', 'deleted']),
]
def upgrade(migrate_engine):
    """Re-add foreign keys and unique constraints that may be missing.

    Creates each entry in FKEYS and UNIQUES unless the schema already has
    a constraint with that name.
    """
    meta = MetaData()
    meta.bind = migrate_engine

    if migrate_engine.name == 'sqlite':
        # SQLite is also missing this one index
        if not utils.index_exists(migrate_engine, 'fixed_ips', 'address'):
            utils.add_index(migrate_engine, 'fixed_ips', 'address',
                            ['address'])

    for src_table, src_column, dst_table, dst_column, name in FKEYS:
        src_table = Table(src_table, meta, autoload=True)
        # Skip foreign keys the table already has.
        if name in set(fk.name for fk in src_table.foreign_keys):
            continue

        src_column = src_table.c[src_column]
        dst_table = Table(dst_table, meta, autoload=True)
        dst_column = dst_table.c[dst_column]

        fkey = ForeignKeyConstraint(columns=[src_column],
                                    refcolumns=[dst_column],
                                    name=name)
        fkey.create()

    # SQLAlchemy versions < 1.0.0 don't reflect unique constraints
    # for SQLite correctly causing sqlalchemy-migrate to recreate
    # some tables with missing unique constraints. Re-add some
    # potentially missing unique constraints as a workaround.
    for table_name, name, column_names in UNIQUES:
        table = Table(table_name, meta, autoload=True)
        # FIX: the existence check previously tested
        # isinstance(table, schema.UniqueConstraint), which is always False
        # (``table`` is a Table), so the set was always empty and existing
        # unique constraints were never skipped. Test each constraint
        # object ``c`` instead.
        if name in set(c.name for c in table.constraints
                       if isinstance(c, schema.UniqueConstraint)):
            continue
        uc = UniqueConstraint(*column_names, table=table, name=name)
        uc.create()

View File

@@ -1,44 +0,0 @@
# Copyright 2014 Rackspace Hosting
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from sqlalchemy import MetaData, Table, Index
LOG = logging.getLogger(__name__)
def upgrade(migrate_engine):
    """Change instances (project_id) index to cover (project_id, deleted)."""
    meta = MetaData(bind=migrate_engine)

    # Indexes cannot be altered in place: add the composite index first,
    # then drop the old single-column one.
    instances = Table('instances', meta, autoload=True)

    has_equivalent = any(
        [col.name for col in idx.columns] == ['project_id', 'deleted']
        for idx in instances.indexes)
    if has_equivalent:
        LOG.info('Skipped adding instances_project_id_deleted_idx '
                 'because an equivalent index already exists.')
    else:
        Index('instances_project_id_deleted_idx',
              instances.c.project_id, instances.c.deleted).create()

    for idx in instances.indexes:
        if [col.name for col in idx.columns] == ['project_id']:
            idx.drop()

View File

@@ -1,41 +0,0 @@
# Copyright (c) 2015 Cloudbase Solutions SRL
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import MetaData, Column, Table
from sqlalchemy import Enum
from nova.objects import keypair
def upgrade(migrate_engine):
    """Function adds key_pairs type field.

    Creates the ``keypair_types`` enum ('ssh', 'x509') and adds a
    non-nullable ``type`` column, defaulting to the SSH type, to both
    ``key_pairs`` and ``shadow_key_pairs``.
    """
    meta = MetaData(bind=migrate_engine)

    key_pairs = Table('key_pairs', meta, autoload=True)
    shadow_key_pairs = Table('shadow_key_pairs', meta, autoload=True)

    # The enum type must exist in the database before a column can use it.
    enum = Enum('ssh', 'x509', metadata=meta, name='keypair_types')
    enum.create()

    keypair_type = Column('type', enum, nullable=False,
                          server_default=keypair.KEYPAIR_TYPE_SSH)

    # Drop any pre-existing 'type' column before re-adding it with the enum
    # type — presumably left over from an earlier deployment; verify against
    # the reverted hyper-v keypair change noted elsewhere in this series.
    if hasattr(key_pairs.c, 'type'):
        key_pairs.c.type.drop()
    if hasattr(shadow_key_pairs.c, 'type'):
        shadow_key_pairs.c.type.drop()
    key_pairs.create_column(keypair_type)
    shadow_key_pairs.create_column(keypair_type.copy())

View File

@@ -1,32 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Column
from sqlalchemy import MetaData
from sqlalchemy import Table
from sqlalchemy import Text
# Table the new column is added to; the shadow_ variant is updated too.
BASE_TABLE_NAME = 'instance_extra'
# Serialized vCPU model data, stored as text.
NEW_COLUMN_NAME = 'vcpu_model'


def upgrade(migrate_engine):
    """Add a nullable Text ``vcpu_model`` column to instance_extra and its
    shadow table, when not already present.
    """
    meta = MetaData(bind=migrate_engine)
    for tbl_name in (BASE_TABLE_NAME, 'shadow_' + BASE_TABLE_NAME):
        tbl = Table(tbl_name, meta, autoload=True)
        if not hasattr(tbl.c, NEW_COLUMN_NAME):
            tbl.create_column(Column(NEW_COLUMN_NAME, Text, nullable=True))

View File

@@ -1,43 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from sqlalchemy import Index, MetaData, Table
LOG = logging.getLogger(__name__)
# Columns covered by the new fixed_ips index, in order.
INDEX_COLUMNS = ['deleted', 'allocated', 'updated_at']
# Derived index name: fixed_ips_deleted_allocated_updated_at_idx.
INDEX_NAME = 'fixed_ips_%s_idx' % ('_'.join(INDEX_COLUMNS),)
def _get_table_index(migrate_engine):
    """Return (meta, fixed_ips table, index matching INDEX_COLUMNS or None)."""
    meta = MetaData()
    meta.bind = migrate_engine
    table = Table('fixed_ips', meta, autoload=True)
    match = next((idx for idx in table.indexes
                  if idx.columns.keys() == INDEX_COLUMNS), None)
    return meta, table, match
def upgrade(migrate_engine):
    """Add a (deleted, allocated, updated_at) index to ``fixed_ips``.

    Creation is skipped when an equivalent index already exists, e.g.
    one added out-of-band by an operator.
    """
    _meta, table, existing = _get_table_index(migrate_engine)
    if existing is not None:
        LOG.info('Skipped adding %s because an equivalent index'
                 ' already exists.', INDEX_NAME)
        return

    member_columns = [table.c[name] for name in INDEX_COLUMNS]
    Index(INDEX_NAME, *member_columns).create(migrate_engine)

View File

@@ -1,68 +0,0 @@
# Copyright (c) 2015 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from migrate import ForeignKeyConstraint, UniqueConstraint
from sqlalchemy import MetaData, Table
from sqlalchemy.engine import reflection
def _correct_sqlite_unique_constraints(migrate_engine, table):
    """Re-attach reflected unique constraints to *table* (SQLite only).

    SQLAlchemy < 1.0 does not populate ``Table.constraints`` with unique
    keys on SQLite, so changing the schema or the constraints would
    silently drop them. Adding them back to the Table object before
    altering the model keeps them intact.
    """
    if migrate_engine.name != 'sqlite':
        # Other engines reflect unique constraints correctly.
        return

    inspector = reflection.Inspector.from_engine(migrate_engine)
    known_names = {c.name for c in table.constraints}
    for reflected in inspector.get_unique_constraints(table.name):
        if reflected['name'] in known_names:
            # Already tracked on the Table object.
            continue
        table.constraints.add(
            UniqueConstraint(*reflected['column_names'],
                             table=table,
                             name=reflected['name']))
def upgrade(migrate_engine):
    """Decouple ``compute_nodes`` from ``services``.

    Makes ``compute_nodes.service_id`` (and its shadow copy) nullable,
    then drops the foreign key to ``services.id`` together with the
    index that the FK implicitly created.
    """
    meta = MetaData()
    meta.bind = migrate_engine

    compute_nodes = Table('compute_nodes', meta, autoload=True)
    shadow_compute_nodes = Table('shadow_compute_nodes', meta, autoload=True)
    services = Table('services', meta, autoload=True)

    # Must run before any alteration, or SQLite would lose the table's
    # unique constraints (see helper docstring).
    _correct_sqlite_unique_constraints(migrate_engine, compute_nodes)

    # Relax NOT NULL on both the real and the shadow table.
    compute_nodes.c.service_id.alter(nullable=True)
    shadow_compute_nodes.c.service_id.alter(nullable=True)

    # Drop the FK that points at services.id, if present.
    service_fk = next(
        (fk for fk in compute_nodes.foreign_keys
         if fk.column == services.c.id),
        None)
    if service_fk is not None:
        ForeignKeyConstraint(columns=[compute_nodes.c.service_id],
                             refcolumns=[services.c.id],
                             name=service_fk.name).drop()

    # Drop the nested index that was created by the FK, if present.
    nested_index = next(
        (index for index in compute_nodes.indexes
         if 'service_id' in index.columns),
        None)
    if nested_index is not None:
        nested_index.drop()

View File

@@ -1,35 +0,0 @@
# Copyright (c) Intel Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from migrate import UniqueConstraint
from sqlalchemy import MetaData, Table
def upgrade(migrate_engine):
    """Widen the compute_nodes unique constraint to include 'deleted'.

    Replaces the (host, hypervisor_hostname) unique constraint with one
    over (host, hypervisor_hostname, deleted).
    """
    meta = MetaData()
    meta.bind = migrate_engine
    compute_nodes = Table('compute_nodes', meta, autoload=True)

    # The old two-column constraint has to go first...
    UniqueConstraint(
        'host', 'hypervisor_hostname',
        table=compute_nodes,
        name="uniq_compute_nodes0host0hypervisor_hostname",
    ).drop()

    # ...then the widened three-column replacement is created.
    UniqueConstraint(
        'host', 'hypervisor_hostname', 'deleted',
        table=compute_nodes,
        name="uniq_compute_nodes0host0hypervisor_hostname0deleted",
    ).create()

View File

@@ -1,35 +0,0 @@
# Copyright 2015 Intel Corporation
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from migrate.changeset import UniqueConstraint
from sqlalchemy import MetaData, Table
def upgrade(migrate_engine):
    """Function enforces non-null value for keypairs name field."""
    meta = MetaData(bind=migrate_engine)
    key_pairs = Table('key_pairs', meta, autoload=True)

    # The unique constraint covering 'name' must be dropped before the
    # column can be altered; the identical constraint is recreated
    # afterwards. It was first added in 234_icehouse, so there is
    # nothing to remove on downgrade.
    def _name_constraint():
        return UniqueConstraint(
            'user_id', 'name', 'deleted', table=key_pairs,
            name='uniq_key_pairs0user_id0name0deleted')

    _name_constraint().drop()
    key_pairs.c.name.alter(nullable=False)
    _name_constraint().create()

View File

@@ -20,8 +20,10 @@ from sqlalchemy import dialects
from sqlalchemy import ForeignKey, Index, Integer, MetaData, String, Table
from sqlalchemy import Text
from sqlalchemy.types import NullType
from sqlalchemy import Unicode
from nova.db.sqlalchemy import types
from nova.objects import keypair
LOG = logging.getLogger(__name__)
@@ -55,6 +57,10 @@ def _create_shadow_tables(migrate_engine):
meta.bind = migrate_engine
for table_name in table_names:
# Skip tables that are not soft-deletable
if table_name in ('tags', ):
continue
table = Table(table_name, meta, autoload=True)
columns = []
@@ -72,7 +78,23 @@ def _create_shadow_tables(migrate_engine):
'owner', 'admin', name='shadow_instances0locked_by',
)
column_copy = Column(column.name, enum)
else:
# NOTE(stephenfin): By default, 'sqlalchemy.Enum' will issue a
# 'CREATE TYPE' command on PostgreSQL, even if the type already
# exists. We work around this by using the PostgreSQL-specific
# 'sqlalchemy.dialects.postgresql.ENUM' type and setting
# 'create_type' to 'False'. See [1] for more information.
#
# [1] https://stackoverflow.com/a/28894354/613428
if migrate_engine.name == 'postgresql':
if table_name == 'key_pairs' and column.name == 'type':
enum = dialects.postgresql.ENUM(
'ssh', 'x509', name='keypair_types', create_type=False)
column_copy = Column(
column.name, enum, nullable=False,
server_default=keypair.KEYPAIR_TYPE_SSH)
if column_copy is None:
column_copy = column.copy()
columns.append(column_copy)
@@ -238,7 +260,7 @@ def upgrade(migrate_engine):
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('service_id', Integer, nullable=False),
Column('service_id', Integer, nullable=True),
Column('vcpus', Integer, nullable=False),
Column('memory_mb', Integer, nullable=False),
Column('local_gb', Integer, nullable=False),
@@ -262,6 +284,11 @@ def upgrade(migrate_engine):
Column('extra_resources', Text, nullable=True),
Column('stats', Text, default='{}'),
Column('numa_topology', Text, nullable=True),
Column('host', String(255), nullable=True),
UniqueConstraint(
'host', 'hypervisor_hostname', 'deleted',
name='uniq_compute_nodes0host0hypervisor_hostname0deleted',
),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
@@ -560,7 +587,7 @@ def upgrade(migrate_engine):
Column('launched_on', MediumText()),
Column('instance_type_id', Integer),
Column('vm_mode', String(length=255)),
Column('uuid', String(length=36)),
Column('uuid', String(length=36), nullable=False),
Column('architecture', String(length=255)),
Column('root_device_name', String(length=255)),
Column('access_ip_v4', InetSmall()),
@@ -582,8 +609,8 @@ def upgrade(migrate_engine):
'locked_by', Enum('owner', 'admin', name='instances0locked_by')),
Column('cleaned', Integer, default=0),
Column('ephemeral_key_uuid', String(36)),
Index('project_id', 'project_id'),
Index('uuid', 'uuid', unique=True),
UniqueConstraint('uuid', name='uniq_instances0uuid'),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
@@ -633,6 +660,8 @@ def upgrade(migrate_engine):
Column('instance_uuid', String(length=36), nullable=False),
Column('numa_topology', Text, nullable=True),
Column('pci_requests', Text, nullable=True),
Column('flavor', Text, nullable=True),
Column('vcpu_model', Text, nullable=True),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
@@ -660,6 +689,9 @@ def upgrade(migrate_engine):
Column('fingerprint', String(length=255)),
Column('public_key', MediumText()),
Column('deleted', Integer),
Column(
'type', Enum('ssh', 'x509', name='keypair_types'), nullable=False,
server_default=keypair.KEYPAIR_TYPE_SSH),
UniqueConstraint(
'user_id', 'name', 'deleted',
name='uniq_key_pairs0user_id0name0deleted'),
@@ -745,10 +777,11 @@ def upgrade(migrate_engine):
Column('extra_info', Text, nullable=True),
Column('instance_uuid', String(36), nullable=True),
Column('request_id', String(36), nullable=True),
Index('ix_pci_devices_compute_node_id_deleted',
'compute_node_id', 'deleted'),
Column('numa_node', Integer, default=None),
Index('ix_pci_devices_instance_uuid_deleted',
'instance_uuid', 'deleted'),
Index('ix_pci_devices_compute_node_id_deleted',
'compute_node_id', 'deleted'),
UniqueConstraint(
'compute_node_id', 'address', 'deleted',
name='uniq_pci_devices0compute_node_id0address0deleted'),
@@ -972,6 +1005,14 @@ def upgrade(migrate_engine):
mysql_charset='utf8'
)
tags = Table('tags', meta,
Column('resource_id', String(36), primary_key=True, nullable=False),
Column('tag', Unicode(80), primary_key=True, nullable=False),
Index('tags_tag_idx', 'tag'),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
task_log = Table('task_log', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
@@ -1095,7 +1136,7 @@ def upgrade(migrate_engine):
quotas, project_user_quotas,
reservations, s3_images, security_group_instance_association,
security_group_rules, security_group_default_rules,
services, snapshot_id_mappings, task_log,
services, snapshot_id_mappings, tags, task_log,
virtual_interfaces,
volume_id_mappings,
volume_usage_cache]
@@ -1108,28 +1149,6 @@ def upgrade(migrate_engine):
LOG.exception('Exception while creating table.')
raise
# created first (to preserve ordering for schema diffs)
mysql_pre_indexes = [
Index('instance_type_id', instance_type_projects.c.instance_type_id),
Index('project_id', dns_domains.c.project_id),
Index('fixed_ip_id', floating_ips.c.fixed_ip_id),
Index('network_id', virtual_interfaces.c.network_id),
Index('network_id', fixed_ips.c.network_id),
Index('fixed_ips_virtual_interface_id_fkey',
fixed_ips.c.virtual_interface_id),
Index('address', fixed_ips.c.address),
Index('fixed_ips_instance_uuid_fkey', fixed_ips.c.instance_uuid),
Index('instance_uuid', instance_system_metadata.c.instance_uuid),
Index('iscsi_targets_volume_id_fkey', iscsi_targets.c.volume_id),
Index('snapshot_id', block_device_mapping.c.snapshot_id),
Index('usage_id', reservations.c.usage_id),
Index('virtual_interfaces_instance_uuid_fkey',
virtual_interfaces.c.instance_uuid),
Index('volume_id', block_device_mapping.c.volume_id),
Index('security_group_id',
security_group_instance_association.c.security_group_id),
]
# Common indexes (indexes we apply to all databases)
# NOTE: order specific for MySQL diff support
common_indexes = [
@@ -1144,12 +1163,13 @@ def upgrade(migrate_engine):
agent_builds.c.architecture),
# block_device_mapping
Index('snapshot_id', block_device_mapping.c.snapshot_id),
Index('volume_id', block_device_mapping.c.volume_id),
Index('block_device_mapping_instance_uuid_idx',
block_device_mapping.c.instance_uuid),
Index('block_device_mapping_instance_uuid_device_name_idx',
block_device_mapping.c.instance_uuid,
block_device_mapping.c.device_name),
Index('block_device_mapping_instance_uuid_volume_id_idx',
block_device_mapping.c.instance_uuid,
block_device_mapping.c.volume_id),
@@ -1170,8 +1190,14 @@ def upgrade(migrate_engine):
# dns_domains
Index('dns_domains_domain_deleted_idx',
dns_domains.c.domain, dns_domains.c.deleted),
Index('dns_domains_project_id_idx', dns_domains.c.project_id),
# fixed_ips
Index('network_id', fixed_ips.c.network_id),
Index('address', fixed_ips.c.address),
Index('fixed_ips_instance_uuid_fkey', fixed_ips.c.instance_uuid),
Index('fixed_ips_virtual_interface_id_fkey',
fixed_ips.c.virtual_interface_id),
Index('fixed_ips_host_idx', fixed_ips.c.host),
Index('fixed_ips_network_id_host_deleted_idx', fixed_ips.c.network_id,
fixed_ips.c.host, fixed_ips.c.deleted),
@@ -1180,8 +1206,12 @@ def upgrade(migrate_engine):
fixed_ips.c.network_id, fixed_ips.c.deleted),
Index('fixed_ips_deleted_allocated_idx', fixed_ips.c.address,
fixed_ips.c.deleted, fixed_ips.c.allocated),
Index('fixed_ips_deleted_allocated_updated_at_idx',
fixed_ips.c.deleted, fixed_ips.c.allocated,
fixed_ips.c.updated_at),
# floating_ips
Index('fixed_ip_id', floating_ips.c.fixed_ip_id),
Index('floating_ips_host_idx', floating_ips.c.host),
Index('floating_ips_project_id_idx', floating_ips.c.project_id),
Index('floating_ips_pool_deleted_fixed_ip_id_project_id_idx',
@@ -1204,8 +1234,6 @@ def upgrade(migrate_engine):
Index('instances_task_state_updated_at_idx',
instances.c.task_state,
instances.c.updated_at),
Index('instances_host_deleted_idx', instances.c.host,
instances.c.deleted),
Index('instances_uuid_deleted_idx', instances.c.uuid,
instances.c.deleted),
Index('instances_host_node_deleted_idx', instances.c.host,
@@ -1213,6 +1241,8 @@ def upgrade(migrate_engine):
Index('instances_host_deleted_cleaned_idx',
instances.c.host, instances.c.deleted,
instances.c.cleaned),
Index('instances_project_id_deleted_idx',
instances.c.project_id, instances.c.deleted),
# instance_actions
Index('instance_uuid_idx', instance_actions.c.instance_uuid),
@@ -1240,7 +1270,7 @@ def upgrade(migrate_engine):
instance_type_extra_specs.c.key),
# iscsi_targets
Index('iscsi_targets_host_idx', iscsi_targets.c.host),
Index('iscsi_targets_volume_id_fkey', iscsi_targets.c.volume_id),
Index('iscsi_targets_host_volume_id_deleted_idx',
iscsi_targets.c.host, iscsi_targets.c.volume_id,
iscsi_targets.c.deleted),
@@ -1298,12 +1328,38 @@ def upgrade(migrate_engine):
Index('ix_quota_usages_user_id_deleted',
quota_usages.c.user_id, quota_usages.c.deleted),
# virtual_interfaces
Index('virtual_interfaces_instance_uuid_fkey',
virtual_interfaces.c.instance_uuid),
Index('virtual_interfaces_network_id_idx',
virtual_interfaces.c.network_id),
# volumes
Index('volumes_instance_uuid_idx', volumes.c.instance_uuid),
]
# MySQL specific indexes
if migrate_engine.name == 'mysql':
# created first (to preserve ordering for schema diffs)
# NOTE(stephenfin): For some reason, we have to put this within the if
# statement to avoid it being evaluated for the sqlite case. Even
# though we don't call create except in the MySQL case... Failure to do
# this will result in the following ugly error message:
#
# sqlalchemy.exc.OperationalError: (sqlite3.OperationalError) no such
# index: instance_type_id
#
# Yeah, I don't get it either...
mysql_pre_indexes = [
Index(
'instance_type_id', instance_type_projects.c.instance_type_id),
Index('instance_uuid', instance_system_metadata.c.instance_uuid),
Index('usage_id', reservations.c.usage_id),
Index(
'security_group_id',
security_group_instance_association.c.security_group_id),
]
for index in mysql_pre_indexes:
index.create(migrate_engine)
@@ -1314,10 +1370,6 @@ def upgrade(migrate_engine):
"source_node(100), dest_node(100), status)")
migrate_engine.execute(sql)
# PostgreSQL specific indexes
if migrate_engine.name == 'postgresql':
Index('address', fixed_ips.c.address).create()
POSTGRES_INDEX_SKIPS = []
MYSQL_INDEX_SKIPS = [
@@ -1356,11 +1408,6 @@ def upgrade(migrate_engine):
[security_groups.c.id],
'security_group_instance_association_ibfk_1',
],
[
[compute_nodes.c.service_id],
[services.c.id],
'fk_compute_nodes_service_id',
],
[
[fixed_ips.c.instance_uuid],
[instances.c.uuid],
@@ -1414,7 +1461,7 @@ def upgrade(migrate_engine):
]
for fkey_pair in fkeys:
if migrate_engine.name == 'mysql':
if migrate_engine.name in ('mysql', 'sqlite'):
# For MySQL we name our fkeys explicitly
# so they match Havana
fkey = ForeignKeyConstraint(
@@ -1475,3 +1522,19 @@ def upgrade(migrate_engine):
shadow_table = Table('shadow_instance_extra', meta, autoload=True)
idx = Index('shadow_instance_extra_idx', shadow_table.c.instance_uuid)
idx.create(migrate_engine)
# 280_add_nullable_false_to_keypairs_name; this should apply to the shadow
# table also
# Note: Since we are altering name field, this constraint on name needs to
# first be dropped before we can alter name. We then re-create the same
# constraint.
UniqueConstraint(
'user_id', 'name', 'deleted', table=key_pairs,
name='uniq_key_pairs0user_id0name0deleted'
).drop()
key_pairs.c.name.alter(nullable=False)
UniqueConstraint(
'user_id', 'name', 'deleted', table=key_pairs,
name='uniq_key_pairs0user_id0name0deleted',
).create()

View File

@@ -29,7 +29,7 @@ from nova import exception
from nova.i18n import _
INIT_VERSION = {}
INIT_VERSION['main'] = 253
INIT_VERSION['main'] = 279
INIT_VERSION['api'] = 0
_REPOSITORY = {}

View File

@@ -35,7 +35,6 @@ For postgres on Ubuntu this can be done with the following commands::
import glob
import os
from migrate import UniqueConstraint
from migrate.versioning import repository
import mock
from oslo_db.sqlalchemy import enginefacade
@@ -46,14 +45,12 @@ from oslotest import timeout
import sqlalchemy
from sqlalchemy.engine import reflection
import sqlalchemy.exc
from sqlalchemy.sql import null
import testtools
from nova.db import migration
from nova.db.sqlalchemy import migrate_repo
from nova.db.sqlalchemy import migration as sa_migration
from nova.db.sqlalchemy import models
from nova import exception
from nova import test
from nova.tests import fixtures as nova_fixtures
@@ -163,11 +160,9 @@ class NovaMigrationsCheckers(test_migrations.ModelsMigrationsSync,
def _skippable_migrations(self):
special = [
254, # Juno
272, # NOOP migration due to revert
self.INIT_VERSION + 1,
]
juno_placeholders = list(range(255, 265))
kilo_placeholders = list(range(281, 291))
liberty_placeholders = list(range(303, 313))
mitaka_placeholders = list(range(320, 330))
@@ -184,7 +179,6 @@ class NovaMigrationsCheckers(test_migrations.ModelsMigrationsSync,
victoria_placeholders = list(range(413, 418))
return (special +
juno_placeholders +
kilo_placeholders +
liberty_placeholders +
mitaka_placeholders +
@@ -210,18 +204,8 @@ class NovaMigrationsCheckers(test_migrations.ModelsMigrationsSync,
# Chances are you don't meet the critera.
# Reviewers: DO NOT ALLOW THINGS TO BE ADDED HERE
exceptions = [
# 267 enforces non-nullable instance.uuid. This was mostly
# a special case because instance.uuid shouldn't be able
# to be nullable
267,
# 278 removes a FK restriction, so it's an alter operation
# that doesn't break existing users
278,
# 280 enforces non-null keypair name. This is really not
# something we should allow, but it's in the past
280,
# The base migration can do whatever it likes
self.INIT_VERSION + 1,
# 292 drops completely orphaned tables with no users, so
# it can be done without affecting anything.
@@ -234,11 +218,7 @@ class NovaMigrationsCheckers(test_migrations.ModelsMigrationsSync,
]
# Reviewers: DO NOT ALLOW THINGS TO BE ADDED HERE
# NOTE(danms): We only started requiring things be additive in
# kilo, so ignore all migrations before that point.
KILO_START = 265
if version >= KILO_START and version not in exceptions:
if version not in exceptions:
banned = ['Table', 'Column']
else:
banned = None
@@ -248,310 +228,6 @@ class NovaMigrationsCheckers(test_migrations.ModelsMigrationsSync,
def test_walk_versions(self):
self.walk_versions(snake_walk=False, downgrade=False)
def _check_265(self, engine, data):
    """Verify migration 265 left exactly one index per covered column set."""
    # Assert that only one index exists that covers columns
    # host and deleted
    instances = oslodbutils.get_table(engine, 'instances')
    self.assertEqual(1, len([i for i in instances.indexes
                             if [c.name for c in i.columns][:2] ==
                             ['host', 'deleted']]))
    # and only one index covers host column
    iscsi_targets = oslodbutils.get_table(engine, 'iscsi_targets')
    self.assertEqual(1, len([i for i in iscsi_targets.indexes
                             if [c.name for c in i.columns][:1] ==
                             ['host']]))
def _check_266(self, engine, data):
    """Verify migration 266 created the 'tags' table with String columns."""
    self.assertColumnExists(engine, 'tags', 'resource_id')
    self.assertColumnExists(engine, 'tags', 'tag')
    table = oslodbutils.get_table(engine, 'tags')
    self.assertIsInstance(table.c.resource_id.type,
                          sqlalchemy.types.String)
    self.assertIsInstance(table.c.tag.type,
                          sqlalchemy.types.String)
def _pre_upgrade_267(self, engine):
    """Seed rows that migration 267 must preserve (not delete)."""
    # Create a fixed_ips row with a null instance_uuid (if not already
    # there) to make sure that's not deleted.
    fixed_ips = oslodbutils.get_table(engine, 'fixed_ips')
    fake_fixed_ip = {'id': 1}
    fixed_ips.insert().execute(fake_fixed_ip)
    # Create an instance record with a valid (non-null) UUID so we make
    # sure we don't do something stupid and delete valid records.
    instances = oslodbutils.get_table(engine, 'instances')
    fake_instance = {'id': 1, 'uuid': 'fake-non-null-uuid'}
    instances.insert().execute(fake_instance)
    # Add a null instance_uuid entry for the volumes table
    # since it doesn't have a foreign key back to the instances table.
    volumes = oslodbutils.get_table(engine, 'volumes')
    fake_volume = {'id': '9c3c317e-24db-4d57-9a6f-96e6d477c1da'}
    volumes.insert().execute(fake_volume)
def _check_267(self, engine, data):
    """Verify migration 267 made instances.uuid non-nullable.

    Checks that the fixed_ips row with a NULL instance_uuid survived,
    that instances.uuid is now NOT NULL with the expected unique
    constraint, and that the NULL-instance_uuid volumes row (no FK back
    to instances) was left untouched.
    """
    # Make sure the column is non-nullable and the UC exists.
    fixed_ips = oslodbutils.get_table(engine, 'fixed_ips')
    self.assertTrue(fixed_ips.c.instance_uuid.nullable)
    fixed_ip = fixed_ips.select(fixed_ips.c.id == 1).execute().first()
    self.assertIsNone(fixed_ip.instance_uuid)
    instances = oslodbutils.get_table(engine, 'instances')
    self.assertFalse(instances.c.uuid.nullable)
    inspector = reflection.Inspector.from_engine(engine)
    constraints = inspector.get_unique_constraints('instances')
    constraint_names = [constraint['name'] for constraint in constraints]
    self.assertIn('uniq_instances0uuid', constraint_names)
    # Make sure the instances record with the valid uuid is still there.
    instance = instances.select(instances.c.id == 1).execute().first()
    self.assertIsNotNone(instance)
    # Check that the null entry in the volumes table is still there since
    # we skipped tables that don't have FK's back to the instances table.
    volumes = oslodbutils.get_table(engine, 'volumes')
    self.assertTrue(volumes.c.instance_uuid.nullable)
    # BUG FIX: the original selected from fixed_ips while filtering on
    # volumes.c.id, producing an accidental cross-table query; the row
    # under test lives in the volumes table.
    volume = volumes.select(
        volumes.c.id == '9c3c317e-24db-4d57-9a6f-96e6d477c1da'
    ).execute().first()
    self.assertIsNone(volume.instance_uuid)
def test_migration_267(self):
    """Exercise the failure path of migration 267 directly.

    Runs the repo up to 266, plants a consoles row with a NULL
    instance_uuid, asserts 267 fails with ValidationError, removes the
    bad row, and asserts 267 then succeeds and purges such entries.
    """
    # This is separate from test_walk_versions so we can test the case
    # where there are non-null instance_uuid entries in the database which
    # cause the 267 migration to fail.
    engine = self.migrate_engine
    self.migration_api.version_control(
        engine, self.REPOSITORY, self.INIT_VERSION)
    self.migration_api.upgrade(engine, self.REPOSITORY, 266)
    # Create a consoles record with a null instance_uuid so
    # we can test that the upgrade fails if that entry is found.
    # NOTE(mriedem): We use the consoles table since that's the only table
    # created in the 216 migration with a ForeignKey created on the
    # instance_uuid table for sqlite.
    consoles = oslodbutils.get_table(engine, 'consoles')
    fake_console = {'id': 1}
    consoles.insert().execute(fake_console)
    # NOTE(mriedem): We handle the 267 migration where we expect to
    # hit a ValidationError on the consoles table to have
    # a null instance_uuid entry
    ex = self.assertRaises(exception.ValidationError,
                           self.migration_api.upgrade,
                           engine, self.REPOSITORY, 267)
    self.assertIn("There are 1 records in the "
                  "'consoles' table where the uuid or "
                  "instance_uuid column is NULL.",
                  ex.kwargs['detail'])
    # Remove the consoles entry with the null instance_uuid column.
    rows = consoles.delete().where(
        consoles.c['instance_uuid'] == null()).execute().rowcount
    self.assertEqual(1, rows)
    # Now run the 267 upgrade again.
    self.migration_api.upgrade(engine, self.REPOSITORY, 267)
    # Make sure the consoles entry with the null instance_uuid
    # was deleted.
    console = consoles.select(consoles.c.id == 1).execute().first()
    self.assertIsNone(console)
def _check_268(self, engine, data):
    """Verify migration 268 added a String 'host' column to compute_nodes."""
    # We can only assert that the col exists, not the unique constraint
    # as the engine is running sqlite
    self.assertColumnExists(engine, 'compute_nodes', 'host')
    self.assertColumnExists(engine, 'shadow_compute_nodes', 'host')
    compute_nodes = oslodbutils.get_table(engine, 'compute_nodes')
    shadow_compute_nodes = oslodbutils.get_table(
        engine, 'shadow_compute_nodes')
    self.assertIsInstance(compute_nodes.c.host.type,
                          sqlalchemy.types.String)
    self.assertIsInstance(shadow_compute_nodes.c.host.type,
                          sqlalchemy.types.String)
def _check_269(self, engine, data):
    """Verify migration 269 added nullable Integer 'numa_node' columns."""
    self.assertColumnExists(engine, 'pci_devices', 'numa_node')
    self.assertColumnExists(engine, 'shadow_pci_devices', 'numa_node')
    pci_devices = oslodbutils.get_table(engine, 'pci_devices')
    shadow_pci_devices = oslodbutils.get_table(
        engine, 'shadow_pci_devices')
    self.assertIsInstance(pci_devices.c.numa_node.type,
                          sqlalchemy.types.Integer)
    self.assertTrue(pci_devices.c.numa_node.nullable)
    self.assertIsInstance(shadow_pci_devices.c.numa_node.type,
                          sqlalchemy.types.Integer)
    self.assertTrue(shadow_pci_devices.c.numa_node.nullable)
def _check_270(self, engine, data):
    """Verify migration 270 added a Text 'flavor' column to instance_extra."""
    self.assertColumnExists(engine, 'instance_extra', 'flavor')
    self.assertColumnExists(engine, 'shadow_instance_extra', 'flavor')
    instance_extra = oslodbutils.get_table(engine, 'instance_extra')
    shadow_instance_extra = oslodbutils.get_table(
        engine, 'shadow_instance_extra')
    self.assertIsInstance(instance_extra.c.flavor.type,
                          sqlalchemy.types.Text)
    self.assertIsInstance(shadow_instance_extra.c.flavor.type,
                          sqlalchemy.types.Text)
def _check_271(self, engine, data):
    """Verify migration 271's index renames/additions and removals."""
    self.assertIndexMembers(engine, 'block_device_mapping',
                            'snapshot_id', ['snapshot_id'])
    self.assertIndexMembers(engine, 'block_device_mapping',
                            'volume_id', ['volume_id'])
    self.assertIndexMembers(engine, 'dns_domains',
                            'dns_domains_project_id_idx',
                            ['project_id'])
    self.assertIndexMembers(engine, 'fixed_ips',
                            'network_id', ['network_id'])
    self.assertIndexMembers(engine, 'fixed_ips',
                            'fixed_ips_instance_uuid_fkey',
                            ['instance_uuid'])
    self.assertIndexMembers(engine, 'fixed_ips',
                            'fixed_ips_virtual_interface_id_fkey',
                            ['virtual_interface_id'])
    self.assertIndexMembers(engine, 'floating_ips',
                            'fixed_ip_id', ['fixed_ip_id'])
    self.assertIndexMembers(engine, 'iscsi_targets',
                            'iscsi_targets_volume_id_fkey', ['volume_id'])
    self.assertIndexMembers(engine, 'virtual_interfaces',
                            'virtual_interfaces_network_id_idx',
                            ['network_id'])
    self.assertIndexMembers(engine, 'virtual_interfaces',
                            'virtual_interfaces_instance_uuid_fkey',
                            ['instance_uuid'])
    # Removed on MySQL, never existed on other databases
    self.assertIndexNotExists(engine, 'dns_domains', 'project_id')
    self.assertIndexNotExists(engine, 'virtual_interfaces', 'network_id')
def _pre_upgrade_273(self, engine):
    """On SQLite, drop several unique constraints before migration 273.

    Migration 273 is expected to re-add them; dropping first proves it
    does so rather than relying on the constraints already existing.
    """
    if engine.name != 'sqlite':
        return
    # Drop a variety of unique constraints to ensure that the script
    # properly readds them back
    for table_name, constraint_name in [
            ('compute_nodes', 'uniq_compute_nodes0'
                              'host0hypervisor_hostname'),
            ('fixed_ips', 'uniq_fixed_ips0address0deleted'),
            ('instance_info_caches', 'uniq_instance_info_caches0'
                                     'instance_uuid'),
            ('instance_type_projects', 'uniq_instance_type_projects0'
                                       'instance_type_id0project_id0'
                                       'deleted'),
            ('pci_devices', 'uniq_pci_devices0compute_node_id0'
                            'address0deleted'),
            ('virtual_interfaces', 'uniq_virtual_interfaces0'
                                   'address0deleted')]:
        table = oslodbutils.get_table(engine, table_name)
        constraints = [c for c in table.constraints
                       if c.name == constraint_name]
        for cons in constraints:
            # Need to use sqlalchemy-migrate UniqueConstraint
            cons = UniqueConstraint(*[c.name for c in cons.columns],
                                    name=cons.name,
                                    table=table)
            cons.drop()
def _check_273(self, engine, data):
    """Verify migration 273 created the expected foreign keys.

    For each (source table, source column, destination table,
    destination column) pair, reflect the source table and assert an FK
    exists pointing at the destination.
    """
    for src_table, src_column, dst_table, dst_column in [
            ('fixed_ips', 'instance_uuid', 'instances', 'uuid'),
            ('block_device_mapping', 'instance_uuid', 'instances', 'uuid'),
            ('instance_info_caches', 'instance_uuid', 'instances', 'uuid'),
            ('instance_metadata', 'instance_uuid', 'instances', 'uuid'),
            ('instance_system_metadata', 'instance_uuid',
             'instances', 'uuid'),
            ('instance_type_projects', 'instance_type_id',
             'instance_types', 'id'),
            ('iscsi_targets', 'volume_id', 'volumes', 'id'),
            ('reservations', 'usage_id', 'quota_usages', 'id'),
            ('security_group_instance_association', 'instance_uuid',
             'instances', 'uuid'),
            ('security_group_instance_association', 'security_group_id',
             'security_groups', 'id'),
            ('virtual_interfaces', 'instance_uuid', 'instances', 'uuid'),
            ('compute_nodes', 'service_id', 'services', 'id'),
            ('instance_actions', 'instance_uuid', 'instances', 'uuid'),
            ('instance_faults', 'instance_uuid', 'instances', 'uuid'),
            ('migrations', 'instance_uuid', 'instances', 'uuid')]:
        # NOTE: src_table is rebound from name (str) to the reflected
        # Table object here.
        src_table = oslodbutils.get_table(engine, src_table)
        fkeys = {fk.parent.name: fk.column
                 for fk in src_table.foreign_keys}
        self.assertIn(src_column, fkeys)
        self.assertEqual(fkeys[src_column].table.name, dst_table)
        self.assertEqual(fkeys[src_column].name, dst_column)
def _check_274(self, engine, data):
    """Verify migration 274 replaced the instances 'project_id' index."""
    self.assertIndexMembers(engine, 'instances',
                            'instances_project_id_deleted_idx',
                            ['project_id', 'deleted'])
    self.assertIndexNotExists(engine, 'instances', 'project_id')
def _pre_upgrade_275(self, engine):
    """Seed one keypair row so _check_275 can verify the 'ssh' default."""
    # Create a keypair record so we can test that the upgrade will set
    # 'ssh' as default value in the new column for the previous keypair
    # entries.
    key_pairs = oslodbutils.get_table(engine, 'key_pairs')
    fake_keypair = {'name': 'test-migr'}
    key_pairs.insert().execute(fake_keypair)
def _check_275(self, engine, data):
    """Verify migration 275 added key_pairs.type defaulting to 'ssh'."""
    self.assertColumnExists(engine, 'key_pairs', 'type')
    self.assertColumnExists(engine, 'shadow_key_pairs', 'type')
    key_pairs = oslodbutils.get_table(engine, 'key_pairs')
    shadow_key_pairs = oslodbutils.get_table(engine, 'shadow_key_pairs')
    self.assertIsInstance(key_pairs.c.type.type,
                          sqlalchemy.types.String)
    self.assertIsInstance(shadow_key_pairs.c.type.type,
                          sqlalchemy.types.String)
    # Make sure the keypair entry will have the type 'ssh'
    key_pairs = oslodbutils.get_table(engine, 'key_pairs')
    keypair = key_pairs.select(
        key_pairs.c.name == 'test-migr').execute().first()
    self.assertEqual('ssh', keypair.type)
def _check_276(self, engine, data):
    """Verify migration 276 added a Text 'vcpu_model' column."""
    self.assertColumnExists(engine, 'instance_extra', 'vcpu_model')
    self.assertColumnExists(engine, 'shadow_instance_extra', 'vcpu_model')
    instance_extra = oslodbutils.get_table(engine, 'instance_extra')
    shadow_instance_extra = oslodbutils.get_table(
        engine, 'shadow_instance_extra')
    self.assertIsInstance(instance_extra.c.vcpu_model.type,
                          sqlalchemy.types.Text)
    self.assertIsInstance(shadow_instance_extra.c.vcpu_model.type,
                          sqlalchemy.types.Text)
def _check_277(self, engine, data):
    """Verify migration 277 added the fixed_ips deleted/allocated index."""
    self.assertIndexMembers(engine, 'fixed_ips',
                            'fixed_ips_deleted_allocated_updated_at_idx',
                            ['deleted', 'allocated', 'updated_at'])
def _check_278(self, engine, data):
    """Verify migration 278 dropped the compute_nodes service_id FK."""
    compute_nodes = oslodbutils.get_table(engine, 'compute_nodes')
    self.assertEqual(0, len([fk for fk in compute_nodes.foreign_keys
                             if fk.parent.name == 'service_id']))
    self.assertTrue(compute_nodes.c.service_id.nullable)
def _check_279(self, engine, data):
    """Verify migration 279 swapped the compute_nodes unique constraint."""
    inspector = reflection.Inspector.from_engine(engine)
    constraints = inspector.get_unique_constraints('compute_nodes')
    constraint_names = [constraint['name'] for constraint in constraints]
    self.assertNotIn('uniq_compute_nodes0host0hypervisor_hostname',
                     constraint_names)
    self.assertIn('uniq_compute_nodes0host0hypervisor_hostname0deleted',
                  constraint_names)
def _check_280(self, engine, data):
    """Verify migration 280 made key_pairs.name non-nullable."""
    key_pairs = oslodbutils.get_table(engine, 'key_pairs')
    self.assertFalse(key_pairs.c.name.nullable)
def _check_291(self, engine, data):
# NOTE(danms): This is a dummy migration that just does a consistency
# check

View File

@@ -234,47 +234,6 @@ class TestGetEngine(test.NoDBTestCase):
mock_get_engine.assert_called_once_with()
class TestFlavorCheck(test.TestCase):
def setUp(self):
    """Load the 291 migration module and an engine for the tests below."""
    super(TestFlavorCheck, self).setUp()
    self.context = context.get_admin_context()
    self.migration = importlib.import_module(
        'nova.db.sqlalchemy.migrate_repo.versions.'
        '291_enforce_flavors_migrated')
    self.engine = db_api.get_engine()
def test_upgrade_clean(self):
    """Migration 291 succeeds when no instance carries flavor metadata."""
    inst = objects.Instance(context=self.context,
                            uuid=uuidsentinel.fake,
                            user_id=self.context.user_id,
                            project_id=self.context.project_id,
                            system_metadata={'foo': 'bar'})
    inst.create()
    self.migration.upgrade(self.engine)
def test_upgrade_dirty(self):
    """Migration 291 fails while unmigrated flavor metadata remains.

    An instance whose system_metadata still contains
    'instance_type_id' must make the upgrade raise ValidationError.
    """
    inst = objects.Instance(context=self.context,
                            uuid=uuidsentinel.fake,
                            user_id=self.context.user_id,
                            project_id=self.context.project_id,
                            system_metadata={'foo': 'bar',
                                             'instance_type_id': 'foo'})
    inst.create()
    self.assertRaises(exception.ValidationError,
                      self.migration.upgrade, self.engine)
def test_upgrade_flavor_deleted_instances(self):
    """Migration 291 ignores deleted instances with flavor metadata."""
    inst = objects.Instance(context=self.context,
                            uuid=uuidsentinel.fake,
                            user_id=self.context.user_id,
                            project_id=self.context.project_id,
                            system_metadata={'foo': 'bar',
                                             'instance_type_id': 'foo'})
    inst.create()
    inst.destroy()
    self.migration.upgrade(self.engine)
class TestNewtonCheck(test.TestCase):
def setUp(self):
super(TestNewtonCheck, self).setUp()