Initial changes to enable new upgrades

Making initial changes to enable new upgrades. Most
of the changes are related to removing older upgrade code that
is no longer necessary (i.e. all the packstack to hiera
conversion code).

Change-Id: I8fe4c8c0d3f12fd7b4fc45b226bf969ffda72dc7
Story: 2002886
Task: 22847
Signed-off-by: Jack Ding <jack.ding@windriver.com>
Bart Wensley 2018-04-16 15:05:29 -05:00 committed by Jack Ding
parent 14cd49f916
commit 4d70f23c65
21 changed files with 224 additions and 3430 deletions
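
For context, a minimal standalone sketch (not part of the change itself) of the credential lookup that the new get_db_credentials() path in this commit performs: the service database user now comes from the prior release's static hiera data and the password from keyring, instead of from a packstack answer file. The paths and key names mirror the diff below; the function name and argument list are illustrative only.

import os
import keyring
import yaml

def lookup_db_credential(platform_path, from_release, hiera_user_key,
                         keyring_service, keyring_permdir):
    """Illustrative: return (username, password) for one service database."""
    static_file = os.path.join(platform_path, "puppet", from_release,
                               "hieradata", "static.yaml")
    with open(static_file, 'r') as f:
        static_config = yaml.load(f)  # PyYAML 3.x style, as in the diff
    # e.g. hiera_user_key = 'sysinv::db::postgresql::user'
    username = static_config[hiera_user_key]
    # keyring entries are stored under XDG_DATA_HOME = KEYRING_PERMDIR
    os.environ["XDG_DATA_HOME"] = keyring_permdir
    try:
        password = keyring.get_password(keyring_service, "database")
    finally:
        del os.environ["XDG_DATA_HOME"]
    return username, password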

View File

@ -9,7 +9,6 @@
#
import copy
import ConfigParser
import glob
import json
import psycopg2
@ -22,17 +21,16 @@ import sys
import tarfile
import tempfile
import time
import uuid
import keyring
import yaml
from sysinv.common import constants as sysinv_constants
# WARNING: The controller-1 upgrade is done before any packstack manifests
# WARNING: The controller-1 upgrade is done before any puppet manifests
# have been applied, so only the static entries from tsconfig can be used
# (the platform.conf file will not have been updated with dynamic values).
from tsconfig.tsconfig import (SW_VERSION, PLATFORM_PATH,
from tsconfig.tsconfig import (SW_VERSION, PLATFORM_PATH, KEYRING_PATH,
PLATFORM_CONF_FILE, PLATFORM_CONF_PATH,
CGCS_PATH, CONFIG_PATH, CONTROLLER_UPGRADE_FLAG,
CONTROLLER_UPGRADE_COMPLETE_FLAG,
@ -66,230 +64,80 @@ def gethostaddress(hostname):
return socket.getaddrinfo(hostname, None)[0][4][0]
def get_hiera_db_records(shared_services, packstack_config):
def get_db_credentials(shared_services, from_release):
"""
Returns the hiera records from the answerfile using the provided shared
services.
Returns the database credentials using the provided shared services.
"""
hiera_db_records = \
{'aodh': {'packstack_user_key': 'CONFIG_AODH_DB_USER',
'packstack_password_key': 'CONFIG_AODH_DB_PW',
'packstack_ks_user_key': 'CONFIG_AODH_KS_USER_NAME',
'packstack_ks_password_key': 'CONFIG_AODH_KS_PW'
db_credential_keys = \
{'aodh': {'hiera_user_key': 'aodh::db::postgresql::user',
'keyring_password_key': 'aodh',
},
'ceilometer': {'packstack_user_key': 'CONFIG_CEILOMETER_DB_USER',
'packstack_password_key': 'CONFIG_CEILOMETER_DB_PW',
'packstack_ks_user_key':
'CONFIG_CEILOMETER_KS_USER_NAME',
'packstack_ks_password_key':
'CONFIG_CEILOMETER_KS_PW'
'ceilometer': {'hiera_user_key': 'ceilometer::db::postgresql::user',
'keyring_password_key': 'ceilometer',
},
'heat': {'packstack_user_key': 'CONFIG_HEAT_DB_USER',
'packstack_password_key': 'CONFIG_HEAT_DB_PW',
'packstack_ks_user_key': 'CONFIG_HEAT_KS_USER_NAME',
'packstack_ks_password_key': 'CONFIG_HEAT_KS_PW',
'heat': {'hiera_user_key': 'heat::db::postgresql::user',
'keyring_password_key': 'heat',
},
'neutron': {'packstack_user_key': 'CONFIG_NEUTRON_DB_USER',
'packstack_password_key': 'CONFIG_NEUTRON_DB_PW',
'packstack_ks_user_key': 'CONFIG_NEUTRON_KS_USER_NAME',
'packstack_ks_password_key': 'CONFIG_NEUTRON_KS_PW'
'neutron': {'hiera_user_key': 'neutron::db::postgresql::user',
'keyring_password_key': 'neutron',
},
'nova': {'packstack_user_key': 'CONFIG_NOVA_DB_USER',
'packstack_password_key': 'CONFIG_NOVA_DB_PW',
'packstack_ks_user_key': 'CONFIG_NOVA_KS_USER_NAME',
'packstack_ks_password_key': 'CONFIG_NOVA_KS_PW'
'nova': {'hiera_user_key': 'nova::db::postgresql::user',
'keyring_password_key': 'nova',
},
'nova_api': {'packstack_user_key': 'CONFIG_NOVA_API_DB_USER',
'packstack_password_key': 'CONFIG_NOVA_API_DB_PW',
'nova_api': {'hiera_user_key': 'nova::db::postgresql_api::user',
'keyring_password_key': 'nova-api',
},
'sysinv': {'packstack_user_key': 'CONFIG_SYSINV_DB_USER',
'packstack_password_key': 'CONFIG_SYSINV_DB_PW',
'packstack_ks_user_key': 'CONFIG_SYSINV_KS_USER_NAME',
'packstack_ks_password_key': 'CONFIG_SYSINV_KS_PW'
'sysinv': {'hiera_user_key': 'sysinv::db::postgresql::user',
'keyring_password_key': 'sysinv',
},
'murano': {'hiera_user_key': 'murano::db::postgresql::user',
'keyring_password_key': 'murano',
},
'magnum': {'hiera_user_key': 'magnum::db::postgresql::user',
'keyring_password_key': 'magnum',
},
'panko': {'hiera_user_key': 'panko::db::postgresql::user',
'keyring_password_key': 'panko',
},
'ironic': {'hiera_user_key': 'ironic::db::postgresql::user',
'keyring_password_key': 'ironic',
},
'murano': {'packstack_user_key': 'CONFIG_MURANO_DB_USER',
'packstack_password_key': 'CONFIG_MURANO_DB_PW',
'packstack_ks_user_key': 'CONFIG_MURANO_KS_USER_NAME',
'packstack_ks_password_key': 'CONFIG_MURANO_KS_PW'
}
}
if sysinv_constants.SERVICE_TYPE_VOLUME not in shared_services:
hiera_db_records.update(
{'cinder': {'packstack_user_key': 'CONFIG_CINDER_DB_USER',
'packstack_password_key': 'CONFIG_CINDER_DB_PW',
'packstack_ks_user_key': 'CONFIG_CINDER_KS_USER_NAME',
'packstack_ks_password_key': 'CONFIG_CINDER_KS_PW'
db_credential_keys.update(
{'cinder': {'hiera_user_key': 'cinder::db::postgresql::user',
'keyring_password_key': 'cinder',
}})
if sysinv_constants.SERVICE_TYPE_IMAGE not in shared_services:
hiera_db_records.update(
{'glance': {'packstack_user_key': 'CONFIG_GLANCE_DB_USER',
'packstack_password_key': 'CONFIG_GLANCE_DB_PW',
'packstack_ks_user_key': 'CONFIG_GLANCE_KS_USER_NAME',
'packstack_ks_password_key': 'CONFIG_GLANCE_KS_PW'
db_credential_keys.update(
{'glance': {'hiera_user_key': 'glance::db::postgresql::user',
'keyring_password_key': 'glance',
}})
if sysinv_constants.SERVICE_TYPE_IDENTITY not in shared_services:
hiera_db_records.update(
{'keystone': {'packstack_user_key': 'CONFIG_KEYSTONE_DB_USER',
'packstack_password_key': 'CONFIG_KEYSTONE_DB_PW',
'packstack_ks_user_key':
'CONFIG_KEYSTONE_ADMIN_USERNAME',
'packstack_ks_password_key':
'CONFIG_KEYSTONE_ADMIN_PW'
db_credential_keys.update(
{'keystone': {'hiera_user_key':
'keystone::db::postgresql::user',
'keyring_password_key': 'keystone',
}})
for database, values in hiera_db_records.iteritems():
username = packstack_config.get(
'general', values['packstack_user_key'])
password = packstack_config.get(
'general', values['packstack_password_key'])
values.update({'username': username})
values.update({'password': password})
if database != 'nova_api':
# optional services like murano might not have the service user
# name configured in release 4
if packstack_config.has_option('general',
values['packstack_ks_user_key']):
ks_username = packstack_config.get(
'general', values['packstack_ks_user_key'])
else:
# default it to the service name, the user name will
# be overwritten when the service is enabled
ks_username = database
ks_password = packstack_config.get(
'general', values['packstack_ks_password_key'])
values.update({'ks_username': ks_username})
values.update({'ks_password': ks_password})
# For the Keystone admin password, always procure it
# from keyring as it may have changed from what was initially
# set in the Packstack config
if database == 'keystone':
ks_password = get_password_from_keyring('CGCS', 'admin')
values.update({'ks_password': ks_password})
# add heat auth encryption key and domain password
if database == 'heat':
auth_key = packstack_config.get(
'general', 'CONFIG_HEAT_AUTH_ENC_KEY')
domain_password = packstack_config.get(
'general', 'CONFIG_HEAT_DOMAIN_PASSWORD')
values.update({'auth_key': auth_key})
values.update({'domain_password': domain_password})
if database == 'neutron':
metadata_passwd = packstack_config.get(
'general', 'CONFIG_NEUTRON_METADATA_PW')
values.update({'metadata_passwd': metadata_passwd})
# The sysinv puppet code assumes the db user is in the format
# admin-<service>. These services used a different format in R4 so we
# will correct that here.
# For other services this would have the potential to break upgrades,
# however aodh and murano are only accessed from the active controller
# so we are safe to do this here
# TODO This check is for 17.06 upgrades only. Remove in R6
if database in ['aodh', 'murano']:
db_username = "admin-%s" % database
values.update({'username': db_username})
# Get the hiera data for the from release
hiera_path = os.path.join(PLATFORM_PATH, "puppet", from_release,
"hieradata")
static_file = os.path.join(hiera_path, "static.yaml")
with open(static_file, 'r') as file:
static_config = yaml.load(file)
# keystone admin user and password are always required,
# even for Non-Primary regions (where Keystone is shared)
if 'keystone' not in hiera_db_records:
ks_username = packstack_config.get('general',
'CONFIG_KEYSTONE_ADMIN_USERNAME')
# For the Keystone admin password, always procure it
# from keyring as it may have changed from what was initially
# set in the Packstack config
ks_password = get_password_from_keyring('CGCS', 'admin')
hiera_db_records.update({
'keystone': {'ks_username': ks_username,
'ks_password': ks_password}
})
db_credentials = dict()
for database, values in db_credential_keys.iteritems():
username = static_config[values['hiera_user_key']]
password = utils.get_password_from_keyring(
values['keyring_password_key'], "database")
db_credentials[database] = {'username': username, 'password': password}
# add keystone admin token, it might not be needed
admin_token = packstack_config.get('general',
'CONFIG_KEYSTONE_ADMIN_TOKEN')
hiera_db_records['keystone'].update({'admin_token': admin_token})
# add patching keystone user and password
patching_ks_passwd = packstack_config.get('general',
'CONFIG_PATCHING_KS_PW')
patching_ks_username = packstack_config.get(
'general', 'CONFIG_PATCHING_KS_USER_NAME')
hiera_db_records.update({
'patching': {'ks_username': patching_ks_username,
'ks_password': patching_ks_passwd}
})
# add NFV password
nfv_ks_pwd = packstack_config.get('general', 'CONFIG_NFV_KS_PW')
hiera_db_records.update({'vim': {'ks_password': nfv_ks_pwd}})
# The mtce keystone user is new in 18.xx and requires a password to
# be generated for the new mtce user
mtce_ks_pw = uuid.uuid4().hex[:10] + "TiC1*"
hiera_db_records.update({
'mtce': {'ks_username': 'mtce',
'ks_password': mtce_ks_pw}
})
# The magnum db is new and requires a password to be generated
# and the username set for magnum to access the DB
magnum_db_pw = uuid.uuid4().hex[:16]
magnum_ks_pw = uuid.uuid4().hex[:10] + "TiC1*"
hiera_db_records.update({
'magnum': {'username': 'admin-magnum',
'password': magnum_db_pw,
'ks_password': magnum_ks_pw}
})
# generate magnum domain password
magnum_dks_pw = uuid.uuid4().hex[:10] + "TiC1*"
hiera_db_records.update({
'magnum-domain': {'ks_password': magnum_dks_pw}
})
# The panko db is new and requires a password to be generated
# and the username set for panko to access the DB
panko_db_pw = uuid.uuid4().hex[:16]
panko_ks_pw = uuid.uuid4().hex[:10] + "TiC1*"
hiera_db_records.update({
'panko': {'username': 'admin-panko',
'password': panko_db_pw,
'ks_password': panko_ks_pw}
})
# The ironic db is new and requires a password to be generated
# and the username set for ironic to access the DB
ironic_db_pw = uuid.uuid4().hex[:16]
ironic_ks_pw = uuid.uuid4().hex[:10] + "TiC1*"
hiera_db_records.update({
'ironic': {'username': 'admin-ironic',
'password': ironic_db_pw,
'ks_password': ironic_ks_pw}
})
# The placement keystone user is new in 18.xx and needs to be added to
# keystone. The 17.06 upgrades patch has already created a placement
# password in keyring and that password has been used in placement config
# in nova.conf on all 17.06 compute nodes so we use that instead of
# generating a new one.
# This currently does not support region mode.
placement_ks_username = 'placement'
placement_ks_pw = get_password_from_keyring(placement_ks_username,
'services')
platform_float_ip = packstack_config.get('general',
'CONFIG_PLATFORM_FLOAT_IP')
platform_oam_float_ip = packstack_config.get(
'general', 'CONFIG_PLATFORM_FLOAT_OAM_IP')
placement_admin_url = 'http://%s:8778' % platform_float_ip
placement_internal_url = 'http://%s:8778' % platform_float_ip
placement_public_url = 'http://%s:8778' % platform_oam_float_ip
hiera_db_records.update({
'placement': {'ks_password': placement_ks_pw,
'ks_username': placement_ks_username,
'ks_admin_url': placement_admin_url,
'ks_internal_url': placement_internal_url,
'ks_public_url': placement_public_url}
})
return hiera_db_records
return db_credentials
def get_shared_services():
@ -304,7 +152,7 @@ def get_shared_services():
if row is None:
LOG.error("Failed to fetch i_system data")
raise psycopg2.ProgrammingError("Failed to fetch i_system data")
cap_obj = eval(row[0])
cap_obj = json.loads(row[0])
region_config = cap_obj.get('region_config', None)
if region_config:
shared_services = cap_obj.get('shared_services',
@ -313,10 +161,10 @@ def get_shared_services():
return shared_services
def get_connection_string(hiera_db_records, database):
def get_connection_string(db_credentials, database):
""" Generates a connection string for a given database"""
username = hiera_db_records[database]['username']
password = hiera_db_records[database]['password']
username = db_credentials[database]['username']
password = db_credentials[database]['password']
return DB_CONNECTION_FORMAT % (username, password, database)
@ -471,16 +319,15 @@ def migrate_sysinv_data(from_release, to_release):
LOG.exception("Failed to copy sysinv platform dir to new version")
raise
# Get the packstack config using the from release's answerfile
from_config = os.path.join(PLATFORM_PATH, "packstack", from_release,
"config")
answer_file = os.path.join(from_config, "packstack-answers.txt")
packstack_config = ConfigParser.RawConfigParser()
packstack_config.optionxform = lambda option: option
packstack_config.read(answer_file)
# Get the hiera data for the from release
hiera_path = os.path.join(PLATFORM_PATH, "puppet", from_release,
"hieradata")
static_file = os.path.join(hiera_path, "static.yaml")
with open(static_file, 'r') as file:
static_config = yaml.load(file)
username = packstack_config.get('general', 'CONFIG_SYSINV_DB_USER')
password = packstack_config.get('general', 'CONFIG_SYSINV_DB_PW')
username = static_config["sysinv::db::postgresql::user"]
password = utils.get_password_from_keyring("sysinv", "database")
# We need a bare bones /etc/sysinv/sysinv.conf file in order to do the
# sysinv database migration and then generate the upgrades manifests.
@ -612,12 +459,12 @@ def import_databases(from_release, to_release, from_path=None, simplex=False):
raise
def create_databases(from_release, to_release, hiera_db_records):
def create_databases(from_release, to_release, db_credentials):
""" Creates databases. """
LOG.info("Creating new databases")
if from_release == '17.06':
# Create databases that are new in the 17.xx release
if from_release == '18.03':
# Create databases that are new in this release
conn = psycopg2.connect('dbname=postgres user=postgres')
@ -626,15 +473,18 @@ def create_databases(from_release, to_release, hiera_db_records):
conn.set_isolation_level(
psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
databases_to_create = ['magnum', 'panko', 'ironic']
databases_to_create = []
if not databases_to_create:
return
with conn:
with conn.cursor() as cur:
for database in databases_to_create:
print "Creating %s database" % database
username = psycopg2.extensions.AsIs(
'\"%s\"' % hiera_db_records[database]['username'])
'\"%s\"' % db_credentials[database]['username'])
db_name = psycopg2.extensions.AsIs('\"%s\"' % database)
password = hiera_db_records[database]['password']
password = db_credentials[database]['password']
try:
# Here we create the new database and the role for it
@ -652,14 +502,6 @@ def create_databases(from_release, to_release, hiera_db_records):
"(%s : %s) Exception: %s" %
(database, username, ex))
raise
try:
cur.execute('CREATE DATABASE "nova_cell0"')
cur.execute('GRANT ALL ON DATABASE nova_cell0 TO '
'"admin-nova";')
except Exception as ex:
LOG.exception("Failed to create nova_cell0 database." +
"Exception: %s" % ex)
raise
def migrate_sysinv_database():
@ -680,8 +522,8 @@ def migrate_sysinv_database():
raise
def migrate_databases(from_release, shared_services, hiera_db_records,
packstack_config):
def migrate_databases(from_release, shared_services, db_credentials,
simplex=False):
""" Migrates databases. """
devnull = open(os.devnull, 'w')
@ -690,62 +532,59 @@ def migrate_databases(from_release, shared_services, hiera_db_records,
# run their database migration.
with open("/etc/ceilometer/ceilometer-dbsync.conf", "w") as f:
f.write("[database]\n")
f.write(get_connection_string(hiera_db_records, 'ceilometer'))
f.write(get_connection_string(db_credentials, 'ceilometer'))
with open("/etc/heat/heat-dbsync.conf", "w") as f:
f.write("[database]\n")
f.write(get_connection_string(hiera_db_records, 'heat'))
f.write(get_connection_string(db_credentials, 'heat'))
with open("/etc/neutron/neutron-dbsync.conf", "w") as f:
f.write("[database]\n")
f.write(get_connection_string(hiera_db_records, 'neutron'))
f.write(get_connection_string(db_credentials, 'neutron'))
with open("/etc/nova/nova-dbsync.conf", "w") as f:
f.write("[database]\n")
f.write(get_connection_string(hiera_db_records, 'nova'))
f.write(get_connection_string(db_credentials, 'nova'))
f.write("[api_database]\n")
f.write(get_connection_string(hiera_db_records, 'nova_api'))
f.write(get_connection_string(db_credentials, 'nova_api'))
with open("/etc/aodh/aodh-dbsync.conf", "w") as f:
f.write("[database]\n")
f.write(get_connection_string(hiera_db_records, 'aodh'))
f.write(get_connection_string(db_credentials, 'aodh'))
with open("/etc/murano/murano-dbsync.conf", "w") as f:
f.write("[database]\n")
f.write(get_connection_string(hiera_db_records, 'murano'))
f.write(get_connection_string(db_credentials, 'murano'))
with open("/etc/magnum/magnum-dbsync.conf", "w") as f:
f.write("[database]\n")
f.write(get_connection_string(hiera_db_records, 'magnum'))
f.write(get_connection_string(db_credentials, 'magnum'))
with open("/etc/panko/panko-dbsync.conf", "w") as f:
f.write("[database]\n")
f.write(get_connection_string(hiera_db_records, 'panko'))
f.write(get_connection_string(db_credentials, 'panko'))
with open("/etc/ironic/ironic-dbsync.conf", "w") as f:
f.write("[database]\n")
f.write(get_connection_string(hiera_db_records, 'ironic'))
f.write(get_connection_string(db_credentials, 'ironic'))
if sysinv_constants.SERVICE_TYPE_VOLUME not in shared_services:
with open("/etc/cinder/cinder-dbsync.conf", "w") as f:
f.write("[database]\n")
f.write(get_connection_string(hiera_db_records, 'cinder'))
f.write(get_connection_string(db_credentials, 'cinder'))
if sysinv_constants.SERVICE_TYPE_IMAGE not in shared_services:
with open("/etc/glance/glance-dbsync.conf", "w") as f:
f.write("[database]\n")
f.write(get_connection_string(hiera_db_records, 'glance'))
f.write(get_connection_string(db_credentials, 'glance'))
if sysinv_constants.SERVICE_TYPE_IDENTITY not in shared_services:
with open("/etc/keystone/keystone-dbsync.conf", "w") as f:
f.write("[database]\n")
f.write(get_connection_string(hiera_db_records, 'keystone'))
if from_release == '17.06':
nova_map_cells(packstack_config)
f.write(get_connection_string(db_credentials, 'keystone'))
migrate_commands = [
# Migrate aodh (new in 16.xx)
# Migrate aodh (new in R3)
('aodh',
'aodh-dbsync --config-file /etc/aodh/aodh-dbsync.conf'),
# Migrate ceilometer
@ -762,21 +601,21 @@ def migrate_databases(from_release, shared_services, hiera_db_records,
# Migrate nova
('nova',
'nova-manage --config-file /etc/nova/nova-dbsync.conf db sync'),
# Migrate nova_api (new in 16.xx)
# Migrate nova_api (new in R3)
('nova',
'nova-manage --config-file /etc/nova/nova-dbsync.conf api_db sync'),
# Migrate murano (new in 17.06)
# Migrate murano (new in R4)
('murano',
'murano-db-manage --config-file /etc/murano/murano-dbsync.conf ' +
'upgrade'),
# Migrate magnum (added to release after 17.06)
# Migrate magnum (new in R5)
('magnum',
'magnum-db-manage --config-file /etc/magnum/magnum-dbsync.conf ' +
'upgrade'),
# Migrate panko (added to release after 17.06)
# Migrate panko (new in R5)
('panko',
'panko-dbsync --config-file /etc/panko/panko-dbsync.conf'),
# Migrate ironic (added to release after 17.06)
# Migrate ironic (new in R5)
('ironic',
'ironic-dbsync --config-file /etc/ironic/ironic-dbsync.conf ' +
'upgrade'),
@ -785,14 +624,6 @@ def migrate_databases(from_release, shared_services, hiera_db_records,
if sysinv_constants.SERVICE_TYPE_VOLUME not in shared_services:
migrate_commands += [
# Migrate cinder to ocata + groups.replication_status
('cinder',
'cinder-manage --config-file /etc/cinder/cinder-dbsync.conf ' +
'db sync 96'),
# Run online_data_migrations needed by ocata release
('cinder',
'cinder-manage --config-file /etc/cinder/cinder-dbsync.conf ' +
'db online_data_migrations --ignore_state'),
# Migrate cinder to latest version
('cinder',
'cinder-manage --config-file /etc/cinder/cinder-dbsync.conf ' +
@ -814,16 +645,7 @@ def migrate_databases(from_release, shared_services, hiera_db_records,
# To avoid a deadlock during keystone contract we will use offline
# migration for simplex upgrades. Other upgrades will have to use
# another method to resolve the deadlock
try:
system_mode = packstack_config.get('general', 'CONFIG_SYSTEM_MODE')
except ConfigParser.NoOptionError:
# We may not have the system mode if the system was upgraded from
# R2 or R3. Those systems can only be duplex so we will use that
# value
LOG.info("Missing value CONFIG_SYSTEM_MODE. Using duplex.")
system_mode = sysinv_constants.SYSTEM_MODE_DUPLEX
if system_mode != sysinv_constants.SYSTEM_MODE_SIMPLEX:
if not simplex:
migrate_commands += [
# Migrate keystone
#
@ -884,9 +706,6 @@ def migrate_databases(from_release, shared_services, hiera_db_records,
"online_data_migrations. Output: %s", e.output)
raise
if from_release == '17.06':
nova_fix_db_connect(packstack_config)
# The database entry for controller-1 will be set to whatever it was when
# the sysinv database was dumped on controller-0. Update the state and
# from/to load to what it should be when it becomes active.
@ -901,109 +720,6 @@ def migrate_databases(from_release, shared_services, hiera_db_records,
raise
def _packstack_insert_l2population_mechanism_driver(packstack_config):
"""Update the packstack configuration with an updated list of Neutron
mechanism drivers. In releases following 17.06, the set of drivers
has been updated to include the l2population driver. This new driver is
responsible for distributing static tunnel endpoint information to
compute nodes."""
mechanism_drivers_key = 'CONFIG_NEUTRON_ML2_MECHANISM_DRIVERS'
mechanism_driver = 'l2population'
mechanism_drivers = packstack_config.get('general', mechanism_drivers_key)
if mechanism_driver not in mechanism_drivers.split(','):
mechanism_drivers += ',%s' % mechanism_driver
packstack_config.set('general', mechanism_drivers_key, mechanism_drivers)
def get_password_from_keyring(service, username):
"""Retrieve password from keyring"""
password = ""
os.environ["XDG_DATA_HOME"] = constants.KEYRING_PERMDIR
try:
password = keyring.get_password(service, username)
except Exception as e:
LOG.exception("Received exception when attempting to get password "
"for service %s, username %s: %s" %
(service, username, e))
raise
finally:
del os.environ["XDG_DATA_HOME"]
return password
def store_service_password(hiera_db_records):
"""Store the service user password in keyring"""
os.environ["XDG_DATA_HOME"] = constants.KEYRING_PERMDIR
for service, values in hiera_db_records.iteritems():
if 'password' in values:
# set nova-api service name since the database name is different
# than service name and sysinv looks for service name in
# keyring
if service == 'nova_api':
service = 'nova-api'
keyring.set_password(service, 'database', values['password'])
if 'ks_password' in values:
keyring.set_password(service, 'services', values['ks_password'])
del os.environ["XDG_DATA_HOME"]
def nova_map_cells(packstack_config):
devnull = open(os.devnull, 'w')
# First have to db sync on nova db to upgrade it fully
try:
cmd = ['nova-manage --config-file /etc/nova/nova-dbsync.conf ' +
'db sync ']
subprocess.check_call(cmd, shell=True, stdout=devnull, stderr=devnull)
except Exception as ex:
LOG.exception("Failed to execute command: '%s' during upgrade "
"processing, Exception: %s" % (cmd, ex))
raise
# Now run simple_cell_setup to map nova_cell0 and default cell for nova db.
# Then map hosts and instances to default cell.
transport_username = packstack_config.get('general',
'CONFIG_AMQP_AUTH_USER')
transport_password = packstack_config.get('general',
'CONFIG_AMQP_AUTH_PASSWORD')
transport_host = packstack_config.get('general', 'CONFIG_AMQP_HOST')
transport_url = "rabbit://%s:%s@%s:5672" % (
transport_username, transport_password, transport_host)
try:
cmd = ['nova-manage --config-file /etc/nova/nova-dbsync.conf ' +
'cell_v2 simple_cell_setup --transport-url ' + transport_url]
subprocess.check_call(cmd, shell=True, stdout=devnull, stderr=devnull)
except Exception as ex:
LOG.exception("Failed to execute command: '%s' during upgrade "
"processing, Exception: %s" % (cmd, ex))
raise
LOG.info("Finished nova_cell0 database creation and mapping cells & hosts")
def nova_fix_db_connect(packstack_config):
nova_db_username = packstack_config.get('general', 'CONFIG_NOVA_DB_USER')
nova_db_password = packstack_config.get('general', 'CONFIG_NOVA_DB_PW')
nova_db_host = packstack_config.get('general', 'CONFIG_DB_HOST')
nova_db_connect = "postgresql+psycopg2://%s:%s@%s/nova" % (
nova_db_username, nova_db_password, nova_db_host)
nova_cell0_db_connect = "postgresql+psycopg2://%s:%s@%s/nova_cell0" % (
nova_db_username, nova_db_password, nova_db_host)
conn = psycopg2.connect('dbname=nova_api user=postgres')
conn.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
with conn:
with conn.cursor() as cur:
try:
cur.execute("UPDATE cell_mappings SET database_connection=%s "
"WHERE name='cell0';", (nova_cell0_db_connect,))
cur.execute("UPDATE cell_mappings SET database_connection=%s "
"WHERE name IS NULL;", (nova_db_connect,))
except Exception as ex:
LOG.exception("Failed to fix nova cells database "
"connections. Exception: %s" % ex)
raise
LOG.info("Finished fixup of nova cells database connections.")
def get_controller_1_uuid():
""" Read in the uuid from the sysinv db"""
conn = psycopg2.connect("dbname=sysinv user=postgres")
@ -1038,6 +754,37 @@ def update_platform_conf_file(uuid):
fd.write("UUID=" + uuid + "\n")
def migrate_hiera_data(from_release, to_release):
""" Migrate hiera data. """
LOG.info("Migrating hiera data")
from_hiera_path = os.path.join(PLATFORM_PATH, "puppet", from_release,
"hieradata")
to_hiera_path = constants.HIERADATA_PERMDIR
os.makedirs(to_hiera_path)
# Copy only the static yaml files. The other yaml files will be generated
# when required.
for f in ['secure_static.yaml', 'static.yaml']:
shutil.copy(os.path.join(from_hiera_path, f), to_hiera_path)
# Make any necessary updates to the static yaml files.
if from_release == "18.03":
# Update the static.yaml file
static_file = os.path.join(constants.HIERADATA_PERMDIR, "static.yaml")
with open(static_file, 'r') as yaml_file:
static_config = yaml.load(yaml_file)
static_config.update({
'platform::params::software_version': SW_VERSION,
'openstack::client::credentials::params::keyring_directory':
KEYRING_PATH,
'openstack::client::credentials::params::keyring_file':
os.path.join(KEYRING_PATH, '.CREDENTIAL'),
})
with open(static_file, 'w') as yaml_file:
yaml.dump(static_config, yaml_file, default_flow_style=False)
def upgrade_controller(from_release, to_release):
""" Executed on the release N+1 side upgrade controller-1. """
@ -1110,35 +857,28 @@ def upgrade_controller(from_release, to_release):
shared_services = get_shared_services()
# Before we can generate the hiera records from the
# answer file, we need to set up Keyring as this is
# going to be used to retrieve the Keystone admin password
# Create /tmp/python_keyring - used by keystone manifest.
shutil.copytree(os.path.join(PLATFORM_PATH, ".keyring", to_release,
"python_keyring"),
"/tmp/python_keyring")
# Migrate packstack answer file to hiera records
packstack_config = utils.get_packstack_config(from_release)
hiera_db_records = get_hiera_db_records(shared_services, packstack_config)
utils.generate_upgrade_hiera_record(to_release,
hiera_db_records,
packstack_config)
# Migrate hiera data
migrate_hiera_data(from_release, to_release)
utils.add_upgrade_entries_to_hiera_data(from_release)
# Get database credentials
db_credentials = get_db_credentials(shared_services, from_release)
# Create any new databases
print "Creating new databases..."
create_databases(from_release, to_release, hiera_db_records)
if from_release == '17.06':
migrate_db_users(hiera_db_records, packstack_config)
create_databases(from_release, to_release, db_credentials)
print "Migrating databases..."
# Migrate sysinv database
migrate_sysinv_database()
# Migrate databases
migrate_databases(from_release, shared_services, hiera_db_records,
packstack_config)
migrate_databases(from_release, shared_services, db_credentials)
print "Applying configuration..."
@ -1164,11 +904,8 @@ def upgrade_controller(from_release, to_release):
LOG.exception("Failed to stop postgres service")
raise
# store service user password
store_service_password(hiera_db_records)
# Apply "upgrades" manifests
LOG.info("Applying upgrades manifests")
# Apply "upgrades" manifest
LOG.info("Applying upgrades manifest")
myip = gethostaddress(utils.CONTROLLER_1_HOSTNAME)
utils.apply_upgrade_manifest(myip)
@ -1176,8 +913,8 @@ def upgrade_controller(from_release, to_release):
shutil.rmtree("/tmp/puppet")
shutil.rmtree("/tmp/python_keyring")
# Generate "regular" manifests
LOG.info("Generating manifests for %s" % utils.CONTROLLER_1_HOSTNAME)
# Generate config to be used by "regular" manifest
LOG.info("Generating config for %s" % utils.CONTROLLER_1_HOSTNAME)
try:
cutils.create_system_config()
cutils.create_host_config(utils.CONTROLLER_1_HOSTNAME)
@ -1339,8 +1076,8 @@ def extract_relative_file(archive, member_name, dest_dir):
def extract_data_from_archive(archive, staging_dir, from_release, to_release):
"""Extracts the data from the archive to the staging directory"""
tmp_platform_path = os.path.join(staging_dir, "opt", "platform")
tmp_packstack_path = os.path.join(tmp_platform_path, "packstack",
from_release)
tmp_puppet_path = os.path.join(tmp_platform_path, "puppet",
from_release)
tmp_sysinv_path = os.path.join(tmp_platform_path, "sysinv", from_release)
tmp_keyring_path = os.path.join(tmp_platform_path, ".keyring",
from_release)
@ -1353,15 +1090,14 @@ def extract_data_from_archive(archive, staging_dir, from_release, to_release):
dir_options = stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP | \
stat.S_IROTH | stat.S_IXOTH
os.makedirs(tmp_packstack_path, dir_options)
os.makedirs(tmp_puppet_path, dir_options)
os.makedirs(tmp_config_path, dir_options)
os.makedirs(tmp_sysinv_path, dir_options)
os.makedirs(tmp_keyring_path, dir_options)
os.symlink(tmp_platform_path, PLATFORM_PATH)
extract_relative_directory(archive, "packstack",
tmp_packstack_path)
extract_relative_directory(archive, "puppet", tmp_puppet_path)
extract_relative_directory(archive, ".keyring", tmp_keyring_path)
extract_relative_directory(archive, "config/pxelinux.cfg",
tmp_pxelinux_path)
@ -1416,46 +1152,6 @@ def extract_postgres_data(archive):
extract_relative_directory(archive, "postgres", postgres_data_dir)
def migrate_db_users(hiera_db_records, packstack_config):
""" This is only needed for upgrades from 17.06.
Some of the postgres users were in the form <service> not
admin-<service> so we'll correct that here.
"""
conn = psycopg2.connect('dbname=postgres user=postgres')
# Postgres won't allow transactions around database create operations
# so we set the connection to autocommit
conn.set_isolation_level(
psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
users_to_migrate = []
if (packstack_config.get('general', 'CONFIG_MURANO_DB_USER') == 'murano'):
users_to_migrate.append('murano')
if (packstack_config.get('general', 'CONFIG_AODH_DB_USER') == 'aodh'):
users_to_migrate.append('aodh')
with conn:
with conn.cursor() as cur:
for user in users_to_migrate:
LOG.info("Migrating user %s" % user)
old_username = psycopg2.extensions.AsIs('\"%s\"' % user)
new_username = psycopg2.extensions.AsIs(
'\"%s\"' % hiera_db_records[user]['username'])
password = hiera_db_records[user]['password']
try:
# We need to rename the user, then update the password, as
# the password is cleared during the rename.
cur.execute('ALTER ROLE %s RENAME TO %s',
(old_username, new_username))
cur.execute('ALTER ROLE %s PASSWORD %s',
(new_username, password))
except Exception as ex:
LOG.exception("Failed to migrate user. " +
"(%s to %s) Exception: %s" %
(user, new_username, ex))
raise
def migrate_platform_conf(staging_dir):
""" Migrate platform.conf """
temp_platform_conf_path = os.path.join(staging_dir, 'platform.conf')
@ -1691,11 +1387,9 @@ def upgrade_controller_simplex(backup_file):
# Simplex configurations can not have shared services
shared_services = []
# Migrate packstack answer file to hiera records
packstack_config = utils.get_packstack_config(from_release)
hiera_db_records = get_hiera_db_records(shared_services, packstack_config)
utils.generate_simplex_upgrade_hiera_record(to_release, hiera_db_records,
packstack_config)
# Migrate hiera data
migrate_hiera_data(from_release, to_release)
db_credentials = get_db_credentials(shared_services, from_release)
os.unlink(PLATFORM_PATH)
@ -1729,20 +1423,17 @@ def upgrade_controller_simplex(backup_file):
import_databases(from_release, to_release, utils.POSTGRES_PATH,
simplex=True)
if from_release == '17.06':
migrate_db_users(hiera_db_records, packstack_config)
# Create any new databases
print_log_info("Creating new databases...")
create_databases(from_release, to_release, hiera_db_records)
create_databases(from_release, to_release, db_credentials)
print_log_info("Migrating databases...")
# Migrate sysinv database
migrate_sysinv_database()
# Migrate databases
migrate_databases(from_release, shared_services, hiera_db_records,
packstack_config)
migrate_databases(from_release, shared_services, db_credentials,
simplex=True)
print_log_info("Applying configuration...")
@ -1758,9 +1449,6 @@ def upgrade_controller_simplex(backup_file):
backup_restore.configure_loopback_interface(archive)
print_log_info("Store Keyring...")
store_service_password(hiera_db_records)
print_log_info("Creating configs...")
cutils.create_system_config()
cutils.create_host_config()
@ -1774,6 +1462,9 @@ def upgrade_controller_simplex(backup_file):
runtime_filename = os.path.join(staging_dir, 'runtime.yaml')
utils.create_simplex_runtime_config(runtime_filename)
if not os.path.isfile(runtime_filename):
# There is no runtime yaml file to apply
runtime_filename = None
print_log_info("Applying manifest...")
cutils.apply_manifest(controller_0_address,

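As a usage note for the migrate_databases() section above, a hedged sketch of how one of the per-service dbsync config files is assembled from the db_credentials dict. The value of DB_CONNECTION_FORMAT is not shown in this diff; the template below is an assumption modelled on the sysinv connection string built in migrate_sysinv_data() (postgresql://user:password@localhost/dbname).

# Assumed template -- the real DB_CONNECTION_FORMAT constant is defined
# outside the hunks shown above.
DB_CONNECTION_FORMAT = "connection=postgresql://%s:%s@127.0.0.1/%s\n"

def write_dbsync_conf(path, db_credentials, database):
    """Illustrative: write the bare-bones config used to run <service>-dbsync."""
    username = db_credentials[database]['username']
    password = db_credentials[database]['password']
    with open(path, "w") as f:
        f.write("[database]\n")
        f.write(DB_CONNECTION_FORMAT % (username, password, database))

# e.g. write_dbsync_conf("/etc/heat/heat-dbsync.conf", db_credentials, "heat")
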
View File

@ -280,7 +280,7 @@ def abort_upgrade(from_load, to_load, upgrade):
os.path.join(tsc.CGCS_PATH, "ironic", to_load),
os.path.join(tsc.PLATFORM_PATH, "nfv/vim", to_load),
os.path.join(tsc.PLATFORM_PATH, ".keyring", to_load),
os.path.join(tsc.PLATFORM_PATH, "packstack", to_load),
os.path.join(tsc.PLATFORM_PATH, "puppet", to_load),
os.path.join(tsc.PLATFORM_PATH, "sysinv", to_load),
os.path.join(tsc.CGCS_PATH, "ceilometer", to_load),
os.path.join(tsc.CONFIG_PATH, 'upgrades')
@ -352,7 +352,7 @@ def complete_upgrade(from_load, to_load):
os.path.join(tsc.CGCS_PATH, "ironic", from_load),
os.path.join(tsc.PLATFORM_PATH, "nfv/vim", from_load),
os.path.join(tsc.PLATFORM_PATH, ".keyring", from_load),
os.path.join(tsc.PLATFORM_PATH, "packstack", from_load),
os.path.join(tsc.PLATFORM_PATH, "puppet", from_load),
os.path.join(tsc.PLATFORM_PATH, "sysinv", from_load),
]

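The abort_upgrade() and complete_upgrade() hunks above only show the directory lists being updated (the packstack path replaced by the puppet path); the loop that removes them lies outside the hunks. A hedged sketch of that cleanup pattern, under the assumption that each per-load directory is simply removed if present:

import os
import shutil

def remove_load_dirs(load_dirs):
    """Illustrative: remove per-load directories, tolerating ones never created."""
    for directory in load_dirs:
        if os.path.isdir(directory):
            shutil.rmtree(directory, ignore_errors=True)
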
View File

@ -9,18 +9,16 @@
# and during the upgrade of controller-1.
#
import keyring
import os
import subprocess
import tempfile
import uuid
import yaml
import ConfigParser
# WARNING: The controller-1 upgrade is done before any packstack manifests
# WARNING: The controller-1 upgrade is done before any puppet manifests
# have been applied, so only the static entries from tsconfig can be used.
# (the platform.conf file will not have been updated with dynamic values).
from tsconfig.tsconfig import (SW_VERSION, PLATFORM_PATH,
KEYRING_PATH, CONFIG_PATH)
from tsconfig.tsconfig import SW_VERSION, PLATFORM_PATH
from configutilities import DEFAULT_DOMAIN_NAME
from controllerconfig import utils as cutils
@ -89,10 +87,33 @@ def get_db_connection(hiera_db_records, database):
username, password, 'localhost', database)
def get_upgrade_token(hiera_db_records,
packstack_config,
def get_password_from_keyring(service, username):
"""Retrieve password from keyring"""
password = ""
os.environ["XDG_DATA_HOME"] = constants.KEYRING_PERMDIR
try:
password = keyring.get_password(service, username)
except Exception as e:
LOG.exception("Received exception when attempting to get password "
"for service %s, username %s: %s" %
(service, username, e))
raise
finally:
del os.environ["XDG_DATA_HOME"]
return password
def get_upgrade_token(from_release,
config,
secure_config):
# Get the system hiera data from the from release
from_hiera_path = os.path.join(PLATFORM_PATH, "puppet", from_release,
"hieradata")
system_file = os.path.join(from_hiera_path, "system.yaml")
with open(system_file, 'r') as file:
system_config = yaml.load(file)
# during a controller-1 upgrade, keystone is running
# on the controller UNIT IP, however the service catalog
# that was migrated from controller-0 still lists the
@ -104,27 +125,30 @@ def get_upgrade_token(hiera_db_records,
# providing a bypass endpoint.
keystone_upgrade_url = "http://{}:5000/{}".format(
'127.0.0.1',
packstack_config.get('general', 'CONFIG_KEYSTONE_API_VERSION'))
system_config['openstack::keystone::params::api_version'])
try:
admin_user_domain = packstack_config.get(
'general', 'CONFIG_ADMIN_USER_DOMAIN_NAME')
except ConfigParser.NoOptionError:
admin_user_domain = system_config.get(
'openstack::client::params::admin_user_domain')
if admin_user_domain is None:
# This value wasn't present in R2. So may be missing in upgrades from
# that release
LOG.info("CONFIG_ADMIN_USER_DOMAIN_NAME key not found. Using Default.")
LOG.info("openstack::client::params::admin_user_domain key not found. "
"Using Default.")
admin_user_domain = DEFAULT_DOMAIN_NAME
try:
admin_project_domain = packstack_config.get(
'general', 'CONFIG_ADMIN_PROJECT_DOMAIN_NAME')
except ConfigParser.NoOptionError:
admin_project_domain = system_config.get(
'openstack::client::params::admin_project_domain')
if admin_project_domain is None:
# This value wasn't present in R2. So may be missing in upgrades from
# that release
LOG.info("CONFIG_ADMIN_PROJECT_DOMAIN_NAME key not found. Using "
"Default.")
LOG.info("openstack::client::params::admin_project_domain key not "
"found. Using Default.")
admin_project_domain = DEFAULT_DOMAIN_NAME
admin_password = get_password_from_keyring("CGCS", "admin")
admin_username = system_config.get(
'openstack::client::params::admin_username')
# the upgrade token command
keystone_upgrade_token = (
"openstack "
@ -137,8 +161,8 @@ def get_upgrade_token(hiera_db_records,
"--os-interface internal "
"--os-identity-api-version 3 "
"token issue -c id -f value".format(
packstack_config.get('general', 'CONFIG_KEYSTONE_ADMIN_USERNAME'),
hiera_db_records['keystone']['ks_password'],
admin_username,
admin_password,
keystone_upgrade_url,
admin_user_domain,
admin_project_domain
@ -156,488 +180,26 @@ def get_upgrade_token(hiera_db_records,
})
def get_platform_config(packstack_config,
to_release,
config,
secure_config):
# TODO(TLIU): for now set the hiera option for puppet-keystone
# Not sure whether it is better to use env instead
config.update({
'platform::params::software_version': to_release
})
def add_upgrade_entries_to_hiera_data(from_release):
""" Adds upgrade entries to the hiera data """
amqp_passwd = packstack_config.get('general', 'CONFIG_AMQP_AUTH_PASSWORD')
postgres_password = packstack_config.get('general', 'CONFIG_POSTGRESQL_PW')
secure_config.update({
'platform::amqp::params::auth_password': amqp_passwd,
'platform::postgresql::params::password': postgres_password})
wrsroot_password = packstack_config.get('general', 'CONFIG_WRSROOT_PW')
try:
wrsroot_password_age = packstack_config.get('general',
'CONFIG_WRSROOT_PW_AGE')
except ConfigParser.NoOptionError:
# This value wasn't present in R2. So may be missing in upgrades from
# that release
LOG.info("CONFIG_WRSROOT_PW_AGE key not found. Setting value to 45")
wrsroot_password_age = constants.WRSROOT_MAX_PASSWORD_AGE
secure_config.update({
'platform::users::params::wrsroot_password': wrsroot_password,
'platform::users::params::wrsroot_password_max_age':
wrsroot_password_age
})
ceph_cluster_id = packstack_config.get('general',
'CONFIG_CEPH_CLUSTER_UUID')
config.update({
'platform::ceph::params::cluster_uuid': ceph_cluster_id
})
try:
ceph_pwd = packstack_config.get('general',
'CONFIG_CEPH_OBJECT_GATEWAY_KS_PW')
except ConfigParser.NoOptionError:
# This value wasn't present in R2. So may be missing in upgrades from
# that release
LOG.info("CONFIG_CEPH_OBJECT_GATEWAY_KS_PW key not found. Generating "
"a new value")
ceph_pwd = uuid.uuid4().hex[:10] + "TiC1*"
secure_config.update({
'platform::ceph::params::rgw_admin_password': ceph_pwd
})
ldap_hash = packstack_config.get('general',
'CONFIG_LDAPADMIN_HASHED_PASSWORD')
ldap_pwd = packstack_config.get('general',
'CONFIG_LDAPADMIN_PASSWORD')
secure_config.update({
'platform::ldap::params::admin_hashed_pw': ldap_hash,
'platform::ldap::params::admin_pw': ldap_pwd
})
def get_service_user_config(hiera_db_records,
packstack_config,
config,
secure_config):
# aodh user
config.update({
'aodh::db::postgresql::user': hiera_db_records['aodh']['username']
})
secure_config.update({
'aodh::auth::auth_password': hiera_db_records['aodh']['ks_password'],
'aodh::db::postgresql::password': hiera_db_records['aodh']['password'],
'aodh::keystone::auth::password':
hiera_db_records['aodh']['ks_password'],
'aodh::keystone::authtoken::password':
hiera_db_records['aodh']['ks_password']
})
# ceilometer user
config.update({
'ceilometer::db::postgresql::user':
hiera_db_records['ceilometer']['username'],
})
secure_config.update({
'ceilometer::agent::auth::auth_password':
hiera_db_records['ceilometer']['ks_password'],
'ceilometer::db::postgresql::password':
hiera_db_records['ceilometer']['password'],
'ceilometer::keystone::auth::password':
hiera_db_records['ceilometer']['ks_password'],
'ceilometer::keystone::authtoken::password':
hiera_db_records['ceilometer']['ks_password']
})
# keystone user
secure_config.update({
'keystone::admin_password':
hiera_db_records['keystone']['ks_password'],
'keystone::admin_token':
hiera_db_records['keystone']['admin_token'],
'keystone::roles::admin::password':
hiera_db_records['keystone']['ks_password']
})
if 'keystone' in hiera_db_records:
config.update({
'CONFIG_KEYSTONE_ADMIN_USERNAME':
hiera_db_records['keystone']['ks_username'],
'keystone::db::postgresql::user':
hiera_db_records['keystone']['username']
})
secure_config.update({
'CONFIG_KEYSTONE_ADMIN_PW':
hiera_db_records['keystone']['ks_password'],
'keystone::database_connection':
get_db_connection(hiera_db_records, 'keystone'),
'keystone::db::postgresql::password':
hiera_db_records['keystone']['password']
})
if 'cinder' in hiera_db_records:
# cinder user
config.update({
'cinder::db::postgresql::user':
hiera_db_records['cinder']['username']
})
secure_config.update({
'cinder::db::postgresql::password':
hiera_db_records['cinder']['password'],
'cinder::keystone::auth::password':
hiera_db_records['cinder']['ks_password'],
'cinder::keystone::authtoken::password':
hiera_db_records['cinder']['ks_password']
})
if 'glance' in hiera_db_records:
# glance user
config.update({
'glance::api::authtoken::username':
hiera_db_records['glance']['ks_username'],
'glance::db::postgresql::user':
hiera_db_records['glance']['username'],
'glance::registry::authtoken::username':
hiera_db_records['glance']['ks_username']
})
secure_config.update({
'glance::api::authtoken::password':
hiera_db_records['glance']['ks_password'],
'glance::db::postgresql::password':
hiera_db_records['glance']['password'],
'glance::keystone::auth::password':
hiera_db_records['glance']['ks_password'],
'glance::keystone::authtoken::password':
hiera_db_records['glance']['ks_password'],
'glance::registry::authtoken::password':
hiera_db_records['glance']['ks_password']
})
# heat user
config.update({
'heat::db::postgresql::user':
hiera_db_records['heat']['username']
})
secure_config.update({
'heat::db::postgresql::password':
hiera_db_records['heat']['password'],
'heat::engine::auth_encryption_key':
hiera_db_records['heat']['auth_key'],
'heat::keystone::auth::password':
hiera_db_records['heat']['ks_password'],
'heat::keystone::auth_cfn::password':
hiera_db_records['heat']['ks_password'],
'heat::keystone::authtoken::password':
hiera_db_records['heat']['ks_password'],
'heat::keystone::domain::domain_password':
hiera_db_records['heat']['domain_password']
})
# neutron
config.update({
'neutron::db::postgresql::user':
hiera_db_records['neutron']['username']
})
secure_config.update({
'neutron::agents::metadata::shared_secret':
hiera_db_records['neutron']['metadata_passwd'],
'neutron::db::postgresql::password':
hiera_db_records['neutron']['password'],
'neutron::keystone::auth::password':
hiera_db_records['neutron']['ks_password'],
'neutron::keystone::authtoken::password':
hiera_db_records['neutron']['ks_password'],
'neutron::server::notifications::password':
hiera_db_records['nova']['ks_password']
})
# nova
# in 18.xx the placement user is new so we have to add additional
# config to set up endpoint urls in keystone. This currently does
# not support region mode.
auth_region = packstack_config.get('general',
'CONFIG_KEYSTONE_REGION')
config.update({
'nova::db::postgresql::user':
hiera_db_records['nova']['username'],
'nova::db::postgresql_api::user':
hiera_db_records['nova_api']['username'],
'nova::keystone::auth_placement::auth_name':
hiera_db_records['placement']['ks_username'],
'nova::keystone::auth_placement::admin_url':
hiera_db_records['placement']['ks_admin_url'],
'nova::keystone::auth_placement::internal_url':
hiera_db_records['placement']['ks_internal_url'],
'nova::keystone::auth_placement::public_url':
hiera_db_records['placement']['ks_public_url'],
'nova::keystone::auth_placement::region': auth_region
})
secure_config.update({
'nova::api::neutron_metadata_proxy_shared_secret':
hiera_db_records['neutron']['metadata_passwd'],
'nova::db::postgresql::password':
hiera_db_records['nova']['password'],
'nova::db::postgresql_api::password':
hiera_db_records['nova_api']['password'],
'nova::keystone::auth::password':
hiera_db_records['nova']['ks_password'],
'nova::keystone::authtoken::password':
hiera_db_records['nova']['ks_password'],
'nova::network::neutron::neutron_password':
hiera_db_records['neutron']['ks_password'],
'nova_api_proxy::config::admin_password':
hiera_db_records['nova']['ks_password'],
'nova::keystone::auth_placement::password':
hiera_db_records['placement']['ks_password'],
'nova::placement::password':
hiera_db_records['placement']['ks_password']
})
# patching user
config.update({
'patching::api::keystone_user':
hiera_db_records['patching']['ks_username']
})
secure_config.update({
'patching::api::keystone_password':
hiera_db_records['patching']['ks_password'],
'patching::keystone::auth::password':
hiera_db_records['patching']['ks_password'],
'patching::keystone::authtoken::password':
hiera_db_records['patching']['ks_password']
})
# sysinv
sysinv_database_connection = "postgresql://%s:%s@%s/%s" % (
hiera_db_records['sysinv']['username'],
hiera_db_records['sysinv']['password'],
'localhost',
'sysinv'
)
config.update({
'sysinv::db::postgresql::user':
hiera_db_records['sysinv']['username']
})
secure_config.update({
'sysinv::api::keystone_password':
hiera_db_records['sysinv']['ks_password'],
'sysinv::database_connection': sysinv_database_connection,
'sysinv::db::postgresql::password':
hiera_db_records['sysinv']['password'],
'sysinv::keystone::auth::password':
hiera_db_records['sysinv']['ks_password']
})
# murano
config.update({
'murano::db::postgresql::user':
hiera_db_records['murano']['username']
})
config.update({
'murano::db::postgresql::password':
hiera_db_records['murano']['password'],
'murano::keystone::auth::password':
hiera_db_records['murano']['ks_password'],
'murano::keystone::authtoken::password':
hiera_db_records['murano']['ks_password'],
'murano::admin_password':
hiera_db_records['murano']['ks_password']
})
try:
admin_user_domain = packstack_config.get(
'general', 'CONFIG_ADMIN_USER_DOMAIN_NAME')
except ConfigParser.NoOptionError:
# This value wasn't present in R2. So may be missing in upgrades from
# that release
LOG.info("CONFIG_ADMIN_USER_DOMAIN_NAME key not found. Using Default.")
admin_user_domain = DEFAULT_DOMAIN_NAME
try:
admin_project_domain = packstack_config.get(
'general', 'CONFIG_ADMIN_PROJECT_DOMAIN_NAME')
except ConfigParser.NoOptionError:
# This value wasn't present in R2. So may be missing in upgrades from
# that release
LOG.info("CONFIG_ADMIN_PROJECT_DOMAIN_NAME key not found. Using "
"Default.")
admin_project_domain = DEFAULT_DOMAIN_NAME
config.update({
'openstack::client::params::admin_username':
hiera_db_records['keystone']['ks_username'],
'openstack::client::params::admin_user_domain':
admin_user_domain,
'openstack::client::params::admin_project_domain':
admin_project_domain,
})
secure_config.update({
'openstack::murano::params::auth_password':
hiera_db_records['murano']['ks_password']
})
# magnum
config.update({
'magnum::db::postgresql::user':
hiera_db_records['magnum']['username']
})
secure_config.update({
'magnum::db::postgresql::password':
hiera_db_records['magnum']['password'],
'magnum::keystone::auth::password':
hiera_db_records['magnum']['ks_password'],
'magnum::keystone::authtoken::password':
hiera_db_records['magnum']['ks_password'],
'magnum::keystone::domain::domain_password':
hiera_db_records['magnum-domain']['ks_password']
})
# mtc
# project and domains are also required for manifest to create the user
auth_project = packstack_config.get('general',
'CONFIG_SERVICE_TENANT_NAME')
try:
auth_user_domain = packstack_config.get(
'general', 'CONFIG_SERVICE_USER_DOMAIN_NAME')
except ConfigParser.NoOptionError:
# This value wasn't present in R2. So may be missing in upgrades from
# that release
LOG.info("CONFIG_SERVICE_USER_DOMAIN_NAME key not found. Using "
"Default.")
auth_user_domain = DEFAULT_DOMAIN_NAME
try:
auth_project_domain = packstack_config.get(
'general', 'CONFIG_SERVICE_PROJECT_DOMAIN_NAME')
except ConfigParser.NoOptionError:
# This value wasn't present in R2. So may be missing in upgrades from
# that release
LOG.info("CONFIG_SERVICE_PROJECT_DOMAIN_NAME key not found. Using "
"Default.")
auth_project_domain = DEFAULT_DOMAIN_NAME
config.update({
'platform::mtce::params::auth_username':
hiera_db_records['mtce']['ks_username'],
'platform::mtce::params::auth_project': auth_project,
'platform::mtce::params::auth_user_domain': auth_user_domain,
'platform::mtce::params::auth_project_domain': auth_project_domain
})
secure_config.update({
'platform::mtce::params::auth_pw':
hiera_db_records['mtce']['ks_password'],
})
# nfv
secure_config.update({
'nfv::keystone::auth::password':
hiera_db_records['vim']['ks_password']
})
# ironic
config.update({
'ironic::db::postgresql::user':
hiera_db_records['ironic']['username'],
})
secure_config.update({
'ironic::db::postgresql::password':
hiera_db_records['ironic']['password'],
'ironic::keystone::auth::password':
hiera_db_records['ironic']['ks_password'],
'ironic::keystone::authtoken::password':
hiera_db_records['ironic']['ks_password'],
'ironic::api::authtoken::password':
hiera_db_records['ironic']['ks_password']
})
# panko
config.update({
'panko::db::postgresql::user':
hiera_db_records['panko']['username']
})
secure_config.update({
'panko::db::postgresql::password':
hiera_db_records['panko']['password'],
'panko::keystone::auth::password':
hiera_db_records['panko']['ks_password'],
'panko::keystone::authtoken::password':
hiera_db_records['panko']['ks_password']
})
def get_nova_ssh_keys(config, secure_config):
# retrieve the nova ssh keys
ssh_config_dir = os.path.join(CONFIG_PATH, 'ssh_config')
migration_key = os.path.join(ssh_config_dir, 'nova_migration_key')
system_host_key = os.path.join(ssh_config_dir, 'system_host_key')
if not os.path.isdir(ssh_config_dir):
LOG.error("ssh_config directory %s not found" % ssh_config_dir)
return config
# Read the public/private migration keys
with open(migration_key) as fp:
migration_private = fp.read().strip()
with open('%s.pub' % migration_key) as fp:
migration_public = fp.read().strip().split()[1]
# Read the public/private host keys
with open(system_host_key) as fp:
host_private = fp.read().strip()
with open('%s.pub' % system_host_key) as fp:
host_header, host_public, _ = fp.read().strip().split()
# Add our pre-generated system host key to /etc/ssh/ssh_known_hosts
ssh_keys = {
'system_host_key': {
'ensure': 'present',
'name': '*',
'host_aliases': [],
'type': host_header,
'key': host_public
}
}
migration_key_type = 'ssh-rsa'
host_key_type = 'ssh-ecdsa'
secure_config.update({
'openstack::nova::compute::ssh_keys': ssh_keys,
'openstack::nova::compute::host_key_type': host_key_type,
'openstack::nova::compute::host_private_key': host_private,
'openstack::nova::compute::host_public_key': host_public,
'openstack::nova::compute::host_public_header': host_header,
'openstack::nova::compute::migration_key_type': migration_key_type,
'openstack::nova::compute::migration_private_key':
migration_private,
'openstack::nova::compute::migration_public_key':
migration_public,
})
def get_openstack_config(packstack_config, config, secure_config):
horizon_key = packstack_config.get('general',
'CONFIG_HORIZON_SECRET_KEY')
config.update({
'openstack::client::credentials::params::keyring_base':
os.path.dirname(KEYRING_PATH),
'openstack::client::credentials::params::keyring_directory':
KEYRING_PATH,
'openstack::client::credentials::params::keyring_file':
os.path.join(KEYRING_PATH, '.CREDENTIAL'),
})
secure_config.update({
'openstack::horizon::params::secret_key': horizon_key
})
get_nova_ssh_keys(config, secure_config)
def write_hieradata(config, secure_config):
filename = 'static.yaml'
secure_filename = 'secure_static.yaml'
path = constants.HIERADATA_PERMDIR
# Get the hiera data for this release
filepath = os.path.join(path, filename)
with open(filepath, 'r') as file:
config = yaml.load(file)
secure_filepath = os.path.join(path, secure_filename)
with open(secure_filepath, 'r') as file:
secure_config = yaml.load(file)
# Get a token and update the config
get_upgrade_token(from_release, config, secure_config)
# Update the hiera data on disk
try:
os.makedirs(path)
filepath = os.path.join(path, filename)
fd, tmppath = tempfile.mkstemp(dir=path, prefix=filename,
text=True)
with open(tmppath, 'w') as f:
@ -649,7 +211,6 @@ def write_hieradata(config, secure_config):
raise
try:
secure_filepath = os.path.join(path, secure_filename)
fd, tmppath = tempfile.mkstemp(dir=path, prefix=secure_filename,
text=True)
with open(tmppath, 'w') as f:
@ -661,80 +222,14 @@ def write_hieradata(config, secure_config):
raise
def generate_simplex_upgrade_hiera_record(to_release, hiera_db_records,
packstack_config):
""" generate static records from the packstack config. """
LOG.info("Migrating packstack answer file to hiera data")
config = {}
secure_config = {}
get_platform_config(packstack_config,
to_release,
config,
secure_config)
get_service_user_config(hiera_db_records,
packstack_config,
config,
secure_config)
get_openstack_config(packstack_config,
config,
secure_config)
write_hieradata(config, secure_config)
def generate_upgrade_hiera_record(to_release, hiera_db_records,
packstack_config):
""" generate static records from the packstack config. """
LOG.info("Migrating packstack answer file to hiera data")
config = {}
secure_config = {}
config.update({'platform::params::controller_upgrade': True})
get_platform_config(packstack_config,
to_release,
config,
secure_config)
get_service_user_config(hiera_db_records,
packstack_config,
config,
secure_config)
get_openstack_config(packstack_config,
config,
secure_config)
get_upgrade_token(hiera_db_records,
packstack_config,
config,
secure_config)
write_hieradata(config, secure_config)
def create_simplex_runtime_config(filename):
""" Create any runtime parameters needed for simplex upgrades"""
config = {}
# We need to disable nova cellv2 setup as this was done during the data
# migration
config.update({'nova::db::sync_api::cellv2_setup': False})
# Here is an example from a previous release...
# config.update({'nova::db::sync_api::cellv2_setup': False})
cutils.create_manifest_runtime_config(filename, config)
def get_packstack_config(software_release):
from_config = os.path.join(PLATFORM_PATH, "packstack", software_release,
"config")
answer_file = os.path.join(from_config, "packstack-answers.txt")
packstack_config = ConfigParser.RawConfigParser()
# Preserve the case in the answer file
packstack_config.optionxform = lambda option: option
try:
packstack_config.read(answer_file)
except Exception:
LOG.exception("Error parsing answer file %s" % answer_file)
raise
return packstack_config
def apply_upgrade_manifest(controller_address):
"""Apply puppet upgrade manifest files."""

View File

@ -1,86 +0,0 @@
#!/usr/bin/env python
# Copyright (c) 2018 Wind River Systems, Inc.
#
# The right to copy, distribute, modify, or otherwise make use
# of this software may be licensed only pursuant to the terms
# of an applicable Wind River license agreement.
#
# This migration script copies the inode capabilities reserved
# field, which contain tpm_data to the tpm_data field in the
# tpmdevices DB table
import sys
import psycopg2
from controllerconfig.common import log
from psycopg2.extras import RealDictCursor
LOG = log.get_logger(__name__)
def main():
action = None
from_release = None
to_release = None # noqa
arg = 1
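# Upgrade migration scripts are invoked with positional arguments:
# <from_release> <to_release> <action>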
while arg < len(sys.argv):
if arg == 1:
from_release = sys.argv[arg]
elif arg == 2:
to_release = sys.argv[arg] # noqa
elif arg == 3:
action = sys.argv[arg]
else:
print ("Invalid option %s." % sys.argv[arg])
return 1
arg += 1
log.configure()
if from_release == "17.06" and action == "migrate":
try:
LOG.info("performing sysinv TPM Device migration from release "
"%s to %s with action: %s" %
(from_release, to_release, action))
copy_sysinv_tpm_data()
except Exception as ex:
LOG.exception(ex)
print ex
return 1
# We will update for all controller hosts.
# We stow the TPM data in R4, in the inode.capabilities
# field since that is the only JSONEncodedDict field that's
# field since that is the only JSONEncodedDict field that's
# organized by hostid and vacant in R4.
def copy_sysinv_tpm_data():
conn = psycopg2.connect("dbname='sysinv' user='postgres'")
with conn:
with conn.cursor(cursor_factory=RealDictCursor) as cur:
cur.execute("SELECT id FROM i_host WHERE "
"personality='controller';")
ctrhosts = cur.fetchall()
if ctrhosts is None or len(ctrhosts) == 0:
LOG.exception(
"Failed to fetch controller host information")
raise
for ctrhost in ctrhosts:
# we may have multiple nodes per host, and
# we only populate one of them (per host) with
# the tpm_data.
cur.execute("SELECT capabilities FROM i_node WHERE "
"forihostid='%s' AND capabilities!='{}';" %
ctrhost['id'])
tpm_data = cur.fetchone()
if tpm_data and 'capabilities' in tpm_data:
tpm_data = tpm_data['capabilities']
LOG.info("Updating tpm_data for host '%s'" % ctrhost['id'])
cur.execute("UPDATE tpmdevice SET tpm_data='%s' WHERE "
"host_id='%s' AND tpm_data is null ;" %
(tpm_data, ctrhost['id']))
# clear the capabilities field for all hosts
cur.execute("UPDATE i_node SET capabilities='{}';")
if __name__ == "__main__":
sys.exit(main())

View File

@ -1,92 +0,0 @@
#!/usr/bin/env python
# Copyright (c) 2017 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# This script will add neutron hosts for each controller
import psycopg2
import sys
from sysinv.common import constants
from psycopg2.extras import RealDictCursor
from controllerconfig.common import log
from tsconfig.tsconfig import system_mode
LOG = log.get_logger(__name__)
def main():
action = None
from_release = None
to_release = None # noqa
arg = 1
while arg < len(sys.argv):
if arg == 1:
from_release = sys.argv[arg]
elif arg == 2:
to_release = sys.argv[arg] # noqa
elif arg == 3:
action = sys.argv[arg]
else:
print ("Invalid option %s." % sys.argv[arg])
return 1
arg += 1
log.configure()
if from_release == "17.06" and action == "migrate":
try:
neutron_create_controller_hosts()
except Exception as ex:
LOG.exception(ex)
print ex
return 1
def get_controller(conn, hostname):
with conn:
with conn.cursor(cursor_factory=RealDictCursor) as cur:
cur.execute("SELECT * FROM i_host WHERE hostname=%s;",
(hostname,))
row = cur.fetchone()
if row is None:
LOG.exception("Failed to fetch %s host_id" % hostname)
raise
return row
def create_neutron_host_if_not_exists(conn, sysinv_host):
with conn:
with conn.cursor(cursor_factory=RealDictCursor) as cur:
cur.execute("SELECT * FROM hosts WHERE name=%s;",
(sysinv_host['hostname'],))
row = cur.fetchone()
if row is None:
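# Reuse the sysinv host uuid as the neutron host id so the two
# databases stay correlated.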
cur.execute("INSERT INTO hosts "
"(id, name, availability, created_at) "
"VALUES (%s, %s, %s, %s);",
(sysinv_host['uuid'], sysinv_host['hostname'],
"down", sysinv_host['created_at']))
def neutron_create_controller_hosts():
simplex = (system_mode == constants.SYSTEM_MODE_SIMPLEX)
sysinv_conn = psycopg2.connect("dbname=sysinv user=postgres")
controller_0 = get_controller(sysinv_conn, constants.CONTROLLER_0_HOSTNAME)
if not simplex:
controller_1 = get_controller(sysinv_conn,
constants.CONTROLLER_1_HOSTNAME)
neutron_conn = psycopg2.connect("dbname=neutron user=postgres")
create_neutron_host_if_not_exists(neutron_conn, controller_0)
if not simplex:
create_neutron_host_if_not_exists(neutron_conn, controller_1)
if __name__ == "__main__":
sys.exit(main())

View File

@ -1,66 +0,0 @@
#!/usr/bin/env python
# Copyright (c) 2018 Wind River Systems, Inc.
#
# The right to copy, distribute, modify, or otherwise make use
# of this software may be licensed only pursuant to the terms
# of an applicable Wind River license agreement.
#
# This migration script renames the qat devices from qat-vf to
# qat-dh895xcc-vf in the flavor extra specs that contain the pci passthrough alias
import sys
import psycopg2
from controllerconfig.common import log
from psycopg2.extras import RealDictCursor
from sysinv.common import constants
LOG = log.get_logger(__name__)
def main():
action = None
from_release = None
to_release = None # noqa
arg = 1
while arg < len(sys.argv):
if arg == 1:
from_release = sys.argv[arg]
elif arg == 2:
to_release = sys.argv[arg] # noqa
elif arg == 3:
action = sys.argv[arg]
else:
print ("Invalid option %s." % sys.argv[arg])
return 1
arg += 1
log.configure()
if from_release == "17.06" and action == "migrate":
try:
LOG.info("performing nova flavor extra specs migration "
"from release %s to %s with action: %s" %
(from_release, to_release, action))
update_nova_flavor_pci_alias_data()
except Exception as ex:
LOG.exception(ex)
print ex
return 1
# In R4, only the Coleto Creek (qat-vf) is supported.
# In R5, the qat devices are exposed more explicitly as qat-dh895xcc-vf
# The pci passthrough alias name 'qat-vf' is replaced with 'qat-dh895xcc-vf'.
def update_nova_flavor_pci_alias_data():
conn = psycopg2.connect("dbname='nova_api' user='postgres'")
with conn:
with conn.cursor(cursor_factory=RealDictCursor) as cur:
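# \m and \M are PostgreSQL word-boundary markers, so regexp_replace
# rewrites only whole 'qat-vf' tokens; the 'g' and 'i' flags make the
# replacement global and case-insensitive.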
cur.execute("UPDATE flavor_extra_specs SET value = regexp_replace("
"value, '\mqat-vf\M', '%s', 'gi') WHERE "
"key='pci_passthrough:alias'" %
constants.NOVA_PCI_ALIAS_QAT_DH895XCC_VF_NAME)
if __name__ == "__main__":
sys.exit(main())

View File

@ -1,210 +0,0 @@
#!/usr/bin/env python
# Copyright (c) 2017 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# This script will update the controller_fs extension in the sysinv database.
import sys
import os
import subprocess
import math
import uuid
from datetime import datetime
import psycopg2
from controllerconfig import utils
from controllerconfig.common import log
from controllerconfig.common import constants
from psycopg2.extras import RealDictCursor
from sysinv.common import utils as sutils
LOG = log.get_logger(__name__)
def main():
action = None
from_release = None
to_release = None # noqa
arg = 1
while arg < len(sys.argv):
if arg == 1:
from_release = sys.argv[arg]
elif arg == 2:
to_release = sys.argv[arg] # noqa
elif arg == 3:
action = sys.argv[arg]
else:
print ("Invalid option %s." % sys.argv[arg])
return 1
arg += 1
log.configure()
if from_release == "17.06" and action == "migrate":
try:
update_extension()
except Exception as ex:
LOG.exception(ex)
print ex
return 1
def get_temp_sizes():
""" Get the temporary filesystems sizes setup during upgrades.
"""
total_temp_sizes = 0
args = ["lvdisplay",
"--columns",
"--options",
"lv_size,lv_name",
"--units",
"g",
"--noheading",
"--nosuffix",
"/dev/cgts-vg/dbdump-temp-lv",
"/dev/cgts-vg/postgres-temp-lv"]
with open(os.devnull, "w") as fnull:
try:
lvdisplay_output = subprocess.check_output(args,
stderr=fnull)
except Exception:
LOG.info("migrate extension, total_temp_size=%s" %
total_temp_sizes)
return total_temp_sizes
lvdisplay_dict = utils.output_to_dict(lvdisplay_output)
if lvdisplay_dict.get('dbdump-temp-lv'):
total_temp_sizes = int(math.ceil(float(
lvdisplay_dict.get('dbdump-temp-lv'))))
if lvdisplay_dict.get('postgres-temp-lv'):
total_temp_sizes += int(math.ceil(float(
lvdisplay_dict.get('postgres-temp-lv'))))
LOG.info("migrate extension, total_temp_sizes=%s" % total_temp_sizes)
return total_temp_sizes
def update_extension():
""" Update sysinv db controller_fs extension size on upgrade."""
try:
vg_free = sutils.get_cgts_vg_free_space()
LOG.info("migrate extension, get_cgts_vg_free_space=%s" % vg_free)
# Add back the temporary sizes
vg_free += get_temp_sizes()
LOG.info("migrate extension, vg_free=%s" % vg_free)
except Exception as e:
LOG.exception(e)
print e
return 1
conn = psycopg2.connect("dbname='sysinv' user='postgres'")
with conn:
with conn.cursor(cursor_factory=RealDictCursor) as cur:
cur.execute("select id from i_system;")
row = cur.fetchone()
if row is None:
LOG.exception("migrate extension, failed to fetch "
"i_system data")
raise
controller_fs_uuid = str(uuid.uuid4())
forisystemid = row.get('id')
values = {'created_at': datetime.now(),
'updated_at': None,
'deleted_at': None,
'uuid': controller_fs_uuid,
'name': 'extension',
'size': 1,
'replicated': True,
'logical_volume': 'extension-lv',
'forisystemid': forisystemid}
cur.execute("INSERT INTO controller_fs "
"(created_at, updated_at, deleted_at, "
"uuid, name, size, replicated, logical_volume, "
"forisystemid) "
"VALUES (%(created_at)s, %(updated_at)s, "
"%(deleted_at)s, %(uuid)s, %(name)s, %(size)s, "
"%(replicated)s, %(logical_volume)s, "
"%(forisystemid)s)",
values)
LOG.info("migrate extension, controller_fs, insert new row with "
"data %s" % values)
conn.commit()
# If there is not enough space to add the new extension filesystem
# then decrease the backup filesystem by the amount required (1G)
cur.execute("select size from controller_fs where name='backup';")
row = cur.fetchone()
LOG.info("migrate extension, backup = %s" % row)
if row is None:
LOG.exception("migrate extension, failed to fetch "
"controller_fs data")
raise
backup_size = row.get('size')
cur.execute(
"select size from controller_fs where name='database';")
row = cur.fetchone()
LOG.info("migrate extension, database = %s" % row)
if row is None:
LOG.exception("migrate extension, failed to fetch "
"controller_fs data")
raise
database_size = row.get('size')
cur.execute("select size from controller_fs where name='cgcs';")
row = cur.fetchone()
LOG.info("migrate extension, cgcs = %s" % row)
if row is None:
LOG.exception("migrate extension, failed to fetch "
"controller_fs data")
raise
cgcs_size = row.get('size')
cur.execute(
"select size from controller_fs where name='img-conversions';")
row = cur.fetchone()
LOG.info("migrate extension, img-conversions = %s" % row)
if row is None:
LOG.exception("migrate extension, failed to fetch "
"controller_fs data")
raise
img_conversions_size = row.get('size')
cur.execute(
"select size from controller_fs where name='extension';")
row = cur.fetchone()
LOG.info("migrate extension, extension= %s" % row)
if row is None:
LOG.exception("migrate extension, failed to fetch "
"controller_fs data")
raise
extension_size = row.get('size')
total_size = backup_size + (database_size * 2) + \
cgcs_size + img_conversions_size + extension_size
if vg_free < total_size:
LOG.info("migrate extension, we have less than 1G free")
new_backup_size = \
backup_size - constants.DEFAULT_EXTENSION_STOR_SIZE
LOG.info("migrate extension, reduce the backup size by 1G. "
"new_backup_size = %s" % new_backup_size)
cur.execute(
"UPDATE controller_fs SET size=%s where name='backup';",
(new_backup_size,))
conn.commit()
if __name__ == "__main__":
sys.exit(main())

View File

@ -1,708 +0,0 @@
#!/usr/bin/env python
# Copyright (c) 2017-2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# This script will update the partition schema for controller-1.
import collections
import json
import math
import psycopg2
import re
import sys
import subprocess
import parted
from sysinv.openstack.common import uuidutils
from sysinv.common import constants
from psycopg2.extras import RealDictCursor
from controllerconfig.common import log
from controllerconfig import utils
from tsconfig.tsconfig import system_mode
LOG = log.get_logger(__name__)
Partition_Tuple = collections.namedtuple(
'partition', 'uuid idisk_id idisk_uuid size_mib device_node device_path '
'status type_guid forihostid foripvid start_mib end_mib')
uefi_cgts_pv_1_partition_number = 4
bios_cgts_pv_1_partition_number = 5
def main():
action = None
from_release = None
to_release = None # noqa
arg = 1
while arg < len(sys.argv):
if arg == 1:
from_release = sys.argv[arg]
elif arg == 2:
to_release = sys.argv[arg] # noqa
elif arg == 3:
action = sys.argv[arg]
else:
print ("Invalid option %s." % sys.argv[arg])
return 1
arg += 1
log.configure()
if from_release == "17.06" and action == "migrate":
try:
create_user_partitions()
except Exception as ex:
LOG.exception(ex)
return 1
def get_partitions(device_path):
"""Obtain existing partitions from a disk."""
try:
device = parted.getDevice(device_path)
disk = parted.newDisk(device)
except Exception as e:
LOG.info("No partition info for disk %s - %s" % (device_path, e))
return None
ipartitions = []
partitions = disk.partitions
for partition in partitions:
part_size_mib = partition.getSize()
part_device_node = partition.path
part_device_path = '{}-part{}'.format(device_path,
partition.number)
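# geometry.start/end are sector counts; dividing by 2048 converts
# 512-byte sectors to MiB.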
start_mib = math.ceil(float(partition.geometry.start) / 2048)
end_mib = math.ceil(float(partition.geometry.end) / 2048)
part_attrs = {
'size_mib': part_size_mib,
'device_node': part_device_node,
'device_path': part_device_path,
'start_mib': start_mib,
'end_mib': end_mib
}
ipartitions.append(part_attrs)
return ipartitions
def get_disk_available_mib(device_node):
# Get sector size command.
sector_size_bytes_cmd = '{} {}'.format('blockdev --getss', device_node)
# Get total free space in sectors command.
avail_space_sectors_cmd = '{} {} {}'.format(
'sgdisk -p', device_node, "| grep \"Total free space\"")
# Get the sector size.
sector_size_bytes_process = subprocess.Popen(
sector_size_bytes_cmd, stdout=subprocess.PIPE, shell=True)
sector_size_bytes = sector_size_bytes_process.stdout.read().rstrip()
# Get the free space.
avail_space_sectors_process = subprocess.Popen(
avail_space_sectors_cmd, stdout=subprocess.PIPE, shell=True)
avail_space_sectors_output = avail_space_sectors_process.stdout.read()
avail_space_sectors = re.findall('\d+',
avail_space_sectors_output)[0].rstrip()
# Free space in MiB.
avail_space_mib = (int(sector_size_bytes) * int(avail_space_sectors) /
(1024 ** 2))
# Keep 2 MiB for partition table.
if avail_space_mib >= 2:
avail_space_mib = avail_space_mib - 2
return avail_space_mib
def build_partition_device_node(disk_device_node, partition_number):
if constants.DEVICE_NAME_NVME in disk_device_node:
partition_device_node = '{}p{}'.format(
disk_device_node, partition_number)
else:
partition_device_node = '{}{}'.format(
disk_device_node, partition_number)
LOG.info("partition_device_node: %s" % partition_device_node)
return partition_device_node
def update_db_pv(cur, part_device_path, part_device_node, part_uuid,
lvm_pv_name, pv_id):
cur.execute("update i_pv set disk_or_part_device_path=%s,"
"disk_or_part_device_node=%s, disk_or_part_uuid=%s,"
"lvm_pv_name=%s where id=%s",
(part_device_path, part_device_node, part_uuid,
lvm_pv_name, pv_id))
def create_partition(cur, partition):
cur.execute(
"insert into partition(uuid, idisk_id, idisk_uuid, size_mib,"
"device_node, device_path, status, type_guid, "
"forihostid, foripvid, start_mib, end_mib) "
"values(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)",
partition)
def get_storage_backend(cur):
cur.execute("select storage_backend.id, storage_backend.backend, "
"storage_backend.state, "
"storage_backend.forisystemid, storage_backend.services, "
"storage_backend.capabilities from storage_backend")
storage_backend = cur.fetchone()
if not storage_backend:
LOG.exception("No storage backend present, exiting.")
raise
backend = storage_backend['backend']
LOG.info("storage_backend: %s" % str(storage_backend))
return backend
def cgts_vg_extend(cur, disk, partition4, pv_cgts_vg, partition_number,
part_size_mib):
part_device_node = '{}{}'.format(disk.get('device_node'),
partition_number)
part_device_path = '{}-part{}'.format(disk.get('device_path'),
partition_number)
LOG.info("Extra cgts-vg partition size: %s device node: %s "
"device path: %s" %
(part_size_mib, part_device_node, part_device_path))
part_uuid = uuidutils.generate_uuid()
new_partition = Partition_Tuple(
uuid=part_uuid, idisk_id=disk.get('id'),
idisk_uuid=disk.get('uuid'), size_mib=part_size_mib,
device_node=part_device_node, device_path=part_device_path,
status=constants.PARTITION_CREATE_ON_UNLOCK_STATUS,
type_guid=constants.USER_PARTITION_PHYSICAL_VOLUME,
forihostid=disk['forihostid'], foripvid=None,
start_mib=None, end_mib=None)
create_partition(cur, new_partition)
pv_uuid = uuidutils.generate_uuid()
cur.execute(
"insert into i_pv(uuid, pv_state, pv_type, disk_or_part_uuid, "
"disk_or_part_device_node, disk_or_part_device_path, lvm_pv_name, "
"lvm_vg_name, forihostid, forilvgid) "
"values(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)",
(pv_uuid, constants.PV_ADD, constants.PV_TYPE_PARTITION, part_uuid,
part_device_node, part_device_path, part_device_node,
constants.LVG_CGTS_VG, disk.get('forihostid'),
pv_cgts_vg.get('forilvgid')))
# Get the PV.
cur.execute("select i_pv.id from i_pv where uuid=%s",
(pv_uuid,))
pv = cur.fetchone()
# Update partition.
cur.execute(
"update partition set foripvid=%s where uuid=%s",
(pv.get('id'), part_uuid))
def update_ctrl0_cinder_partition_pv(cur):
# Get controller-0 id.
hostname = constants.CONTROLLER_0_HOSTNAME
cur.execute("select i_host.id, i_host.rootfs_device from i_host "
"where hostname=%s;", (hostname,))
row = cur.fetchone()
if row is None:
LOG.exception("Failed to fetch %s host_id" % hostname)
raise
ctrl0_id = row['id']
# Controller-0 has only one partition added, the cinder partition.
cur.execute("select partition.id, partition.uuid, "
"partition.status, partition.device_node, "
"partition.device_path, partition.size_mib,"
"partition.idisk_uuid, partition.foripvid "
"from partition where forihostid = %s",
(ctrl0_id,))
ctrl0_cinder_partition = cur.fetchone()
if not ctrl0_cinder_partition:
LOG.exception("Failed to get ctrl0 cinder volumes partition")
raise
# Obtain the cinder PV for controller-0.
cur.execute("select i_pv.id, i_pv.disk_or_part_uuid, "
"i_pv.disk_or_part_device_node, "
"i_pv.disk_or_part_device_path, i_pv.lvm_pv_size,"
"i_pv.lvm_pv_name, i_pv.lvm_vg_name, i_pv.forilvgid,"
"i_pv.pv_type from i_pv where forihostid=%s and "
"lvm_vg_name=%s",
(ctrl0_id, constants.LVG_CINDER_VOLUMES))
ctrl0_cinder_pv = cur.fetchone()
if not ctrl0_cinder_pv:
LOG.exception("Failed to get ctrl0 cinder physical volume")
raise
# Update the cinder PV with the partition info.
update_db_pv(cur, ctrl0_cinder_partition['device_path'],
ctrl0_cinder_partition['device_node'],
ctrl0_cinder_partition['uuid'],
ctrl0_cinder_partition['device_node'],
ctrl0_cinder_pv['id'])
# Mark the cinder partition in use.
cur.execute("update partition set foripvid=%s, status=%s "
"where id=%s",
(ctrl0_cinder_pv['id'], constants.PARTITION_IN_USE_STATUS,
ctrl0_cinder_partition['id']))
def update_partition_pv(cur, pvs, partitions, disks):
backend = get_storage_backend(cur)
if system_mode != constants.SYSTEM_MODE_SIMPLEX and backend != "ceph":
update_ctrl0_cinder_partition_pv(cur)
for pv in pvs:
if (pv['pv_type'] == constants.PV_TYPE_PARTITION and
'-part' not in pv['disk_or_part_device_path']):
if "drbd" in pv['lvm_pv_name']:
partition_number = '1'
else:
partition_number = (
re.match('.*?([0-9]+)$', pv['lvm_pv_name']).group(1))
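# e.g. an lvm_pv_name of '/dev/sda4' yields partition_number '4'.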
# Update disk foripvid to null.
disk = next((
d for d in disks
if d['device_path'] == pv['disk_or_part_device_path']), None)
if disk:
LOG.info("Set foripvid to null for disk %s" % disk['id'])
cur.execute(
"update i_idisk set foripvid=null where id=%s",
(disk['id'],))
# Update partition device path and device path for the current PV.
part_device_path = "{}{}{}".format(
pv['disk_or_part_device_path'],
'-part',
partition_number)
if constants.DEVICE_NAME_NVME in pv['disk_or_part_device_node']:
part_device_node = "{}p{}".format(
pv['disk_or_part_device_node'],
partition_number)
else:
part_device_node = "{}{}".format(
pv['disk_or_part_device_node'],
partition_number)
LOG.info("Old PV device path: %s New PV device path: %s" %
(pv['disk_or_part_device_path'], part_device_path))
LOG.info("Old PV device node: %s New PV device node: %s" %
(pv['disk_or_part_device_node'], part_device_node))
lvm_pv_name = part_device_node
# Do not use constant here yet since this may change due to
# cinder removal from cfg ctrl US.
if "drbd" in pv['lvm_pv_name']:
lvm_pv_name = pv['lvm_pv_name']
part = next((
p for p in partitions
if p['device_path'] == part_device_path), None)
if not part:
LOG.info("No %s partition, returning" % part_device_path)
continue
# Update the PV DB entry.
update_db_pv(cur, part_device_path, part_device_node,
part['uuid'], lvm_pv_name, pv['id'])
# Update the PV DB entry.
cur.execute(
"update partition set foripvid=%s, status=%s "
"where id=%s",
(pv['id'], constants.PARTITION_IN_USE_STATUS,
part['id']))
def create_ctrl0_cinder_partition(cur, stors, part_size):
hostname = constants.CONTROLLER_0_HOSTNAME
cur.execute("select i_host.id, i_host.rootfs_device from i_host "
"where hostname=%s;", (hostname,))
row = cur.fetchone()
if row is None:
LOG.exception("Failed to fetch %s host_id" % hostname)
raise
controller_id = row['id']
# Get the disks for controller-0.
cur.execute("select i_idisk.forihostid, i_idisk.uuid, "
"i_idisk.device_node, i_idisk.device_path, "
"i_idisk.id, i_idisk.size_mib from i_idisk where "
"forihostid = %s", (controller_id,))
disks_ctrl0 = cur.fetchall()
# Obtain the cinder disk for controller-0.
cinder_disk_ctrl0 = next((
d for d in disks_ctrl0
if d['uuid'] in [s['idisk_uuid'] for s in stors]), None)
LOG.info("cinder_disk_ctrl0: %s" % str(cinder_disk_ctrl0))
if not cinder_disk_ctrl0:
LOG.exception("Failed to get cinder disk for host %s" %
controller_id)
raise
# Fill in partition info.
new_part_size = part_size
new_part_device_node = "%s1" % cinder_disk_ctrl0['device_node']
new_part_device_path = ('%s-part1' %
cinder_disk_ctrl0['device_path'])
LOG.info("New partition: %s - %s" %
(new_part_device_node, new_part_device_path))
new_part_uuid = uuidutils.generate_uuid()
new_partition = Partition_Tuple(
uuid=new_part_uuid,
idisk_id=cinder_disk_ctrl0.get('id'),
idisk_uuid=cinder_disk_ctrl0.get('uuid'),
size_mib=new_part_size,
device_node=new_part_device_node,
device_path=new_part_device_path,
status=constants.PARTITION_IN_USE_STATUS,
type_guid=constants.USER_PARTITION_PHYSICAL_VOLUME,
forihostid=controller_id,
foripvid=None,
start_mib=None,
end_mib=None)
create_partition(cur, new_partition)
def create_db_partition_entries(cur, disks):
# Get the stors with the cinder function.
cur.execute("select i_istor.id, i_istor.idisk_uuid, "
"i_istor.function, i_istor.forihostid "
"from i_istor where function = %s",
(constants.STOR_FUNCTION_CINDER,))
stors = cur.fetchall()
cinder_partition = False
for disk in disks:
partitions = get_partitions(disk['device_path'])
LOG.info("partitions: %s" % str(partitions))
# Create the DB entries for all disk partitions on controller-1.
# For controller-0 we will only create the cinder partition, as the
# rest will be reported by sysinv-agent once the host is upgraded.
if not partitions:
continue
for part in partitions:
part_disk = next((
d for d in disks if d['device_path'] in part['device_path']
))
crt_stor = next((s for s in stors
if s['idisk_uuid'] == part_disk['uuid']), None)
part_type_guid = constants.LINUX_LVM_PARTITION
if crt_stor:
part_type_guid = constants.USER_PARTITION_PHYSICAL_VOLUME
part_size = part['size_mib']
part_device_node = part['device_node']
part_device_path = part['device_path']
LOG.info("New partition size: %s part device node: %s "
"part device path: %s" %
(part_size, part_device_node, part_device_path))
part_uuid = uuidutils.generate_uuid()
new_partition = Partition_Tuple(
uuid=part_uuid, idisk_id=part_disk.get('id'),
idisk_uuid=part_disk.get('uuid'), size_mib=part_size,
device_node=part_device_node, device_path=part_device_path,
status=constants.PARTITION_IN_USE_STATUS,
type_guid=part_type_guid,
forihostid=disk['forihostid'], foripvid=None,
start_mib=part['start_mib'], end_mib=part['end_mib'])
create_partition(cur, new_partition)
# If this is the cinder disk, also create partition for the other
# controller.
if not crt_stor:
LOG.info("Disk %s is not a cinder disk for host %s" %
(part_disk['device_path'], part_disk['forihostid']))
continue
if system_mode == constants.SYSTEM_MODE_SIMPLEX:
cinder_partition = True
continue
# Also create the cinder partition for controller-0.
create_ctrl0_cinder_partition(cur, stors, part_size)
cinder_partition = True
# If somehow the cinder disk was also wiped and the partition was lost,
# we need to retrieve it in another way.
if not cinder_partition:
LOG.info("Cinder partition was wiped so we need to create it")
for disk in disks:
d_json_dict = json.loads(disk['capabilities'])
if (constants.IDISK_DEV_FUNCTION in d_json_dict and
d_json_dict['device_function'] == 'cinder_device'):
if 'cinder_gib' in d_json_dict:
LOG.info("cinder_gib: %s" % d_json_dict['cinder_gib'])
# Partition size calculated from the size of cinder_gib.
part_size = int(d_json_dict['cinder_gib'])
# Actual disk size in MiB.
device = parted.getDevice(disk['device_path'])
disk_size = device.length * device.sectorSize / (1024 ** 2)
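# Clamp the partition to the physical disk size, keeping 2 MiB
# for the partition table.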
part_size = min(part_size, disk_size - 2)
if constants.DEVICE_NAME_NVME in disk['device_node']:
part_device_node = "%sp1" % disk['device_node']
else:
part_device_node = "%s1" % disk['device_node']
part_device_path = "%s-part1" % disk['device_path']
part_start_mib = 2
part_end_mib = 2 + part_size
LOG.info("New partition size: %s part device node: %s "
"part device path: %s part_end_mib: %s" %
(part_size, part_device_node, part_device_path,
part_end_mib))
part_uuid = uuidutils.generate_uuid()
new_partition = Partition_Tuple(
uuid=part_uuid,
idisk_id=disk.get('id'),
idisk_uuid=disk.get('uuid'), size_mib=part_size,
device_node=part_device_node,
device_path=part_device_path,
status=constants.PARTITION_IN_USE_STATUS,
type_guid=constants.USER_PARTITION_PHYSICAL_VOLUME,
forihostid=disk['forihostid'], foripvid=None,
start_mib=part_start_mib, end_mib=part_end_mib)
create_partition(cur, new_partition)
if system_mode != constants.SYSTEM_MODE_SIMPLEX:
create_ctrl0_cinder_partition(cur, stors, part_size)
break
def create_user_partitions():
conn = psycopg2.connect("dbname=sysinv user=postgres")
with conn:
with conn.cursor(cursor_factory=RealDictCursor) as cur:
hostname = constants.CONTROLLER_1_HOSTNAME
if system_mode == constants.SYSTEM_MODE_SIMPLEX:
hostname = constants.CONTROLLER_0_HOSTNAME
cur.execute("select i_host.id, i_host.rootfs_device from i_host "
"where hostname=%s;", (hostname,))
row = cur.fetchone()
if row is None:
LOG.exception("Failed to fetch %s host_id" % hostname)
raise
controller_id = row['id']
controller_rootfs = row['rootfs_device']
# Get the disks for the controller.
cur.execute("select i_idisk.forihostid, i_idisk.uuid, "
"i_idisk.device_node, i_idisk.device_path, "
"i_idisk.capabilities, "
"i_idisk.id, i_idisk.size_mib from i_idisk where "
"forihostid = %s", (controller_id,))
disks = cur.fetchall()
# Get the PVs for the controller.
cur.execute(
"select i_pv.id, i_pv.disk_or_part_uuid, "
"i_pv.disk_or_part_device_node, "
"i_pv.disk_or_part_device_path, i_pv.lvm_pv_size,"
"i_pv.lvm_pv_name, i_pv.lvm_vg_name, i_pv.forilvgid,"
"i_pv.pv_type from i_pv where forihostid = %s",
(controller_id,))
pvs = cur.fetchall()
# Obtain the rootfs disk. This is for handling the case when
# rootfs is not on /dev/sda.
controller_rootfs_disk = next((
d for d in disks
if (d.get('device_path') == controller_rootfs or
controller_rootfs in d.get('device_node'))), None)
LOG.info("controller_rootfs_disk: %s" % controller_rootfs_disk)
create_db_partition_entries(cur, disks)
# Get the PVs for the controller.
cur.execute(
"select partition.id, partition.uuid, "
"partition.status, partition.device_node, "
"partition.device_path, partition.size_mib,"
"partition.idisk_uuid, partition.foripvid "
"from partition where forihostid = %s",
(controller_id,))
partitions = cur.fetchall()
update_partition_pv(cur, pvs, partitions, disks)
# If this is not an AIO setup, we must return, as we already have
# all the needed information.
if utils.get_system_type() != constants.TIS_AIO_BUILD:
LOG.info("This is not an AIO setup, nothing to do here.")
return
# Get the PVs for cgts-vg from the root fs disk, present in the DB.
# This list can have max 2 elements.
cgts_vg_pvs = [pv for pv in pvs
if pv['lvm_vg_name'] == constants.LVG_CGTS_VG and
(controller_rootfs_disk['device_path'] in
pv['disk_or_part_device_path'])]
LOG.info("cgts-vg pvs: %s" % str(cgts_vg_pvs))
# Build the PV name of the initial PV for cgts-vg.
R5_cgts_pv_1_name = build_partition_device_node(
controller_rootfs_disk['device_node'],
uefi_cgts_pv_1_partition_number)
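# e.g. '/dev/sda4' for a SATA rootfs disk, or '/dev/nvme0n1p4' on NVMe.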
# Get the initial PV of cgts-vg. If it's not present with the
# provided name, then we're probably on a BIOS setup.
R5_cgts_pv_1 = next((
pv for pv in cgts_vg_pvs
if pv['lvm_pv_name'] == R5_cgts_pv_1_name), None)
# Get the device used by R5_cgts_pv_1.
R5_cgts_pv_1_part = next((
p for p in partitions
if p['device_node'] == R5_cgts_pv_1_name),
None)
# On an R4 AIO installed with BIOS, we won't have 6 partitions
# right after install, but only 4.
# R4 PV /dev/sda5 thus should become PV /dev/sda4 in R5.
if not R5_cgts_pv_1:
LOG.info("Probably bios here, we need to update the DB for "
"cgts-vg partitions and pv")
R4_cgts_pv_1_name = build_partition_device_node(
controller_rootfs_disk['device_node'],
bios_cgts_pv_1_partition_number)
R5_cgts_pv_1 = next((
pv for pv in pvs
if pv['lvm_pv_name'] == R4_cgts_pv_1_name),
None)
cur.execute(
"update partition set foripvid=%s, status=%s "
"where device_path=%s and forihostid=%s",
(R5_cgts_pv_1.get('id'), constants.PARTITION_IN_USE_STATUS,
R5_cgts_pv_1_part['device_path'], controller_id))
update_db_pv(cur, R5_cgts_pv_1_part['device_path'],
R5_cgts_pv_1_part['device_node'],
R5_cgts_pv_1_part['uuid'],
R5_cgts_pv_1_part['device_node'],
R5_cgts_pv_1.get('id'))
cgts_vg_pvs.remove(R5_cgts_pv_1)
# There is a high chance that the current R5 /dev/sda4 partition is
# too small for the R4 cgts-vg. In this case, we need to create
# an extra partition & PV for cgts-vg.
part_number = 5
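# lvm_pv_size is in bytes while size_mib is in MiB, hence the
# 1024**2 conversion below.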
extra_cgts_part_size = math.ceil(
float(R5_cgts_pv_1.get('lvm_pv_size')) / (1024 ** 2) -
R5_cgts_pv_1_part.get('size_mib'))
if extra_cgts_part_size > 0:
LOG.info("/dev/sda4 is not enough for R4 cgts-vg")
cgts_vg_extend(cur, controller_rootfs_disk, R5_cgts_pv_1_part,
R5_cgts_pv_1,
part_number, extra_cgts_part_size)
part_number = part_number + 1
else:
extra_cgts_part_size = 0
# If the remaining space was used by either nova-local or cgts-vg,
# then the R4 partition must be specifically created.
if cgts_vg_pvs:
last_rootfs_pv = cgts_vg_pvs[0]
LOG.info("Extra rootfs disk space used by cgts-vg")
else:
# Get the nova-local PV from the rootfs disk.
last_rootfs_pv = next((
pv for pv in pvs
if (pv['lvm_vg_name'] == constants.LVG_NOVA_LOCAL and
controller_rootfs_disk['device_node'] in
pv['lvm_pv_name'])),
None)
if last_rootfs_pv:
LOG.info("Extra rootfs disk space used by nova-local")
# If the remaining space is not used, return.
if not last_rootfs_pv:
LOG.info("Extra rootfs disk space not used, return")
return
# Create the partition DB entry and update the associated
# physical volume.
disk_available_mib = get_disk_available_mib(
controller_rootfs_disk['device_node']) - extra_cgts_part_size
LOG.info("Available mib: %s" % disk_available_mib)
part_size = disk_available_mib
part_device_node = '{}{}'.format(
controller_rootfs_disk.get('device_node'),
part_number)
part_device_path = '{}-part{}'.format(
controller_rootfs_disk.get('device_path'),
part_number)
LOG.info("Partition size: %s part device node: %s "
"part device path: %s" %
(part_size, part_device_node, part_device_path))
part_uuid = uuidutils.generate_uuid()
new_partition = Partition_Tuple(
uuid=part_uuid,
idisk_id=controller_rootfs_disk.get('id'),
idisk_uuid=controller_rootfs_disk.get('uuid'),
size_mib=part_size,
device_node=part_device_node,
device_path=part_device_path,
status=constants.PARTITION_CREATE_ON_UNLOCK_STATUS,
type_guid=constants.USER_PARTITION_PHYSICAL_VOLUME,
forihostid=controller_id,
foripvid=last_rootfs_pv.get('id'),
start_mib=None,
end_mib=None)
create_partition(cur, new_partition)
update_db_pv(cur, part_device_path, part_device_node,
part_uuid, part_device_node, last_rootfs_pv.get('id'))
if __name__ == "__main__":
sys.exit(main())

View File

@ -1,411 +0,0 @@
#!/usr/bin/env python
# Copyright (c) 2017 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# This script will migrate away from using vlan-tagged subnets,
# to using separate networks with their compute ports trunked
# from the network the vlan-tagged subnet was on.
# Once all of the compute nodes are updated, the old vlan-tagged
# subnets, as well as all of the ports on them, will be deleted.
import os
import psycopg2
import subprocess
import sys
import uuid
from psycopg2.extras import RealDictCursor
from controllerconfig.common import log
LOG = log.get_logger(__name__)
def main():
action = None
from_release = None
to_release = None # noqa
arg = 1
while arg < len(sys.argv):
if arg == 1:
from_release = sys.argv[arg]
elif arg == 2:
to_release = sys.argv[arg] # noqa
elif arg == 3:
action = sys.argv[arg]
else:
print ("Invalid option %s." % sys.argv[arg])
return 1
arg += 1
log.configure()
if from_release == "17.06" and action == "migrate":
try:
migrate_vlan()
except Exception as ex:
LOG.exception(ex)
print ex
return 1
if from_release == "17.06" and action == "activate":
try:
cleanup_neutron_vlan_subnets()
except Exception as ex:
LOG.exception(ex)
print ex
return 1
def run_cmd(cur, cmd):
cur.execute(cmd)
def run_cmd_postgres(sub_cmd):
"""
This executes the given command as user postgres. This is necessary when
this script is run as root, which is the case on an upgrade activation.
"""
error_output = open(os.devnull, 'w')
cmd = ("sudo -u postgres psql -d neutron -c \"%s\"" % sub_cmd)
LOG.info("Executing '%s'" % cmd)
subprocess.check_call([cmd], shell=True, stderr=error_output)
def migrate_vlan():
conn = psycopg2.connect("dbname=neutron user=postgres")
with conn:
with conn.cursor(cursor_factory=RealDictCursor) as cur:
create_new_networks(cur)
def cleanup_neutron_vlan_subnets():
"""
This function cleans up data leftover from migrating away from using
vlan-tagged subnets. Specifically, it deletes all non-compute ports
on vlan-tagged subnets, as well as all vlan-tagged subnets.
"""
cmd = ("DELETE FROM ports WHERE id in"
" (SELECT port_id FROM ipallocations AS ipa"
" JOIN subnets AS s ON ipa.subnet_id = s.id"
" where s.vlan_id!=0)"
" AND device_owner not like 'compute:%';")
run_cmd_postgres(cmd)
cmd = "DELETE FROM subnets WHERE vlan_id != 0;"
run_cmd_postgres(cmd)
def create_new_networks(cur):
"""
This function creates new networks for each network segment belonging to
a vlan-tagged subnet, and clones those subnets minus the vlan ID.
For each of those cloned subnets, it also clones all of the ports on them,
as well as all of the IP allocations and the bindings.
"""
cmd = ("SELECT s.vlan_id, s.network_id, m2ss.network_type,"
" m2ss.physical_network, m2ss.segmentation_id FROM subnets AS s"
" JOIN ml2_subnet_segments AS m2ss ON s.id = m2ss.subnet_id"
" WHERE s.vlan_id != 0 GROUP BY s.vlan_id, s.network_id,"
" m2ss.network_type, m2ss.physical_network, m2ss.segmentation_id;")
run_cmd(cur, cmd)
networks_to_create = []
while True:
network = cur.fetchone()
if network is None:
break
networks_to_create.append(network)
for network in networks_to_create:
create_and_populate_network(cur, network)
def create_standard_attribute(cur, name):
"""
This function creates new standard attribute entries to be used by copied
data.
"""
cmd = ("INSERT INTO standardattributes (resource_type)"
" VALUES ('%s') RETURNING id") %\
(name,)
run_cmd(cur, cmd)
return cur.fetchone()['id']
def create_and_populate_network(cur, network):
"""
This function takes a network segment, and copies all the data on that
network segment to a newly-created network. For each compute port on the
original network, a port trunk should be created from the original port
as a parent, to the new port as a subport. This replaces the vlan id being
set on an individual subnet.
"""
vlan_id = network['vlan_id']
network_type = network['network_type']
old_network_id = network['network_id']
# This new network ID should be the same as neutron passes to vswitch for
# the network-uuid of the network segment for the vlan-tagged subnet.
network_suffix = "vlan%s" % vlan_id
new_network_id = uuid.uuid5(uuid.UUID(old_network_id), network_suffix)
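# uuid5 is a deterministic, name-based UUID, so deriving it from the old
# network id and the 'vlanN' suffix always yields the same new network id
# for a given segment.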
new_networksegment_id = uuid.uuid4()
cmd = ("INSERT INTO networks (project_id, id, name, status,"
"admin_state_up, vlan_transparent, standard_attr_id,"
" availability_zone_hints)"
" (SELECT project_id, '%s',"
" CONCAT_WS('-VLAN%d', NULLIF(name,''), ''), status,"
" admin_state_up, vlan_transparent, '%s', availability_zone_hints"
" FROM networks WHERE id = '%s') RETURNING id;") %\
(new_network_id, vlan_id,
create_standard_attribute(cur, 'networks'), old_network_id)
run_cmd(cur, cmd)
old_network_id = network['network_id']
new_network_id = cur.fetchone()['id']
cmd = ("INSERT INTO networksegments (id, network_id, network_type,"
" physical_network, segmentation_id, is_dynamic, segment_index,"
" standard_attr_id, name)"
" VALUES('%s','%s','%s','%s','%s','%s','%s','%s','%s')") %\
(new_networksegment_id, new_network_id, network_type,
network['physical_network'], network['segmentation_id'],
'f', '0', create_standard_attribute(cur, 'networksegments'), '')
run_cmd(cur, cmd)
# Get a list of vlan-tagged subnets on the network we are copying.
# For each of these subnets, we loop through and copy them, and then loop
# through the ip allocations on them and copy those ip allocations, along
# with the ports that are in those ip allocations.
sub_cmd = ("SELECT id FROM subnets"
" WHERE vlan_id = '%s' AND network_id='%s'") %\
(vlan_id, old_network_id)
# Copy the subnets to the new network
run_cmd(cur, sub_cmd)
subnets = cur.fetchall()
subnet_copies = {}
for subnet in subnets:
old_subnet_id = subnet['id']
new_subnet_id = uuid.uuid4()
new_ml2_subnet_segment_id = uuid.uuid4()
subnet_copies[old_subnet_id] = new_subnet_id
cmd = ("INSERT INTO subnets"
" (project_id, id, name, network_id, ip_version, cidr,"
" gateway_ip, enable_dhcp, ipv6_ra_mode, ipv6_address_mode,"
" subnetpool_id, vlan_id, standard_attr_id, segment_id)"
" (SELECT project_id, '%s', name, '%s', ip_version, cidr,"
" gateway_ip, enable_dhcp, ipv6_ra_mode, ipv6_address_mode,"
" subnetpool_id, 0, '%s', segment_id"
" FROM subnets WHERE id='%s')") %\
(new_subnet_id, new_network_id,
create_standard_attribute(cur, 'subnets'), old_subnet_id)
run_cmd(cur, cmd)
cmd = ("INSERT INTO ml2_subnet_segments"
" (id, subnet_id, network_type, physical_network,"
" segmentation_id, is_dynamic, segment_index)"
" (SELECT '%s', '%s', network_type, physical_network,"
" segmentation_id, is_dynamic, segment_index"
" FROM ml2_subnet_segments WHERE subnet_id='%s')") %\
(new_ml2_subnet_segment_id, new_subnet_id, old_subnet_id)
run_cmd(cur, cmd)
duplicate_ipam_subnets(cur, old_subnet_id, new_subnet_id)
duplicate_ipallocationpools(cur, old_subnet_id, new_subnet_id)
# Copy the ports that are related to vlan subnets such that those new
# ports are directly attached to the network that was created to replace
# the vlan subnet. We ignore DHCP ports because both the vlan
# subnet and the new network will share the same provider network we do
# not want 2 ports with the same IP to exist simultaneously. Instead,
# we let the DHCP server allocate this port when it notices that it is
# missing which will result in a new IP allocation and should not
# interfere with any existing allocations because they have all been
# cloned onto the new network.
cmd = ("SELECT DISTINCT port_id FROM ipallocations"
" LEFT JOIN ports AS p ON p.id = ipallocations.port_id"
" WHERE p.device_owner != 'network:dhcp'"
" AND subnet_id IN (%s)") % sub_cmd
run_cmd(cur, cmd)
ports_to_copy = cur.fetchall()
port_copies = {}
for port in ports_to_copy:
old_port_id = port['port_id']
new_port_id = uuid.uuid4()
port_copies[old_port_id] = new_port_id
cmd = ("INSERT INTO ports (project_id, id, name, network_id,"
" mac_address, admin_state_up, status, device_id, device_owner,"
" standard_attr_id, ip_allocation)"
" (SELECT project_id, '%s',"
" CONCAT_WS('-VLAN%d', NULLIF(name,''), ''), '%s',"
" mac_address, admin_state_up, status, device_id, device_owner,"
"'%s', ip_allocation FROM ports WHERE id = '%s')"
" RETURNING id, device_owner") %\
(new_port_id, vlan_id, new_network_id,
create_standard_attribute(cur, 'ports'), old_port_id)
run_cmd(cur, cmd)
new_port = cur.fetchone()
new_port_owner = new_port['device_owner']
cmd = ("INSERT INTO ml2_port_bindings"
" (port_id, host, vif_type, vnic_type, profile,"
" vif_details, vif_model, mac_filtering, mtu)"
" (SELECT '%s', host, vif_type, vnic_type, profile,"
" vif_details, vif_model, mac_filtering, mtu"
" FROM ml2_port_bindings where port_id='%s')") %\
(new_port_id, old_port_id)
run_cmd(cur, cmd)
cmd = ("INSERT INTO ml2_port_binding_levels"
" (port_id, host, level, driver, segment_id)"
" (SELECT '%s', host, level, driver, '%s'"
" FROM ml2_port_binding_levels WHERE port_id='%s')") %\
(new_port_id, new_networksegment_id, old_port_id)
run_cmd(cur, cmd)
if new_port_owner.startswith('compute:'):
trunk_id = create_port_trunk(cur, old_port_id)
create_subport(cur, trunk_id, new_port_id, 'vlan', vlan_id)
elif new_port_owner.startswith('network:router'):
cmd = ("INSERT INTO routerports (router_id, port_id, port_type)"
" (SELECT router_id, '%s', port_type FROM routerports"
" WHERE port_id = '%s')") %\
(new_port_id, old_port_id)
run_cmd(cur, cmd)
elif new_port_owner == 'network:dhcp':
# Set new port's device_id to DEVICE_ID_RESERVED_DHCP_PORT,
# so that it is used by dhcp agent for new subnet.
cmd = ("UPDATE ports SET device_id='reserved_dhcp_port'"
" WHERE id='%s'") %\
(new_port_id,)
run_cmd(cur, cmd)
# Copy the ipallocations
cmd = ("SELECT * FROM ipallocations WHERE network_id='%s'") %\
(old_network_id)
run_cmd(cur, cmd)
ipallocations = cur.fetchall()
for ipallocation in ipallocations:
old_ip_address = ipallocation['ip_address']
old_port_id = ipallocation['port_id']
old_subnet_id = ipallocation['subnet_id']
new_port_id = port_copies.get(old_port_id)
new_subnet_id = subnet_copies.get(old_subnet_id)
if not new_port_id or not new_subnet_id:
continue
cmd = ("INSERT INTO ipallocations"
" (port_id, ip_address, subnet_id, network_id)"
" VALUES ('%s', '%s', '%s', '%s')") %\
(new_port_id, old_ip_address, new_subnet_id, new_network_id)
run_cmd(cur, cmd)
# Copy the DHCP network agent bindings so that the new networks are
# initially scheduled to the same agents as the vlan subnets they are
# replacing. The alternative is that all new networks are initially
# unscheduled and they may all get scheduled to the same agent when any
# of the agents query for new networks to service.
cmd = ("SELECT * FROM networkdhcpagentbindings WHERE network_id='%s'" %
old_network_id)
run_cmd(cur, cmd)
bindings = cur.fetchall()
for binding in bindings:
agent_id = binding['dhcp_agent_id']
cmd = ("INSERT INTO networkdhcpagentbindings"
" (network_id, dhcp_agent_id)"
" VALUES ('%s', '%s')" %
(new_network_id, agent_id))
run_cmd(cur, cmd)
def duplicate_ipam_subnets(cur, old_neutron_subnet_id, new_neutron_subnet_id):
cmd = ("SELECT id from ipamsubnets WHERE neutron_subnet_id='%s'") %\
(old_neutron_subnet_id)
run_cmd(cur, cmd)
ipamsubnets = cur.fetchall()
for ipamsubnet in ipamsubnets:
old_ipamsubnet_id = ipamsubnet['id']
new_ipamsubnet_id = uuid.uuid4()
cmd = ("INSERT INTO ipamsubnets (id, neutron_subnet_id)"
" VALUES ('%s', '%s')") %\
(new_ipamsubnet_id, new_neutron_subnet_id)
run_cmd(cur, cmd)
cmd = ("SELECT * from ipamallocationpools"
" WHERE ipam_subnet_id='%s'") %\
(old_ipamsubnet_id)
run_cmd(cur, cmd)
ipamallocationpools = cur.fetchall()
for ipamallocationpool in ipamallocationpools:
new_ipamallocationpool_id = uuid.uuid4()
first_ip = ipamallocationpool['first_ip']
last_ip = ipamallocationpool['last_ip']
cmd = ("INSERT INTO ipamallocationpools"
" (id, ipam_subnet_id, first_ip, last_ip)"
" VALUES ('%s', '%s', '%s', '%s')") %\
(new_ipamallocationpool_id, new_ipamsubnet_id,
first_ip, last_ip)
run_cmd(cur, cmd)
cmd = ("INSERT INTO ipamallocations"
" (ip_address, status, ipam_subnet_id)"
" (SELECT ip_address, status, '%s' FROM ipamallocations"
" WHERE ipam_subnet_id='%s')") %\
(new_ipamsubnet_id, old_ipamsubnet_id)
run_cmd(cur, cmd)
def duplicate_ipallocationpools(cur, old_subnet_id, new_subnet_id):
cmd = ("SELECT * from ipallocationpools WHERE subnet_id='%s'") %\
(old_subnet_id)
run_cmd(cur, cmd)
ipallocationpools = cur.fetchall()
for ipallocationpool in ipallocationpools:
new_ipallocationpool_id = uuid.uuid4()
first_ip = ipallocationpool['first_ip']
last_ip = ipallocationpool['last_ip']
cmd = ("INSERT INTO ipallocationpools"
" (id, subnet_id, first_ip, last_ip)"
" VALUES ('%s', '%s', '%s', '%s')") %\
(new_ipallocationpool_id, new_subnet_id,
first_ip, last_ip)
run_cmd(cur, cmd)
def create_port_trunk(cur, port_id):
"""
This function will create a trunk off of a given port if there doesn't
already exist a trunk off of that port. The port should be a compute
port; the trunk is created to replace a vlan-tagged subnet on that port.
"""
# create trunk if not exists
cmd = ("SELECT id FROM trunks WHERE port_id = '%s'") %\
(port_id)
run_cmd(cur, cmd)
trunk = cur.fetchone()
if trunk:
return trunk['id']
cmd = ("INSERT INTO trunks (admin_state_up, project_id, id, name, port_id,"
" status, standard_attr_id)"
" (SELECT admin_state_up, project_id, '%s', name, id, status, '%s'"
" FROM ports WHERE id = '%s') RETURNING id") %\
(uuid.uuid4(), create_standard_attribute(cur, 'trunks'), port_id)
run_cmd(cur, cmd)
trunk = cur.fetchone()
return trunk['id']
def create_subport(cur, trunk_id, subport_id, segmentation_type,
segmentation_id):
"""
Create a subport off of a given network trunk.
The segmentation_id should be the vlan id as visible to the guest,
not the segmentation id of the network segment.
"""
cmd = ("INSERT INTO subports"
" (port_id, trunk_id, segmentation_type, segmentation_id)"
" VALUES ('%s', '%s','%s','%s')") %\
(subport_id, trunk_id, segmentation_type, segmentation_id)
run_cmd(cur, cmd)
cmd = ("UPDATE ports SET device_id='', device_owner='trunk:subport'"
" WHERE id='%s'") % subport_id
run_cmd(cur, cmd)
vif_details = '{\"port_filter\": true, \"vhostuser_enabled\": false}'
cmd = ("UPDATE ml2_port_bindings SET vif_model='',vif_details='%s'"
" WHERE port_id='%s'" % (vif_details, subport_id))
run_cmd(cur, cmd)
if __name__ == "__main__":
sys.exit(main())

View File

@ -1,297 +0,0 @@
#!/usr/bin/env python
# Copyright (c) 2017-2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# This script will update the storage backends for controller-1.
import psycopg2
import sys
import json
from sysinv.openstack.common import uuidutils
from sysinv.common import constants
from psycopg2.extras import RealDictCursor
from controllerconfig.common import log
from controllerconfig.upgrades import utils
LOG = log.get_logger(__name__)
CINDER_BACKEND = None
CONFIG_CINDER_LVM_TYPE = "CONFIG_CINDER_LVM_TYPE"
def main():
action = None
from_release = None
to_release = None # noqa
arg = 1
while arg < len(sys.argv):
if arg == 1:
from_release = sys.argv[arg]
elif arg == 2:
to_release = sys.argv[arg] # noqa
elif arg == 3:
action = sys.argv[arg]
else:
print ("Invalid option %s." % sys.argv[arg])
return 1
arg += 1
log.configure()
if from_release == "17.06" and action == "migrate":
try:
set_backends(from_release)
except Exception as ex:
LOG.exception(ex)
return 1
def update_capabilities(cur):
# Update i_idisk capabilities.
cur.execute("select i_idisk.forihostid, i_idisk.uuid, "
"i_idisk.device_node, i_idisk.device_path, "
"i_idisk.id, i_idisk.capabilities from i_idisk")
disks = cur.fetchall()
for d in disks:
d_json_dict = json.loads(d['capabilities'])
if constants.IDISK_DEV_FUNCTION in d_json_dict:
del d_json_dict[constants.IDISK_DEV_FUNCTION]
d_new_capab = json.dumps(d_json_dict)
try:
cur.execute(
"update i_idisk set capabilities=%s "
"where id=%s",
(d_new_capab, d['id']))
except Exception as e:
LOG.exception("Error: %s" % str(e))
raise
# Update i_system capabilities.
cur.execute("select i_system.id, i_system.capabilities "
"from i_system")
systems = cur.fetchall()
for s in systems:
s_json_dict = json.loads(s['capabilities'])
if 'cinder_backend' in s_json_dict:
del s_json_dict['cinder_backend']
s_new_capab = json.dumps(s_json_dict)
cur.execute(
"update i_system set capabilities=%s "
"where id=%s",
(s_new_capab, s['id']))
def update_stors(cur):
# Get the stors
cur.execute("select i_istor.id, i_istor.idisk_uuid, "
"i_istor.function, i_istor.forihostid "
"from i_istor ")
stors = cur.fetchall()
for stor in stors:
if stor['function'] == constants.STOR_FUNCTION_CINDER:
# remove cinder stors
try:
cur.execute(
"update i_idisk set foristorid=null where uuid=%s",
(stor['idisk_uuid'],))
cur.execute(
"delete from i_istor where id=%s",
(stor['id'],))
except Exception as e:
LOG.exception("Error: %s" % str(e))
raise
elif stor['function'] == constants.STOR_FUNCTION_OSD:
# link OSDs to the primary storage tier
try:
cur.execute(
"update i_istor set fortierid=1 where id=%s",
(stor['id'],))
except Exception as e:
LOG.exception("Error: %s" % str(e))
raise
def add_primary_storage_tier(cur):
# A cluster and a primary tier are always present even if we don't have
# a ceph backend currently enabled. So make sure on upgrade we add the tier
# referencing the existing cluster.
new_storage_tier_uuid = uuidutils.generate_uuid()
try:
# Currently only 1 cluster ever defined, id must be 1
cur.execute("insert into storage_tiers(uuid, id, name, type, status, "
"capabilities, forclusterid) "
"values(%s, %s, %s, %s, %s, %s, %s)",
(new_storage_tier_uuid, '1',
constants.SB_TIER_DEFAULT_NAMES[
constants.SB_TIER_TYPE_CEPH],
constants.SB_TIER_TYPE_CEPH,
constants.SB_TIER_STATUS_DEFINED,
'{}', '1'))
except Exception as e:
LOG.exception("Error inserting into storage_tiers: %s" % str(e))
LOG.info("Primary Storage Tier added.")
def update_storage_backends(cur):
global CINDER_BACKEND
cur.execute("select storage_backend.id, storage_backend.backend, "
"storage_backend.state, "
"storage_backend.forisystemid, storage_backend.services, "
"storage_backend.capabilities from storage_backend")
storage_backend = cur.fetchone()
LOG.info("storage_backend: %s" % str(storage_backend))
if not storage_backend:
LOG.exception("No storage backend present, exiting.")
raise
backend = storage_backend['backend']
if backend == "ceph":
CINDER_BACKEND = constants.SB_TYPE_CEPH
LOG.info("Ceph backend")
cur.execute(
"select storage_ceph.id, storage_ceph.object_gateway "
"from storage_ceph")
storage_ceph = cur.fetchone()
if not storage_ceph:
LOG.exception("No storage_ceph entry, exiting.")
raise
services = "{0}, {1}".format(constants.SB_SVC_CINDER,
constants.SB_SVC_GLANCE)
if storage_ceph['object_gateway'] == "t":
services = "cinder, glance, swift"
LOG.info("Services ran on ceph: %s" % services)
try:
cur.execute(
"update storage_backend set state=%s, services=%s, "
"capabilities=%s where id=%s",
(constants.SB_DEFAULT_NAMES[constants.SB_TYPE_CEPH],
constants.SB_STATE_CONFIGURED, services,
'{"replication":"2", "min_replication":"1"}',
storage_backend['id']))
cur.execute(
"update storage_ceph set tier_id=%s where id=%s",
('1', storage_backend['id']))
except Exception as e:
LOG.exception("Error: %s" % str(e))
raise
elif backend == "lvm":
CINDER_BACKEND = constants.SB_TYPE_LVM
LOG.info("LVM backend")
cur.execute(
"update storage_backend set name=%s, state=%s, services=%s, "
"capabilities=%s where id=%s",
(constants.SB_DEFAULT_NAMES[constants.SB_TYPE_LVM],
constants.SB_STATE_CONFIGURED, constants.SB_SVC_CINDER, '{}',
storage_backend['id']))
else:
LOG.info("Other backend present: %s" % backend)
return
new_storage_backend_uuid = uuidutils.generate_uuid()
cur.execute(
"insert into storage_backend(uuid, name, backend, state, "
"forisystemid, services, capabilities) "
"values(%s, %s, %s, %s, %s, %s, %s)",
(new_storage_backend_uuid,
constants.SB_DEFAULT_NAMES[constants.SB_TYPE_FILE],
constants.SB_TYPE_FILE, constants.SB_STATE_CONFIGURED,
storage_backend['forisystemid'], constants.SB_SVC_GLANCE, '{}'))
try:
cur.execute(
"select storage_backend.id, storage_backend.name, "
"storage_backend.backend, storage_backend.state, "
"storage_backend.forisystemid, storage_backend.services, "
"storage_backend.capabilities from storage_backend where "
"services=%s", (constants.SB_SVC_GLANCE,))
except Exception as e:
LOG.exception("Error selecting the storage backend for glance: %s"
% str(e))
storage_backend_glance = cur.fetchone()
try:
cur.execute("insert into storage_file(id) values(%s)",
(storage_backend_glance['id'],))
except Exception as e:
LOG.exception("Error inserting into storage file: %s" % str(e))
LOG.info("Backends updated")
def update_legacy_cache_tier(cur):
feature_enabled = constants.SERVICE_PARAM_CEPH_CACHE_TIER_FEATURE_ENABLED
cur.execute("select * from service_parameter where service=%s and "
"name=%s", (constants.SERVICE_TYPE_CEPH, feature_enabled,))
parameters = cur.fetchall()
if parameters is None or len(parameters) == 0:
LOG.exception("Failed to fetch ceph service_parameter data")
raise
# Make sure that cache tiering is disabled: Not supported but not removed
LOG.info("Updating ceph service parameters")
cur.execute("update service_parameter set value='false' where "
"service=%s and name=%s",
(constants.SERVICE_TYPE_CEPH, feature_enabled,))
def update_lvm_type(cur, from_release):
lvm_type = None
packstack_config = utils.get_packstack_config(from_release)
try:
config_cinder_lvm_type = packstack_config.get(
'general', CONFIG_CINDER_LVM_TYPE)
except Exception:
# For upgrades from R2, this value may be missing
# If so we log and use the default value of thin
LOG.info("No %s option. Using Default thin." % CONFIG_CINDER_LVM_TYPE)
config_cinder_lvm_type = constants.CINDER_LVM_TYPE_THIN
# Determine the lvm_type from the packstack-answers.txt file.
# If this information is missing, just give a warning and continue
# with the upgrade since this is not critical.
if constants.CINDER_LVM_TYPE_THIN in config_cinder_lvm_type.lower():
lvm_type = constants.CINDER_LVM_TYPE_THIN
elif constants.CINDER_LVM_TYPE_THICK in config_cinder_lvm_type.lower():
lvm_type = constants.CINDER_LVM_TYPE_THICK
else:
LOG.warning("No %s or %s LVM type" % (constants.CINDER_LVM_TYPE_THIN,
constants.CINDER_LVM_TYPE_THICK))
if not lvm_type:
LOG.warning("No %s option" % CONFIG_CINDER_LVM_TYPE)
lvm_type = constants.CINDER_LVM_TYPE_THIN
LOG.info("lvm_type: %s" % lvm_type)
capabilities = '{"lvm_type": "%s"}' % lvm_type
cur.execute("update i_lvg set capabilities=%s where lvm_vg_name=%s",
(capabilities, constants.LVG_CINDER_VOLUMES))
def set_backends(from_release):
conn = psycopg2.connect("dbname=sysinv user=postgres")
with conn:
with conn.cursor(cursor_factory=RealDictCursor) as cur:
update_stors(cur)
update_capabilities(cur)
add_primary_storage_tier(cur)
update_storage_backends(cur)
if CINDER_BACKEND == constants.SB_TYPE_CEPH:
update_legacy_cache_tier(cur)
if CINDER_BACKEND == constants.SB_TYPE_LVM:
update_lvm_type(cur, from_release)
if __name__ == "__main__":
sys.exit(main())

View File

@ -1,78 +0,0 @@
#!/usr/bin/env python
# Copyright (c) 2017 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# This migration script converts the sdn_enabled field in the system table
# from y/n to True/False
import json
import sys
import psycopg2
from controllerconfig.common import log
from psycopg2.extras import RealDictCursor
LOG = log.get_logger(__name__)
def main():
action = None
from_release = None
to_release = None # noqa
arg = 1
while arg < len(sys.argv):
if arg == 1:
from_release = sys.argv[arg]
elif arg == 2:
to_release = sys.argv[arg] # noqa
elif arg == 3:
action = sys.argv[arg]
else:
print ("Invalid option %s." % sys.argv[arg])
return 1
arg += 1
log.configure()
if from_release == "17.06" and action == "migrate":
try:
LOG.info("performing system migration from release %s to %s with "
"action: %s" % (from_release, to_release, action))
update_system_capabilities()
except Exception as ex:
LOG.exception(ex)
print ex
return 1
def update_system_capabilities():
conn = psycopg2.connect("dbname='sysinv' user='postgres'")
with conn:
with conn.cursor(cursor_factory=RealDictCursor) as cur:
cur.execute("select capabilities from i_system WHERE id = 1;")
capabilities = cur.fetchone()
if capabilities is None:
LOG.exception("Failed to fetch i_system data")
raise
fields_str = capabilities.get('capabilities')
fields_dict = json.loads(fields_str)
if fields_dict.get('sdn_enabled') == 'y':
new_vals = {'sdn_enabled': True}
else:
new_vals = {'sdn_enabled': False}
fields_dict.update(new_vals)
new_cap = json.dumps(fields_dict)
LOG.info("Updating system capabilities %s to %s" %
(capabilities, new_cap))
upgrade_vals = {'C': new_cap}
cur.execute("update i_system set capabilities=%(C)s WHERE id=1",
upgrade_vals)
if __name__ == "__main__":
sys.exit(main())
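
For context, a minimal sketch, assuming the same capabilities JSON layout and not taken from this change, of the y/n to boolean conversion that update_system_capabilities() performs:

import json

def convert_sdn_enabled(capabilities_json):
    # Mirrors the migration above: 'y' becomes True, anything else False.
    fields = json.loads(capabilities_json)
    fields['sdn_enabled'] = (fields.get('sdn_enabled') == 'y')
    return json.dumps(fields)

# convert_sdn_enabled('{"sdn_enabled": "y"}') -> '{"sdn_enabled": true}'
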


@@ -1,67 +0,0 @@
#!/usr/bin/env python
# Copyright (c) 2017 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# This migration script converts the identity and assignment driver
# values in the service parameter table from their fully qualified
# paths to a relative path as required by Pike
import sys
import psycopg2
from controllerconfig.common import log
from psycopg2.extras import RealDictCursor
LOG = log.get_logger(__name__)
def main():
action = None
from_release = None
to_release = None # noqa
arg = 1
while arg < len(sys.argv):
if arg == 1:
from_release = sys.argv[arg]
elif arg == 2:
to_release = sys.argv[arg] # noqa
elif arg == 3:
action = sys.argv[arg]
else:
print ("Invalid option %s." % sys.argv[arg])
return 1
arg += 1
log.configure()
if from_release == "17.06" and action == "migrate":
try:
LOG.info("performing system migration from release %s to %s with "
"action: %s" % (from_release, to_release, action))
update_identity_service_parameters()
except Exception as ex:
LOG.exception(ex)
print ex
return 1
def update_identity_service_parameters():
conn = psycopg2.connect("dbname='sysinv' user='postgres'")
with conn:
with conn.cursor(cursor_factory=RealDictCursor) as cur:
cur.execute("select * from service_parameter "
"where service='identity' and name='driver';")
parameters = cur.fetchall()
if parameters is None or len(parameters) == 0:
LOG.exception(
"Failed to fetch identity service_parameter data")
raise
LOG.info("Updating identity service parameters to 'sql'")
cur.execute("update service_parameter set value='sql' "
"where service='identity' and name='driver';")
if __name__ == "__main__":
sys.exit(main())
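
A possible sanity check after the update above, shown only as a hedged sketch with an assumed local postgres connection, not as part of this change:

import psycopg2
from psycopg2.extras import RealDictCursor

conn = psycopg2.connect("dbname='sysinv' user='postgres'")
with conn:
    with conn.cursor(cursor_factory=RealDictCursor) as cur:
        # Every identity/driver parameter should now read 'sql'.
        cur.execute("select value from service_parameter "
                    "where service='identity' and name='driver';")
        assert all(row['value'] == 'sql' for row in cur.fetchall())
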


@@ -1,83 +0,0 @@
#!/usr/bin/env python
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# This migration script converts the admin URL in the Keystone
# service catalog to be equivalent to the internal URL
import sys
import psycopg2
from controllerconfig.common import log
from psycopg2.extras import RealDictCursor
LOG = log.get_logger(__name__)
def main():
action = None
from_release = None
to_release = None # noqa
arg = 1
while arg < len(sys.argv):
if arg == 1:
from_release = sys.argv[arg]
elif arg == 2:
to_release = sys.argv[arg] # noqa
elif arg == 3:
action = sys.argv[arg]
else:
print ("Invalid option %s." % sys.argv[arg])
return 1
arg += 1
log.configure()
if from_release == "17.06" and action == "migrate":
try:
LOG.info("performing keystone migration from release %s to %s "
"with action: %s" % (from_release, to_release, action))
update_identity_admin_url()
except Exception as ex:
LOG.exception(ex)
print ex
return 1
# We will update for all Regions and not just the primary Region,
# otherwise we'd break non-Primary Regions once Primary Region
# gets upgraded
def update_identity_admin_url():
conn = psycopg2.connect("dbname='keystone' user='postgres'")
with conn:
with conn.cursor(cursor_factory=RealDictCursor) as cur:
cur.execute("SELECT service_id, url, region_id FROM "
"endpoint INNER JOIN service "
"ON endpoint.service_id = service.id WHERE "
"type='identity' and interface='internal';")
records = cur.fetchall()
if records is None or len(records) == 0:
LOG.exception(
"Failed to fetch identity endpoint and servic data")
raise
for record in records:
service_id = record['service_id']
internal_url = record['url']
region_id = record['region_id']
if not service_id or not internal_url or not region_id:
LOG.exception(
"Fetched an entry %s with essential data missing" %
record)
raise
LOG.info("Updating identity admin URL to '%s' for "
"service_id '%s' and region '%s'" %
(internal_url, service_id, region_id))
cur.execute("UPDATE endpoint SET url='%s' "
"WHERE interface='admin' and service_id='%s' "
"and region_id='%s' ;" %
(internal_url, service_id, region_id))
if __name__ == "__main__":
sys.exit(main())
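
One note on the UPDATE above: the values are interpolated directly into the SQL string. A minimal sketch, illustrative only and not part of this change, of the equivalent parameterized form that lets psycopg2 handle quoting, reusing the variables from the loop above:

cur.execute("UPDATE endpoint SET url=%s "
            "WHERE interface='admin' AND service_id=%s "
            "AND region_id=%s",
            (internal_url, service_id, region_id))
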


@@ -1,197 +0,0 @@
#!/usr/bin/env python
# Copyright (c) 2017 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# This migration script converts the sdn_enabled field in the system table
# from y/n to True/False
import json
import sys
import uuid
import psycopg2
from netaddr import IPNetwork
from controllerconfig.common import log
from psycopg2.extras import RealDictCursor, DictCursor
from controllerconfig.upgrades import utils
from sysinv.common import constants
LOG = log.get_logger(__name__)
def main():
action = None
from_release = None
to_release = None # noqa
arg = 1
while arg < len(sys.argv):
if arg == 1:
from_release = sys.argv[arg]
elif arg == 2:
to_release = sys.argv[arg] # noqa
elif arg == 3:
action = sys.argv[arg]
else:
print ("Invalid option %s." % sys.argv[arg])
return 1
arg += 1
log.configure()
if from_release == "17.06" and action == "migrate":
try:
LOG.info("Performing system migration from release %s to %s with "
"action: %s" % (from_release, to_release, action))
packstack_config = utils.get_packstack_config(from_release)
config_region = packstack_config.get('general', 'CONFIG_REGION')
if config_region == 'y':
region_name = packstack_config.get('general',
'CONFIG_REGION_2_NAME')
else:
region_name = packstack_config.get('general',
'CONFIG_KEYSTONE_REGION')
project_name = packstack_config.get('general',
'CONFIG_SERVICE_TENANT_NAME')
multicast_subnet = IPNetwork(packstack_config.get(
'general', 'CONFIG_MULTICAST_MGMT_SUBNET'))
pxeboot_subnet = IPNetwork(packstack_config.get(
'general', 'CONFIG_PLATFORM_PXEBOOT_SUBNET'))
mtu = packstack_config.get('general', 'CONFIG_PLATFORM_MGMT_MTU')
conn = psycopg2.connect("dbname='sysinv' user='postgres'")
with conn:
update_system_table(conn, region_name, project_name)
populate_multicast_address_records(conn, multicast_subnet, mtu)
populate_pxeboot_address_records(conn, pxeboot_subnet, mtu)
except Exception as ex:
LOG.exception(ex)
print ex
return 1
def update_system_table(conn, region_name, project_name):
with conn.cursor(cursor_factory=RealDictCursor) as cur:
cur.execute("select capabilities from i_system WHERE id = 1;")
capabilities = cur.fetchone()
if capabilities is None:
LOG.exception("Failed to fetch i_system data")
raise
fields_str = capabilities.get('capabilities')
fields_dict = json.loads(fields_str)
if fields_dict.get('region_config') == 'True':
new_vals = {'region_config': True}
else:
new_vals = {'region_config': False}
fields_dict.update(new_vals)
new_cap = json.dumps(fields_dict)
LOG.info("Updating system capabilities %s to %s"
% (capabilities, new_cap))
cur.execute("update i_system set capabilities=%s, "
"region_name=%s, service_project_name=%s WHERE id=1",
(new_cap, region_name, project_name))
def populate_multicast_address_records(conn, multicast_subnet, mtu):
pool_name = 'multicast-subnet'
with conn.cursor(cursor_factory=DictCursor) as cur:
cur.execute('insert into address_pools(uuid,name,family,network,'
'prefix,"order") VALUES(%s, %s, %s, %s, %s, %s)',
(str(uuid.uuid4()), pool_name, multicast_subnet.version,
str(multicast_subnet.network), multicast_subnet.prefixlen,
'random'))
cur.execute("select id from address_pools WHERE name=%s;",
(pool_name,))
pool_row = cur.fetchone()
if pool_row is None:
LOG.exception("Failed to fetch pool id for %s", pool_name)
raise
pool_id = pool_row['id']
cur.execute('insert into address_pool_ranges(address_pool_id,uuid,'
'start,"end") VALUES(%s, %s, %s, %s)',
(pool_id, str(uuid.uuid4()),
str(multicast_subnet[1]),
str(multicast_subnet[-2])))
cur.execute("insert into networks(id, address_pool_id, uuid,"
"type, mtu, dynamic) values(%s, %s, %s, %s, %s, False)",
(pool_id, pool_id, str(uuid.uuid4()),
constants.NETWORK_TYPE_MULTICAST, mtu))
addresses = {
constants.SM_MULTICAST_MGMT_IP_NAME:
str(multicast_subnet[1]),
constants.MTCE_MULTICAST_MGMT_IP_NAME:
str(multicast_subnet[2]),
constants.PATCH_CONTROLLER_MULTICAST_MGMT_IP_NAME:
str(multicast_subnet[3]),
constants.PATCH_AGENT_MULTICAST_MGMT_IP_NAME:
str(multicast_subnet[4]),
}
for name, address in addresses.iteritems():
address_name = "%s-%s" % (name, constants.NETWORK_TYPE_MULTICAST)
cur.execute("insert into addresses(uuid, address_pool_id, address,"
"prefix, name, family, enable_dad) values(%s, %s, %s,"
"%s, %s, %s, False)",
(str(uuid.uuid4()), pool_id, str(address),
multicast_subnet.prefixlen, address_name,
multicast_subnet.version))
def populate_pxeboot_address_records(conn, pxeboot_subnet, mtu):
pool_name = 'pxeboot'
with conn.cursor(cursor_factory=DictCursor) as cur:
cur.execute('select id from address_pools where name=%s;',
(pool_name,))
pool_row = cur.fetchone()
if pool_row:
LOG.info("existing pxeboot pool found, skip adding pxeboot "
"network. pool id = (%s)" % pool_row['id'])
return
cur.execute('insert into address_pools(uuid,name,family,network,'
'prefix,"order") VALUES(%s, %s, %s, %s, %s, %s)',
(str(uuid.uuid4()), pool_name, pxeboot_subnet.version,
str(pxeboot_subnet.network), pxeboot_subnet.prefixlen,
'random'))
cur.execute("select id from address_pools WHERE name=%s;",
(pool_name,))
pool_row = cur.fetchone()
if pool_row is None:
LOG.exception("Failed to fetch pool id for %s", pool_name)
raise
pool_id = pool_row['id']
cur.execute('insert into address_pool_ranges(address_pool_id,uuid,'
'start,"end") VALUES(%s, %s, %s, %s)',
(pool_id, str(uuid.uuid4()),
str(pxeboot_subnet[1]),
str(pxeboot_subnet[-2])))
cur.execute("insert into networks(id, address_pool_id, uuid,"
"type, mtu, dynamic) values(%s, %s, %s, %s, %s, False)",
(pool_id, pool_id, str(uuid.uuid4()),
constants.NETWORK_TYPE_PXEBOOT, mtu))
addresses = {
constants.CONTROLLER_HOSTNAME:
str(pxeboot_subnet[2]),
constants.CONTROLLER_0_HOSTNAME:
str(pxeboot_subnet[3]),
constants.CONTROLLER_1_HOSTNAME:
str(pxeboot_subnet[4]),
}
for name, address in addresses.iteritems():
address_name = "%s-%s" % (name, constants.NETWORK_TYPE_PXEBOOT)
cur.execute("insert into addresses(uuid, address_pool_id, address,"
"prefix, name, family, enable_dad) values(%s, %s, %s,"
"%s, %s, %s, False)",
(str(uuid.uuid4()), pool_id, str(address),
pxeboot_subnet.prefixlen, address_name,
pxeboot_subnet.version))
if __name__ == "__main__":
sys.exit(main())
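
For reference, a small sketch of the netaddr subnet indexing used above to pick pool range boundaries and host addresses; the subnet value is an assumed example, the real one comes from the answer file:

from netaddr import IPNetwork

pxeboot_subnet = IPNetwork('192.168.202.0/24')  # example value only
range_start = str(pxeboot_subnet[1])   # first usable address in the pool
range_end = str(pxeboot_subnet[-2])    # last usable address in the pool
controller = str(pxeboot_subnet[2])    # controller floating pxeboot address
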


@@ -18,11 +18,5 @@ include ::platform::amqp::upgrade
include ::openstack::keystone::upgrade
include ::openstack::client::upgrade
include ::platform::mtce::upgrade
include ::openstack::murano::upgrade
include ::openstack::ironic::upgrade
include ::openstack::nova::upgrade
include ::platform::drbd::upgrade


@@ -348,11 +348,11 @@ class openstack::keystone::upgrade (
}
# Panko is a new non-optional service in 18.xx.
# Ensure its service account and endpoints are created
include ::panko::keystone::auth
# Add service account and endpoints for any new R6 services...
# include ::<new service>::keystone::auth
# No new services yet...
# Always remove the upgrade token file after all 18.xx
# Always remove the upgrade token file after all new
# services have been added
file { $upgrade_token_file :
ensure => absent,


@@ -680,8 +680,3 @@ class openstack::nova::compute::runtime {
stage => post
}
}
class openstack::nova::upgrade {
include ::nova::keystone::auth_placement
}


@@ -261,38 +261,6 @@ class platform::drbd::extension (
}
}
class platform::drbd::extension::upgrade (
) inherits ::platform::drbd::extension::params {
$drbd_primary = true
$drbd_initial = true
$drbd_automount = true
$drbd_manage = true
# ip2_override should be removed in R6. It is required for drbd-extension
# when upgrading from R4->R5 only. This is so "on controller-1" is set to
# 127.0.0.1 and not 127.0.0.2. drbd-extension is new to R5.
#
# on controller-1 {
# address ipv4 127.0.0.1:7793;
# }
#
platform::drbd::filesystem { $resource_name:
vg_name => $vg_name,
lv_name => $lv_name,
lv_size => $lv_size,
port => $port,
device => $device,
mountpoint => $mountpoint,
manage_override => $drbd_manage,
ha_primary_override => $drbd_primary,
initial_setup_override => $drbd_initial,
automount_override => $drbd_automount,
ip2_override => $::platform::drbd::params::ip1,
}
}
class platform::drbd::patch_vault::params (
$service_enabled = false,
$device = '/dev/drbd6',
@@ -423,22 +391,6 @@ class platform::drbd::extension::runtime {
include ::platform::drbd::extension
}
class platform::drbd::upgrade {
# On upgrading controller-1 (R4->R5) we need to make this new drbd resource
the primary as it does not currently exist on controller-0. This code MUST
# be removed in R6.
class { '::drbd':
wfc_timeout => 1,
degr_wfc_timeout => 1,
service_enable => true,
service_ensure => 'running'
}
include ::platform::drbd::params
include ::platform::drbd::extension::upgrade
}
class platform::drbd::patch_vault::runtime {
include ::platform::drbd::params


@@ -89,9 +89,3 @@ class platform::mtce::runtime {
stage => post
}
}
class platform::mtce::upgrade {
# configure an mtce user that was added in release 5
# to be removed in release 6
include ::platform::mtce::agent
}


@@ -46,23 +46,6 @@ class Health(object):
return unprovisioned_hosts, provisioned_hosts
def _check_controller_0_manifests(self, controller_0):
"""
Checks that controller-0 has all its manifests
During config_controller some manifests are not generated;
in particular, the interfaces manifest will be missing.
Upgrade abort-reinstall will fail if not all the manifests are present,
so we check for the manifest here.
"""
controller_0_mgmt_ip = controller_0['mgmt_ip']
network_manifest_path = os.path.join(
tsc.PACKSTACK_PATH,
'manifests',
constants.CONTROLLER,
"%s_interfaces.pp" % controller_0_mgmt_ip
)
return os.path.isfile(network_manifest_path)
def _check_hosts_enabled(self, hosts):
"""Checks that each host is enabled and unlocked"""
offline_host_list = []
@@ -324,17 +307,6 @@ class Health(object):
output += _('No imported load found. Unable to test further\n')
return health_ok, output
# Check that controller-0 has been locked and unlocked
# As this should only happen in lab scenarios, we only display a
# message in cases where the check fails
controller_0 = self._dbapi.ihost_get_by_hostname(
constants.CONTROLLER_0_HOSTNAME)
if not self._check_controller_0_manifests(controller_0):
output += _('Missing manifests for %s. '
'Lock and Unlock to resolve\n') \
% constants.CONTROLLER_0_HOSTNAME
health_ok = False
upgrade_version = imported_load.software_version
if imported_load.required_patches:
patches = imported_load.required_patches.split('\n')


@@ -18,8 +18,8 @@ from . import base
HOSTNAME_INFRA_SUFFIX = '-infra'
NOVA_UPGRADE_LEVEL_NEWTON = 'newton'
NOVA_UPGRADE_LEVELS = {'17.06': NOVA_UPGRADE_LEVEL_NEWTON}
NOVA_UPGRADE_LEVEL_PIKE = 'pike'
NOVA_UPGRADE_LEVELS = {'18.03': NOVA_UPGRADE_LEVEL_PIKE}
class PlatformPuppet(base.BasePuppet):
@@ -192,14 +192,10 @@ class PlatformPuppet(base.BasePuppet):
constants.UPGRADE_COMPLETED]
# we don't need compatibility mode after we activate
if upgrade.state in upgrade_states:
config.update({
'neutron::server::vhost_user_enabled': True
})
return config
upgrade_load_id = upgrade.to_load
# TODO: update the nova upgrade level for Pike
host_upgrade = self.dbapi.host_upgrade_get_by_host(host['id'])
if host_upgrade.target_load == upgrade_load_id:
from_load = self.dbapi.load_get(upgrade.from_load)