Merge "Cleanup obsolete upgrade scripts"

Zuul 2023-12-13 18:10:45 +00:00 committed by Gerrit Code Review
commit 719bff6b9b
11 changed files with 0 additions and 2827 deletions


@@ -1,235 +0,0 @@
#!/usr/bin/env python
# Copyright (c) 2023 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
import psycopg2
import re
import sys
from controllerconfig.common import log
from psycopg2.extras import RealDictCursor
from sysinv.agent import disk
from sysinv.common import constants
LOG = log.get_logger(__name__)
def main():
action = None
from_release = None
to_release = None
arg = 1
while arg < len(sys.argv):
if arg == 1:
from_release = sys.argv[arg]
elif arg == 2:
to_release = sys.argv[arg]
elif arg == 3:
action = sys.argv[arg]
else:
print("Invalid option %s." % sys.argv[arg])
return 1
arg += 1
log.configure()
LOG.info(
"%s invoked from_release = %s to_release = %s action = %s"
% (sys.argv[0], from_release, to_release, action)
)
res = 0
if action == "migrate" and (
from_release == "21.12" and
to_release == "22.12"
):
if not is_multipath():
LOG.info("Multipath not detected, nothing to do")
return 0
try:
conn = psycopg2.connect("dbname=sysinv user=postgres")
do_update_i_host(conn)
do_update_i_idisks(conn)
do_update_partitions(conn)
do_update_i_pv(conn)
do_update_hw_settle(conn)
except Exception as e:
LOG.exception("Error: {}".format(e))
res = 1
return res
def do_update_partitions(conn):
partitions_db = get_partitions(conn)
for partition_db in partitions_db:
new_path_device_node = transform_device_node_path(
partition_db["device_node"]
)
new_part_path_device_path = transform_part_device_path(
partition_db["device_path"])
query = (
"UPDATE partition SET device_path='{}', "
"device_node='{}' WHERE id={};".format(
new_part_path_device_path,
new_path_device_node,
partition_db["id"],
)
)
LOG.info(
"Update partition id={} query={}".format(
partition_db["id"], query)
)
do_update_query(conn, query)
def do_update_i_pv(conn):
i_pvs = get_i_pvs(conn)
for i_pv in i_pvs:
new_path_device_node = transform_device_node_path(
i_pv["disk_or_part_device_node"]
)
new_disk_or_part_device_path = transform_part_device_path(
i_pv["disk_or_part_device_path"]
)
query = (
"UPDATE i_pv SET disk_or_part_device_node='{}', "
"lvm_pv_name='{}', disk_or_part_device_path='{}' "
"WHERE id={}").format(
new_path_device_node,
new_path_device_node,
new_disk_or_part_device_path,
i_pv["id"])
LOG.info("Update i_pv id={} query= {}".format(
i_pv["id"], query))
do_update_query(
conn,
query
)
def do_update_i_idisks(conn):
i_disks_db = get_idisks(conn)
for i_disk_db in i_disks_db:
new_device_path = transform_device_path(i_disk_db["device_path"])
query = "UPDATE i_idisk SET device_path='{}' "\
"WHERE id={};".format(
new_device_path, i_disk_db["id"])
LOG.info(
"Update disk id={} device_path={} "
"to {}".format(
i_disk_db["id"],
i_disk_db["device_path"],
new_device_path))
do_update_query(conn, query)
def do_update_i_host(conn):
i_hosts = get_i_hosts(conn)
for i_host in i_hosts:
query = (
"UPDATE i_host SET boot_device='/dev/mapper/mpatha', "
"rootfs_device='/dev/mapper/mpatha' "
"WHERE id={};".format(
i_host["id"]
)
)
LOG.info("Update i_hosts id={} query= {}".format(
i_host["id"], query))
do_update_query(conn, query)
def get_idisks(conn):
query = "SELECT id, device_node, serial_id, device_id, device_path "\
"FROM i_idisk;"
with conn.cursor(cursor_factory=RealDictCursor) as cur:
cur.execute(query)
i_disks = cur.fetchall()
return i_disks
def get_partitions(conn):
query = "SELECT id, device_node, device_path FROM partition;"
with conn.cursor(cursor_factory=RealDictCursor) as cur:
cur.execute(query)
partitions = cur.fetchall()
return partitions
def get_i_pvs(conn):
query = (
"SELECT id, disk_or_part_device_node, lvm_pv_name, "
"disk_or_part_device_path FROM i_pv;"
)
with conn.cursor(cursor_factory=RealDictCursor) as cur:
cur.execute(query)
pvs = cur.fetchall()
return pvs
def get_i_hosts(conn):
query = "SELECT id, boot_device, rootfs_device "\
"FROM i_host WHERE personality='controller';"
with conn.cursor(cursor_factory=RealDictCursor) as cur:
cur.execute(query)
ihosts = cur.fetchall()
return ihosts
def do_update_query(conn, query):
with conn.cursor() as cur:
cur.execute(query)
conn.commit()
def do_update_hw_settle(conn):
query = "UPDATE i_host SET hw_settle='30'; "
LOG.info("Update hw_settle query= {}".format(query))
do_update_query(conn, query)
def is_multipath():
disk_operator = disk.DiskOperator()
system_disk = disk_operator.idisk_get()[0]
if constants.DEVICE_NAME_MPATH in system_disk["device_node"]:
return True
return False
def transform_device_node_path(path):
regex = r"(\/dev\/mapper\/mpath)([a-zA-Z])(\d)"
result = re.match(regex, path)
if result:
return "{}{}-part{}".format(result[1], result[2], result[3])
return path
def transform_device_path(path):
# This regex is used to support QEMU virtualization devices,
# while all other real iSCSI devices start with 0
regex = r"(\/dev\/disk\/by-id\/)dm-uuid-mpath-[0-9](.*)"
result = re.match(regex, path)
if result:
return "{}wwn-0x{}".format(result[1], result[2])
return path
def transform_part_device_path(path):
# This regex is used to support QEMU virtualization devices,
# while all other real iSCSI devices start with 0
regex = r"(\/dev\/disk\/by-id\/)dm-uuid-(.*)-mpath-[0-9](.*)"
result = re.match(regex, path)
if result:
return "{}wwn-0x{}-{}".format(result[1], result[3], result[2])
return path
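# Illustrative examples (hypothetical device names and WWN suffix) of how the
# transform_* helpers above rewrite multipath device nodes and paths:
#   transform_device_node_path("/dev/mapper/mpatha3")
#       -> "/dev/mapper/mpatha-part3"
#   transform_device_path("/dev/disk/by-id/dm-uuid-mpath-36001405aabbccdd")
#       -> "/dev/disk/by-id/wwn-0x6001405aabbccdd"
#   transform_part_device_path(
#       "/dev/disk/by-id/dm-uuid-part3-mpath-36001405aabbccdd")
#       -> "/dev/disk/by-id/wwn-0x6001405aabbccdd-part3"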
if __name__ == "__main__":
sys.exit(main())


@@ -1,613 +0,0 @@
#!/usr/bin/env python
# Copyright (c) 2022 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# This is a data migration script to pick up the partition changes during
# the upgrade to the Debian OS.
# The migration refreshes the partition, i_pv and i_lvg tables
# with the new partition configuration on the filesystem of Debian StarlingX
# after controller-1 is upgraded.
#
import copy
import sys
import psycopg2
from controllerconfig.common import log
from datetime import datetime
import operator
from psycopg2.extras import DictCursor
import uuid
from sysinv.common import constants
from sysinv.common import utils as cutils
from sysinv.agent import partition as Partition
LOG = log.get_logger(__name__)
# set a partition's end_mib to END_OF_DISK_MIB to indicate
# that the partition takes all of the remaining disk space
END_OF_DISK_MIB = "-1"
ONE_GIB = 1024 * 1024 * 1024
def main():
action = None
from_release = None
to_release = None
arg = 1
while arg < len(sys.argv):
if arg == 1:
from_release = sys.argv[arg]
elif arg == 2:
to_release = sys.argv[arg]
elif arg == 3:
action = sys.argv[arg]
else:
print("Invalid option %s." % sys.argv[arg])
return 1
arg += 1
log.configure()
LOG.info("%s invoked from_release = %s to_release = %s action = %s"
% (sys.argv[0], from_release, to_release, action))
res = 0
if action == "migrate" and (from_release in ['22.06', '21.12'] and
to_release == '22.12'):
try:
res = do_update()
except Exception:
LOG.exception("Remapping partition action failed")
res = 1
return res
IPARTITION_COLUMNS = 'created_at', 'updated_at', 'deleted_at', 'uuid', \
'start_mib', 'end_mib', 'size_mib', 'device_path', \
'type_guid', 'type_name', 'idisk_id', 'idisk_uuid', \
'capabilities', 'status', 'foripvid', 'forihostid', \
'device_node'
# worker node partition template
WORKER_PARTITION_LIST = [
{'start_mib': '1', 'end_mib': '2', 'size_mib': '1',
'type_guid': '21686148-6449-6e6f-744e-656564454649',
'type_name': 'BIOS boot partition'},
{'start_mib': '2', 'end_mib': '302', 'size_mib': '300',
'type_guid': 'c12a7328-f81f-11d2-ba4b-00a0c93ec93b',
'type_name': 'EFI system partition'},
{'start_mib': '302', 'end_mib': '2350', 'size_mib': '2048',
'type_guid': '0fc63daf-8483-4772-8e79-3d69d8477de4',
'type_name': 'Linux filesystem'},
{'start_mib': '2350', 'end_mib': END_OF_DISK_MIB, 'size_mib': '0',
'type_guid': 'e6d6d379-f507-44c2-a23c-238f2a3df928',
'type_name': 'Linux LVM'}]
def get_disk_uuid_mapping(conn, forihostid):
# return map of idisk uuid indexed by device_node
with conn.cursor(cursor_factory=DictCursor) as cur:
sql = "SELECT uuid, device_node FROM i_idisk WHERE forihostid = %s;"
cur.execute(sql, (forihostid, ))
vals = cur.fetchall()
mappings = {}
for val in vals:
pair = {val["device_node"]: val["uuid"]}
mappings.update(pair)
return mappings
def get_idisks(conn, forihostid):
# do not consider disk change (replace, remove, or add new disk)
# during upgrade
sql = "SELECT * FROM i_idisk WHERE forihostid = %s;"
with conn.cursor(cursor_factory=DictCursor) as cur:
cur.execute(sql, (forihostid, ))
idisks = cur.fetchall()
return idisks
def get_cur_host(conn):
with conn.cursor(cursor_factory=DictCursor) as cur:
cur.execute("SELECT system_mode FROM i_system;")
system = cur.fetchone()
upgrade_controller = "controller-1"
if system["system_mode"] == "simplex":
upgrade_controller = "controller-0"
cur.execute("SELECT id, boot_device FROM i_host WHERE hostname = %s;",
(upgrade_controller,))
instance = cur.fetchone()
if instance is None:
raise Exception("Failed to retrieve host id for %s" %
upgrade_controller)
return instance
def get_disk_by_device_node(disks, device_path):
for disk in disks:
if disk["device_path"] in device_path:
return disk
elif constants.DEVICE_NAME_MPATH in disk["device_node"]:
path_split = disk["device_node"].split(constants.DEVICE_NAME_MPATH)
if path_split[0] in device_path and path_split[1] in device_path:
return disk
raise Exception("Cannot locate the disk for %s" % device_path)
def get_rootdisk_partitions(conn, forihostid):
# get partitions on the root disk of the N release configuration;
# the corresponding vg name is appended to the end of each partition.
col_fmt = "p." + ", p.".join(IPARTITION_COLUMNS)
sql_fmt = "select %s, pv.lvm_vg_name " \
"from partition as p left join i_pv pv on pv.id = foripvid " \
"where idisk_uuid in" \
" (select d.uuid from i_host join i_idisk d on" \
" d.device_node = boot_device or" \
" d.device_path = boot_device" \
" where d.forihostid = %%s and i_host.id = %%s) " \
"order by start_mib;" % col_fmt
sql = sql_fmt % (forihostid, forihostid)
partitions = []
with conn.cursor(cursor_factory=DictCursor) as cur:
cur.execute(sql, (forihostid, forihostid))
partitions = cur.fetchall()
return partitions
def get_controller_partition_template(rootdisk):
# return list of partitions created on rootdisk, sorted by physical
# order (start_mib)
root_device_path = rootdisk["device_path"]
root_device_node = rootdisk["device_node"]
po = Partition.PartitionOperator()
partitions = po.ipartition_get(skip_gpt_check=True)
# sort by start_mib
now = datetime.now()
partition_additions = {
"created_at": now,
"updated_at": now,
"deleted_at": None,
# foripvid will be populated when updating i_pv table
"foripvid": None,
# TODO: check to load capabilities
"capabilities": None,
# These are the partitions that have already been created
"status": 1
}
bootdisk_partitions = []
for partition in partitions:
partition.update(partition_additions)
part_device_path = partition["device_path"]
if is_device_path_on_disk(part_device_path, root_device_path,
root_device_node):
partition["device_path"] = None
partition["device_node"] = None
bootdisk_partitions.append(partition)
sorted_list = sorted(bootdisk_partitions,
key=operator.itemgetter('start_mib'))
# the last partition takes all of the remaining disk space
sorted_list[-1]["end_mib"] = END_OF_DISK_MIB
return sorted_list
def get_node_partition_template(part_list):
# create a partition template from a list of partitions
template = copy.deepcopy(part_list)
now = datetime.now()
partition_additions = {
"created_at": now,
"updated_at": now,
"deleted_at": None,
# foripvid will be populated when updating i_pv table
"foripvid": None,
# TODO: check to load capabilities
"capabilities": None,
# These are the partitions that have already been created
"status": 1,
"device_path": None,
"device_node": None
}
for partition in template:
partition.update(partition_additions)
return template
def get_ipartitions(forihostid, template, rootdisk):
# localize default partitions on rootdisk
partitions = copy.deepcopy(template)
rootdisk_device_node = rootdisk["device_node"]
rootdisk_device_path = rootdisk["device_path"]
idx = 1
for partition in partitions:
# regenerate uuid
partition["uuid"] = "%s" % uuid.uuid4()
partition["idisk_id"] = rootdisk["id"]
partition["idisk_uuid"] = rootdisk["uuid"]
partition["forihostid"] = forihostid
device_node, device_path = \
build_device_node_path(rootdisk_device_node, rootdisk_device_path,
idx)
partition["device_node"] = device_node
partition["device_path"] = device_path
if partition["end_mib"] == END_OF_DISK_MIB:
# take all of the remaining disk space
end_mib = int(rootdisk["size_mib"]) + 1
partition["end_mib"] = str(end_mib)
partition["size_mib"] = str(end_mib - int(partition["start_mib"]))
idx += 1
return partitions
def build_device_node_path(disk_device_node, disk_device_path, device_idx):
"""Builds the partition device path and device node based on last
partition number and assigned disk.
"""
if constants.DEVICE_NAME_NVME in disk_device_node:
device_node = "%sp%s" % (disk_device_node, device_idx)
else:
device_node = "%s%s" % (disk_device_node, device_idx)
device_path = cutils.get_part_device_path(disk_device_path,
str(device_idx))
return device_node, device_path
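# Illustrative examples (hypothetical disk names) for build_device_node_path():
#   disk_device_node "/dev/sda", device_idx 4      -> device_node "/dev/sda4"
#   disk_device_node "/dev/nvme0n1", device_idx 4  -> device_node "/dev/nvme0n1p4"
# The partition device_path is delegated to cutils.get_part_device_path().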
def is_device_path_on_disk(device_path, disk_device_path, disk_device_node):
if disk_device_path in device_path:
return True
elif constants.DEVICE_NAME_MPATH in disk_device_node:
split_path = device_path.split("-part")
if split_path[0] in disk_device_path:
return True
return False
def append_additional_partitions(conn, new_rootdisk_partitions,
host, rootdisk):
# append user-created partitions on the root disk from the N release.
# new_rootdisk_partitions is the list of new default partitions on the
# root disk; additional user partitions on the root disk are appended
# to it to form the entire partition list for the root disk
forihostid = host["id"]
# get partitions on rootdisk from N db
rootdisk_partitions = get_rootdisk_partitions(conn, forihostid)
rootdisk_device_node = rootdisk["device_node"]
LOG.info("Previous release ipartitions on root disk %s \n%s" %
(rootdisk_device_node, rootdisk_partitions))
# find the last default partition in ordered list. All default
# partitions will be replaced with new default partitions.
for partition in rootdisk_partitions:
if partition["lvm_vg_name"] == "cgts-vg":
# found the 1st cgts-vg partition.
# the cgts-vg partition in the new load will replace the existing
# cgts-vg partition on the node as the PV of cgts-vg
new_rootdisk_partitions[-1]["foripvid"] = partition["foripvid"]
break
else:
# a cgts-vg is not found on root disk... game over
raise Exception("cgts-vg partition is not found on rootdisk")
ipartitions = []
for partition in new_rootdisk_partitions:
ipartition = [partition[key] for key in IPARTITION_COLUMNS]
ipartitions.append(ipartition)
return ipartitions
def update_partition(conn, ipartitions, forihostid, rootdisk):
dp_idx = IPARTITION_COLUMNS.index("device_path")
partition_disk_uuid_idx = IPARTITION_COLUMNS.index("idisk_uuid")
rootdisk_uuid = rootdisk["uuid"]
with conn.cursor(cursor_factory=DictCursor) as cur:
# 1. delete all partitions on rootdisk
sql = "DELETE FROM partition where idisk_uuid = %s;"
LOG.info("Delete partition records on root disk: uuid(%s)" %
rootdisk_uuid)
cur.execute(sql, (rootdisk_uuid, ))
count = cur.rowcount
LOG.info("%s partition records are deleted" % count)
# 2. recreate records for the partitions created on the root disk
LOG.info("recreate partition record on root disk %s" % rootdisk_uuid)
for ipartition in ipartitions:
if ipartition[partition_disk_uuid_idx] != rootdisk_uuid:
# skip non-rootdisk partitions
continue
device_path = ipartition[dp_idx]
col_fmt = ", ".join(["%s"] * len(IPARTITION_COLUMNS))
values_fmt = ", ".join(["%%s"] * len(IPARTITION_COLUMNS))
sql_fmt = "INSERT INTO partition (%s) VALUES(%s)" % \
(col_fmt, values_fmt)
sql = sql_fmt % IPARTITION_COLUMNS
cur.execute(sql, ipartition)
if cur.rowcount == 1:
LOG.info("Create new partition %s, %s" %
(device_path, ipartition[partition_disk_uuid_idx]))
LOG.info("Done recreate partitions on root disk")
sql = "SELECT id, uuid, device_node, device_path, foripvid " \
"FROM partition WHERE forihostid = %s"
cur.execute(sql, (forihostid, ))
partitions = [{"id": d[0], "uuid": d[1], "device_node": d[2],
"device_path": d[3], "foripvid": d[4],
"type": "partition"}
for d in cur.fetchall()]
return partitions
def update_pvs(conn, forihostid):
with conn.cursor(cursor_factory=DictCursor) as cur:
# partition records point to i_pv, but the i_pv references to the
# partition uuid (disk_or_part_uuid) and device_node
# (disk_or_part_device_node) need to be relinked.
# the two tables are doubly linked
# update primary cgts-vg pv, this pv and partition have been
# provisioned
sql = "UPDATE i_pv " \
"SET disk_or_part_uuid = p.uuid, " \
"disk_or_part_device_node = p.device_node, " \
"disk_or_part_device_path = p.device_path, " \
"lvm_pv_name = p.device_node " \
"FROM i_pv AS v JOIN partition AS p ON p.foripvid = v.id " \
"WHERE v.forihostid = %s AND p.forihostid = %s AND" \
" i_pv.id = v.id AND p.status <> %s"
cur.execute(sql, (forihostid, forihostid,
constants.PARTITION_CREATE_ON_UNLOCK_STATUS))
LOG.info("Updated %s PVs" % cur.rowcount)
# Delete the PVs that link to user partition on boot disk.
# As the user partitions on boot disk are deleted during
# update_partition, the orphan partition PVs are to be deleted.
sql = "DELETE FROM i_pv " \
"WHERE pv_type = 'partition' AND forihostid = %s AND id NOT IN" \
" (SELECT foripvid FROM partition WHERE forihostid = %s AND " \
"foripvid IS NOT Null)"
cur.execute(sql, (forihostid, forihostid))
count = cur.rowcount
if count > 0:
LOG.info("Deleted %s PVs on user partition" % cur.rowcount)
sql = "SELECT id, uuid, lvm_pv_name, pv_type, pv_state, " \
"disk_or_part_uuid " \
"FROM i_pv WHERE forihostid = %s"
cur.execute(sql, (forihostid, ))
pvs = [{"id": d[0], "uuid": d[1], "lvm_pv_name": d[2], "pv_type":
d[3], "pv_state": d[4], "disk_or_part_uuid": d[5]}
for d in cur.fetchall()]
return pvs
def update_lvgs(conn, forihostid):
with conn.cursor(cursor_factory=DictCursor) as cur:
# delete the lvgs that don't have any PVs.
# PVs can be deleted in update_pvs when associated partition is
# deleted as root disk space is reallocated to cgts-vg.
# nova-local can be deleted if all nova-local PVs are partitions
# on root disk. In this case all partitions and PVs are deleted
# in update_partition and update_pvs.
sql = "DELETE FROM i_lvg " \
"WHERE forihostid = %s AND id NOT IN " \
"(SELECT forilvgid FROM i_pv WHERE forihostid = %s AND " \
"forilvgid IS NOT Null);"
cur.execute(sql, (forihostid, forihostid))
count = cur.rowcount
if count > 0:
LOG.info("Deleted %s unused lvg" % count)
# mark lvgs to be recreated during host unlock
sql = "UPDATE i_lvg SET vg_state = %s " \
"WHERE lvm_vg_name <> 'cgts-vg' AND forihostid = %s;"
cur.execute(sql, (constants.LVG_ADD, forihostid))
count = cur.rowcount
if count > 0:
LOG.info("%s lvg will be recreated" % count)
sql = "SELECT id, uuid, lvm_vg_name FROM i_lvg WHERE forihostid = %s"
cur.execute(sql, (forihostid, ))
lvgs = [{"id": d[0], "uuid": d[1], "lvm_vg_name": d[2]}
for d in cur.fetchall()]
return lvgs
def get_disk_or_partition(conn, hostid):
sql = "SELECT uuid, device_node, device_path, foripvid, 'disk' as type " \
"FROM i_idisk WHERE forihostid = %s UNION " \
"SELECT uuid, device_node, device_path, foripvid, " \
"'partition' as type FROM partition WHERE forihostid = %s;"
with conn.cursor(cursor_factory=DictCursor) as cur:
cur.execute(sql, (hostid, hostid))
dops = [{"uuid": d[0], "device_node": d[1], "device_path": d[2],
"foripvid": d[3], "type": d[4]}
for d in cur.fetchall()]
return dops
def get_rootdisk(conn, hostid, boot_device):
# return device_node and device_path of rootdisk
sql = "SELECT id, uuid, device_node, device_path, size_mib " \
"FROM i_idisk " \
"WHERE (device_node = %s OR device_path = %s) AND forihostid = %s"
with conn.cursor(cursor_factory=DictCursor) as cur:
cur.execute(sql, (boot_device, boot_device, hostid))
rootdisk = cur.fetchone()
return rootdisk
def get_hosts(conn):
with conn.cursor(cursor_factory=DictCursor) as cur:
cur.execute("SELECT id, hostname, personality, boot_device, "
"subfunctions "
"FROM i_host WHERE personality "
"IN ('controller', 'worker');")
nodes = cur.fetchall()
return nodes
def update_host(conn, host, partition_template):
hostid = host["id"]
hostname = host["hostname"]
rootdisk = get_rootdisk(conn, hostid, host["boot_device"])
ipartitions = get_ipartitions(hostid,
partition_template, rootdisk)
ipartitions = append_additional_partitions(conn, ipartitions,
host, rootdisk)
ipartitions = update_partition(conn, ipartitions, hostid, rootdisk)
pvs = update_pvs(conn, hostid)
lvgs = update_lvgs(conn, hostid)
LOG.info("partition migration summary %s:" % hostname)
LOG.info("=" * 60)
LOG.info("new list of lvgs:")
for lvg in lvgs:
LOG.info("%s" % lvg)
LOG.info("new list of pvs:")
for pv in pvs:
LOG.info("%s" % pv)
LOG.info("new list of partitions:")
for ip in ipartitions:
LOG.info(ip)
LOG.info("=" * 60)
def get_nova_local_pvs(conn, hostid):
sql = "SELECT pv_type, lvm_vg_name, lvm_pv_size, disk_or_part_uuid " \
"FROM i_pv WHERE forihostid = %s AND lvm_vg_name='nova-local';"
with conn.cursor(cursor_factory=DictCursor) as cur:
cur.execute(sql, (hostid,))
pvs = cur.fetchall()
return pvs
def create_instances_lv(conn, host, rootdisk_nova_local_size):
# size_gib is rounded up to the nearest GiB
sql = "INSERT INTO host_fs" \
"(created_at, uuid, name, size, logical_volume, forihostid) " \
"VALUES(%s, %s, %s, %s, %s, %s);"
created_at = datetime.now()
fs_uuid = "%s" % uuid.uuid4()
name = constants.FILESYSTEM_NAME_INSTANCES
# round up
size_gib = int((rootdisk_nova_local_size + ONE_GIB - 1) / ONE_GIB)
lv_name = constants.FILESYSTEM_LV_DICT[name]
forihostid = host["id"]
with conn.cursor(cursor_factory=DictCursor) as cur:
cur.execute(sql, (created_at, fs_uuid, name, size_gib,
lv_name, forihostid))
if cur.rowcount == 1:
LOG.info("%s: created cgts-vg:%s %sGib" %
(host["hostname"], lv_name, size_gib))
def migrate_nova_local(conn, host, rootdisk):
# Migrate nova-local on the boot disk.
# This only needs to be done on nodes with a worker subfunction.
# The migration rules:
# 1. if nova-local exists only on the boot disk as a partition,
# the nova-local partition is dropped and the same amount of
# space is allocated to cgts-vg:instances-lv,
# 2. if nova-local exists only on a separate disk, then no
# migration operation is needed,
# 3. if nova-local exists on both a boot disk partition and a
# separate disk, the nova-local partition on the boot disk is
# dropped with no other compensation. This reduces the total
# nova-local space.
pvs = get_nova_local_pvs(conn, host["id"])
if len(pvs) > 0:
LOG.info("Found nova-local pvs on rootdisk: %s", pvs)
rootdisk_partitions = get_rootdisk_partitions(conn, host["id"])
rootdisk_part_uuids = [p["uuid"] for p in rootdisk_partitions]
rootdisk_nova_local_size = 0
pv_on_other_disk = False
for pv in pvs:
pv_type = pv["pv_type"]
dop_uuid = pv["disk_or_part_uuid"]
if (pv_type == "partition" and dop_uuid in rootdisk_part_uuids):
rootdisk_nova_local_size += int(pv["lvm_pv_size"])
else:
pv_on_other_disk = True
if rootdisk_nova_local_size > 0:
if not pv_on_other_disk:
create_instances_lv(conn, host, rootdisk_nova_local_size)
else:
msg = "Total nova-local is reduced by %s bytes"
LOG.info(msg % rootdisk_nova_local_size)
def do_update():
res = 0
conn = psycopg2.connect("dbname=sysinv user=postgres")
try:
cur_host = get_cur_host(conn)
rootdisk = get_rootdisk(conn, cur_host["id"], cur_host["boot_device"])
controller_partitions = get_controller_partition_template(rootdisk)
worker_partitions = get_node_partition_template(WORKER_PARTITION_LIST)
# migrate hosts with the partition template
hosts = get_hosts(conn)
for host in hosts:
personality = host["personality"]
if personality == constants.WORKER:
partition_template = worker_partitions
elif personality == constants.CONTROLLER:
partition_template = controller_partitions
else:
# nothing to migrate on storage node, as no user partitions
# are allowed on root disk
continue
if "worker" in host["subfunctions"]:
migrate_nova_local(conn, host, rootdisk)
update_host(conn, host, partition_template)
except psycopg2.Error as ex:
conn.rollback()
LOG.exception(ex)
LOG.warning("Rollback changes")
res = 1
except Exception as ex:
conn.rollback()
LOG.exception(ex)
LOG.warning("Rollback changes")
res = 1
else:
LOG.info("All good, committing all changes into database")
conn.commit()
finally:
conn.close()
return res
if __name__ == "__main__":
sys.exit(main())


@@ -1,496 +0,0 @@
#!/usr/bin/env python
# Copyright (c) 2022 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
# Conversion of oidc-auth-apps configuration
#
# Verify the supported configuration during health-query-upgrade.
# Back up overrides at upgrade start.
# Convert the configuration during upgrade activate.
from controllerconfig.common import log
import copy
import os
import psycopg2
from psycopg2.extras import RealDictCursor
import sys
import yaml
LOG = log.get_logger(__name__)
log.configure()
# This script is only valid for to/from releases:
ACCEPTED_FROM = ['21.12']
ACCEPTED_TO = ['22.12']
ACCEPTED_ACTIONS = ['health-check', 'start', 'migrate']
# this path should have been created by stx-oidc-auth-helm package
# with ownership assigned to postgres:postgres
BACKUP_PATH = '/var/opt/oidc-auth-apps'
# list of charts in oidc-auth-apps; for sanity check only
oidc_charts = ['dex', 'oidc-client', 'secret-observer']
# Hard-coded chart values; matching the fluxcd manifest defaults
DEFAULT_HTTPSTLS_MOUNT = '/etc/dex/tls'
DEFAULT_HTTPSTLS_MODE = 420
DEFAULT_HTTPSTLS_SECRET = 'local-dex.tls'
# A dictionary of values selected from the overrides yaml during
# validate_overrides(). The selected values are used to convert
# the yaml from old dex to new dex
DEFINES = {}
# validate yaml, instructions for what configurations accepted
validation_yaml = """
name: "supported"
validation: "children"
optional: False
accepted: ["extraVolumeMounts", "extraVolumes", "config", "certs"]
children:
- name: "extraVolumeMounts"
validation: "any"
optional: True
define: "volumeMounts"
- name: "extraVolumes"
validation: "any"
optional: True
define: "volumes"
- name: "config"
validation: "children"
optional: False
define: "dex_config"
children:
- name: "web"
validation: "children"
optional: True
children:
- name: "tlsCert"
validation: "exact"
optional: True
define: "dex_https_tlsCert"
- name: "tlsKey"
validation: "exact"
optional: True
define: "dex_https_tlsKey"
- name: "certs"
validation: "children"
optional: True
accepted: ["grpc", "web"]
children:
- name: "grpc"
validation: "children"
optional: True
accepted: ["secret"]
children:
- name: "secret"
validation: "children"
optional: False
accepted: ["caName", "clientTlsName", "serverTlsName"]
children:
- name: "caName"
validation: "exact"
optional: False
define: "tls_secret"
- name: "clientTlsName"
validation: "exact"
optional: False
define: "tls_secret"
- name: "serverTlsName"
validation: "exact"
optional: False
define: "tls_secret"
- name: "web"
validation: "children"
optional: False
accepted: ["secret"]
children:
- name: "secret"
validation: "children"
optional: False
accepted: ["caName", "tlsName"]
children:
- name: "caName"
validation: "exact"
optional: False
define: "tls_secret"
- name: "tlsName"
validation: "exact"
optional: False
define: "tls_secret"
"""
# sql to fetch the user_overrides from DB for oidc-auth-apps
sql_overrides = ("SELECT helm_overrides.name, user_overrides"
" FROM helm_overrides"
" LEFT OUTER JOIN kube_app"
" ON helm_overrides.app_id = kube_app.id"
" WHERE kube_app.name = 'oidc-auth-apps'")
sql_update = ("UPDATE helm_overrides"
" SET user_overrides = '%s'"
" FROM kube_app"
" WHERE helm_overrides.app_id = kube_app.id"
" AND kube_app.name = 'oidc-auth-apps'"
" AND helm_overrides.name = 'dex'")
def get_overrides(conn):
"""Fetch helm overrides from DB"""
with conn.cursor(cursor_factory=RealDictCursor) as cur:
cur.execute(sql_overrides)
return cur.fetchall()
def backup_overrides(overrides, action='debug'):
"""Dump helm overrides from DB to files in BACKUP_PATH"""
backup_path = os.path.join(BACKUP_PATH, action)
if not os.path.exists(backup_path):
os.makedirs(backup_path)
field = 'user_overrides'
for chart in overrides:
name = chart['name']
if name not in oidc_charts:
LOG.warning("oidc-auth-apps: mismatch chart name '%s'", name)
if chart[field]:
document = yaml.safe_load(chart[field])
if not document:
LOG.debug("oidc-auth-apps: %s empty document", name)
continue
backup_f = '_'.join([name, field])
backup_f = '.'.join([backup_f, 'yaml'])
backup_f = os.path.join(backup_path, backup_f)
try:
with open(backup_f, 'w') as file:
yaml.dump(document, file, default_flow_style=False)
except IOError as e:
LOG.error("oidc-auth-apps: IOError: %s; file: %s", e, backup_f)
return 1
LOG.info("oidc-auth-apps: user_overrides backed up to %s", backup_path)
return 0
def validate_value(instruction, value):
"""Verify a value"""
if instruction['validation'] == 'exact':
if type(value) not in [str, bool, int, float]:
LOG.error("oidc-auth-apps: value type %s not supported",
type(value))
return False
if 'define' in instruction:
if instruction['define'] in DEFINES:
if DEFINES[instruction['define']] != value:
LOG.error("oidc-auth-apps: defined value is"
" mismatched '%s': '%s' != '%s'",
instruction['define'],
DEFINES[instruction['define']],
value)
LOG.error("oidc-auth-apps: instruction: %s", instruction)
return False
else:
DEFINES[instruction['define']] = value
LOG.debug("oidc-auth-apps: define: '%s' == '%s'",
instruction['define'], value)
if 'values' in instruction:
LOG.error("oidc-auth-apps: validation exact values"
" not implemented")
return False
else:
LOG.error("oidc-auth-apps: validation %s not supported",
instruction['validation'])
return False
LOG.debug("oidc-auth-apps: accept %s: %s: %s",
instruction['validation'], instruction, value)
return True
def printable_item(item):
"""remove children from item to make it printable"""
printable = {}
printable['validation'] = item['validation']
printable['name'] = item['name']
printable['optional'] = item['optional']
if 'define' in item:
printable['define'] = item['define']
return printable
def define_complex_value(item, yaml_doc):
"""Subroutine to fill DEFINES for complex values"""
if 'define' in item and item['validation'] != 'exact':
# Handle saving of complex values
if item['define'] in DEFINES:
LOG.error("oidc-auth-apps: complex values comparison"
" is not supported: %s", printable_item(item))
return False
else:
DEFINES[item['define']] = copy.deepcopy(yaml_doc[item['name']])
LOG.debug("oidc-auth-apps: define: '%s'",
item['define'])
return True
def validate_item(item, yaml_doc):
"""Handle one list item from instruction"""
print_item = printable_item(item)
# If neither present nor optional: fail
# If not present, but optional: pass
optional = True
if 'optional' in item:
optional = item['optional']
present = item['name'] in yaml_doc
if not (present or optional):
LOG.error("oidc-auth-apps: overrides omit required value:"
" %s", print_item)
return False
elif not present:
# pass
return True
if not define_complex_value(item, yaml_doc):
return False
if item['validation'] == 'any':
# pass
LOG.debug("oidc-auth-apps: accept instruction: %s", print_item)
elif item['validation'] == 'exact':
if not validate_value(item, yaml_doc[item['name']]):
return False
elif item['validation'] == 'children':
accepted_keys = ['*']
if 'accepted' in item:
if not validate_accepted(item['accepted'], yaml_doc[item['name']]):
return False
else:
accepted_keys = [x for x in yaml_doc[item['name']]]
if not recurse_validate_document(item['children'],
yaml_doc[item['name']]):
LOG.error("oidc-auth-apps: instruction: %s", print_item)
return False
else:
LOG.debug("oidc-auth-apps: accept instruction: %s: %s",
print_item, accepted_keys)
else:
LOG.error("oidc-auth-apps: instruction %s not implemented",
item['validation'])
return False
return True
def validate_accepted(accepted, yaml_doc):
"""Check that each item in yaml is expected"""
if type(yaml_doc) is not dict:
LOG.error("oidc-auth-apps: accepting from list not implemented")
return False
error = False
for key in yaml_doc:
if key not in accepted:
error = True
LOG.error("oidc-auth-apps: key is not accepted: %s", key)
return not error
def recurse_validate_document(instruction, yaml_doc):
"""Recursively verify the document against validation yaml"""
if type(instruction) is not list:
LOG.error("oidc-auth-apps: non-list instruction not implemented")
return False
for item in instruction:
if type(item) is not dict:
LOG.error("oidc-auth-apps: non-dict instruction item"
" not implemented")
return False
elif 'validation' not in item:
LOG.error("oidc-auth-apps: instruction missing validation")
return False
elif 'name' not in item:
LOG.error("oidc-auth-apps: instruction missing name")
return False
elif not validate_item(item, yaml_doc):
return False
return True
def validate_document(validation, document):
"""Top level, verify the document against validation yaml"""
LOG.info("oidc-auth-apps: validating %s", validation['name'])
if validation['validation'] != 'children':
LOG.warning("oidc-auth-apps: root validation should be"
" children not %s", validation['validation'])
result = recurse_validate_document(validation['children'], document)
if 'accepted' in validation:
if not validate_accepted(validation['accepted'], document):
return False
if validation['optional']:
LOG.warning("oidc-auth-apps: root validation is optional")
return True
return result
def get_chart_override(overrides, chart):
"""Get a specific set of overrides from the db value"""
chart_ov = None
for chart_ov in overrides:
if 'name' in chart_ov and chart_ov['name'] == chart:
break
else:
chart_ov = None
if not (chart_ov and 'user_overrides' in chart_ov):
return None
if not chart_ov['user_overrides']:
# A sanity check. Really shouldn't see this if oidc-auth-apps
# does not have dex overrides - either because the app is not
# applied, or because it failed to apply without overrides
return None
# convert the string to python structures
return yaml.safe_load(chart_ov['user_overrides'])
def validate_overrides(overrides):
"""Check if the user_overrides are supported"""
DEFINES.clear()
if not overrides:
# dex without overrides isn't configured correctly
LOG.error("oidc-auth-apps: no overrides to validate")
return False
elif type(overrides) is not list:
# this shouldn't happen
LOG.error("oidc-auth-apps: overrides not list type")
return False
# Find dex; only dex helm needs conversion
document = get_chart_override(overrides, 'dex')
if not document:
LOG.error("oidc-auth-apps: no dex user_overrides to validate")
return False
validate = yaml.safe_load(validation_yaml)
return validate_document(validate, document)
def get_httpstls_mount():
"""Use the default unless the end-user had overridden it"""
if 'dex_https_tlsCert' in DEFINES:
return os.path.dirname(DEFINES['dex_https_tlsCert'])
# The default matches the oidc-auth-apps fluxcd manifest defaults
return DEFAULT_HTTPSTLS_MOUNT
def get_httpstls_secret():
"""Use the default unless the end-user had overridden it"""
if 'tls_secret' in DEFINES:
return DEFINES['tls_secret']
# The default matches the oidc-auth-apps fluxcd manifest defaults
return DEFAULT_HTTPSTLS_SECRET
def merge_new_overrides():
"""Read DEFINES and prepare new overrides yaml"""
# Take the dex config as is:
new_doc = {'config': copy.deepcopy(DEFINES['dex_config'])}
# Convert old dex certs.web.secret to https-tls volume/volumeMounts
mount = {'mountPath': get_httpstls_mount(), 'name': 'https-tls'}
vol = {'secret': {'secretName': get_httpstls_secret(),
'defaultMode': DEFAULT_HTTPSTLS_MODE},
'name': 'https-tls'}
# Take 'extra' volumes and mounts that may exist in old dex
# This is expected to be the WAD certificate
volumes = []
volumeMounts = []
if 'volumes' in DEFINES:
volumes = copy.deepcopy(DEFINES['volumes'])
if 'volumeMounts' in DEFINES:
volumeMounts = copy.deepcopy(DEFINES['volumeMounts'])
# only add volumes/mounts if 'extra' was specified, or
# if there was non-default mount
if volumes or 'tls_secret' in DEFINES:
volumes.append(vol)
if volumeMounts or 'dex_https_tlsCert' in DEFINES:
volumeMounts.append(mount)
if volumes:
new_doc['volumes'] = volumes
if volumeMounts:
new_doc['volumeMounts'] = volumeMounts
return new_doc
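# Illustrative conversion (hypothetical overrides) performed by
# merge_new_overrides().  Old dex user_overrides such as:
#
#   config:
#     issuer: https://oam-floating-ip:30556/dex
#     web:
#       tlsCert: /etc/dex/tls/tls.crt
#       tlsKey: /etc/dex/tls/tls.key
#   certs:
#     web:
#       secret:
#         caName: local-dex.tls
#         tlsName: local-dex.tls
#
# would become the new-style overrides:
#
#   config: <copied unchanged>
#   volumes:
#   - name: https-tls
#     secret:
#       defaultMode: 420
#       secretName: local-dex.tls
#   volumeMounts:
#   - mountPath: /etc/dex/tls
#     name: https-tls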
def convert_overrides(overrides, conn):
"""Convert the user_overrides from old dex to new"""
LOG.info("oidc-auth-apps: converting dex overrides")
if not validate_overrides(overrides):
return 1
new_doc = merge_new_overrides()
res = backup_overrides(overrides, action='migrate')
if res != 0:
return res
# replace the dex user overrides
new_str = yaml.dump(new_doc, default_flow_style=False)
for override in overrides:
if override['name'] == 'dex':
override['user_overrides'] = new_str
res = backup_overrides(overrides, action='converted')
return res
def main():
action = None
from_release = None
to_release = None
arg = 1
while arg < len(sys.argv):
if arg == 1:
from_release = sys.argv[arg]
elif arg == 2:
to_release = sys.argv[arg]
elif arg == 3:
action = sys.argv[arg]
else:
print("Invalid option %s." % sys.argv[arg])
return 1
arg += 1
if action not in ACCEPTED_ACTIONS:
LOG.debug("oidc-auth-apps: omit %s, %s, %s",
from_release, to_release, action)
return 0
elif from_release not in ACCEPTED_FROM:
LOG.warning("oidc-auth-apps: not valid from release %s",
from_release)
return 0
elif to_release not in ACCEPTED_TO:
LOG.warning("oidc-auth-apps: not valid to release %s",
to_release)
return 0
try:
conn = psycopg2.connect("dbname=sysinv user=postgres")
overrides = get_overrides(conn)
except Exception as ex:
LOG.exception("oidc-auth-apps: %s", ex)
return 1
if not overrides:
LOG.error("oidc-auth-apps: failed to fetch overrides")
return 1
elif not get_chart_override(overrides, 'dex'):
LOG.info("oidc-auth-apps: no dex overrides to convert")
return 0
if action == 'health-check':
if validate_overrides(overrides):
LOG.info("oidc-auth-apps: upgrade script health-check: success")
return 0
return 1
elif action == 'start':
return backup_overrides(overrides, action='start')
elif action == 'migrate':
convert_overrides(overrides, conn)
# A failure of the oidc-auth-apps overrides conversion is not handled.
# A patch for the 21.12 release is needed to pre-test the
# compatibility of user overrides with the expected configurations.
# The 22.06 version of oidc-auth-apps will fail to apply if the
# overrides are not converted.
return 0
if __name__ == "__main__":
sys.exit(main())


@@ -1,434 +0,0 @@
#!/usr/bin/env python
# Copyright (c) 2022 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# This script moves legacy PTP configuration (contents) from other tables:
# - Global (system-wide) ptp4l configuration in 'ptp' table, by creating
# a "legacy" 'ptp4l' entry in 'ptp_instances' table and inserting the
# corresponding entries in 'ptp_parameters';
# - If advanced (specialized) ptp4l configuration is found in
# 'service_parameter' table, it inserts the corresponding entry(ies) in
# 'ptp_parameters' and refers to the "legacy" 'ptp4l' instance created
# for global (system-wide) configuration;
# - If phc2sys configuration is found in 'service_parameter' table, it
# inserts a 'phc2sys' entry in 'ptp_instances' table and inserts the
# corresponding entry(ies) in 'ptp_parameters';
# - If any interface has 'ptp_role' not equal to 'none', it inserts a
# 'ptp4l' entry in 'ptp_instances' and inserts the corresponding entry
# in 'ptp_parameters'.
import os
import sys
import psycopg2
from controllerconfig.common import log
from datetime import datetime
from oslo_utils import uuidutils
from psycopg2.extras import DictCursor
LOG = log.get_logger(__name__)
INTERFACE_PTP_ROLE_NONE = 'none'
PLATFORM_PATH = '/opt/platform' # following tsconfig
# Hidden file indicating the update/upgrade from legacy configuration
# has already been run
PTP_UPDATE_PARAMETERS_DONE = '.update_ptp_parameters_done'
# PTP instance types
PTP_INSTANCE_TYPE_PTP4L = 'ptp4l'
PTP_INSTANCE_TYPE_PHC2SYS = 'phc2sys'
# PTP instances created during migration
PTP_INSTANCE_LEGACY_PTP4L = 'ptp4l-legacy'
PTP_INSTANCE_LEGACY_PHC2SYS = 'phc2sys-legacy'
# PTP interfaces created during migration
PTP_INTERFACE_LEGACY_PTP4L = 'ptp4lif-legacy'
PTP_INTERFACE_LEGACY_PHC2SYS = 'phc2sysif-legacy'
# PTP parameter: owner types
PTP_PARAMETER_OWNER_INSTANCE = 'ptp-instance'
PTP_PARAMETER_OWNER_INTERFACE = 'ptp-interface'
# Global PTP configuration migrated to legacy instance
PTP_PARAMETER_DELAY_MECHANISM = 'delay_mechanism'
PTP_PARAMETER_TIME_STAMPING = 'time_stamping'
PTP_PARAMETER_NETWORK_TRANSPORT = 'network_transport'
# PTP service parameter sections
SERVICE_PARAM_SECTION_PTP_GLOBAL = 'global'
SERVICE_PARAM_SECTION_PTP_PHC2SYS = 'phc2sys'
# Special PTP service parameters migrated from legacy configuration
PTP_PARAMETER_UDS_ADDRESS = 'uds_address'
PTP_PARAMETER_DOMAIN_NUMBER = 'domainNumber'
PTP_PARAMETER_DEFAULT_DOMAIN = '0'
PTP_PARAMETER_BC_JBOD = 'boundary_clock_jbod'
PTP_BOUNDARY_CLOCK_JBOD_1 = '1'
# PTP service parameters NOT migrated from legacy configuration
PTP_PARAMETER_UPDATE_RATE = 'update-rate'
PTP_PARAMETER_SUMMARY_UPDATES = 'summary-updates'
def main():
action = None
from_release = None
to_release = None
arg = 1
while arg < len(sys.argv):
if arg == 1:
from_release = sys.argv[arg]
elif arg == 2:
to_release = sys.argv[arg]
elif arg == 3:
action = sys.argv[arg]
else:
print("Invalid option %s." % sys.argv[arg])
return 1
arg += 1
log.configure()
LOG.info("%s invoked from_release = %s to_release = %s action = %s"
% (sys.argv[0], from_release, to_release, action))
res = 0
if action == "migrate" and (
from_release == '21.05' or from_release == '21.12'):
# Avoid attempt of updating PTP configuration after this upgrade
TO_CONFIG_PATH = PLATFORM_PATH + '/config/' + to_release + '/'
to_file = os.path.join(TO_CONFIG_PATH, PTP_UPDATE_PARAMETERS_DONE)
open(to_file, 'w').close()
# First check on the filesystem to detect whether some update from the old
# PTP configuration has already been done (before upgrading)
FROM_CONFIG_PATH = PLATFORM_PATH + '/config/' + from_release + '/'
from_file = os.path.join(FROM_CONFIG_PATH, PTP_UPDATE_PARAMETERS_DONE)
if not os.path.isfile(from_file):
conn = psycopg2.connect("dbname=sysinv user=postgres")
try:
# Second check, using the restored database contents
if _legacy_instances_not_found(conn):
_move_ptp_parameters(conn)
except psycopg2.Error as ex:
LOG.exception(ex)
res = 1
except Exception as ex:
LOG.exception(ex)
res = 1
finally:
if to_release.startswith('22'):
_cleanup_ptp_service_parameters(conn)
# Committing all changes
LOG.info("Committing all PTP-related changes into database")
conn.commit()
conn.close()
return res
def _legacy_instances_not_found(connection):
with connection.cursor(cursor_factory=DictCursor) as cur:
cur.execute("SELECT id FROM ptp_instances WHERE "
"name = %s OR name = %s;",
(PTP_INSTANCE_LEGACY_PTP4L,
PTP_INSTANCE_LEGACY_PHC2SYS))
instance = cur.fetchone()
if instance is not None:
LOG.info("PTP legacy instance found with id = %s" % instance['id'])
return False
LOG.info("No PTP legacy instances found")
# If no legacy instances are present BUT any other instance is found,
# that instance must have been created after patching the current
# release, which made the new API fully functional while the legacy API
# was deprecated.
# In that case, legacy instances shouldn't be created automatically,
# not even based on the still existing (but now legacy and potentially
# untouched) 'ptp' table, because that would be undesired behavior
with connection.cursor(cursor_factory=DictCursor) as cur:
cur.execute("SELECT id FROM ptp_instances;")
instance = cur.fetchone()
if instance is not None:
LOG.info("PTP legacy instances dismissed")
return False
return True
def _insert_ptp_parameter_owner(connection, owner_type, capabilities=None):
owner_uuid = uuidutils.generate_uuid()
with connection.cursor(cursor_factory=DictCursor) as cur:
LOG.debug("Creating PTP parameter owner %s" % owner_uuid)
cur.execute("INSERT INTO ptp_parameter_owners "
"(created_at, uuid, type, capabilities)"
"VALUES (%s, %s, %s, %s);",
(datetime.now(), owner_uuid, owner_type, capabilities))
cur.execute("SELECT id FROM ptp_parameter_owners WHERE uuid = %s;",
(owner_uuid,))
row = cur.fetchone()
owner_id = row['id']
return (owner_id, owner_uuid)
def _insert_ptp_instance(connection,
id,
name,
service,
extra_info=None):
with connection.cursor(cursor_factory=DictCursor) as cur:
LOG.debug("Creating PTP instance %s id %d" % (name, id))
cur.execute("INSERT INTO ptp_instances "
"(id, name, service, extra_info) "
"VALUES (%s, %s, %s, %s);",
(id, name, service, extra_info))
def _insert_ptp_interface(connection,
id,
name,
instance_id,
extra_info=None):
with connection.cursor(cursor_factory=DictCursor) as cur:
LOG.debug("Creating PTP interface %s id %d" % (name, id))
cur.execute("INSERT INTO ptp_interfaces "
"(id, name, ptp_instance_id, extra_info) "
"VALUES (%s, %s, %s, %s);",
(id, name, instance_id, extra_info))
def _insert_ptp_parameter(connection, name, value):
param_uuid = uuidutils.generate_uuid()
with connection.cursor(cursor_factory=DictCursor) as cur:
LOG.debug("Creating PTP parameter %s=%s" % (name, value))
cur.execute("INSERT INTO ptp_parameters "
"(created_at, uuid, name, value) "
"VALUES (%s, %s, %s, %s);",
(datetime.now(), param_uuid, name, value))
return param_uuid
def _add_parameter_to_instance(connection, owner_uuid, param_uuid):
with connection.cursor(cursor_factory=DictCursor) as cur:
LOG.debug("Adding PTP parameter %s to %s" % (param_uuid, owner_uuid))
cur.execute("INSERT INTO ptp_parameter_ownerships "
"(created_at, uuid, parameter_uuid, owner_uuid) "
"VALUES (%s, %s, %s, %s);",
(datetime.now(), uuidutils.generate_uuid(), param_uuid,
owner_uuid))
def _assign_instance_to_host(connection, instance_id, host_id):
with connection.cursor(cursor_factory=DictCursor) as cur:
LOG.debug("Assigning PTP instance %d to host %d" %
(instance_id, host_id))
cur.execute("INSERT INTO ptp_instance_maps "
"(created_at, uuid, host_id, ptp_instance_id) "
"VALUES (%s, %s, %s, %s);",
(datetime.now(), uuidutils.generate_uuid(), host_id,
instance_id))
def _assign_ptp_to_interface(connection, ptp_interface_id, interface_id):
with connection.cursor(cursor_factory=DictCursor) as cur:
LOG.debug("Assigning PTP interface %d to interface %d" %
(ptp_interface_id, interface_id))
cur.execute("INSERT INTO ptp_interface_maps "
"(created_at, uuid, interface_id, ptp_interface_id) "
"VALUES (%s, %s, %s, %s);",
(datetime.now(), uuidutils.generate_uuid(), interface_id,
ptp_interface_id))
def _cleanup_ptp_service_parameters(connection):
with connection.cursor(cursor_factory=DictCursor) as cur:
LOG.info("Removing PTP configuration from 'service_parameter' table")
cur.execute("DELETE FROM service_parameter WHERE service = 'ptp';")
def _move_ptp_parameters(connection):
with connection.cursor(cursor_factory=DictCursor) as cur:
# List all the hosts with clock_synchronization=ptp
cur.execute("SELECT id FROM i_host "
"WHERE clock_synchronization = 'ptp';")
ptp_hosts = cur.fetchall()
LOG.debug("There are %d hosts with clock_synchronization=ptp" %
len(ptp_hosts))
with connection.cursor(cursor_factory=DictCursor) as cur:
# List all PTP parameters in service-parameters table
cur.execute("SELECT section, name, value FROM service_parameter "
"WHERE service = 'ptp';")
param_entries = cur.fetchall()
LOG.debug("There are %d PTP rows in 'service_parameter' table" %
len(param_entries))
if len(ptp_hosts) == 0 and len(param_entries) == 0:
# No need for upgrade
return
with connection.cursor(cursor_factory=DictCursor) as cur:
# List all the interfaces with ptp_role!=none
cur.execute("SELECT id FROM interfaces WHERE ptp_role <> %s;",
(INTERFACE_PTP_ROLE_NONE,))
ptp_ifaces = cur.fetchall()
LOG.debug("There are %d interfaces with ptp_role != none" %
len(ptp_ifaces))
LOG.info("Creating PTP instances for legacy parameters")
# Take system-wide parameters from legacy configuration
with connection.cursor(cursor_factory=DictCursor) as cur:
cur.execute("SELECT mechanism, mode, transport FROM ptp;")
ptp_config = cur.fetchone()
delay_mechanism = str(ptp_config['mechanism']).upper()
time_stamping = str(ptp_config['mode']).lower()
network_transport = str(ptp_config['transport']).upper()
# Legacy instance for system-wide parameters and those of
# section "global" in service-parameters table
(ptp4l_id, ptp4l_uuid) = _insert_ptp_parameter_owner(
connection, PTP_PARAMETER_OWNER_INSTANCE)
_insert_ptp_instance(connection,
ptp4l_id,
PTP_INSTANCE_LEGACY_PTP4L,
PTP_INSTANCE_TYPE_PTP4L)
# Legacy PTP interface associated to legacy ptp4l instance
(ptp4lif_id, ptp4lif_uuid) = _insert_ptp_parameter_owner(
connection, PTP_PARAMETER_OWNER_INTERFACE)
_insert_ptp_interface(connection,
ptp4lif_id,
PTP_INTERFACE_LEGACY_PTP4L,
ptp4l_id)
# Legacy instance for parameters of section "phc2sys"
(phc2sys_id, phc2sys_uuid) = _insert_ptp_parameter_owner(
connection, PTP_PARAMETER_OWNER_INSTANCE)
_insert_ptp_instance(connection,
phc2sys_id,
PTP_INSTANCE_LEGACY_PHC2SYS,
PTP_INSTANCE_TYPE_PHC2SYS)
# Legacy PTP interface associated to legacy phc2sys instance
(phc2sysif_id, phc2sysif_uuid) = _insert_ptp_parameter_owner(
connection, PTP_PARAMETER_OWNER_INTERFACE)
_insert_ptp_interface(connection,
phc2sysif_id,
PTP_INTERFACE_LEGACY_PHC2SYS,
phc2sys_id)
# Add 'uds_address' parameter to the phc2sys instance for linkage
# with the ptp4l instance
uds_address_path = '/var/run/ptp4l-%s' % PTP_INSTANCE_LEGACY_PTP4L
uds_address_uuid = _insert_ptp_parameter(connection,
PTP_PARAMETER_UDS_ADDRESS,
uds_address_path)
_add_parameter_to_instance(connection, phc2sys_uuid, uds_address_uuid)
# Assign legacy instances to all hosts with clock_synchronization=ptp
for host in ptp_hosts:
_assign_instance_to_host(connection, ptp4l_id, host['id'])
_assign_instance_to_host(connection, phc2sys_id, host['id'])
# Assign legacy PTP interfaces to all interfaces with ptp_role!=none
for iface in ptp_ifaces:
_assign_ptp_to_interface(connection, ptp4lif_id, iface['id'])
_assign_ptp_to_interface(connection, phc2sysif_id, iface['id'])
# Copy service-parameter PTP entries, if any
domain_number = PTP_PARAMETER_DEFAULT_DOMAIN
for param in param_entries:
if (param['name'] == PTP_PARAMETER_UPDATE_RATE or
param['name'] == PTP_PARAMETER_SUMMARY_UPDATES):
LOG.info("Found %s parameter, ignored" % param['name'])
continue
if param['name'] == PTP_PARAMETER_DOMAIN_NUMBER:
domain_number = param['value'] # overwrite default
continue # skip it for below
if param['name'] == PTP_PARAMETER_DELAY_MECHANISM:
delay_mechanism = str(param['value']).upper() # overwrite global
continue # skip it for below
if param['name'] == PTP_PARAMETER_TIME_STAMPING:
time_stamping = str(param['value']).lower() # overwrite global
continue # skip it for below
if param['name'] == PTP_PARAMETER_NETWORK_TRANSPORT:
network_transport = str(param['value']).upper() # overwrite global
continue # skip it for below
if param['section'] == SERVICE_PARAM_SECTION_PTP_GLOBAL:
owner_uuid = ptp4l_uuid
elif param['section'] == SERVICE_PARAM_SECTION_PTP_PHC2SYS:
owner_uuid = phc2sys_uuid
else:
raise Exception("Unexpected PTP section in "
"'service-parameter' table")
param_uuid = _insert_ptp_parameter(connection,
param['name'],
param['value'])
_add_parameter_to_instance(connection, owner_uuid, param_uuid)
# Any 'global' parameter that has been found must also be
# added to the phc2sys instance, since it now has its own
# configuration file
if param['section'] == SERVICE_PARAM_SECTION_PTP_GLOBAL:
_add_parameter_to_instance(connection,
phc2sys_uuid,
param_uuid)
domain_number_uuid = _insert_ptp_parameter(
connection, PTP_PARAMETER_DOMAIN_NUMBER, domain_number)
_add_parameter_to_instance(connection, ptp4l_uuid, domain_number_uuid)
_add_parameter_to_instance(connection,
phc2sys_uuid,
domain_number_uuid)
ptp_delay_mechanism_uuid = _insert_ptp_parameter(
connection, PTP_PARAMETER_DELAY_MECHANISM, delay_mechanism)
_add_parameter_to_instance(connection,
ptp4l_uuid,
ptp_delay_mechanism_uuid)
_add_parameter_to_instance(connection,
phc2sys_uuid,
ptp_delay_mechanism_uuid)
ptp_time_stamping_uuid = _insert_ptp_parameter(
connection, PTP_PARAMETER_TIME_STAMPING, time_stamping)
_add_parameter_to_instance(connection,
ptp4l_uuid,
ptp_time_stamping_uuid)
_add_parameter_to_instance(connection,
phc2sys_uuid,
ptp_time_stamping_uuid)
ptp_network_transport_uuid = _insert_ptp_parameter(
connection, PTP_PARAMETER_NETWORK_TRANSPORT, network_transport)
_add_parameter_to_instance(connection,
ptp4l_uuid,
ptp_network_transport_uuid)
_add_parameter_to_instance(connection,
phc2sys_uuid,
ptp_network_transport_uuid)
# Add 'boundary_clock_jbod' parameter to ptp4l instance if mode is
# "hardware"
if time_stamping == 'hardware':
bc_clock_jbod_uuid = _insert_ptp_parameter(
connection, PTP_PARAMETER_BC_JBOD, PTP_BOUNDARY_CLOCK_JBOD_1)
_add_parameter_to_instance(connection, ptp4l_uuid, bc_clock_jbod_uuid)
if __name__ == "__main__":
sys.exit(main())


@@ -1,268 +0,0 @@
#!/bin/bash
#
# Copyright (c) 2022 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
#
# This migration script is used for upgrading cert manager during the
# activate stage of a platform upgrade. It will:
# - dump existing certificates and issuers
# - convert the dump from deprecated v1alpha2 and v1alpha3 to v1
# - remove the old armada version of cert manager
# - apply the new fluxcd version of cert manager
#
# A lot of the logic for determining application versions is copied
# from the generic application upgrade script in order to keep things
# consistent.
#
# This script should only be needed in the upgrade from the armada
# version of cert manager to the fluxcd version of cert manager.
# The cert manager version difference between the armada and fluxcd
# tarballs is too great and requires additional steps for data migration.
# The migration scripts are passed these parameters:
NAME=$(basename $0)
FROM_RELEASE=$1
TO_RELEASE=$2
ACTION=$3
# This will log to /var/log/platform.log
function log {
logger -p local1.info "$1"
}
# only run this script during upgrade-activate
if [ "$ACTION" != "activate" ]; then
exit 0
fi
# TODO: double check the inclusive condition.
if [[ "$TO_RELEASE" == "22.12" && "$FROM_RELEASE" == "22.06" ]]; then
log "upgrade to 22.12, skip"
exit 0
fi
PLATFORM_APPLICATION_PATH='/usr/local/share/applications/helm'
CONFIG_PERMDIR="/opt/platform/config/${TO_RELEASE}"
PATH=$PATH:/usr/local/sbin
DELETE_RESULT_SLEEP=10
DELETE_RESULT_ATTEMPTS=6 # ~1 min to delete app
UPLOAD_RESULT_SLEEP=10
UPLOAD_RESULT_ATTEMPTS=24 # ~4 min to upload app
APPLY_RESULT_SLEEP=30
APPLY_RESULT_ATTEMPTS=30 # ~15 min to update app
REMOVE_RESULT_SLEEP=10
REMOVE_RESULT_ATTEMPTS=48 # ~8 min to remove app
source /etc/platform/openrc
source /etc/platform/platform.conf
EXISTING_APP_NAME='cert-manager'
EXISTING_APP_INFO=$(system application-show $EXISTING_APP_NAME --column app_version --column status --format yaml)
EXISTING_APP_VERSION=$(echo ${EXISTING_APP_INFO} | sed 's/.*app_version:[[:space:]]\(\S*\).*/\1/')
EXISTING_APP_STATUS=$(echo ${EXISTING_APP_INFO} | sed 's/.*status:[[:space:]]\(\S*\).*/\1/')
# Extract the app name and version from the tarball name: app_name-version.tgz
UPGRADE_CERT_MANAGER_TARBALL=$(find $PLATFORM_APPLICATION_PATH -name "cert-manager*")
re='^(.*)-([0-9]+\.[0-9]+-[0-9]+).tgz'
[[ "$(basename $UPGRADE_CERT_MANAGER_TARBALL)" =~ $re ]]
UPGRADE_APP_NAME=${BASH_REMATCH[1]}
UPGRADE_APP_VERSION=${BASH_REMATCH[2]}
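# For example (hypothetical tarball name), cert-manager-22.12-8.tgz yields
# UPGRADE_APP_NAME=cert-manager and UPGRADE_APP_VERSION=22.12-8.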
# cert manager is a required application
# if it is not in the applied state, something is very wrong
if [ $EXISTING_APP_STATUS != "applied" ]; then
log "$NAME: ${UPGRADE_APP_NAME}, version ${EXISTING_APP_VERSION}, in bad state ${EXISTING_APP_STATUS}. Exiting for manual intervention..."
exit 1
fi
# assuming the application is in the applied state, but log it anyway
log "$NAME: cert-manager, version $EXISTING_APP_VERSION, is currently in the state: $EXISTING_APP_STATUS"
# only upgrade the application if the versions don't match,
# in case the upgrade activate failed for other reasons and this
# is not the first time this script is run
if [ "x${UPGRADE_APP_VERSION}" != "x${EXISTING_APP_VERSION}" ]; then
# dump existing cert manager CRDs
# only dump once, to prevent overwriting existing dumps
# if the script is run more than once due to other failures
if [ ! -f "$CONFIG_PERMDIR/.cm_upgrade_dump" ]; then
log "$NAME: creating cert manager resources backup"
EXISTING_CM_RESOURCES=$(kubectl --kubeconfig=/etc/kubernetes/admin.conf \
get issuer,clusterissuer,certificates,certificaterequests \
--all-namespaces 2>&1 > /dev/null)
if [ "${EXISTING_CM_RESOURCES}" == 'No resources found' ]; then
log "$NAME: no existing cert manager resources detected."
touch "$CONFIG_PERMDIR/.cm_upgrade_no_existing_resources"
else
kubectl get -o yaml \
--kubeconfig=/etc/kubernetes/admin.conf \
--all-namespaces \
issuer,clusterissuer,certificates,certificaterequests \
> $CONFIG_PERMDIR/cert-manager-backup.yaml
if [ $? != 0 ]; then
log "$NAME: Failed to dump existing cert manager resources. Exiting for manual intervention..."
exit 1
fi
# remove the 'resourceVersion' of all cm resources in backup file
# to avoid version related errors while updating the resource
sed -i '/resourceVersion:/d' $CONFIG_PERMDIR/cert-manager-backup.yaml
if [ $? != 0 ]; then
log "$NAME: Failed to delete resourceVersion in cert-manager-backup.yaml. Exiting for manual intervention..."
exit 1
fi
fi
touch "$CONFIG_PERMDIR/.cm_upgrade_dump"
fi
# convert dump using kubectl cert-manager kubernetes plugin
# the .cm_upgrade_no_existing_resources check avoids converting an empty dump.
# the dump can be empty if the system does not have any cert manager resources,
# and an empty dump fails the kubectl plugin and the subsequent kubectl apply
# that restores the backup.
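# (the convert command rewrites resources using older cert-manager API versions,
# e.g. v1alpha2, to the version requested via --output-version)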
if [ ! -f "$CONFIG_PERMDIR/.cm_upgrade_convert" ] && \
[ ! -f "$CONFIG_PERMDIR/.cm_upgrade_no_existing_resources" ]; then
log "$NAME: converting cert manager resources backup"
kubectl cert-manager convert \
--output-version cert-manager.io/v1 \
-f $CONFIG_PERMDIR/cert-manager-backup.yaml \
> $CONFIG_PERMDIR/cert-manager-v1.yaml
if [ $? != 0 ]; then
log "$NAME: Failed to convert cert manager resources. Exiting for manual intervention..."
exit 1
fi
touch "$CONFIG_PERMDIR/.cm_upgrade_convert"
fi
# remove extra args overrides.
# we need to do this because our configuration of cert manager deletes the secrets
# tied to cert manager certificates when those certificates are deleted.
# this means the secrets are deleted when we delete the certificates as part of
# data migration, so when the certificates are restored the underlying secrets
# will be missing. this triggers a refresh of all cert manager certificates, which
# could break trust chains if the certificates are used for a root CA, as in DC deployments
log "$NAME: removing extra args overrides from ${EXISTING_APP_NAME}"
system helm-override-update ${EXISTING_APP_NAME} cert-manager cert-manager --set extraArgs=""
# apply old cert manager
log "$NAME: Applying ${EXISTING_APP_NAME}, version ${EXISTING_APP_VERSION}"
system application-apply ${EXISTING_APP_NAME}
# Wait on the apply
for tries in $(seq 1 $APPLY_RESULT_ATTEMPTS); do
EXISTING_APP_STATUS=$(system application-show $EXISTING_APP_NAME --column status --format value)
if [ "${EXISTING_APP_STATUS}" == 'applied' ]; then
log "$NAME: ${EXISTING_APP_NAME} has been applied."
break
fi
sleep $APPLY_RESULT_SLEEP
done
if [ $tries == $APPLY_RESULT_ATTEMPTS ]; then
log "$NAME: ${EXISTING_APP_NAME}, version ${EXISTING_APP_VERSION}, was not applied in the allocated time. Exiting for manual intervention..."
exit 1
fi
# remove old cert manager
log "$NAME: Removing ${EXISTING_APP_NAME}, version ${EXISTING_APP_VERSION}"
system application-remove -f ${EXISTING_APP_NAME}
# Wait on the remove, should be somewhat quick
for tries in $(seq 1 $REMOVE_RESULT_ATTEMPTS); do
EXISTING_APP_STATUS=$(system application-show $EXISTING_APP_NAME --column status --format value)
if [ "${EXISTING_APP_STATUS}" == 'uploaded' ]; then
log "$NAME: ${EXISTING_APP_NAME} has been removed."
break
fi
sleep $REMOVE_RESULT_SLEEP
done
if [ $tries == $REMOVE_RESULT_ATTEMPTS ]; then
log "$NAME: ${EXISTING_APP_NAME}, version ${EXISTING_APP_VERSION}, was not removed in the allocated time. Exiting for manual intervention..."
exit 1
fi
# delete old cert manager
log "$NAME: Deleting ${EXISTING_APP_NAME}, version ${EXISTING_APP_VERSION}"
system application-delete -f ${EXISTING_APP_NAME}
# Wait on the delete, should be quick
for tries in $(seq 1 $DELETE_RESULT_ATTEMPTS); do
EXISTING_APP_STATUS=$(system application-show $EXISTING_APP_NAME --column status --format value)
if [ -z "${EXISTING_APP_STATUS}" ]; then
log "$NAME: ${EXISTING_APP_NAME} has been deleted."
break
fi
sleep $DELETE_RESULT_SLEEP
done
if [ $tries == $DELETE_RESULT_ATTEMPTS ]; then
log "$NAME: ${EXISTING_APP_NAME}, version ${EXISTING_APP_VERSION}, was not deleted in the allocated time. Exiting for manual intervention..."
exit 1
fi
# upload new cert manager
log "$NAME: Uploading ${UPGRADE_APP_NAME}, version ${UPGRADE_APP_VERSION} from $UPGRADE_CERT_MANAGER_TARBALL"
system application-upload $UPGRADE_CERT_MANAGER_TARBALL
# Wait on the upload, should be quick
for tries in $(seq 1 $UPLOAD_RESULT_ATTEMPTS); do
UPGRADE_APP_STATUS=$(system application-show $UPGRADE_APP_NAME --column status --format value)
if [ "${UPGRADE_APP_STATUS}" == 'uploaded' ]; then
log "$NAME: ${UPGRADE_APP_NAME} has been uploaded."
break
fi
sleep $UPLOAD_RESULT_SLEEP
done
if [ $tries == $UPLOAD_RESULT_ATTEMPTS ]; then
log "$NAME: ${UPGRADE_APP_NAME}, version ${UPGRADE_APP_VERSION}, was not uploaded in the allocated time. Exiting for manual intervention..."
exit 1
fi
# apply new cert manager
log "$NAME: Applying ${UPGRADE_APP_NAME}, version ${UPGRADE_APP_VERSION}"
system application-apply ${UPGRADE_APP_NAME}
# Wait on the apply
for tries in $(seq 1 $APPLY_RESULT_ATTEMPTS); do
UPGRADE_APP_STATUS=$(system application-show $UPGRADE_APP_NAME --column status --format value)
if [ "${UPGRADE_APP_STATUS}" == 'applied' ]; then
log "$NAME: ${UPGRADE_APP_NAME} has been applied."
break
fi
sleep $APPLY_RESULT_SLEEP
done
if [ $tries == $APPLY_RESULT_ATTEMPTS ]; then
log "$NAME: ${UPGRADE_APP_NAME}, version ${UPGRADE_APP_VERSION}, was not applied in the allocated time. Exiting for manual intervention..."
exit 1
fi
# apply converted cert manager resources to the new cert manager application
# -f check is required because the cert manager backup could be empty
# if the system had no cert manager resources before the upgrade
if [ ! -f "$CONFIG_PERMDIR/.cm_upgrade_no_existing_resources" ]; then
log "$NAME: Restoring cert manager resource backup"
kubectl --kubeconfig=/etc/kubernetes/admin.conf apply -f $CONFIG_PERMDIR/cert-manager-v1.yaml
if [ $? != 0 ]; then
log "$NAME: Failed to apply cert manager resources on the fluxcd version of cert manager. Exiting for manual intervention..."
exit 1
fi
fi
fi
exit 0

View File

@ -1,88 +0,0 @@
#!/usr/bin/python
# Copyright (c) 2022 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# This script adds pod security admission controller labels to a system
# after upgrades. These labels are normally applied by ansible and sysinv when
# a new namespace is created during application deployment; upgrades need to
# apply them to existing namespaces.
import subprocess
import sys
from controllerconfig.common import log
from sysinv.helm import common
LOG = log.get_logger(__name__)
def main():
action = None
from_release = None
to_release = None
arg = 1
while arg < len(sys.argv):
if arg == 1:
from_release = sys.argv[arg]
elif arg == 2:
to_release = sys.argv[arg]
elif arg == 3:
action = sys.argv[arg]
elif arg == 4:
# postgres_port = sys.argv[arg]
pass
else:
print("Invalid option %s." % sys.argv[arg])
return 1
arg += 1
log.configure()
if from_release == '21.12' and action == 'activate':
LOG.info("%s invoked from_release = %s to_release = %s action = %s"
% (sys.argv[0], from_release, to_release, action))
add_pod_security_admission_controller_labels()
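# Label every platform (privileged) namespace with the pod security admission
# enforce/warn/audit labels set to 'privileged' and the matching *-version
# labels set to 'latest', mirroring what is applied to newly created namespaces.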
def add_pod_security_admission_controller_labels():
try:
cmd = ["kubectl", "--kubeconfig=/etc/kubernetes/admin.conf",
"get", "namespaces", "-o=name"]
namespaces_output = subprocess.check_output(cmd).decode("utf-8")
except Exception:
LOG.error('Command failed:\n %s' % (cmd))
raise Exception('Cannot get namespaces for pod security labels')
for line in namespaces_output.splitlines():
# we add pod security admission controller labels to namespaces that
# we create
namespace = line.replace("namespace/", "")
if namespace not in common.PRIVILEGED_NS:
continue
security_version = 'latest'
if namespace in common.PRIVILEGED_NS:
security_level = 'privileged'
try:
cmd = ["kubectl", "--kubeconfig=/etc/kubernetes/admin.conf",
"label", "--overwrite", "namespaces", namespace,
"pod-security.kubernetes.io/enforce=%s"
% (security_level),
"pod-security.kubernetes.io/warn=%s"
% (security_level),
"pod-security.kubernetes.io/audit=%s"
% (security_level),
"pod-security.kubernetes.io/enforce-version=%s"
% (security_version),
"pod-security.kubernetes.io/warn-version=%s"
% (security_version),
"pod-security.kubernetes.io/audit-version=%s"
% (security_version)]
subprocess.call(cmd)
except Exception as exc:
LOG.error('Command failed:\n %s\n%s' % (cmd, exc))
raise Exception('Cannot assign pod security label')
if __name__ == "__main__":
sys.exit(main())

View File

@ -1,125 +0,0 @@
#!/usr/bin/env python
# Copyright (c) 2022 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# This migration script is used for update controller-platform-nfs
# during migrate stage of platform upgrade. It will:
# - create controller-platform-nfs alias for controller mgmt IP in
# /opt/platform/config/<to_release>/hosts file
# - remove the controller-platform-nfs-mgmt IP address from address DB
import sys
import psycopg2
from psycopg2.extras import DictCursor
import subprocess
import os.path
from controllerconfig.common import log
LOG = log.get_logger(__name__)
def _add_nfs_alias_to_hosts_file(connection, to_release):
with connection.cursor(cursor_factory=DictCursor) as cur:
# during the upgrade-start the hosts file is copied from:
# /opt/platform/config/<from_release>/
# to
# /opt/platform/config/<to_release>/
# and /opt/platform/config/<to_release>/hosts is used to upgrade
# other controller/storage/worker nodes
# the host files from this path must be updated
CONFIG_PERMDIR = "/opt/platform/config/{}".format(to_release)
host_file = "{}/hosts".format(CONFIG_PERMDIR)
LOG.info("updating hosts in {}".format(CONFIG_PERMDIR))
if (not os.path.exists(host_file)):
LOG.info("Skipping update of {}. File does not exists"
.format(host_file))
return None
LOG.info("Get controller-mgmt floating ip from 'addresses' table")
cur.execute("SELECT address FROM addresses WHERE "
"name='controller-mgmt';")
ctrl_mgmt_ip = cur.fetchone()
# remove the controller-platform-nfs line from ${host_file}
sed_cmd = "sed -i '/controller\-platform\-nfs/d' {}".format(host_file)
# Find the controller mgmt floating IP
# copy entire line and put in ctrl_float
grep_cmd = "grep -w '{}' {} | xargs -I ctrl_float" \
.format(ctrl_mgmt_ip['address'], host_file)
# Add the alias controller-platform-nfs to controller IP
# replacing the ${ctrl_float} by
# "${ctrl_float} controller-platform-nfs"
sed_concat = "sed -i -e " \
"'s|ctrl_float|ctrl_float controller-platform-nfs|' {}" \
.format(host_file)
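# e.g. (illustrative) a hosts line '192.168.204.2 controller' becomes
# '192.168.204.2 controller controller-platform-nfs'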
command = "{} && {} {}".format(sed_cmd, grep_cmd, sed_concat)
sub = subprocess.Popen(command, shell=True,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = sub.communicate()
if sub.returncode != 0:
LOG.error('Cmd Failed:\n%s\n.%s\n%s' % (command, stdout, stderr))
raise Exception('Error controller-platform-nfs IP: {} '
'in etc/hosts'.format(ctrl_mgmt_ip['address']))
LOG.info('alias controller-platform-nfs added for IP: {} '
'in {}'.format(ctrl_mgmt_ip['address'], host_file))
def main():
action = None
from_release = None
to_release = None
arg = 1
res = 0
log.configure()
while arg < len(sys.argv):
if arg == 1:
from_release = sys.argv[arg]
elif arg == 2:
to_release = sys.argv[arg]
elif arg == 3:
action = sys.argv[arg]
else:
LOG.error("Invalid option %s." % sys.argv[arg])
return 1
arg += 1
LOG.info("%s invoked with from_release = %s to_release = %s action = %s"
% (sys.argv[0], from_release, to_release, action))
if action == "migrate" and from_release in ['21.12', '22.06']:
conn = psycopg2.connect("dbname=sysinv user=postgres")
try:
_add_nfs_alias_to_hosts_file(conn, to_release)
except psycopg2.Error as ex:
LOG.exception(ex)
LOG.warning("DB Connection error")
res = 1
except Exception as ex:
LOG.exception(ex)
LOG.warning("Exception")
res = 1
else:
LOG.info("controller-platform-nfs alias updated")
finally:
LOG.info("Closing DB connection")
conn.close()
return res
if __name__ == "__main__":
sys.exit(main())

View File

@ -1,224 +0,0 @@
#!/bin/bash
#
# Copyright (c) 2022-2023 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# This migration script is used for replacing an app during the
# activate stage of a platform upgrade. The app is not otherwise
# handled by 65-k8s-app-upgrade.sh. The code will:
# - remove the old app version
# - run app specific code which is inserted into the script
# - apply the new app version
#
# The script is based on 64-upgrade-cert-manager.sh. Logic for
# determining application versions is copied from 65-k8s-app-upgrade.sh
# application upgrade script in order to keep things consistent.
#
# This script is intended initially as a generic template.
#
# The current copy is written for oidc-auth-apps
# The migration scripts are passed these parameters:
NAME=$(basename $0)
FROM_RELEASE=$1
TO_RELEASE=$2
ACTION=$3
# only run this script during upgrade-activate
if [ "$ACTION" != "activate" ]; then
exit 0
fi
# only run if from 21.12 release
if [ "$FROM_RELEASE" != "21.12" ]; then
exit 0
fi
# only run if to 22.12 release
if [ "$TO_RELEASE" != "22.12" ]; then
exit 0
fi
PLATFORM_APPLICATION_PATH='/usr/local/share/applications/helm'
UPGRADE_IN_PROGRESS_APPS_FILE='/etc/platform/.upgrade_in_progress_apps'
PATH=$PATH:/usr/local/sbin
# conversion script; this script will convert the helm overrides
# reading from postgres and putting overrides into /var/opt/oidc-auth-apps
CONV_SCRIPT='/etc/upgrade.d/50-validate-oidc-auth-apps.py'
CONV_PARAMS="$FROM_RELEASE $TO_RELEASE migrate"
DELETE_RESULT_SLEEP=10
DELETE_RESULT_ATTEMPTS=6 # ~1 min to delete app
UPLOAD_RESULT_SLEEP=10
UPLOAD_RESULT_ATTEMPTS=24 # ~4 min to upload app
APPLY_RESULT_SLEEP=30
APPLY_RESULT_ATTEMPTS=30 # ~15 min to update app
REMOVE_RESULT_SLEEP=10
REMOVE_RESULT_ATTEMPTS=48 # ~8 min to remove app
source /etc/platform/openrc
source /etc/platform/platform.conf
# This will log to /var/log/platform.log
function log {
logger -p local1.info $1
}
EXISTING_APP_NAME='oidc-auth-apps'
EXISTING_APP_INFO=$(system application-show $EXISTING_APP_NAME --column app_version --column status --format yaml)
EXISTING_APP_VERSION=$(echo ${EXISTING_APP_INFO} | sed 's/.*app_version:[[:space:]]\(\S*\).*/\1/')
EXISTING_APP_STATUS=$(echo ${EXISTING_APP_INFO} | sed 's/.*status:[[:space:]]\(\S*\).*/\1/')
ORIGINAL_APP_STATUS=$EXISTING_APP_STATUS
# oidc-auth-apps has user overrides converted and saved for
# re-apply at this time
OIDC_OVERRIDES="/var/opt/oidc-auth-apps/converted"
OIDC_CHARTS="dex oidc-client secret-observer"
function oidc_specific_handling {
for chart in $OIDC_CHARTS; do
chart_f="${OIDC_OVERRIDES}/${chart}_user_overrides.yaml"
if [ ! -f "$chart_f" ]; then
continue
fi
system helm-override-update oidc-auth-apps "${chart}" kube-system \
--values="${chart_f}" \
|| return 1
done
}
# Extract the app name and version from the tarball name: app_name-version.tgz
UPGRADE_TARBALL="$(find $PLATFORM_APPLICATION_PATH -name "${EXISTING_APP_NAME}*.tgz")"
filecount="$( echo "$UPGRADE_TARBALL" | wc -w )"
if [ -z "$UPGRADE_TARBALL" -o "$filecount" -ne 1 ]; then
log "$NAME: ${EXISTING_APP_NAME}, version ${EXISTING_APP_VERSION}, upgrade tarball not found (${filecount}). Exiting for manual intervention..."
exit 1
fi
re='^('${EXISTING_APP_NAME}')-([0-9]+\.[0-9]+-[0-9]+).tgz'
[[ "$(basename $UPGRADE_TARBALL)" =~ $re ]]
UPGRADE_APP_NAME=${BASH_REMATCH[1]}
UPGRADE_APP_VERSION=${BASH_REMATCH[2]}
# Accept the application in the following states
ACCEPTED_STATES="applied uploaded"
if [[ " $ACCEPTED_STATES " != *" $EXISTING_APP_STATUS "* ]]; then
log "$NAME: ${UPGRADE_APP_NAME}, version ${EXISTING_APP_VERSION}, in bad state ${EXISTING_APP_STATUS}. Exiting for manual intervention..."
exit 1
fi
# the application is in one of the accepted states; log the current state anyway
log "$NAME: $EXISTING_APP_NAME, version $EXISTING_APP_VERSION, is currently in the state: $EXISTING_APP_STATUS"
# only upgrade the application if the versions don't match
# in case the upgrade activate failed due to other reasons, and this
# is not the first time this script is run
if [ "x${UPGRADE_APP_VERSION}" == "x${EXISTING_APP_VERSION}" ]; then
log "$NAME: $UPGRADE_APP_NAME, version $UPGRADE_APP_VERSION, is the same."
exit 0
else
# Include app in upgrade in progress file
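# (each line of the file has the form app_name,current_version,upgrade_version)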
if ! grep -q "${EXISTING_APP_NAME},${EXISTING_APP_VERSION},${UPGRADE_APP_VERSION}" $UPGRADE_IN_PROGRESS_APPS_FILE; then
echo "${EXISTING_APP_NAME},${EXISTING_APP_VERSION},${UPGRADE_APP_VERSION}" >> $UPGRADE_IN_PROGRESS_APPS_FILE
fi
# The 50-validate-oidc-auth-apps.py is used to convert helm
# overrides. Run it here on the active controller during
# upgrade-activate
su postgres -c "$CONV_SCRIPT $CONV_PARAMS"
if [ "$ORIGINAL_APP_STATUS" != "uploaded" ]; then
# remove old app version
log "$NAME: Removing ${EXISTING_APP_NAME}, version ${EXISTING_APP_VERSION}"
system application-remove -f ${EXISTING_APP_NAME}
# Wait on the remove, should be somewhat quick
for tries in $(seq 1 $REMOVE_RESULT_ATTEMPTS); do
EXISTING_APP_STATUS=$(system application-show $EXISTING_APP_NAME --column status --format value)
if [ "${EXISTING_APP_STATUS}" == 'uploaded' ]; then
log "$NAME: ${EXISTING_APP_NAME} has been removed."
break
fi
sleep $REMOVE_RESULT_SLEEP
done
if [ $tries == $REMOVE_RESULT_ATTEMPTS ]; then
log "$NAME: ${EXISTING_APP_NAME}, version ${EXISTING_APP_VERSION}, was not removed in the allocated time. Exiting for manual intervention..."
exit 1
fi
fi
# delete old app
log "$NAME: Deleting ${EXISTING_APP_NAME}, version ${EXISTING_APP_VERSION}"
system application-delete -f ${EXISTING_APP_NAME}
# Wait on the delete, should be quick
for tries in $(seq 1 $DELETE_RESULT_ATTEMPTS); do
EXISTING_APP_STATUS=$(system application-show $EXISTING_APP_NAME --column status --format value)
if [ -z "${EXISTING_APP_STATUS}" ]; then
log "$NAME: ${EXISTING_APP_NAME} has been deleted."
break
fi
sleep $DELETE_RESULT_SLEEP
done
if [ $tries == $DELETE_RESULT_ATTEMPTS ]; then
log "$NAME: ${EXISTING_APP_NAME}, version ${EXISTING_APP_VERSION}, was not deleted in the allocated time. Exiting for manual intervention..."
exit 1
fi
# upload new app version
log "$NAME: Uploading ${UPGRADE_APP_NAME}, version ${UPGRADE_APP_VERSION} from $UPGRADE_TARBALL"
system application-upload $UPGRADE_TARBALL
# Wait on the upload, should be quick
for tries in $(seq 1 $UPLOAD_RESULT_ATTEMPTS); do
UPGRADE_APP_STATUS=$(system application-show $UPGRADE_APP_NAME --column status --format value)
if [ "${UPGRADE_APP_STATUS}" == 'uploaded' ]; then
log "$NAME: ${UPGRADE_APP_NAME} has been uploaded."
break
fi
sleep $UPLOAD_RESULT_SLEEP
done
if [ $tries == $UPLOAD_RESULT_ATTEMPTS ]; then
log "$NAME: ${UPGRADE_APP_NAME}, version ${UPGRADE_APP_VERSION}, was not uploaded in the allocated time. Exiting for manual intervention..."
exit 1
fi
if [ ! -d "$OIDC_OVERRIDES" ]; then
# this is a soft error; the upgrade procedure should not
# be affected by the absence of helm overrides. Either the
# application is not configured, or the conversion of overrides
# was not possible
log "$NAME: ${UPGRADE_APP_NAME}, version ${UPGRADE_APP_VERSION}, no helm overrides to set. Upgrade of ${UPGRADE_APP_NAME} complete."
exit 0
fi
oidc_specific_handling
if [ $? -ne 0 ]; then
log "$NAME: ${UPGRADE_APP_NAME}, version ${UPGRADE_APP_VERSION}, Helm overrides not set. Exiting for manual intervention..."
if [ "$ORIGINAL_APP_STATUS" == "uploaded" ]; then
# an application that is not applied does not block the upgrade
exit 0
else
exit 1
fi
fi
if [ "$ORIGINAL_APP_STATUS" == "uploaded" ]; then
log "$NAME: ${UPGRADE_APP_NAME}, version ${UPGRADE_APP_VERSION}: upload complete"
exit 0
fi
# dex won't apply without overrides, do not try
if [ ! -f "${OIDC_OVERRIDES}/dex_user_overrides.yaml" ]; then
log "$NAME: ${UPGRADE_APP_NAME}, version ${UPGRADE_APP_VERSION}: dex does not have overrides"
exit 0
fi
# apply new app version
log "$NAME: Applying ${UPGRADE_APP_NAME}, version ${UPGRADE_APP_VERSION}"
system application-apply ${UPGRADE_APP_NAME}
fi
exit 0

View File

@ -1,75 +0,0 @@
#!/bin/bash
# Copyright (c) 2022 Wind River Systems, Inc.
# SPDX-License-Identifier: Apache-2.0
# Remove Etcd RBAC against V2 backend
#
# Note: this can be removed in the release after STX7.0
. /etc/platform/platform.conf
# This will log to /var/log/platform.log
function log {
logger -p local1.info $1
}
FROM_REL=$1
TO_REL=$2
ACTION=$3
ACCEPTED_REL="21.12"
STATIC="/opt/platform/puppet/${sw_version}/hieradata/static.yaml"
NET_KEY="platform::etcd::params::bind_address"
NETVER_KEY="platform::etcd::params::bind_address_version"
PORT="2379"
ETCD_CERT="/etc/etcd/etcd-client.crt"
ETCD_KEY="/etc/etcd/etcd-client.key"
ETCD_CA="/etc/etcd/ca.crt"
ETCD_CMDS="auth disable
user remove root
user remove apiserver-etcd-client"
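# each line of ETCD_CMDS is run below as its own etcdctl invocation,
# e.g. (illustrative) 'etcdctl <tls/endpoint options> auth disable'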
remove-etcd-rbac()
{
local host_addr
local host_ver
local server_url
if [[ ! -f "${STATIC}" ]]; then
log "Script $0 does not find static yaml file: $STATIC"
exit 1
fi
host_addr="$( grep "^${NET_KEY}:" "${STATIC}" | gawk '{print $NF}' )"
host_ver="$( grep "^${NETVER_KEY}:" "${STATIC}" | gawk '{print $NF}' )"
if [ "$host_ver" == "6" ]; then
server_url="https://[${host_addr}]:${PORT},https://127.0.0.1:${PORT}"
else
server_url="https://${host_addr}:${PORT},https://127.0.0.1:${PORT}"
fi
# Ignore the return code of etcdctl calls here because the
# configuration against the v2 API does not persist through backup and restore; it may be absent
while read -r cmd; do
etcdctl --cert-file="${ETCD_CERT}" \
--key-file="${ETCD_KEY}" \
--ca-file="${ETCD_CA}" \
--endpoint="${server_url}" \
$cmd
done <<<"$ETCD_CMDS"
}
log "Script ${0} invoked with from_release = ${FROM_REL} to_release = ${TO_REL} action = ${ACTION}"
if [ ${FROM_REL} == "$ACCEPTED_REL" -a ${ACTION} == "activate" ]; then
remove-etcd-rbac
else
log "Script $0: No actions required from release $FROM_REL to $TO_REL with action $ACTION"
fi
exit 0

View File

@ -1,42 +0,0 @@
#!/bin/bash
#
# Copyright (c) 2022 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# This script resets permissions of some Kubernetes *.key files to 0600 on controller-0 only.
FROM_RELEASE=$1
TO_RELEASE=$2
ACTION=$3
# This will log to /var/log/platform.log
function log {
logger -p local1.info $1
}
reset-k8s-key-file-permissions()
{
APISERVER_KEY="/etc/kubernetes/pki/apiserver-etcd-client.key"
CA_KEY="/etc/kubernetes/pki/ca.key"
declare -a FILE_LIST=("$APISERVER_KEY" "$CA_KEY" )
for file in "${FILE_LIST[@]}"; do
if [ -f "$file" ]; then
log "Resetting permissions for file $file ..."
chmod 0600 $file
fi
done
log "Kubernetes key files permissions successfully reset."
}
log "Script $0 invoked with from_release = $FROM_RELEASE to_release = $TO_RELEASE action = $ACTION"
if [ "$TO_RELEASE" == "22.12" ] && [ "$ACTION" == "activate" ]; then
reset-k8s-key-file-permissions
else
log "Script $0 execution skipped"
fi
exit 0

View File

@ -1,227 +0,0 @@
#!/usr/bin/env python
# Copyright (c) 2022 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# This script will upgrade kubernetes service parameters
# from release 22.06 and 21.12 to 22.12
#
# Note: this can be removed in the release after STX8.0
import datetime
import json
import sys
import ruamel.yaml as yaml
from oslo_utils import uuidutils
import psycopg2
from psycopg2.extras import RealDictCursor
from psycopg2.extras import DictCursor
from controllerconfig.common import log
LOG = log.get_logger(__name__)
K8S_SERVICE = 'kubernetes'
K8S_BOOTSTRAP_PARAMETERS =\
"/opt/platform/config/22.12/last_kube_extra_config_bootstrap.yaml"
SYSINV_K8S_SECTIONS = {
'apiserver_extra_args': 'kube_apiserver',
'controllermanager_extra_args': 'kube_controller_manager',
'scheduler_extra_args': 'kube_scheduler',
'apiserver_extra_volumes': 'kube_apiserver_volumes',
'controllermanager_extra_volumes': 'kube_controller_manager_volumes',
'scheduler_extra_volumes': 'kube_scheduler_volumes',
'kubelet_configurations': 'kubelet'}
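# e.g. (illustrative) entries from the 'apiserver_extra_args' section of the
# bootstrap backup become service parameters in the 'kube_apiserver' section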
default_extra_volumes = {
"encryption-config": {
'name': "encryption-config",
'hostPath': "/etc/kubernetes/encryption-provider.yaml",
'mountPath': "/etc/kubernetes/encryption-provider.yaml",
'readOnly': True,
'pathType': 'File'},
"default-audit-policy-file": {
'name': "default-audit-policy-file",
'hostPath': "/etc/kubernetes/default-audit-policy.yaml",
'mountPath': "/etc/kubernetes/default-audit-policy.yaml",
'readOnly': True,
'pathType': 'File'},
"audit-log-dir": {
'name': "audit-log-dir",
'hostPath': "/var/log/kubernetes/audit/",
'mountPath': "/var/log/kubernetes/audit/",
'readOnly': False,
'pathType': 'DirectoryOrCreate'}
}
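# Returns the existing (name, uuid, value, personality, resource) rows for the
# given service/section from the service_parameter table.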
def get_service_parameters(db_conn, K8S_SERVICE, K8S_SECTION):
with db_conn.cursor(cursor_factory=RealDictCursor) as cur:
cur.execute("select name, uuid, value, personality, resource from "
"service_parameter where service='{}' and "
"section='{}'".format(K8S_SERVICE, K8S_SECTION))
return cur.fetchall()
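# Inserts a new service_parameter row with a generated uuid and the current
# timestamp; personality and resource default to NULL.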
def add_service_parameter(db_conn, name, value, service, section,
personality=None, resource=None):
with db_conn.cursor(cursor_factory=DictCursor) as cur:
cur.execute(
"INSERT INTO service_parameter "
"(created_at, uuid, name, value, service, "
"section, personality, resource) "
"VALUES (%s, %s, %s, %s, %s, %s, %s, %s);",
(datetime.datetime.now(), uuidutils.generate_uuid(),
name, value, service, section, personality, resource))
LOG.info("Adding %s=%s to db [%s]." % (name, value, section))
def main():
action = None
from_release = None
to_release = None
arg = 1
while arg < len(sys.argv):
if arg == 1:
from_release = sys.argv[arg]
elif arg == 2:
to_release = sys.argv[arg]
elif arg == 3:
action = sys.argv[arg]
else:
LOG.error("Invalid option %s." % sys.argv[arg])
return 1
arg += 1
log.configure()
LOG.info("%s invoked with from_release = %s to_release = %s action = %s"
% (sys.argv[0], from_release, to_release, action))
if action == "migrate" and from_release in ['22.06', '21.12']:
try:
db_conn = psycopg2.connect("dbname=sysinv user=postgres")
with db_conn:
migrate_k8s_control_plane_and_kubelet_parameters(db_conn)
return 0
except Exception as ex:
LOG.exception(ex)
return 1
def migrate_k8s_control_plane_and_kubelet_parameters(db_conn):
"""This method will take each k8s cluster config and kubelet parameter
stored in backup data and will restore it into sysinv database
"""
try:
with open(K8S_BOOTSTRAP_PARAMETERS, 'r') as file:
cluster_cfg = yaml.load(file, Loader=yaml.RoundTripLoader)
except FileNotFoundError as e:
msg = str('Loading k8s bootstrap parameters from file. {}'.format(e))
LOG.error(msg)
return 1
# -------------------------------------------------------------------------
# Restoring params into sysinv db
# -------------------------------------------------------------------------
for kubeadm_section in [
'apiserver_extra_args', 'controllermanager_extra_args',
'scheduler_extra_args', 'apiserver_extra_volumes',
'controllermanager_extra_volumes', 'scheduler_extra_volumes',
'kubelet_configurations']:
# current parameters stored into sysinv db
sysinv_section = SYSINV_K8S_SECTIONS.get(kubeadm_section)
sysinv_section_params = get_service_parameters(
db_conn, K8S_SERVICE, sysinv_section)
sysinv_section_params_names =\
[param.get('name') for param in sysinv_section_params]
# cases: apiserver, controller-manager and scheduler extra-args
# params loaded during latest bootstrap take precedence over 22.06
if isinstance(cluster_cfg[kubeadm_section], (
dict, yaml.comments.CommentedMap)):
for param_name, param_value in cluster_cfg[
kubeadm_section].items():
if param_name not in sysinv_section_params_names:
try:
if isinstance(param_value, (
dict, yaml.comments.CommentedMap)):
param_value = str(dict(param_value))
# add new parameter to sysinv
add_service_parameter(
db_conn, param_name, param_value,
K8S_SERVICE, sysinv_section)
except Exception as e:
LOG.error("[%s] Adding %s=%s to db [Detail: %s]." % (
sysinv_section, param_name, param_value, e))
else:
LOG.info("Skipping %s pre existent param." % (param_name))
# cases: apiserver, controller-manager and scheduler extra-volumes
elif isinstance(cluster_cfg[kubeadm_section], (
list, yaml.comments.CommentedSeq)):
for parameter in cluster_cfg[kubeadm_section]:
if not isinstance(parameter, yaml.comments.CommentedMap):
continue
# each parameter is a dictionary containing the fields needed
# to create an extra-volume service-parameter entry and the
# associated k8s configmap.
param_dict = dict(parameter)
param_name = param_dict['name']
if 'content' in param_dict:
param_dict.pop('content')
param_value = json.dumps(param_dict)
if param_name not in sysinv_section_params_names:
try:
# add new extra-volume parameter to sysinv
add_service_parameter(
db_conn, param_name, param_value,
K8S_SERVICE, sysinv_section)
except Exception as e:
LOG.error("[%s] Adding %s=%s to db [Detail: %s]." % (
sysinv_section, param_name, param_value, e))
continue
else:
LOG.info("Skipping %s pre existent param." % (param_name))
# -------------------------------------------------------------------------
# Restoring default extra-volumes into sysinv db
# -------------------------------------------------------------------------
# The default extra_volumes in 22.06 or earlier versions are hardcoded
# in the kubeadm configuration file. The code below adds the corresponding
# service parameter entries to the sysinv database
# (service: kubernetes, section: kube_apiserver_volumes).
# current parameters stored into sysinv db
sysinv_section = 'kube_apiserver_volumes'
sysinv_section_params = get_service_parameters(
db_conn, K8S_SERVICE, sysinv_section)
sysinv_section_params_names =\
[param.get('name') for param in sysinv_section_params]
for param_name, volume_dict in default_extra_volumes.items():
if param_name not in sysinv_section_params_names:
param_value = json.dumps(volume_dict)
try:
add_service_parameter(
db_conn, param_name, param_value,
K8S_SERVICE, sysinv_section)
except Exception as e:
LOG.error("[%s] Adding %s=%s to db [Detail: %s]." % (
sysinv_section, param_name, param_value, e))
raise
LOG.info("k8s service-parameters upgrade completed")
return 0
if __name__ == "__main__":
sys.exit(main())