Update upgrade code for removing Ceph Cache Tiering
Ceph Cache Tiering feature is not supported anymore. This implies: (a) service parameters for cache tiering are removed; (b) the personality subtype is removed; (c) the cache tier is removed from the crushmap. Story: 2002884 Task: 22846 Change-Id: I9c1b74860a4dc90875e0e889d179f028bed9ecbe Signed-off-by: Jack Ding <jack.ding@windriver.com>
This commit is contained in:
parent
f645b9c2e0
commit
a4e88b6069
|
@ -0,0 +1,104 @@
|
|||
#!/usr/bin/env python
|
||||
#
|
||||
# Copyright (c) 2018 Wind River Systems, Inc.
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
# This script will update the storage backends for controller-1.
|
||||
#
|
||||
|
||||
import json
|
||||
import psycopg2
|
||||
import sys
|
||||
|
||||
from sysinv.common import constants
|
||||
from psycopg2.extras import RealDictCursor
|
||||
from controllerconfig.common import log
|
||||
|
||||
LOG = log.get_logger(__name__)
|
||||
|
||||
# Section names used by the retired Ceph cache tiering feature.  Rows in the
# service_parameter table belonging to these sections are deleted on upgrade.
SERVICE_PARAM_SECTION_CEPH_CACHE_TIER = 'cache_tiering'
SERVICE_PARAM_SECTION_CEPH_CACHE_TIER_DESIRED = 'cache_tiering.desired'
SERVICE_PARAM_SECTION_CEPH_CACHE_TIER_APPLIED = 'cache_tiering.applied'
|
||||
|
||||
|
||||
def main():
    """Entry point for the upgrade script.

    Expects positional arguments: from_release, to_release, action.
    Runs the Ceph cache tiering cleanup only when migrating from 18.03.
    Returns 1 on bad arguments or cleanup failure, None (exit code 0)
    otherwise.
    """
    action = None
    from_release = None
    to_release = None  # noqa

    for position, value in enumerate(sys.argv[1:], start=1):
        if position == 1:
            from_release = value
        elif position == 2:
            to_release = value  # noqa
        elif position == 3:
            action = value
        else:
            # Anything beyond the third positional argument is an error.
            print("Invalid option %s." % value)
            return 1

    log.configure()

    if from_release == "18.03" and action == "migrate":
        try:
            cleanup_ceph_cache_tiering_service_parameters(from_release)
            cleanup_ceph_personality_subtype(from_release)
        except Exception as ex:
            LOG.exception(ex)
            return 1
||||
|
||||
|
||||
def cleanup_ceph_cache_tiering_service_parameters(from_release):
    """Delete retired Ceph cache tiering rows from service_parameter.

    Removes every service_parameter row whose service is Ceph and whose
    section is one of the dropped cache tiering sections (base, desired,
    applied).

    :param from_release: release being migrated from (unused; kept for
                         upgrade-script signature consistency)
    """
    retired_sections = (SERVICE_PARAM_SECTION_CEPH_CACHE_TIER,
                        SERVICE_PARAM_SECTION_CEPH_CACHE_TIER_DESIRED,
                        SERVICE_PARAM_SECTION_CEPH_CACHE_TIER_APPLIED)

    conn = psycopg2.connect("dbname=sysinv user=postgres")
    with conn:
        with conn.cursor(cursor_factory=RealDictCursor) as cur:
            for section in retired_sections:
                cur.execute("select * from service_parameter where service=%s "
                            "and section=%s", (constants.SERVICE_TYPE_CEPH,
                                               section,))
                rows = cur.fetchall()
                if not rows:
                    LOG.info("No service_parameter data for section %s "
                             "found." % section)
                    continue

                for row in rows:
                    LOG.debug("Found %s/%s" % (row['section'], row['name']))

                LOG.info("Removing ceph service parameters from section "
                         "%s" % section)
                cur.execute("delete from service_parameter where service=%s "
                            "and section=%s", (constants.SERVICE_TYPE_CEPH,
                                               section,))
|
||||
|
||||
|
||||
def cleanup_ceph_personality_subtype(from_release):
    """Remove the retired 'pers_subtype' key from each host's capabilities.

    The Ceph cache tiering feature stored a personality subtype in every
    host's capabilities JSON; strip that key during migration.

    :param from_release: release being migrated from (unused; kept for
                         upgrade-script signature consistency)
    """
    conn = psycopg2.connect("dbname=sysinv user=postgres")
    with conn:
        with conn.cursor(cursor_factory=RealDictCursor) as cur:
            cur.execute("select hostname, capabilities from i_host")
            rows = cur.fetchall()
            if not rows:
                LOG.info("No capabilities data found ")
                return

            for row in rows:
                LOG.debug("Found host capabilities %s/%s" %
                          (row['hostname'], row['capabilities']))
                json_dict = json.loads(row['capabilities'])
                if 'pers_subtype' not in json_dict:
                    # Nothing to strip for this host; leave its row untouched.
                    continue
                del json_dict['pers_subtype']

                LOG.info("Removing ceph pers_subtype from capabilities")
                # Bug fix: the previous UPDATE had no WHERE clause, so every
                # iteration overwrote the capabilities of ALL hosts with the
                # current host's JSON (last host wins).  Restrict the update
                # to the host being processed, and let psycopg2 parameter
                # binding quote the JSON instead of %-interpolating it into
                # the SQL string (which broke on embedded quotes).
                cur.execute("update i_host set capabilities=%s "
                            "where hostname=%s",
                            (json.dumps(json_dict), row['hostname']))
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
sys.exit(main())
|
|
@ -76,6 +76,7 @@ from sysinv.common import service
|
|||
from sysinv.common import utils as cutils
|
||||
from sysinv.common.retrying import retry
|
||||
from sysinv.common.storage_backend_conf import StorageBackendConfig
|
||||
from cephclient import wrapper as ceph
|
||||
from sysinv.conductor import ceph as iceph
|
||||
from sysinv.conductor import openstack
|
||||
from sysinv.db import api as dbapi
|
||||
|
@ -149,6 +150,8 @@ class ConductorManager(service.PeriodicService):
|
|||
self.fm_api = None
|
||||
self.fm_log = None
|
||||
self._ceph = None
|
||||
self._ceph_api = ceph.CephWrapper(
|
||||
endpoint='http://localhost:5001/api/v0.1/')
|
||||
|
||||
self._openstack = None
|
||||
self._api_token = None
|
||||
|
@ -8606,6 +8609,31 @@ class ConductorManager(service.PeriodicService):
|
|||
controller_0 = self.dbapi.ihost_get_by_hostname(
|
||||
constants.CONTROLLER_0_HOSTNAME)
|
||||
|
||||
# TODO: This code is only useful for supporting R5 to R6 upgrades.
|
||||
# Remove in future release.
|
||||
# update crushmap and remove cache-tier on upgrade
|
||||
if from_version == tsc.SW_VERSION_1803:
|
||||
ceph_backend = StorageBackendConfig.get_backend(self.dbapi, constants.CINDER_BACKEND_CEPH)
|
||||
if ceph_backend and ceph_backend.state == constants.SB_STATE_CONFIGURED:
|
||||
try:
|
||||
response, body = self._ceph_api.osd_crush_rule_rm("cache_tier_ruleset",
|
||||
body='json')
|
||||
if response.ok:
|
||||
LOG.info("Successfully removed cache_tier_ruleset "
|
||||
"[ceph osd crush rule rm cache_tier_ruleset]")
|
||||
try:
|
||||
response, body = self._ceph_api.osd_crush_remove("cache-tier",
|
||||
body='json')
|
||||
if response.ok:
|
||||
LOG.info("Successfully removed cache_tier "
|
||||
"[ceph osd crush remove cache-tier]")
|
||||
except exception.CephFailure:
|
||||
LOG.warn("Failed to remove bucket cache-tier from crushmap")
|
||||
pass
|
||||
except exception.CephFailure:
|
||||
LOG.warn("Failed to remove rule cache-tier from crushmap")
|
||||
pass
|
||||
|
||||
if state in [constants.UPGRADE_ABORTING,
|
||||
constants.UPGRADE_ABORTING_ROLLBACK]:
|
||||
if upgrade.state != constants.UPGRADE_ABORT_COMPLETING:
|
||||
|
@ -9338,12 +9366,10 @@ class ConductorManager(service.PeriodicService):
|
|||
response = self._openstack.cinder_prepare_db_for_volume_restore(context)
|
||||
return response
|
||||
|
||||
# TODO: remove this function after 1st 17.x release
|
||||
#
|
||||
def get_software_upgrade_status(self, context):
|
||||
"""
|
||||
Software upgrade status is needed by ceph-manager to set require_jewel_osds
|
||||
flag when upgrading from 16.10 to 17.x
|
||||
Software upgrade status is needed by ceph-manager to take ceph specific
|
||||
upgrade actions
|
||||
"""
|
||||
upgrade = {
|
||||
'from_version': None,
|
||||
|
|
|
@ -1463,21 +1463,19 @@ class ConductorAPI(sysinv.openstack.common.rpc.proxy.RpcProxy):
|
|||
return self.call(context,
|
||||
self.make_msg('get_ceph_object_pool_name'))
|
||||
|
||||
    # TODO: remove this function after 1st 17.x release
    #
    def get_software_upgrade_status(self, context):
        """Return the current software upgrade status from the conductor.

        Software upgrade status is needed by ceph-manager to take ceph
        specific upgrade actions.

        This rpcapi function is added to signal that conductor's
        get_software_upgrade_status function is used by an RPC client.

        ceph-manager however doesn't call rpcapi.get_software_upgrade_status
        and instead it uses oslo_messaging to construct a call on conductor's
        topic for this function.  The reason is that sysinv is using an old
        version of openstack common and messaging libraries incompatible with
        the one used by ceph-manager.
        """
        return self.call(context,
                         self.make_msg('get_software_upgrade_status'))
|
||||
|
|
Loading…
Reference in New Issue