From 9d5675fdf6d0846af54f1068c01e224000ab1bb3 Mon Sep 17 00:00:00 2001 From: Sun Austin Date: Tue, 11 Dec 2018 16:45:37 +0800 Subject: [PATCH] Fix: "filter" issue for Python 2/3 compatible code Replace filter(lambda obj: test(obj), data) with [obj for obj in data if test(obj)] Story: 2003433 Task: 28380 Change-Id: I69262226bb454319be6b3d2a1c3c64bb7bb3357c Signed-off-by: Sun Austin --- .../configutilities/common/configobjects.py | 8 +++++--- .../controllerconfig/controllerconfig/utils.py | 2 +- .../cgtsclient/openstack/common/config/generator.py | 4 ++-- .../cgts-client/cgtsclient/v1/sm_service_shell.py | 2 +- sysinv/sysinv/sysinv/sysinv/api/controllers/v1/host.py | 4 ++-- .../sysinv/sysinv/api/controllers/v1/storage_backend.py | 2 +- .../sysinv/sysinv/sysinv/cluster/cluster_services_api.py | 2 +- sysinv/sysinv/sysinv/sysinv/common/ceph.py | 8 +++----- sysinv/sysinv/sysinv/sysinv/common/safe_utils.py | 2 +- .../sysinv/sysinv/sysinv/common/storage_backend_conf.py | 6 ++---- sysinv/sysinv/sysinv/sysinv/common/utils.py | 2 +- sysinv/sysinv/sysinv/sysinv/conductor/ceph.py | 8 ++++---- sysinv/sysinv/sysinv/sysinv/conductor/manager.py | 8 ++++---- .../sysinv/sysinv/openstack/common/config/generator.py | 4 ++-- .../sysinv/sysinv/openstack/common/eventlet_backdoor.py | 2 +- .../sysinv/openstack/common/rpc/matchmaker_redis.py | 3 +-- sysinv/sysinv/sysinv/sysinv/puppet/cinder.py | 5 ++--- 17 files changed, 34 insertions(+), 38 deletions(-) diff --git a/configutilities/configutilities/configutilities/common/configobjects.py b/configutilities/configutilities/configutilities/common/configobjects.py index a97f2ddc06..5c9464f703 100755 --- a/configutilities/configutilities/configutilities/common/configobjects.py +++ b/configutilities/configutilities/configutilities/common/configobjects.py @@ -63,9 +63,11 @@ class LogicalInterface(object): "Valid values: 576 - 9216" % logical_interface) # Parse the ports - self.ports = filter(None, [x.strip() for x in - system_config.get(logical_interface, - 'INTERFACE_PORTS').split(',')]) + self.ports = [_f for _f in + [x.strip() for x in + system_config.get(logical_interface, + 'INTERFACE_PORTS').split(',')] + if _f] # Parse/validate the LAG config lag_interface = system_config.get(logical_interface, diff --git a/controllerconfig/controllerconfig/controllerconfig/utils.py b/controllerconfig/controllerconfig/controllerconfig/utils.py index 03ae34ed78..15c5598abe 100644 --- a/controllerconfig/controllerconfig/controllerconfig/utils.py +++ b/controllerconfig/controllerconfig/controllerconfig/utils.py @@ -552,7 +552,7 @@ def restart_networking(stdout=None, stderr=None): def output_to_dict(output): dict = {} - output = filter(None, output.split('\n')) + output = [_f for _f in output.split('\n') if _f] for row in output: values = row.split() diff --git a/sysinv/cgts-client/cgts-client/cgtsclient/openstack/common/config/generator.py b/sysinv/cgts-client/cgts-client/cgtsclient/openstack/common/config/generator.py index 2af93aad35..3a9a2db512 100644 --- a/sysinv/cgts-client/cgts-client/cgtsclient/openstack/common/config/generator.py +++ b/sysinv/cgts-client/cgts-client/cgtsclient/openstack/common/config/generator.py @@ -70,9 +70,9 @@ def generate(srcfiles): os.path.basename(filepath).split('.')[0]]) mods_by_pkg.setdefault(pkg_name, list()).append(mod_str) # NOTE(lzyeval): place top level modules before packages - pkg_names = filter(lambda x: x.endswith(PY_EXT), mods_by_pkg.keys()) + pkg_names = [x for x in mods_by_pkg.keys() if x.endswith(PY_EXT)] pkg_names.sort() - ext_names = 
filter(lambda x: x not in pkg_names, mods_by_pkg.keys()) + ext_names = [x for x in mods_by_pkg.keys() if x not in pkg_names] ext_names.sort() pkg_names.extend(ext_names) diff --git a/sysinv/cgts-client/cgts-client/cgtsclient/v1/sm_service_shell.py b/sysinv/cgts-client/cgts-client/cgtsclient/v1/sm_service_shell.py index c199e38212..d166a4ff16 100644 --- a/sysinv/cgts-client/cgts-client/cgtsclient/v1/sm_service_shell.py +++ b/sysinv/cgts-client/cgts-client/cgtsclient/v1/sm_service_shell.py @@ -42,7 +42,7 @@ def do_service_list(cc, args): fields = ['id', 'name', 'node_name', 'state'] field_labels = ['id', 'service_name', 'hostname', 'state'] # remove the entry in the initial state - clean_list = filter(lambda x: x.state != 'initial', service) + clean_list = [x for x in service if x.state != 'initial'] for s in clean_list: if s.status: setattr(s, 'state', s.state + '-' + s.status) diff --git a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/host.py b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/host.py index 52bbd07ed0..bb69b030ea 100644 --- a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/host.py +++ b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/host.py @@ -5207,7 +5207,7 @@ class HostController(rest.RestController): if utils.is_aio_simplex_system(pecan.request.dbapi): # Check if host has enough OSDs configured for each tier tiers = pecan.request.dbapi.storage_tier_get_all() - ceph_tiers = filter(lambda t: t.type == constants.SB_TIER_TYPE_CEPH, tiers) + ceph_tiers = [t for t in tiers if t.type == constants.SB_TIER_TYPE_CEPH] max_replication, __ = \ StorageBackendConfig.get_ceph_max_replication(pecan.request.dbapi) for tier in ceph_tiers: @@ -5231,7 +5231,7 @@ class HostController(rest.RestController): _("Can not unlock node until at least one OSD is configured.")) tiers = pecan.request.dbapi.storage_tier_get_all() - ceph_tiers = filter(lambda t: t.type == constants.SB_TIER_TYPE_CEPH, tiers) + ceph_tiers = [t for t in tiers if t.type == constants.SB_TIER_TYPE_CEPH] # On a two-node configuration, both nodes should have at least one OSD # in each tier. Otherwise, the cluster is remains in an error state. for tier in ceph_tiers: diff --git a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/storage_backend.py b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/storage_backend.py index 3a01160a21..f625827c85 100644 --- a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/storage_backend.py +++ b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/storage_backend.py @@ -225,7 +225,7 @@ class StorageBackendController(rest.RestController): return "" output = process.stdout.read() - fs_list = filter(None, output.split('\n')) + fs_list = [_f for _f in output.split('\n') if _f] output = fs_list[1].split() mib = float(1024 * 1024) total = round(float(output[1].strip('K')) / mib, 2) diff --git a/sysinv/sysinv/sysinv/sysinv/cluster/cluster_services_api.py b/sysinv/sysinv/sysinv/sysinv/cluster/cluster_services_api.py index 4961fb68e6..4dc3232d38 100644 --- a/sysinv/sysinv/sysinv/sysinv/cluster/cluster_services_api.py +++ b/sysinv/sysinv/sysinv/sysinv/cluster/cluster_services_api.py @@ -72,7 +72,7 @@ def __set_service_instance_state__(instance, resource_name, crm_resource): # Remove any empty strings from reason if the state is not enabled. 
if instance.state != cluster.SERVICE_STATE_ENABLED: - instance.reason = filter(None, instance.reason) + instance.reason = [_f for _f in instance.reason if _f] def __set_service_instance_activity__(instance, crm_resource): diff --git a/sysinv/sysinv/sysinv/sysinv/common/ceph.py b/sysinv/sysinv/sysinv/sysinv/common/ceph.py index f0ca9bbce9..03f843b9b5 100644 --- a/sysinv/sysinv/sysinv/sysinv/common/ceph.py +++ b/sysinv/sysinv/sysinv/sysinv/common/ceph.py @@ -168,15 +168,13 @@ class CephApiOperator(object): response, body = self._ceph_api.osd_crush_tree(body='json') if response.status_code == requests.codes.ok: # Scan for the destination root, should not be present - dest_root = filter(lambda r: r['name'] == dest_root_name, - body['output']) + dest_root = [r for r in body['output'] if r['name'] == dest_root_name] if dest_root: reason = "Tier '%s' already exists." % dest_root_name raise exception.CephCrushInvalidTierUse(tier=dest_root_name, reason=reason) - src_root = filter(lambda r: r['name'] == src_root_name, - body['output']) + src_root = [r for r in body['output'] if r['name'] == src_root_name] if not src_root: reason = ("The required source root '%s' does not exist." % src_root_name) @@ -205,7 +203,7 @@ class CephApiOperator(object): response, body = self._ceph_api.osd_crush_tree(body='json') if response.status_code == requests.codes.ok: # Scan for the destinaion root, should not be present - root = filter(lambda r: r['name'] == root_name, body['output']) + root = [r for r in body['output'] if r['name'] == root_name] if not root: reason = "The crushmap root '%s' does not exist." % root_name diff --git a/sysinv/sysinv/sysinv/sysinv/common/safe_utils.py b/sysinv/sysinv/sysinv/sysinv/common/safe_utils.py index 7f03fd6796..cca9bcabb9 100644 --- a/sysinv/sysinv/sysinv/sysinv/common/safe_utils.py +++ b/sysinv/sysinv/sysinv/sysinv/common/safe_utils.py @@ -43,7 +43,7 @@ def getcallargs(function, *args, **kwargs): else: keyed_args[argnames[0]] = None - remaining_argnames = filter(lambda x: x not in keyed_args, argnames) + remaining_argnames = [x for x in argnames if x not in keyed_args] keyed_args.update(dict(zip(remaining_argnames, args))) if defaults: diff --git a/sysinv/sysinv/sysinv/sysinv/common/storage_backend_conf.py b/sysinv/sysinv/sysinv/sysinv/common/storage_backend_conf.py index 7be59a29b4..9c98e1a9a8 100644 --- a/sysinv/sysinv/sysinv/sysinv/common/storage_backend_conf.py +++ b/sysinv/sysinv/sysinv/sysinv/common/storage_backend_conf.py @@ -53,10 +53,8 @@ class StorageBackendConfig(object): elif target == constants.SB_TYPE_CEPH: # Support multiple ceph backends storage_cephs = api.storage_ceph_get_list() - primary_backends = filter( - lambda b: b['name'] == constants.SB_DEFAULT_NAMES[ - constants.SB_TYPE_CEPH], - storage_cephs) + primary_backends = [b for b in storage_cephs if b['name'] == constants.SB_DEFAULT_NAMES[ + constants.SB_TYPE_CEPH]] if primary_backends: return primary_backends[0] elif target == constants.SB_TYPE_EXTERNAL: diff --git a/sysinv/sysinv/sysinv/sysinv/common/utils.py b/sysinv/sysinv/sysinv/sysinv/common/utils.py index 2b311ae411..bd5cb5b7c9 100644 --- a/sysinv/sysinv/sysinv/sysinv/common/utils.py +++ b/sysinv/sysinv/sysinv/sysinv/common/utils.py @@ -1220,7 +1220,7 @@ def is_cpe(host_obj): def output_to_dict(output): dict = {} - output = filter(None, output.split('\n')) + output = [_f for _f in output.split('\n') if _f] for row in output: values = row.split() diff --git a/sysinv/sysinv/sysinv/sysinv/conductor/ceph.py b/sysinv/sysinv/sysinv/sysinv/conductor/ceph.py 
index f87f51f5ad..9803b2c571 100644 --- a/sysinv/sysinv/sysinv/sysinv/conductor/ceph.py +++ b/sysinv/sysinv/sysinv/sysinv/conductor/ceph.py @@ -837,7 +837,7 @@ class CephOperator(object): """ # Handle pools for multiple tiers tiers = self._db_api.storage_tier_get_by_cluster(self.cluster_db_uuid) - ceph_tiers = filter(lambda t: t.type == constants.SB_TIER_TYPE_CEPH, tiers) + ceph_tiers = [t for t in tiers if t.type == constants.SB_TIER_TYPE_CEPH] ceph_backends = self._db_api.storage_ceph_get_list() for t in ceph_tiers: @@ -1189,12 +1189,12 @@ class CephOperator(object): # either cinder or ceph stors = self._db_api.istor_get_by_ihost(i.uuid) - osds += len(filter(lambda s: s.tier_name == tiers_obj.name, stors)) + osds += len([s for s in stors if s.tier_name == tiers_obj.name]) osds_raw = osds stors = self._db_api.istor_get_by_ihost(last_storage.uuid) storage_gap = len(storage_hosts) % replication - stors_number = len(filter(lambda s: s.tier_name == tiers_obj.name, stors)) + stors_number = len([s for s in stors if s.tier_name == tiers_obj.name]) if storage_gap != 0 and stors_number != 0: osds_adjust = (replication - storage_gap) * stors_number osds += osds_adjust @@ -1514,7 +1514,7 @@ class CephOperator(object): """ tiers = self._db_api.storage_tier_get_by_cluster(self.cluster_db_uuid) - ceph_tiers = filter(lambda t: t.type == constants.SB_TIER_TYPE_CEPH, tiers) + ceph_tiers = [t for t in tiers if t.type == constants.SB_TIER_TYPE_CEPH] for t in ceph_tiers: # Only provision default quotas once diff --git a/sysinv/sysinv/sysinv/sysinv/conductor/manager.py b/sysinv/sysinv/sysinv/sysinv/conductor/manager.py index 6cc89fc8aa..88a9c9240c 100644 --- a/sysinv/sysinv/sysinv/sysinv/conductor/manager.py +++ b/sysinv/sysinv/sysinv/sysinv/conductor/manager.py @@ -7335,7 +7335,7 @@ class ConductorManager(service.PeriodicService): output = subprocess.check_output("drbd-overview", stderr=subprocess.STDOUT) - output = filter(None, output.split('\n')) + output = [_f for _f in output.split('\n') if _f] for row in output: if "Connected" in row: @@ -7349,7 +7349,7 @@ class ConductorManager(service.PeriodicService): def _drbd_fs_sync(self): output = subprocess.check_output("drbd-overview", stderr=subprocess.STDOUT) - output = filter(None, output.split('\n')) + output = [_f for _f in output.split('\n') if _f] fs = [] for row in output: @@ -7371,7 +7371,7 @@ class ConductorManager(service.PeriodicService): def _drbd_fs_updated(self, context): drbd_dict = subprocess.check_output("drbd-overview", stderr=subprocess.STDOUT) - drbd_dict = filter(None, drbd_dict.split('\n')) + drbd_dict = [_f for _f in drbd_dict.split('\n') if _f] drbd_patch_size = 0 patch_lv_size = 0 @@ -7383,7 +7383,7 @@ class ConductorManager(service.PeriodicService): for row in drbd_dict: if "sync\'ed" not in row: try: - size = (filter(None, row.split(' ')))[8] + size = ([_f for _f in row.split(' ') if _f])[8] except IndexError: LOG.error("Skipping unexpected drbd-overview output: %s" % row) continue diff --git a/sysinv/sysinv/sysinv/sysinv/openstack/common/config/generator.py b/sysinv/sysinv/sysinv/sysinv/openstack/common/config/generator.py index ecaaad3a01..f3b89e9c7d 100755 --- a/sysinv/sysinv/sysinv/sysinv/openstack/common/config/generator.py +++ b/sysinv/sysinv/sysinv/sysinv/openstack/common/config/generator.py @@ -70,9 +70,9 @@ def generate(srcfiles): os.path.basename(filepath).split('.')[0]]) mods_by_pkg.setdefault(pkg_name, list()).append(mod_str) # NOTE(lzyeval): place top level modules before packages - pkg_names = filter(lambda x: 
x.endswith(PY_EXT), mods_by_pkg.keys()) + pkg_names = [x for x in mods_by_pkg.keys() if x.endswith(PY_EXT)] pkg_names.sort() - ext_names = filter(lambda x: x not in pkg_names, mods_by_pkg.keys()) + ext_names = [x for x in mods_by_pkg.keys() if x not in pkg_names] ext_names.sort() pkg_names.extend(ext_names) diff --git a/sysinv/sysinv/sysinv/sysinv/openstack/common/eventlet_backdoor.py b/sysinv/sysinv/sysinv/sysinv/openstack/common/eventlet_backdoor.py index deca3a9819..2f85b475f9 100644 --- a/sysinv/sysinv/sysinv/sysinv/openstack/common/eventlet_backdoor.py +++ b/sysinv/sysinv/sysinv/sysinv/openstack/common/eventlet_backdoor.py @@ -43,7 +43,7 @@ def _dont_use_this(): def _find_objects(t): - return filter(lambda o: isinstance(o, t), gc.get_objects()) + return [o for o in gc.get_objects() if isinstance(o, t)] def _print_greenthreads(): diff --git a/sysinv/sysinv/sysinv/sysinv/openstack/common/rpc/matchmaker_redis.py b/sysinv/sysinv/sysinv/sysinv/openstack/common/rpc/matchmaker_redis.py index dfeaa6b6f6..b68cdda5ff 100644 --- a/sysinv/sysinv/sysinv/sysinv/openstack/common/rpc/matchmaker_redis.py +++ b/sysinv/sysinv/sysinv/sysinv/openstack/common/rpc/matchmaker_redis.py @@ -83,8 +83,7 @@ class RedisFanoutExchange(RedisExchange): def run(self, topic): topic = topic.split('~', 1)[1] hosts = self.redis.smembers(topic) - good_hosts = filter( - lambda host: self.matchmaker.is_alive(topic, host), hosts) + good_hosts = [host for host in hosts if self.matchmaker.is_alive(topic, host)] return [(x, x.split('.', 1)[1]) for x in good_hosts] diff --git a/sysinv/sysinv/sysinv/sysinv/puppet/cinder.py b/sysinv/sysinv/sysinv/sysinv/puppet/cinder.py index 0a71d1c1c0..d6629bef12 100644 --- a/sysinv/sysinv/sysinv/sysinv/puppet/cinder.py +++ b/sysinv/sysinv/sysinv/sysinv/puppet/cinder.py @@ -788,9 +788,8 @@ class CinderPuppet(openstack.OpenstackBasePuppet): is_service_enabled, enabled_backends) # Build the list of possible HPE3PAR backends - possible_hpe3pars = filter( - lambda s: constants.SERVICE_PARAM_SECTION_CINDER_HPE3PAR in s, - SP_CINDER_SECTION_MAPPING.keys()) + possible_hpe3pars = [s for s in SP_CINDER_SECTION_MAPPING.keys() + if constants.SERVICE_PARAM_SECTION_CINDER_HPE3PAR in s] config.update({'openstack::cinder::backends::hpe3par::sections': possible_hpe3pars}) return config
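
Below is a minimal standalone sketch, not part of the patch itself, showing why the filter-to-comprehension conversion matters for Python 2/3 compatible code and how the two forms used throughout this change translate. The sample data and variable names (hosts, good, controllers) are made up for illustration and do not come from the tree.

# Why the conversion matters: on Python 2, filter() returns a list; on
# Python 3 it returns a lazy iterator, so callers that index, len(), or
# re-iterate the result break.

hosts = ['controller-0', '', 'controller-1', None, 'storage-0']

# Python 2-only style (works there only because filter() returns a list):
#   good = filter(None, hosts)    # drops falsy entries
#   first = good[0]               # TypeError on Python 3: filter object
#                                 # is not subscriptable

# Python 2/3 compatible form used in this change:
good = [h for h in hosts if h]    # same effect as filter(None, hosts),
                                  # but always a real list
first = good[0]                   # indexing works on both versions
count = len(good)                 # len() works on both versions

# The lambda-predicate form converts the same way:
#   filter(lambda h: h.startswith('controller'), good)
controllers = [h for h in good if h.startswith('controller')]

print(first, count, controllers)

The comprehension is preferred here over wrapping the call as list(filter(...)) because it yields a real list on both interpreters and avoids an extra lambda invocation per element.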