Fix: "filter" issue for Python 2/3 compatible code

Replace filter(lambda obj: test(obj), data) with
[obj for obj in data if test(obj)], and filter(None, data) with
[obj for obj in data if obj], so that the result is a plain list
under both Python 2 and Python 3. Under Python 3, filter() returns
a lazy iterator, which breaks callers that index the result, take
its len(), or iterate it more than once.
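For illustration only (not part of the change set), a minimal
sketch of the behavior difference, using a made-up data list:

    data = ['a', '', 'b']

    result = filter(None, data)
    # Python 2: result is a plain list -> ['a', 'b']
    # Python 3: result is a one-shot 'filter' object;
    # len(result) and result[0] both raise TypeError

    # Equivalent on both versions:
    result = [obj for obj in data if obj]
    assert result == ['a', 'b']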

Story: 2003433
Task: 28380

Change-Id: I69262226bb454319be6b3d2a1c3c64bb7bb3357c
Signed-off-by: Sun Austin <austin.sun@intel.com>
Commit 9d5675fdf6 (parent 634e513d62)
Sun Austin <austin.sun@intel.com>, 2018-12-11 16:45:37 +08:00
17 changed files with 34 additions and 38 deletions

View File

@@ -63,9 +63,11 @@ class LogicalInterface(object):
                      "Valid values: 576 - 9216" % logical_interface)
         # Parse the ports
-        self.ports = filter(None, [x.strip() for x in
-                            system_config.get(logical_interface,
-                                              'INTERFACE_PORTS').split(',')])
+        self.ports = [_f for _f in
+                      [x.strip() for x in
+                       system_config.get(logical_interface,
+                                         'INTERFACE_PORTS').split(',')]
+                      if _f]
         # Parse/validate the LAG config
         lag_interface = system_config.get(logical_interface,

View File

@@ -552,7 +552,7 @@ def restart_networking(stdout=None, stderr=None):
 def output_to_dict(output):
     dict = {}
-    output = filter(None, output.split('\n'))
+    output = [_f for _f in output.split('\n') if _f]
     for row in output:
         values = row.split()

View File

@@ -70,9 +70,9 @@ def generate(srcfiles):
                              os.path.basename(filepath).split('.')[0]])
         mods_by_pkg.setdefault(pkg_name, list()).append(mod_str)
     # NOTE(lzyeval): place top level modules before packages
-    pkg_names = filter(lambda x: x.endswith(PY_EXT), mods_by_pkg.keys())
+    pkg_names = [x for x in mods_by_pkg.keys() if x.endswith(PY_EXT)]
     pkg_names.sort()
-    ext_names = filter(lambda x: x not in pkg_names, mods_by_pkg.keys())
+    ext_names = [x for x in mods_by_pkg.keys() if x not in pkg_names]
     ext_names.sort()
     pkg_names.extend(ext_names)

View File

@@ -42,7 +42,7 @@ def do_service_list(cc, args):
     fields = ['id', 'name', 'node_name', 'state']
     field_labels = ['id', 'service_name', 'hostname', 'state']
     # remove the entry in the initial state
-    clean_list = filter(lambda x: x.state != 'initial', service)
+    clean_list = [x for x in service if x.state != 'initial']
     for s in clean_list:
         if s.status:
             setattr(s, 'state', s.state + '-' + s.status)

View File

@@ -5207,7 +5207,7 @@ class HostController(rest.RestController):
         if utils.is_aio_simplex_system(pecan.request.dbapi):
             # Check if host has enough OSDs configured for each tier
             tiers = pecan.request.dbapi.storage_tier_get_all()
-            ceph_tiers = filter(lambda t: t.type == constants.SB_TIER_TYPE_CEPH, tiers)
+            ceph_tiers = [t for t in tiers if t.type == constants.SB_TIER_TYPE_CEPH]
             max_replication, __ = \
                 StorageBackendConfig.get_ceph_max_replication(pecan.request.dbapi)
             for tier in ceph_tiers:
@@ -5231,7 +5231,7 @@ class HostController(rest.RestController):
                 _("Can not unlock node until at least one OSD is configured."))
         tiers = pecan.request.dbapi.storage_tier_get_all()
-        ceph_tiers = filter(lambda t: t.type == constants.SB_TIER_TYPE_CEPH, tiers)
+        ceph_tiers = [t for t in tiers if t.type == constants.SB_TIER_TYPE_CEPH]
         # On a two-node configuration, both nodes should have at least one OSD
         # in each tier. Otherwise, the cluster remains in an error state.
         for tier in ceph_tiers:

View File

@@ -225,7 +225,7 @@ class StorageBackendController(rest.RestController):
             return ""
         output = process.stdout.read()
-        fs_list = filter(None, output.split('\n'))
+        fs_list = [_f for _f in output.split('\n') if _f]
         output = fs_list[1].split()
         mib = float(1024 * 1024)
         total = round(float(output[1].strip('K')) / mib, 2)
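For illustration (not part of the change): this hunk shows one of
the two concrete Python 3 failure modes, subscripting a filter()
result (fs_list[1]). A minimal sketch with made-up command output:

    output = "Filesystem  Size\n/dev/sda1  100K\n"

    # Python 3 only: TypeError, 'filter' object is not subscriptable
    #   fs_list = filter(None, output.split('\n'))
    #   fs_list[1]

    # Works on both versions:
    fs_list = [_f for _f in output.split('\n') if _f]
    assert fs_list[1].split() == ['/dev/sda1', '100K']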

View File

@@ -72,7 +72,7 @@ def __set_service_instance_state__(instance, resource_name, crm_resource):
     # Remove any empty strings from reason if the state is not enabled.
     if instance.state != cluster.SERVICE_STATE_ENABLED:
-        instance.reason = filter(None, instance.reason)
+        instance.reason = [_f for _f in instance.reason if _f]
 def __set_service_instance_activity__(instance, crm_resource):

View File

@@ -168,15 +168,13 @@ class CephApiOperator(object):
         response, body = self._ceph_api.osd_crush_tree(body='json')
         if response.status_code == requests.codes.ok:
             # Scan for the destination root, should not be present
-            dest_root = filter(lambda r: r['name'] == dest_root_name,
-                               body['output'])
+            dest_root = [r for r in body['output'] if r['name'] == dest_root_name]
             if dest_root:
                 reason = "Tier '%s' already exists." % dest_root_name
                 raise exception.CephCrushInvalidTierUse(tier=dest_root_name,
                                                         reason=reason)
-            src_root = filter(lambda r: r['name'] == src_root_name,
-                              body['output'])
+            src_root = [r for r in body['output'] if r['name'] == src_root_name]
             if not src_root:
                 reason = ("The required source root '%s' does not exist." %
                           src_root_name)
@@ -205,7 +203,7 @@ class CephApiOperator(object):
         response, body = self._ceph_api.osd_crush_tree(body='json')
         if response.status_code == requests.codes.ok:
             # Scan for the destination root, which should be present
-            root = filter(lambda r: r['name'] == root_name, body['output'])
+            root = [r for r in body['output'] if r['name'] == root_name]
             if not root:
                 reason = "The crushmap root '%s' does not exist." % root_name

View File

@@ -43,7 +43,7 @@ def getcallargs(function, *args, **kwargs):
         else:
             keyed_args[argnames[0]] = None
-    remaining_argnames = filter(lambda x: x not in keyed_args, argnames)
+    remaining_argnames = [x for x in argnames if x not in keyed_args]
     keyed_args.update(dict(zip(remaining_argnames, args)))
     if defaults:

View File

@@ -53,10 +53,8 @@ class StorageBackendConfig(object):
         elif target == constants.SB_TYPE_CEPH:
             # Support multiple ceph backends
             storage_cephs = api.storage_ceph_get_list()
-            primary_backends = filter(
-                lambda b: b['name'] == constants.SB_DEFAULT_NAMES[
-                    constants.SB_TYPE_CEPH],
-                storage_cephs)
+            primary_backends = [b for b in storage_cephs if b['name'] == constants.SB_DEFAULT_NAMES[
+                constants.SB_TYPE_CEPH]]
             if primary_backends:
                 return primary_backends[0]
         elif target == constants.SB_TYPE_EXTERNAL:

View File

@@ -1220,7 +1220,7 @@ def is_cpe(host_obj):
 def output_to_dict(output):
     dict = {}
-    output = filter(None, output.split('\n'))
+    output = [_f for _f in output.split('\n') if _f]
     for row in output:
         values = row.split()

View File

@@ -837,7 +837,7 @@ class CephOperator(object):
         """
         # Handle pools for multiple tiers
         tiers = self._db_api.storage_tier_get_by_cluster(self.cluster_db_uuid)
-        ceph_tiers = filter(lambda t: t.type == constants.SB_TIER_TYPE_CEPH, tiers)
+        ceph_tiers = [t for t in tiers if t.type == constants.SB_TIER_TYPE_CEPH]
         ceph_backends = self._db_api.storage_ceph_get_list()
         for t in ceph_tiers:
@@ -1189,12 +1189,12 @@ class CephOperator(object):
             # either cinder or ceph
             stors = self._db_api.istor_get_by_ihost(i.uuid)
-            osds += len(filter(lambda s: s.tier_name == tiers_obj.name, stors))
+            osds += len([s for s in stors if s.tier_name == tiers_obj.name])
         osds_raw = osds
         stors = self._db_api.istor_get_by_ihost(last_storage.uuid)
         storage_gap = len(storage_hosts) % replication
-        stors_number = len(filter(lambda s: s.tier_name == tiers_obj.name, stors))
+        stors_number = len([s for s in stors if s.tier_name == tiers_obj.name])
         if storage_gap != 0 and stors_number != 0:
             osds_adjust = (replication - storage_gap) * stors_number
             osds += osds_adjust
@@ -1514,7 +1514,7 @@ class CephOperator(object):
         """
         tiers = self._db_api.storage_tier_get_by_cluster(self.cluster_db_uuid)
-        ceph_tiers = filter(lambda t: t.type == constants.SB_TIER_TYPE_CEPH, tiers)
+        ceph_tiers = [t for t in tiers if t.type == constants.SB_TIER_TYPE_CEPH]
         for t in ceph_tiers:
             # Only provision default quotas once
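For illustration (not part of the change): the OSD-counting hunks
above show the other Python 3 failure mode, taking len() of a
filter() result. A minimal sketch with a made-up stor list (the
real objects expose tier_name as an attribute, not a dict key):

    stors = [{'tier_name': 'storage'}, {'tier_name': 'gold'}]

    # Python 3 only: TypeError, object of type 'filter' has no len()
    #   osds = len(filter(lambda s: s['tier_name'] == 'storage', stors))

    # Works on both versions:
    osds = len([s for s in stors if s['tier_name'] == 'storage'])
    assert osds == 1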

View File

@@ -7335,7 +7335,7 @@ class ConductorManager(service.PeriodicService):
             output = subprocess.check_output("drbd-overview",
                                              stderr=subprocess.STDOUT)
-            output = filter(None, output.split('\n'))
+            output = [_f for _f in output.split('\n') if _f]
             for row in output:
                 if "Connected" in row:
@@ -7349,7 +7349,7 @@ class ConductorManager(service.PeriodicService):
     def _drbd_fs_sync(self):
         output = subprocess.check_output("drbd-overview",
                                          stderr=subprocess.STDOUT)
-        output = filter(None, output.split('\n'))
+        output = [_f for _f in output.split('\n') if _f]
         fs = []
         for row in output:
@@ -7371,7 +7371,7 @@ class ConductorManager(service.PeriodicService):
     def _drbd_fs_updated(self, context):
         drbd_dict = subprocess.check_output("drbd-overview",
                                             stderr=subprocess.STDOUT)
-        drbd_dict = filter(None, drbd_dict.split('\n'))
+        drbd_dict = [_f for _f in drbd_dict.split('\n') if _f]
         drbd_patch_size = 0
         patch_lv_size = 0
@@ -7383,7 +7383,7 @@ class ConductorManager(service.PeriodicService):
         for row in drbd_dict:
             if "sync\'ed" not in row:
                 try:
-                    size = (filter(None, row.split(' ')))[8]
+                    size = ([_f for _f in row.split(' ') if _f])[8]
                 except IndexError:
                     LOG.error("Skipping unexpected drbd-overview output: %s" % row)
                     continue

View File

@@ -70,9 +70,9 @@ def generate(srcfiles):
                              os.path.basename(filepath).split('.')[0]])
         mods_by_pkg.setdefault(pkg_name, list()).append(mod_str)
     # NOTE(lzyeval): place top level modules before packages
-    pkg_names = filter(lambda x: x.endswith(PY_EXT), mods_by_pkg.keys())
+    pkg_names = [x for x in mods_by_pkg.keys() if x.endswith(PY_EXT)]
     pkg_names.sort()
-    ext_names = filter(lambda x: x not in pkg_names, mods_by_pkg.keys())
+    ext_names = [x for x in mods_by_pkg.keys() if x not in pkg_names]
     ext_names.sort()
     pkg_names.extend(ext_names)

View File

@@ -43,7 +43,7 @@ def _dont_use_this():
 def _find_objects(t):
-    return filter(lambda o: isinstance(o, t), gc.get_objects())
+    return [o for o in gc.get_objects() if isinstance(o, t)]
 def _print_greenthreads():

View File

@@ -83,8 +83,7 @@ class RedisFanoutExchange(RedisExchange):
     def run(self, topic):
         topic = topic.split('~', 1)[1]
         hosts = self.redis.smembers(topic)
-        good_hosts = filter(
-            lambda host: self.matchmaker.is_alive(topic, host), hosts)
+        good_hosts = [host for host in hosts if self.matchmaker.is_alive(topic, host)]
         return [(x, x.split('.', 1)[1]) for x in good_hosts]

View File

@@ -788,9 +788,8 @@ class CinderPuppet(openstack.OpenstackBasePuppet):
                                            is_service_enabled, enabled_backends)
         # Build the list of possible HPE3PAR backends
-        possible_hpe3pars = filter(
-            lambda s: constants.SERVICE_PARAM_SECTION_CINDER_HPE3PAR in s,
-            SP_CINDER_SECTION_MAPPING.keys())
+        possible_hpe3pars = [s for s in SP_CINDER_SECTION_MAPPING.keys()
+                             if constants.SERVICE_PARAM_SECTION_CINDER_HPE3PAR in s]
         config.update({'openstack::cinder::backends::hpe3par::sections': possible_hpe3pars})
         return config