Fix "dict" usage issues for Python 2/3 compatible code

Replace dict.iteritems() with dict.items()
Replace dict.iterkeys() with dict.keys()
Change dict.keys() to list(dict.keys()) where a materialized list is
required (indexing, sorting, passing as an argument); where only the
keys are iterated, iterate the dict directly (for key in d).

Story: 2002909
Task: 24564

Change-Id: Icf5ef6cc9947a87ee8e013d1225b3f36914e6678
Signed-off-by: Sun Austin <austin.sun@intel.com>
This commit is contained in:
Sun Austin 2018-12-18 11:43:00 +08:00
parent d1dc7bdf90
commit 707a12317b
4 changed files with 48 additions and 48 deletions

View File

@ -23,7 +23,7 @@ class CephManagerException(Exception):
message = self.message % kwargs
except TypeError:
LOG.warn(_LW('Exception in string format operation'))
for name, value in kwargs.iteritems():
for name, value in kwargs.items():
LOG.error("%s: %s" % (name, value))
# at least get the core message out if something happened
message = self.message

View File

@ -106,7 +106,7 @@ def collectMemtop(influx_info, node, ci):
fields["platform_avail"] += avail / MiB
fields["platform_hfree"] += hfree
f1.close()
s = generateString(measurement, tags.keys(), tags.values(), fields.keys(), fields.values())
s = generateString(measurement, list(tags.keys()), list(tags.values()), list(fields.keys()), list(fields.values()))
if s is None:
good_string = False
else:
@ -221,7 +221,7 @@ def collectMemstats(influx_info, node, ci, services, syseng_services, openstack_
fields["total"]["vsz"] += vsz
break
# send data to InfluxDB
for key in fields.keys():
for key in fields:
influx_string += "{},'{}'='{}','{}'='{}' '{}'='{}','{}'='{}'".format(measurement, "node", tags["node"], "service", key, "rss", fields[key]["rss"], "vsz", fields[key]["vsz"]) + "\n"
p = Popen("curl -s -o /dev/null 'http://'{}':'{}'/write?db='{}'' --data-binary '{}'".format(influx_info[0], influx_info[1], influx_info[2], influx_string), shell=True)
p.communicate()
@ -333,7 +333,7 @@ def collectSchedtop(influx_info, node, ci, services, syseng_services, openstack_
fields[svc] += occ
fields["total"] += occ
break
for key in fields.keys():
for key in fields:
influx_string += "{},'{}'='{}','{}'='{}' '{}'='{}'".format(measurement, "node", tags["node"], "service", key, "occ", fields[key]) + "\n"
# send data to InfluxDB
p = Popen("curl -s -o /dev/null 'http://'{}':'{}'/write?db='{}'' --data-binary '{}'".format(influx_info[0], influx_info[1], influx_info[2], influx_string), shell=True)
@ -800,7 +800,7 @@ def collectRabbitMq(influx_info, node, ci):
info[i] = "processes_" + info[i]
if info[i].replace("_", "").isalpha() and info[i + 1].isdigit():
fields[info[i]] = info[i + 1]
s = generateString(measurement, tags.keys(), tags.values(), fields.keys(), fields.values())
s = generateString(measurement, list(tags.keys()), list(tags.values()), list(fields.keys()), list(fields.values()))
if s is None:
rabbitmq_output.kill()
else:
@ -993,7 +993,7 @@ def collectFilestats(influx_info, node, ci, services, syseng_services, exclude_l
p.kill()
continue
p.kill()
for key in fields.keys():
for key in fields:
influx_string += "{},'{}'='{}','{}'='{}' '{}'='{}','{}'='{}','{}'='{}'".format(measurement, "node", tags["node"], "service", key, "read/write", fields[key]["read/write"], "write", fields[key]["write"], "read", fields[key]["read"]) + "\n"
# send data to InfluxDB
p = Popen("curl -s -o /dev/null 'http://'{}':'{}'/write?db='{}'' --data-binary '{}'".format(influx_info[0], influx_info[1], influx_info[2], influx_string), shell=True)
@ -1041,7 +1041,7 @@ def collectVswitch(influx_info, node, ci):
for key in fields:
fields[key] = line[i].strip("%")
i += 1
influx_string += "{},'{}'='{}','{}'='{}' '{}'='{}','{}'='{}','{}'='{}','{}'='{}','{}'='{}','{}'='{}','{}'='{}','{}'='{}','{}'='{}'".format(measurement, tags.keys()[0], tags.values()[0], tags.keys()[1], tags.values()[1], fields.keys()[0], fields.values()[0], fields.keys()[1], fields.values()[1], fields.keys()[2], fields.values()[2], fields.keys()[3], fields.values()[3], fields.keys()[4], fields.values()[4], fields.keys()[5], fields.values()[5], fields.keys()[6], fields.values()[6], fields.keys()[7], fields.values()[7], fields.keys()[8], fields.values()[8]) + "\n"
influx_string += "{},'{}'='{}','{}'='{}' '{}'='{}','{}'='{}','{}'='{}','{}'='{}','{}'='{}','{}'='{}','{}'='{}','{}'='{}','{}'='{}'".format(measurement, list(tags.keys())[0], list(tags.values())[0], list(tags.keys())[1], list(tags.values())[1], list(fields.keys())[0], list(fields.values())[0], list(fields.keys())[1], list(fields.values())[1], list(fields.keys())[2], list(fields.values())[2], list(fields.keys())[3], list(fields.values())[3], list(fields.keys())[4], list(fields.values())[4], list(fields.keys())[5], list(fields.values())[5], list(fields.keys())[6], list(fields.values())[6], list(fields.keys())[7], list(fields.values())[7], list(fields.keys())[8], list(fields.values())[8]) + "\n"
vshell_engine_stats_output.kill()
vshell_port_stats_output = Popen("vshell port-stats-list", shell=True, stdout=PIPE)
vshell_port_stats_output.stdout.readline()
@ -1059,7 +1059,7 @@ def collectVswitch(influx_info, node, ci):
for key in fields1:
fields1[key] = line[i].strip("%")
i += 1
influx_string += "{},'{}'='{}','{}'='{}' '{}'='{}','{}'='{}','{}'='{}','{}'='{}','{}'='{}','{}'='{}','{}'='{}'".format(measurement, tags1.keys()[0], tags1.values()[0], tags1.keys()[1], tags1.values()[1], fields1.keys()[0], fields1.values()[0], fields1.keys()[1], fields1.values()[1], fields1.keys()[2], fields1.values()[2], fields1.keys()[3], fields1.values()[3], fields1.keys()[4], fields1.values()[4], fields1.keys()[5], fields1.values()[5], fields1.keys()[6], fields1.values()[6]) + "\n"
influx_string += "{},'{}'='{}','{}'='{}' '{}'='{}','{}'='{}','{}'='{}','{}'='{}','{}'='{}','{}'='{}','{}'='{}'".format(measurement, list(tags1.keys())[0], list(tags1.values())[0], list(tags1.keys())[1], list(tags1.values())[1], list(fields1.keys())[0], list(fields1.values())[0], list(fields1.keys())[1], list(fields1.values())[1], list(fields1.keys())[2], list(fields1.values())[2], list(fields1.keys())[3], list(fields1.values())[3], list(fields1.keys())[4], list(fields1.values())[4], list(fields1.keys())[5], list(fields1.values())[5], list(fields1.keys())[6], list(fields1.values())[6]) + "\n"
vshell_port_stats_output.kill()
vshell_interface_stats_output = Popen("vshell interface-stats-list", shell=True, stdout=PIPE)
vshell_interface_stats_output.stdout.readline()
@ -1078,7 +1078,7 @@ def collectVswitch(influx_info, node, ci):
for key in fields2:
fields2[key] = line[i].strip("%")
i += 1
influx_string += "{},'{}'='{}','{}'='{}' '{}'='{}','{}'='{}','{}'='{}','{}'='{}','{}'='{}','{}'='{}','{}'='{}','{}'='{}','{}'='{}','{}'='{}'".format(measurement, tags2.keys()[0], tags2.values()[0], tags2.keys()[1], tags2.values()[1], fields2.keys()[0], fields2.values()[0], fields2.keys()[1], fields2.values()[1], fields2.keys()[2], fields2.values()[2], fields2.keys()[3], fields2.values()[3], fields2.keys()[4], fields2.values()[4], fields2.keys()[5], fields2.values()[5], fields2.keys()[6], fields2.values()[6], fields2.keys()[7], fields2.values()[7], fields2.keys()[8], fields2.values()[8], fields2.keys()[9], fields2.values()[9]) + "\n"
influx_string += "{},'{}'='{}','{}'='{}' '{}'='{}','{}'='{}','{}'='{}','{}'='{}','{}'='{}','{}'='{}','{}'='{}','{}'='{}','{}'='{}','{}'='{}'".format(measurement, list(tags2.keys())[0], list(tags2.values())[0], list(tags2.keys())[1], list(tags2.values())[1], list(fields2.keys())[0], list(fields2.values())[0], list(fields2.keys())[1], list(fields2.values())[1], list(fields2.keys())[2], list(fields2.values())[2], list(fields2.keys())[3], list(fields2.values())[3], list(fields2.keys())[4], list(fields2.values())[4], list(fields2.keys())[5], list(fields2.values())[5], list(fields2.keys())[6], list(fields2.values())[6], list(fields2.keys())[7], list(fields2.values())[7], list(fields2.keys())[8], list(fields2.values())[8], list(fields2.keys())[9], list(fields2.values())[9]) + "\n"
else:
continue
vshell_interface_stats_output.kill()
@ -1135,7 +1135,7 @@ def collectApiStats(influx_info, node, ci, services, db_port, rabbit_port):
break
lsof_lines.append(line)
lsof_result.kill()
for name, service in services.iteritems():
for name, service in services.items():
pid_list = list()
check_pid = False
if name == "keystone-public":

View File

@ -286,7 +286,7 @@ def get_info_and_display(cc, show=None):
pv_pd_num_ext = 4
pv_pd_num = 3
for k, v in host_storage_attr.iteritems():
for k, v in host_storage_attr.items():
if show['diskview'] or show['all']:
for disk_o in v['host_disks']:
device_node = getattr(disk_o, 'device_node', '')

View File

@ -287,7 +287,7 @@ def parse_arguments(debug, show):
S[0:0] = L_opts
# Enable debug option, but its usage/help is hidden.
D = debug.keys()
D = list(debug.keys())
D.sort()
D.insert(0, 'all')
@ -338,7 +338,7 @@ def parse_arguments(debug, show):
# Enable all debug flags (except libvirt_xml) if 'all' is specified
x = debug['libvirt_xml']
if debug['all']:
{debug.update({e: True}) for e in debug.keys()}
{debug.update({e: True}) for e in debug}
debug['libvirt_xml'] = x
# Flatten show options list
@ -370,8 +370,8 @@ def _translate_keys(collection, convert):
""" For a collection of elements, translate _info field names
into human-readable names based on a list of conversion tuples.
"""
for k, item in collection.iteritems():
keys = item.__dict__.keys()
for k, item in collection.items():
keys = list(item.__dict__.keys())
for from_key, to_key in convert:
if from_key in keys and to_key not in keys:
try:
@ -394,7 +394,7 @@ def _translate_extended_states(collection):
'Crashed', # 0x06
'Suspended' # 0x07
]
for k, item in collection.iteritems():
for k, item in collection.items():
try:
setattr(item, 'power_state',
power_states[getattr(item, 'power_state')])
@ -624,7 +624,7 @@ def do_libvirt_domain_info((host)):
up_total += 1
cpuset_total |= cpuset
cpulist_f = _mask_to_cpulist(mask=cpuset_total)
for key in sorted(cpulist_d.iterkeys()):
for key in sorted(cpulist_d.keys()):
cpulist_p.append(cpulist_d[key])
# Determine if floating or pinned, display appropriate cpulist
@ -833,7 +833,7 @@ def define_option_flags(show, options=[],
if 'all' in options:
{show.update({e: True}) for e in L_brief + L_details}
for e in options:
if e in show.keys():
if e in show:
show.update({e: True})
@ -898,9 +898,9 @@ def print_all_tables(tenants=None,
for C in ['servers', 'pcpus', 'U:dedicated', 'U:shared',
'memory', 'U:memory', 'A:mem_4K', 'A:mem_2M', 'A:mem_1G']:
pt.align[C] = 'r'
for host_name, H in sorted(hypervisors.iteritems(),
for host_name, H in sorted(hypervisors.items(),
key=lambda (k, v): (natural_keys(k))):
A = agg_h[host_name].keys()
A = list(agg_h[host_name].keys())
try:
topology_idx = topologies_idx[host_name]
@ -914,9 +914,9 @@ def print_all_tables(tenants=None,
cpu_id = 0
socket_id = topology_idx[cpu_id]['s']
core_id = topology_idx[cpu_id]['c']
n_sockets = len(topology.keys())
n_cores = len(topology[socket_id].keys())
n_threads = len(topology[socket_id][core_id].keys())
n_sockets = len(list(topology.keys()))
n_cores = len(list(topology[socket_id].keys()))
n_threads = len(list(topology[socket_id][core_id].keys()))
else:
if 'topology' in H.cpu_info:
topology = H.cpu_info['topology']
@ -1019,7 +1019,7 @@ def print_all_tables(tenants=None,
if show['topology']:
print
print('LOGICAL CPU TOPOLOGY (compute hosts):')
for host_name, topology in sorted(topologies.iteritems(),
for host_name, topology in sorted(topologies.items(),
key=lambda (k, v): (natural_keys(k))):
H = hypervisors[host_name]
try:
@ -1038,9 +1038,9 @@ def print_all_tables(tenants=None,
cpu_id = 0
socket_id = topology_idx[cpu_id]['s']
core_id = topology_idx[cpu_id]['c']
n_sockets = len(topology.keys())
n_cores = len(topology[socket_id].keys())
n_threads = len(topology[socket_id][core_id].keys())
n_sockets = len(list(topology.keys()))
n_cores = len(list(topology[socket_id].keys()))
n_threads = len(list(topology[socket_id][core_id].keys()))
print('%s: Model:%s, Arch:%s, Vendor:%s, '
'Sockets=%d, Cores/Socket=%d, Threads/Core=%d, Logical=%d'
@ -1083,7 +1083,7 @@ def print_all_tables(tenants=None,
if show['topology-long']:
print
print('LOGICAL CPU TOPOLOGY (compute hosts):')
for host_name, topology in sorted(topologies.iteritems(),
for host_name, topology in sorted(topologies.items(),
key=lambda (k, v): (natural_keys(k))):
H = hypervisors[host_name]
try:
@ -1102,9 +1102,9 @@ def print_all_tables(tenants=None,
cpu_id = 0
socket_id = topology_idx[cpu_id]['s']
core_id = topology_idx[cpu_id]['c']
n_sockets = len(topology.keys())
n_cores = len(topology[socket_id].keys())
n_threads = len(topology[socket_id][core_id].keys())
n_sockets = len(list(topology.keys()))
n_cores = len(list(topology[socket_id].keys()))
n_threads = len(list(topology[socket_id][core_id].keys()))
print('%s: Model:%s, Arch:%s, Vendor:%s, '
'Sockets=%d, Cores/Socket=%d, Threads/Core=%d, Logical=%d'
@ -1160,7 +1160,7 @@ def print_all_tables(tenants=None,
pt.align[C] = 'r'
for C in ['in_libvirt']:
pt.align[C] = 'c'
for _, S in sorted(servers.iteritems(),
for _, S in sorted(servers.items(),
key=lambda (k, v): (natural_keys(v.host),
v.server_group,
v.instance_name)
@ -1211,7 +1211,7 @@ def print_all_tables(tenants=None,
vcpus_scale = flavor_vcpus
in_libvirt = False
for h, D in domains.iteritems():
for h, D in domains.items():
if S.id in D:
in_libvirt = True
break
@ -1256,9 +1256,9 @@ def print_all_tables(tenants=None,
pt.align[C] = 'r'
for C in ['in_nova']:
pt.align[C] = 'c'
for host, D in sorted(domains.iteritems(),
for host, D in sorted(domains.items(),
key=lambda (k, v): (natural_keys(k))):
for _, S in sorted(D.iteritems(),
for _, S in sorted(D.items(),
key=lambda (k, v): (v['name'])):
in_nova = True if S['uuid'] in servers else False
pt.add_row(
@ -1291,7 +1291,7 @@ def print_all_tables(tenants=None,
'created_at',
])
pt.align = 'l'
for _, M in sorted(migrations.iteritems(),
for _, M in sorted(migrations.items(),
key=lambda (k, v): (k)):
pt.add_row(
[M.instance_uuid,
@ -1327,7 +1327,7 @@ def print_all_tables(tenants=None,
for C in ['id', 'vcpus', 'ram', 'disk', 'ephemeral', 'swap',
'rxtx_factor']:
pt.align[C] = 'r'
for _, F in sorted(flavors.iteritems(),
for _, F in sorted(flavors.items(),
key=lambda (k, v): (k)):
if F.id in flavors_in_use:
pt.add_row(
@ -1361,7 +1361,7 @@ def print_all_tables(tenants=None,
pt.align = 'l'
for C in ['id', 'min_disk', 'min_ram', 'status']:
pt.align[C] = 'r'
for _, I in sorted(images.iteritems(),
for _, I in sorted(images.items(),
key=lambda (k, v): (k)):
if I.id in images_in_use:
pt.add_row(
@ -1387,7 +1387,7 @@ def print_all_tables(tenants=None,
'Metadata',
])
pt.align = 'l'
for _, S in sorted(server_groups.iteritems(),
for _, S in sorted(server_groups.items(),
key=lambda (k, v): (k)):
if S.id in server_groups_in_use:
tenant = tenants[S.project_id].name
@ -1615,7 +1615,7 @@ def get_info_and_display(show=None):
# translate fields into human-readable names
_translate_keys(images, convert)
for I_id, I in images.iteritems():
for I_id, I in images.items():
meta = copy.deepcopy(I.properties)
I.properties = {}
for k, v in meta.items():
@ -1708,7 +1708,7 @@ def get_info_and_display(show=None):
# Get extra_specs
extra_specs = {}
for f_id, F in flavors.iteritems():
for f_id, F in flavors.items():
try:
specs = F.get_keys()
except Exception as e:
@ -1794,7 +1794,7 @@ def get_info_and_display(show=None):
# Build up aggregate list per compute host
agg_h = {}
for H in hypervisors.keys():
for H in hypervisors:
agg_h[H] = {}
for A in aggregates.values():
for H in A.hosts:
@ -1837,7 +1837,7 @@ def get_info_and_display(show=None):
sys.exit(1)
hosts = []
for h in hypervisors.keys():
for h in hypervisors:
hosts.append(h)
# Launch tasks
@ -1851,7 +1851,7 @@ def get_info_and_display(show=None):
# Reap aged workers that exceed hang timeout
now = time.time()
reap = []
for pid in active_pids.keys():
for pid in active_pids:
if pid == 0:
continue
try:
@ -1957,7 +1957,7 @@ def get_info_and_display(show=None):
# We need libvirt topology information to make sense of cpusets.
have_topology = True
try:
if len(topologies_idx[host].keys()) < 1:
if len(list(topologies_idx[host].keys())) < 1:
have_topology = False
except:
have_topology = False
@ -2042,7 +2042,7 @@ def get_info_and_display(show=None):
server_mismatch = False
for S in servers.values():
in_libvirt = False
for h, D in domains.iteritems():
for h, D in domains.items():
if S.id in D and S.host == h:
in_libvirt = True
break
@ -2053,8 +2053,8 @@ def get_info_and_display(show=None):
% (S.id, S.instance_name, S.name, S.host))
# Detect mismatch where server is in libvirt but not in nova
for host, D in domains.iteritems():
for k, S in D.iteritems():
for host, D in domains.items():
for k, S in D.items():
in_nova = False
uuid = S['uuid']
if uuid in servers and servers[uuid].host == host: