Enable Flake8 E12X Errors

Flake8 currently ignores the following Errors:
E121: continuation line under-indented for hanging indent
E123: closing bracket does not match indentation of opening line's bracket
E124: closing bracket doesn't match visual indentation
E125: continuation line with same indent as next logical line
E126: continuation line over-indented for hanging indent
E127: continuation line over-indented for visual indent
E128: continuation line under-indented for visual indent
Enable them for more consistent formatting of code

Change-Id: I415d4824a1f335ba3fceb488b0ae60b9861a036a
Story: 2004515
Task: 30076
Signed-off-by: Eric Barrett <eric.barrett@windriver.com>
This commit is contained in:
Eric Barrett 2019-03-29 08:59:40 -04:00
parent 98f2ff7bf4
commit 7bf3a477f3
11 changed files with 90 additions and 99 deletions

View File

@ -33,13 +33,13 @@ class CephManagerException(Exception):
class CephPoolSetQuotaFailure(CephManagerException):
message = _("Error seting the OSD pool "
"quota %(name)s for %(pool)s to %(value)s") \
+ ": %(reason)s"
"quota %(name)s for %(pool)s to "
"%(value)s") + ": %(reason)s"
class CephPoolGetQuotaFailure(CephManagerException):
message = _("Error geting the OSD pool quota for %(pool)s") \
+ ": %(reason)s"
message = _("Error geting the OSD pool quota for "
"%(pool)s") + ": %(reason)s"
class CephPoolCreateFailure(CephManagerException):

View File

@ -140,9 +140,9 @@ class HandleUpgradesMixin(object):
# unsuppress require_jewel_osds in case upgrade
# is aborting
if (state in [
constants.UPGRADE_ABORTING,
constants.UPGRADE_ABORT_COMPLETING,
constants.UPGRADE_ABORTING_ROLLBACK]):
constants.UPGRADE_ABORTING,
constants.UPGRADE_ABORT_COMPLETING,
constants.UPGRADE_ABORTING_ROLLBACK]):
self.wait_for_upgrade_complete = False
return health

View File

@ -99,7 +99,7 @@ def is_partitioning_correct(disk_path, partition_sizes):
str(size) + "[\\.0]*MiB")
if not re.search(regex, output, re.MULTILINE):
print("Journal partition %(node)s size is not %(size)s, "
"zapping disk" % {"node": partition_node, "size": size})
"zapping disk" % {"node": partition_node, "size": size})
return False
partition_index += 1
@ -124,14 +124,14 @@ def create_partitions(disk_path, partition_sizes):
links = []
if os.path.isdir(DISK_BY_PARTUUID):
links = [os.path.join(DISK_BY_PARTUUID, l) for l in os.listdir(DISK_BY_PARTUUID)
if os.path.islink(os.path.join(DISK_BY_PARTUUID, l))]
if os.path.islink(os.path.join(DISK_BY_PARTUUID, l))]
# Erase all partitions on current node by creating a new GPT table
_, err, ret = command(["parted", "-s", disk_node, "mktable", "gpt"])
if ret:
print("Error erasing partition table of %(node)s\n"
"Return code: %(ret)s reason: %(reason)s" %
{"node": disk_node, "ret": ret, "reason": err})
"Return code: %(ret)s reason: %(reason)s" %
{"node": disk_node, "ret": ret, "reason": err})
exit(1)
# Erase old symlinks
@ -152,20 +152,20 @@ def create_partitions(disk_path, partition_sizes):
"end": used_space_mib + size,
"reason": err}
print("Created partition from start=%(start)s MiB to end=%(end)s MiB"
" on %(disk_node)s" % parms)
" on %(disk_node)s" % parms)
if ret:
print("Failed to create partition with "
"start=%(start)s, end=%(end)s "
"on %(disk_node)s reason: %(reason)s" % parms)
"start=%(start)s, end=%(end)s "
"on %(disk_node)s reason: %(reason)s" % parms)
exit(1)
# Set partition type to ceph journal
# noncritical operation, it makes 'ceph-disk list' output correct info
cmd = ['sgdisk',
'--change-name={num}:ceph journal'.format(num=num),
'--typecode={num}:{uuid}'.format(
num=num,
uuid=JOURNAL_UUID,
),
num=num,
uuid=JOURNAL_UUID,
),
disk_node]
_, err, ret = command(cmd)
if ret:
@ -263,11 +263,11 @@ def fix_location(mount_point, journal_path, osdid):
"reason": err}
if not ret:
print("Prepared new journal partition: %(journal_node)s "
"for osd id: %(osdid)s" % params)
"for osd id: %(osdid)s" % params)
else:
print("Error initializing journal node: "
"%(journal_node)s for osd id: %(osdid)s "
"ceph-osd return code: %(ret)s reason: %(reason)s" % params)
"%(journal_node)s for osd id: %(osdid)s "
"ceph-osd return code: %(ret)s reason: %(reason)s" % params)
########
@ -308,8 +308,8 @@ def main(argv):
create_partitions(partitions['disk_path'], partitions['journals'])
else:
print("Partition table for %s is correct, "
"no need to repartition" %
device_path_to_device_node(partitions['disk_path']))
"no need to repartition" %
device_path_to_device_node(partitions['disk_path']))
elif location:
# we need to have the data partition mounted & we can let it mounted
mount_point = mount_data_partition(location['data_path'],
@ -319,13 +319,13 @@ def main(argv):
location['journal_path'],
location['osdid']):
print("Fixing journal location for "
"OSD id: %(id)s" % {"node": location['data_path'],
"id": location['osdid']})
"OSD id: %(id)s" % {"node": location['data_path'],
"id": location['osdid']})
fix_location(mount_point,
location['journal_path'],
location['osdid'])
else:
print("Journal location for %s is correct,"
"no need to change it" % location['data_path'])
"no need to change it" % location['data_path'])
main(sys.argv[1:])

View File

@ -75,8 +75,8 @@ def get_osd_tree():
'osd', 'tree', '--format', 'json']
try:
p = subprocess.Popen(command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
output, error = p.communicate()
if p.returncode != 0:
raise OsdException(
@ -100,7 +100,7 @@ def get_osd_tree():
def osd_match_status(target_osd, target_status,
reverse_logic):
reverse_logic):
LOG.info(('Match status: '
'target_osd={}, '
'target status={}, '
@ -237,7 +237,7 @@ if __name__ == "__main__":
LOG.warn(
('Retry error: {}. '
'Elapsed time: {:.02f} seconds'.format(
e, time.time() - start)))
e, time.time() - start)))
except OsdException as e:
LOG.warn(
('OSD wait error: {}. '

View File

@ -90,9 +90,9 @@ class LogMgmtDaemon():
# Use python's log rotation, rather than logrotate
handler = logging.handlers.RotatingFileHandler(
LOG_FILE,
maxBytes=LOG_FILE_MAX_BYTES,
backupCount=LOG_FILE_BACKUP_COUNT)
LOG_FILE,
maxBytes=LOG_FILE_MAX_BYTES,
backupCount=LOG_FILE_BACKUP_COUNT)
my_logger = logging.getLogger()
my_logger.setLevel(level)
@ -147,10 +147,10 @@ class LogMgmtDaemon():
continue
# Ignore some files
if '/var/log/puppet' in fname \
or '/var/log/dmesg' in fname \
or '/var/log/rabbitmq' in fname \
or '/var/log/lastlog' in fname:
if ('/var/log/puppet' in fname
or '/var/log/dmesg' in fname
or '/var/log/rabbitmq' in fname
or '/var/log/lastlog' in fname):
continue
if os.path.exists(fname):

View File

@ -10,10 +10,9 @@ SPDX-License-Identifier: Apache-2.0
import setuptools
setuptools.setup(name='logmgmt',
version='1.0.0',
description='logmgmt',
license='Apache-2.0',
packages=['logmgmt'],
entry_points={
}
)
version='1.0.0',
description='logmgmt',
license='Apache-2.0',
packages=['logmgmt'],
entry_points={}
)

View File

@ -88,9 +88,9 @@ class BuddyInfo(object):
ret_string += " Zone: %s\n" % zoneinfo.get("zone")
ret_string += " Free KiB in zone: %.2f\n" % (sum(zoneinfo.get("usage")) / (1024.0))
ret_string += '\t{0:{align}{width}} {1:{align}{width}} {2:{align}{width}}\n'.format(
"Fragment size", "Free fragments", "Total available KiB",
width=width,
align="<")
"Fragment size", "Free fragments", "Total available KiB",
width=width,
align="<")
for idx in range(len(zoneinfo.get("sz_fragment"))):
ret_string += '\t{order:{align}{width}} {nr:{align}{width}} {usage:{align}{width}}\n'.format(
width=width,

View File

@ -410,7 +410,7 @@ def main():
ts = datetime.datetime.now()
if show['debug']:
print("%s: %s options: view:%s System api version: %s"
% (prog, ts.isoformat(), show, api_version))
% (prog, ts.isoformat(), show, api_version))
cgts_client_creds = get_system_creds()
if not cgts_client_creds['os_username']:

View File

@ -312,14 +312,14 @@ def parse_arguments(debug, show):
default_config_files=[NOVACONF],
prog=os.path.basename(sys.argv[0]),
description=(
'Tool to summarize server resouce usage and vcpu placement'
'related attributes for nova and libvirt.'),
'Tool to summarize server resouce usage and vcpu placement'
'related attributes for nova and libvirt.'),
# NOTE: oslo_config implementation of _CachedArgumentParser does not
# configure argparse formatter_class. The resulting epilog text is
# automatically text-wrapped which is not desired. Manually adding
# newlines does not work either. The epilog text is disabled for now.
#epilog=help_text_epilog(),
)
)
# Configure logging to appropriate level
level = logging.INFO
@ -496,7 +496,7 @@ def range_to_list(csv_range=None):
if not csv_range:
return []
ranges = [(lambda L: range(L[0], L[-1] + 1))([int(x) for x in r.split('-')])
for r in csv_range.split(',')]
for r in csv_range.split(',')]
return [y for x in ranges for y in x]
@ -874,7 +874,7 @@ def print_all_tables(tenants=None,
'Avail Zone',
'Hosts',
'Metadata',
], caching=False)
], caching=False)
pt.align = 'l'
for name, A in sorted(aggregates.items()):
pt.add_row(
@ -882,7 +882,7 @@ def print_all_tables(tenants=None,
str(A.availability_zone),
", ".join([str(x) for x in A.hosts]),
str(A.metadata)
])
])
print(pt)
# Print list of compute host hypervisors, showing per numa details
@ -906,7 +906,7 @@ def print_all_tables(tenants=None,
'A:mem_2M',
'A:mem_1G',
'Aggregate',
])
])
pt.align = 'l'
for C in ['servers', 'pcpus', 'U:dedicated', 'U:shared',
'memory', 'U:memory', 'A:mem_4K', 'A:mem_2M', 'A:mem_1G']:
@ -965,7 +965,7 @@ def print_all_tables(tenants=None,
cell['memory_avail_1G'],
textwrap.fill(", ".join([str(x) for x in A]),
width=75),
])
])
else:
pt.add_row(
['', # host
@ -983,7 +983,7 @@ def print_all_tables(tenants=None,
cell['memory_avail_2M'],
cell['memory_avail_1G'],
'', # agg
])
])
first = False
if len(computes_cell[host_name]) < 1:
@ -1005,7 +1005,7 @@ def print_all_tables(tenants=None,
'-', # memory_avail_2M
'-', # memory_avail_1G
", ".join([str(x) for x in A]),
])
])
# Add row with statistics
Y = statistics
@ -1025,7 +1025,7 @@ def print_all_tables(tenants=None,
'-', # memory_avail_2M
'-', # memory_avail_1G
'-', # agg
])
])
print(pt)
# Print list of compute hosts topology
@ -1133,7 +1133,7 @@ def print_all_tables(tenants=None,
'thread_id',
'sibling_id',
'affinity'
])
])
pt.align = 'r'
pt.align['affinity'] = 'l'
for i in cpu_ids:
@ -1144,7 +1144,7 @@ def print_all_tables(tenants=None,
topology_idx[i]['t'],
list_to_range(siblings[i]) or '-',
'0x%x' % (1 << i)
])
])
print(pt)
print()
@ -1167,7 +1167,7 @@ def print_all_tables(tenants=None,
'memory',
'instance_topology',
'in_libvirt',
])
])
pt.align = 'l'
for C in ['vcpus', 'memory']:
pt.align[C] = 'r'
@ -1175,10 +1175,10 @@ def print_all_tables(tenants=None,
pt.align[C] = 'c'
for _, S in sorted(servers.items(),
key=lambda k_v4: (natural_keys(k_v4[1].host),
k_v4[1].server_group,
k_v4[1].instance_name)
k_v4[1].server_group,
k_v4[1].instance_name)
if (k_v4[1].host is not None) else 'None'
):
):
if S.server_group is not None and S.server_group:
match = re_server_group.search(S.server_group)
if match:
@ -1241,7 +1241,7 @@ def print_all_tables(tenants=None,
flavor_ram,
S.topology,
'yes' if in_libvirt else 'NO',
])
])
print(pt)
# Print each libvirt domain info
@ -1260,7 +1260,7 @@ def print_all_tables(tenants=None,
'nodelist',
'cpulist',
'in_nova',
])
])
pt.align = 'l'
for C in ['id', 'vcpus', 'memory', 'nodelist']:
pt.align[C] = 'r'
@ -1282,7 +1282,7 @@ def print_all_tables(tenants=None,
list_to_range(S['nodelist']) or '-',
list_to_range(S['cpulist']) or '-',
'yes' if in_nova else 'NO',
])
])
print(pt)
# Print list of in-progress migrations
@ -1299,7 +1299,7 @@ def print_all_tables(tenants=None,
'S:flavor[PKey]',
'D:flavor[PKey]',
'created_at',
])
])
pt.align = 'l'
for _, M in sorted(migrations.items(),
key=lambda k_v6: (k_v6[0])):
@ -1313,7 +1313,7 @@ def print_all_tables(tenants=None,
M.new_instance_type_id,
M.old_instance_type_id,
M.created_at,
])
])
print(pt)
# Print flavors for instances currently in use
@ -1332,7 +1332,7 @@ def print_all_tables(tenants=None,
'rxtx_factor',
'is_public',
'extra_specs',
])
])
pt.align = 'l'
for C in ['id', 'vcpus', 'ram', 'disk', 'ephemeral', 'swap',
'rxtx_factor']:
@ -1351,7 +1351,7 @@ def print_all_tables(tenants=None,
F.rxtx_factor,
F.is_public,
pp.pformat(extra_specs[F.id]),
])
])
print(pt)
# Print images for instances currently in use
@ -1367,7 +1367,7 @@ def print_all_tables(tenants=None,
'size(MB)',
'status',
'properties',
])
])
pt.align = 'l'
for C in ['id', 'min_disk', 'min_ram', 'status']:
pt.align[C] = 'r'
@ -1382,7 +1382,7 @@ def print_all_tables(tenants=None,
'%.2f' % (I.size / 1024.0 / 1024.0),
I.status,
I.properties,
])
])
print(pt)
# Print server groups for instances currently in use (exclude members data)
@ -1395,7 +1395,7 @@ def print_all_tables(tenants=None,
'Name',
'Policies',
'Metadata',
])
])
pt.align = 'l'
for _, S in sorted(server_groups.items(),
key=lambda k_v9: (k_v9[0])):
@ -1407,7 +1407,7 @@ def print_all_tables(tenants=None,
S.name,
str(S.policies),
str(S.metadata),
])
])
print(pt)
@ -1420,7 +1420,7 @@ def _get_host_id(tenant_id=None, host_name=None):
def start_process():
logger.debug('Starting: %s, %d'
% (multiprocessing.current_process().name, os.getpid()))
% (multiprocessing.current_process().name, os.getpid()))
def get_info_and_display(show=None):
@ -1505,7 +1505,7 @@ def get_info_and_display(show=None):
q = select([S.name,
S.region_name,
S.deleted_at]
).where(S.deleted_at is None)
).where(S.deleted_at is None)
result = conn.execute(q)
for row in result:
field = 'region_name'
@ -1522,7 +1522,7 @@ def get_info_and_display(show=None):
q = select([S.name,
S.region_name,
S.deleted_at]
).where(S.deleted_at is None)
).where(S.deleted_at is None)
result = conn.execute(q)
for row in result:
name = str(row['name'])
@ -1680,7 +1680,7 @@ def get_info_and_display(show=None):
'image_id': image_id,
'image_name': image_name,
'vars': vars(V),
}
}
del volumes_
# Get list of migrations, sort-by id which puts them in time order.
@ -1828,12 +1828,12 @@ def get_info_and_display(show=None):
max(1,
min(multiprocessing.cpu_count(),
int(0.6 * (avail_MiB - 100.0) / process_MiB)
)
)
)
)
)
logger.debug('WORKERS: avail=%.2f MiB, process=%.2f MiB, pool_size=%d'
% (avail_MiB, process_MiB, pool_size))
% (avail_MiB, process_MiB, pool_size))
# Create pool of workers that connect to libvirt hypervisor.
try:
@ -1956,7 +1956,7 @@ def get_info_and_display(show=None):
q = select([CN.hypervisor_hostname,
CN.numa_topology,
CN.deleted]
).where(CN.deleted == 0)
).where(CN.deleted == 0)
result = conn.execute(q)
for row in result:
host = row['hypervisor_hostname']

10
tox.ini
View File

@ -37,13 +37,6 @@ commands =
[flake8]
# E series are pep8
# E121 continuation line under-indented for hanging indent
# E123 closing bracket does not match indentation of opening bracket's line
# E124 closing bracket does not match visual indentation
# E125 continuation line with same indent as next logical line
# E126 continuation line over-indented for hanging indent
# E127 continuation line over-indented for visual indent
# E128 continuation line under-indented for visual indent
# E265 block comment should start with '# '
# E266 too many leading '#' for block comment
# E302 expected 2 blank lines, found 1
@ -76,8 +69,7 @@ commands =
# B301 Python 3 does not include `.iter*` methods on dictionaries.
# F series
# F401 'module' imported but unused
ignore = E121,E123,E124,E125,E126,E127,E128,E265,E266,
E302,E303,E305,E402,E501,E722
ignore = E265,E266,E302,E303,E305,E402,E501,E722
H101,H102,H104,H201,H238,H237,H306,H401,H404,H405,
W191,W291,W391,W503,
B001,B007,B009,B010,B301,

View File

@ -38,7 +38,7 @@ def get_licenses_info():
features = sm_common.flex_lm_license_get_feature_list()
if features.value:
feature_list = [feature for feature in features.value.split(',')
if feature.startswith(constants.FEATURE_PREFIX)]
if feature.startswith(constants.FEATURE_PREFIX)]
sm_common.flex_lm_license_free(features)
lc_attrs_list = []
@ -66,14 +66,14 @@ def get_licenses_info():
name = constants.LICENSE_MAP.get(feature)
if process_license and name:
lc_attrs = dict(name=name, status=status,
expiry_date=expiry_date)
expiry_date=expiry_date)
else:
lc_attrs = dict()
if lc_attrs:
license_name = lc_attrs.get('name')
if (not any(lc.get('name') == license_name
for lc in lc_attrs_list)):
for lc in lc_attrs_list)):
# Get the list of license attributes for all valid
# licenses and installed expired/invalid licenses
lc_attrs_list.append(lc_attrs)
@ -85,8 +85,8 @@ def get_licenses_info():
# not-installed licenses
for license_name in licenses:
lc_attrs = dict(name=license_name,
status=constants.NOT_INSTALLED,
expiry_date='-')
status=constants.NOT_INSTALLED,
expiry_date='-')
lc_attrs_list.append(lc_attrs)
# Return the list of license attributes
@ -145,7 +145,7 @@ def verify_feature_license(feature_name, feature_version=None):
# Return license attributes of a valid license
lc_attrs = dict(name=license_name, status=constants.INSTALLED,
expiry_date=expire_date_text.value)
expiry_date=expire_date_text.value)
return lc_attrs
@ -162,7 +162,7 @@ def verify_license(license_file):
features = sm_common.flex_lm_license_get_feature_list()
if features.value:
feature_list = [feature for feature in features.value.split(',')
if feature.startswith(constants.FEATURE_PREFIX)]
if feature.startswith(constants.FEATURE_PREFIX)]
sm_common.flex_lm_license_free(features)
# Validate license of each feature in the license file
@ -173,7 +173,7 @@ def verify_license(license_file):
if system_mode == sysinv_constants.SYSTEM_MODE_SIMPLEX:
product_license = constants.AIO_SIMPLEX_SYSTEM_LICENSES
elif (system_mode == sysinv_constants.SYSTEM_MODE_DUPLEX or
system_mode == sysinv_constants.SYSTEM_MODE_DUPLEX_DIRECT):
system_mode == sysinv_constants.SYSTEM_MODE_DUPLEX_DIRECT):
product_license = constants.AIO_SYSTEM_LICENSES
elif system_type == sysinv_constants.TIS_STD_BUILD:
product_license = constants.STD_SYSTEM_LICENSES