Fix pylint errors

Fixing the remaining pylint errors and removing them from the pylint disable
list so they cannot be re-introduced in the future:
E1102: not-callable
E1120: no-value-for-parameter
E1128: assignment-from-none

Change-Id: I0f59fdcd0fd20e9d0a8042ba3f7f2a5a20a701db
Story: 2007082
Task: 41335
Signed-off-by: Bart Wensley <barton.wensley@windriver.com>
This commit is contained in:
Bart Wensley 2020-12-03 10:12:49 -06:00
parent c71c13f5e1
commit 164e8be8dd
14 changed files with 38 additions and 37 deletions

View File

@@ -361,8 +361,8 @@ class SubcloudInstall(object):
LOG.debug("update_iso_cmd:(%s)", str_cmd)
try:
with open(os.devnull, "w") as fnull:
subprocess.check_call(update_iso_cmd, stdout=fnull,
stderr=fnull)
subprocess.check_call( # pylint: disable=E1102
update_iso_cmd, stdout=fnull, stderr=fnull)
except subprocess.CalledProcessError:
msg = "Failed to update iso %s, " % str(update_iso_cmd)
raise Exception(msg)
@@ -384,8 +384,8 @@ class SubcloudInstall(object):
]
try:
with open(os.devnull, "w") as fnull:
subprocess.check_call(cleanup_cmd, stdout=fnull,
stderr=fnull)
subprocess.check_call( # pylint: disable=E1102
cleanup_cmd, stdout=fnull, stderr=fnull)
except subprocess.CalledProcessError:
LOG.error("Failed to delete boot files.")

View File

@@ -55,10 +55,11 @@ def run_playbook(log_file, playbook_command):
f_out_log.write(txt)
f_out_log.flush()
subprocess.check_call(playbook_command,
stdout=f_out_log,
stderr=f_out_log,
env=exec_env)
subprocess.check_call( # pylint: disable=E1102
playbook_command,
stdout=f_out_log,
stderr=f_out_log,
env=exec_env)
except subprocess.CalledProcessError:
raise PlaybookExecutionFailed(playbook_cmd=playbook_command)
except Exception as e:

View File

@@ -39,8 +39,9 @@ class ResourceManager(object):
resource = []
for json_object in json_objects:
for resource_data in json_object:
resource.append(self.resource_class(self, resource_data,
json_object[resource_data]))
resource.append(self.resource_class( # pylint: disable=E1102
self, resource_data,
json_object[resource_data]))
return resource
def _list(self, url, response_key=None):
@@ -75,9 +76,10 @@ class ResourceManager(object):
for json_object in json_objects:
data = json_object.get('usage').keys()
for values in data:
resource.append(self.resource_class(self, values,
json_object['limits'][values],
json_object['usage'][values]))
resource.append(self.resource_class( # pylint: disable=E1102
self, values,
json_object['limits'][values],
json_object['usage'][values]))
return resource
def _delete(self, url):

View File

@@ -670,7 +670,9 @@ def initialize_subcloud_group_default(engine):
subcloud_group = sqlalchemy.Table('subcloud_group', meta, autoload=True)
try:
with engine.begin() as conn:
conn.execute(subcloud_group.insert(), default_group)
conn.execute(
subcloud_group.insert(), # pylint: disable=E1120
default_group)
LOG.info("Default Subcloud Group created")
except DBDuplicateEntry:
# The default already exists.

View File

@@ -180,7 +180,7 @@ def upgrade(migrate_engine):
# populate the sw_update_opts_default with the default values.
con = migrate_engine.connect()
con.execute(sw_update_opts_default.insert(),
con.execute(sw_update_opts_default.insert(), # pylint: disable=E1120
storage_apply_type=vim.APPLY_TYPE_PARALLEL,
compute_apply_type=vim.APPLY_TYPE_PARALLEL,
max_parallel_computes=10,

View File

@@ -74,7 +74,9 @@ def upgrade(migrate_engine):
# Inserting the GROUP as ID 1,
# This should increment the pkey to 2
with migrate_engine.begin() as conn:
conn.execute(subcloud_group.insert(), default_group)
conn.execute(
subcloud_group.insert(), # pylint: disable=E1120
default_group)
# postgres does not increment the subcloud group id sequence
# after the insert above as part of the migrate.

View File

@@ -127,14 +127,13 @@ class ManagerClient(RPCClient):
update_state_only=update_state_only,
audit_fail_count=audit_fail_count))
def update_subcloud_sync_endpoint_type(self, ctxt, subcloud_id,
def update_subcloud_sync_endpoint_type(self, ctxt,
subcloud_name,
endpoint_type_list,
openstack_installed):
return self.cast(
ctxt,
self.make_msg('update_subcloud_sync_endpoint_type',
subcloud_id=subcloud_id,
subcloud_name=subcloud_name,
endpoint_type_list=endpoint_type_list,
openstack_installed=openstack_installed))

View File

@@ -84,7 +84,7 @@ class Middleware(Application):
@webob.dec.wsgify(RequestClass=Request)
def __call__(self, req):
response = self.process_request(req)
response = self.process_request(req) # pylint: disable=E1128
if response:
return response
# call the next app on the stack to process the request

View File

@@ -108,8 +108,7 @@ def upgrade(migrate_engine):
meta.drop_all(tables=tables[:index])
raise
rows = quota_classes.count(). \
where(quota_classes.c.class_name == 'default').execute().scalar()
rows = quota_classes.count().where(quota_classes.c.class_name == 'default').execute().scalar() # pylint: disable=E1120
# Do not add entries if there are already 'default' entries. We don't
# want to write over something the user added.
@@ -117,7 +116,7 @@ def upgrade(migrate_engine):
created_at = datetime.datetime.now()
# Set default quota limits
qci = quota_classes.insert()
qci = quota_classes.insert() # pylint: disable=E1120
for resource, default in CONF.dc_orch_global_limit.items():
qci.execute({'created_at': created_at,
'class_name': QUOTA_CLASS_NAME_DEFAULT,

View File

@@ -112,7 +112,7 @@ class FernetKeyManager(manager.Manager):
with open(os.devnull, "w") as fnull:
try:
subprocess.check_call(KEY_ROTATE_CMD,
subprocess.check_call(KEY_ROTATE_CMD, # pylint: disable=E1102
stdout=fnull,
stderr=fnull)
except subprocess.CalledProcessError:

View File

@@ -174,11 +174,11 @@ class EngineService(service.Service):
project_id, user_id)
@request_context
def quota_sync_for_project(self, context, project_id):
def quota_sync_for_project(self, context, project_id, user_id):
# On Demand Quota Sync for a project, will be triggered by KB-API
LOG.info("On Demand Quota Sync Called for: %s",
project_id)
self.qm.quota_sync_for_project(project_id)
LOG.info("On Demand Quota Sync Called for: %s %s",
project_id, user_id)
self.qm.quota_sync_for_project(project_id, user_id)
@request_context
def add_subcloud(self, ctxt, subcloud_name, sw_version):

View File

@@ -775,7 +775,7 @@ class SyncThread(object):
m_resources = None
db_resources = None
# Query subcloud first. If not reachable, abort audit.
sc_resources = self.get_subcloud_resources(resource_type)
sc_resources = self.get_subcloud_resources(resource_type) # pylint: disable=E1128
if sc_resources is None:
return m_resources, db_resources, sc_resources
db_resources = self.get_db_master_resources(resource_type)
@@ -787,7 +787,7 @@ if resource_type in SyncThread.master_resources_dict:
if resource_type in SyncThread.master_resources_dict:
m_resources = SyncThread.master_resources_dict[resource_type]
else:
m_resources = self.get_master_resources(resource_type)
m_resources = self.get_master_resources(resource_type) # pylint: disable=E1128
if m_resources is not None:
SyncThread.master_resources_dict[resource_type] = m_resources
return m_resources

View File

@@ -63,9 +63,10 @@ class EngineClient(object):
project_id=project_id,
user_id=user_id))
def quota_sync_for_project(self, ctxt, project_id):
def quota_sync_for_project(self, ctxt, project_id, user_id):
return self.cast(ctxt, self.make_msg('quota_sync_for_project',
project_id=project_id))
project_id=project_id,
user_id=user_id))
def keypair_sync_for_user(self, ctxt, job_id, payload):
return self.cast(

View File

@@ -60,15 +60,10 @@ load-plugins=
# W1113: keyword-arg-before-vararg
# W1201: logging-not-lazy
# W1401: anomalous-backslash-in-string
# E detect Errors for important programming issues (i.e. most probably bug)
# E1102: not-callable
# E1120: no-value-for-parameter (sqlalchemy)
# E1128: assignment-from-none
disable=C,R,fixme,
W0102,W0105,W0107,W0123,W0201,W0211,W0212,W0221,W0223,W0231,W0235,
W0311,W0402,W0403,W0603,W0612,W0613,W0621,W0622,W0631,W0703,W0706,
W1113,W1201,W1401,
E1102,E1120,E1128
W1113,W1201,W1401
[REPORTS]