From 164e8be8dd972031547127b4c1a820dc27a4886c Mon Sep 17 00:00:00 2001
From: Bart Wensley
Date: Thu, 3 Dec 2020 10:12:49 -0600
Subject: [PATCH] Fix pylint errors

Fixing remaining pylint errors and removing from pylint disable list
so they cannot be re-introduced in the future:
E1102: not-callable
E1120: no-value-for-parameter
E1128: assignment-from-none

Change-Id: I0f59fdcd0fd20e9d0a8042ba3f7f2a5a20a701db
Story: 2007082
Task: 41335
Signed-off-by: Bart Wensley
---
 distributedcloud/dccommon/subcloud_install.py        |  8 ++++----
 distributedcloud/dccommon/utils.py                   |  9 +++++----
 distributedcloud/dcdbsync/dbsyncclient/base.py       | 12 +++++++-----
 distributedcloud/dcmanager/db/sqlalchemy/api.py      |  4 +++-
 .../migrate_repo/versions/001_first_version.py       |  2 +-
 .../versions/006_add_subcloud_group_table.py         |  4 +++-
 distributedcloud/dcmanager/rpc/client.py             |  3 +--
 distributedcloud/dcorch/api/proxy/common/service.py  |  2 +-
 .../sqlalchemy/migrate_repo/versions/001_initial.py  |  5 ++---
 distributedcloud/dcorch/engine/fernet_key_manager.py |  2 +-
 distributedcloud/dcorch/engine/service.py            |  8 ++++----
 distributedcloud/dcorch/engine/sync_thread.py        |  4 ++--
 distributedcloud/dcorch/rpc/client.py                |  5 +++--
 distributedcloud/pylint.rc                           |  7 +------
 14 files changed, 38 insertions(+), 37 deletions(-)

diff --git a/distributedcloud/dccommon/subcloud_install.py b/distributedcloud/dccommon/subcloud_install.py
index 1cbb49270..ecbb12c88 100644
--- a/distributedcloud/dccommon/subcloud_install.py
+++ b/distributedcloud/dccommon/subcloud_install.py
@@ -361,8 +361,8 @@ class SubcloudInstall(object):
         LOG.debug("update_iso_cmd:(%s)", str_cmd)
         try:
             with open(os.devnull, "w") as fnull:
-                subprocess.check_call(update_iso_cmd, stdout=fnull,
-                                      stderr=fnull)
+                subprocess.check_call(  # pylint: disable=E1102
+                    update_iso_cmd, stdout=fnull, stderr=fnull)
         except subprocess.CalledProcessError:
             msg = "Failed to update iso %s, " % str(update_iso_cmd)
             raise Exception(msg)
@@ -384,8 +384,8 @@ class SubcloudInstall(object):
         ]
         try:
             with open(os.devnull, "w") as fnull:
-                subprocess.check_call(cleanup_cmd, stdout=fnull,
-                                      stderr=fnull)
+                subprocess.check_call(  # pylint: disable=E1102
+                    cleanup_cmd, stdout=fnull, stderr=fnull)
         except subprocess.CalledProcessError:
             LOG.error("Failed to delete boot files.")
 
diff --git a/distributedcloud/dccommon/utils.py b/distributedcloud/dccommon/utils.py
index 7e064167e..5268f16dd 100644
--- a/distributedcloud/dccommon/utils.py
+++ b/distributedcloud/dccommon/utils.py
@@ -55,10 +55,11 @@ def run_playbook(log_file, playbook_command):
                 f_out_log.write(txt)
                 f_out_log.flush()
 
-            subprocess.check_call(playbook_command,
-                                  stdout=f_out_log,
-                                  stderr=f_out_log,
-                                  env=exec_env)
+            subprocess.check_call(  # pylint: disable=E1102
+                playbook_command,
+                stdout=f_out_log,
+                stderr=f_out_log,
+                env=exec_env)
     except subprocess.CalledProcessError:
         raise PlaybookExecutionFailed(playbook_cmd=playbook_command)
     except Exception as e:
diff --git a/distributedcloud/dcdbsync/dbsyncclient/base.py b/distributedcloud/dcdbsync/dbsyncclient/base.py
index fc8d04a56..54395c884 100644
--- a/distributedcloud/dcdbsync/dbsyncclient/base.py
+++ b/distributedcloud/dcdbsync/dbsyncclient/base.py
@@ -39,8 +39,9 @@ class ResourceManager(object):
         resource = []
         for json_object in json_objects:
             for resource_data in json_object:
-                resource.append(self.resource_class(self, resource_data,
-                                json_object[resource_data]))
+                resource.append(self.resource_class(  # pylint: disable=E1102
+                    self, resource_data,
+                    json_object[resource_data]))
         return resource
 
     def _list(self, url, response_key=None):
@@ -75,9 +76,10 @@ class ResourceManager(object):
         for json_object in json_objects:
             data = json_object.get('usage').keys()
             for values in data:
-                resource.append(self.resource_class(self, values,
-                                json_object['limits'][values],
-                                json_object['usage'][values]))
+                resource.append(self.resource_class(  # pylint: disable=E1102
+                    self, values,
+                    json_object['limits'][values],
+                    json_object['usage'][values]))
         return resource
 
     def _delete(self, url):
diff --git a/distributedcloud/dcmanager/db/sqlalchemy/api.py b/distributedcloud/dcmanager/db/sqlalchemy/api.py
index ccafbced9..f04e6dc35 100644
--- a/distributedcloud/dcmanager/db/sqlalchemy/api.py
+++ b/distributedcloud/dcmanager/db/sqlalchemy/api.py
@@ -670,7 +670,9 @@ def initialize_subcloud_group_default(engine):
     subcloud_group = sqlalchemy.Table('subcloud_group', meta, autoload=True)
     try:
         with engine.begin() as conn:
-            conn.execute(subcloud_group.insert(), default_group)
+            conn.execute(
+                subcloud_group.insert(),  # pylint: disable=E1120
+                default_group)
         LOG.info("Default Subcloud Group created")
     except DBDuplicateEntry:
         # The default already exists.
diff --git a/distributedcloud/dcmanager/db/sqlalchemy/migrate_repo/versions/001_first_version.py b/distributedcloud/dcmanager/db/sqlalchemy/migrate_repo/versions/001_first_version.py
index a70cc7c8c..1170a6ebb 100644
--- a/distributedcloud/dcmanager/db/sqlalchemy/migrate_repo/versions/001_first_version.py
+++ b/distributedcloud/dcmanager/db/sqlalchemy/migrate_repo/versions/001_first_version.py
@@ -180,7 +180,7 @@ def upgrade(migrate_engine):
 
     # populate the sw_update_opts_default with the default values.
     con = migrate_engine.connect()
-    con.execute(sw_update_opts_default.insert(),
+    con.execute(sw_update_opts_default.insert(),  # pylint: disable=E1120
                 storage_apply_type=vim.APPLY_TYPE_PARALLEL,
                 compute_apply_type=vim.APPLY_TYPE_PARALLEL,
                 max_parallel_computes=10,
diff --git a/distributedcloud/dcmanager/db/sqlalchemy/migrate_repo/versions/006_add_subcloud_group_table.py b/distributedcloud/dcmanager/db/sqlalchemy/migrate_repo/versions/006_add_subcloud_group_table.py
index a8b8b782a..106da472f 100644
--- a/distributedcloud/dcmanager/db/sqlalchemy/migrate_repo/versions/006_add_subcloud_group_table.py
+++ b/distributedcloud/dcmanager/db/sqlalchemy/migrate_repo/versions/006_add_subcloud_group_table.py
@@ -74,7 +74,9 @@ def upgrade(migrate_engine):
     # Inserting the GROUP as ID 1,
     # This should increment the pkey to 2
     with migrate_engine.begin() as conn:
-        conn.execute(subcloud_group.insert(), default_group)
+        conn.execute(
+            subcloud_group.insert(),  # pylint: disable=E1120
+            default_group)
 
     # postgres does not increment the subcloud group id sequence
     # after the insert above as part of the migrate.
diff --git a/distributedcloud/dcmanager/rpc/client.py b/distributedcloud/dcmanager/rpc/client.py
index 9549dba8c..6617a1a13 100644
--- a/distributedcloud/dcmanager/rpc/client.py
+++ b/distributedcloud/dcmanager/rpc/client.py
@@ -127,14 +127,13 @@ class ManagerClient(RPCClient):
                                   update_state_only=update_state_only,
                                   audit_fail_count=audit_fail_count))
 
-    def update_subcloud_sync_endpoint_type(self, ctxt, subcloud_id,
+    def update_subcloud_sync_endpoint_type(self, ctxt, subcloud_name,
                                            endpoint_type_list,
                                            openstack_installed):
         return self.cast(
             ctxt,
             self.make_msg('update_subcloud_sync_endpoint_type',
-                          subcloud_id=subcloud_id,
                           subcloud_name=subcloud_name,
                           endpoint_type_list=endpoint_type_list,
                           openstack_installed=openstack_installed))
diff --git a/distributedcloud/dcorch/api/proxy/common/service.py b/distributedcloud/dcorch/api/proxy/common/service.py
index 43f6ab7c9..3a71839f8 100644
--- a/distributedcloud/dcorch/api/proxy/common/service.py
+++ b/distributedcloud/dcorch/api/proxy/common/service.py
@@ -84,7 +84,7 @@ class Middleware(Application):
 
     @webob.dec.wsgify(RequestClass=Request)
     def __call__(self, req):
-        response = self.process_request(req)
+        response = self.process_request(req)  # pylint: disable=E1128
         if response:
             return response
         # call the next app on the stack to process the request
diff --git a/distributedcloud/dcorch/db/sqlalchemy/migrate_repo/versions/001_initial.py b/distributedcloud/dcorch/db/sqlalchemy/migrate_repo/versions/001_initial.py
index db62737d6..3ddb48515 100644
--- a/distributedcloud/dcorch/db/sqlalchemy/migrate_repo/versions/001_initial.py
+++ b/distributedcloud/dcorch/db/sqlalchemy/migrate_repo/versions/001_initial.py
@@ -108,8 +108,7 @@ def upgrade(migrate_engine):
             meta.drop_all(tables=tables[:index])
             raise
 
-    rows = quota_classes.count(). \
-        where(quota_classes.c.class_name == 'default').execute().scalar()
+    rows = quota_classes.count().where(quota_classes.c.class_name == 'default').execute().scalar()  # pylint: disable=E1120
 
     # Do not add entries if there are already 'default' entries.  We don't
     # want to write over something the user added.
@@ -117,7 +116,7 @@ def upgrade(migrate_engine):
         created_at = datetime.datetime.now()
 
         # Set default quota limits
-        qci = quota_classes.insert()
+        qci = quota_classes.insert()  # pylint: disable=E1120
         for resource, default in CONF.dc_orch_global_limit.items():
             qci.execute({'created_at': created_at,
                          'class_name': QUOTA_CLASS_NAME_DEFAULT,
diff --git a/distributedcloud/dcorch/engine/fernet_key_manager.py b/distributedcloud/dcorch/engine/fernet_key_manager.py
index df873447c..e8b4b6671 100644
--- a/distributedcloud/dcorch/engine/fernet_key_manager.py
+++ b/distributedcloud/dcorch/engine/fernet_key_manager.py
@@ -112,7 +112,7 @@ class FernetKeyManager(manager.Manager):
 
         with open(os.devnull, "w") as fnull:
             try:
-                subprocess.check_call(KEY_ROTATE_CMD,
+                subprocess.check_call(KEY_ROTATE_CMD,  # pylint: disable=E1102
                                       stdout=fnull,
                                       stderr=fnull)
             except subprocess.CalledProcessError:
diff --git a/distributedcloud/dcorch/engine/service.py b/distributedcloud/dcorch/engine/service.py
index 9e1ffc319..72333605b 100644
--- a/distributedcloud/dcorch/engine/service.py
+++ b/distributedcloud/dcorch/engine/service.py
@@ -174,11 +174,11 @@ class EngineService(service.Service):
                                           project_id, user_id)
 
     @request_context
-    def quota_sync_for_project(self, context, project_id):
+    def quota_sync_for_project(self, context, project_id, user_id):
         # On Demand Quota Sync for a project, will be triggered by KB-API
-        LOG.info("On Demand Quota Sync Called for: %s",
-                 project_id)
-        self.qm.quota_sync_for_project(project_id)
+        LOG.info("On Demand Quota Sync Called for: %s %s",
+                 project_id, user_id)
+        self.qm.quota_sync_for_project(project_id, user_id)
 
     @request_context
     def add_subcloud(self, ctxt, subcloud_name, sw_version):
diff --git a/distributedcloud/dcorch/engine/sync_thread.py b/distributedcloud/dcorch/engine/sync_thread.py
index ff23a90fb..9afc8bbf6 100644
--- a/distributedcloud/dcorch/engine/sync_thread.py
+++ b/distributedcloud/dcorch/engine/sync_thread.py
@@ -775,7 +775,7 @@ class SyncThread(object):
         m_resources = None
         db_resources = None
         # Query subcloud first. If not reachable, abort audit.
-        sc_resources = self.get_subcloud_resources(resource_type)
+        sc_resources = self.get_subcloud_resources(resource_type)  # pylint: disable=E1128
         if sc_resources is None:
             return m_resources, db_resources, sc_resources
         db_resources = self.get_db_master_resources(resource_type)
@@ -787,7 +787,7 @@ class SyncThread(object):
         if resource_type in SyncThread.master_resources_dict:
             m_resources = SyncThread.master_resources_dict[resource_type]
         else:
-            m_resources = self.get_master_resources(resource_type)
+            m_resources = self.get_master_resources(resource_type)  # pylint: disable=E1128
             if m_resources is not None:
                 SyncThread.master_resources_dict[resource_type] = m_resources
         return m_resources
diff --git a/distributedcloud/dcorch/rpc/client.py b/distributedcloud/dcorch/rpc/client.py
index 5cd96f33c..3fa1de42c 100644
--- a/distributedcloud/dcorch/rpc/client.py
+++ b/distributedcloud/dcorch/rpc/client.py
@@ -63,9 +63,10 @@ class EngineClient(object):
             project_id=project_id,
             user_id=user_id))
 
-    def quota_sync_for_project(self, ctxt, project_id):
+    def quota_sync_for_project(self, ctxt, project_id, user_id):
         return self.cast(ctxt, self.make_msg('quota_sync_for_project',
-                                             project_id=project_id))
+                                             project_id=project_id,
+                                             user_id=user_id))
 
     def keypair_sync_for_user(self, ctxt, job_id, payload):
         return self.cast(
diff --git a/distributedcloud/pylint.rc b/distributedcloud/pylint.rc
index d2ccc10e4..93887e5a7 100644
--- a/distributedcloud/pylint.rc
+++ b/distributedcloud/pylint.rc
@@ -60,15 +60,10 @@ load-plugins=
 # W1113: keyword-arg-before-vararg
 # W1201: logging-not-lazy
 # W1401: anomalous-backslash-in-string
 
-# E detect Errors for important programming issues (i.e. most probably bug)
-# E1102: not-callable
-# E1120: no-value-for-parameter (sqlalchemy)
-# E1128: assignment-from-none
 disable=C,R,fixme,
   W0102,W0105,W0107,W0123,W0201,W0211,W0212,W0221,W0223,W0231,W0235,
   W0311,W0402,W0403,W0603,W0612,W0613,W0621,W0622,W0631,W0703,W0706,
-  W1113,W1201,W1401,
-  E1102,E1120,E1128
+  W1113,W1201,W1401
 
 [REPORTS]
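Note (not part of the patch): the E1102 and E1128 fixes above all follow the same
inline-suppression pattern, silencing one reported false positive at the call site
while the checks themselves stay enabled now that they are removed from the disable
list in pylint.rc. Below is a minimal, self-contained sketch of that pattern; the
command is a placeholder standing in for the real ones in the patch (update_iso_cmd,
cleanup_cmd, KEY_ROTATE_CMD, the playbook command).

    import os
    import subprocess

    example_cmd = ["true"]  # placeholder argv; the real code supplies its own command

    with open(os.devnull, "w") as fnull:
        # In this repo pylint reports E1102 (not-callable) on these check_call
        # invocations; the trailing comment suppresses only that single report.
        subprocess.check_call(  # pylint: disable=E1102
            example_cmd, stdout=fnull, stderr=fnull)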