Merge "Re-enable important py3k checks for distcloud"

Zuul, 2021-10-27 13:58:52 +00:00; committed by Gerrit Code Review
commit 9545c32be9
10 changed files with 25 additions and 27 deletions

View File

@@ -40,14 +40,14 @@ class DCCommonException(Exception):
     def __init__(self, **kwargs):
         try:
-            super(DCCommonException, self).__init__(self.message % kwargs)
-            self.msg = self.message % kwargs
+            super(DCCommonException, self).__init__(self.message % kwargs)  # pylint: disable=W1645
+            self.msg = self.message % kwargs  # pylint: disable=W1645
         except Exception:
             with excutils.save_and_reraise_exception() as ctxt:
                 if not self.use_fatal_exceptions():
                     ctxt.reraise = False
                     # at least get the core message out if something happened
-                    super(DCCommonException, self).__init__(self.message)
+                    super(DCCommonException, self).__init__(self.message)  # pylint: disable=W1645
 
     if six.PY2:
         def __unicode__(self):

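Note on the inline suppressions above: BaseException.message was deprecated in Python 2.6 and removed in Python 3, and pylint's py3k checker flags every access to a .message attribute on an exception as W1645 (exception-message-attribute). The DC exception classes define their own class-level message template in the oslo style, so the accesses are intentional and the disables silence a false positive. A minimal sketch of that pattern, with illustrative names rather than the exact distcloud code:

    class ExampleException(Exception):
        # Class-level template string; pylint cannot tell this apart from the
        # removed BaseException.message, hence the W1645 warnings above.
        message = "Resource %(name)s could not be found."

        def __init__(self, **kwargs):
            super(ExampleException, self).__init__(self.message % kwargs)  # pylint: disable=W1645
            self.msg = self.message % kwargs  # pylint: disable=W1645

    # ExampleException(name="subcloud1") carries the text
    # "Resource subcloud1 could not be found." on Python 2 and 3 alike.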
View File

@@ -42,14 +42,14 @@ class DBsyncException(Exception):
     def __init__(self, **kwargs):
         try:
-            super(DBsyncException, self).__init__(self.message % kwargs)
-            self.msg = self.message % kwargs
+            super(DBsyncException, self).__init__(self.message % kwargs)  # pylint: disable=exception-message-attribute
+            self.msg = self.message % kwargs  # pylint: disable=exception-message-attribute
         except Exception:
             with excutils.save_and_reraise_exception() as ctxt:
                 if not self.use_fatal_exceptions():
                     ctxt.reraise = False
                     # at least get the core message out if something happened
-                    super(DBsyncException, self).__init__(self.message)
+                    super(DBsyncException, self).__init__(self.message)  # pylint: disable=exception-message-attribute
 
     if six.PY2:
         def __unicode__(self):

View File

@@ -50,7 +50,7 @@ LOG = logging.getLogger(__name__)
 # We will update the state of each subcloud in the dcorch about once per hour.
 # Calculate how many iterations that will be.
 SUBCLOUD_STATE_UPDATE_ITERATIONS = \
-    dccommon_consts.SECONDS_IN_HOUR / CONF.scheduler.subcloud_audit_interval
+    dccommon_consts.SECONDS_IN_HOUR // CONF.scheduler.subcloud_audit_interval
 
 # Patch audit normally happens every CONF.scheduler.patch_audit_interval
 # seconds, but can be forced to happen on the next audit interval by calling

@@ -422,7 +422,7 @@ class SubcloudAuditManager(manager.Manager):
                 kube_rootca_update_audit_data))
 
         # We want a chunksize of at least 1 so add the number of workers.
-        chunksize = (len(subcloud_audits) + CONF.audit_worker_workers) / CONF.audit_worker_workers
+        chunksize = (len(subcloud_audits) + CONF.audit_worker_workers) // CONF.audit_worker_workers
         for audit in subcloud_audits:
             subcloud_ids.append(audit.subcloud_id)
             if len(subcloud_ids) == chunksize:

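Both hunks above replace true division with floor division. On Python 2, / between two integers truncates; on Python 3 it returns a float, so without the change SUBCLOUD_STATE_UPDATE_ITERATIONS and chunksize would silently become floats under Python 3. That matters for chunksize, which is compared with == against a list length a few lines later. A small standalone illustration (the values are made up, not the real configuration defaults):

    audits = 10    # stand-in for len(subcloud_audits)
    workers = 4    # stand-in for CONF.audit_worker_workers

    chunksize = (audits + workers) // workers   # 3 on both Python 2 and 3, always an int
    assert chunksize == 3

    # True division would give 3.5 on Python 3 (and 3 on Python 2), so a later
    # check such as len(subcloud_ids) == chunksize could never match unless the
    # total happened to be an exact multiple of the worker count.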
View File

@@ -50,7 +50,7 @@ LOG = logging.getLogger(__name__)
 # We will update the state of each subcloud in the dcorch about once per hour.
 # Calculate how many iterations that will be.
 SUBCLOUD_STATE_UPDATE_ITERATIONS = \
-    dccommon_consts.SECONDS_IN_HOUR / CONF.scheduler.subcloud_audit_interval
+    dccommon_consts.SECONDS_IN_HOUR // CONF.scheduler.subcloud_audit_interval
 
 
 class SubcloudAuditWorkerManager(manager.Manager):

View File

@@ -44,14 +44,14 @@ class DCManagerException(Exception):
     def __init__(self, **kwargs):
         try:
-            super(DCManagerException, self).__init__(self.message % kwargs)
-            self.msg = self.message % kwargs
+            super(DCManagerException, self).__init__(self.message % kwargs)  # pylint: disable=W1645
+            self.msg = self.message % kwargs  # pylint: disable=W1645
         except Exception:
             with excutils.save_and_reraise_exception() as ctxt:
                 if not self.use_fatal_exceptions():
                     ctxt.reraise = False
                     # at least get the core message out if something happened
-                    super(DCManagerException, self).__init__(self.message)
+                    super(DCManagerException, self).__init__(self.message)  # pylint: disable=W1645
 
     if six.PY2:
         def __unicode__(self):

View File

@@ -825,7 +825,7 @@ class PatchOrchThread(threading.Thread):
                 return
 
             wait_count += 1
-            if wait_count >= (WAIT_LIMIT / WAIT_INTERVAL):
+            if wait_count >= (WAIT_LIMIT // WAIT_INTERVAL):
                 # We have waited too long.
                 message = ("Too much time expired after creating strategy for "
                            "%s." % region)

@@ -920,7 +920,7 @@ class PatchOrchThread(threading.Thread):
                 # when the controller reboots.
                 get_fail_count += 1
                 wait_count += 1
-                if get_fail_count >= (GET_FAIL_LIMIT / WAIT_INTERVAL):
+                if get_fail_count >= (GET_FAIL_LIMIT // WAIT_INTERVAL):
                     # We have waited too long.
                     message = ("Failed to get patch strategy for %s" %
                                region)

@@ -982,7 +982,7 @@ class PatchOrchThread(threading.Thread):
                 return
 
             wait_count += 1
-            if wait_count >= (WAIT_LIMIT / WAIT_INTERVAL):
+            if wait_count >= (WAIT_LIMIT // WAIT_INTERVAL):
                 # We have waited too long.
                 message = ("Too much time expired while applying strategy for "
                            "%s." % region)

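The three hunks above are the same old-division (W1619) cleanup applied to the orchestration polling loops. Here the quotient only serves as an upper bound on the number of poll iterations, so floor division mostly keeps the arithmetic integral and the two interpreters identical. A small sketch with made-up constants (the real WAIT_LIMIT and WAIT_INTERVAL values may differ):

    WAIT_INTERVAL = 10    # seconds between polls (illustrative value)
    WAIT_LIMIT = 120      # overall timeout in seconds (illustrative value)

    max_polls = WAIT_LIMIT // WAIT_INTERVAL
    assert max_polls == 12 and isinstance(max_polls, int)

    # With true division the bound would be the float 12.0 on Python 3; a
    # comparison like wait_count >= 12.0 still behaves the same, so unlike the
    # chunksize case this change is mainly about satisfying W1619 and keeping
    # Python 2 and Python 3 arithmetic consistent.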
View File

@@ -40,14 +40,14 @@ class OrchestratorException(Exception):
     def __init__(self, **kwargs):
         try:
-            super(OrchestratorException, self).__init__(self.message % kwargs)
-            self.msg = self.message % kwargs
+            super(OrchestratorException, self).__init__(self.message % kwargs)  # pylint: disable=W1645
+            self.msg = self.message % kwargs  # pylint: disable=W1645
         except Exception:
             with excutils.save_and_reraise_exception() as ctxt:
                 if not self.use_fatal_exceptions():
                     ctxt.reraise = False
                     # at least get the core message out if something happened
-                    super(OrchestratorException, self).__init__(self.message)
+                    super(OrchestratorException, self).__init__(self.message)  # pylint: disable=W1645
 
     if six.PY2:
         def __unicode__(self):

View File

@@ -13,6 +13,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import six
+
 from keystoneauth1 import exceptions as keystone_exceptions
 from novaclient import client as novaclient
 from novaclient import exceptions as novaclient_exceptions

@@ -212,7 +214,7 @@ class ComputeSyncThread(SyncThread):
             newflavor = self.sc_nova_client.flavors.create(
                 name, ram, vcpus, disk, **kwargs)
         except novaclient_exceptions.Conflict as e:
-            if "already exists" in e.message:
+            if "already exists" in six.text_type(e):
                 # FlavorExists or FlavorIdExists.
                 LOG.info("Flavor {} already exists in subcloud"
                          .format(name), extra=self.log_extra)

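The e.message access works on Python 2 but raises AttributeError on Python 3, where exceptions no longer carry a message attribute. six.text_type(e) resolves to unicode(e) on Python 2 and str(e) on Python 3, so the substring check keeps working on both interpreters, which is also why import six is added at the top of the module. A small self-contained example (using a plain ValueError rather than the novaclient Conflict exception):

    import six

    try:
        raise ValueError("Flavor m1.small already exists")
    except ValueError as e:
        # e.message would only work on Python 2; six.text_type(e) is portable.
        assert "already exists" in six.text_type(e)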
View File

@@ -1554,7 +1554,7 @@ class IdentitySyncThread(SyncThread):
                 consts.RESOURCE_TYPE_IDENTITY_GROUPS]
             filtered_groups = [group for group in groups if
-                               all(group.name != filtered for
+                               all(group.name != filtered for  # pylint: disable=comprehension-escape
                                    filtered in filtered_list)]
             return filtered_groups
         except (keystone_exceptions.connection.ConnectTimeout,

@@ -1581,7 +1581,7 @@ class IdentitySyncThread(SyncThread):
                 consts.RESOURCE_TYPE_IDENTITY_ROLES]
             filtered_roles = [role for role in roles if
-                              (all(role.name != filtered for
+                              (all(role.name != filtered for  # pylint: disable=comprehension-escape
                                    filtered in filtered_list))]
             return filtered_roles
         except (keystone_exceptions.connection.ConnectTimeout,

@@ -1608,7 +1608,7 @@ class IdentitySyncThread(SyncThread):
                 consts.RESOURCE_TYPE_IDENTITY_PROJECTS]
             filtered_projects = [project for project in projects if
-                                 all(project.name != filtered for
+                                 all(project.name != filtered for  # pylint: disable=comprehension-escape
                                      filtered in filtered_list)]
             return filtered_projects
         except (keystone_exceptions.connection.ConnectTimeout,

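W1662 (comprehension-escape) is the py3k check for code that relies on a comprehension's loop variable leaking into the enclosing scope, which Python 2 list comprehensions allow but Python 3 does not. The filters above do not appear to rely on any leakage, since each variable is used only inside its own comprehension or nested generator, so the inline disables treat the warnings as false positives rather than rewriting the nested comprehensions. A generic illustration of the scoping difference (not the distcloud code):

    names = ["admin", "services", "operator"]
    kept = [name for name in names if name != "admin"]

    # Python 2: the loop variable "name" leaks and still equals "operator" here.
    # Python 3: "name" stays scoped to the comprehension, so using it afterwards
    # raises NameError, which is the situation W1662 is meant to catch.
    try:
        name  # noqa: F821 - deliberate reference to show the Python 3 behaviour
    except NameError:
        print("the comprehension variable does not escape on Python 3")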
View File

@@ -141,14 +141,10 @@ enable=E1603,E1609,E1610,E1602,E1606,E1608,E1607,E1605,E1604,E1601,E1611,W1652,
 # W1201: logging-not-lazy
 # W1401: anomalous-backslash-in-string
 # W1618: no-absolute-import
-# W1645: exception-message-attribute
-# W1619: old-division
-# W1654: dict-items-not-iterating
-# W1662: comprehension-escape
 disable=C,R,fixme,
        W0102,W0105,W0107,W0123,W0201,W0211,W0212,W0221,W0223,W0231,W0235,
        W0311,W0402,W0403,W0603,W0612,W0613,W0621,W0622,W0631,W0703,W0706,
-       W1113,W1201,W1401,W1618,W1645,W1619,W1654,W1662
+       W1113,W1201,W1401,W1618
 
 [REPORTS]
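With the four codes dropped from the disable list, pylint's py3k checks for exception-message-attribute (W1645), old-division (W1619), dict-items-not-iterating (W1654) and comprehension-escape (W1662) are now enforced across the repo; the code changes above either fix the findings or suppress them locally where they are intentional. W1654 needed no code changes in this commit, but for reference it guards against treating dict.items(), keys() or values() as lists, which they no longer are on Python 3. A small illustration (generic code, not from distcloud):

    settings = {"audit_interval": 900, "workers": 4}

    # On Python 3, items() returns a view, not a list, so code that indexes it
    # or mutates the dict while iterating must materialise it explicitly.
    items = list(settings.items())
    first_key, first_value = items[0]
    assert isinstance(first_key, str)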