pylint cleanup for nfv to use standard modules

Clean up the code to allow unsuppressing the following
pylint checks:

 C0207 use-maxsplit-arg
 R1730 consider-using-min-builtin
 R1731 consider-using-max-builtin

These three pylint checks report scenarios where a standard
Python built-in (or built-in argument) can be used rather
than 'reinventing the wheel'.

There is no functional difference for any of these changes.
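
As a quick reference, a minimal sketch of the three patterns these
checks suggest is shown below (the variable names and values are
illustrative only, not taken from the nfv code):

  version_string = '1.24.4'
  value, lower, upper = 950, 120, 800

  # C0207 use-maxsplit-arg: only the first field is needed, so stop
  # splitting after the first '.'
  major = version_string.split('.', maxsplit=1)[0]

  # R1731 consider-using-max-builtin: replaces "if value < lower: value = lower"
  value = max(value, lower)

  # R1730 consider-using-min-builtin: replaces "if value > upper: value = upper"
  value = min(value, upper)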

Test Plan:
  PASS: tox
  PASS: create / delete a kubernetes upgrade strategy

Story: 2010531
Task: 47617
Signed-off-by: Al Bailey <al.bailey@windriver.com>
Change-Id: Ieb638cbcf7280f6fa322a062467dfc098efdec5e
Al Bailey 2023-03-09 18:40:36 +00:00
parent 4ae4240c80
commit 9784b7526a
4 changed files with 10 additions and 16 deletions


@@ -13,7 +13,7 @@ from six.moves import http_client as httplib
 from nfv_common import debug
 from nfv_common.helpers import Result
-K8S_MODULE_MAJOR_VERSION = int(K8S_MODULE_VERSION.split('.')[0])
+K8S_MODULE_MAJOR_VERSION = int(K8S_MODULE_VERSION.split('.', maxsplit=1)[0])
 DLOG = debug.debug_get_logger('nfv_plugins.nfvi_plugins.clients.kubernetes_client')
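
(Aside: both forms return the same major version string; maxsplit=1 simply
stops splitting after the first '.'. The sample value below is illustrative
only, not the plugin's actual input.)

  version = '1.24.4'
  assert version.split('.')[0] == version.split('.', maxsplit=1)[0] == '1'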


@@ -1326,15 +1326,13 @@ class Instance(ObjectData):
                     = int(section.get('max_live_migrate_wait_in_secs', 800))
             else:
                 # Ensure specified timeout is between the configured min/max.
-                if self._max_live_migrate_wait_in_secs \
-                        <= max_live_migrate_wait_in_secs_min:
-                    self._max_live_migrate_wait_in_secs \
-                        = max_live_migrate_wait_in_secs_min
+                self._max_live_migrate_wait_in_secs \
+                    = max(self._max_live_migrate_wait_in_secs,
+                          max_live_migrate_wait_in_secs_min)
-                if self._max_live_migrate_wait_in_secs \
-                        >= max_live_migrate_wait_in_secs_max:
-                    self._max_live_migrate_wait_in_secs \
-                        = max_live_migrate_wait_in_secs_max
+                self._max_live_migrate_wait_in_secs \
+                    = min(self._max_live_migrate_wait_in_secs,
+                          max_live_migrate_wait_in_secs_max)
         if self._max_live_migrate_wait_in_secs is None:
             # No timeout specified and no configured default so use 800.
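
(Aside: the two replacement statements amount to clamping the timeout into
the configured range. A standalone sketch with illustrative values, not the
real configuration defaults:)

  wait_min, wait_max = 120, 1800
  wait = 4000                    # out-of-range value, for illustration
  wait = max(wait, wait_min)     # raise to the lower bound if below it
  wait = min(wait, wait_max)     # cap at the upper bound if above it
  assert wait == 1800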


@@ -203,11 +203,10 @@ class SwUpdateStrategy(strategy.Strategy):
             host_table = tables.tables_get_host_table()
             num_worker_hosts = host_table.total_by_personality(
                 HOST_PERSONALITY.WORKER)
-            aggregate_ratio = \
-                float(self._max_parallel_worker_hosts) / num_worker_hosts
             # Limit the ratio to half the worker hosts in an aggregate
-            if aggregate_ratio > 0.5:
-                aggregate_ratio = 0.5
+            aggregate_ratio = min(
+                float(self._max_parallel_worker_hosts) / num_worker_hosts,
+                0.5)
             for host_aggregate in host_aggregate_table:
                 aggregate_count = len(
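
(Aside: min() caps the computed ratio at 0.5 in a single expression. A
standalone sketch with illustrative numbers, not values from a real system:)

  max_parallel_worker_hosts = 8
  num_worker_hosts = 10
  aggregate_ratio = min(
      float(max_parallel_worker_hosts) / num_worker_hosts,
      0.5)
  assert aggregate_ratio == 0.5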


@@ -31,7 +31,6 @@ disable=
         C0201, # consider-iterating-dictionary
         C0204, # bad-mcs-classmethod-argument
         C0206, # consider-using-dict-items
-        C0207, # use-maxsplit-arg
         C0209, # consider-using-f-string
         C0301, # line-too-long
         C0302, # too-many-lines
@@ -65,8 +64,6 @@ disable=
         R1724, # no-else-continue
         R1725, # super-with-arguments
         R1729, # use-a-generator
-        R1730, # consider-using-min-builtin
-        R1731, # consider-using-max-builtin
         R1732, # consider-using-with
         R1734, # use-list-literal
         R1735, # use-dict-literal