Update tox pylint/pep8 for dccommon

This commit enables the check of new pylint/pep8
violations.

PYLINT - All convention related checks, except:
- missing-class-docstring
- missing-function-docstring
- missing-module-docstring
- consider-using-f-string
- invalid-name
- import-outside-toplevel
- too-many-lines
- consider-iterating-dictionary
- unnecessary-lambda-assignment

PEP8:
- E117: over-indented
- E123: closing bracket does not match indentation
  of opening bracket's line
- E125: continuation line with the same indent as the next
  logical line
- E305: expected 2 blank lines after class or function
  definition
- E402: module level import not at top of file
- E501: line too long
- H216: flag use of third party mock

Test Plan:
1. Perform `tox` command
- Pass in py39, pylint, pep8

Partial-Bug: 2033294

Change-Id: I45855e6bc23b61420281afc0132dd0662e292fc1
Signed-off-by: Hugo Brito <hugo.brito@windriver.com>
This commit is contained in:
Hugo Brito 2023-08-28 17:13:55 -03:00 committed by rlima
parent 3685223b28
commit a2b9bb7ab2
13 changed files with 127 additions and 96 deletions

View File

@ -1,5 +1,5 @@
# Copyright 2016 Ericsson AB
# Copyright (c) 2020-2021 Wind River Systems, Inc.
# Copyright (c) 2020-2021, 2024 Wind River Systems, Inc.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
@ -35,8 +35,8 @@ class BarbicanClient(base.DriverBase):
"""
def __init__(
self, region, session, endpoint_type=dccommon_consts.KS_ENDPOINT_DEFAULT):
self, region, session,
endpoint_type=dccommon_consts.KS_ENDPOINT_DEFAULT):
try:
self.barbican_client = client.Client(
API_VERSION,

View File

@ -1,4 +1,4 @@
# Copyright (c) 2023 Wind River Systems, Inc.
# Copyright (c) 2023-2024 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
@ -46,7 +46,7 @@ class DcmanagerClient(base.DriverBase):
return response.json()
else:
if response.status_code == 404 and \
'System Peer not found' in response.text:
'System Peer not found' in response.text:
raise exceptions.SystemPeerNotFound(
system_peer=system_peer_uuid)
message = "Get SystemPeer: system_peer_uuid %s failed with RC: %d" \
@ -69,7 +69,7 @@ class DcmanagerClient(base.DriverBase):
return response.json()
else:
if response.status_code == 404 and \
'Subcloud not found' in response.text:
'Subcloud not found' in response.text:
raise exceptions.SubcloudNotFound(subcloud_ref=subcloud_ref)
message = "Get Subcloud: subcloud_ref %s failed with RC: %d" % \
(subcloud_ref, response.status_code)
@ -137,7 +137,7 @@ class DcmanagerClient(base.DriverBase):
return response.json()
else:
if response.status_code == 404 and \
'Subcloud Peer Group not found' in response.text:
'Subcloud Peer Group not found' in response.text:
raise exceptions.SubcloudPeerGroupNotFound(
peer_group_ref=peer_group_ref)
message = "Get Subcloud Peer Group: peer_group_ref %s " \
@ -160,7 +160,7 @@ class DcmanagerClient(base.DriverBase):
return data.get('subclouds', [])
else:
if response.status_code == 404 and \
'Subcloud Peer Group not found' in response.text:
'Subcloud Peer Group not found' in response.text:
raise exceptions.SubcloudPeerGroupNotFound(
peer_group_ref=peer_group_ref)
message = "Get Subcloud list by Peer Group: peer_group_ref %s " \
@ -173,7 +173,7 @@ class DcmanagerClient(base.DriverBase):
"""Get peer group association with peer id and PG id."""
for association in self.get_peer_group_association_list():
if peer_id == association.get('system-peer-id') and \
pg_id == association.get('peer-group-id'):
pg_id == association.get('peer-group-id'):
return association
raise exceptions.PeerGroupAssociationNotFound(
association_id=None)
@ -278,7 +278,7 @@ class DcmanagerClient(base.DriverBase):
return response.json()
else:
if response.status_code == 404 and \
'Peer Group Association not found' in response.text:
'Peer Group Association not found' in response.text:
raise exceptions.PeerGroupAssociationNotFound(
association_id=association_id)
message = "Update Peer Group Association: association_id %s, " \
@ -302,7 +302,7 @@ class DcmanagerClient(base.DriverBase):
return response.json()
else:
if response.status_code == 404 and \
'Subcloud Peer Group not found' in response.text:
'Subcloud Peer Group not found' in response.text:
raise exceptions.SubcloudPeerGroupNotFound(
peer_group_ref=peer_group_ref)
message = "Update Subcloud Peer Group: peer_group_ref %s, %s, " \
@ -326,7 +326,7 @@ class DcmanagerClient(base.DriverBase):
return response.json()
else:
if response.status_code == 404 and \
'Subcloud Peer Group not found' in response.text:
'Subcloud Peer Group not found' in response.text:
raise exceptions.SubcloudPeerGroupNotFound(
peer_group_ref=peer_group_ref)
message = "Audit Subcloud Peer Group: peer_group_ref %s, %s, " \
@ -362,7 +362,7 @@ class DcmanagerClient(base.DriverBase):
return response.json()
else:
if response.status_code == 404 and \
'Subcloud not found' in response.text:
'Subcloud not found' in response.text:
raise exceptions.SubcloudNotFound(subcloud_ref=subcloud_ref)
message = "Update Subcloud: subcloud_ref: %s files: %s, " \
"data: %s, failed with RC: %d" % (subcloud_ref, files, data,
@ -384,7 +384,7 @@ class DcmanagerClient(base.DriverBase):
return response.json()
else:
if response.status_code == 404 and \
'Peer Group Association not found' in response.text:
'Peer Group Association not found' in response.text:
raise exceptions.PeerGroupAssociationNotFound(
association_id=association_id)
message = "Delete Peer Group Association: association_id %s " \
@ -406,14 +406,15 @@ class DcmanagerClient(base.DriverBase):
return response.json()
else:
if response.status_code == 404 and \
'Subcloud Peer Group not found' in response.text:
'Subcloud Peer Group not found' in response.text:
raise exceptions.SubcloudPeerGroupNotFound(
peer_group_ref=peer_group_ref)
elif response.status_code == 400 and \
'a peer group which is associated with a system peer' in \
response.text:
raise exceptions.SubcloudPeerGroupDeleteFailedAssociated(
peer_group_ref=peer_group_ref)
raise exceptions.SubcloudPeerGroupDeleteFailedAssociated(
peer_group_ref=peer_group_ref
)
message = "Delete Subcloud Peer Group: peer_group_ref %s " \
"failed with RC: %d" % (peer_group_ref, response.status_code)
LOG.error(message)
@ -433,7 +434,7 @@ class DcmanagerClient(base.DriverBase):
return response.json()
else:
if response.status_code == 404 and \
'Subcloud not found' in response.text:
'Subcloud not found' in response.text:
raise exceptions.SubcloudNotFound(subcloud_ref=subcloud_ref)
message = "Delete Subcloud: subcloud_ref %s failed with RC: %d" % \
(subcloud_ref, response.status_code)

View File

@ -1,4 +1,4 @@
# Copyright 2017-2023 Wind River Inc
# Copyright 2017-2024 Wind River Inc
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
@ -47,7 +47,7 @@ SUPPORTED_REGION_CLIENTS = [
DBSYNC_CLIENT_NAME,
]
# region client type and class mappings
# Region client type and class mappings
region_client_class_map = {
SYSINV_CLIENT_NAME: SysinvClient,
FM_CLIENT_NAME: FmClient,
@ -121,10 +121,13 @@ class OpenStackDriver(object):
if getattr(self, client_obj_name) is None:
# Create new client object and cache it
try:
# Since SysinvClient (cgtsclient) does not support session, also pass
# the cached endpoint so it does not need to retrieve it from keystone.
# Since SysinvClient (cgtsclient) does not support session,
# also pass the cached endpoint so it does not need to
# retrieve it from keystone.
if client_name == "sysinv":
sysinv_endpoint = self.keystone_client.endpoint_cache.get_endpoint('sysinv')
sysinv_endpoint = (
self.keystone_client.endpoint_cache.get_endpoint(
'sysinv'))
client_object = region_client_class_map[client_name](
region=region_name,
session=self.keystone_client.session,

View File

@ -1,5 +1,4 @@
#
# Copyright (c) 2023 Wind River Systems, Inc.
# Copyright (c) 2023-2024 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
@ -192,7 +191,9 @@ class SoftwareClient(base.DriverBase):
LOG.error(message)
raise FileNotFoundError(message)
to_upload_files[software_file] = (software_file, open(software_file, 'rb'))
to_upload_files[software_file] = (
software_file, open(software_file, 'rb')
)
enc = MultipartEncoder(fields=to_upload_files)
url = self.endpoint + '/upload'

View File

@ -1,5 +1,5 @@
# Copyright 2015 Huawei Technologies Co., Ltd.
# Copyright (c) 2018-2023 Wind River Systems, Inc.
# Copyright (c) 2018-2024 Wind River Systems, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@ -71,7 +71,8 @@ class EndpointCache(object):
CONF.endpoint_cache.project_domain_name)
self.keystone_client, self.service_endpoint_map = \
self.get_cached_master_keystone_client_and_region_endpoint_map(region_name)
self.get_cached_master_keystone_client_and_region_endpoint_map(
region_name)
# if Endpoint cache is intended for a subcloud then
# we need to retrieve the subcloud token and session.
@ -148,7 +149,8 @@ class EndpointCache(object):
master_endpoints_list = EndpointCache.master_keystone_client.endpoints.list()
service_id_name_map = {}
for service in EndpointCache.master_services_list: # pylint: disable=not-an-iterable
# pylint: disable-next=not-an-iterable
for service in EndpointCache.master_services_list:
service_dict = service.to_dict()
service_id_name_map[service_dict['id']] = service_dict['name']
@ -214,12 +216,13 @@ class EndpointCache(object):
@lockutils.synchronized(LOCK_NAME)
def get_cached_master_keystone_client_and_region_endpoint_map(self, region_name):
if (EndpointCache.master_keystone_client is None):
if EndpointCache.master_keystone_client is None:
self._create_master_cached_data()
LOG.info("Generated Master keystone client and master token the "
"very first time")
else:
token_expiring_soon = is_token_expiring_soon(token=EndpointCache.master_token)
token_expiring_soon = is_token_expiring_soon(
token=EndpointCache.master_token)
# If token is expiring soon, initialize a new master keystone
# client
@ -228,19 +231,26 @@ class EndpointCache(object):
"will expire soon %s" %
(consts.CLOUD_0, EndpointCache.master_token['expires_at']))
self._create_master_cached_data()
LOG.info("Generated Master keystone client and master token as they are expiring soon")
LOG.info("Generated Master keystone client and master token as they "
"are expiring soon")
else:
# Check if the cached master service endpoint map needs to be refreshed
# Check if the cached master service endpoint map needs to be
# refreshed
if region_name not in self.master_service_endpoint_map:
previous_size = len(EndpointCache.master_service_endpoint_map)
EndpointCache.master_service_endpoint_map = self._generate_master_service_endpoint_map(self)
EndpointCache.master_service_endpoint_map = (
self._generate_master_service_endpoint_map(self))
current_size = len(EndpointCache.master_service_endpoint_map)
LOG.info("Master endpoints list refreshed to include region %s: "
"prev_size=%d, current_size=%d" % (region_name, previous_size, current_size))
LOG.info(
"Master endpoints list refreshed to include region %s: "
"prev_size=%d, current_size=%d" % (
region_name, previous_size, current_size)
)
# TODO(clientsession)
if region_name is not None:
region_service_endpoint_map = EndpointCache.master_service_endpoint_map[region_name]
region_service_endpoint_map = EndpointCache.master_service_endpoint_map[
region_name]
else:
region_service_endpoint_map = collections.defaultdict(dict)
@ -255,9 +265,14 @@ class EndpointCache(object):
EndpointCache.master_keystone_client = ks_client.Client(
session=self.admin_session,
region_name=consts.CLOUD_0)
EndpointCache.master_token = EndpointCache.master_keystone_client.tokens.validate(
EndpointCache.master_keystone_client.session.get_token(),
include_catalog=False)
EndpointCache.master_token = (
EndpointCache.master_keystone_client.tokens.validate(
EndpointCache.master_keystone_client.session.get_token(),
include_catalog=False
)
)
if EndpointCache.master_services_list is None:
EndpointCache.master_services_list = EndpointCache.master_keystone_client.services.list()
EndpointCache.master_service_endpoint_map = self._generate_master_service_endpoint_map(self)
EndpointCache.master_services_list = (
EndpointCache.master_keystone_client.services.list())
EndpointCache.master_service_endpoint_map = (
self._generate_master_service_endpoint_map(self))

View File

@ -1,5 +1,6 @@
# Copyright 2015 Huawei Technologies Co., Ltd.
# Copyright 2015 Ericsson AB.
# Copyright (c) 2020-2024 Wind River Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@ -13,9 +14,6 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Copyright (c) 2020-2023 Wind River Systems, Inc.
#
"""
DC Orchestrator base exception handling.
@ -40,14 +38,14 @@ class DCCommonException(Exception):
def __init__(self, **kwargs):
try:
super(DCCommonException, self).__init__(self.message % kwargs) # pylint: disable=W1645
self.msg = self.message % kwargs # pylint: disable=W1645
super(DCCommonException, self).__init__(self.message % kwargs)
self.msg = self.message % kwargs
except Exception:
with excutils.save_and_reraise_exception() as ctxt:
if not self.use_fatal_exceptions():
ctxt.reraise = False
# at least get the core message out if something happened
super(DCCommonException, self).__init__(self.message) # pylint: disable=W1645
super(DCCommonException, self).__init__(self.message)
if six.PY2:
def __unicode__(self):
@ -116,8 +114,8 @@ class PlaybookExecutionTimeout(PlaybookExecutionFailed):
class ImageNotInLocalRegistry(NotFound):
message = _("Image %(image_name)s:%(image_tag)s not found in the local registry. "
"Please check with command: system registry-image-list or "
message = _("Image %(image_name)s:%(image_tag)s not found in the local "
"registry. Please check with command: system registry-image-list or "
"system registry-image-tags %(image_name)s")

View File

@ -1,5 +1,4 @@
#
# Copyright (c) 2020-2023 Wind River Systems, Inc.
# Copyright (c) 2020-2024 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
@ -14,7 +13,7 @@ from six.moves import http_client as httplib
LOG = logging.getLogger(__name__)
K8S_MODULE_MAJOR_VERSION = int(K8S_MODULE_VERSION.split('.')[0])
K8S_MODULE_MAJOR_VERSION = int(K8S_MODULE_VERSION.split('.', maxsplit=1)[0])
KUBE_CONFIG_PATH = '/etc/kubernetes/admin.conf'
CERT_MANAGER_GROUP = 'cert-manager.io'

View File

@ -1,4 +1,4 @@
# Copyright (c) 2021-2023 Wind River Systems, Inc.
# Copyright (c) 2021-2024 Wind River Systems, Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
@ -298,7 +298,7 @@ class SubcloudInstall(object):
"--timeout", BOOT_MENU_TIMEOUT,
"--patches-from-iso",
]
for key in consts.GEN_ISO_OPTIONS:
for key, _ in consts.GEN_ISO_OPTIONS.items():
if key in values:
LOG.debug("Setting option from key=%s, option=%s, value=%s",
key, consts.GEN_ISO_OPTIONS[key], values[key])
@ -431,7 +431,8 @@ class SubcloudInstall(object):
with open(os.devnull, "w") as fnull:
try:
subprocess.check_call(['mount', '-r', '-o', 'loop', # pylint: disable=not-callable
# pylint: disable-next=not-callable
subprocess.check_call(['mount', '-r', '-o', 'loop',
bootimage_path,
temp_bootimage_mnt_dir],
stdout=fnull,
@ -469,7 +470,8 @@ class SubcloudInstall(object):
LOG.error(msg)
raise Exception(msg)
finally:
subprocess.check_call(['umount', '-l', temp_bootimage_mnt_dir]) # pylint: disable=not-callable
# pylint: disable-next=not-callable
subprocess.check_call(['umount', '-l', temp_bootimage_mnt_dir])
os.rmdir(temp_bootimage_mnt_dir)
def check_ostree_mount(self, source_path):

View File

@ -1,5 +1,5 @@
# Copyright (c) 2015 Ericsson AB
# Copyright (c) 2020-2023 Wind River Systems, Inc.
# Copyright (c) 2020-2024 Wind River Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@ -15,9 +15,10 @@
# under the License.
#
from dccommon.tests import utils
from oslotest import base
from dccommon.tests import utils
KEYSTONE_ENDPOINT_0 = [
"9785cc7f99b6469ba6fe89bd8d5b9072", "NULL", "admin",
"7d48ddb964034eb588e557b976d11cdf", "http://[fd01:1::2]:9292", "{}", True,

View File

@ -1,13 +1,14 @@
# Copyright (c) 2023 Wind River Systems, Inc.
# Copyright (c) 2023-2024 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
import mock
import os
import uuid
import yaml
import mock
from dccommon import consts as dccommon_consts
from dccommon.drivers.openstack import dcmanager_v1
from dccommon import exceptions as dccommon_exceptions
@ -180,7 +181,8 @@ class TestDcmanagerClient(base.DCCommonTestCase):
@mock.patch('requests.get')
@mock.patch.object(dcmanager_v1.DcmanagerClient, '__init__')
def test_get_subcloud_peer_group_not_found(
self, mock_client_init, mock_get):
self, mock_client_init, mock_get
):
mock_response = mock.MagicMock()
mock_response.status_code = 404
mock_response.text = "Subcloud Peer Group not found"
@ -221,7 +223,8 @@ class TestDcmanagerClient(base.DCCommonTestCase):
@mock.patch('requests.get')
@mock.patch.object(dcmanager_v1.DcmanagerClient, '__init__')
def test_get_subcloud_list_by_peer_group_not_found(
self, mock_client_init, mock_get):
self, mock_client_init, mock_get
):
mock_response = mock.MagicMock()
mock_response.status_code = 404
mock_response.text = "Subcloud Peer Group not found"

View File

@ -1,4 +1,4 @@
# Copyright (c) 2017-2021 Wind River Systems, Inc.
# Copyright (c) 2017-2021, 2024 Wind River Systems, Inc.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
@ -21,7 +21,7 @@ from dccommon.tests import utils
FAKE_SERVICE = [
'endpoint_volume',
'endpoint_network'
]
]
class Project(object):

View File

@ -1,5 +1,5 @@
# Copyright 2015 Huawei Technologies Co., Ltd.
# Copyright (c) 2017-2023 Wind River Systems, Inc.
# Copyright (c) 2017-2024 Wind River Systems, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@ -17,15 +17,14 @@
import collections
import copy
import mock
from mock import patch
from keystoneclient.v3 import services
from keystoneclient.v3 import tokens
import mock
from oslo_config import cfg
from dccommon import endpoint_cache
from dccommon.tests import base
from keystoneclient.v3 import services
from keystoneclient.v3 import tokens
FAKE_REGIONONE_SYSINV_ENDPOINT = "http://[2620:10a:a001:a114::d00]:6385/v1"
FAKE_REGIONONE_KEYSTONE_ENDPOINT = "http://[2620:10a:a001:a114::d00]:5000/v3"
@ -59,6 +58,7 @@ class FakeService(object):
self.type = type
self.enabled = enabled
FAKE_SERVICES_LIST = [FakeService(1, "keystone", "identity", True),
FakeService(2, "sysinv", "platform", True),
FakeService(3, "patching", "patching", True),
@ -108,19 +108,21 @@ class EndpointCacheTest(base.DCCommonTestCase):
endpoint_cache.EndpointCache.master_service_endpoint_map = \
collections.defaultdict(dict)
@patch.object(endpoint_cache.EndpointCache, 'get_admin_session')
@patch.object(endpoint_cache.EndpointCache,
'get_cached_master_keystone_client_and_region_endpoint_map')
@mock.patch.object(endpoint_cache.EndpointCache, 'get_admin_session')
@mock.patch.object(
endpoint_cache.EndpointCache,
'get_cached_master_keystone_client_and_region_endpoint_map')
def test_get_endpoint(self, mock_get_cached_data, mock_get_admin_session):
mock_get_cached_data.return_value = (FakeKeystoneClient(), FAKE_SERVICE_ENDPOINT_MAP)
mock_get_cached_data.return_value = (
FakeKeystoneClient(), FAKE_SERVICE_ENDPOINT_MAP)
cache = endpoint_cache.EndpointCache("RegionOne", None)
endpoint = cache.get_endpoint("sysinv")
self.assertEqual(endpoint, FAKE_REGIONONE_SYSINV_ENDPOINT)
@patch.object(endpoint_cache.EndpointCache, 'get_admin_session')
@patch.object(tokens.TokenManager, 'validate')
@patch.object(endpoint_cache.EndpointCache,
'_generate_master_service_endpoint_map')
@mock.patch.object(endpoint_cache.EndpointCache, 'get_admin_session')
@mock.patch.object(tokens.TokenManager, 'validate')
@mock.patch.object(endpoint_cache.EndpointCache,
'_generate_master_service_endpoint_map')
def test_get_all_regions(self, mock_generate_cached_data, mock_tokens_validate,
mock_admin_session):
mock_generate_cached_data.return_value = FAKE_MASTER_SERVICE_ENDPOINT_MAP
@ -129,11 +131,11 @@ class EndpointCacheTest(base.DCCommonTestCase):
self.assertIn(CENTRAL_REGION, region_list)
self.assertIn(SUBCLOUD1_REGION, region_list)
@patch.object(endpoint_cache.EndpointCache, 'get_admin_session')
@patch.object(tokens.TokenManager, 'validate')
@patch.object(services.ServiceManager, 'list')
@patch.object(endpoint_cache.EndpointCache,
'_generate_master_service_endpoint_map')
@mock.patch.object(endpoint_cache.EndpointCache, 'get_admin_session')
@mock.patch.object(tokens.TokenManager, 'validate')
@mock.patch.object(services.ServiceManager, 'list')
@mock.patch.object(endpoint_cache.EndpointCache,
'_generate_master_service_endpoint_map')
def test_get_services_list(self, mock_generate_cached_data, mock_services_list,
mock_tokens_validate, mock_admin_session):
mock_services_list.return_value = FAKE_SERVICES_LIST
@ -142,10 +144,10 @@ class EndpointCacheTest(base.DCCommonTestCase):
services_list = endpoint_cache.EndpointCache.get_master_services_list()
self.assertEqual(FAKE_SERVICES_LIST, services_list)
@patch.object(endpoint_cache.EndpointCache, 'get_admin_session')
@patch.object(tokens.TokenManager, 'validate')
@patch.object(endpoint_cache.EndpointCache,
'_generate_master_service_endpoint_map')
@mock.patch.object(endpoint_cache.EndpointCache, 'get_admin_session')
@mock.patch.object(tokens.TokenManager, 'validate')
@mock.patch.object(endpoint_cache.EndpointCache,
'_generate_master_service_endpoint_map')
def test_update_master_service_endpoint_region(
self, mock_generate_cached_data, mock_tokens_validate,
mock_admin_session):

View File

@ -1,4 +1,4 @@
# Copyright (c) 2020-2023 Wind River Systems, Inc.
# Copyright (c) 2020-2024 Wind River Systems, Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
@ -127,8 +127,10 @@ class AnsiblePlaybook(object):
"""
with AnsiblePlaybook.lock:
AnsiblePlaybook.abort_status[self.subcloud_name]['abort'] = True
unabortable_flag = os.path.join(consts.ANSIBLE_OVERRIDES_PATH,
'.%s_deploy_not_abortable' % self.subcloud_name)
unabortable_flag = os.path.join(
consts.ANSIBLE_OVERRIDES_PATH,
'.%s_deploy_not_abortable' % self.subcloud_name
)
subp = AnsiblePlaybook.abort_status[self.subcloud_name]['subp']
while os.path.exists(unabortable_flag) and timeout > 0:
# If subprocess ended (subp.poll is not None), no further abort
@ -145,9 +147,9 @@ class AnsiblePlaybook(object):
:param log_file: Logs output to file
:param timeout: Timeout in seconds. Raises PlaybookExecutionTimeout
on timeout
on timeout
:param register_cleanup: Register the subprocess group for cleanup on
shutdown, if the underlying service supports cleanup.
shutdown, if the underlying service supports cleanup.
"""
exec_env = os.environ.copy()
exec_env["ANSIBLE_LOG_PATH"] = "/dev/null"
@ -172,8 +174,10 @@ class AnsiblePlaybook(object):
# Remove unabortable flag created by the playbook
# if present from previous executions
unabortable_flag = os.path.join(consts.ANSIBLE_OVERRIDES_PATH,
'.%s_deploy_not_abortable' % self.subcloud_name)
unabortable_flag = os.path.join(
consts.ANSIBLE_OVERRIDES_PATH,
'.%s_deploy_not_abortable' % self.subcloud_name
)
if os.path.exists(unabortable_flag):
os.remove(unabortable_flag)
@ -210,7 +214,8 @@ class AnsiblePlaybook(object):
# - playbook_failure is True with subp_rc != 0,
# aborted is True, unabortable_flag_exists is False
with AnsiblePlaybook.lock:
aborted = AnsiblePlaybook.abort_status[self.subcloud_name]['abort']
aborted = \
AnsiblePlaybook.abort_status[self.subcloud_name]['abort']
unabortable_flag_exists = os.path.exists(unabortable_flag)
playbook_failure = (subp_rc != 0 and
(not aborted or unabortable_flag_exists))
@ -266,7 +271,8 @@ def is_token_expiring_soon(token,
stale_token_duration_min=STALE_TOKEN_DURATION_MIN,
stale_token_duration_max=STALE_TOKEN_DURATION_MAX,
stale_token_duration_step=STALE_TOKEN_DURATION_STEP):
expiry_time = timeutils.normalize_time(timeutils.parse_isotime(token['expires_at']))
expiry_time = timeutils.normalize_time(timeutils.parse_isotime(
token['expires_at']))
duration = random.randrange(stale_token_duration_min,
stale_token_duration_max,
stale_token_duration_step)