Merge "Fix tox pep8 errors of type E231 in sysinv"

commit 704251a7fc
Authored by Zuul on 2018-08-09 20:57:23 +00:00, committed by Gerrit Code Review
29 changed files with 87 additions and 86 deletions
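
For context: E231 is the pycodestyle check "missing whitespace after ','", which also fires on ':' in dict literals and similar positions (colons inside slices are exempt). Every hunk below is a mechanical application of that one rule, paired with the tox.ini change at the end that stops ignoring it. A minimal sketch of what the checker flags (the snippet and file name are illustrative, not taken from the commit):

    # e231_example.py -- hypothetical snippet, not part of this commit
    caps = {'services': 'cinder','capabilities': {}}    # E231: missing whitespace after ','
    pair = {"vendor_id":"8086"}                         # E231: missing whitespace after ':'

    caps = {'services': 'cinder', 'capabilities': {}}   # fixed
    pair = {"vendor_id": "8086"}                        # fixed

    first_two = [1, 2, 3][0:2]                          # slice colons are exempt, no E231

    # To reproduce the check in isolation:
    #     flake8 --select=E231 e231_example.py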

@@ -27,12 +27,12 @@ LOG = logging.getLogger(__name__)
 ETHERNET_PCI_CLASSES = ['ethernet controller', 'network controller']
 # Look for other devices we may want to inventory.
-KNOWN_PCI_DEVICES = [{"vendor_id":constants.NOVA_PCI_ALIAS_QAT_PF_VENDOR,
-                      "device_id":constants.NOVA_PCI_ALIAS_QAT_DH895XCC_PF_DEVICE,
-                      "class_id":constants.NOVA_PCI_ALIAS_QAT_CLASS},
-                     {"vendor_id":constants.NOVA_PCI_ALIAS_QAT_PF_VENDOR,
-                      "device_id":constants.NOVA_PCI_ALIAS_QAT_C62X_PF_DEVICE,
-                      "class_id":constants.NOVA_PCI_ALIAS_QAT_CLASS},
+KNOWN_PCI_DEVICES = [{"vendor_id": constants.NOVA_PCI_ALIAS_QAT_PF_VENDOR,
+                      "device_id": constants.NOVA_PCI_ALIAS_QAT_DH895XCC_PF_DEVICE,
+                      "class_id": constants.NOVA_PCI_ALIAS_QAT_CLASS},
+                     {"vendor_id": constants.NOVA_PCI_ALIAS_QAT_PF_VENDOR,
+                      "device_id": constants.NOVA_PCI_ALIAS_QAT_C62X_PF_DEVICE,
+                      "class_id": constants.NOVA_PCI_ALIAS_QAT_CLASS},
                      {"class_id": constants.NOVA_PCI_ALIAS_GPU_CLASS}]
 # PCI-SIG 0x06 bridge devices to not inventory.

@@ -273,7 +273,8 @@ class AlarmController(rest.RestController):
     @wsme_pecan.wsexpose(AlarmCollection, [Query],
                          types.uuid, int, wtypes.text, wtypes.text, bool)
-    def get_all(self, q=[], marker=None, limit=None, sort_key='id', sort_dir='asc',include_suppress=False):
+    def get_all(self, q=[], marker=None, limit=None, sort_key='id',
+                sort_dir='asc', include_suppress=False):
         """Retrieve a list of ialarm.
         :param marker: pagination marker for large data sets.

@@ -63,7 +63,7 @@ class License(base.APIBase):
         self.fields = []
         # they are all an API-only attribute
-        for fp in ['name','status','expiry_date']:
+        for fp in ['name', 'status', 'expiry_date']:
             self.fields.append(fp)
             setattr(self, fp, kwargs.get(fp, None))
@@ -72,7 +72,7 @@ class License(base.APIBase):
         license = License(**rpc_license)
         if not expand:
-            license.unset_fields_except(['name','status',
+            license.unset_fields_except(['name', 'status',
                                          'expiry_date'])
         return license
@@ -114,8 +114,8 @@ class LicenseController(rest.RestController):
         licenses = license.get_licenses_info()
         return LicenseCollection.convert_with_links(
-            licenses, limit, url=resource_url,expand=expand,
-            sort_key=sort_key,sort_dir=sort_dir)
+            licenses, limit, url=resource_url, expand=expand,
+            sort_key=sort_key, sort_dir=sort_dir)
     @wsme_pecan.wsexpose(LicenseCollection, wtypes.text, int, wtypes.text, wtypes.text)
     def get_all(self, marker=None, limit=None, sort_key='id', sort_dir='asc'):

@@ -351,7 +351,7 @@ def _check_host(partition, ihost, idisk):
     if ihost['id'] != idisk['forihostid']:
         raise wsme.exc.ClientSideError(_("The requested disk (%s) for the partition "
                                          "is not present on host %s.") %
-                                       (idisk.uuid,ihost.hostname))
+                                       (idisk.uuid, ihost.hostname))
 def _partition_pre_patch_checks(partition_obj, patch_obj, host_obj):

@@ -122,7 +122,7 @@ class StorageBackend(base.APIBase):
             self.fields.append('confirmed')
         for k in self.fields:
-            setattr(self, k, kwargs.get(k,defaults.get(k)))
+            setattr(self, k, kwargs.get(k, defaults.get(k)))
     @classmethod
     def convert_with_links(cls, rpc_storage_backend, expand=True):

@@ -179,7 +179,7 @@ class StorageCeph(base.APIBase):
                         v, strict=True)
                 except ValueError as e:
                     raise exception.Invalid(e)
-            setattr(self, k, kwargs.get(k,defaults.get(k)))
+            setattr(self, k, kwargs.get(k, defaults.get(k)))
     @classmethod
     def convert_with_links(cls, rpc_storage_ceph, expand=True):
@@ -351,7 +351,7 @@ def _get_options_string(storage_ceph):
         if svc_dict and svc in services:
             svc_str = ""
             for key in svc_dict:
-                svc_str += "\t%s: %s\n" % (key, svc_dict.get(key,None))
+                svc_str += "\t%s: %s\n" % (key, svc_dict.get(key, None))
             if len(svc_str) > 0:
                 opt_str += "%s:\n%s" % (svc.title(), svc_str)
@@ -558,7 +558,7 @@ def check_and_update_services(storage_ceph):
             for k in check_data[s]:
                 cap.pop(k, None)
     values = {'services': ','.join(services),
-              'capabilities': cap,}
+              'capabilities': cap, }
     pecan.request.dbapi.storage_backend_update(sb.uuid, values)

@@ -321,7 +321,7 @@ def _check_and_update_services(storage_ceph_ext):
             for k in HIERA_DATA[s]:
                 cap.pop(k, None)
     values = {'services': ','.join(services),
-              'capabilities': cap,}
+              'capabilities': cap, }
     pecan.request.dbapi.storage_backend_update(sb.uuid, values)

@@ -119,7 +119,7 @@ class StorageExternal(base.APIBase):
         # Set the value for any of the field
         for k in self.fields:
-            setattr(self, k, kwargs.get(k,defaults.get(k)))
+            setattr(self, k, kwargs.get(k, defaults.get(k)))
     @classmethod
     def convert_with_links(cls, rpc_storage_external, expand=True):
@@ -280,7 +280,7 @@ def _get_options_string(storage_external):
         if svc_dict and svc in services:
             svc_str = ""
             for key in svc_dict:
-                svc_str += "\t%s: %s\n" % (key, svc_dict.get(key,None))
+                svc_str += "\t%s: %s\n" % (key, svc_dict.get(key, None))
             if len(svc_str) > 0:
                 opt_str += "%s:\n%s" % (svc.title(), svc_str)

@@ -117,7 +117,7 @@ class StorageFile(base.APIBase):
         # Set the value for any of the field
         for k in self.fields:
-            setattr(self, k, kwargs.get(k,defaults.get(k)))
+            setattr(self, k, kwargs.get(k, defaults.get(k)))
     @classmethod
     def convert_with_links(cls, rpc_storage_file, expand=True):
@@ -278,7 +278,7 @@ def _get_options_string(storage_file):
         if svc_dict and svc in services:
             svc_str = ""
             for key in svc_dict:
-                svc_str += "\t%s: %s\n" % (key, svc_dict.get(key,None))
+                svc_str += "\t%s: %s\n" % (key, svc_dict.get(key, None))
             if len(svc_str) > 0:
                 opt_str += "%s:\n%s" % (svc.title(), svc_str)

@@ -121,7 +121,7 @@ class StorageLVM(base.APIBase):
         # Set the value for any of the field
         for k in self.fields:
-            setattr(self, k, kwargs.get(k,defaults.get(k)))
+            setattr(self, k, kwargs.get(k, defaults.get(k)))
     @classmethod
     def convert_with_links(cls, rpc_storage_lvm, expand=True):
@@ -282,7 +282,7 @@ def _get_options_string(storage_lvm):
         if svc_dict and svc in services:
             svc_str = ""
             for key in svc_dict:
-                svc_str += "\t%s: %s\n" % (key, svc_dict.get(key,None))
+                svc_str += "\t%s: %s\n" % (key, svc_dict.get(key, None))
             if len(svc_str) > 0:
                 opt_str += "%s:\n%s" % (svc.title(), svc_str)
@@ -398,7 +398,7 @@ def _discover_and_validate_cinder_hiera_data(caps_dict):
         raise wsme.exc.ClientSideError(msg)
     # Log all the LVM parameters
-    for k,v in caps_dict.iteritems():
+    for k, v in caps_dict.iteritems():
         LOG.info("Cinder LVM Data %s = %s" % (k, v))

@@ -723,8 +723,8 @@ SB_TIER_CEPH_POOLS = [
 # varies greatly in Titanium Cloud and we want to avoid running too low on PGs
 CEPH_TARGET_PGS_PER_OSD = 200
 CEPH_REPLICATION_FACTOR_DEFAULT = 2
-CEPH_REPLICATION_FACTOR_SUPPORTED = [2,3]
-CEPH_MIN_REPLICATION_FACTOR_SUPPORTED = [1,2]
+CEPH_REPLICATION_FACTOR_SUPPORTED = [2, 3]
+CEPH_MIN_REPLICATION_FACTOR_SUPPORTED = [1, 2]
 CEPH_REPLICATION_MAP_DEFAULT = {
     # replication: min_replication
     2: 1,
@@ -1249,8 +1249,8 @@ SYSTEM_SECURITY_FEATURE_SPECTRE_MELTDOWN_V1_OPTS = 'nopti nospectre_v2'
 SYSTEM_SECURITY_FEATURE_SPECTRE_MELTDOWN_ALL = 'spectre_meltdown_all'
 SYSTEM_SECURITY_FEATURE_SPECTRE_MELTDOWN_ALL_OPTS = ''
 SYSTEM_SECURITY_FEATURE_SPECTRE_MELTDOWN_OPTS = {
-    SYSTEM_SECURITY_FEATURE_SPECTRE_MELTDOWN_V1:SYSTEM_SECURITY_FEATURE_SPECTRE_MELTDOWN_V1_OPTS,
-    SYSTEM_SECURITY_FEATURE_SPECTRE_MELTDOWN_ALL:SYSTEM_SECURITY_FEATURE_SPECTRE_MELTDOWN_ALL_OPTS
+    SYSTEM_SECURITY_FEATURE_SPECTRE_MELTDOWN_V1: SYSTEM_SECURITY_FEATURE_SPECTRE_MELTDOWN_V1_OPTS,
+    SYSTEM_SECURITY_FEATURE_SPECTRE_MELTDOWN_ALL: SYSTEM_SECURITY_FEATURE_SPECTRE_MELTDOWN_ALL_OPTS
 }

@@ -854,8 +854,8 @@ HORIZON_AUTH_PARAMETER_OPTIONAL = [
 ]
 HORIZON_AUTH_PARAMETER_VALIDATOR = {
-    constants.SERVICE_PARAM_HORIZON_AUTH_LOCKOUT_PERIOD_SEC:_validate_integer,
-    constants.SERVICE_PARAM_HORIZON_AUTH_LOCKOUT_RETRIES:_validate_integer,
+    constants.SERVICE_PARAM_HORIZON_AUTH_LOCKOUT_PERIOD_SEC: _validate_integer,
+    constants.SERVICE_PARAM_HORIZON_AUTH_LOCKOUT_RETRIES: _validate_integer,
 }
 HORIZON_AUTH_PARAMETER_RESOURCE = {

@@ -464,7 +464,7 @@ def is_valid_hex(num):
 def is_valid_pci_device_vendor_id(id):
     """Check if the provided id is a valid 16 bit hexadecimal."""
-    val = id.replace('0x','').strip()
+    val = id.replace('0x', '').strip()
     if not is_valid_hex(id):
         return False
     if (len(val) > 4):
@@ -474,7 +474,7 @@ def is_valid_pci_device_vendor_id(id):
 def is_valid_pci_class_id(id):
     """Check if the provided id is a valid 16 bit hexadecimal."""
-    val = id.replace('0x','').strip()
+    val = id.replace('0x', '').strip()
     if not is_valid_hex(id):
         return False
     if (len(val) > 6):

@@ -278,7 +278,7 @@ class ConductorManager(service.PeriodicService):
         # set default storage_backend
         values.update({'backend': constants.SB_TYPE_FILE,
                        'name': constants.SB_DEFAULT_NAMES[constants.SB_TYPE_FILE],
-                       'state':constants.SB_STATE_CONFIGURED,
+                       'state': constants.SB_STATE_CONFIGURED,
                        'task': constants.SB_TASK_NONE,
                        'services': None,
                        'capabilities': {}})
@@ -1665,7 +1665,7 @@ class ConductorManager(service.PeriodicService):
         for interface in iinterfaces:
             if constants.CLONE_ISO_MAC in interface['imac']:
                 LOG.warn("Missing interface [{},{}] on the cloned host"
-                         .format(interface['ifname'],interface['id']))
+                         .format(interface['ifname'], interface['id']))
                 raise exception.SysinvException(_(
                     "Missing interface on the cloned host"))
@@ -3845,7 +3845,7 @@ class ConductorManager(service.PeriodicService):
                 self.dbapi.ipv_update(
                     pv.uuid,
                     {'pv_state': constants.PROVISIONED,
-                     'lvm_pv_name':constants.CINDER_DRBD_DEVICE})
+                     'lvm_pv_name': constants.CINDER_DRBD_DEVICE})
                 self.dbapi.ilvg_update(
                     pv.forilvgid,
                     {'vg_state': constants.PROVISIONED})
@@ -4030,7 +4030,7 @@ class ConductorManager(service.PeriodicService):
         for ipv in ipvs:
             if ipv['pv_state'] != constants.PROVISIONED:
                 host_id = ipv['forihostid']
-                update_hosts_dict(host_id,constants.PV_AUDIT_REQUEST)
+                update_hosts_dict(host_id, constants.PV_AUDIT_REQUEST)
         # Make sure we get at least one good report for PVs & LVGs
         hosts = self.dbapi.ihost_get_list()
@@ -4126,7 +4126,7 @@ class ConductorManager(service.PeriodicService):
             if (iscsi_initiator_name and
                     ihost.iscsi_initiator_name is None):
                 LOG.info("%s updating iscsi initiator=%s" %
-                         (ihost.hostname,iscsi_initiator_name))
+                         (ihost.hostname, iscsi_initiator_name))
                 val['iscsi_initiator_name'] = iscsi_initiator_name
         if val:
@@ -4958,7 +4958,7 @@ class ConductorManager(service.PeriodicService):
                 self.dbapi, constants.SB_TYPE_LVM):
             pools = self._openstack.get_cinder_pools()
             for pool in pools:
-                if getattr(pool,'volume_backend_name','') == constants.CINDER_BACKEND_LVM:
+                if getattr(pool, 'volume_backend_name', '') == constants.CINDER_BACKEND_LVM:
                     return pool.to_dict()
         return None
@@ -5612,7 +5612,7 @@ class ConductorManager(service.PeriodicService):
         config_dict = {"personalities": personalities,
                        "host_uuids": [ctrl.uuid for ctrl in valid_ctrls],
                        "classes": classes,
-                       puppet_common.REPORT_STATUS_CFG: report_config,}
+                       puppet_common.REPORT_STATUS_CFG: report_config, }
         # TODO(oponcea) once sm supports in-service config reload always
         # set reboot=False
@@ -5704,7 +5704,7 @@ class ConductorManager(service.PeriodicService):
         self.dbapi.storage_ceph_update(sb_uuid,
                                        {'state': constants.SB_STATE_CONFIGURING,
-                                        'task':str({h.hostname: constants.SB_TASK_APPLY_MANIFESTS for h in valid_ctrls})})
+                                        'task': str({h.hostname: constants.SB_TASK_APPLY_MANIFESTS for h in valid_ctrls})})
         self._config_apply_runtime_manifest(context, config_uuid, config_dict)
@@ -8750,7 +8750,7 @@ class ConductorManager(service.PeriodicService):
         LOG.info("Calling nova cleanup")
         with open(os.devnull, "w") as fnull:
             try:
-                subprocess.check_call(["systemctl","start","nova-cleanup"],
+                subprocess.check_call(["systemctl", "start", "nova-cleanup"],
                                       stdout=fnull,
                                       stderr=fnull)
             except subprocess.CalledProcessError:
@@ -9116,7 +9116,7 @@ class ConductorManager(service.PeriodicService):
         pools = self._openstack.get_cinder_pools()
         if pools is not None:
             for pool in pools:
-                volume_backend = getattr(pool,'volume_backend_name','')
+                volume_backend = getattr(pool, 'volume_backend_name', '')
                 if volume_backend and volume_backend != constants.CINDER_BACKEND_LVM and \
                         volume_backend != constants.CINDER_BACKEND_CEPH:
                     return True

@@ -855,7 +855,7 @@ class OpenStackOperator(object):
             ceph_mon_info['ceph-mon-0-ip'] = ceph_mon_ips.get(
                 'ceph-mon-0-ip', '')
             ceph_mon_info['ceph-mon-1-ip'] = ceph_mon_ips.get(
-                'ceph-mon-1-ip','')
+                'ceph-mon-1-ip', '')
             ceph_mon_info['ceph-mon-2-ip'] = ceph_mon_ips.get(
                 'ceph-mon-2-ip', '')
         else:

@@ -54,14 +54,14 @@ class Token(object):
         Search the catalog of a service for the administrative url
         Returns: admin url or None on failure
         """
-        return self._get_service_url(service_type, service_name,'admin')
+        return self._get_service_url(service_type, service_name, 'admin')
     def get_service_internal_url(self, service_type, service_name):
         """
         Search the catalog of a service for the administrative url
         Returns: admin url or None on failure
         """
-        return self._get_service_url(service_type,service_name, 'internal')
+        return self._get_service_url(service_type, service_name, 'internal')
     def get_service_public_url(self, service_type, service_name):
         """

@@ -195,7 +195,7 @@ class GlancePuppet(openstack.OpenstackBasePuppet):
             config.update({'openstack::glance::params::rbd_store_pool':
                            rbd_store_pool,
                            'openstack::glance::params::rbd_store_ceph_conf':
-                           rbd_store_ceph_conf,})
+                           rbd_store_ceph_conf, })
         # set remote registry auth_url for subcloud
         if (self._distributed_cloud_role() ==

@@ -64,7 +64,7 @@ class IronicPuppet(openstack.OpenstackBasePuppet):
                 'ironic::neutron::api_endpoint': self._operator.neutron.get_internal_url(),
                 'ironic::neutron::auth_url': self._keystone_auth_uri(),
                 'ironic::neutron::project_name': self._get_service_tenant_name(),
-                'ironic::neutron::user_domain_name':self._get_service_user_domain_name(),
+                'ironic::neutron::user_domain_name': self._get_service_user_domain_name(),
                 'ironic::neutron::project_domain_name': self._get_service_project_domain_name(),
                 # Populate Glance credentials
                 'ironic::glance::auth_url': self._keystone_auth_uri(),

@@ -74,7 +74,7 @@ class MagnumPuppet(openstack.OpenstackBasePuppet):
                 'magnum::keystone::authtoken::user_domain_name':
                     self._get_service_user_domain_name(),
                 'magnum::keystone::authtoken::project_domain_name':
-                    self._get_service_project_domain_name(),})
+                    self._get_service_project_domain_name(), })
         return config
     def get_secure_system_config(self):

@@ -55,7 +55,7 @@ class MuranoPuppet(openstack.OpenstackBasePuppet):
                 'murano::keystone::auth::auth_name': ksuser,
                 'murano::keystone::auth::region': self._region_name(),
                 'murano::keystone::auth::tenant':
-                    self._get_service_tenant_name(),})
+                    self._get_service_tenant_name(), })
         return config

@@ -490,7 +490,7 @@ class NovaPuppet(openstack.OpenstackBasePuppet):
             'openstack::nova::storage::instances_lv_size':
                 "%sm" % instances_lv_size,
             'openstack::nova::storage::concurrent_disk_operations':
-                concurrent_disk_operations,}
+                concurrent_disk_operations, }
         # If NOVA is a service on a ceph-external backend, use the ephemeral_pool
         # and ceph_conf file that are stored in that DB entry.
@@ -510,7 +510,7 @@ class NovaPuppet(openstack.OpenstackBasePuppet):
             values.update({'openstack::nova::storage::images_rbd_pool':
                            images_rbd_pool,
                            'openstack::nova::storage::images_rbd_ceph_conf':
-                           images_rbd_ceph_conf,})
+                           images_rbd_ceph_conf, })
         return values
     # TODO(oponcea): Make lvm global_filter generic
@@ -576,7 +576,7 @@ class NovaPuppet(openstack.OpenstackBasePuppet):
     def _get_shared_pcpu_map(self, host):
         shared_cpus = self._get_host_cpu_list(
             host, function=constants.SHARED_FUNCTION, threads=True)
-        cpu_map = {c.numa_node:c.cpu for c in shared_cpus}
+        cpu_map = {c.numa_node: c.cpu for c in shared_cpus}
         return "\"%s\"" % ','.join(
             "%r:%r" % (node, cpu) for node, cpu in cpu_map.items())

@@ -149,7 +149,7 @@ class TestListServers(base.FunctionalTest):
         for id in xrange(1000):  # there is a limit of 1000 returned by json
             ndict = dbutils.get_test_ihost(id=id, hostname=id, mgmt_mac=id,
                                            forisystemid=self.system.id,
-                                           mgmt_ip="%s.%s.%s.%s" % (id,id,id,id),
+                                           mgmt_ip="%s.%s.%s.%s" % (id, id, id, id),
                                            uuid=uuidutils.generate_uuid())
             s = self.dbapi.ihost_create(ndict)
             ihosts.append(s['uuid'])
@@ -174,7 +174,7 @@ class TestListServers(base.FunctionalTest):
         for id in xrange(100):
             ndict = dbutils.get_test_ihost(id=id, hostname=id, mgmt_mac=id,
                                            forisystemid=self.system.id,
-                                           mgmt_ip="%s.%s.%s.%s" % (id,id,id,id),
+                                           mgmt_ip="%s.%s.%s.%s" % (id, id, id, id),
                                            uuid=uuidutils.generate_uuid())
             ihost = self.dbapi.ihost_create(ndict)
             ihosts.append(ihost['uuid'])

@@ -625,7 +625,7 @@ class StorageTierDependentTCs(base.FunctionalTest):
         with nested(mock.patch.object(ceph_utils.CephApiOperator, 'get_monitors_status'),
                     mock.patch.object(StorageBackendConfig, 'has_backend_configured'),
-                    mock.patch.object(rpcapi.ConductorAPI,'configure_osd_istor')) as (
+                    mock.patch.object(rpcapi.ConductorAPI, 'configure_osd_istor')) as (
                 mock_mon_status, mock_backend_configured, mock_osd):
             def fake_configure_osd_istor(context, istor_obj):
@@ -691,7 +691,7 @@ class StorageTierDependentTCs(base.FunctionalTest):
         with nested(mock.patch.object(ceph_utils.CephApiOperator, 'get_monitors_status'),
                     mock.patch.object(StorageBackendConfig, 'has_backend_configured'),
-                    mock.patch.object(rpcapi.ConductorAPI,'configure_osd_istor')) as (
+                    mock.patch.object(rpcapi.ConductorAPI, 'configure_osd_istor')) as (
                 mock_mon_status, mock_backend_configured, mock_osd):
             def fake_configure_osd_istor(context, istor_obj):
@@ -715,7 +715,7 @@ class StorageTierDependentTCs(base.FunctionalTest):
         with nested(mock.patch.object(ceph_utils.CephApiOperator, 'get_monitors_status'),
                     mock.patch.object(StorageBackendConfig, 'has_backend_configured'),
-                    mock.patch.object(rpcapi.ConductorAPI,'configure_osd_istor')) as (
+                    mock.patch.object(rpcapi.ConductorAPI, 'configure_osd_istor')) as (
                 mock_mon_status, mock_backend_configured, mock_osd):
             def fake_configure_osd_istor(context, istor_obj):
@@ -788,7 +788,7 @@ class StorageTierDependentTCs(base.FunctionalTest):
         values = {
             'backend': constants.SB_TYPE_CEPH,
             'capabilities': {'test_bparam3': 'foo'},
-            'name':'ceph-gold',
+            'name': 'ceph-gold',
             'confirmed': True
         }
         with nested(mock.patch.object(StorageBackendConfig, 'get_ceph_mon_ip_addresses'),
@@ -807,7 +807,7 @@ class StorageTierDependentTCs(base.FunctionalTest):
             'capabilities': {'test_bparam3': 'one',
                              'test_cparam3': 'two'},
             'services': constants.SB_SVC_CINDER,
-            'name':'ceph-gold',
+            'name': 'ceph-gold',
             'tier_uuid': saved_tier_uuid,
             'confirmed': True
         }

@@ -266,7 +266,7 @@ class UpdateCephCluster(base.DbTestCase):
         peers = self.dbapi.peers_get_all_by_cluster(cluster_uuid)
         self.assertEqual(
             set([(p.name, tuple(sorted(p.hosts))) for p in peers]),
-            {('group-0', ('storage-0',)),})
+            {('group-0', ('storage-0',)), })
         storage_1 = self._create_storage_ihost('storage-1')
         self.service._ceph.update_ceph_cluster(storage_1)
@@ -279,7 +279,7 @@ class UpdateCephCluster(base.DbTestCase):
         peers = self.dbapi.peers_get_all_by_cluster(cluster_uuid)
         self.assertEqual(
             set([(p.name, tuple(sorted(p.hosts))) for p in peers]),
-            {('group-0', ('storage-0', 'storage-1')),})
+            {('group-0', ('storage-0', 'storage-1')), })
         storage_2 = self._create_storage_ihost('storage-2')
         self.service._ceph.update_ceph_cluster(storage_2)
@@ -401,7 +401,7 @@ class UpdateCephCluster(base.DbTestCase):
         peers = self.dbapi.peers_get_all_by_cluster(cluster_uuid)
         self.assertEqual(
             set([(p.name, tuple(sorted(p.hosts))) for p in peers]),
-            {('group-0', ('storage-0',)),})
+            {('group-0', ('storage-0',)), })
         storage_1 = self._create_storage_ihost('storage-1')
         self.service._ceph.update_ceph_cluster(storage_1)
@@ -414,7 +414,7 @@ class UpdateCephCluster(base.DbTestCase):
         peers = self.dbapi.peers_get_all_by_cluster(cluster_uuid)
         self.assertEqual(
             set([(p.name, tuple(sorted(p.hosts))) for p in peers]),
-            {('group-0', ('storage-0', 'storage-1')),})
+            {('group-0', ('storage-0', 'storage-1')), })
         storage_2 = self._create_storage_ihost('storage-2')
         self.service._ceph.update_ceph_cluster(storage_2)

@@ -704,7 +704,7 @@ class TestMigrations(BaseMigrationTestCase, WalkVersionsMixin):
             'deleted_at': 'DateTime', 'task': 'String', 'location': 'Text',
             'created_at': 'DateTime', 'updated_at': 'DateTime', 'uptime': 'Integer',
             'capabilities': 'Text', 'config_status': 'String', 'config_applied': 'String',
-            'config_target': 'String','forisystemid': 'Integer'
+            'config_target': 'String', 'forisystemid': 'Integer'
         }
         for col, coltype in servers_col.items():
             self.assertTrue(isinstance(servers.c[col].type,
@@ -1241,7 +1241,7 @@ class TestMigrations(BaseMigrationTestCase, WalkVersionsMixin):
         sensors_analog = db_utils.get_table(engine, 'i_sensors_analog')
         sensors_analog_col = {
             'id': 'Integer', 'deleted_at': 'DateTime', 'created_at': 'DateTime',
-            'updated_at': 'DateTime','unit_base': 'String', 'unit_modifier': 'String',
+            'updated_at': 'DateTime', 'unit_base': 'String', 'unit_modifier': 'String',
             'unit_rate': 'String', 't_minor_lower': 'String', 't_minor_upper': 'String',
             't_major_lower': 'String', 't_major_upper': 'String', 't_critical_lower': 'String',
             't_critical_upper': 'String',
@@ -1253,7 +1253,7 @@ class TestMigrations(BaseMigrationTestCase, WalkVersionsMixin):
         pci_devices = db_utils.get_table(engine, 'pci_devices')
         pci_devices_col = {
             'id': 'Integer', 'uuid': 'String', 'deleted_at': 'DateTime',
-            'created_at': 'DateTime', 'updated_at': 'DateTime','host_id': 'Integer',
+            'created_at': 'DateTime', 'updated_at': 'DateTime', 'host_id': 'Integer',
             'name': 'String', 'pciaddr': 'String', 'pclass_id': 'String',
             'pvendor_id': 'String', 'pdevice_id': 'String', 'pclass': 'String', 'pvendor': 'String',
             'pdevice': 'String', 'psvendor': 'String', 'psdevice': 'String', 'numa_node': 'Integer',
@@ -1267,7 +1267,7 @@ class TestMigrations(BaseMigrationTestCase, WalkVersionsMixin):
         loads = db_utils.get_table(engine, 'loads')
         loads_col = {
             'id': 'Integer', 'uuid': 'String', 'deleted_at': 'DateTime',
-            'created_at': 'DateTime', 'updated_at': 'DateTime','state': 'String',
+            'created_at': 'DateTime', 'updated_at': 'DateTime', 'state': 'String',
             'software_version': 'String', 'compatible_version': 'String',
             'required_patches': 'String',
         }
@@ -1278,7 +1278,7 @@ class TestMigrations(BaseMigrationTestCase, WalkVersionsMixin):
         software_upgrade = db_utils.get_table(engine, 'software_upgrade')
        software_upgrade_col = {
             'id': 'Integer', 'uuid': 'String', 'deleted_at': 'DateTime',
-            'created_at': 'DateTime', 'updated_at': 'DateTime','state': 'String',
+            'created_at': 'DateTime', 'updated_at': 'DateTime', 'state': 'String',
             'from_load': 'Integer', 'to_load': 'Integer',
         }
         for col, coltype in software_upgrade_col.items():
@@ -1343,7 +1343,7 @@ class TestMigrations(BaseMigrationTestCase, WalkVersionsMixin):
             self.assertTrue(isinstance(storconfigs.c[col].type,
                                        getattr(sqlalchemy.types, coltype)))
         # make sure the rename worked properly
-        self.assertColumnNotExists(engine, 'i_storconfig','glance_gib')
+        self.assertColumnNotExists(engine, 'i_storconfig', 'glance_gib')
         self.assertColumnExists(engine, 'i_storconfig', 'glance_pool_gib')
     def _check_032(self, engine, data):
@@ -1469,7 +1469,7 @@ class TestMigrations(BaseMigrationTestCase, WalkVersionsMixin):
         # Assert data types for 5 new columns in table "i_storconfig"
         storconfigs = db_utils.get_table(engine, 'i_storconfig')
         storconfigs_cols = {
-            'state':'String',
+            'state': 'String',
             'task': 'String',
             'ceph_mon_gib': 'Integer',
             'ceph_mon_dev_ctrl0': 'String',
@@ -1533,7 +1533,7 @@ class TestMigrations(BaseMigrationTestCase, WalkVersionsMixin):
         # Assert data types for all columns in new table "i_horizon_lockout"
         horizon_lockout = db_utils.get_table(engine, 'i_horizon_lockout')
         horizon_lockout_cols = {
-            'lockout_time':'Integer',
+            'lockout_time': 'Integer',
             'lockout_retries': 'Integer',
         }
         for col, coltype in horizon_lockout_cols.items():

@@ -195,7 +195,7 @@ class DbNodeTestCase(base.DbTestCase):
         f = self._create_test_storage_backend_with_file()
         ll = self._create_test_storage_backend_with_lvm()
         res = self.dbapi.storage_backend_get_list(sort_key='backend')
-        self.assertEqual(len(res),3)
+        self.assertEqual(len(res), 3)
         self.assertEqual(c['backend'], res[0]['backend'])
         self.assertEqual(f['backend'], res[1]['backend'])
         self.assertEqual(ll['backend'], res[2]['backend'])
@@ -206,7 +206,7 @@ class DbNodeTestCase(base.DbTestCase):
         ll = self._create_test_storage_backend_with_lvm()
         res = self.dbapi.storage_backend_get_by_isystem(self.system['id'],
                                                         sort_key='backend')
-        self.assertEqual(len(res),3)
+        self.assertEqual(len(res), 3)
         self.assertEqual(c['backend'], res[0]['backend'])
         self.assertEqual(f['backend'], res[1]['backend'])
         self.assertEqual(ll['backend'], res[2]['backend'])
@@ -224,7 +224,7 @@ class DbNodeTestCase(base.DbTestCase):
         f = self._create_test_storage_backend_with_file()
         ll = self._create_test_storage_backend_with_lvm()
         res = self.dbapi.storage_backend_get_list(sort_key='backend')
-        self.assertEqual(len(res),3)
+        self.assertEqual(len(res), 3)
         self.assertEqual(c['backend'], res[0]['backend'])
         self.assertEqual(f['backend'], res[1]['backend'])
         self.assertEqual(ll['backend'], res[2]['backend'])
@@ -281,7 +281,7 @@ class DbNodeTestCase(base.DbTestCase):
     def test_storage_file_get_list(self):
         n = self._create_test_storage_backend_file()
         res = self.dbapi.storage_file_get_list()
-        self.assertEqual(len(res),1)
+        self.assertEqual(len(res), 1)
         self.assertEqual(n['backend'], res[0]['backend'])
         self.assertEqual(n['uuid'], res[0]['uuid'])
@@ -326,7 +326,7 @@ class DbNodeTestCase(base.DbTestCase):
     def test_storage_lvm_get_list(self):
         n = self._create_test_storage_backend_lvm()
         res = self.dbapi.storage_lvm_get_list()
-        self.assertEqual(len(res),1)
+        self.assertEqual(len(res), 1)
         self.assertEqual(n['backend'], res[0]['backend'])
         self.assertEqual(n['uuid'], res[0]['uuid'])
@@ -373,7 +373,7 @@ class DbNodeTestCase(base.DbTestCase):
     def test_storage_ceph_get_list(self):
         n = self._create_test_storage_backend_ceph()
         res = self.dbapi.storage_ceph_get_list()
-        self.assertEqual(len(res),1)
+        self.assertEqual(len(res), 1)
         self.assertEqual(n['backend'], res[0]['backend'])
         self.assertEqual(n['uuid'], res[0]['uuid'])

@@ -435,7 +435,7 @@ def get_test_stor(**kw):
     stor = {
         'id': kw.get('id', 2),
         'function': kw.get('function'),
-        'idisk_uuid':kw.get('idisk_uuid', 2),
+        'idisk_uuid': kw.get('idisk_uuid', 2),
         'forihostid': kw.get('forihostid', 2),
         'forilvgid': kw.get('forilvgid', 2),
     }
@@ -474,7 +474,7 @@ def get_test_storage_backend(**kw):
         'state': kw.get('state', None),
         'task': kw.get('task', None),
         'services': kw.get('services', None),
-        'capabilities': kw.get('capabilities',{}),
+        'capabilities': kw.get('capabilities', {}),
         'forisystemid': kw.get('forisystemid', None)
     }
     return inv
@@ -490,13 +490,13 @@ def get_test_ceph_storage_backend(**kw):
         'task': kw.get('task', None),
         'services': kw.get('services', None),
         'tier_id': kw.get('tier_id'),
-        'capabilities': kw.get('capabilities',{}),
+        'capabilities': kw.get('capabilities', {}),
         'forisystemid': kw.get('forisystemid', None),
-        'cinder_pool_gib': kw.get('cinder_pool_gib',80),
+        'cinder_pool_gib': kw.get('cinder_pool_gib', 80),
         'glance_pool_gib': kw.get('glance_pool_gib', 10),
         'ephemeral_pool_gib': kw.get('ephemeral_pool_gib', 0),
         'object_pool_gib': kw.get('object_pool_gib', 0),
-        'object_gateway':kw.get('object_gateway', False)
+        'object_gateway': kw.get('object_gateway', False)
     }
     return inv
@@ -510,7 +510,7 @@ def get_test_file_storage_backend(**kw):
         'state': kw.get('state', None),
         'task': kw.get('task', None),
         'services': kw.get('services', None),
-        'capabilities': kw.get('capabilities',{}),
+        'capabilities': kw.get('capabilities', {}),
         'forisystemid': kw.get('forisystemid', None)
     }
     return inv
@@ -525,7 +525,7 @@ def get_test_lvm_storage_backend(**kw):
         'state': kw.get('state', None),
         'task': kw.get('task', None),
         'services': kw.get('services', None),
-        'capabilities': kw.get('capabilities',{}),
+        'capabilities': kw.get('capabilities', {}),
         'forisystemid': kw.get('forisystemid', None)
     }
     return inv

@@ -1352,7 +1352,7 @@ class InterfaceTestCase(BaseTestCase):
         port0, iface0 = self._create_sriov_cx3_if_test(ifname0, vf_num)
         port1, iface1 = self._create_sriov_cx3_if_test(
-            ifname1, vf_num, pciaddr=port0['pciaddr'],dev_id=1)
+            ifname1, vf_num, pciaddr=port0['pciaddr'], dev_id=1)
         self._update_context()
         expected = "%s-%d;0;0" % (port0['pciaddr'], vf_num)

@@ -81,7 +81,7 @@ commands =
 # H231..H238 are python3 compatability
 # H401,H403,H404,H405 are docstring and not important
 [flake8]
-ignore = E501,E127,E128,E231,E402,E126,E722,H101,H102,H104,H105,H231,H232,H233,H234,H235,H236,H237,H238,H401,H403,H404,H405
+ignore = E501,E127,E128,E402,E126,E722,H101,H102,H104,H105,H231,H232,H233,H234,H235,H236,H237,H238,H401,H403,H404,H405
 builtins = _
 [testenv:flake8]
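
With E231 dropped from the ignore list, the flake8 environment shown above now enforces the rule, so any reintroduced violation fails the lint gate. A quick local check, assuming the tox environments defined in this file (the sysinv/ path argument is illustrative):

    # full lint run via tox
    tox -e flake8
    # or only the newly enforced rule
    flake8 --select=E231 sysinv/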