Added distributed cloud test suites to StarlingX

- test_alarm_aggregation
- test_dc_swact_host
- test_https_unshared
- test_shared_config_dns

Change-Id: I9c0528523c7321d34c60ddccebe31cb0143cc982
Signed-off-by: Yvonne Ding <yvonne.ding@windriver.com>
Yvonne Ding 2020-11-10 12:16:13 -05:00
parent d1e88a989e
commit 92ee9a56ec
16 changed files with 1655 additions and 27 deletions

View File

@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2019 Wind River Systems, Inc.
+# Copyright (c) 2019, 2020 Wind River Systems, Inc.
 #
 # SPDX-License-Identifier: Apache-2.0
 #
@@ -311,8 +311,16 @@ def pytest_configure(config):
                     horizon_visible=horizon_visible)

     if lab.get('central_region'):
-        ProjVar.set_var(IS_DC=True,
-                        PRIMARY_SUBCLOUD=config.getoption('subcloud'))
+        default_subcloud = config.getoption('subcloud')
+        subcloud_list = config.getoption('subcloud_list')
+        if subcloud_list:
+            if default_subcloud not in subcloud_list:
+                msg = ("default subcloud --subcloud=%s not in --subcloud_list=%s" %
+                       (default_subcloud, subcloud_list))
+                LOG.error(msg)
+                pytest.exit(msg)
+        ProjVar.set_var(IS_DC=True, PRIMARY_SUBCLOUD=default_subcloud,
+                        SUBCLOUD_LIST=subcloud_list)

     if is_vbox:
         ProjVar.set_var(IS_VBOX=True)
@@ -356,6 +364,14 @@ def pytest_addoption(parser):
     count_help = "Repeat tests x times - NO stop on failure"
     horizon_visible_help = "Display horizon on screen"
     no_console_log = 'Print minimal console logs'
+    region_help = "Multi-region parameter. Use when the connected region is " \
+                  "different from the region under test. " \
+                  "e.g., creating a vm on RegionTwo from RegionOne"
+    subcloud_help = "Default subcloud used by automated tests when booting a vm, " \
+                    "etc. 'subcloud1' if unspecified."
+    subcloud_list_help = "Specifies the subclouds of a DC lab, e.g., " \
+                         "--subcloud_list=subcloud1,subcloud2. If unspecified, the " \
+                         "lab's subclouds from lab.py will be used."

     # Test session options on installed and configured STX system:
     parser.addoption('--testcase-config', action='store',
@@ -370,6 +386,14 @@ def pytest_addoption(parser):
     parser.addoption('--vm', '--vbox', action='store_true', dest='is_vbox',
                      help=vbox_help)

+    # Multi-region or distributed cloud options
+    parser.addoption('--region', action='store', metavar='region',
+                     default=None, help=region_help)
+    parser.addoption('--subcloud', action='store', metavar='subcloud',
+                     default='subcloud1', help=subcloud_help)
+    parser.addoption('--subcloud_list', action='store', default=None,
+                     help=subcloud_list_help)
+
     # Debugging/Log collection options:
     parser.addoption('--sessiondir', '--session_dir', '--session-dir',
                      action='store', dest='sessiondir',
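For reference, a minimal sketch of a local run exercising the new options (test paths and subcloud names are illustrative, not part of this change):

import pytest

# Hypothetical invocation: subcloud1 is the primary subcloud and the session
# is limited to two subclouds. --subcloud must appear in --subcloud_list,
# otherwise pytest_configure() above exits the session.
pytest.main(['--subcloud=subcloud1',
             '--subcloud_list=subcloud1,subcloud2',
             'testcases/functional/dc/'])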

View File

@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2019 Wind River Systems, Inc.
+# Copyright (c) 2019, 2020 Wind River Systems, Inc.
 #
 # SPDX-License-Identifier: Apache-2.0
 #
@@ -35,6 +35,7 @@ class ProjVar:
         'VSWITCH_TYPE': None,
         'IS_DC': False,
         'PRIMARY_SUBCLOUD': None,
+        'SUBCLOUD_LIST': None,
         'BUILD_INFO': {},
         'TEMP_DIR': '',
         'INSTANCE_BACKING': {},

View File

@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2019 Wind River Systems, Inc.
+# Copyright (c) 2019, 2020 Wind River Systems, Inc.
 #
 # SPDX-License-Identifier: Apache-2.0
 #
@@ -20,7 +20,7 @@ class HostTimeout:
     REBOOT = 2400
     # Active controller switched and being able to run openstack CLI after
     # system host-swact returned
-    SWACT = 180
+    SWACT = 600
     # Host in locked state after system host-lock cli returned
     LOCK = 900
     # Task clears in system host-show after host reaches enabled/available state
@@ -144,7 +144,7 @@ class OrchestrationPhaseTimeout:

 class DCTimeout:
-    SYNC = 660  # 10 minutes + 1
+    SYNC = 3600  # 60 minutes
     SUBCLOUD_AUDIT = 600  # 10 minutes
     PATCH_AUDIT = 240  # 4 minutes

View File

@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2019 Wind River Systems, Inc.
+# Copyright (c) 2019, 2020 Wind River Systems, Inc.
 #
 # SPDX-License-Identifier: Apache-2.0
 #
@@ -9,6 +9,7 @@
 # DO NOT import anything from helper modules to this module #
 #############################################################

+import socket
 import os
 import re
 import time
@@ -788,6 +789,21 @@ def ssh_to_remote_node(host, username=None, password=None, prompt=None,
         remote_ssh.close()


+def ssh_to_stx(lab=None, set_client=False):
+    if not lab:
+        lab = ProjVar.get_var('LAB')
+
+    con_ssh = SSHClient(lab['floating ip'], user=HostLinuxUser.get_user(),
+                        password=HostLinuxUser.get_password(),
+                        initial_prompt=Prompt.CONTROLLER_PROMPT)
+    con_ssh.connect(retry=True, retry_timeout=30, use_current=False)
+    if set_client:
+        ControllerClient.set_active_controller(con_ssh)
+
+    return con_ssh
+
+
 def get_yaml_data(filepath):
     """
     Returns the yaml data in json
@@ -817,3 +833,27 @@ def write_yaml_data_to_file(data, filename, directory=None):
     with open(src_path, 'w') as f:
         yaml.dump(data, f)
     return src_path
+
+
+def get_lab_fip(region=None):
+    """
+    Returns the system OAM floating ip
+    Args:
+        region (str|None): central_region or subcloud, only applicable to DC
+    Returns (str): floating ip of the lab
+    """
+    if ProjVar.get_var('IS_DC'):
+        if not region:
+            region = ProjVar.get_var('PRIMARY_SUBCLOUD')
+        elif region == 'RegionOne':
+            region = 'central_region'
+        oam_fip = ProjVar.get_var('LAB')[region]["floating ip"]
+    else:
+        oam_fip = ProjVar.get_var('LAB')["floating ip"]
+
+    return oam_fip
+
+
+def get_dnsname(region='RegionOne'):
+    try:
+        # Reverse lookup; assumes the lab floating ip has a PTR record
+        return socket.gethostbyaddr(get_lab_fip(region=region))[0]
+    except socket.error:
+        # means that the dns name is unreachable
+        return None
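A quick sketch of how these helpers combine, assuming a configured DC lab (the region name is a placeholder):

from keywords import common

fip = common.get_lab_fip(region='subcloud1')        # subcloud OAM floating ip
dns_name = common.get_dnsname(region='subcloud1')   # None if not resolvable
endpoint_host = dns_name if dns_name else fip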

View File

@@ -0,0 +1,488 @@
#
# Copyright (c) 2020 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
import time
import copy
from utils import cli, exceptions, table_parser
from utils.tis_log import LOG
from utils.clients.ssh import ControllerClient
from consts.auth import Tenant, HostLinuxUser
from consts.proj_vars import ProjVar
from consts.timeout import DCTimeout
from consts.filepaths import SysLogPath
from keywords import system_helper, nova_helper
def get_subclouds(field='name', name=None, avail=None, sync=None, mgmt=None, deploy=None,
auth_info=Tenant.get('admin_platform', 'RegionOne'), con_ssh=None,
source_openrc=None, rtn_dict=False, evaluate=False, strict=True, regex=False,
filter_subclouds=True):
"""
Get subclouds values
Args:
field (str | tuple): fields of value to get
name (str): subcloud name
avail (str): subcloud availability status
sync (str): subcloud sync status
mgmt (str): subcloud management status
deploy (str): subcloud deploy status
auth_info (dict):
con_ssh (SSHClient):
source_openrc (None|bool):
rtn_dict (bool): whether to return dict of field/value pairs
evaluate (bool): whether to convert value to python data type
strict (bool): True to use re.match, False to use re.search
regex (bool): whether to use regex to find value(s)
filter_subclouds (bool): whether to filter out the subclouds that are not in
the --subcloud_list arg
Returns (list | dict):
when rtn_dict is False, list of values
when rtn_dict is True, dict of field/values pairs
"""
table_ = table_parser.table(cli.dcmanager('subcloud list', ssh_client=con_ssh,
auth_info=auth_info, source_openrc=source_openrc)[1])
arg_map = {'name': name,
'availability': avail,
'sync': sync,
'management': mgmt,
'deploy status': deploy}
kwargs = {key: val for key, val in arg_map.items() if val}
if filter_subclouds:
filtered_subclouds = table_parser.get_values(table_, target_header=field, **kwargs)
subcloud_list = ProjVar.get_var('SUBCLOUD_LIST')
if subcloud_list:
filtered_subclouds = [subcloud for subcloud in filtered_subclouds
if subcloud in subcloud_list]
LOG.info('filtered_subclouds: {}'.format(filtered_subclouds))
return filtered_subclouds
else:
return table_parser.get_multi_values(table_, field, rtn_dict=rtn_dict, evaluate=evaluate,
strict=strict, regex=regex, **kwargs)
def wait_for_subcloud_status(subcloud, avail=None, sync=None, mgmt=None, deploy=None,
timeout=DCTimeout.SUBCLOUD_AUDIT, check_interval=30,
auth_info=Tenant.get('admin_platform', 'RegionOne'),
con_ssh=None, source_openrc=None, fail_ok=False):
"""
Wait for subcloud status
Args:
subcloud:
avail:
sync:
mgmt:
deploy:
timeout:
check_interval:
auth_info:
con_ssh:
source_openrc:
fail_ok:
Returns:
"""
if not subcloud:
raise ValueError("Subcloud name must be specified")
expt_status = {}
if avail:
expt_status['avail'] = avail
if sync:
expt_status['sync'] = sync
if mgmt:
expt_status['mgmt'] = mgmt
if deploy:
expt_status['deploy'] = deploy
if not expt_status:
raise ValueError("At least one expected status of the subcloud must be specified.")
LOG.info("Wait for {} status: {}".format(subcloud, expt_status))
end_time = time.time() + timeout + check_interval
while time.time() < end_time:
if get_subclouds(field='name', name=subcloud, con_ssh=con_ssh, source_openrc=source_openrc,
auth_info=auth_info, **expt_status):
return 0, subcloud
LOG.info("Not in expected states yet...")
time.sleep(check_interval)
msg = '{} status did not reach {} within {} seconds'.format(subcloud, expt_status, timeout)
LOG.warning(msg)
if fail_ok:
return 1, msg
else:
raise exceptions.DCError(msg)
def _manage_unmanage_subcloud(subcloud=None, manage=False, check_first=True, fail_ok=False,
con_ssh=None, auth_info=Tenant.get('admin_platform', 'RegionOne'),
source_openrc=False):
"""
Manage/Unmanage given subcloud(s)
Args:
subcloud:
manage:
check_first:
fail_ok:
Returns:
"""
operation = 'manage' if manage else 'unmanage'
expt_state = '{}d'.format(operation)
if not subcloud:
subcloud = [ProjVar.get_var('PRIMARY_SUBCLOUD')]
elif isinstance(subcloud, str):
subcloud = [subcloud]
subclouds_to_update = list(subcloud)
if check_first:
subclouds_in_state = get_subclouds(mgmt=expt_state, con_ssh=con_ssh, auth_info=auth_info)
subclouds_to_update = list(set(subclouds_to_update) - set(subclouds_in_state))
if not subclouds_to_update:
LOG.info("{} already {}. Do nothing.".format(subcloud, expt_state))
return -1, []
LOG.info("Attempt to {}: {}".format(operation, subclouds_to_update))
failed_subclouds = []
for subcloud_ in subclouds_to_update:
code, out = cli.dcmanager('subcloud ' + operation, subcloud_, ssh_client=con_ssh,
fail_ok=True, auth_info=auth_info, source_openrc=source_openrc)
if code > 0:
failed_subclouds.append(subcloud_)
if failed_subclouds:
err = "Failed to {} {}".format(operation, failed_subclouds)
if fail_ok:
LOG.info(err)
return 1, failed_subclouds
raise exceptions.DCError(err)
LOG.info("Check management status for {} after dcmanager subcloud {}".format(
subclouds_to_update, operation))
mgmt_states = get_subclouds(field='management', name=subclouds_to_update, auth_info=auth_info,
con_ssh=con_ssh)
failed_subclouds = \
[subclouds_to_update[i] for i in range(len(mgmt_states)) if mgmt_states[i] != expt_state]
if failed_subclouds:
raise exceptions.DCError("{} not {} after dcmanger subcloud {}".format(
failed_subclouds, expt_state, operation))
return 0, subclouds_to_update
def manage_subcloud(subcloud=None, check_first=True, fail_ok=False, con_ssh=None):
"""
Manage subcloud(s)
Args:
subcloud (str|tuple|list):
check_first (bool):
fail_ok (bool):
con_ssh(SSClient):
Returns (tuple):
(-1, []) All given subcloud(s) already managed. Do nothing.
(0, [<updated subclouds>]) Successfully managed the given subcloud(s)
(1, [<cli_rejected_subclouds>]) dcmanager manage cli failed on these subcloud(s)
"""
return _manage_unmanage_subcloud(subcloud=subcloud, manage=True, check_first=check_first,
fail_ok=fail_ok,
con_ssh=con_ssh)
def unmanage_subcloud(subcloud=None, check_first=True, fail_ok=False, con_ssh=None,
source_openrc=False):
"""
Unmanage subcloud(s)
Args:
subcloud (str|tuple|list):
check_first (bool):
fail_ok (bool):
con_ssh (SSHClient):
Returns (tuple):
(-1, []) All given subcloud(s) already unmanaged. Do nothing.
(0, [<updated subclouds>]) Successfully unmanaged the given subcloud(s)
(1, [<cli_rejected_subclouds>]) dcmanager unmanage cli failed on these subcloud(s)
"""
return _manage_unmanage_subcloud(subcloud=subcloud, manage=False, check_first=check_first,
fail_ok=fail_ok, con_ssh=con_ssh, source_openrc=source_openrc)
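As a usage sketch (the subcloud name is a placeholder), a caller can tolerate a rejected cli and re-manage afterwards:

from keywords import dc_helper

code, subclouds = dc_helper.unmanage_subcloud('subcloud1', fail_ok=True)
if code == 0:
    # ... apply config changes that must not sync to the subcloud ...
    dc_helper.manage_subcloud('subcloud1')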
def wait_for_subcloud_config(func, *func_args, subcloud=None, config_name=None,
expected_value=None, auth_name='admin_platform', fail_ok=False,
timeout=DCTimeout.SYNC, check_interval=30, strict_order=True,
**func_kwargs):
"""
Wait for subcloud configuration to reach expected value
Args:
subcloud (str|None):
func: function used to get the current value; it must accept con_ssh and auth_info parameters
*func_args: positional args for above func. Should NOT include auth_info or con_ssh.
config_name (str): such as dns, keypair, etc
expected_value (None|str|list):
auth_name (str): auth dict name. e.g., admin_platform, admin, tenant1, TENANT2, etc
fail_ok (bool):
timeout (int):
check_interval (int):
strict_order (bool)
**func_kwargs: kwargs for the given func. auth_info and con_ssh may be provided here; otherwise the subcloud's own con_ssh and auth_info are used
Returns (tuple):
(0, <subcloud_config>) # same as expected
(1, <subcloud_config>) # did not update within timeout
(2, <subcloud_config>) # updated to unexpected value
"""
if not subcloud:
subcloud = ProjVar.get_var('PRIMARY_SUBCLOUD')
config_name = ' ' + config_name if config_name else ''
if expected_value is None:
central_ssh = ControllerClient.get_active_controller(name='RegionOne')
expected_value = func(con_ssh=central_ssh,
auth_info=Tenant.get(auth_name, dc_region='RegionOne'))
elif isinstance(expected_value, str):
expected_value = expected_value.split(sep=',')
if not strict_order:
expected_value = sorted(list(expected_value))
LOG.info("Wait for {}{} to be {}".format(subcloud, config_name, expected_value))
if not func_kwargs.get('con_ssh', None):
func_kwargs['con_ssh'] = ControllerClient.get_active_controller(name=subcloud)
if not func_kwargs.get('auth_info', None):
func_kwargs['auth_info'] = Tenant.get(auth_name, dc_region=subcloud)
origin_subcloud_val = func(*func_args, **func_kwargs)
subcloud_val = copy.copy(origin_subcloud_val)
if isinstance(subcloud_val, str):
subcloud_val = subcloud_val.split(sep=',')
if not strict_order:
subcloud_val = sorted(list(subcloud_val))
end_time = time.time() + timeout + check_interval
while time.time() < end_time:
if subcloud_val == expected_value:
LOG.info("{}{} setting is same as central region".format(subcloud, config_name))
return 0, subcloud_val
elif subcloud_val != origin_subcloud_val:
msg = '{}{} config changed to unexpected value. Expected: {}; Actual: {}'.\
format(subcloud, config_name, expected_value, subcloud_val)
if fail_ok:
LOG.info(msg)
return 2, subcloud_val
else:
raise exceptions.DCError(msg)
time.sleep(check_interval)
subcloud_val = func(*func_args, **func_kwargs)
msg = '{}{} config did not reach: {} within {} seconds; actual: {}'.format(
subcloud, config_name, expected_value, timeout, subcloud_val)
if fail_ok:
LOG.info(msg)
return 1, subcloud_val
else:
raise exceptions.DCError(msg)
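For instance, a hypothetical caller can pass any getter that accepts con_ssh and auth_info, mirroring what the DNS/NTP wrappers below do:

from keywords import system_helper

# Wait until subcloud1's NTP servers match the central region's;
# expected_value=None triggers the RegionOne lookup above.
code, value = wait_for_subcloud_config(system_helper.get_ntp_servers,
                                       subcloud='subcloud1',
                                       config_name='NTP', fail_ok=True)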
def wait_for_sync_audit(subclouds, con_ssh=None, fail_ok=False, filters_regex=None,
timeout=DCTimeout.SYNC):
"""
Wait for sync audit log messages in dcmanager.log for the given subcloud(s)
Args:
subclouds (list|tuple|str):
con_ssh:
fail_ok:
filters_regex: e.g., ['audit_action.*keypair', 'Clean audit.*ntp'], '\/compute'
timeout:
Returns (tuple):
(True, <res_dict>)
(False, <res_dict>)
"""
if not con_ssh:
con_ssh = ControllerClient.get_active_controller('RegionOne')
if isinstance(subclouds, str):
subclouds = [subclouds]
LOG.info("Waiting for sync audit in dcmanager.log for: {}".format(subclouds))
if not filters_regex:
filters_regex = ['platform', 'patching', 'identity']
elif isinstance(filters_regex, str):
filters_regex = [filters_regex]
subclouds_dict = {subcloud: list(filters_regex) for subcloud in subclouds}
res = {subcloud: False for subcloud in subclouds}
subclouds_to_wait = list(subclouds)
end_time = time.time() + timeout
expt_list = []
for subcloud in subclouds_dict:
expt_list += ['{}.*{}'.format(subcloud, service) for service in subclouds_dict[subcloud]]
con_ssh.send('tail -n 0 -f {}'.format(SysLogPath.DC_ORCH))
try:
while time.time() < end_time:
index = con_ssh.expect(expt_list, timeout=timeout, fail_ok=True)
if index >= 0:
subcloud_, service_ = expt_list[index].split('.*', maxsplit=1)
subclouds_dict[subcloud_].remove(service_)
expt_list.pop(index)
if not subclouds_dict[subcloud_]:
subclouds_to_wait.remove(subcloud_)
subclouds_dict.pop(subcloud_)
res[subcloud_] = True
if not subclouds_to_wait:
LOG.info("sync request logged for: {}".format(subclouds))
return True, res
else:
msg = 'sync audit for {} not shown in {} in {}s: {}'.format(
subclouds_to_wait, SysLogPath.DC_ORCH, timeout, subclouds_dict)
if fail_ok:
LOG.info(msg)
for subcloud in subclouds_to_wait:
res[subcloud] = False
return False, res
else:
raise exceptions.DCError(msg)
finally:
con_ssh.send_control()
con_ssh.expect()
def wait_for_subcloud_dns_config(subcloud=None, subcloud_ssh=None, expected_dns=None,
fail_ok=False, timeout=DCTimeout.SYNC, check_interval=30):
"""
Wait for dns configuration to reach expected value
Args:
subcloud (str|None):
subcloud_ssh (None|SSHClient):
expected_dns (None|str|list):
fail_ok (bool):
timeout (int):
check_interval (int):
Returns (tuple):
(0, <subcloud_dns_servers>) # same as expected
(1, <subcloud_dns_servers>) # did not update within timeout
(2, <subcloud_dns_servers>) # updated to unexpected value
"""
func = system_helper.get_dns_servers
func_kwargs = {'con_ssh': subcloud_ssh} if subcloud_ssh else {}
return wait_for_subcloud_config(subcloud=subcloud, func=func, config_name='DNS',
expected_value=expected_dns, fail_ok=fail_ok, timeout=timeout,
check_interval=check_interval, **func_kwargs)
def wait_for_subcloud_snmp_comms(subcloud=None, subcloud_ssh=None, expected_comms=None,
fail_ok=False, timeout=DCTimeout.SYNC, check_interval=30):
"""
Wait for SNMP community strings configuration to reach expected value
Args:
subcloud (str|None):
subcloud_ssh (None|SSHClient):
expected_comms (None|str|list):
fail_ok (bool):
timeout (int):
check_interval (int):
Returns (tuple):
(0, <subcloud_snmp_comms>) # same as expected
(1, <subcloud_snmp_comms>) # did not update within timeout
(2, <subcloud_snmp_comms>) # updated to unexpected value
"""
func = system_helper.get_snmp_comms
func_kwargs = {'con_ssh': subcloud_ssh} if subcloud_ssh else {}
return wait_for_subcloud_config(subcloud=subcloud, func=func,
config_name='SNMP Community strings',
expected_value=expected_comms, fail_ok=fail_ok,
timeout=timeout, check_interval=check_interval,
strict_order=False, **func_kwargs)
def wait_for_subcloud_snmp_trapdests(subcloud=None, subcloud_ssh=None, expected_trapdests=None,
fail_ok=False, timeout=DCTimeout.SYNC, check_interval=30):
"""
Wait for SNMP trap destinations configuration to reach expected value
Args:
subcloud (str|None):
subcloud_ssh (None|SSHClient):
expected_trapdests (None|str|list):
fail_ok (bool):
timeout (int):
check_interval (int):
Returns (tuple):
(0, <subcloud_snmp_trapdests>) # same as expected
(1, <subcloud_snmp_trapdests>) # did not update within timeout
(2, <subcloud_snmp_trapdests>) # updated to unexpected value
"""
func = system_helper.get_snmp_trapdests
func_kwargs = {'con_ssh': subcloud_ssh} if subcloud_ssh else {}
return wait_for_subcloud_config(subcloud=subcloud, func=func,
config_name='SNMP Trap destinations',
expected_value=expected_trapdests, fail_ok=fail_ok,
timeout=timeout, check_interval=check_interval,
strict_order=False, **func_kwargs)
def wait_for_subcloud_ntp_config(subcloud=None, subcloud_ssh=None, expected_ntp=None,
clear_alarm=True, fail_ok=False, timeout=DCTimeout.SYNC,
check_interval=30):
"""
Wait for ntp configuration to reach expected value
Args:
subcloud (str|None):
subcloud_ssh (None|SSHClient):
expected_ntp (None|str|list):
clear_alarm (bool)
fail_ok (bool):
timeout (int):
check_interval (int):
Returns (tuple):
(0, <subcloud_ntp_servers>) # same as expected
(1, <subcloud_ntp_servers>) # did not update within timeout
(2, <subcloud_ntp_servers>) # updated to unexpected value
"""
if not subcloud:
subcloud = ProjVar.get_var('PRIMARY_SUBCLOUD')
func_kwargs = {'auth_info': Tenant.get('admin_platform', subcloud)}
if subcloud_ssh:
func_kwargs['con_ssh'] = subcloud_ssh
func = system_helper.get_ntp_servers
res = wait_for_subcloud_config(subcloud=subcloud, func=func, config_name='NTP',
expected_value=expected_ntp, fail_ok=fail_ok, timeout=timeout,
check_interval=check_interval, **func_kwargs)
if res[0] in (0, 2) and clear_alarm:
system_helper.wait_and_clear_config_out_of_date_alarms(host_type='controller',
**func_kwargs)
return res

View File

@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2019 Wind River Systems, Inc.
+# Copyright (c) 2019, 2020 Wind River Systems, Inc.
 #
 # SPDX-License-Identifier: Apache-2.0
 #
@@ -7,9 +7,10 @@
 import os

+from keywords import common
 from utils.tis_log import LOG
 from utils.horizon.helper import HorizonDriver
-from consts.auth import Tenant
+from consts.auth import Tenant, CliAuth
 from consts.proj_vars import ProjVar
@@ -43,3 +44,19 @@ def download_openrc_files(quit_driver=True):
     LOG.info("openrc files are successfully downloaded to: {}".format(local_dir))
     return rc_files
+
+
+def get_url(dnsname=False):
+    """
+    Get the base url of the Horizon application
+    Args:
+        dnsname (bool): True to return the dns name of the host instead of the IP
+    Returns (str): the url on the active controller to access Horizon
+    """
+    domain = common.get_dnsname(region='RegionOne') if dnsname else \
+        common.get_lab_fip(region='RegionOne')
+    prefix = 'https' if CliAuth.get_var('https') else 'http'
+    port = 8443 if prefix == 'https' else 8080
+    return '{}://{}:{}'.format(prefix, domain, port)

View File

@@ -413,8 +413,17 @@ def get_endpoints_values(endpoint_id, fields, con_ssh=None,
     return table_parser.get_multi_values_two_col_table(table_, fields)


-def is_https_enabled(con_ssh=None, source_openrc=True,
+def is_https_enabled(con_ssh=None, source_openrc=True, interface='public',
                      auth_info=Tenant.get('admin_platform')):
+    """
+    Check whether the given keystone endpoint interface uses https
+    Args:
+        con_ssh:
+        source_openrc:
+        interface: endpoint interface, default is public
+        auth_info:
+    Returns (bool): True if all keystone URLs for the interface are https
+    """
     if not con_ssh:
         con_name = auth_info.get('region') if (
             auth_info and ProjVar.get_var('IS_DC')) else None
@@ -425,10 +434,11 @@ def is_https_enabled(con_ssh=None, source_openrc=True,
                                       source_openrc=source_openrc)[1])
     con_ssh.exec_cmd('unset OS_REGION_NAME')  # Workaround
     filters = {'Service Name': 'keystone', 'Service Type': 'identity',
-               'Interface': 'public'}
-    keystone_pub = table_parser.get_values(table_=table_, target_header='URL',
-                                           **filters)[0]
-    return 'https' in keystone_pub
+               'Interface': interface}
+    keystone_values = table_parser.get_values(table_=table_, target_header='URL',
+                                              **filters)
+    LOG.info('keystone {} URLs: {}'.format(interface, keystone_values))
+    return all('https' in i for i in keystone_values)


 def delete_users(user, fail_ok=False, auth_info=Tenant.get('admin'),

View File

@@ -1,14 +1,16 @@
 #
-# Copyright (c) 2019 Wind River Systems, Inc.
+# Copyright (c) 2019, 2020 Wind River Systems, Inc.
 #
 # SPDX-License-Identifier: Apache-2.0
 #

+import json
 import random
 import re
 import os
 import time
+import requests

 from pexpect import EOF
 from string import ascii_lowercase, ascii_uppercase, digits
@@ -1111,3 +1113,172 @@ def fetch_cert_file(cert_file=None, scp_to_local=True, con_ssh=None):
     LOG.info("Cert file copied to {} on localhost".format(dest_path))
     return cert_file
def get_auth_token(region=None, auth_info=Tenant.get('admin_platform'), use_dnsname=True):
"""
Get an authentication token from keystone
Args:
region(str): the cloud region to get the keystone token from
auth_info:
use_dnsname(bool): True to use the dns name instead of the IP for the rest request
Returns(str|None): Authentication token
"""
keystone_endpoint = keystone_helper.get_endpoints(field='URL', service_name='keystone',
interface="public", region=region,
auth_info=auth_info)[0]
keystone_url = '{}/{}'.format(keystone_endpoint, 'auth/tokens')
if use_dnsname:
lab_ip = common.get_lab_fip(region=region)
lab_dns_name = common.get_dnsname(region=region)
keystone_url = keystone_url.replace(lab_ip, lab_dns_name)
LOG.info('Get authentication token from keystone url {}'.format(keystone_url))
headers = {'Content-type': 'application/json'}
body = {
'auth': {
'identity': {
'methods': ['password'],
'password': {
'user': {
'domain': {
'name': 'Default'
},
'name': 'admin',
'password': 'Li69nux*'
}
}
},
'scope': {
'project': {
'name': 'admin',
'domain': {
'name': 'Default'
}
}
}
}
}
try:
req = requests.post(url=keystone_url, headers=headers, data=json.dumps(body), verify=False)
except Exception as e:
LOG.error('Error trying to get a token')
LOG.debug(e)
return None
LOG.debug('\n{} {}\nHeaders: {}\nBody: {}\nResponse code: {}\nResponse body: {}'.format(
req.request.method, req.request.url, req.request.headers,
req.request.body, req.status_code, req.text))
LOG.info('Status: [{}]'.format(req.status_code))
req.raise_for_status()
return req.headers.get('X-Subject-Token')
def check_url_access(url, headers=None, verify=True, fail_ok=False):
"""
Check the access to a given url
Args:
url(str): url to check
headers(None|dict): request headers of the http request
verify(bool|str):
True: secure request
False: equivalent to --insecure in curl cmd
str: applies to https system. CA-Certificate path. e.g., verify=/path/to/cert
fail_ok(bool):
Returns(tuple): (status_code, response)
- (1, <std_err>): An exception has occurred
- (status_code, response): status code and response from requests call
"""
LOG.info('curl -i {}...'.format(url))
try:
req = requests.get(url=url, headers=headers, verify=verify)
except requests.exceptions.RequestException as e:
if fail_ok:
message = 'Exception trying to access {}: {}'.format(url, e)
LOG.warn(message)
return 1, message
raise e
LOG.info('Status: [{}]'.format(req.status_code))
LOG.debug('\n{} {}\nHeaders: {}\nResponse code: {}\nResponse body: {}'.format(
req.request.method, req.request.url, req.request.headers, req.status_code, req.text))
if not fail_ok:
req.raise_for_status()
return req.status_code, req.text
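A combined sketch of get_auth_token() and check_url_access(); the ip, port and region are placeholders for a real keystone public endpoint:

token = get_auth_token(region='RegionOne', use_dnsname=False)
if token:
    status, body = check_url_access('https://10.10.10.2:5000/v3',
                                    headers={'X-Auth-Token': token},
                                    verify=False, fail_ok=True)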
def check_services_access(service_name=None, region=None, auth=True, verify=True,
use_dnsname=True, auth_info=Tenant.get('admin_platform')):
"""
Check public endpoints of services are reachable via get request
Args:
service_name(str|list|None): filter only certainly services to check
region(str|None): filter only the endpoints from a certain region
auth(bool): perform the requests with an authentication from keystone
verify(bool|str):
True: if https is enabled, verify the cert with the default CA
False: equivalent to --insecure in curl cmd
str: applies to https system. CA-Certificate path. e.g., verify=/path/to/cert
use_dnsname(bool): True to use the dns name instead of the IP for the rest request
auth_info(dict):
Returns(None):
"""
if not use_dnsname:
verify = False
LOG.info('Check services access via curl')
token = None
if auth:
token = get_auth_token(region=region, auth_info=auth_info, use_dnsname=use_dnsname)
headers = {'X-Auth-Token': token} if token else None
if service_name:
urls_to_check = []
if isinstance(service_name, str):
service_name = [service_name]
for service in service_name:
url = keystone_helper.get_endpoints(field='URL', interface='public', region=region,
enabled='True', service_name=service,
auth_info=auth_info)
if url:
urls_to_check.append(url)
else:
LOG.warn('{} service\'s public endpoint not found or not enabled'.format(service))
else:
urls_to_check = keystone_helper.get_endpoints(field='URL', interface='public',
region=region, enabled='True',
auth_info=auth_info)
if use_dnsname:
lab_ip = common.get_lab_fip(region=region)
lab_dns_name = common.get_dnsname(region=region)
urls_to_check = [url.replace(lab_ip, lab_dns_name) for url in urls_to_check]
for url in urls_to_check:
# FIXME skip unreachable port 7777 (sm-api) until CGTS-19988 is resolved
# FIXME skip unreachable port 8219 (dcdbsync) until 1892391 is resolved
if url.endswith('7777') or url.endswith('8219/v1.0'):
continue
check_url_access(url=url, headers=headers, verify=verify)
def check_platform_horizon_access(verify=True, use_dnsname=True):
"""
Check horizon URL is reachable via get request
Args:
verify(bool|str):
True: if https is enabled, verify the cert with the default CA
False: equivalent to --insecure in curl cmd
str: applies to https system. CA-Certificate path. e.g., verify=/path/to/cert
use_dnsname(bool): True to use the dns name instead of the IP for the rest request
Returns(None):
"""
from keywords import horizon_helper
if not use_dnsname:
verify = False
LOG.info('Check platform horizon access via curl')
horizon_url = horizon_helper.get_url(dnsname=use_dnsname)
check_url_access(url=horizon_url, verify=verify)
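e.g., a hypothetical smoke check against a couple of platform services on the primary subcloud (service names are assumed to exist in the keystone catalog):

check_services_access(service_name=['keystone', 'sysinv'], region='subcloud1',
                      auth=True, verify=False, use_dnsname=False)
check_platform_horizon_access(use_dnsname=False)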

View File

@@ -85,15 +85,14 @@ def is_aio_duplex(con_ssh=None, auth_info=Tenant.get('admin_platform')):
 def is_aio_simplex(con_ssh=None, auth_info=Tenant.get('admin_platform')):
     sys_type = ProjVar.get_var('SYS_TYPE')
     if sys_type:
-        if not (ProjVar.get_var('IS_DC') and auth_info and
+        if not (con_ssh and ProjVar.get_var('IS_DC') and auth_info and
                 ProjVar.get_var('PRIMARY_SUBCLOUD') != auth_info.get('region',
                                                                      None)):
             return SysType.AIO_SX == sys_type

-    else:
-        return is_aio_system(controller_ssh=con_ssh,
-                             auth_info=auth_info) and \
-               len(get_controllers(con_ssh=con_ssh,
-                                   auth_info=auth_info)) == 1
+    return is_aio_system(controller_ssh=con_ssh,
+                         auth_info=auth_info) and \
+        len(get_controllers(con_ssh=con_ssh, auth_info=auth_info)) == 1


 def is_aio_system(controller_ssh=None, controller='controller-0',
@@ -111,7 +110,7 @@ def is_aio_system(controller_ssh=None, controller='controller-0',
     """
     sys_type = ProjVar.get_var('SYS_TYPE')
     if sys_type:
-        if not (ProjVar.get_var('IS_DC') and auth_info and
+        if not (controller_ssh and ProjVar.get_var('IS_DC') and auth_info and
                 ProjVar.get_var('PRIMARY_SUBCLOUD') != auth_info.get('region',
                                                                      None)):
             return 'aio' in sys_type.lower()

View File

@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2019 Wind River Systems, Inc.
+# Copyright (c) 2019, 2020 Wind River Systems, Inc.
 #
 # SPDX-License-Identifier: Apache-2.0
 #
@@ -16,7 +16,7 @@ from consts.stx import Prompt, SUBCLOUD_PATTERN, SysType, GuestImages, Networks
 from consts.lab import Labs, add_lab_entry, NatBoxes
 from consts.proj_vars import ProjVar
 from keywords import host_helper, nova_helper, system_helper, keystone_helper, \
-    common, container_helper
+    common, container_helper, dc_helper
 from utils import exceptions
 from utils.clients.ssh import SSHClient, CONTROLLER_PROMPT, ControllerClient, \
     NATBoxClient, PASSWORD_PROMPT
@@ -528,8 +528,67 @@ def set_region(region=None):
         Tenant.set_platform_url(urls[0])


+def set_dc_vars():
+    if not ProjVar.get_var('IS_DC') or ControllerClient.get_active_controller(
+            name='RegionOne', fail_ok=True):
+        return
+
+    central_con_ssh = ControllerClient.get_active_controller()
+    ControllerClient.set_active_controller(central_con_ssh, name='RegionOne')
+    primary_subcloud = ProjVar.get_var('PRIMARY_SUBCLOUD')
+    sub_clouds = dc_helper.get_subclouds(avail='online', mgmt='managed',
+                                         con_ssh=central_con_ssh)
+    LOG.info("Online subclouds: {}".format(sub_clouds))
+
+    lab = ProjVar.get_var('LAB')
+    primary_ssh = None
+    for subcloud in sub_clouds:
+        subcloud_lab = lab.get(subcloud, None)
+        if not subcloud_lab:
+            raise ValueError('Please add {} to {} in consts/lab.py'.format(
+                subcloud, lab['short_name']))
+
+        LOG.info("Create ssh connection to {}, and add to ControllerClient".
+                 format(subcloud))
+        # subcloud_ssh = SSHClient(subcloud_lab['floating ip'],
+        #                          HostLinuxUser.get_user(),
+        #                          HostLinuxUser.get_password(),
+        #                          CONTROLLER_PROMPT)
+        subcloud_ssh = common.ssh_to_stx(lab=subcloud_lab)
+        try:
+            subcloud_ssh.connect(retry=True, retry_timeout=30)
+            ControllerClient.set_active_controller(subcloud_ssh, name=subcloud)
+        except exceptions.SSHException as e:
+            if subcloud == primary_subcloud:
+                raise
+            LOG.warning('Cannot connect to {} via its floating ip. {}'.
+                        format(subcloud, e.__str__()))
+            continue
+
+        LOG.info("Add {} to DC_MAP".format(subcloud))
+        subcloud_auth = get_auth_via_openrc(subcloud_ssh)
+        auth_url = subcloud_auth['OS_AUTH_URL']
+        region = subcloud_auth['OS_REGION_NAME']
+        Tenant.add_dc_region(region_info={subcloud: {'auth_url': auth_url,
+                                                     'region': region}})
+
+        if subcloud == primary_subcloud:
+            primary_ssh = subcloud_ssh
+            LOG.info("Set default cli auth to use {}".format(subcloud))
+            Tenant.set_region(region=region)
+            Tenant.set_platform_url(url=auth_url)
+
+    LOG.info("Set default controller ssh to {} in ControllerClient".
+             format(primary_subcloud))
+    ControllerClient.set_default_ssh(primary_subcloud)
+    return primary_ssh
+
+
 def set_sys_type(con_ssh):
-    sys_type = system_helper.get_sys_type(con_ssh=con_ssh)
+    primary_ssh = set_dc_vars()
+    sys_type = system_helper.get_sys_type(con_ssh=primary_ssh if primary_ssh else con_ssh)
     ProjVar.set_var(SYS_TYPE=sys_type)

View File

@@ -0,0 +1,18 @@
#
# Copyright (c) 2020 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from pytest import fixture, skip
from consts.proj_vars import ProjVar
# Import DC fixtures for testcases to use
from testfixtures.dc_fixtures import check_central_alarms
@fixture(scope='module', autouse=True)
def dc_only():
if not ProjVar.get_var('IS_DC'):
skip('Skip Distributed Cloud test cases for non-DC system.')

View File

@@ -0,0 +1,287 @@
#
# Copyright (c) 2020 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
import time
from pytest import fixture
from utils import cli
from utils.tis_log import LOG
from utils.clients.ssh import ControllerClient
from utils import table_parser
from consts.proj_vars import ProjVar
from consts.auth import Tenant
from consts.stx import SubcloudStatus, EventLogID
from consts.timeout import DCTimeout
from keywords import dc_helper, system_helper
# Set the level of stress you want to test
ALARMS_NO = 500
@fixture(scope="module")
def subcloud_to_test():
check_alarm_summary_match_subcloud(ProjVar.get_var('PRIMARY_SUBCLOUD'))
return ProjVar.get_var('PRIMARY_SUBCLOUD')
def check_alarm_summary_match_subcloud(subcloud, timeout=400):
LOG.info("Ensure alarm summary on SystemController with subcloud {}".format(subcloud))
subcloud_auth = Tenant.get('admin_platform', dc_region=subcloud)
central_auth = Tenant.get('admin_platform', dc_region='RegionOne')
severities = ["critical_alarms", "major_alarms", "minor_alarms", "warnings"]
central_alarms = subcloud_alarms = None
end_time = time.time() + timeout
while time.time() < end_time:
output_central = cli.dcmanager('alarm summary', auth_info=central_auth, fail_ok=False)[1]
output_sub = cli.fm("alarm-summary", auth_info=subcloud_auth, fail_ok=False)[1]
central_alarms = table_parser.get_multi_values(table_parser.table(output_central),
fields=severities, **{"NAME": subcloud})
subcloud_alarms = table_parser.get_multi_values(table_parser.table(output_sub), severities)
if central_alarms == subcloud_alarms:
LOG.info("'dcmanager alarm summary' output for {} matches 'fm alarm-summary' on "
"{}".format(subcloud, subcloud))
return
time.sleep(30)
assert central_alarms == subcloud_alarms, \
"'dcmanager alarm summary did not match 'fm alarm-summary' on {} " \
"within {}s".format(subcloud, timeout)
def alarm_summary_add_and_del(subcloud):
try:
# Test adding alarm on subcloud
ssh_client = ControllerClient.get_active_controller(name=subcloud)
LOG.info("Wait for alarm raised on subcloud {}".format(subcloud))
system_helper.wait_for_alarm(alarm_id=EventLogID.PROVIDER_NETWORK_FAILURE,
con_ssh=ssh_client)
LOG.tc_step("Ensure alarm summary match nn Central with subcloud: {}".format(subcloud))
check_alarm_summary_match_subcloud(subcloud)
# Test clearing alarm on subcloud
LOG.tc_step("Clear alarm on subcloud: {}".format(subcloud))
ssh_client.exec_cmd('fmClientCli -D host=testhost-0', fail_ok=False)
LOG.info("Wait for alarm clear on subcloud {}".format(subcloud))
system_helper.wait_for_alarm_gone(alarm_id=EventLogID.PROVIDER_NETWORK_FAILURE,
con_ssh=ssh_client)
check_alarm_summary_match_subcloud(subcloud)
finally:
ssh_client = ControllerClient.get_active_controller(name=subcloud)
LOG.info("Clear alarm on subcloud: {}".format(subcloud))
ssh_client.exec_cmd('fmClientCli -D host=testhost-0')
def add_routes_to_subcloud(subcloud, subcloud_table, fail_ok=False):
LOG.debug("Add routes back to subcloud: {}".format(subcloud))
ssh_client = ControllerClient.get_active_controller(name=subcloud)
for host_id in subcloud_table:
comm_args = table_parser.get_multi_values(subcloud_table[host_id],
["ifname", "network", "prefix", "gateway"])
command = "host-route-add {} {} {} {} {}".format(host_id, comm_args[0][0],
comm_args[1][0], comm_args[2][0],
comm_args[3][0])
code, output = cli.system("host-route-list {}".format(host_id))
uuid_list = table_parser.get_values(table_parser.table(output), "uuid")
if table_parser.get_values(subcloud_table[host_id], "uuid")[0] not in uuid_list:
cli.system(command, ssh_client=ssh_client, fail_ok=fail_ok)
def test_dc_alarm_aggregation_managed(subcloud_to_test):
"""
Test Alarm Aggregation on Distributed Cloud
Args:
subcloud_to_test (str): module fixture
Setups:
- Make sure there is consistency between alarm summary on
Central Cloud and on subclouds
Test Steps:
- Raise an alarm at subcloud
- Ensure relative alarm raised on subcloud
- Ensure system alarm-summary on subcloud matches dcmanager alarm summary on system
controller
- Clear alarm at subcloud
- Ensure relative alarm cleared on subcloud
- Ensure system alarm-summary on subcloud matches dcmanager alarm summary on system
controller
"""
ssh_client = ControllerClient.get_active_controller(name=subcloud_to_test)
LOG.tc_step("Raise alarm on subcloud: {}".format(subcloud_to_test))
ssh_client.exec_cmd(
"fmClientCli -c \"### ###300.005###clear###system.vm###host=testhost-0"
"### ###critical### ###processing-error###cpu-cycles-limit-exceeded### ###"
"True###True###'\"", fail_ok=False)
alarm_summary_add_and_del(subcloud_to_test)
def test_dc_fault_scenario(subcloud_to_test):
"""
Test Fault Scenario on Distributed Cloud
Args:
subcloud_to_test (str): module fixture
Setup:
- Make sure there is consistency between alarm summary on
Central Cloud and on subclouds
Test Steps:
- Make subcloud offline (e.g., delete route)
Step1:
- Ensure subcloud shows offline
Step2:
- Raise alarm on subcloud
- Ensure relative alarm raised on subcloud
- Ensure system alarm-summary on subcloud has changed
- Ensure dcmanager alarm summary on system controller has not changed
Step3:
- Resume connectivity to subcloud (e.g., add route back)
- Ensure subcloud shows online and in-sync
- Ensure system alarm-summary on subcloud matches dcmanager alarm summary on system
controller
Step4:
- Clean alarm on subcloud
- Ensure relative alarm cleared on subcloud
- Ensure system alarm-summary on subcloud matches dcmanager alarm summary on system
controller
"""
ssh_central = ControllerClient.get_active_controller(name="RegionOne")
ssh_subcloud = ControllerClient.get_active_controller(name=subcloud_to_test)
subcloud_table = {}
try:
code, output = cli.dcmanager("subcloud show {}".format(subcloud_to_test),
ssh_client=ssh_central)
gateway = table_parser.get_value_two_col_table(table_parser.table(output),
"management_gateway_ip")
code, hosts_raw = cli.system("host-list", ssh_client=ssh_subcloud)
hosts_id = table_parser.get_values(table_parser.table(hosts_raw), 'id')
for host_id in hosts_id:
code, route_raw = cli.system("host-route-list {}".format(host_id),
ssh_client=ssh_subcloud)
route_table = table_parser.filter_table(table_parser.table(route_raw),
**{'gateway': gateway})
subcloud_table[host_id] = route_table
LOG.tc_step("Delete route for subcloud: {} and wait for it to go offline.".format(
subcloud_to_test))
ssh_subcloud = ControllerClient.get_active_controller(name=subcloud_to_test)
for host_id in subcloud_table:
command = "host-route-delete {}".format(table_parser.get_values(
subcloud_table[host_id], "uuid")[0])
cli.system(command, ssh_client=ssh_subcloud)
dc_helper.wait_for_subcloud_status(subcloud_to_test,
avail=SubcloudStatus.AVAIL_OFFLINE,
timeout=DCTimeout.SYNC, con_ssh=ssh_central)
LOG.tc_step("Raise alarm on subcloud: {}".format(subcloud_to_test))
ssh_subcloud = ControllerClient.get_active_controller(name=subcloud_to_test)
code_sub_before, output_sub_before = cli.fm("alarm-summary", ssh_client=ssh_subcloud)
code_central_before, output_central_before = cli.dcmanager('alarm summary')
ssh_subcloud.exec_cmd(
"fmClientCli -c \"### ###300.005###clear###system.vm###host="
"testhost-0### ###critical### ###processing-error###cpu-cycles-limit-exceeded"
"### ###True###True###'\"", fail_ok=False)
LOG.info("Ensure relative alarm was raised at subcloud: {}".format(subcloud_to_test))
system_helper.wait_for_alarm(alarm_id=EventLogID.PROVIDER_NETWORK_FAILURE,
con_ssh=ssh_subcloud)
code_sub_after, output_sub_after = cli.fm("alarm-summary", ssh_client=ssh_subcloud)
code_central_after, output_central_after = cli.dcmanager('alarm summary')
LOG.info("Ensure fm alarm summary on subcloud: {} has changed but dcmanager alarm"
"summary has not changed".format(subcloud_to_test))
assert output_central_before == output_central_after and output_sub_before != \
output_sub_after
add_routes_to_subcloud(subcloud_to_test, subcloud_table)
dc_helper.wait_for_subcloud_status(subcloud_to_test, avail=SubcloudStatus.AVAIL_ONLINE,
sync=SubcloudStatus.SYNCED, timeout=DCTimeout.SYNC,
con_ssh=ssh_central)
alarm_summary_add_and_del(subcloud_to_test)
finally:
cli.dcmanager("subcloud show {}".format(subcloud_to_test),
ssh_client=ssh_central, fail_ok=True)
add_routes_to_subcloud(subcloud_to_test, subcloud_table, fail_ok=True)
LOG.info("Clear alarm on subcloud: {}".format(subcloud_to_test))
ssh_subcloud.exec_cmd('fmClientCli -D host=testhost-0')
check_alarm_summary_match_subcloud(subcloud=subcloud_to_test)
def test_dc_stress_alarm(subcloud_to_test):
"""
Test Stress Scenario on Distributed Cloud
Args:
subcloud_to_test (str): module fixture
Setup:
- Make sure there is consistency between alarm summary on
Central Cloud and on subclouds
Test Steps:
Step1:
- Trigger large amount of alarms, quickly on one subcloud
- ensure system alarm-summary on subcloud matches dcmanager alarm summary on system
controller
Step2:
- Trigger large amount of alarms quickly for a long time on all subclouds
- Each alarm summary updates once every 30 seconds until the event is over
- Ensure system alarm-summary on subcloud matches dcmanager alarm summary on system
controller
Step3:
- Clear all alarms
- Ensure system alarm-summary on subcloud matches dcmanager alarm summary on system
controller
"""
ssh_client = ControllerClient.get_active_controller(name=subcloud_to_test)
# Step 1
LOG.tc_step("Trigger large amount of alarms, quickly on one subcloud")
try:
for i in range(1, ALARMS_NO + 1):
ssh_client.exec_cmd(
"fmClientCli -c \"### ###300.005###clear###system.vm###host="
"testhost-{}### ###critical### ###processing-error###cpu-cycles-limit-exceeded"
"### ###True###True###'\"".format(i), fail_ok=False)
finally:
for i in range(1, ALARMS_NO + 1):
ssh_client.exec_cmd('fmClientCli -D host=testhost-{}'.format(i))
check_alarm_summary_match_subcloud(subcloud_to_test)
# Step 2
ssh_client_list = {}
for subcloud in dc_helper.get_subclouds(mgmt='managed'):
ssh_client_list[subcloud] = ControllerClient.get_active_controller(name=subcloud)
try:
LOG.tc_step("Trigger large amount of alarms quickly for a long time on all subclouds")
for subcloud in ssh_client_list:
subcloud_ssh = ssh_client_list[subcloud]
for i in range(1, ALARMS_NO + 1):
subcloud_ssh.exec_cmd(
"fmClientCli -c \"### ###300.005###clear###"
"system.vm###host=testhost-{}### ###critical### ###processing-error###"
"cpu-cycles-limit-exceeded### ###True###True###'\"".format(i),
fail_ok=False)
for subcloud in ssh_client_list:
check_alarm_summary_match_subcloud(subcloud)
finally:
# Step 3
LOG.tc_step("Clear all alarms on all subclouds")
for subcloud in ssh_client_list:
subcloud_ssh = ssh_client_list[subcloud]
for i in range(1, ALARMS_NO + 1):
subcloud_ssh.exec_cmd('fmClientCli -D host=testhost-{}'.format(i))
for subcloud in ssh_client_list:
check_alarm_summary_match_subcloud(subcloud)

View File

@@ -0,0 +1,78 @@
#
# Copyright (c) 2020 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from pytest import fixture
from consts.auth import Tenant
from consts.proj_vars import ProjVar
from consts.stx import SubcloudStatus
from utils.clients.ssh import ControllerClient
from utils.tis_log import LOG
from keywords import host_helper, dc_helper
@fixture(scope='module')
def swact_precheck(request):
LOG.info("Gather subcloud management info")
subcloud = ProjVar.get_var('PRIMARY_SUBCLOUD')
def revert():
LOG.fixture_step("Manage {} if unmanaged".format(subcloud))
dc_helper.manage_subcloud(subcloud)
request.addfinalizer(revert)
managed_subclouds = dc_helper.get_subclouds(mgmt=SubcloudStatus.MGMT_MANAGED,
avail=SubcloudStatus.AVAIL_ONLINE,
sync=SubcloudStatus.SYNCED)
if subcloud in managed_subclouds:
managed_subclouds.remove(subcloud)
ssh_map = ControllerClient.get_active_controllers_map()
managed_subclouds = [subcloud for subcloud in managed_subclouds if subcloud in ssh_map]
return subcloud, managed_subclouds
def test_dc_swact_host(swact_precheck, check_central_alarms):
"""
Test host swact on central region
Args:
swact_precheck(fixture): check subclouds managed and online
Setup:
- Ensure primary subcloud is managed
Test Steps:
- Unmanage primary subcloud
- Swact the host
- Verify subclouds are managed
Teardown:
- Manage unmanaged subclouds
"""
primary_subcloud, managed_subclouds = swact_precheck
ssh_central = ControllerClient.get_active_controller(name="RegionOne")
LOG.tc_step("Unmanage {}".format(primary_subcloud))
dc_helper.unmanage_subcloud(subcloud=primary_subcloud, check_first=True)
LOG.tc_step("Swact host on central region")
central_auth = Tenant.get('admin_platform', dc_region='RegionOne')
host_helper.swact_host(auth_info=central_auth)
LOG.tc_step("Check subclouds after host swact on central region")
for managed_subcloud in managed_subclouds:
dc_helper.wait_for_subcloud_status(subcloud=managed_subcloud,
avail=SubcloudStatus.AVAIL_ONLINE,
mgmt=SubcloudStatus.MGMT_MANAGED,
sync=SubcloudStatus.SYNCED,
con_ssh=ssh_central)
LOG.tc_step("Manage {}".format(primary_subcloud))
dc_helper.manage_subcloud(subcloud=primary_subcloud, check_first=True)
dc_helper.wait_for_subcloud_status(subcloud=primary_subcloud,
avail=SubcloudStatus.AVAIL_ONLINE,
mgmt=SubcloudStatus.MGMT_MANAGED,
sync=SubcloudStatus.SYNCED,
con_ssh=ssh_central)

View File

@@ -0,0 +1,163 @@
#
# Copyright (c) 2020 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from pytest import fixture
from consts.auth import Tenant
from consts.proj_vars import ProjVar
from keywords import security_helper, keystone_helper, dc_helper, container_helper, host_helper, \
system_helper, common
from utils import cli
from utils.tis_log import LOG
@fixture(scope='module')
def revert_https(request):
"""
Fixture for get the current http mode of the system, and if the test fails,
leave the system in the same mode than before
"""
central_auth = Tenant.get('admin_platform', dc_region='RegionOne')
sub_auth = Tenant.get('admin_platform')
use_dnsname = (bool(common.get_dnsname()) and
bool(common.get_dnsname(region=ProjVar.get_var('PRIMARY_SUBCLOUD'))))
origin_https_sub = keystone_helper.is_https_enabled(auth_info=sub_auth)
origin_https_central = keystone_helper.is_https_enabled(auth_info=central_auth)
def _revert():
LOG.fixture_step("Revert central https config to {}.".format(origin_https_central))
security_helper.modify_https(enable_https=origin_https_central, auth_info=central_auth)
LOG.fixture_step("Revert subcloud https config to {}.".format(origin_https_sub))
security_helper.modify_https(enable_https=origin_https_sub, auth_info=sub_auth)
LOG.fixture_step("Verify cli's on subcloud and central region.".format(origin_https_sub))
verify_cli(sub_auth, central_auth)
request.addfinalizer(_revert)
return origin_https_sub, origin_https_central, central_auth, sub_auth, use_dnsname
def test_dc_modify_https(revert_https):
"""
Test enable/disable https
Test Steps:
- Ensure central region and subcloud admin endpoints are https
- Set central region https config to differ from the subcloud's
- Wait for subcloud sync audit and ensure subcloud https is not changed
- Verify cli's in subcloud and central region
- Modify https on central and subcloud
- Verify cli's in subcloud and central region
- swact central and subcloud
- Ensure central region and subcloud admin endpoints are https
Teardown:
- Revert https config on central and subcloud
"""
origin_https_sub, origin_https_central, central_auth, sub_auth, use_dnsname = revert_https
subcloud = ProjVar.get_var('PRIMARY_SUBCLOUD')
LOG.tc_step(
"Before testing, Ensure central region and subcloud admin internal endpoint are https")
assert keystone_helper.is_https_enabled(interface='admin', auth_info=central_auth), \
"Central region admin internal endpoint is not https"
assert keystone_helper.is_https_enabled(interface='admin', auth_info=sub_auth), \
"Subcloud admin internal endpoint is not https"
new_https_sub = not origin_https_sub
new_https_central = not origin_https_central
LOG.tc_step("Ensure central region https to be different than {}".format(subcloud))
security_helper.modify_https(enable_https=new_https_sub, auth_info=central_auth)
LOG.tc_step('Check public endpoints accessibility for central region')
security_helper.check_services_access(region='RegionOne', auth_info=central_auth,
use_dnsname=use_dnsname)
LOG.tc_step('Check platform horizon accessibility')
security_helper.check_platform_horizon_access(use_dnsname=use_dnsname)
LOG.tc_step("Wait for subcloud sync audit with best effort and ensure {} https is not "
"changed".format(subcloud))
dc_helper.wait_for_sync_audit(subclouds=subcloud, fail_ok=True, timeout=660)
assert origin_https_sub == keystone_helper.is_https_enabled(auth_info=sub_auth), \
"HTTPS config changed in subcloud"
LOG.tc_step("Verify cli's in {} and central region".format(subcloud))
verify_cli(sub_auth, central_auth)
if new_https_central != new_https_sub:
LOG.tc_step("Set central region https to {}".format(new_https_central))
security_helper.modify_https(enable_https=new_https_central, auth_info=central_auth)
LOG.tc_step("Ensure central region and subcloud admin internal endpoint are still https")
assert keystone_helper.is_https_enabled(interface='admin', auth_info=central_auth), \
"Central region admin internal endpoint is not https"
assert keystone_helper.is_https_enabled(interface='admin', auth_info=sub_auth), \
"Subcloud admin internal endpoint is not https"
LOG.tc_step('Check public endpoints accessibility for central region')
security_helper.check_services_access(region='RegionOne', auth_info=central_auth,
use_dnsname=use_dnsname)
LOG.tc_step('Check platform horizon accessibility')
security_helper.check_platform_horizon_access(use_dnsname=use_dnsname)
LOG.tc_step("Set {} https to {}".format(subcloud, new_https_sub))
security_helper.modify_https(enable_https=new_https_sub, auth_info=sub_auth)
LOG.tc_step('Check public endpoints accessibility for {} region'.format(subcloud))
security_helper.check_services_access(region=subcloud, auth_info=sub_auth,
use_dnsname=use_dnsname)
LOG.tc_step("Ensure central region and subcloud admin internal endpoint are still https")
assert keystone_helper.is_https_enabled(interface='admin', auth_info=central_auth), \
"Central region admin internal endpoint is not https"
assert keystone_helper.is_https_enabled(interface='admin', auth_info=sub_auth), \
"Subcloud admin internal endpoint is not https"
LOG.tc_step("Verify cli's in {} and central region after https modify on "
"subcloud".format(subcloud))
verify_cli(sub_auth, central_auth)
LOG.tc_step("Swact on central region")
host_helper.swact_host(auth_info=central_auth)
LOG.tc_step(
"Verify cli's in {} and central region after central region swact" .format(subcloud))
verify_cli(sub_auth, central_auth)
if not system_helper.is_aio_simplex(auth_info=sub_auth):
LOG.tc_step("Swact on subcloud {}".format(subcloud))
host_helper.swact_host(auth_info=sub_auth)
LOG.tc_step("Verify cli's in {} and central region after subcloud swact".format(subcloud))
verify_cli(sub_auth, central_auth)
LOG.tc_step("Ensure after swact, central region and subcloud admin internal endpoint are https")
assert keystone_helper.is_https_enabled(interface='admin', auth_info=central_auth), \
"Central region admin internal endpoint is not https"
assert keystone_helper.is_https_enabled(interface='admin', auth_info=sub_auth), \
"Subcloud admin internal endpoint is not https"
def verify_cli(sub_auth=None, central_auth=None):
auths = [central_auth, sub_auth]
auths = [auth for auth in auths if auth]
for auth in auths:
cli.system('host-list', fail_ok=False, auth_info=auth)
cli.fm('alarm-list', fail_ok=False, auth_info=auth)
if container_helper.is_stx_openstack_deployed(applied_only=True, auth_info=auth):
cli.openstack('server list --a', fail_ok=False, auth_info=auth)
cli.openstack('image list', fail_ok=False, auth_info=auth)
cli.openstack('volume list --a', fail_ok=False, auth_info=auth)
cli.openstack('user list', fail_ok=False, auth_info=auth)
cli.openstack('router list', fail_ok=False, auth_info=auth)
if sub_auth and container_helper.is_stx_openstack_deployed(applied_only=True,
auth_info=sub_auth):
cli.openstack('stack list', fail_ok=False, auth_info=sub_auth)
cli.openstack('alarm list', fail_ok=False, auth_info=sub_auth)
cli.openstack('metric status', fail_ok=False, auth_info=sub_auth)

View File

@@ -0,0 +1,231 @@
#
# Copyright (c) 2020 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from pytest import fixture, skip, mark
from utils.tis_log import LOG
from utils.clients.ssh import ControllerClient
from consts.proj_vars import ProjVar
from consts.auth import Tenant
from keywords import dc_helper, system_helper, host_helper
@fixture(scope='module')
def subclouds_to_test(request):
LOG.info("Gather DNS config and subcloud management info")
sc_auth = Tenant.get('admin_platform', dc_region='SystemController')
dns_servers = system_helper.get_dns_servers(auth_info=sc_auth)
subcloud = ProjVar.get_var('PRIMARY_SUBCLOUD')
def revert():
LOG.fixture_step("Manage {} if unmanaged".format(subcloud))
dc_helper.manage_subcloud(subcloud)
LOG.fixture_step("Revert DNS config if changed")
system_helper.set_dns_servers(nameservers=dns_servers, auth_info=sc_auth)
request.addfinalizer(revert)
managed_subclouds = dc_helper.get_subclouds(mgmt='managed', avail='online')
if subcloud in managed_subclouds:
managed_subclouds.remove(subcloud)
ssh_map = ControllerClient.get_active_controllers_map()
managed_subclouds = [subcloud for subcloud in managed_subclouds if subcloud in ssh_map]
return subcloud, managed_subclouds
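# Build the DNS server list for a given scenario. Illustrative example,
# assuming the previous config is ['1.1.1.1', '2.2.2.2']:
#   'add_unreachable_server' -> ['1.1.1.1', '2.2.2.2', '8.4.4.4']
#   'unreachable_server'     -> ['8.4.4.4']
#   'change_order'           -> ['2.2.2.2', '1.1.1.1']
#   'remove_one_server'      -> ['2.2.2.2']  (original first server dropped)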
def compose_new_dns_servers(scenario, prev_dns_servers):
dns_servers = list(prev_dns_servers)
unreachable_dns_server_ip = "8.4.4.4"
if scenario == 'add_unreachable_server':
dns_servers.append(unreachable_dns_server_ip)
elif scenario == 'unreachable_server':
dns_servers = [unreachable_dns_server_ip]
else:
if len(dns_servers) < 2:
skip('Less than two DNS servers configured.')
if scenario == 'change_order':
dns_servers.append(dns_servers.pop(0))
elif scenario == 'remove_one_server':
# Rotate the first server to the tail, then drop it, i.e. remove
# the original first DNS server from the list
dns_servers.append(dns_servers.pop(0))
dns_servers.pop()
else:
raise ValueError("Unknown scenario: {}".format(scenario))
return dns_servers
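# Function-scoped fixture: before each test, re-sync the primary subcloud's
# DNS config with the SystemController (re-managing it if needed) and layer
# on the central-region alarm check.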
@fixture()
def ensure_synced(subclouds_to_test, check_central_alarms):
primary_subcloud, managed_subclouds = subclouds_to_test
LOG.fixture_step(
"Ensure {} is managed and DNS config is valid and synced".format(primary_subcloud))
subcloud_auth = Tenant.get('admin_platform', dc_region=primary_subcloud)
subcloud_dns = system_helper.get_dns_servers(con_ssh=None, auth_info=subcloud_auth)
sc_dns = system_helper.get_dns_servers(con_ssh=None,
auth_info=Tenant.get('admin_platform',
dc_region='SystemController'))
if subcloud_dns != sc_dns:
dc_helper.manage_subcloud(subcloud=primary_subcloud, check_first=True)
dc_helper.wait_for_subcloud_dns_config(subcloud=primary_subcloud, expected_dns=sc_dns)
verify_dns_on_central_and_subcloud(primary_subcloud)
return primary_subcloud, managed_subclouds, sc_dns
@mark.parametrize('scenario', (
'add_unreachable_server',
'change_order',
'remove_one_server',
))
def test_dc_dns_modify(ensure_synced, scenario):
"""
Update DNS servers on the central region and verify the change propagates to subclouds
Args:
ensure_synced: test fixture
scenario: DNS change scenario
Setups:
- Ensure primary subcloud is managed and DNS config is valid and synced
Test Steps:
- Un-manage primary subcloud
- Configure DNS servers on central region to new value based on given scenario
- Wait for new DNS config to sync over to managed online subclouds
- Ensure DNS config is not updated on unmanaged primary subcloud
- Re-manage primary subcloud and ensure DNS config syncs over
- Verify nslookup works in Central Region and primary subcloud
Teardown:
- Reset DNS servers to original value (module)
"""
primary_subcloud, managed_subclouds, prev_dns_servers = ensure_synced
new_dns_servers = compose_new_dns_servers(scenario=scenario, prev_dns_servers=prev_dns_servers)
LOG.tc_step("Unmanage {}".format(primary_subcloud))
dc_helper.unmanage_subcloud(subcloud=primary_subcloud, check_first=True)
LOG.tc_step("Reconfigure DNS servers on central region from {} to {}".
format(prev_dns_servers, new_dns_servers))
system_helper.set_dns_servers(new_dns_servers,
auth_info=Tenant.get('admin_platform',
dc_region='SystemController'))
LOG.tc_step("Wait for new DNS config to sync over to managed online subclouds")
for managed_sub in managed_subclouds:
dc_helper.wait_for_subcloud_dns_config(subcloud=managed_sub, expected_dns=new_dns_servers)
LOG.tc_step("Ensure DNS config is not updated on unmanaged subcloud: {}".
format(primary_subcloud))
code = dc_helper.wait_for_subcloud_dns_config(subcloud=primary_subcloud,
expected_dns=new_dns_servers,
timeout=60, fail_ok=True)[0]
assert 1 == code, "Actual return code: {}".format(code)
LOG.tc_step('Re-manage {} and ensure DNS config syncs over'.format(primary_subcloud))
dc_helper.manage_subcloud(subcloud=primary_subcloud, check_first=False)
dc_helper.wait_for_subcloud_dns_config(subcloud=primary_subcloud, expected_dns=new_dns_servers)
LOG.tc_step('Verify nslookup works in Central Region and {}'.format(primary_subcloud))
verify_dns_on_central_and_subcloud(primary_subcloud)
def test_dc_dns_override_local_change(ensure_synced):
"""
Verify a DNS modification on the subcloud is overridden by the central region config
Args:
ensure_synced: test fixture
Setups:
- Ensure primary subcloud is managed and DNS config is valid and synced
Test Steps:
- Un-manage primary subcloud
- Configure DNS servers on primary subcloud to an unreachable IP address (8.4.4.4)
- Wait for sync update log on a managed subcloud, best effort
- Ensure DNS config is not updated on unmanaged primary subcloud
- Verify nslookup passes on central region and fails on primary subcloud
- Re-manage primary subcloud and ensure DNS config syncs over
- Verify nslookup works as expected in Central Region and primary subcloud
Teardown:
- Manage primary subcloud if not managed (module)
- Reset DNS servers to original value on central region (module)
"""
primary_subcloud, managed_subclouds, sc_dns = ensure_synced
new_dns_servers = compose_new_dns_servers(scenario='unreachable_server',
prev_dns_servers=sc_dns)
LOG.tc_step("Unmanage {}".format(primary_subcloud))
dc_helper.unmanage_subcloud(subcloud=primary_subcloud, check_first=True)
LOG.tc_step("Reconfigure DNS on {} from {} to {}".format(
primary_subcloud, sc_dns, new_dns_servers))
system_helper.set_dns_servers(new_dns_servers, auth_info=Tenant.get('admin_platform',
dc_region=primary_subcloud))
managed_cloud = managed_subclouds[0] if managed_subclouds else ''
LOG.tc_step("Wait for sync update log for managed subcloud {} with best effort".format(
managed_cloud))
dc_helper.wait_for_sync_audit(subclouds=managed_cloud, fail_ok=True, timeout=660)
LOG.tc_step("Ensure DNS config is not updated on unmanaged subcloud: {}".format(
primary_subcloud))
code = dc_helper.wait_for_subcloud_dns_config(subcloud=primary_subcloud, expected_dns=sc_dns,
fail_ok=True, timeout=60)[0]
assert 1 == code, "Actual return code: {}".format(code)
LOG.tc_step("Verify nslookup fails on {}".format(primary_subcloud))
central_res, local_res = verify_dns_on_central_and_subcloud(primary_subcloud, fail_ok=True,
sc_dns=sc_dns)
assert 0 == central_res, "nslookup failed on central region"
assert 1 == local_res, "nslookup succeeded on {} with unreachable DNS servers configured".\
format(primary_subcloud)
central_auth = Tenant.get('admin_platform', dc_region='RegionOne')
if system_helper.get_standby_controller_name(auth_info=central_auth):
LOG.tc_step("Swact in central region")
host_helper.swact_host(auth_info=central_auth)
LOG.tc_step('Re-manage {} and ensure local DNS config is overridden by central config'.
format(primary_subcloud))
dc_helper.manage_subcloud(subcloud=primary_subcloud, check_first=False)
dc_helper.wait_for_subcloud_dns_config(subcloud=primary_subcloud, expected_dns=sc_dns)
LOG.tc_step('Verify nslookup works in Central Region and {}'.format(primary_subcloud))
verify_dns_on_central_and_subcloud(primary_subcloud, sc_dns=sc_dns)
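# Helper: run nslookup on RegionOne and the primary subcloud and return the
# list of command return codes. A region whose DNS config still matches the
# reference sc_dns set (or when no reference is given) is temporarily pointed
# at a public DNS server so the lookup has a reachable resolver; the original
# config is restored afterwards.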
def verify_dns_on_central_and_subcloud(primary_subcloud, fail_ok=False, sc_dns=None):
res = []
for region in ('RegionOne', primary_subcloud):
# Take a snapshot of the region's current DNS config so it can be reverted below
orig_dns_servers = system_helper.get_dns_servers(auth_info=Tenant.get('admin_platform',
dc_region=region))
# Only repoint this region to a public DNS server when it still has the
# healthy reference config (or no reference set was given); a region left
# with unreachable servers on purpose is exercised as-is
if not sc_dns or set(sc_dns) <= set(orig_dns_servers):
LOG.info("Modify dns server to public dns")
system_helper.set_dns_servers(nameservers=['8.8.8.8'],
auth_info=Tenant.get('admin_platform',
dc_region=region))
LOG.info("Check dns on {}".format(region))
con_ssh = ControllerClient.get_active_controller(name=region)
code, out = con_ssh.exec_cmd('nslookup -timeout=1 www.google.com', fail_ok=fail_ok,
expect_timeout=30)
res.append(code)
# Revert to the DNS servers captured above
system_helper.set_dns_servers(nameservers=orig_dns_servers,
auth_info=Tenant.get('admin_platform',
dc_region=region))
return res
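# Hypothetical invocation for the DNS tests above (subcloud names are
# placeholders for an actual DC lab config):
#   pytest --subcloud=subcloud1 --subcloud_list=subcloud1,subcloud2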

View File

@ -0,0 +1,42 @@
#
# Copyright (c) 2020 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from pytest import fixture
from consts.auth import Tenant
from keywords import system_helper, check_helper
from utils.clients.ssh import ControllerClient
from utils.tis_log import LOG
@fixture(scope='function')
def check_central_alarms(request):
"""
Check system alarms before and after test case.
Args:
request: pytest request object of the calling test function.
"""
__verify_central_alarms(request=request, scope='function')
def __verify_central_alarms(request, scope):
region = 'RegionOne'
auth_info = Tenant.get('admin_platform', dc_region=region)
con_ssh = ControllerClient.get_active_controller(name=region)
LOG.fixture_step("({}) Gathering fm alarms in central region before test {} begins.".format(
scope, scope))
before_alarms = system_helper.get_alarms(fields=('Alarm ID', 'Entity ID', 'Severity'),
auth_info=auth_info, con_ssh=con_ssh)
def verify_alarms():
LOG.fixture_step(
"({}) Verifying system alarms in central region after test {} ended.".format(
scope, scope))
check_helper.check_alarms(before_alarms=before_alarms, auth_info=auth_info,
con_ssh=con_ssh)
LOG.info("({}) fm alarms verified in central region.".format(scope))
request.addfinalizer(verify_alarms)
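# Usage note: tests request this fixture by name to diff central-region fm
# alarms around the test body; e.g., the ensure_synced fixture in the DC DNS
# tests takes check_central_alarms as an argument.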