Pytest: Add system-inventory tests

Added new tests to the System-Inventory suite by converting the following Robot
Framework test cases to pytest (the shared conversion pattern is sketched after the list):

1. 17-Test-System-Type.robot
2. 27-Query-Product-Type-On-Std-System-Gui.robot
3. 30-Verify-Cpu-Details-Cli.robot
4. 35-Check-Resource-Usage-Panel.robot
5. 36-Alarm-Delete.robot
6. 109-Change-Storage-Pool-Ceph.robot
7. 116-Verify-The-System-Type-Read-Only-CLI.robot
8. 130-Verify-System-Type-Is-Read-Only.robot
9. 147-Verify-System-Pm-Modify-Does-Not-Require-Action-Apply.robot
10. 166-CPU-Profile-Rejection.robot
11. 172-Invalid-Inputs-Rejected-By-Hugepages.robot
12. 181-Export-hosts-information-host-bulk-export-Api.robot
13. 355-Verify-Bulk-Export-Hosts.robot
14. 358-Verify-Cpu-Data-Via-CLI.robot
15. 366-Verify-Update-And-Delete-Helmchart-Override.robot
16. 367-NTP-Server-Change-Using-GUI.robot
17. 378-Modify-NTP-Server-List.robot
18. 55-Change-DNS-On-GUI.robot
19. 61-Edit-MTU-OAM-Interface-GUI.robot
20. 64-change-mtu-value-data-interface-using-cli.robot
21. 129-Verify-System-Ntp-Modify-Rejects-Action.robot
22. 142-Change-MTU-Value-Using-GUI.robot
23. 155-Modify-Hugepages-CLI-GUI.robot
24. 180-Change-DNS-Server-IP.robot
25. 338-Verify-Software-Version.robot
26. 341-Verify-System-Mode-And-Type.robot
27. 347-Resynchronize-Host-To-NTP-Server.robot
28. 357-Change-Oam-Ip-Using-GUI.robot
29. 359-Verify-Removing-Container-Application.robot
30. 361-Verify-Deleting-Container-Application.robot
31. 362-Modify-DNS-Servers.robot
32. 371-Modify-Mtu-Value-And-Verify-Network.robot
33. 385-Reinstall-Dynamic-Addressing.robot
34. 397-Verify-Bulk-Add-By-Deleting-Hosts.robot
35. 413-Change-Hugepages-And-Verify-Status.robot
36. 387-Verify-BMC-Functionality.robot
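
The converted cases share one shape: session-scoped fixtures in conftest.py skip a
test on system types it does not apply to, and the test functions carry the
system_inventory marker. A minimal illustrative sketch of that pattern (the real
implementations are in the files below):

from pytest import mark
from keywords import system_helper

@mark.system_inventory
def test_system_type(no_simplex):
    # no_simplex fixture skips this case on AIO simplex systems
    mode = system_helper.get_system_values(fields="system_mode")[0]
    assert mode == "duplex"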

Signed-off-by: Yong Fu <fuyong@neusoft.com>
Change-Id: Ibae75ca13fd25290e3c6dc7ce07a40879296c5b7
Author: Yong Fu <fuyong@neusoft.com>
Date: 2021-06-23 16:06:59 +08:00
parent a68778f64d
commit 9b2b60ea48
42 changed files with 2208 additions and 31 deletions

@@ -2,4 +2,4 @@
host=review.opendev.org
port=29418
project=starlingx/test.git
defaultbranch=r/stx.4.0
defaultbranch=devel

@@ -0,0 +1,44 @@
from pytest import fixture, skip
from testfixtures.resource_mgmt import *
from keywords import system_helper, network_helper
from utils.clients import ssh
from utils.tis_log import LOG
NETWORK_NAME = "network"
SUBNET_NAME = "subnet"
SUBNET_RANGE = "192.168.0.0/24"
IP_VERSION = 4
@fixture(scope='session')
def no_aio_system():
LOG.fixture_step("(Session) Skip if AIO system")
if system_helper.is_aio_system():
skip('skip if AIO system')
# Creating network
@fixture(scope="session")
def create_network():
net_id = network_helper.create_network(name=NETWORK_NAME, cleanup="session")[1]
subnet_id = network_helper.create_subnet(name=SUBNET_NAME, network=NETWORK_NAME,
subnet_range=SUBNET_RANGE, dhcp=True,
ip_version=IP_VERSION, cleanup="session")[1]
return net_id, subnet_id
@fixture(scope='session')
def duplex_only():
LOG.fixture_step("(Session) Skip if not Duplex")
if not system_helper.is_aio_duplex():
skip('Only applicable to Duplex system')
@fixture(scope='session')
def bare_metal_only():
LOG.fixture_step("(Session) Skip if not bare metal")
con_ssh = ssh.ControllerClient.get_active_controller()
output = con_ssh.exec_sudo_cmd('dmesg | grep -i paravirtualized')[1]
if 'KVM' in output and 'bare hardware' not in output:
skip('Bare metal required; not supported on a KVM virtual environment')

@@ -0,0 +1,73 @@
###
#
# Copyright (c) 2021 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
# Verifies that an alarm can be deleted using CLI.
#
# Author(s): Yong.Fu <yongx.fu@intel.com>
#
###
import os
from pytest import fixture, mark
from consts.stx import GuestImages
from keywords import nova_helper, vm_helper, glance_helper, system_helper
# Flavor, Image, Volume info
cirros_params = {
"flavor_name": "flavor",
"flavor_vcpus": 1,
"flavor_ram": 2048,
"flavor_disk": 4,
"image_name": "image",
"image_file": os.path.join(GuestImages.DEFAULT["image_dir"], "cirros-0.4.0-x86_64-disk.img"),
"disk_format": "qcow2"
}
# Creating Flavor For Image
@fixture(scope="module")
def create_flavor_and_image():
fl_id = nova_helper.create_flavor(name=cirros_params['flavor_name'],
vcpus=cirros_params['flavor_vcpus'],
ram=cirros_params['flavor_ram'],
root_disk=cirros_params['flavor_disk'],
is_public=True, add_default_specs=False,
cleanup="module")[1]
im_id = glance_helper.create_image(name=cirros_params['image_name'],
source_image_file=cirros_params['image_file'],
disk_format=cirros_params['disk_format'],
cleanup="module")[1]
return {
"flavor": fl_id,
"image": im_id
}
# Creating Instance
@fixture(scope='module')
def launch_instance(create_network, create_flavor_and_image):
net_id_list = [{"net-id": create_network[0]}]
return vm_helper.boot_vm(flavor=create_flavor_and_image["flavor"], nics=net_id_list,
source="image", source_id=create_flavor_and_image["image"],
cleanup="module")[1]
@mark.system_inventory
def test_alarm_delete(launch_instance):
"""
36-Alarm-Delete.robot
Args:
launch_instance:
Returns:
"""
vm_helper.set_vm_state(launch_instance)
vm_name = vm_helper.get_vm_values(launch_instance, fields="Name")[0]
result = system_helper.get_alarms(fields=('UUID',), reason_text=vm_name)
system_helper.delete_alarms(result[0])

@@ -0,0 +1,47 @@
###
#
# Copyright (c) 2021 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
# Test to check DNS configuration is successful.
#
# Author(s): Yong.Fu <yongx.fu@intel.com>
#
###
from pytest import mark
from testfixtures.horizon import admin_home_pg, driver
from consts import proj_vars
from keywords import system_helper
from utils.horizon.pages.admin.platform import systemconfigurationpage
from utils.tis_log import LOG
@mark.system_inventory
def test_change_dns_on_gui(admin_home_pg):
"""
55-Change-DNS-On-GUI.robot
Args:
admin_home_pg:
Returns:
"""
LOG.info("Go to System Configuration Page")
systemconfiguration_pg = systemconfigurationpage.SystemConfigurationPage(admin_home_pg.driver)
systemconfiguration_pg.go_to_target_page()
systemconfiguration_pg.go_to_dns_tab()
systemconfiguration_pg.edit_dns(server2='9.18.3.211')
server1 = system_helper.get_dns_servers()[0]
server2 = systemconfiguration_pg.get_dns_info(server1, 'DNS Server 2 IP')
assert server2 == '9.18.3.211'
systemconfiguration_pg.edit_dns(server2='')
server2 = systemconfiguration_pg.get_dns_info(server1, 'DNS Server 2 IP')
assert server2 == ''
path = proj_vars.ProjVar.get_var(var_name='LOG_DIR')
pic_path = '{}/{}/{}'.format(path, "horizon", "system_configuration_page_capture.png")
LOG.info('{} {}'.format("saving capture to:", pic_path))
systemconfiguration_pg.driver.save_screenshot(pic_path)

@@ -0,0 +1,56 @@
###
#
# Copyright (c) 2021 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
# Change the dns server ip addresses using cli.
#
# Author(s): Yong.Fu <yongx.fu@intel.com>
#
###
from pytest import mark
from consts.stx import EventLogID
from keywords import system_helper, host_helper
@mark.system_inventory
def test_change_dns_server_ip(no_simplex):
"""
180-Change-DNS-Server-IP.robot
Args:
no_simplex:
Returns:
"""
default_val = system_helper.get_dns_servers()
system_helper.set_dns_servers(["100.100.100.101"])
events_found = system_helper.wait_for_events(event_log_id=EventLogID.CONFIG_OUT_OF_DATE)
assert events_found
system_helper.set_dns_servers(default_val, "apply")
hosts = system_helper.get_controllers()
status = system_helper.get_host_values(hosts[-1], fields="config_status")[0]
if "Config out-of-date" == status:
host_helper.lock_unlock_hosts(hosts[-1])
status = system_helper.get_host_values("controller-0", fields="config_status")[0]
if "Config out-of-date" == status:
host_helper.swact_host("controller-0")
host_helper.lock_unlock_hosts("controller-0")
host_helper.swact_host("controller-1")
if not system_helper.is_aio_system():
computes = system_helper.get_computes()
for compute in computes:
status = system_helper.get_host_values(compute, fields="config_status")[0]
if "Config out-of-date" == status:
host_helper.lock_unlock_hosts(compute)
if system_helper.is_storage_system():
storages = system_helper.get_storage_nodes()
for storage in storages:
status = system_helper.get_host_values(storage, fields="config_status")[0]
if "Config out-of-date" == status:
host_helper.lock_unlock_hosts(storage)
assert EventLogID.CONFIG_OUT_OF_DATE not in \
system_helper.get_alarms(fields=("Alarm ID",), alarm_id=EventLogID.CONFIG_OUT_OF_DATE)

@@ -0,0 +1,99 @@
###
#
# Copyright (c) 2021 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
# Modify the number of hugepages using Horizon and check that the status shows pending.
#
# Author(s): Yong.Fu <yongx.fu@intel.com>
#
###
import os
from pytest import mark
from consts.stx import HostAvailState, GuestImages
from testfixtures.horizon import admin_home_pg, driver
from keywords import system_helper, kube_helper, network_helper, nova_helper, glance_helper,\
vm_helper
from utils.horizon.pages.admin.platform import hostinventorypage
from utils.tis_log import LOG
NETWORK_NAME = "hugepage-network"
SUBNET_NAME = "hugepage-subnet"
SUBNET_RANGE = "192.168.0.0/24"
IP_VERSION = 4
# Flavor, Image, Volume info
cirros_params = {
"flavor_name": "testvm-flavor",
"flavor_vcpus": 1,
"flavor_ram": 2048,
"flavor_disk": 4,
"image_name": "testvm-image",
"image_file": os.path.join(GuestImages.DEFAULT["image_dir"], "cirros-0.4.0-x86_64-disk.img"),
"disk_format": "qcow2"
}
# Creating Instance
def launch_instance(host):
net_id = network_helper.create_network(name=NETWORK_NAME, cleanup="module")[1]
network_helper.create_subnet(name=SUBNET_NAME, network=NETWORK_NAME,
subnet_range=SUBNET_RANGE, dhcp=True,
ip_version=IP_VERSION, cleanup="module")
net_id_list = [{"net-id": net_id}]
fl_id = nova_helper.create_flavor(name=cirros_params["flavor_name"],
vcpus=cirros_params["flavor_vcpus"],
ram=cirros_params["flavor_ram"],
root_disk=cirros_params["flavor_disk"],
is_public=True, add_default_specs=False,
cleanup="module")[1]
im_id = glance_helper.create_image(name=cirros_params["image_name"],
source_image_file=cirros_params["image_file"],
disk_format=cirros_params["disk_format"],
cleanup="module")[1]
vm_id = vm_helper.boot_vm(flavor=fl_id, nics=net_id_list, source="image",
source_id=im_id, cleanup="module", vm_host=host)[1]
return vm_id
@mark.system_inventory
def test_change_hugepages_and_verify_status(no_aio_system, admin_home_pg):
"""
413-Change-Hugepages-And-Verify-Status.robot
Args:
no_aio_system:
admin_home_pg:
Returns:
"""
LOG.info("Go to Host Inventory")
hostinventory_pg = hostinventorypage.HostInventoryPage(admin_home_pg.driver)
hostinventory_pg.go_to_target_page()
hostinventory_pg.go_to_hosts_tab()
compute = system_helper.get_computes()[-1]
hostinventory_pg.lock_host(compute)
system_helper.wait_for_hosts_states(compute, timeout=360, check_interval=30,
availability=['online'])
detail_pg = hostinventory_pg.go_to_host_detail_page(compute)
detail_pg.go_to_memory_tab()
detail_pg.update_memory(hugepages_2M='3000')
memory_huge_page = detail_pg.get_memory_table_info('0', 'Application Pages')
assert 'Pending' in memory_huge_page
hostinventory_pg.go_to_target_page()
hostinventory_pg.go_to_hosts_tab()
hostinventory_pg.unlock_host(compute)
system_helper.wait_for_hosts_states(compute, timeout=360, check_interval=30,
availability=[HostAvailState.AVAILABLE,
HostAvailState.DEGRADED])
kube_helper.wait_for_pods_healthy(timeout=600)
detail_pg = hostinventory_pg.go_to_host_detail_page(compute)
detail_pg.go_to_memory_tab()
memory_huge_page = detail_pg.get_memory_table_info('0', 'Application Pages')
assert 'Pending' not in memory_huge_page
launch_instance(compute)

@@ -0,0 +1,33 @@
###
#
# Copyright (c) 2021 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
# Change the MTU value of the data interface using CLI.
#
# Author(s): Yong.Fu <yongx.fu@intel.com>
#
###
from pytest import mark
from keywords import system_helper, host_helper
@mark.system_inventory
def test_change_mtu_value_data_interface_using_cli(no_aio_system):
"""
64-change-mtu-value-data-interface-using-cli.robot
Args:
no_aio_system:
Returns:
"""
host = system_helper.get_computes()[0]
host_helper.lock_host(host)
name_list = host_helper.get_host_interfaces(host)
for name in name_list:
host_helper.modify_host_interface(host, name, mtu=1600)
host_helper.unlock_host(host)

@@ -0,0 +1,60 @@
###
#
# Copyright (c) 2021 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
# Change the MTU value of the data interface using GUI.
#
# Author(s): Yong.Fu <yongx.fu@intel.com>
#
###
import re
from pytest import mark
from testfixtures.horizon import admin_home_pg, driver
from consts.stx import HostAvailState
from keywords import system_helper, host_helper, kube_helper
from utils.horizon.pages.admin.platform import hostinventorypage
from utils.tis_log import LOG
@mark.system_inventory
def test_change_mtu_value_using_gui(no_aio_system, admin_home_pg):
"""
142-Change-MTU-Value-Using-GUI.robot
Args:
no_aio_system:
admin_home_pg:
Returns:
"""
LOG.info("Go to Host Inventory")
hostinventory_pg = hostinventorypage.HostInventoryPage(admin_home_pg.driver)
hostinventory_pg.go_to_target_page()
for host in system_helper.get_computes():
hostinventory_pg.go_to_hosts_tab()
hostinventory_pg.lock_host(host)
system_helper.wait_for_hosts_states(host, timeout=360, check_interval=30,
availability=['online'])
LOG.info("Go to Host Detail")
detail_pg = hostinventory_pg.go_to_host_detail_page(host)
detail_pg.go_to_interfaces_tab()
interface_name = host_helper.get_host_interfaces(host, **{'class': 'data'})[0]
attribute = detail_pg.get_interface_table_info(interface_name, 'Attributes')
old_mtu = re.split('[=,]', attribute)[1]
new_mtu = str(int(old_mtu) + int(100))
detail_pg.edit_interface(interface_name, new_mtu)
new_attribute = detail_pg.get_interface_table_info(interface_name, 'Attributes')
assert new_mtu == re.split('[=,]', new_attribute)[1]
detail_pg.edit_interface(interface_name, old_mtu)
hostinventory_pg.go_to_target_page()
hostinventory_pg.unlock_host(host)
system_helper.wait_for_hosts_states(host, timeout=360, check_interval=30,
availability=[HostAvailState.AVAILABLE,
HostAvailState.DEGRADED])
kube_helper.wait_for_pods_healthy()

@@ -0,0 +1,51 @@
###
#
# Copyright (c) 2021 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
# Modify OAM IP address using GUI.
#
# Author(s): Yong.Fu <yongx.fu@intel.com>
#
###
from pytest import mark
from testfixtures.horizon import admin_home_pg, driver
from consts.stx import EventLogID
from keywords import system_helper, host_helper, kube_helper
from utils.horizon.pages.admin.platform import systemconfigurationpage
from utils.tis_log import LOG
@mark.system_inventory
def test_change_oam_ip_using_gui(no_simplex, admin_home_pg):
"""
357-Change-Oam-Ip-Using-GUI.robot
Args:
no_simplex:
admin_home_pg:
Returns:
"""
LOG.info("Go to System Configuration Page")
systemconfiguration_pg = systemconfigurationpage.SystemConfigurationPage(admin_home_pg.driver)
systemconfiguration_pg.go_to_target_page()
systemconfiguration_pg.go_to_oam_ip_tab()
systemconfiguration_pg.edit_oam(controller1="10.10.10.10")
host_helper.lock_unlock_hosts("controller-1")
kube_helper.wait_for_pods_healthy(fail_ok=True)
computes = system_helper.get_computes()
host_helper.lock_host(computes[0])
host_helper.unlock_host(computes[0])
host_helper.lock_host(computes[1])
host_helper.unlock_host(computes[-1])
host_helper.swact_host("controller-0")
host_helper.lock_unlock_hosts("controller-0")
kube_helper.wait_for_pods_healthy(fail_ok=False)
out = system_helper.wait_for_alarm(alarm_id=EventLogID.CONFIG_OUT_OF_DATE, fail_ok=True)[0]
assert not out, "Alarm ID 250.001 exist"
host_helper.swact_host("controller-1")

@@ -0,0 +1,43 @@
###
#
# Copyright (c) 2021 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
# Test to check that CEPH storage pool modification is successful.
#
# Author(s): Yong.Fu <yongx.fu@intel.com>
#
###
import time
from pytest import mark
from testfixtures.horizon import admin_home_pg, driver
from utils.horizon.pages.admin.platform import systemconfigurationpage
from utils.tis_log import LOG
@mark.system_inventory
def test_change_storage_pool_ceph(admin_home_pg):
"""
109-Change-Storage-Pool-Ceph.robot
Args:
admin_home_pg:
Returns:
"""
LOG.info("Go to System Configuration Page")
systemconfiguration_pg = systemconfigurationpage.SystemConfigurationPage(admin_home_pg.driver)
systemconfiguration_pg.go_to_target_page()
systemconfiguration_pg.go_to_ceph_storage_pools_tab()
systemconfiguration_pg.edit_storage_pool("storage", cinder_pool=5, object_pool=5)
time.sleep(20)
cinder_storage = systemconfiguration_pg.get_ceph_storage_pools_info(
"storage", "Cinder Volume Storage (GiB)")
assert cinder_storage == '5'
object_storage = systemconfiguration_pg.get_ceph_storage_pools_info(
"storage", "Object Storage (GiB)")
assert object_storage == '5'

@@ -0,0 +1,37 @@
###
#
# Copyright (c) 2021 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
# Check that the Resource Usage panel works properly.
#
# Author(s): Yong.Fu <yongx.fu@intel.com>
#
###
import time
from pytest import mark
from testfixtures.horizon import admin_home_pg_container, driver
from utils.horizon.pages.admin import overviewpage
@mark.system_inventory
def test_check_resource_usage_panel(admin_home_pg_container):
"""
35-Check-Resource-Usage-Panel.robot
Args:
admin_home_pg_container:
Returns:
"""
overview_pg = overviewpage.OverviewPage(
admin_home_pg_container.driver, port=admin_home_pg_container.port)
overview_pg.go_to_target_page()
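# Submit the usage report date form with its default range, wait for the page to
# refresh, then download the CSV usage summary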
overview_pg.date_form.to_date.send_keys()
overview_pg.date_form.submit()
time.sleep(5)
overview_pg.csv_summary()

@@ -0,0 +1,53 @@
###
#
# Copyright (c) 2021 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
# Change and Verify Mtu Value in GUI.
#
# Author(s): Yong.Fu <yongx.fu@intel.com>
#
###
import re
from pytest import mark
from testfixtures.horizon import admin_home_pg, driver
from keywords import host_helper
from utils.horizon.pages.admin.platform import hostinventorypage
from utils.tis_log import LOG
@mark.system_inventory
def test_edit_mtu_oam_interface_gui(no_simplex, admin_home_pg):
"""
61-Edit-MTU-OAM-Interface-GUI.robot
Args:
no_simplex:
admin_home_pg:
Returns:
"""
host = 'controller-1'
host_helper.lock_host(host)
LOG.info("Go to Host Inventory")
hostinventory_pg = hostinventorypage.HostInventoryPage(admin_home_pg.driver)
hostinventory_pg.go_to_target_page()
hostinventory_pg.go_to_hosts_tab()
LOG.info("Go to Host Detail")
detail_pg = hostinventory_pg.go_to_host_detail_page(host)
detail_pg.go_to_interfaces_tab()
interfaces = host_helper.get_host_interfaces(host)
interface_name = list(filter(lambda x: re.search('oam.*', x), interfaces))[0]
old_attribute = detail_pg.get_interface_table_info(interface_name, "Attributes")
old_mtu = re.split('[=,]', old_attribute)[1]
LOG.info("original mtu is {}".format(old_mtu))
detail_pg.edit_interface(interface_name, '9000')
mtu = detail_pg.get_interface_table_info(interface_name, "Attributes")
assert re.split('[=,]', mtu)[1] == '9000'
detail_pg.edit_interface(interface_name, old_mtu)
host_helper.unlock_host(host)

@@ -0,0 +1,31 @@
###
#
# Copyright (c) 2021 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
# Test to export host bulk information to a file on the active controller.
#
# Author(s): Yong.Fu <yongx.fu@intel.com>
#
###
from pytest import mark
from consts.auth import Tenant
from utils import cli
from utils.clients.ssh import ControllerClient
@mark.system_inventory
def test_export_host_bulk():
"""
19-Export-Host-Bulk.robot
Returns:
"""
con_ssh = ControllerClient.get_active_controller()
cli.system("host-bulk-export", "--filename BulkFile", ssh_client=con_ssh,
auth_info=Tenant.get('admin_platform'))
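# Strip trailing carriage returns from the exported file and save a cleaned copy as Bulk_File.xml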
con_ssh.exec_cmd("awk '{ sub(\"\r$\", \"\"); print }' BulkFile >> Bulk_File.xml")
assert "Bulk_File.xml" in con_ssh.exec_cmd("ls")[1]

@@ -0,0 +1,58 @@
###
#
# Copyright (c) 2021 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
# Import host information in a file.
#
# Author(s): Chen, Dongqi (Trica) <dongqix.chen@intel.com>
#
###
from pytest import mark
from utils.clients import ssh
from utils.tis_log import LOG
from consts.auth import Tenant
password = Tenant.get('admin_platform').get('password')
file_name = "Host_info.txt"
url = "http://192.178.204.2:5000/v3/auth/tokens"
url2 = "http://192.178.204.2:6385/v1/ihosts/bulk_export"
content1 = "Content-Type: application/json"
content2 = "User-Agent: python-keystoneclient"
part1 = "content-type: application/json"
part2 = "accept: application/json"
set1 = """'{"auth": {"scope": {"project": {"domain": {"name": "Default"},"name": "admin"}}, """
set2 = """"identity": {"password": {"user": {"domain": {"name": "Default"}, """
set3 = """"password": "%s","name": "admin"}},"methods": ["password"]}}}'""" % (password)
@mark.system_inventory
def test_export_hosts_information_host_bulk_export_api():
"""
181-Export-hosts-information-host-bulk-export-Api.robot
Returns:
"""
con_ssh = ssh.ControllerClient.get_active_controller()
# get token
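# POST the credential payload to the Keystone tokens endpoint and extract the
# X-Subject-Token value from the response headers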
sets = set1 + set2 + set3
token_cmd = "curl -is -X POST '{}' -H '{}' -H '{}' -d {} | grep Subject-Token | " \
"awk -F ':' '{{print$2}}'".format(url, content1, content2, sets)
token = con_ssh.exec_cmd(token_cmd)[1]
LOG.info(token)
assert token
# get host information
part3 = "x-auth-token: {}".format(token)
cmd = "curl -i '{}' -X GET -H '{}' -H '{}' -H '{}' -d 'null' >> {}" \
.format(url2, part1, part2, part3, file_name)
code = con_ssh.exec_cmd(cmd)[0]
assert code == 0
# check that the file exists
out = con_ssh.exec_cmd(r"ls")[1]
assert file_name in out, "Import host information in a file failed"
con_ssh.exec_cmd(r"rm {}".format(file_name))

@@ -0,0 +1,44 @@
###
#
# Copyright (c) 2021 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
# Invalid inputs for the number of hugepages will be rejected by the GUI.
#
# Author(s): Yong.Fu <yongx.fu@intel.com>
#
###
from pytest import mark
from testfixtures.horizon import admin_home_pg, driver
from keywords import system_helper, kube_helper
from utils.horizon.pages.admin.platform import hostinventorypage
from utils.tis_log import LOG
@mark.system_inventory
def test_invalid_inputs_rejected_by_hugepages(no_aio_system, admin_home_pg):
"""
172-Invalid-Inputs-Rejected-By-Hugepages.robot
Args:
no_aio_system:
admin_home_pg:
Returns:
"""
LOG.info("Go to Host Inventory")
hostinventory_pg = hostinventorypage.HostInventoryPage(admin_home_pg.driver)
hostinventory_pg.go_to_target_page()
hosts = system_helper.get_computes()
LOG.info("host is {}".format(hosts[0]))
hostinventory_pg.lock_host(hosts[0])
system_helper.wait_for_hosts_states(hosts[0], timeout=360, check_interval=30,
availability=['online'])
hostinventory_pg.unlock_host(hosts[0])
system_helper.wait_for_hosts_states(hosts[0], timeout=360, check_interval=30,
availability=['available'])
kube_helper.wait_for_pods_healthy()

@@ -0,0 +1,50 @@
###
#
# Copyright (c) 2021 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
# Change the dns server ip addresses using GUI.
#
# Author(s): Yong.Fu <yongx.fu@intel.com>
#
###
from pytest import mark
from testfixtures.horizon import admin_home_pg, driver
from keywords import system_helper
from utils.horizon.pages.admin.platform import systemconfigurationpage
from utils.tis_log import LOG
@mark.system_inventory
def test_modify_dns_servers(admin_home_pg):
"""
362-Modify-DNS-Servers.robot
Args:
admin_home_pg:
Returns:
"""
LOG.info("Go to System Configuration Page")
systemconfiguration_pg = systemconfigurationpage.SystemConfigurationPage(admin_home_pg.driver)
systemconfiguration_pg.go_to_target_page()
systemconfiguration_pg.go_to_dns_tab()
dns_servers = system_helper.get_dns_servers()
systemconfiguration_pg.edit_dns(server1='10.252.1.1', server2='10.252.1.2', server3='10.252.1.3')
server2 = systemconfiguration_pg.get_dns_info('10.252.1.1', 'DNS Server 2 IP')
assert server2 == '10.252.1.2'
server3 = systemconfiguration_pg.get_dns_info('10.252.1.1', 'DNS Server 3 IP')
assert server3 == '10.252.1.3'
length = len(dns_servers)
server1, server2, server3 = '', '', ''
if length == 1:
server1 = dns_servers[0]
if length == 2:
server2 = dns_servers[1]
if length == 3:
server3 = dns_servers[2]
systemconfiguration_pg.edit_dns(server1, server2, server3)

@@ -0,0 +1,128 @@
###
#
# Copyright (c) 2021 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
# Modifies Hugepages on CLI and GUI for compute-0.
#
# Author(s): Yong.Fu <yongx.fu@intel.com>
#
###
import os
import time
from pytest import mark
from testfixtures.horizon import admin_home_pg, driver
from consts.stx import GuestImages, HostAvailState
from keywords import host_helper, system_helper, network_helper, nova_helper, \
glance_helper, vm_helper, kube_helper
from utils import cli, exceptions
from utils.clients import ssh
from utils.horizon.pages.admin.platform import hostinventorypage
from utils.tis_log import LOG
NETWORK_NAME = "hugepage-network"
SUBNET_NAME = "hugepage-subnet"
SUBNET_RANGE = "192.168.0.0/24"
IP_VERSION = 4
# Flavor, Image, Volume info
cirros_params = {
"flavor_name": "hugepage-flavor",
"flavor_vcpus": 1,
"flavor_ram": 2048,
"flavor_disk": 4,
"image_name": "hugepage-image",
"image_file": os.path.join(GuestImages.DEFAULT["image_dir"], "cirros-0.4.0-x86_64-disk.img"),
"disk_format": "qcow2"
}
# Creating Instance
def launch_instance(host):
net_id = network_helper.create_network(name=NETWORK_NAME, cleanup="module")[1]
network_helper.create_subnet(name=SUBNET_NAME, network=NETWORK_NAME,
subnet_range=SUBNET_RANGE, dhcp=True,
ip_version=IP_VERSION, cleanup="module")
net_id_list = [{"net-id": net_id}]
fl_id = nova_helper.create_flavor(name=cirros_params["flavor_name"],
vcpus=cirros_params["flavor_vcpus"],
ram=cirros_params["flavor_ram"],
root_disk=cirros_params["flavor_disk"],
is_public=True, add_default_specs=False,
cleanup="module")[1]
im_id = glance_helper.create_image(name=cirros_params["image_name"],
source_image_file=cirros_params["image_file"],
disk_format=cirros_params["disk_format"],
cleanup="module")[1]
vm_id = vm_helper.boot_vm(flavor=fl_id, nics=net_id_list, source="image",
source_id=im_id, cleanup="module", vm_host=host)[1]
return vm_id
def check_host_memory_state(host):
LOG.info("waiting for {} state of the Huge Page change, should not contain the word pending"
.format(host))
end_time = time.time() + 30
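# Poll 'system host-memory-show' every 5 seconds, for up to 30 seconds, until the 2M
# hugepage row no longer reports 'Pending'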
current_status = cli.system('host-memory-show', "{} 0 | grep -w 2M -A 1".format(host))[1]
while time.time() < end_time:
if "Pending" not in current_status:
return 0
time.sleep(5)
current_status = cli.system('host-memory-show', "{} 0 | grep -w 2M -A 1".format(host))[1]
err_msg = "Timed out waiting for {} not in pending.".format(host)
raise exceptions.VMTimeout(err_msg)
@mark.system_inventory
def test_modify_hugepages_cli_gui(no_aio_system, admin_home_pg):
"""
155-Modify-Hugepages-CLI-GUI.robot
Args:
no_aio_system:
admin_home_pg:
Returns:
"""
con_ssh = ssh.ControllerClient.get_active_controller()
host = system_helper.get_computes()[0]
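# Record the current 2M hugepage figure (column 8 of the filtered host-memory-show
# output) so it can be restored and compared later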
original = cli.system('host-memory-show', "{} 0 | grep -w 2M | awk '{{print$8}}'".format(host),
ssh_client=con_ssh)[1]
host_helper.lock_host(host)
args = "-2M 1600 {} 0".format(host)
code, output = cli.system('host-memory-modify', args, ssh_client=con_ssh)
assert code == 0, output
host_helper.unlock_host(host)
kube_helper.wait_for_pods_healthy()
check_host_memory_state(host)
current = cli.system('host-memory-show', "{} 0 | grep -w 2M | awk '{{print$8}}'".format(host),
ssh_client=con_ssh)[1]
assert current != original
launch_instance(host)
LOG.info("Go to Host Inventory")
hostinventory_pg = hostinventorypage.HostInventoryPage(admin_home_pg.driver)
hostinventory_pg.go_to_target_page()
hostinventory_pg.go_to_hosts_tab()
hostinventory_pg.lock_host(host)
system_helper.wait_for_hosts_states(host, timeout=360, check_interval=30,
availability=['online'])
detail_pg = hostinventory_pg.go_to_host_detail_page(host)
detail_pg.go_to_memory_tab()
detail_pg.update_memory(hugepages_2M=original)
hostinventory_pg.go_to_target_page()
hostinventory_pg.go_to_hosts_tab()
hostinventory_pg.unlock_host(host)
system_helper.wait_for_hosts_states(host, timeout=360, check_interval=30,
availability=[HostAvailState.AVAILABLE,
HostAvailState.DEGRADED])
kube_helper.wait_for_pods_healthy()
check_host_memory_state(host)
current = cli.system('host-memory-show', "{} 0 | grep -w 2M | awk '{{print$8}}'".format(host),
ssh_client=con_ssh)[1]
assert current == original
launch_instance(host)

@@ -0,0 +1,55 @@
###
#
# Copyright (c) 2021 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
# Change the MTU value of the data interface in cli and verify the network works without issues.
#
# Author(s): Yong.Fu <yongx.fu@intel.com>
#
###
from pytest import mark
from keywords import system_helper, host_helper
from utils import cli
from utils.clients import ssh
@mark.system_inventory
def test_modify_mtu_value_and_verify_network(no_simplex):
"""
371-Modify-Mtu-Value-And-Verify-Network.robot
Args:
no_simplex:
Returns:
"""
if system_helper.is_aio_duplex():
host = 'controller-1'
else:
host = system_helper.get_computes()[0]
con_ssh = ssh.ControllerClient.get_active_controller()
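# Read the current MTU of the eth1000 data interface from 'system host-if-list -a';
# the awk/grep/sed pipeline keeps only the numeric MTU value from the listing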
args = "{} -a | grep eth1000 | awk '{{print$18}}' | grep -Eo '[0-9]{{1,4}}' | " \
"sed '/^$/d'".format(host)
code, output = cli.system('host-if-list', args)
assert code == 0, output
default_mtu = output
args = "{} -a | grep eth1000 | awk '{{print$4}}'".format(host)
code, output = cli.system('host-if-list', args)
assert code == 0, output
name = output
host_helper.lock_host(host)
code, output = cli.system('host-if-modify', "-m 3000 {} {}".format(host, name))
assert code == 0, output
host_helper.unlock_host(host)
cmd = 'ping -c 2 {}'.format(host)
output = con_ssh.exec_cmd(cmd)[1]
assert '64 bytes' in output
host_helper.lock_host(host)
code, output = cli.system('host-if-modify', "-m {} {} {}".format(default_mtu, host, name))
assert code == 0, output
host_helper.unlock_host(host)

@@ -0,0 +1,54 @@
###
#
# Copyright (c) 2021 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
# Modify the NTP server using CLI.
#
# Author(s): Yong.Fu <yongx.fu@intel.com>
#
###
from pytest import mark
from consts.stx import EventLogID
from keywords import system_helper, host_helper
@mark.system_inventory
def test_modify_ntp_server_list():
"""
378-Modify-NTP-Server-List.robot
Returns:
"""
ntp = system_helper.get_ntp_values()
system_helper.modify_ntp(ntp_servers=('10.22.1.1', '10.22.1.2', '10.22.1.3'),
check_first=False, clear_alarm=False)
system_helper.wait_for_alarm(alarm_id=EventLogID.CONFIG_OUT_OF_DATE, timeout=60)
system_helper.modify_ntp(ntp_servers=ntp, check_first=False, clear_alarm=False)
hosts = system_helper.get_controllers()
status = system_helper.get_host_values(hosts[-1], fields="config_status")[0]
if "Config out-of-date" == status:
host_helper.lock_unlock_hosts(hosts[-1])
if not system_helper.is_aio_simplex():
status = system_helper.get_host_values("controller-0", fields="config_status")[0]
if "Config out-of-date" == status:
host_helper.swact_host("controller-0")
host_helper.lock_unlock_hosts("controller-0")
host_helper.swact_host("controller-1")
if not system_helper.is_aio_system():
computes = system_helper.get_computes()
for compute in computes:
status = system_helper.get_host_values(compute, fields="config_status")[0]
if "Config out-of-date" == status:
host_helper.lock_unlock_hosts(compute)
if system_helper.is_storage_system():
storages = system_helper.get_storage_nodes()
for storage in storages:
status = system_helper.get_host_values(storage, fields="config_status")[0]
if "Config out-of-date" == status:
host_helper.lock_unlock_hosts(storage)
assert EventLogID.CONFIG_OUT_OF_DATE not in \
system_helper.get_alarms(fields=("Alarm ID",), alarm_id=EventLogID.CONFIG_OUT_OF_DATE)

@@ -0,0 +1,64 @@
###
#
# Copyright (c) 2021 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
# Modify the NTP server using Horizon.
#
# Author(s): Yong.Fu <yongx.fu@intel.com>
#
###
from pytest import mark
from testfixtures.horizon import admin_home_pg, driver
from consts.stx import EventLogID
from keywords import system_helper, host_helper
from utils.horizon.pages.admin.platform import systemconfigurationpage
from utils.tis_log import LOG
@mark.system_inventory
def test_ntp_server_change_using_gui(admin_home_pg):
"""
367-NTP-Server-Change-Using-GUI.robot
Args:
admin_home_pg:
Returns:
"""
LOG.info("Go to System Configuration Page")
systemconfiguration_pg = systemconfigurationpage.SystemConfigurationPage(admin_home_pg.driver)
systemconfiguration_pg.go_to_target_page()
systemconfiguration_pg.go_to_ntp_tab()
ntp = system_helper.get_ntp_values()
systemconfiguration_pg.edit_ntp("10.252.1.1", "10.252.1.2", "10.252.1.3")
system_helper.wait_for_alarm(alarm_id=EventLogID.CONFIG_OUT_OF_DATE, timeout=60)
system_helper.modify_ntp(ntp_servers=ntp, check_first=False, clear_alarm=False)
hosts = system_helper.get_controllers()
status = system_helper.get_host_values(hosts[-1], fields="config_status")[0]
if "Config out-of-date" == status:
host_helper.lock_unlock_hosts(hosts[-1])
if not system_helper.is_aio_simplex():
status = system_helper.get_host_values("controller-0", fields="config_status")[0]
if "Config out-of-date" == status:
host_helper.swact_host("controller-0")
host_helper.lock_unlock_hosts("controller-0")
host_helper.swact_host("controller-1")
if not system_helper.is_aio_system():
computes = system_helper.get_computes()
for compute in computes:
status = system_helper.get_host_values(compute, fields="config_status")[0]
if "Config out-of-date" == status:
host_helper.lock_unlock_hosts(compute)
if system_helper.is_storage_system():
storages = system_helper.get_storage_nodes()
for storage in storages:
status = system_helper.get_host_values(storage, fields="config_status")[0]
if "Config out-of-date" == status:
host_helper.lock_unlock_hosts(storage)
assert EventLogID.CONFIG_OUT_OF_DATE not in \
system_helper.get_alarms(fields=("Alarm ID",), alarm_id=EventLogID.CONFIG_OUT_OF_DATE)

@@ -0,0 +1,40 @@
###
#
# Copyright (c) 2021 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
# Test to verify host cpu details.
#
# Author(s): Yong.Fu <yongx.fu@intel.com>
#
###
from pytest import mark
from testfixtures.horizon import admin_home_pg, driver
from consts import proj_vars
from utils.horizon.pages.admin.platform import systemconfigurationpage
from utils.tis_log import LOG
@mark.system_inventory
def test_query_product_type_on_std_system_gui(admin_home_pg):
"""
27-Query-Product-Type-On-Std-System-Gui.robot
Args:
admin_home_pg:
Returns:
"""
LOG.info('{} {}'.format("home page title:", admin_home_pg.page_title))
assert admin_home_pg.page_title == "Host Inventory - StarlingX"
LOG.info("Go to System Configuration Page")
systemconfiguration_pg = systemconfigurationpage.SystemConfigurationPage(admin_home_pg.driver)
systemconfiguration_pg.go_to_target_page()
path = proj_vars.ProjVar.get_var(var_name='LOG_DIR')
pic_path = '{}/{}/{}'.format(path, "horizon", "system_configuration_page_capture.png")
LOG.info('{} {}'.format("saving capture to:", pic_path))
systemconfiguration_pg.driver.save_screenshot(pic_path)

@@ -0,0 +1,149 @@
###
#
# Copyright (c) 2021 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
# Test to check vm response to reboot of the active compute.
#
# Author(s): Yong.Fu <yongx.fu@intel.com>
#
###
import os
import time
from pytest import mark
from consts.auth import Tenant
from consts.stx import GuestImages
from keywords import system_helper, host_helper, network_helper, nova_helper, \
glance_helper, cinder_helper, vm_helper
from utils import cli, table_parser, exceptions
from utils.clients.ssh import ControllerClient
from utils.tis_log import LOG
NETWORK_NAME = "385-network"
SUBNET_NAME = "385-subnet"
SUBNET_RANGE = "192.168.0.0/24"
IP_VERSION = 4
MAX_TIME = 300
# Flavor, Image, Volume info
cirros_params = {
"flavor_name": "385-flavor",
"flavor_vcpus": 1,
"flavor_ram": 2048,
"flavor_disk": 4,
"volume_size": 5,
"volume_name": "385-cinder",
"image_name": "385-image",
"image_file": os.path.join(GuestImages.DEFAULT["image_dir"], "cirros-0.4.0-x86_64-disk.img"),
"disk_format": "qcow2"
}
# Creating Instance
def launch_instance():
con_ssh = ControllerClient.get_active_controller()
auth_info = Tenant.get('admin')
table_ = table_parser.table(
cli.openstack('network segment range list', ssh_client=con_ssh, auth_info=auth_info)[1])
val = table_parser.get_values(table_, "Name")
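# If no VLAN segment ranges exist yet, create ranges on physnet0/physnet1 so the
# tenant network created below can be scheduled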
if not val:
network_helper.create_segmentation_range(name='physnet0-a', shared=False, project='admin',
minimum=400, maximum=499, network_type='vlan',
physical_network='physnet0')
network_helper.create_segmentation_range(name='physnet0-b', shared=True, minimum=10,
maximum=10, network_type='vlan',
physical_network='physnet0')
network_helper.create_segmentation_range(name='physnet1-a', shared=False, project='admin',
minimum=500, maximum=599, network_type='vlan',
physical_network='physnet1')
net_id = network_helper.create_network(name=NETWORK_NAME, cleanup="module")[1]
network_helper.create_subnet(name=SUBNET_NAME, network=NETWORK_NAME,
subnet_range=SUBNET_RANGE, dhcp=True,
ip_version=IP_VERSION, cleanup="module")
fl_id = nova_helper.create_flavor(name=cirros_params["flavor_name"],
vcpus=cirros_params["flavor_vcpus"],
ram=cirros_params["flavor_ram"],
root_disk=cirros_params["flavor_disk"],
is_public=True, add_default_specs=False,
cleanup="module")[1]
im_id = glance_helper.create_image(name=cirros_params["image_name"],
source_image_file=cirros_params["image_file"],
disk_format=cirros_params["disk_format"],
cleanup="module")[1]
vol_id = cinder_helper.create_volume(name=cirros_params['volume_name'], source_type='image',
source_id=im_id, size=cirros_params['volume_size'],
cleanup="module")[1]
net_id_list = [{"net-id": net_id}]
vm_id = vm_helper.boot_vm(flavor=fl_id, nics=net_id_list, source="volume",
source_id=vol_id, cleanup="module")[1]
return vm_id
def wait_for_host_delete_status(host):
LOG.info("waiting for {} to delete".format(host))
end_time = time.time() + 300
exists = system_helper.host_exists(host)
while time.time() < end_time:
if not exists:
LOG.info("{} has been deleted".format(host))
return 0
time.sleep(20)
exists = system_helper.host_exists(host)
err_msg = "Timed out waiting for {} to delete".format(host)
raise exceptions.VMTimeout(err_msg)
def wait_for_host_install_status(host):
LOG.info("waiting for {} install_state status: completed".format(host))
end_time = time.time() + 2400
current_status = system_helper.get_host_values(host, "install_state")[0]
while time.time() < end_time:
if current_status == "completed":
LOG.info("host status has reached completed")
return 0
time.sleep(30)
current_status = system_helper.get_host_values(host, "install_state")[0]
err_msg = "Timed out waiting for {} install_state status: completed. {} " \
"install_state status: {}".format(host, host, current_status)
raise exceptions.VMTimeout(err_msg)
@mark.system_inventory
def test_reinstall_dynamic_addressing(no_simplex):
"""
385-Reinstall-Dynamic-Addressing.robot
Args:
no_simplex:
Returns:
"""
con_ssh = ControllerClient.get_active_controller()
auth_info = Tenant.get('admin_platform')
compute = system_helper.get_computes()[0]
host_helper.lock_host(compute)
output = con_ssh.exec_sudo_cmd('dmesg | grep -i paravirtualized')[1]
if 'KVM' in output and 'bare hardware' not in output:
with host_helper.ssh_to_host(compute) as host_ssh:
LOG.info("Clear partition information")
host_ssh.exec_sudo_cmd("dd if=/dev/zero of=/dev/sda bs=512 count=1")
cli.system('host-delete', compute, ssh_client=con_ssh, auth_info=auth_info)
wait_for_host_delete_status(compute)
launch_instance()
table_ = table_parser.table(
cli.system('host-list', ssh_client=con_ssh, auth_info=auth_info)[1])
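# The reinstalled node is rediscovered with hostname 'None'; look up its id so it
# can be re-provisioned as a worker named compute-0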
host_id = table_parser.get_values(table_, "id", **{"hostname": "None"})[0]
LOG.info("host id is {}".format(host_id))
cli.system('host-update', '{} personality=worker hostname=compute-0'.format(host_id),
ssh_client=con_ssh, auth_info=auth_info)
wait_for_host_install_status(compute)
cli.system("interface-network-assign", "{} mgmt0 cluster-host".format(compute),
ssh_client=con_ssh, auth_info=auth_info)
host_helper.unlock_host(compute, con_ssh=con_ssh, available_only=False,
check_hypervisor_up=False, check_webservice_up=False,
check_subfunc=False, check_containers=False)

@@ -0,0 +1,109 @@
import os
import re
import time
from consts.auth import Tenant
from consts.stx import GuestImages
from keywords import system_helper, host_helper, network_helper, nova_helper, glance_helper,\
cinder_helper, vm_helper
from utils import cli, exceptions, table_parser
from utils.clients.ssh import ControllerClient
from utils.tis_log import LOG
NETWORK_NAME = "388-network"
SUBNET_NAME = "388-subnet"
SUBNET_RANGE = "192.168.0.0/24"
IP_VERSION = 4
MAX_TIME = 300
# Flavor, Image, Volume info
cirros_params = {
"flavor_name": "388-flavor",
"flavor_vcpus": 1,
"flavor_ram": 2048,
"flavor_disk": 4,
"volume_size": 5,
"volume_name": "388-cinder",
"image_name": "388-image",
"image_file": os.path.join(GuestImages.DEFAULT["image_dir"], "cirros-0.4.0-x86_64-disk.img"),
"disk_format": "qcow2"
}
# Creating Instance
def launch_instance():
con_ssh = ControllerClient.get_active_controller()
auth_info = Tenant.get('admin')
table_ = table_parser.table(
cli.openstack('network segment range list', ssh_client=con_ssh, auth_info=auth_info)[1])
val = table_parser.get_values(table_, "Name")
if not val:
network_helper.create_segmentation_range(name='physnet0-a', shared=False, project='admin',
minimum=400, maximum=499, network_type='vlan',
physical_network='physnet0')
network_helper.create_segmentation_range(name='physnet0-b', shared=True, minimum=10,
maximum=10, network_type='vlan',
physical_network='physnet0')
network_helper.create_segmentation_range(name='physnet1-a', shared=False, project='admin',
minimum=500, maximum=599, network_type='vlan',
physical_network='physnet1')
net_id = network_helper.create_network(name=NETWORK_NAME, cleanup="module")[1]
network_helper.create_subnet(name=SUBNET_NAME, network=NETWORK_NAME,
subnet_range=SUBNET_RANGE, dhcp=True,
ip_version=IP_VERSION, cleanup="module")
fl_id = nova_helper.create_flavor(name=cirros_params["flavor_name"],
vcpus=cirros_params["flavor_vcpus"],
ram=cirros_params["flavor_ram"],
root_disk=cirros_params["flavor_disk"],
is_public=True, add_default_specs=False,
cleanup="module")[1]
im_id = glance_helper.create_image(name=cirros_params["image_name"],
source_image_file=cirros_params["image_file"],
disk_format=cirros_params["disk_format"],
cleanup="module")[1]
vol_id = cinder_helper.create_volume(name=cirros_params['volume_name'], source_type='image',
source_id=im_id, size=cirros_params['volume_size'],
cleanup="module")[1]
net_id_list = [{"net-id": net_id}]
vm_id = vm_helper.boot_vm(flavor=fl_id, nics=net_id_list, source="volume",
source_id=vol_id, cleanup="module")[1]
return vm_id, net_id
def wait_for_host_delete_status(host):
LOG.info("waiting for {} to delete".format(host))
end_time = time.time() + 300
exists = system_helper.host_exists(host)
while time.time() < end_time:
if not exists:
LOG.info("{} has been deleted".format(host))
return 0
time.sleep(20)
exists = system_helper.host_exists(host)
err_msg = "Timed out waiting for {} to delete".format(host)
raise exceptions.VMTimeout(err_msg)
def test_reinstall_static_addressing(no_aio_system):
con_ssh = ControllerClient.get_active_controller()
auth_info = Tenant.get('admin_platform')
compute = system_helper.get_computes()[0]
host_helper.lock_host(compute)
output = con_ssh.exec_sudo_cmd('dmesg | grep -i paravirtualized')[1]
if 'KVM' in output and 'bare hardware' not in output:
with host_helper.ssh_to_host(compute) as host_ssh:
LOG.info("Clear partition information")
host_ssh.exec_sudo_cmd("dd if=/dev/zero of=/dev/sda bs=512 count=1")
cli.system('host-delete', compute, ssh_client=con_ssh, auth_info=auth_info)
wait_for_host_delete_status(compute)
vm_id, net_id = launch_instance()
args = " --fixed-ip-address 192.168.0.6 {} {}".format(vm_id, net_id)
code, output = cli.openstack('server add fixed ip', args,
ssh_client=con_ssh, fail_ok=False,
auth_info=Tenant.get('admin'))
assert code == 0, output
result = vm_helper.get_vm_values(vm_id, "addresses")[0]
assert re.search("192.168.0.6", result)
cli.system('host-add', ' -n controller-0 -p controller -i 192.168.0.6')
vm_helper.reboot_vm(vm_id)

@@ -0,0 +1,44 @@
###
#
# Copyright (c) 2021 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
# Resynchronize a node to the NTP server.
#
# Author(s): Yong.Fu <yongx.fu@intel.com>
#
###
from pytest import mark
from consts.stx import EventLogID
from keywords import system_helper, host_helper
from utils.clients import ssh
from utils.tis_log import LOG
@mark.system_inventory
def test_resynchronize_host_to_ntp_server(no_aio_system):
"""
347-Resynchronize-Host-To-NTP-Server.robot
Args:
no_aio_system:
Returns:
"""
con_ssh = ssh.ControllerClient.get_active_controller()
host = system_helper.get_computes()[1]
with host_helper.ssh_to_host(host) as host_ssh:
host_ssh.exec_sudo_cmd("date +%T -s '11:14:00'")
host_helper.lock_unlock_hosts(host)
out = system_helper.wait_for_alarm(alarm_id=EventLogID.CONFIG_OUT_OF_DATE, fail_ok=True)[0]
assert not out, "Alarm ID 250.001 exist"
out = system_helper.wait_for_alarm(alarm_id=EventLogID.STORAGE_DEGRADE, fail_ok=True)[0]
assert not out, "Alarm ID 200.006 exist"
date1 = con_ssh.exec_cmd("date +%H:%M")[1]
with host_helper.ssh_to_host(host) as host_ssh:
date2 = host_ssh.exec_cmd("date +%H:%M")[1]
LOG.info(f"date1 and date2 is {date1} {date2}")
assert date1 in date2

@@ -0,0 +1,39 @@
###
#
# Copyright (c) 2021 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
# Test to check system type.
#
# Author(s): Yong.Fu <yongx.fu@intel.com>
#
###
from pytest import mark
from keywords import system_helper
@mark.system_inventory
def test_system_type(no_simplex):
"""
17-Test-System-Type.robot
25-query-product-type-std-system-cli.robot
341-Verify-System-Mode-And-Type.robot
Args:
no_simplex:
Returns:
"""
mod = system_helper.get_system_values(fields="system_mode")[0]
sys_type = system_helper.get_system_values(fields="system_type")[0]
test_mod = "simplex"
if not system_helper.is_aio_simplex():
test_mod = "duplex"
test_type = "All-in-one"
if system_helper.get_computes():
test_type = "Standard"
assert test_type == sys_type
assert test_mod == mod

@@ -0,0 +1,72 @@
###
#
# Copyright (c) 2021 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
# Test to add BMC information in Horizon and Verify.
#
# Author(s): Yong.Fu <yongx.fu@intel.com>
#
###
import time
from pytest import mark
from selenium.webdriver.support.select import Select
from testfixtures.horizon import admin_home_pg, driver
from consts.stx import EventLogID
from keywords import system_helper, host_helper, kube_helper
from utils import cli
from utils.horizon.pages.admin.platform import hostinventorypage
from utils.tis_log import LOG
@mark.system_inventory
def test_verify_bmc_functionality(duplex_only, bare_metal_only, admin_home_pg):
"""
387-Verify-BMC-Functionality.robot
Args:
duplex_only:
bare_metal_only:
admin_home_pg:
Returns:
"""
host = 'controller-1'
LOG.info("Go to Host Inventory")
hostinventory_pg = hostinventorypage.HostInventoryPage(admin_home_pg.driver)
hostinventory_pg.go_to_target_page()
hostinventory_pg.go_to_hosts_tab()
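# Open the Edit Host dialog for controller-1, switch to the board management tab
# (index 2 in this form) and fill in the BMC type, address and credentials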
row = hostinventory_pg._get_row_with_host_name(host)
host_edit_form = hostinventory_pg.hosts_table(host).edit_host(row)
host_edit_form.switch_to(2)
element = host_edit_form.driver.find_element_by_name("bm_type")
Select(element).select_by_value("dynamic")
host_edit_form.driver.find_element_by_name("bm_ip").send_keys('10.219.115.220')
host_edit_form.driver.find_element_by_name("bm_username").send_keys('starlingx')
host_edit_form.driver.find_element_by_name("bm_password").send_keys('Passw0rd')
host_edit_form.driver.find_element_by_name("bm_confirm_password").send_keys('Passw0rd')
host_edit_form.submit()
host_helper.lock_host(host)
cli.system('host-power-off ', host)
time.sleep(60)
results = system_helper.get_host_values(host, ['availability', 'task'])
assert results[0] == 'power-off' and 'Powering-off' in results[1]
out = system_helper.wait_for_alarm(alarm_id=EventLogID.LOSS_OF_REDUNDANCY, timeout=360)
assert out, "Alarm ID 400.002 not found"
cli.system('host-power-on ', host)
time.sleep(60)
results = system_helper.get_host_values(host, ['availability', 'task'])
assert results[0] == 'online' and 'Powering-on' in results[1]
cli.system('host-reset ', host)
time.sleep(60)
results = system_helper.get_host_values(host, ['availability', 'task'])
assert results[0] == 'online' and 'Resetting' in results[1]
host_helper.unlock_host(host)
kube_helper.wait_for_pods_healthy(fail_ok=False)
out = system_helper.wait_for_alarm(alarm_id=EventLogID.LOSS_OF_REDUNDANCY, timeout=360)
assert not out, "Alarm ID 400.002 exist"

@@ -0,0 +1,215 @@
###
#
# Copyright (c) 2021 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
# Host operation: bulk-delete the compute hosts from controller-1 and re-add them
# using host-bulk-add.
#
# Author(s): Yong.Fu <yongx.fu@intel.com>
#
###
import os
import time
from pytest import mark
from consts.auth import Tenant
from consts.stx import GuestImages
from keywords import system_helper, host_helper, storage_helper, container_helper, kube_helper, \
network_helper, nova_helper, cinder_helper, glance_helper, vm_helper
from utils import cli, exceptions, table_parser
from utils.clients.ssh import ControllerClient
from utils.tis_log import LOG
NETWORK_NAME = "397-network"
SUBNET_NAME = "397-subnet"
SUBNET_RANGE = "192.168.0.0/24"
IP_VERSION = 4
MAX_TIME = 300
# Flavor, Image, Volume info
cirros_params = {
"flavor_name": "397-flavor",
"flavor_vcpus": 1,
"flavor_ram": 2048,
"flavor_disk": 4,
"volume_size": 5,
"volume_name": "397-cinder",
"image_name": "397-image",
"image_file": os.path.join(GuestImages.DEFAULT["image_dir"], "cirros-0.4.0-x86_64-disk.img"),
"disk_format": "qcow2"
}
# Creating Instance
def launch_instance():
con_ssh = ControllerClient.get_active_controller()
auth_info = Tenant.get('admin')
table_ = table_parser.table(
cli.openstack('network segment range list', ssh_client=con_ssh, auth_info=auth_info)[1])
val = table_parser.get_values(table_, "Name")
if not val:
network_helper.create_segmentation_range(name='physnet0-a', shared=False, project='admin',
minimum=400, maximum=499, network_type='vlan',
physical_network='physnet0')
network_helper.create_segmentation_range(name='physnet0-b', shared=True, minimum=10,
maximum=10, network_type='vlan',
physical_network='physnet0')
network_helper.create_segmentation_range(name='physnet1-a', shared=False, project='admin',
minimum=500, maximum=599, network_type='vlan',
physical_network='physnet1')
net_id = network_helper.create_network(name=NETWORK_NAME, cleanup="module")[1]
network_helper.create_subnet(name=SUBNET_NAME, network=NETWORK_NAME,
subnet_range=SUBNET_RANGE, dhcp=True,
ip_version=IP_VERSION, cleanup="module")
fl_id = nova_helper.create_flavor(name=cirros_params["flavor_name"],
vcpus=cirros_params["flavor_vcpus"],
ram=cirros_params["flavor_ram"],
root_disk=cirros_params["flavor_disk"],
is_public=True, add_default_specs=False,
cleanup="module")[1]
im_id = glance_helper.create_image(name=cirros_params["image_name"],
source_image_file=cirros_params["image_file"],
disk_format=cirros_params["disk_format"],
cleanup="module")[1]
vol_id = cinder_helper.create_volume(name=cirros_params['volume_name'], source_type='image',
source_id=im_id, size=cirros_params['volume_size'],
cleanup="module")[1]
net_id_list = [{"net-id": net_id}]
vm_id = vm_helper.boot_vm(flavor=fl_id, nics=net_id_list, source="volume",
source_id=vol_id, cleanup="module")[1]
return vm_id
def wait_for_host_delete_status(host):
LOG.info("waiting for {} to delete".format(host))
end_time = time.time() + MAX_TIME
exists = system_helper.host_exists(host)
while time.time() < end_time:
if not exists:
LOG.info("{} has been deleted".format(host))
return 0
time.sleep(20)
exists = system_helper.host_exists(host)
err_msg = "Timed out waiting for {} to delete".format(host)
raise exceptions.VMTimeout(err_msg)
def wait_for_host_install_status(host):
LOG.info("waiting for {} install_state status: completed".format(host))
end_time = time.time() + 2400
current_status = system_helper.get_host_values(host, "install_state")[0]
while time.time() < end_time:
if current_status == "completed":
LOG.info("host status has reached completed")
return 0
time.sleep(30)
current_status = system_helper.get_host_values(host, "install_state")[0]
err_msg = "Timed out waiting for {} install_state status: completed. {} " \
"install_state status: {}".format(host, host, current_status)
raise exceptions.VMTimeout(err_msg)
def unlock_host(host, con_ssh):
application_status = container_helper.get_apps(application="stx-openstack")[0]
if application_status == "applying":
container_helper.abort_app("stx-openstack")
host_helper.unlock_host(host, con_ssh=con_ssh, available_only=False,
check_hypervisor_up=False, check_webservice_up=False,
check_subfunc=False, check_containers=False)
@mark.system_inventory
def test_verify_bulk_add_by_deleting_hosts(no_aio_system):
"""
397-Verify-Bulk-Add-By-Deleting-Hosts.robot
Args:
no_aio_system:
Returns:
"""
con_ssh = ControllerClient.get_active_controller()
auth_info = Tenant.get('admin_platform')
cli.system("host-bulk-export", "--filename hosts.xml", ssh_client=con_ssh,
auth_info=auth_info)
computes = system_helper.get_computes()
for compute in computes:
host_helper.lock_host(compute, force=True)
for compute in computes:
output = con_ssh.exec_sudo_cmd('dmesg | grep -i paravirtualized')[1]
if 'KVM' in output and 'bare hardware' not in output:
with host_helper.ssh_to_host(compute) as host_ssh:
LOG.info("Clear partition information")
host_ssh.exec_sudo_cmd("dd if=/dev/zero of=/dev/sda bs=512 count=1")
cli.system('host-delete', compute, ssh_client=con_ssh, auth_info=auth_info)
wait_for_host_delete_status(compute)
time.sleep(60)
cli.system("host-bulk-add", "hosts.xml", ssh_client=con_ssh, auth_info=auth_info)
table_ = table_parser.table(
cli.system('host-list', ssh_client=con_ssh, auth_info=auth_info)[1])
id_list = table_parser.get_values(table_, "id", **{"hostname": "None"})
LOG.info("host id is {}".format(id_list))
for index, host_id in enumerate(id_list):
cli.system('host-update', '{} personality=worker hostname=compute-{}'.format(host_id, index),
ssh_client=con_ssh, auth_info=auth_info)
computes = system_helper.get_computes()
for compute in computes:
wait_for_host_install_status(compute)
if not system_helper.is_storage_system():
storage_helper.add_ceph_mon(computes[0])
for compute in computes:
# Setup Cluster-host Interfaces
LOG.info("Setup Cluster-host Interfaces")
cli.system("interface-network-assign", "{} mgmt0 cluster-host".format(compute),
ssh_client=con_ssh, auth_info=auth_info)
LOG.info("Configure data interfaces for compute.")
# Get Interface UUID
table_ = table_parser.table(cli.system("host-if-list", "{} -a".format(compute))[1])
data_uuid = table_parser.get_values(table_, "uuid", **{"class": "None"})
# Add Interface To Data Network
args0 = "-m 1500 -n data0 -c data {} {}".format(compute, data_uuid[0])
args1 = "-m 1500 -n data1 -c data {} {}".format(compute, data_uuid[-1])
cli.system('host-if-modify', args0, ssh_client=con_ssh, auth_info=auth_info)
cli.system('host-if-modify', args1, ssh_client=con_ssh, auth_info=auth_info)
cli.system("interface-datanetwork-assign", "{} {} physnet0"
.format(compute, data_uuid[0]),
ssh_client=con_ssh, auth_info=auth_info)
cli.system("interface-datanetwork-assign", "{} {} physnet1"
.format(compute, data_uuid[-1]),
ssh_client=con_ssh, auth_info=auth_info)
LOG.info("Set up disk partition for nova-local volume group")
rootfs = system_helper.get_host_values(compute, "rootfs_device")[0]
uuid = storage_helper.get_host_disks(compute, **{"device_node": rootfs})[0]
# Set up disk partition for nova-local volume group
args = " -t lvm_phys_vol {} {} 100".format(compute, uuid)
out = cli.system('host-disk-partition-add', args, ssh_client=con_ssh, auth_info=auth_info)[1]
new_uuid = table_parser.get_value_two_col_table(table_parser.table(out), "uuid")
# Add Local Volume Group
cli.system("host-lvg-add", "{} nova-local".format(compute), ssh_client=con_ssh,
auth_info=auth_info)
# Add Physical Volume
cli.system("host-pv-add", "{} nova-local {}".format(compute, new_uuid), ssh_client=con_ssh,
auth_info=auth_info)
# Enable Containerized Services
labels = ["openstack-compute-node", "openvswitch", "sriov"]
host_helper.assign_host_labels(compute, labels, unlock=False)
for compute in computes:
unlock_host(compute, con_ssh)
storage_helper.wait_for_ceph_health_ok(con_ssh=con_ssh, timeout=900, check_interval=30)
application_status = container_helper.get_apps(application="stx-openstack")[0]
if application_status == "applying":
container_helper.abort_app(app_name="stx-openstack")
pods_status = kube_helper.wait_for_pods_healthy(namespace="openstack", timeout=20,
con_ssh=con_ssh, fail_ok=True)
if not pods_status:
container_helper.remove_app(app_name="stx-openstack", applied_timeout=600)
container_helper.apply_app(app_name="stx-openstack", applied_timeout=3600,
check_interval=30, wait_for_alarm_gone=False)
time.sleep(60)
# Launch a test VM to confirm the re-added compute hosts can serve instances
launch_instance()

View File

@ -0,0 +1,35 @@
###
#
# Copyright (c) 2021 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
# Export hosts information using the host-bulk-export CLI and verify it against the existing host information.
#
# Author(s): Yong.Fu <yongx.fu@intel.com>
#
###
from pytest import mark
from consts.auth import Tenant
from keywords import system_helper
from utils import cli
from utils.clients.ssh import ControllerClient
@mark.system_inventory
def test_verify_bulk_export_hosts():
"""
355-Verify-Bulk-Export-Hosts.robot
Returns:
"""
hosts_list = system_helper.get_hosts(field="personality")
con_ssh = ControllerClient.get_active_controller()
cli.system("host-bulk-export", "--filename host_info.txt", ssh_client=con_ssh,
auth_info=Tenant.get('admin_platform'))
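# Extract the text of each <personality> element from the exported file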
cmd = "cat host_info.txt | grep personality | cut -d '<' -f 2 | cut -d '>' -f 2"
out = con_ssh.exec_cmd(cmd)[1]
for host in hosts_list:
assert host in out

View File

@ -0,0 +1,38 @@
###
#
# Copyright (c) 2021 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
# Verify that the CPU data can be seen via the CLI.
#
# Author(s): Yong.Fu <yongx.fu@intel.com>
#
###
from pytest import mark
from keywords import system_helper
from utils import table_parser, cli
from utils.clients.ssh import ControllerClient
@mark.system_inventory
def test_verify_cpu_data_via_cli(no_aio_system):
"""
358-Verify-Cpu-Data-Via-CLI.robot
Args:
no_aio_system: session fixture that skips this test on AIO systems
Returns:
"""
compute = system_helper.get_computes()[1]
table_ = table_parser.table(cli.system('host-cpu-list', compute)[1])
uuids = table_parser.get_values(table_=table_, target_header='uuid')
assert uuids
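# Spot-check the assigned_function, processor and physical_core attributes of controller-0's first logical CPU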
cmd = "system host-cpu-show controller-0 1 | grep -e assigned_function " \
"-e processor -e physical_core"
con_ssh = ControllerClient.get_active_controller()
out = con_ssh.exec_cmd(cmd)[1]
assert out

View File

@ -0,0 +1,44 @@
###
#
# Copyright (c) 2021 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
# Verify that deleting the application removes the application service.
#
# Author(s): Yong.Fu <yongx.fu@intel.com>
#
###
import os
import re
from consts.auth import HostLinuxUser
from keywords import container_helper
from utils.clients.ssh import ControllerClient
from utils.tis_log import LOG
def test_verify_deleting_container_application():
"""
359-Verify-Removing-Container-Application.robot
361-Verify-Deleting-Container-Application.robot
Args:
Returns:
"""
app_dir = HostLinuxUser.get_home()
con_ssh = ControllerClient.get_active_controller()
container_helper.remove_app(app_name="stx-openstack", applied_timeout=600)
container_helper.delete_app(app_name="stx-openstack")
tar_file = os.path.join(app_dir, re.search(r"(stx-openstack.*?.tgz)",
con_ssh.exec_cmd("ls ~")[1]).group(1))
LOG.info("tar_file is %s" % tar_file)
container_helper.upload_app(tar_file=tar_file, app_name="stx-openstack", con_ssh=con_ssh,
uploaded_timeout=600)
# Do application apply stx-openstack.
LOG.info("Apply stx-openstack")
container_helper.apply_app(app_name="stx-openstack", applied_timeout=3600,
check_interval=30, con_ssh=con_ssh)

View File

@ -0,0 +1,28 @@
###
#
# Copyright (c) 2021 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
# Verify the software version and patch level using the CLI.
#
# Author(s): Yong.Fu <yongx.fu@intel.com>
#
###
from pytest import mark
from keywords import system_helper
VERSION = '21.05'
@mark.system_inventory
def test_verify_software_version():
"""
338-Verify-Software-Version.robot
Returns:
"""
sys_version = system_helper.get_system_values(fields="software_version")[0]
assert VERSION == sys_version

View File

@ -0,0 +1,40 @@
###
#
# Copyright (c) 2021 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
# Verify CLI system ntp-modify.
#
# Author(s): Yong.Fu <yongx.fu@intel.com>
#
###
from pytest import mark
from keywords import system_helper, host_helper, container_helper, kube_helper
from utils import cli
@mark.system_inventory
def test_verify_system_ntp_modify_rejects_action(no_simplex):
"""
129-Verify-System-Ntp-Modify-Rejects-Action.robot
Args:
no_simplex: fixture that skips this test on simplex (AIO-SX) systems
Returns:
"""
mgmt_ip = system_helper.get_host_values(host="controller-0", fields='mgmt_ip')[0]
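# Pass an arbitrary 'action' value along with the new NTP server; the command is still expected to succeed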
code, out = cli.system('ntp-modify', 'ntpservers={} action=any_value_not_apply'.format(mgmt_ip))
assert code == 0, out
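# Lock/unlock and swact each controller in turn so the NTP configuration change is applied on both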
host_helper.lock_unlock_hosts("controller-1")
host_helper.swact_host("controller-0")
host_helper.lock_unlock_hosts("controller-0")
host_helper.swact_host("controller-1")
status = system_helper.get_host_values("controller-0", fields="config_status")[0]
assert status != "Config out-of-date"
pods_status = kube_helper.wait_for_pods_healthy(namespace="openstack", timeout=20, fail_ok=True)
if not pods_status:
container_helper.apply_app(app_name="stx-openstack")

View File

@ -0,0 +1,39 @@
###
#
# Copyright (c) 2021 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
# Verify that the CLI system pm-modify does not require the 'action apply' option.
#
# Author(s): Yong.Fu <yongx.fu@intel.com>
#
###
from pytest import mark
from consts.auth import Tenant
from keywords import system_helper
from utils import cli
from utils.clients.ssh import ControllerClient
@mark.system_inventory
def test_verify_system_pm_modify_does_not_require_action_apply():
"""
147-Verify-System-Pm-Modify-Does-Not-Require-Action-Apply.robot
Returns:
"""
con_ssh = ControllerClient.get_active_controller()
args = "network ml2 --resource openstac extension_drivers=dns"
res, out = cli.system('service-parameter-add', args, ssh_client=con_ssh,
auth_info=Tenant.get('admin_platform'))
assert res == 0, out
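# Modify and then delete the parameter without issuing any explicit apply action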
system_helper.modify_service_parameter("network", "ml2",
name="extension_drivers", value="port_security")
uuid = system_helper.get_service_parameter_values(field='uuid', service="network",
section="ml2", name="extension_drivers",
con_ssh=con_ssh,
auth_info=Tenant.get('admin_platform'))[0]
system_helper.delete_service_parameter(uuid)

View File

@ -0,0 +1,43 @@
###
#
# Copyright (c) 2021 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
# Verify that the system type is read-only and cannot be changed via the GUI.
#
# Author(s): Yong.Fu <yongx.fu@intel.com>
#
###
from pytest import mark
from keywords import system_helper
from utils.tis_log import LOG
from testfixtures.horizon import admin_home_pg, driver
from utils.horizon.pages.admin.platform import systemconfigurationpage
@mark.system_inventory
def test_verify_system_type_is_read_only(admin_home_pg):
"""
130-Verify-System-Type-Is-Read-Only.robot
Args:
admin_home_pg:
Returns:
"""
LOG.info("Go to System Configuration Page")
systemconfiguration_pg = systemconfigurationpage.SystemConfigurationPage(admin_home_pg.driver)
systemconfiguration_pg.go_to_target_page()
systemconfiguration_pg.go_to_systems_tab()
system_name = system_helper.get_system_values()[0]
LOG.info("system name is {}".format(system_name))
row = systemconfiguration_pg._get_row_with_system_name(system_name)
system_form = systemconfiguration_pg.systems_table.edit_system(row)
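# The edit form should not expose a 'System Type' field, confirming the field is read-only in the GUI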
elements = system_form.driver.find_elements_by_class_name("field-label")
for element in elements:
LOG.info("element text is {}".format(element.text))
assert "System Type" != element.text
system_form.cancel()

View File

@ -0,0 +1,33 @@
###
#
# Copyright (c) 2021 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
# Verify that the system type is read-only and cannot be changed via the CLI.
#
# Author(s): Yong.Fu <yongx.fu@intel.com>
#
###
from pytest import mark
from consts.auth import Tenant
from keywords import host_helper
from utils import cli, table_parser
from utils.clients.ssh import ControllerClient
@mark.system_inventory
def test_verify_the_system_type_read_only_cli():
"""
116-Verify-The-System-Type-Read-Only-CLI.robot
Returns:
"""
con_ssh = ControllerClient.get_active_controller()
table_ = host_helper.get_host_cpu_list_table(host="1")
uuid_list = table_parser.get_values(table_, "uuid")
for uuid in uuid_list:
cli.system("host-cpu-show", "1 {}".format(uuid), ssh_client=con_ssh,
auth_info=Tenant.get('admin_platform'))

View File

@ -0,0 +1,48 @@
###
#
# Copyright (c) 2021 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
# Verify that updating a helm chart override value is accepted and that the override can also be deleted.
#
# Author(s): Yong.Fu <yongx.fu@intel.com>
#
###
from pytest import mark
from consts.auth import Tenant
from keywords import container_helper
from utils import cli, table_parser
from utils.clients.ssh import ControllerClient
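# Helper: assert that the chart's user_overrides value matches the expected string, ignoring whitespace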
def check_helmchart_status(chart, expected):
value = container_helper.get_helm_override_values(chart, 'openstack',
fields="user_overrides")[0]
assert value.replace(" ", "") == expected
@mark.system_inventory
def test_verify_update_and_delete_helmchart_override():
"""
366-Verify-Update-And-Delete-Helmchart-Override.robot
Returns:
"""
con_ssh = ControllerClient.get_active_controller()
chart = container_helper.get_helm_overrides(field="chart name", charts="horizon")[0]
check_helmchart_status(chart, 'None')
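# Set a user override on the chart and confirm it is recorded in the helm-override-show output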
cli.system('helm-override-update', "--set lockout_retries_num=5 stx-openstack {} openstack"
.format(chart), ssh_client=con_ssh, auth_info=Tenant.get('admin_platform'))
args = 'stx-openstack {} openstack'.format(chart)
table_ = table_parser.table(
cli.system('helm-override-show', args, ssh_client=con_ssh,
auth_info=Tenant.get('admin_platform'))[1])
value = table_parser.get_value_two_col_table(table_, field="user_overrides",
merge_lines=False)
assert "lockout_retries_num:\"5\"" == value.replace(" ", "")
cli.system("helm-override-delete", "stx-openstack {} openstack".format(chart),
ssh_client=con_ssh, auth_info=Tenant.get('admin_platform'))
check_helmchart_status(chart, 'None')

View File

@ -65,7 +65,7 @@ class HorizonDriver:
False)
profile.set_preference("browser.download.dir", horizon_dir)
profile.set_preference("browser.helperApps.neverAsk.saveToDisk",
"text/plain,application/x-shellscript")
"text/plain,application/x-shellscript,text/csv")
# profile.update_preferences()
display = None
if Display is not None:

View File

@ -1,7 +1,34 @@
from selenium.webdriver.common import by
from utils.horizon.pages import basepage
from utils.horizon.regions import forms
from utils.horizon.regions import tables
class UsageTable(tables.TableRegion):
name = 'global_usage'
@tables.bind_table_action('csv_summary')
def csv_summary(self, summary_button):
summary_button.click()
class OverviewPage(basepage.BasePage):
def __init__(self, driver):
super(OverviewPage, self).__init__(driver)
self._page_title = "Usage Overview"
PARTIAL_URL = 'admin'
_date_form_locator = (by.By.ID, 'date_form')
@property
def usage_table(self):
return UsageTable(self.driver)
@property
def date_form(self):
src_elem = self._get_element(*self._date_form_locator)
return forms.DateFormRegion(self.driver, src_elem)
def set_usage_query_time_period(self, start_date, end_date):
self.date_form.query(start_date, end_date)
def csv_summary(self):
self.usage_table.csv_summary()

View File

@ -97,7 +97,6 @@ class ComputeHostsTable(HostsTable):
class HostInventoryPage(basepage.BasePage):
PARTIAL_URL = 'admin'
HOSTS_TABLE_NAME_COLUMN = 'Host Name'
@ -121,7 +120,6 @@ class HostInventoryPage(basepage.BasePage):
def edit_host(self, name):
row = self._get_row_with_host_name(name)
host_edit_form = self.hosts_table(name).edit_host(row)
# ...
host_edit_form.submit()
def lock_host(self, name):
@ -215,15 +213,24 @@ class MemoryTable(tables.TableRegion):
'mem_avail(MiB)': 'Memory',
}
UPDATE_MEMORY_FORM_FIELDS = ('platform_memory', 'vm_hugepages_nr_2M', 'vm_hugepages_nr_1G')
CREATE_MEMORY_PROFILE_FORM_FIELDS = 'profilename'
@tables.bind_table_action('createMemoryProfile')
def create_memory_profile(self, create_button):
create_button.click()
self.wait_till_spinner_disappears()
-        return forms.TabbedFormRegion(
+        return forms.FormRegion(
self.driver, field_mappings=self.CREATE_MEMORY_PROFILE_FORM_FIELDS)
@tables.bind_table_action('updatememory')
def update_memory(self, update_button):
update_button.click()
self.wait_till_spinner_disappears()
return forms.FormRegion(
self.driver, field_mappings=self.UPDATE_MEMORY_FORM_FIELDS)
class StorageDisksTable(tables.TableRegion):
name = "disks"
@ -325,14 +332,22 @@ class InterfaceTable(tables.TableRegion):
name = "interfaces"
CREATE_INTERFACE_PROFILE_FORM_FIELDS = "profilename"
EDIT_INTERFACE_FORM_FIELDS = ("ifname", "ifclass", "iftype", "imtu")
@tables.bind_table_action('createprofile')
def create_inferface_profile(self, create_button):
create_button.click()
self.wait_till_spinner_disappears()
-        return forms.TabbedFormRegion(
+        return forms.FormRegion(
self.driver, field_mappings=self.CREATE_INTERFACE_PROFILE_FORM_FIELDS)
@tables.bind_row_action('update')
def edit_interface(self, edit_button, row):
edit_button.click()
self.wait_till_spinner_disappears()
return forms.FormRegion(
self.driver, field_mappings=self.EDIT_INTERFACE_FORM_FIELDS)
class LLDPTable(tables.TableRegion):
name = "neighbours"
@ -351,12 +366,15 @@ class HostInventoryDetailPage(basepage.BasePage):
PROCESSOR_TAB_INDEX = 1
MEMORY_TAB_INDEX = 2
SOTRAGE_TAB_INDEX = 3
-    PORTS_TAB_INDEX = 4
-    INTEFACES_TAB_INDEX = 5
-    LLDP_TAB_INDEX = 6
-    SENSORS_TAB_INDEX = 7
-    DEVICES_TAB_INDEX = 8
+    FILESYSTEMS_TAB_INDEX = 4
+    PORTS_TAB_INDEX = 5
+    INTERFACES_TAB_INDEX = 6
+    LLDP_TAB_INDEX = 7
+    SENSORS_TAB_INDEX = 8
+    DEVICES_TAB_INDEX = 9
+    LABELS_TAB_INDEX = 10
MEMORYTABLE_PROCESSOR_COL = 'Processor'
INTERFACES_TABLE_NAME_COLUMN = 'Name'
def __init__(self, driver, host_name):
super(HostInventoryDetailPage, self).__init__(driver)
@ -365,6 +383,16 @@ class HostInventoryDetailPage(basepage.BasePage):
def _get_memory_table_row_with_processor(self, processor):
return self.memory_table.get_row(self.MEMORYTABLE_PROCESSOR_COL, processor)
def _get_row_with_interface_name(self, name):
return self.interfaces_table.get_row(self.INTERFACES_TABLE_NAME_COLUMN, name)
def get_interface_table_info(self, name, header):
row = self._get_row_with_interface_name(name)
if row.cells[header].text == '':
return None
else:
return row.cells[header].text
def get_memory_table_info(self, processor, header):
row = self._get_memory_table_row_with_processor(processor)
if row.cells[header].text == '':
@ -414,6 +442,10 @@ class HostInventoryDetailPage(basepage.BasePage):
def host_detail_overview(self, driver):
return HostDetailOverviewDescription(driver)
@property
def interfaces_table(self):
return InterfaceTable(self.driver)
@property
def inventory_details_processor_info(self):
return HostDetailProcessorDescription(self.driver)
@ -438,6 +470,22 @@ class HostInventoryDetailPage(basepage.BasePage):
def storage_pv_table(self):
return StoragePhysicalVolumeTable(self.driver)
def edit_interface(self, name, mtu):
row = self._get_row_with_interface_name(name)
edit_interface_form = self.interfaces_table.edit_interface(row)
edit_interface_form.imtu.text = mtu
edit_interface_form.submit()
def update_memory(self, memory=None, hugepages_2M=None, hugepages_1G=None):
memory_form = self.memory_table.update_memory()
if memory is not None:
memory_form.platform_memory.text = memory
if hugepages_2M is not None:
memory_form.vm_hugepages_nr_2M.text = hugepages_2M
if hugepages_1G is not None:
memory_form.vm_hugepages_nr_1G.text = hugepages_1G
memory_form.submit()
def ports_table(self):
return PortsTable(self.driver)
@ -460,7 +508,7 @@ class HostInventoryDetailPage(basepage.BasePage):
self.go_to_tab(self.PORTS_TAB_INDEX)
def go_to_interfaces_tab(self):
-        self.go_to_tab(self.SOTRAGE_TAB_INDEX)
+        self.go_to_tab(self.INTERFACES_TAB_INDEX)
def go_to_lldp_tab(self):
self.go_to_tab(self.LLDP_TAB_INDEX)

View File

@ -141,11 +141,12 @@ class ControlerfsTable(tables.TableRegion):
class CephStoragePoolsTable(tables.TableRegion):
name = "storage_pools_table"
EDIT_POOL_QUOTAS_FIELDS = ("cinder_pool_gib", "glance_pool_gib",
EDIT_POOL_QUOTAS_FIELDS = ("cinder_pool_gib", "kube_pool_gib", "glance_pool_gib",
"ephemeral_pool_gib", "object_pool_gib")
CEPH_STORAGE_POOLS_MAP = {
'tier_name': 'Ceph Storage Tier',
'cinder_pool_gib': 'Cinder Volume Storage (GiB)',
'kube_pool_gib': 'Kubernetes Storage (GiB)',
'glance_pool_gib': 'Glance Image Storage (GiB)',
'ephemeral_pool_gib': 'Nova Ephemeral Disk Storage (GiB)',
'object_pool_gib': 'Object Storage (GiB)',
@ -160,7 +161,6 @@ class CephStoragePoolsTable(tables.TableRegion):
class SystemConfigurationPage(basepage.BasePage):
PARTIAL_URL = 'admin/system_config'
SYSTEMS_TAB_INDEX = 0
ADDRESS_POOLS_TAB_INDEX = 1
@ -395,7 +395,9 @@ class SystemConfigurationPage(basepage.BasePage):
if cancel:
edit_form.cancel()
else:
-            edit_form.submit()
+            edit_form._submit_element.click()
+            edit_form.driver.switch_to_alert().accept()
+            edit_form.wait_till_spinner_disappears()
def edit_ptp(self, mode=None, transport=None, mechanism=None, cancel=False):
edit_form = self.ptp_table.edit_ptp()
@ -426,7 +428,9 @@ class SystemConfigurationPage(basepage.BasePage):
if cancel:
edit_form.cancel()
else:
-            edit_form.submit()
+            edit_form._submit_element.click()
+            edit_form.driver.switch_to_alert().accept()
+            edit_form.wait_till_spinner_disappears()
def edit_filesystem(self, database=None, glance=None, backup=None,
scratch=None, extension=None, img_conversions=None,
@ -449,12 +453,14 @@ class SystemConfigurationPage(basepage.BasePage):
else:
edit_form.submit()
-    def edit_storage_pool(self, tier_name, cinder_pool=None, glance_pool=None,
+    def edit_storage_pool(self, tier_name, cinder_pool=None, kube_pool=None, glance_pool=None,
ephemeral_pool=None, object_pool=None, cancel=False):
row = self._get_row_with_ceph_tier_name(tier_name)
edit_form = self.ceph_storage_pools_table.edit_ceph_storage_pools(row)
if cinder_pool is not None:
edit_form.cinder_pool_gib.value = cinder_pool
if kube_pool is not None:
edit_form.kube_pool_gib.value = kube_pool
if glance_pool is False:
edit_form.glance_pool_gib.value = glance_pool
if ephemeral_pool is True:

View File

@ -10,7 +10,6 @@ from utils.horizon.regions import tables
from utils.horizon.regions import menus
from consts.stx import Networks
class LaunchInstanceForm(forms.TabbedFormRegion):
_submit_locator = (by.By.XPATH, '//button[@class="btn btn-primary finish"]')
_fields_locator = (by.By.XPATH, "//div[starts-with(@class,'step ng-scope')]")
@ -237,16 +236,17 @@ class InstancesPage(basepage.BasePage):
instance_form.fields['boot-source-type'].text = boot_source_type
time.sleep(1)
instance_form._init_tab_fields(1)
-        if create_new_volume is True:
-            instance_form.fields['Create New Volume'].click_yes()
-        if delete_volume_on_instance_delete is True:
-            instance_form.fields['Delete Volume on Instance Delete'].click_yes()
-        if delete_volume_on_instance_delete is False:
-            instance_form.fields['Delete Volume on Instance Delete'].click_no()
-        if create_new_volume is False:
-            instance_form.fields['Create New Volume'].click_no()
-        if volume_size is not None:
-            instance_form.fields['volume-size'].value = volume_size
+        if boot_source_type == "Image" or boot_source_type == "Instance Snapshot":
+            if create_new_volume is True:
+                instance_form.fields['Create New Volume'].click_yes()
+            if delete_volume_on_instance_delete is True:
+                instance_form.fields['Delete Volume on Instance Delete'].click_yes()
+            if delete_volume_on_instance_delete is False:
+                instance_form.fields['Delete Volume on Instance Delete'].click_no()
+            if create_new_volume is False:
+                instance_form.fields['Create New Volume'].click_no()
+            if volume_size is not None:
+                instance_form.fields['volume-size'].value = volume_size
instance_form.addelement('Name', source_name)
instance_form.switch_to(2)
instance_form.addelement('Name', flavor_name)