Some modifications to the sanity tests ported from Robot

Description:

  utils/clients/ssh.py:
    during "connect" get_logpath only when self.logpath is
      empty so just one log folder is created if
      reconecting during the same test
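    a minimal sketch of that guard (taken from the SSHClient.connect()
      diff further below):

        if not self.logpath:
            # reuse the log path from the first connect; only create a
            # new log folder when none exists yet
            self.logpath = self._get_logpath()
        if self.logpath:
            self.session.logfile = open(self.logpath, 'a+')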

  keywords/vm_helper.py:
    modified evacuate_vms to use force lock (unlock in the finally
      block) instead of reboot, because with reboot the instances
      do not get transferred to the other controller/compute (rough
      sketch below)
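    rough sketch of that force-lock variant (later reverted in Patch
      Set 9); the lock_host/unlock_host names and the force argument
      are assumptions for illustration, not the exact code:

        try:
            # force-lock the source host so its instances are evacuated
            host_helper.lock_host(host, force=True, con_ssh=con_ssh)
            # ... wait for the instances to become ACTIVE on the other
            #     controller/compute ...
        finally:
            # always unlock the host again, even if the wait failed
            host_helper.unlock_host(host, con_ssh=con_ssh)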

  keywords/host_helper.py:
    in wait_for_hypervisors_up's timeout loop, added a try around
      get_hypervisors so the loop does not exit if the call fails
      (illustrative loop below)
    fixed a log call to pass both variables to its format string
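    illustrative shape of that loop (the try was removed again in
      Patch Set 10; the loop structure and helper signatures are
      assumptions, not the real wait_for_hypervisors_up code):

        end_time = time.time() + HostTimeout.HYPERVISOR_UP
        while time.time() < end_time:
            try:
                up_hosts = get_hypervisors(state='up', con_ssh=con_ssh)
            except Exception as err:
                # transient failure: log it and retry instead of
                # aborting the whole wait
                LOG.warning("get_hypervisors failed: {}".format(err))
            else:
                if set(hosts) <= set(up_hosts):
                    return True
            time.sleep(check_interval)
        return False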

  consts/timeout.py:
    modified HYPERVISOR_UP timeout for virtual environment

  testcases/sanity/sanity_openstack/test_openstack_pod_healthy.py:
    modified the asserts on the wait_for_pods_healthy calls to check
      for True instead of None
    changed the base path from testcases/sanity/sanity_platform to
      the current testcases/sanity/sanity_openstack

  testcases/sanity/sanity_openstack/test_instance_from_volume.py:
    use correct value for net_id
    use correct value for host
    added test_evacuate_instances_from_hosts

  testcases/sanity/sanity_openstack/test_instance_from_snapshot.py:
    use correct value for net_id
    use correct value for host
    added test_evacuate_instances_from_hosts

  testcases/sanity/sanity_openstack/test_instance_from_image.py:
    use correct value for net_id
    use correct value for host
    added test_evacuate_instances_from_hosts

  Patch Set 2:
    reworked the evacuate_vms test case

  Patch Set 3:
    when deleting snapshots, also delete the leftover volumes with
      no name that remain after deleting servers
    fixed spacing and removed unused imports

  Patch Set 4:
    uncommented tests in test_instance_from_snapshot

  Patch Set 5:
    fixed the wrong service names in test_openstack_pod_healthy

  Patch Set 6:
    reapplied Patch Set 4 and fixed the commit message

  Patch Set 7:
    increased timeout for volume delete

  Patch Set 8:
    put the unlock_host from evacuate_instances in a timed-out retry
      loop for when applications are being applied, updated, or
      recovered (hypothetical sketch below)
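    hypothetical sketch of that retry (later removed in Patch Set 10);
      the application statuses and loop shape are assumptions:

        end_time = time.time() + timeout
        while time.time() < end_time:
            status = container_helper.get_apps(application="stx-openstack")[0]
            if status in ("applied", "updated", "recovered"):
                host_helper.unlock_host(host, con_ssh=con_ssh)
                break
            time.sleep(30)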

  Patch Set 9:
    reverted the modification to evacuate_vms in vm_helper (back to
      the original reboot instead of force_lock), but kept the
      wait_for_pods_healthy call at the end when wait_for_host_up
      is True
    pass wait_for_host_up=True in the tests when calling
      evacuate_vms

  Patch Set 10:
    cinder_helper.py:
      added auth_info and con_ssh to calls
    host_helper.py:
      removed try around get_hypervisors call
    vm_helper.py:
      removed leftover unlock_host
    test_instance_from_image.py:
      used get_hypervisors to get host
    test_instance_from_snapshot.py:
      used get_hypervisors to get host
    test_instance_from_volume.py:
      used get_hypervisors to get host

  Patch Set 11:
    test_openstack_pod_healthy.py:
      updated the nginx ingress controller service names

Change-Id: Idaea73c8b9d8c05c82f5eb7fea1ef31a6e1d0f98
Signed-off-by: George Postolache <george.postolache@intel.com>
George Postolache 2020-08-12 12:33:29 +03:00 committed by Thiago Brito
parent 005b2d5dc2
commit aeec1d38e0
9 changed files with 127 additions and 38 deletions


@@ -30,7 +30,7 @@ class HostTimeout:
FAIL_AFTER_REBOOT = 120
# Hypervsior in enabled/up state after host in available state and task
# clears
HYPERVISOR_UP = 300
HYPERVISOR_UP = 1800
# Web service up in sudo sm-dump after host in available state and task
# clears
WEB_SERVICE_UP = 180
@@ -91,7 +91,7 @@ class VMTimeout:
class VolumeTimeout:
STATUS_CHANGE = 2700 # Windows guest takes a long time
DELETE = 90
DELETE = 160
class SysInvTimeout:


@@ -749,6 +749,15 @@ def delete_volume_snapshots(snapshots=None, force=False, check_first=True,
LOG.info(msg)
return -1, msg
# unable to delete snapshots if there are volumes with no name in volume list
# volumes with no name are leftover after deleteing servers
vol_list = get_volumes(auth_info=auth_info, con_ssh=con_ssh)
for i in reversed(vol_list):
vol_name = get_volume_show_values(i, "Name", con_ssh=con_ssh, auth_info=auth_info)[0]
if vol_name != '':
vol_list.remove(i)
delete_volumes(volumes=vol_list, con_ssh=con_ssh, auth_info=auth_info)
args_ = '{}{}'.format('--force ' if force else '',
' '.join(snapshots_to_del))
code, output = cli.openstack('volume snapshot delete', args_, ssh_client=con_ssh,

automated-pytest-suite/keywords/host_helper.py Executable file → Normal file

automated-pytest-suite/keywords/vm_helper.py Executable file → Normal file

@@ -30,8 +30,7 @@ from utils.guest_scripts.scripts import TisInitServiceScript
from utils.multi_thread import MThread, Events
from utils.tis_log import LOG
from keywords import network_helper, nova_helper, cinder_helper, host_helper, \
glance_helper, common, system_helper, \
storage_helper
glance_helper, common, system_helper, kube_helper, storage_helper
from testfixtures.fixture_resources import ResourceCleanup
from testfixtures.recover_hosts import HostsToRecover
@@ -4777,6 +4776,7 @@ def evacuate_vms(host, vms_to_check, con_ssh=None, timeout=600,
system_helper.wait_for_alarm_gone(
alarm_id=EventLogID.CPU_USAGE_HIGH, fail_ok=True,
check_interval=30)
kube_helper.wait_for_pods_healthy(all_namespaces=True, timeout=timeout, exclude=True)
def boot_vms_various_types(storage_backing=None, target_host=None,


@@ -14,7 +14,7 @@ from pytest import mark, fixture
from consts.stx import GuestImages, VMStatus, FlavorSpec
from keywords import nova_helper, glance_helper, vm_helper, system_helper
# TODO this will be used in evacuate test
# from testfixtures.pre_checks_and_configs import no_simplex
from testfixtures.pre_checks_and_configs import no_simplex
from utils import cli
# TODO maybe add cirros image name to Guest images and use it from there
@@ -43,9 +43,9 @@ cirros_params = {
"image_file": os.path.join(GuestImages.DEFAULT["image_dir"], "cirros-0.4.0-x86_64-disk.img"),
"disk_format": "qcow2"
}
dict_params = (centos_params, cirros_params)
# I think this should be moved into vm_helper
# Does this require a check after to see that only admin is working?
def lock_instance(vm_id):
@@ -54,6 +54,7 @@ def lock_instance(vm_id):
"""
cli.openstack(cmd='server lock', positional_args=vm_id)
# I think this should be moved into vm_helper
# Does this require a check after to see that only admin is working?
def unlock_instance(vm_id):
@@ -62,6 +63,7 @@ def unlock_instance(vm_id):
"""
cli.openstack(cmd='server unlock', positional_args=vm_id)
@fixture(params=dict_params, scope="module", ids=["centos", "cirros"])
def create_flavors_and_images(request):
# TODO need to check with add_default_specs set to True on baremetal
@@ -87,12 +89,13 @@ def create_flavors_and_images(request):
"image": im_id
}
# this should be modified to call boot_vm_openstack when implemented
@fixture(scope="module")
def launch_instances(create_flavors_and_images, create_network_sanity):
net_id_list = list()
net_id_list.append({"net-id": create_network_sanity[0]})
host = system_helper.get_active_controller_name()
host = system_helper.get_hypervisors()[0]
vm_id = vm_helper.boot_vm(flavor=create_flavors_and_images["flavor1"],
nics=net_id_list, source="image",
source_id=create_flavors_and_images["image"],
@@ -101,11 +104,13 @@ def launch_instances(create_flavors_and_images, create_network_sanity):
VM_IDS.append(vm_id)
return vm_id
@mark.robotsanity
def test_suspend_resume_instances(launch_instances):
vm_helper.suspend_vm(vm_id=launch_instances)
vm_helper.resume_vm(vm_id=launch_instances)
@mark.robotsanity
@mark.parametrize(
('status'), [
@@ -116,34 +121,41 @@ def test_suspend_resume_instances(launch_instances):
def test_set_error_active_flags_instances(status, launch_instances):
vm_helper.set_vm(vm_id=launch_instances, state=status)
@mark.robotsanity
def test_pause_unpause_instances(launch_instances):
vm_helper.pause_vm(vm_id=launch_instances)
vm_helper.unpause_vm(vm_id=launch_instances)
@mark.robotsanity
def test_stop_start_instances(launch_instances):
vm_helper.stop_vms(vms=launch_instances)
vm_helper.start_vms(vms=launch_instances)
@mark.robotsanity
def test_lock_unlock_instances(launch_instances):
lock_instance(launch_instances)
unlock_instance(launch_instances)
@mark.robotsanity
def test_reboot_instances(launch_instances):
vm_helper.reboot_vm(vm_id=launch_instances)
@mark.robotsanity
def test_rebuild_instances(launch_instances, create_flavors_and_images):
vm_helper.rebuild_vm(vm_id=launch_instances, image_id=create_flavors_and_images["image"])
@mark.robotsanity
def test_resize_instances(launch_instances, create_flavors_and_images):
vm_helper.resize_vm(vm_id=launch_instances, flavor_id=create_flavors_and_images["flavor2"])
vm_helper.resize_vm(vm_id=launch_instances, flavor_id=create_flavors_and_images["flavor1"])
@mark.robotsanity
def test_set_unset_properties_instances(launch_instances):
vm_helper.set_vm(vm_id=launch_instances, **{FlavorSpec.AUTO_RECOVERY: "true",
@@ -153,9 +165,27 @@ def test_set_unset_properties_instances(launch_instances):
FlavorSpec.LIVE_MIG_MAX_DOWNTIME,
FlavorSpec.LIVE_MIG_TIME_OUT])
# @mark.robotsanity
# def test_evacuate_instances_from_hosts(no_simplex):
# TODO this is not yet completed
# vm_helper.evacuate_vms(host="controller-0", vms_to_check=VM_IDS)
# vm_helper.evacuate_vms(host="controller-1", vms_to_check=VM_IDS)
# pass
@mark.robotsanity
def test_evacuate_instances_from_hosts(no_simplex):
host_list = list()
if system_helper.is_aio_duplex():
host_list = system_helper.get_controllers()
else:
host_list = system_helper.get_computes()
h0_instance_list = list()
for i in VM_IDS:
host_name = vm_helper.get_vm_host(i)
if host_name == host_list[0]:
h0_instance_list.append(i)
if not h0_instance_list:
vm_helper.evacuate_vms(host=host_list[1], vms_to_check=VM_IDS,
wait_for_host_up=True)
vm_helper.evacuate_vms(host=host_list[0], vms_to_check=VM_IDS,
wait_for_host_up=True)
else:
vm_helper.evacuate_vms(host=host_list[0], vms_to_check=h0_instance_list,
wait_for_host_up=True)
vm_helper.evacuate_vms(host=host_list[1], vms_to_check=VM_IDS,
wait_for_host_up=True)


@@ -35,7 +35,6 @@ cirros_params = {
"image_file": os.path.join(GuestImages.DEFAULT["image_dir"], "cirros-0.4.0-x86_64-disk.img"),
"disk_format": "qcow2"
}
dict_params = ("snapshot_name_1", "snapshot_name_2")
@@ -89,8 +88,8 @@ def snapshot_from_instance(request, create_flavour_and_image, volume_for_instanc
def launch_instances(create_flavour_and_image, create_network_sanity, snapshot_from_instance):
global VM_IDS
net_id_list = list()
net_id_list.append({"net-id": create_network_sanity})
host = system_helper.get_active_controller_name()
net_id_list.append({"net-id": create_network_sanity[0]})
host = system_helper.get_hypervisors()[0]
launch_instances = vm_helper.boot_vm(flavor=create_flavour_and_image["flavor1"],
nics=net_id_list, source="snapshot",
source_id=snapshot_from_instance,
@@ -155,9 +154,26 @@ def test_set_unset_properties_instances(launch_instances):
FlavorSpec.LIVE_MIG_TIME_OUT])
# @mark.robotsanity
# def test_evacuate_instances_from_hosts(no_simplex):
# TODO this is not yet completed
# vm_helper.evacuate_vms(host="controller-0", vms_to_check=VM_IDS)
# vm_helper.evacuate_vms(host="controller-1", vms_to_check=VM_IDS)
# pass
@mark.robotsanity
def test_evacuate_instances_from_hosts(no_simplex):
host_list = list()
if system_helper.is_aio_duplex():
host_list = system_helper.get_controllers()
else:
host_list = system_helper.get_computes()
h0_instance_list = list()
for i in VM_IDS:
host_name = vm_helper.get_vm_host(i)
if host_name == host_list[0]:
h0_instance_list.append(i)
if not h0_instance_list:
vm_helper.evacuate_vms(host=host_list[1], vms_to_check=VM_IDS,
wait_for_host_up=True)
vm_helper.evacuate_vms(host=host_list[0], vms_to_check=VM_IDS,
wait_for_host_up=True)
else:
vm_helper.evacuate_vms(host=host_list[0], vms_to_check=h0_instance_list,
wait_for_host_up=True)
vm_helper.evacuate_vms(host=host_list[1], vms_to_check=VM_IDS,
wait_for_host_up=True)


@@ -12,7 +12,7 @@ from pytest import mark, fixture
from consts.stx import GuestImages, VMStatus, FlavorSpec
from keywords import nova_helper, glance_helper, vm_helper, system_helper
from keywords import network_helper, cinder_helper
from keywords import cinder_helper
from testfixtures.pre_checks_and_configs import no_simplex
from utils import cli
@@ -32,9 +32,9 @@ cirros_params = {
"image_file": os.path.join(GuestImages.DEFAULT["image_dir"], "cirros-0.4.0-x86_64-disk.img"),
"disk_format": "qcow2"
}
dict_params = ("vol-cirros-1", "vol-cirros-2")
def lock_instance(launch_instances):
cli.openstack(cmd='server lock', positional_args=launch_instances)
@@ -42,6 +42,7 @@ def lock_instance(launch_instances):
def unlock_instance(launch_instances):
cli.openstack(cmd='server unlock', positional_args=launch_instances)
@fixture(scope="module")
def create_flavour_and_image():
fl_id = nova_helper.create_flavor(name=cirros_params['flavor_name_1'],
@@ -68,6 +69,7 @@ def create_flavour_and_image():
"image": im_id
}
# Creating Volume For Instances
@fixture(params=dict_params, scope="module")
def volume_from_instance(request, create_flavour_and_image):
@@ -75,12 +77,13 @@ def volume_from_instance(request, create_flavour_and_image):
source_id=create_flavour_and_image['image'],
size=cirros_params['flavor_disk'], cleanup="module")[1]
@fixture(scope="module")
def launch_instances(create_flavour_and_image, create_network_sanity, volume_from_instance):
global VM_IDS
net_id_list = list()
net_id_list.append({"net-id": create_network_sanity})
host = system_helper.get_active_controller_name()
net_id_list.append({"net-id": create_network_sanity[0]})
host = system_helper.get_hypervisors()[0]
launch_instances = vm_helper.boot_vm(flavor=create_flavour_and_image["flavor1"],
nics=net_id_list, source="volume",
source_id=volume_from_instance,
@@ -88,52 +91,61 @@ def launch_instances(create_flavour_and_image, create_network_sanity, volume_fro
VM_IDS.append(launch_instances)
return launch_instances
# Suspend Resume Instances
@mark.robotsanity
def test_suspend_resume_instances(launch_instances):
vm_helper.suspend_vm(vm_id=launch_instances)
vm_helper.resume_vm(vm_id=launch_instances)
# Set error Active Flags Instance
@mark.robotsanity
@mark.parametrize(('status'), [(VMStatus.ERROR), (VMStatus.ACTIVE)])
def test_set_error_active_flags_instances(launch_instances, status):
vm_helper.set_vm(vm_id=launch_instances, state=status)
# Pause Unpause Instances
@mark.robotsanity
def test_pause_unpause_instances(launch_instances):
vm_helper.pause_vm(vm_id=launch_instances)
vm_helper.unpause_vm(vm_id=launch_instances)
# Stop Start Instances
@mark.robotsanity
def test_stop_start_instances(launch_instances):
vm_helper.stop_vms(vms=launch_instances)
vm_helper.start_vms(vms=launch_instances)
# Lock Unlock Instances
@mark.robotsanity
def test_lock_unlock_instances(launch_instances):
lock_instance(launch_instances)
unlock_instance(launch_instances)
# Reboot Instances
@mark.robotsanity
def test_reboot_instances(launch_instances):
vm_helper.reboot_vm(vm_id=launch_instances)
# Rebuild Instances (from Volume)
@mark.robotsanity
def test_rebuild_instances(launch_instances, create_flavour_and_image):
vm_helper.rebuild_vm(vm_id=launch_instances, image_id=create_flavour_and_image["image"])
# Resize Instances
@mark.robotsanity
def test_resize_instances(launch_instances, create_flavour_and_image):
vm_helper.resize_vm(vm_id=launch_instances, flavor_id=create_flavour_and_image["flavor2"])
vm_helper.resize_vm(vm_id=launch_instances, flavor_id=create_flavour_and_image["flavor1"])
# Set Unset Properties Instances
@mark.robotsanity
def test_set_unset_properties_instances(launch_instances):
@@ -144,10 +156,28 @@ def test_set_unset_properties_instances(launch_instances):
FlavorSpec.LIVE_MIG_MAX_DOWNTIME,
FlavorSpec.LIVE_MIG_TIME_OUT])
# Evacuate Instances From Hosts
# @mark.robotsanity
# def test_evacuate_instances_from_hosts(no_simplex):
# TODO this is not yet completed
# vm_helper.evacuate_vms(host="controller-0", vms_to_check=VM_IDS)
# vm_helper.evacuate_vms(host="controller-1", vms_to_check=VM_IDS)
# pass
@mark.robotsanity
def test_evacuate_instances_from_hosts(no_simplex):
host_list = list()
if system_helper.is_aio_duplex():
host_list = system_helper.get_controllers()
else:
host_list = system_helper.get_computes()
h0_instance_list = list()
for i in VM_IDS:
host_name = vm_helper.get_vm_host(i)
if host_name == host_list[0]:
h0_instance_list.append(i)
if not h0_instance_list:
vm_helper.evacuate_vms(host=host_list[1], vms_to_check=VM_IDS,
wait_for_host_up=True)
vm_helper.evacuate_vms(host=host_list[0], vms_to_check=VM_IDS,
wait_for_host_up=True)
else:
vm_helper.evacuate_vms(host=host_list[0], vms_to_check=h0_instance_list,
wait_for_host_up=True)
vm_helper.evacuate_vms(host=host_list[1], vms_to_check=VM_IDS,
wait_for_host_up=True)


@@ -30,7 +30,7 @@ def test_openstack_pods_healthy():
application_status = container_helper.get_apps(application="stx-openstack")[0]
assert application_status == "applied", "System status is not in state applied"
command_health = kube_helper.wait_for_pods_healthy(namespace="stx-openstack")
assert command_health is None, "Check PODs health has failed"
assert command_health is True, "Check PODs health has failed"
# Reapply STX OpenStack
@@ -86,10 +86,13 @@ def test_kube_system_services():
"""
# Check PODs Health
command_health = kube_helper.wait_for_pods_healthy(namespace="stx-openstack")
assert command_health is None, "Check PODs health has failed"
assert command_health is True, "Check PODs health has failed"
# Check Kube System Services
services_to_check = ['ingress', 'ingress-error-pages', 'ingress-exporter', 'kube-dns',
'tiller-deploy']
# services_to_check = ['ingress', 'ingress-error-pages', 'ingress-exporter', 'kube-dns',
# 'tiller-deploy']
services_to_check = ['ic-nginx-ingress-ingress-nginx-controller',
'ic-nginx-ingress-ingress-nginx-controller-admission',
'kube-dns']
services_to_check.sort()
services_list = kube_helper.get_resources(field="NAME", namespace="kube-system",
resource_type="service")
@@ -106,7 +109,7 @@ def test_create_check_delete_pod():
Launch a POD via kubectl, wait until it is active, then delete it.
"""
# Create pod
test_pod_yaml_path = os.path.join(os.getcwd(), "testcases/sanity/sanity_platform", POD_YAML)
test_pod_yaml_path = os.path.join(os.getcwd(), "testcases/sanity/sanity_openstack", POD_YAML)
stx_path = STX_HOME + POD_YAML
current_controller = ControllerClient.get_active_controller()
if not current_controller.file_exists(test_pod_yaml_path):


@@ -161,7 +161,8 @@ class SSHClient:
self.session.SSH_OPTS = _SSH_OPTS
self.session.force_password = self.force_password
self.session.maxread = 100000
self.logpath = self._get_logpath()
if not self.logpath:
self.logpath = self._get_logpath()
if self.logpath:
self.session.logfile = open(self.logpath, 'a+')