From e32cb6361acad0d590209a96442ec48ec530704b Mon Sep 17 00:00:00 2001 From: George Postolache Date: Thu, 19 Mar 2020 10:46:01 +0200 Subject: [PATCH] Test suite instance from image port from robot Added conftest.py for sanity group which imports the delete_resource functions from resource_mgmt Added conftest.py for sanity_openstack group which contains a fixture for creating network and subnet used in the suites Added suite test_instance_from_image Story: 2007472 Task: 39164 Patch Set 2: Fixed following pep8 and pylint errors: E501 line too long E225 missing whitespace around operator E222 multiple spaces after operator W0106: Expression is assigned to nothing (expression-not-assigned) Patch Set 3: added Signed off by added Story added Task Patch Set 4: Shortened the lines to under 100 chars Added apache-2 Added new marker "robotsanity" to pytest.ini Modified test markers to use robotsanity instead of sanity Commented evacuate instances test Patch Set 5: Commented evacuate instances test, forgot to do it in PS4 Patch Set 6: Added space after # for comments Change-Id: Id17e0b65bace265f4bd2166c7bd7258ba76061f4 Signed-off-by: George Postolache --- automated-pytest-suite/pytest.ini | 1 + .../testcases/sanity/__init__.py | 0 .../testcases/sanity/conftest.py | 2 + .../sanity/sanity_openstack/__init__.py | 0 .../sanity/sanity_openstack/conftest.py | 19 +++ .../test_instance_from_image.py | 161 ++++++++++++++++++ 6 files changed, 183 insertions(+) create mode 100755 automated-pytest-suite/testcases/sanity/__init__.py create mode 100755 automated-pytest-suite/testcases/sanity/conftest.py create mode 100755 automated-pytest-suite/testcases/sanity/sanity_openstack/__init__.py create mode 100755 automated-pytest-suite/testcases/sanity/sanity_openstack/conftest.py create mode 100755 automated-pytest-suite/testcases/sanity/sanity_openstack/test_instance_from_image.py diff --git a/automated-pytest-suite/pytest.ini b/automated-pytest-suite/pytest.ini index 73c4dfe..ba841f1 100644 
--- a/automated-pytest-suite/pytest.ini +++ b/automated-pytest-suite/pytest.ini @@ -4,6 +4,7 @@ testpaths = testcases/functional log_print = False markers = sanity: mark test for sanity run + robotsanity: temporary mark for the tests from robotframework cpe_sanity: mark tests for cpe sanity storage_sanity: mark tests for storage sanity sx_sanity: mark tests for simplex sanity diff --git a/automated-pytest-suite/testcases/sanity/__init__.py b/automated-pytest-suite/testcases/sanity/__init__.py new file mode 100755 index 0000000..e69de29 diff --git a/automated-pytest-suite/testcases/sanity/conftest.py b/automated-pytest-suite/testcases/sanity/conftest.py new file mode 100755 index 0000000..6438efe --- /dev/null +++ b/automated-pytest-suite/testcases/sanity/conftest.py @@ -0,0 +1,2 @@ +from testfixtures.resource_mgmt import delete_resources_func, delete_resources_class, \ + delete_resources_module, delete_resources_session diff --git a/automated-pytest-suite/testcases/sanity/sanity_openstack/__init__.py b/automated-pytest-suite/testcases/sanity/sanity_openstack/__init__.py new file mode 100755 index 0000000..e69de29 diff --git a/automated-pytest-suite/testcases/sanity/sanity_openstack/conftest.py b/automated-pytest-suite/testcases/sanity/sanity_openstack/conftest.py new file mode 100755 index 0000000..3c3886a --- /dev/null +++ b/automated-pytest-suite/testcases/sanity/sanity_openstack/conftest.py @@ -0,0 +1,19 @@ +from pytest import fixture + +from keywords import network_helper + +NETWORK_NAME = "network-1" +SUBNET_NAME = "subnet" +SUBNET_RANGE = "192.168.0.0/24" +IP_VERSION = 4 + +@fixture(scope="module") +def create_network_sanity(): + """ + Create network and subnetwork used in sanity_openstack tests + """ + net_id = network_helper.create_network(name=NETWORK_NAME, cleanup="module")[1] + subnet_id = network_helper.create_subnet(name=SUBNET_NAME, network=NETWORK_NAME, + subnet_range=SUBNET_RANGE, dhcp=True, + ip_version=IP_VERSION, cleanup="module")[1] + return 
net_id, subnet_id diff --git a/automated-pytest-suite/testcases/sanity/sanity_openstack/test_instance_from_image.py b/automated-pytest-suite/testcases/sanity/sanity_openstack/test_instance_from_image.py new file mode 100755 index 0000000..d680dae --- /dev/null +++ b/automated-pytest-suite/testcases/sanity/sanity_openstack/test_instance_from_image.py @@ -0,0 +1,161 @@ +### +# +# Copyright (c) 2020 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# +# Create instances from image, perform different power status and set properties, +# using Cirros OS and Centos OS. +### + +import os +from pytest import mark, fixture + +from consts.stx import GuestImages, VMStatus, FlavorSpec +from keywords import nova_helper, glance_helper, vm_helper, system_helper +# TODO this will be used in evacuate test +# from testfixtures.pre_checks_and_configs import no_simplex +from utils import cli + +# TODO maybe add cirros image name to Guest images and use it from there +VM_IDS = list() +centos_params = { + "flavor_name_1": "f1.medium", + "flavor_name_2": "f2.medium", + "flavor_vcpus": 2, + "flavor_ram": 4096, + "flavor_disk": 40, + "properties": {FlavorSpec.GUEST_HEARTBEAT: 'false', + FlavorSpec.CPU_POLICY: 'shared'}, + "image_name": "centos", + "image_file": os.path.join(GuestImages.DEFAULT['image_dir'], + GuestImages.IMAGE_FILES['centos_7'][0]), + "disk_format": GuestImages.IMAGE_FILES['centos_7'][3] +} +cirros_params = { + "flavor_name_1": "f1.small", + "flavor_name_2": "f2.small", + "flavor_vcpus": 1, + "flavor_ram": 2048, + "flavor_disk": 20, + "properties": None, + "image_name": "cirros", + "image_file": os.path.join(GuestImages.DEFAULT["image_dir"], "cirros-0.4.0-x86_64-disk.img"), + "disk_format": "qcow2" +} + +dict_params = (centos_params, cirros_params) + +# I think this should be moved into vm_helper +# Does this require a check after to see that only admin is working? +def lock_instance(vm_id): + """ + Lock server(s). 
A non-admin user will not be able to execute actions + """ + cli.openstack(cmd='server lock', positional_args=vm_id) + +# I think this should be moved into vm_helper +# Does this require a check after to see that only admin is working? +def unlock_instance(vm_id): + """ + Unlock server(s) + """ + cli.openstack(cmd='server unlock', positional_args=vm_id) + +@fixture(params=dict_params, scope="module", ids=["centos", "cirros"]) +def create_flavors_and_images(request): + # TODO need to check with add_default_specs set to True on baremetal + fl_id = nova_helper.create_flavor(name=request.param['flavor_name_1'], + vcpus=request.param['flavor_vcpus'], + ram=request.param['flavor_ram'], + root_disk=request.param['flavor_disk'], + properties=request.param['properties'], is_public=True, + add_default_specs=False, cleanup="module")[1] + fl_id_2 = nova_helper.create_flavor(name=request.param["flavor_name_2"], + vcpus=request.param["flavor_vcpus"], + ram=request.param["flavor_ram"], + root_disk=request.param["flavor_disk"], + properties=request.param["properties"], is_public=True, + add_default_specs=False, cleanup="module")[1] + im_id = glance_helper.create_image(name=request.param['image_name'], + source_image_file=request.param['image_file'], + disk_format=request.param['disk_format'], + cleanup="module")[1] + return { + "flavor1": fl_id, + "flavor2": fl_id_2, + "image": im_id + } + +# this should be modified to call boot_vm_openstack when implemented +@fixture(scope="module") +def launch_instances(create_flavors_and_images, create_network_sanity): + net_id_list = list() + net_id_list.append({"net-id": create_network_sanity[0]}) + host = system_helper.get_active_controller_name() + vm_id = vm_helper.boot_vm(flavor=create_flavors_and_images["flavor1"], + nics=net_id_list, source="image", + source_id=create_flavors_and_images["image"], + vm_host=host, cleanup="module")[1] + # TODO check power state RUNING + VM_IDS.append(vm_id) + return vm_id + +@mark.robotsanity +def 
test_suspend_resume_instances(launch_instances): + vm_helper.suspend_vm(vm_id=launch_instances) + vm_helper.resume_vm(vm_id=launch_instances) + +@mark.robotsanity +@mark.parametrize( + ('status'), [ + (VMStatus.ERROR), + (VMStatus.ACTIVE) + ] +) +def test_set_error_active_flags_instances(status, launch_instances): + vm_helper.set_vm(vm_id=launch_instances, state=status) + +@mark.robotsanity +def test_pause_unpause_instances(launch_instances): + vm_helper.pause_vm(vm_id=launch_instances) + vm_helper.unpause_vm(vm_id=launch_instances) + +@mark.robotsanity +def test_stop_start_instances(launch_instances): + vm_helper.stop_vms(vms=launch_instances) + vm_helper.start_vms(vms=launch_instances) + +@mark.robotsanity +def test_lock_unlock_instances(launch_instances): + lock_instance(launch_instances) + unlock_instance(launch_instances) + +@mark.robotsanity +def test_reboot_instances(launch_instances): + vm_helper.reboot_vm(vm_id=launch_instances) + +@mark.robotsanity +def test_rebuild_instances(launch_instances, create_flavors_and_images): + vm_helper.rebuild_vm(vm_id=launch_instances, image_id=create_flavors_and_images["image"]) + +@mark.robotsanity +def test_resize_instances(launch_instances, create_flavors_and_images): + vm_helper.resize_vm(vm_id=launch_instances, flavor_id=create_flavors_and_images["flavor2"]) + vm_helper.resize_vm(vm_id=launch_instances, flavor_id=create_flavors_and_images["flavor1"]) + +@mark.robotsanity +def test_set_unset_properties_instances(launch_instances): + vm_helper.set_vm(vm_id=launch_instances, **{FlavorSpec.AUTO_RECOVERY: "true", + FlavorSpec.LIVE_MIG_MAX_DOWNTIME: "500", + FlavorSpec.LIVE_MIG_TIME_OUT: "180"}) + vm_helper.unset_vm(vm_id=launch_instances, properties=[FlavorSpec.AUTO_RECOVERY, + FlavorSpec.LIVE_MIG_MAX_DOWNTIME, + FlavorSpec.LIVE_MIG_TIME_OUT]) + +# @mark.robotsanity +# def test_evacuate_instances_from_hosts(no_simplex): +# TODO this is not yet completed +# vm_helper.evacuate_vms(host="controller-0", vms_to_check=VM_IDS) +# 
vm_helper.evacuate_vms(host="controller-1", vms_to_check=VM_IDS) +# pass