Submission for the StarlingX pytest framework

Includes:
- added automated test cases for k8s hugepage, QoS, and a large
number of pods on workers
- added an openstack check in test_openstack_services
- added test yaml files for k8s hugepage, QoS, and a large number
of pods on workers

Story: 2007406
Task: 39012

Author: Ayyappa Mantri <ayyappa.mantri@windriver.com>
Co-Authored-By: Yvonne Ding <yvonne.ding@windriver.com>
Signed-off-by: Yvonne Ding <yvonne.ding@windriver.com>

Change-Id: I5f795beec2218afae74b9cba356d74cd34d11295
Yvonne Ding 2020-03-09 13:05:28 -04:00
parent f5dadb4ccf
commit 25a3a344de
10 changed files with 432 additions and 7 deletions


@@ -659,6 +659,7 @@ class PodStatus:
POD_INIT = 'PodInitializing'
INIT = 'Init:0/1'
PENDING = 'Pending'
TERMINATING = 'Terminating'
class AppStatus:


@@ -183,6 +183,36 @@ def get_computes(administrative=None, operational=None, availability=None,
auth_info=auth_info)
def get_hypervisors(administrative=None, operational=None,
availability=None, con_ssh=None,
auth_info=Tenant.get('admin_platform')):
"""
Get nodes that can be used as hypervisor/worker.
e.g., in a standard config this means the worker nodes; in a DX+worker config it means
both the worker nodes and the controller nodes.
Args:
administrative:
operational:
availability:
con_ssh:
auth_info:
Returns (list):
"""
computes = get_computes(administrative=administrative, operational=operational,
availability=availability, con_ssh=con_ssh,
auth_info=auth_info)
if is_aio_system(controller_ssh=con_ssh,
auth_info=auth_info):
computes += get_controllers(administrative=administrative, operational=operational,
availability=availability,
con_ssh=con_ssh, auth_info=auth_info)
return computes
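A brief usage sketch of the new helper (host names are illustrative and depend on the lab under test); on a standard system only worker hosts are returned, while on an AIO system the controllers are included as well:

from keywords import system_helper

# Illustrative only, not part of this change.
hosts = system_helper.get_hypervisors()
# standard 2+2 config -> e.g. ['compute-0', 'compute-1']
# AIO-DX config       -> e.g. ['controller-0', 'controller-1']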
def get_hosts(personality=None, administrative=None, operational=None,
availability=None, hostname=None, strict=True,
exclude=False, con_ssh=None,


@@ -0,0 +1,115 @@
#
# Copyright (c) 2020 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
import yaml
from pytest import fixture
from utils import cli
from utils.tis_log import LOG
from consts.proj_vars import ProjVar
from consts.auth import HostLinuxUser
from consts.auth import Tenant
from keywords import common
from keywords import host_helper
from keywords import kube_helper
from keywords import system_helper
from testfixtures.recover_hosts import HostsToRecover
def modify_yaml(file_dir, file_name, str_to_add, hugepage_value):
"""
Add hugepages value to hugepages_pod.yaml file
Args:
file_dir(str): deployment file directory
file_name(str): deployment file name
str_to_add(str): hugepage resource key to add (hugepages-2Mi or hugepages-1Gi)
hugepage_value(str): hugepage value to assign to str_to_add
Returns (tuple):
the log directory and the name of the modified yaml file
"""
with open("{}/{}".format(file_dir, file_name), 'r') as f:
data = yaml.safe_load(f)
data['spec']['containers'][0]['resources']['limits'][str_to_add] = hugepage_value
newfile = "hugepages_pod_{}.yaml".format(hugepage_value)
with open("{}/{}".format(ProjVar.get_var('LOG_DIR'), newfile), 'w') as f:
yaml.dump(data, f)
return ProjVar.get_var('LOG_DIR'), newfile
@fixture(scope="module")
def get_hugepage_pod_file():
"""
Fixture used to return the hugepage deployment file
- Get the first hypervisor, or the standby controller on an AIO-DX system
- Check whether 2M hugepages are configured, else check 1G; if neither,
lock the host, configure 2 x 1G hugepages and unlock it
- Call modify_yaml to write the hugepage values into the yaml file
- Scp the modified file to the active controller to deploy the hugepages pod
- Delete the hugepages pod from the host after the test
"""
if system_helper.is_aio_duplex():
hostname = system_helper.get_standby_controller_name()
else:
hostname = system_helper.get_hypervisors()[0]
LOG.fixture_step("Checking hugepage values on {}".format(hostname))
proc_id = 0
out = host_helper.get_host_memories(
hostname, ('app_hp_avail_2M', 'app_hp_avail_1G'), proc_id)
if out[proc_id][0] > 0:
hugepage_val = "{}Mi".format(out[proc_id][0])
hugepage_str = "hugepages-2Mi"
elif out[proc_id][1] > 0:
hugepage_val = "{}Gi".format(out[proc_id][1])
hugepage_str = "hugepages-1Gi"
else:
hugepage_val = "{}Gi".format(2)
cmd = "{} -1G {}".format(proc_id, 2)
hugepage_str = "hugepages-1Gi"
HostsToRecover.add(hostname)
host_helper.lock_host(hostname)
LOG.fixture_step("Configuring hugepage values {} on {}".format(
hugepage_val, hostname))
cli.system('host-memory-modify {} {}'.format(hostname, cmd), ssh_client=None,
auth_info=Tenant.get('admin_platform'))
host_helper.unlock_host(hostname)
LOG.fixture_step("{} {} pod will be configured on {} proc id {}".format(
hugepage_str, hugepage_val, hostname, proc_id))
file_dir, file_name = modify_yaml(
"utils/test_files/", "hugepages_pod.yaml", hugepage_str, hugepage_val)
source_path = "{}/{}".format(file_dir, file_name)
home_dir = HostLinuxUser.get_home()
common.scp_from_localhost_to_active_controller(
source_path, dest_path=home_dir)
yield file_name
LOG.fixture_step("Delete hugepages pod")
kube_helper.delete_resources(
resource_names="hugepages-pod")
def test_hugepage_pod(get_hugepage_pod_file):
"""
Verify hugepage pod is deployed and running
Args:
get_hugepage_pod_file: module fixture
Steps:
- Create hugepage pod with deployment file
- Verify the hugepage pod is deployed and running
Teardown:
- Delete the hugepages pod from the host
"""
LOG.tc_step("Create hugepage pod with deployment file")
kube_helper.exec_kube_cmd(
sub_cmd="create -f {}".format(get_hugepage_pod_file))
LOG.tc_step("Verifies hugepage pod is deployed and running")
kube_helper.wait_for_pods_status(
pod_names="hugepages-pod", namespace="default")
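For reference, a minimal sketch of how modify_yaml is used by the fixture above (the 512Mi value is illustrative; in the fixture it is derived from host_helper.get_host_memories):

# Illustrative only, not part of this change.
log_dir, new_file = modify_yaml("utils/test_files/", "hugepages_pod.yaml",
                                "hugepages-2Mi", "512Mi")
# new_file == "hugepages_pod_512Mi.yaml"; under spec.containers[0].resources
# the limits section of the generated file now reads:
#   limits:
#     memory: 100Mi
#     hugepages-2Mi: 512Mi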


@@ -15,12 +15,23 @@ from utils.tis_log import LOG
def get_valid_controllers():
controllers = system_helper.get_controllers(
availability=(HostAvailState.AVAILABLE, HostAvailState.DEGRADED,
HostAvailState.ONLINE))
controllers = system_helper.get_controllers(availability=(HostAvailState.AVAILABLE,
HostAvailState.DEGRADED,
HostAvailState.ONLINE))
return controllers
@fixture(scope='module', autouse=True)
def check_openstack(stx_openstack_required):
pass
@fixture(scope='module')
def check_nodes():
if kube_helper.get_nodes(status='Ready', field='NAME', exclude=True, fail_ok=True):
skip('Not all nodes are ready. Skip stx-openstack re-apply test.')
def check_openstack_pods_healthy(host, timeout):
with host_helper.ssh_to_host(hostname=host) as con_ssh:
kube_helper.wait_for_pods_healthy(namespace='stx-openstack',
@@ -63,10 +74,10 @@ def test_openstack_services_healthy():
'controller-0',
'controller-1'
])
def test_reapply_stx_openstack_no_change(stx_openstack_required, controller):
def test_reapply_stx_openstack_no_change(stx_openstack_applied_required, check_nodes, controller):
"""
Args:
stx_openstack_required:
stx_openstack_applied_required:
Pre-requisite:
- stx-openstack application in applied state
@@ -193,7 +204,7 @@ def reset_if_modified(request):
@mark.sanity
@mark.sx_sanity
@mark.cpe_sanity
def test_stx_openstack_helm_override_update_and_reset(reset_if_modified):
def test_stx_openstack_helm_override_update_and_reset(check_nodes, reset_if_modified):
"""
Test helm override for openstack nova chart and reset
Args:


@@ -0,0 +1,65 @@
#
# Copyright (c) 2020 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
import json
from pytest import fixture
from pytest import mark
from utils.tis_log import LOG
from keywords import common
from keywords import kube_helper
from consts.auth import HostLinuxUser
@fixture(scope='module')
def copy_pod_yamls():
home_dir = HostLinuxUser.get_home()
filename = "qos_deployment.yaml"
ns = "qos"
LOG.fixture_step("Copying deployment yaml file")
common.scp_from_localhost_to_active_controller(
source_path="utils/test_files/{}".format(filename), dest_path=home_dir)
kube_helper.exec_kube_cmd(
sub_cmd="create -f {}".format(filename))
yield ns
LOG.fixture_step("Delete all pods in namespace {}".format(ns))
kube_helper.exec_kube_cmd(
sub_cmd="delete pods --all --namespace={}".format(ns))
LOG.fixture_step("Delete the namespace")
kube_helper.exec_kube_cmd(sub_cmd="delete namespace {}".format(ns))
@mark.parametrize('expected,pod', [("guaranteed", "qos-pod-1"),
("burstable", "qos-pod-2"),
("besteffort", "qos-pod-3"),
("burstable", "qos-pod-with-two-containers")])
def test_qos_class(copy_pod_yamls, expected, pod):
"""
Test the QoS class for pods
Args:
copy_pod_yamls : module fixture
expected : test param
pod : test param
Setup:
- Scp the qos deployment yaml file (module)
- Create the namespace and the qos pods from it
Steps:
- Check status of the pod
- Check the qos-class type is as expected
Teardown:
- Delete all pods in the namespace
- Delete the namespace
"""
ns = copy_pod_yamls
kube_helper.wait_for_pods_status(pod_names=pod, namespace=ns)
_, out = kube_helper.exec_kube_cmd(
sub_cmd="get pod {} --namespace={} --output=json".format(pod, ns))
out = json.loads(out)
LOG.tc_step("pod qos class is {} and expected is {}".format(
out["status"]["qosClass"], expected))
assert out["status"]["qosClass"].lower() == expected
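The expected values in the parametrization follow the standard Kubernetes QoS rules: a pod is Guaranteed when every container sets CPU and memory limits with requests equal to limits, BestEffort when no container sets any requests or limits, and Burstable otherwise. A rough, illustrative restatement of that rule (not part of the framework):

# Illustrative sketch of how Kubernetes assigns qosClass.
def expected_qos_class(containers):
    """containers: list of dicts with optional 'requests'/'limits' maps."""
    if all(not c.get('requests') and not c.get('limits') for c in containers):
        return 'besteffort'
    if all(c.get('limits', {}).keys() >= {'cpu', 'memory'} and
           c.get('requests', c['limits']) == c['limits'] for c in containers):
        return 'guaranteed'
    return 'burstable'

# qos-pod-1: cpu/memory requests == limits          -> guaranteed
# qos-pod-2: memory limit 200Mi, request 100Mi      -> burstable
# qos-pod-3: no requests or limits                  -> besteffort
# qos-pod-with-two-containers: one container sets
#   only a memory request                           -> burstable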


@@ -0,0 +1,69 @@
"""
Xstudio testcase reference http://162.248.221.232:8080/nice/xstudio/xstudio.jsp?id=T_18189
"""
from pytest import fixture
from pytest import skip
from utils.tis_log import LOG
from keywords import common
from keywords import kube_helper
from keywords import system_helper
from consts.auth import HostLinuxUser
from consts.stx import PodStatus
@fixture(scope='module')
def get_yaml():
filename = "rc_deployment.yaml"
ns = "rc"
number_nodes = 98
replicas = number_nodes * len(system_helper.get_hypervisors())
source_path = "utils/test_files/{}".format(filename)
home_dir = HostLinuxUser.get_home()
common.scp_from_localhost_to_active_controller(
source_path, dest_path=home_dir)
yield ns, replicas, filename
LOG.fixture_step("Delete the deployment")
kube_helper.exec_kube_cmd(
"delete deployment --namespace={} resource-consumer".format(ns))
LOG.fixture_step("Check pods are terminating")
kube_helper.wait_for_pods_status(
namespace=ns, status=PodStatus.TERMINATING)
LOG.fixture_step("Wait for all pods are deleted")
kube_helper.wait_for_resources_gone(namespace=ns)
LOG.fixture_step("Delete the service and namespace")
kube_helper.exec_kube_cmd(
"delete service rc-service --namespace={}".format(ns))
kube_helper.exec_kube_cmd("delete namespace {}".format(ns))
def test_scale_pods(get_yaml):
"""
Test the deployment of a high number of pods
Args:
get_yaml : module fixture
Setup:
- Scp deployment file
Steps:
- Check the deployment of resource-consumer
- Check the pods are up
- Scale the deployment to 98 * (number of worker nodes) replicas
- Check all the pods are running
Teardown:
- Delete the deployment and service
"""
ns, replicas, filename = get_yaml
LOG.tc_step("Create the deployment")
kube_helper.exec_kube_cmd(
sub_cmd="create -f {}".format(filename))
LOG.tc_step("Check resource consumer pods are running")
state, _ = kube_helper.wait_for_pods_status(namespace=ns, timeout=180)
if state:
LOG.tc_step(
"Scale the resource-consumer deployment to {} replicas".format(replicas))
kube_helper.exec_kube_cmd(
"scale deployment --namespace={} resource-consumer --replicas={}".format(ns, replicas))
kube_helper.wait_for_pods_status(namespace=ns, timeout=180)
else:
skip("resource consumer deployment failed")
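The replica count comes from the get_yaml fixture above; the arithmetic, under the assumption of a two-worker lab, is simply:

# Illustrative only, not part of this change.
number_nodes = 98                           # pods requested per hypervisor
hypervisors = ['compute-0', 'compute-1']    # assumption: two worker nodes
replicas = number_nodes * len(hypervisors)  # 196, passed to 'kubectl scale'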


@@ -18,7 +18,7 @@ from utils.tis_log import LOG
@fixture(scope='function')
def stx_openstack_required(request):
def stx_openstack_applied_required(request):
app_name = 'stx-openstack'
if not container_helper.is_stx_openstack_deployed(applied_only=True):
skip('stx-openstack application is not applied')
@@ -41,6 +41,12 @@ def stx_openstack_required(request):
request.addfinalizer(wait_for_recover)
@fixture(scope='module')
def stx_openstack_required():
if not container_helper.is_stx_openstack_deployed():
skip('stx-openstack application is not deployed')
@fixture(scope='session')
def skip_for_one_proc():
hypervisor = host_helper.get_up_hypervisors()


@@ -0,0 +1,23 @@
apiVersion: v1
kind: Pod
metadata:
name: hugepages-pod
spec:
containers:
- name: example
image: fedora:latest
command:
- sleep
- inf
volumeMounts:
- mountPath: /hugepages
name: hugepage
resources:
limits:
memory: 100Mi
requests:
memory: 100Mi
volumes:
- name: hugepage
emptyDir:
medium: HugePages


@@ -0,0 +1,62 @@
apiVersion: v1
kind: Namespace
metadata:
name: qos
---
apiVersion: v1
kind: Pod
metadata:
name: qos-pod-1
namespace: qos
spec:
containers:
- name: qos-demo-ctr
image: nginx
resources:
limits:
memory: "200Mi"
cpu: "700m"
requests:
memory: "200Mi"
cpu: "700m"
---
apiVersion: v1
kind: Pod
metadata:
name: qos-pod-2
namespace: qos
spec:
containers:
- name: qos-demo-2-ctr
image: nginx
resources:
limits:
memory: "200Mi"
requests:
memory: "100Mi"
---
apiVersion: v1
kind: Pod
metadata:
name: qos-pod-3
namespace: qos
spec:
containers:
- name: qos-demo-3-ctr
image: nginx
---
apiVersion: v1
kind: Pod
metadata:
name: qos-pod-with-two-containers
namespace: qos
spec:
containers:
- name: qos-demo-4-ctr-1
image: nginx
resources:
requests:
memory: "200Mi"
- name: qos-demo-4-ctr-2
image: redis


@@ -0,0 +1,43 @@
apiVersion: v1
kind: Namespace
metadata:
name: rc
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: resource-consumer
namespace: rc
spec:
replicas: 1
selector:
matchLabels:
app: resource-consumer
template:
metadata:
labels:
app: resource-consumer
spec:
containers:
- name: resource-consumer
image: "gcr.io/kubernetes-e2e-test-images/resource-consumer:1.4"
ports:
- containerPort: 80
resources:
requests:
cpu: 100m
memory: 256Mi
---
apiVersion: v1
kind: Service
metadata:
name: rc-service
namespace: rc
spec:
selector:
app: resource-consumer
ports:
- protocol: TCP
port: 80
targetPort: 9376
type: LoadBalancer