Pytest: Add installation-and-config test cases

1. 107-Logical-Volume-Resize.robot
2. 110-Validate-Partition-Modify-Operations.robot
3. 122-Attempt-Unlock-Host.robot
4. 123-Validate-Modified-System-Host-Disk-List.robot
5. 128-Test-Attempt-To-Increase-Partition-Size.robot
6. 133-Attempt-To-Decrease-Partition-Size.robot
7. 140-Controller-Node-Basic-Provisioning-Check.robot
8. 143-Ifprofile-Create-Show-Delete.robot
9. 165-Invalid-Values-Controllerfs.robot
10. 199-Check-Deleted-Compute-Properties.robot
11. 215-Verify-Profile-Imported-In-Compute.robot

Signed-off-by: Yong Fu <fuyong@neusoft.com>
Change-Id: Id5c4942d1679578a581532ee16daf7a7c490d3ba
This commit is contained in:
Yong Fu 2021-07-07 16:57:02 +08:00
parent a68778f64d
commit f33cc51cf8
16 changed files with 698 additions and 2 deletions

View File

@ -2,4 +2,4 @@
host=review.opendev.org
port=29418
project=starlingx/test.git
defaultbranch=r/stx.4.0
defaultbranch=devel

View File

@ -0,0 +1,22 @@
from pytest import fixture, skip
from testfixtures.resource_mgmt import *
from keywords import system_helper
from utils.clients.ssh import ControllerClient
from utils.tis_log import LOG
@fixture(scope='session')
def no_aio_system():
    """Session-scoped guard fixture: skip the requesting test on AIO systems."""
    LOG.fixture_step("(Session) Skip if AIO system")
    aio = system_helper.is_aio_system()
    if aio:
        skip('skip if AIO system')
@fixture(scope='session')
def no_bare_metal():
    """Session-scoped guard fixture: skip the requesting test on bare-metal labs."""
    LOG.fixture_step("(Session) Skip if bare metal")
    ssh = ControllerClient.get_active_controller()
    dmesg_out = ssh.exec_sudo_cmd('dmesg | grep -i paravirtualized')[1]
    # Bare metal: the kernel reports bare hardware and no KVM paravirtualization.
    running_on_kvm = 'KVM' in dmesg_out
    if not running_on_kvm and 'bare hardware' in dmesg_out:
        skip('bare metal does not support')

View File

@ -0,0 +1,64 @@
###
#
# Copyright (c) 2021 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
# Attempt to decrease the size of a partition.
#
# Author(s): Yong.Fu <yongx.fu@intel.com>
#
###
from pytest import mark
from testfixtures.horizon import admin_home_pg, driver
from consts.auth import Tenant
from keywords import storage_helper, system_helper
from utils import cli
from utils.clients.ssh import ControllerClient
from utils.horizon.pages.admin.platform import systemconfigurationpage
from utils.horizon.regions import forms
from utils.tis_log import LOG
@mark.installation
def test_attempt_to_decrease_partition_size(admin_home_pg):
    """
    133-Attempt-To-Decrease-Partition-Size.robot

    Verify that decreasing the size of an in-use disk partition is rejected
    by the CLI, and that shrinking the 'extension' controller filesystem is
    rejected by the Horizon System Configuration page.

    Args:
        admin_home_pg: logged-in Horizon admin home page fixture.

    Returns:
        None
    """
    con_ssh = ControllerClient.get_active_controller()
    # Partitions live on a worker on standard systems, on controller-0 on AIO.
    if not system_helper.is_aio_system():
        host = system_helper.get_computes()[0]
    else:
        host = 'controller-0'
    uuid = storage_helper.get_host_partitions(host, **{'status': 'In-Use'})[0]
    size = storage_helper.get_host_partition_values(host, uuid, fields='size_mib')[0]
    # NOTE(review): size_mib // 2048 yields a much smaller value (possibly 0);
    # presumably intended only to guarantee a decrease request — confirm units.
    new_size = int(size) // 2048
    out = cli.system('host-disk-partition-modify', "{} {} -s {}".
                     format(host, uuid, new_size), fail_ok=True, ssh_client=con_ssh,
                     auth_info=Tenant.get('admin_platform'))[1]
    assert 'Requested partition size must be larger than current size' in out
    fs_size = storage_helper.get_controllerfs_list(fs_name='extension')[0]
    LOG.info("Go to System Configuration Page")
    systemconfiguration_pg = systemconfigurationpage.SystemConfigurationPage(admin_home_pg.driver)
    systemconfiguration_pg.go_to_target_page()
    systemconfiguration_pg.go_to_controller_filesystem_tab()
    # Try to halve the 'extension' filesystem through the edit form.
    size = fs_size // 2
    edit_form = systemconfiguration_pg.controllerfs_table.edit_filesystem()
    edit_form.extension.value = size
    edit_form._submit_element.click()
    edit_form.driver.switch_to_alert().accept()
    edit_form.wait_till_spinner_disappears()
    # Re-open the form region only to dismiss it; the submit above is expected
    # to have failed, leaving an error banner on the page.
    tab = systemconfiguration_pg.controllerfs_table
    edit_form = forms.FormRegion(tab.driver, field_mappings=tab.EDIT_FILESYSTEM_FORM_FIELDS)
    edit_form.cancel()
    element = systemconfiguration_pg.driver.find_element_by_xpath(
        "//div[@class=\"alert alert-dismissable fade in alert-danger\"]")
    assert "should be bigger than" in element.get_attribute('innerText')

View File

@ -0,0 +1,39 @@
###
#
# Copyright (c) 2021 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
# Test attempting to increase the partition size.
#
# Author(s): Yong.Fu <yongx.fu@intel.com>
#
###
from pytest import mark
from consts.auth import Tenant
from keywords import storage_helper, system_helper
from utils import cli
from utils.clients.ssh import ControllerClient
@mark.installation
def test_attempt_to_increase_partition_size():
    """
    128-Test-Attempt-To-Increase-Partition-Size.robot

    Request an absurdly large partition size and verify the CLI rejects
    the value with a type error.

    Returns:
        None
    """
    ssh = ControllerClient.get_active_controller()
    # Partitions live on a worker on standard systems, on controller-0 on AIO.
    host = 'controller-0' if system_helper.is_aio_system() else system_helper.get_computes()[0]
    part_uuid = storage_helper.get_host_partitions(host, **{'status': 'In-Use'})[0]
    args = "{} {} -s '1111111111111111111111111'".format(host, part_uuid)
    output = cli.system('host-disk-partition-modify', args, fail_ok=True,
                        ssh_client=ssh, auth_info=Tenant.get('admin_platform'))[1]
    assert "Expected '<type 'int'>', got '<type 'long'>'" in output

View File

@ -0,0 +1,70 @@
###
#
# Copyright (c) 2021 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
# Attempt unlock host where nova-local lvg exists but does not have physical volume.
#
# Author(s): Yong.Fu <yongx.fu@intel.com>
#
###
import time
from pytest import mark
from consts.auth import Tenant
from keywords import system_helper, host_helper
from utils import cli, table_parser, exceptions
from utils.clients.ssh import ControllerClient
from utils.tis_log import LOG
def wait_for_host_lvg_status(host):
    """Poll 'system host-lvg-list' until the nova-local LVG is provisioned.

    Args:
        host: host name whose local volume groups are polled.

    Returns:
        0 once the nova-local LVG state is 'provisioned'.

    Raises:
        exceptions.VMTimeout: if the state is not reached within 300 seconds.
    """
    LOG.info("waiting for state: provisioned")
    end_time = time.time() + 300
    while time.time() < end_time:
        # Fetch the state inside the loop; the original duplicated this
        # two-line fetch both before and inside the loop body.
        table_ = table_parser.table(cli.system("host-lvg-list", host)[1])
        current_status = table_parser.get_values(table_, "State", **{"LVG Name": "nova-local"})[0]
        if current_status == "provisioned":
            LOG.info("host status has reached provisioned")
            return 0
        time.sleep(30)
    err_msg = "Timed out waiting for state: provisioned."
    raise exceptions.VMTimeout(err_msg)
@mark.installation
def test_attempt_unlock_host(no_aio_system):
    """
    122-Attempt-Unlock-Host.robot

    Verify that a host whose nova-local LVG exists but has no physical
    volume cannot be unlocked, then re-attach a physical volume and unlock
    successfully.

    Args:
        no_aio_system: session fixture that skips on AIO systems.

    Returns:
        None
    """
    host = system_helper.get_computes()[1]
    con_ssh = ControllerClient.get_active_controller()
    auth_info = Tenant.get('admin_platform')
    host_helper.lock_host(host)
    # Confirm nova-local is provisioned before recreating it.
    table_ = table_parser.table(cli.system("host-lvg-list", host,
                                           ssh_client=con_ssh, auth_info=auth_info)[1])
    state = table_parser.get_values(table_, "State", **{"LVG Name": "nova-local"})[0]
    assert 'provisioned' == state
    # Recreate the LVG so it exists without any physical volume attached.
    cli.system("host-lvg-delete", "{} nova-local".format(host), ssh_client=con_ssh, auth_info=auth_info)
    cli.system("host-lvg-add", "{} nova-local".format(host), ssh_client=con_ssh, auth_info=auth_info)
    wait_for_host_lvg_status(host)
    # The unlock must be rejected while nova-local has no physical volume.
    assert 'The nova-local volume group does not contain any physical volumes' in \
        host_helper.unlock_host(host, fail_ok=True)[1]
    # Re-attach the physical volume reported for nova-local, then unlock.
    table_ = table_parser.table(cli.system("host-pv-list", host,
                                           ssh_client=con_ssh, auth_info=auth_info)[1])
    uuid = table_parser.get_values(table_, "disk_or_part_uuid", **{"lvm_vg_name": "nova-local"})[0]
    cli.system("host-pv-add", "{} nova-local {}".format(host, uuid), ssh_client=con_ssh,
               auth_info=auth_info)
    host_helper.unlock_host(host)

View File

@ -0,0 +1,154 @@
###
#
# Copyright (c) 2021 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
# Test to verify that after being deleted, a compute can be assigned a
# new personality, and is shown in the inventory but not defined.
#
# Author(s): Yong.Fu <yongx.fu@intel.com>
#
###
import subprocess
import time
from pytest import mark
from testfixtures.horizon import admin_home_pg, driver
from consts.auth import Tenant
from keywords import system_helper, host_helper, storage_helper, container_helper, kube_helper
from utils import cli, exceptions, table_parser
from utils.clients.ssh import ControllerClient
from utils.horizon.pages.admin.platform import hostinventorypage
from utils.tis_log import LOG
def install_virtual_node(host):
    """
    Re-install a virtual node and wait for its installation to complete.

    Looks up the libvirt domain backing ``host``, reads the MAC address of
    its br2 interface, re-adds the host as a worker via 'system host-add',
    and blocks until install_state reaches 'completed'.

    Args:
        host: inventory host name (e.g. a compute).

    Returns:
        None
    """
    LOG.info("install {} virtual start".format(host))
    cmd = "virsh list | grep {} | awk '{{print$2}}'".format(host)
    obj = subprocess.Popen(cmd, shell=True, close_fds=True, stdout=subprocess.PIPE)
    virsh_name = str(obj.stdout.readline(), encoding="utf-8").replace('\n', '')
    cmd = "virsh -c qemu:///system domiflist {} | grep br2 | awk '{{print $5}}'".format(virsh_name)
    obj = subprocess.Popen(cmd, shell=True, close_fds=True, stdout=subprocess.PIPE)
    # Strip the trailing newline so it is not embedded in the host-add args.
    mac_address = str(obj.stdout.readline(), encoding="utf-8").strip()
    # A Popen object is always truthy, so the original 'assert obj' could never
    # fail; assert on the actual lookup result instead.
    assert mac_address, "failed to get br2 MAC address for domain {}".format(virsh_name)
    cli.system('host-add', ' -n {} -p worker -m {}'.format(host, mac_address))
    wait_for_host_install_status(host)
def wait_for_host_install_status(host):
    """Poll a host's install_state until it is 'completed' (up to 40 minutes).

    Args:
        host: inventory host name being installed.

    Returns:
        0 once install_state reaches 'completed'.

    Raises:
        exceptions.VMTimeout: if the state is not reached within 2400 seconds.
    """
    LOG.info("waiting for {} install_state status: completed".format(host))
    end_time = time.time() + 2400
    current_status = None
    while time.time() < end_time:
        # Fetch inside the loop; the original duplicated this call before and
        # inside the loop body.
        current_status = system_helper.get_host_values(host, "install_state")[0]
        if current_status == "completed":
            LOG.info("host status has reached completed")
            return 0
        time.sleep(30)
    err_msg = "Timed out waiting for {} install_state status: completed. {} " \
              "install_state status: {}".format(host, host, current_status)
    raise exceptions.VMTimeout(err_msg)
def wait_for_host_delete_status(host):
    """Poll the inventory until ``host`` no longer exists (up to 5 minutes).

    Args:
        host: inventory host name being deleted.

    Returns:
        0 once the host is gone from the inventory.

    Raises:
        exceptions.VMTimeout: if the host still exists after 300 seconds.
    """
    LOG.info("waiting for {} to delete".format(host))
    end_time = time.time() + 300
    while time.time() < end_time:
        # Check inside the loop; the original duplicated this call before and
        # inside the loop body.
        if not system_helper.host_exists(host):
            LOG.info("{} has been deleted".format(host))
            return 0
        time.sleep(20)
    err_msg = "Timed out waiting for {} to delete".format(host)
    raise exceptions.VMTimeout(err_msg)
def unlock_host(host, con_ssh):
    """Unlock *host*, first aborting an in-flight stx-openstack apply.

    Args:
        host: host name to unlock.
        con_ssh: active controller ssh client.
    """
    # An application apply in progress would block the unlock; abort it first.
    app_state = container_helper.get_apps(application="stx-openstack")[0]
    if app_state == "applying":
        container_helper.abort_app("stx-openstack")
    # Skip the post-unlock health checks: the node is still being provisioned.
    host_helper.unlock_host(host,
                            con_ssh=con_ssh,
                            available_only=False,
                            check_hypervisor_up=False,
                            check_webservice_up=False,
                            check_subfunc=False,
                            check_containers=False)
@mark.installation
def test_check_deleted_compute_properties(no_aio_system, admin_home_pg, no_bare_metal):
    """
    199-Check-Deleted-Compute-Properties.robot

    Delete a compute, verify it disappears from the Horizon host inventory,
    then re-install it as a worker and re-provision its interfaces,
    nova-local storage and labels before unlocking it and restoring the
    stx-openstack application.

    Args:
        no_aio_system: session fixture that skips on AIO systems.
        admin_home_pg: logged-in Horizon admin home page fixture.
        no_bare_metal: session fixture that skips on bare-metal labs.

    Returns:
        None
    """
    con_ssh = ControllerClient.get_active_controller()
    auth_info = Tenant.get('admin_platform')
    compute = system_helper.get_computes()[0]
    host_helper.lock_host(compute)
    with host_helper.ssh_to_host(compute) as host_ssh:
        LOG.info("Clear partition information")
        # In order to start the target host from ipxe, Need to clear the data in /dev/sda
        host_ssh.exec_sudo_cmd("dd if=/dev/zero of=/dev/sda bs=512 count=1")
    cli.system('host-delete', compute, ssh_client=con_ssh, auth_info=auth_info)
    wait_for_host_delete_status(compute)
    # The deleted compute must no longer appear in the Horizon hosts table.
    LOG.info("Go to Host Inventory")
    hostinventory_pg = hostinventorypage.HostInventoryPage(admin_home_pg.driver)
    hostinventory_pg.go_to_target_page()
    hostinventory_pg.go_to_hosts_tab()
    row = hostinventory_pg._get_row_with_host_name(compute)
    assert row is None
    # Re-add the node with a worker personality and wait for install to finish.
    install_virtual_node(compute)
    cli.system("interface-network-assign", "{} mgmt0 cluster-host".format(compute),
               ssh_client=con_ssh, auth_info=auth_info)
    LOG.info("Configure data interfaces for compute.")
    # Get Interface UUID
    table_ = table_parser.table(cli.system("host-if-list", "{} -a".format(compute))[1])
    data_uuid = table_parser.get_values(table_, "uuid", **{"class": "None"})
    # Add Interface To Data Network
    args0 = "-m 1500 -n data0 -c data {} {}".format(compute, data_uuid[0])
    args1 = "-m 1500 -n data1 -c data {} {}".format(compute, data_uuid[-1])
    cli.system('host-if-modify', args0, ssh_client=con_ssh, auth_info=auth_info)
    cli.system('host-if-modify', args1, ssh_client=con_ssh, auth_info=auth_info)
    cli.system("interface-datanetwork-assign", "{} {} physnet0"
               .format(compute, data_uuid[0]),
               ssh_client=con_ssh, auth_info=auth_info)
    cli.system("interface-datanetwork-assign", "{} {} physnet1"
               .format(compute, data_uuid[-1]),
               ssh_client=con_ssh, auth_info=auth_info)
    # Recreate local storage for nova on the second disk (/dev/sdb).
    uuid = storage_helper.get_host_disks(compute, **{"device_node": "/dev/sdb"})[0]
    # Add Local Volume Group
    cli.system("host-lvg-add", "{} nova-local".format(compute), ssh_client=con_ssh,
               auth_info=auth_info)
    # Add Physical Volume
    cli.system("host-pv-add", "{} nova-local {}".format(compute, uuid), ssh_client=con_ssh,
               auth_info=auth_info)
    labels = ["openstack-compute-node", "openvswitch", "sriov"]
    host_helper.assign_host_labels(compute, labels, unlock=False)
    unlock_host(compute, con_ssh)
    storage_helper.wait_for_ceph_health_ok(con_ssh=con_ssh, timeout=900, check_interval=30)
    # If stx-openstack is still (re)applying after the unlock, abort it; then
    # check the openstack pods and re-apply the app from scratch if unhealthy.
    application_status = container_helper.get_apps(application="stx-openstack")[0]
    if application_status == "applying":
        container_helper.abort_app(app_name="stx-openstack")
    pods_status = kube_helper.wait_for_pods_healthy(namespace="openstack", timeout=20,
                                                    con_ssh=con_ssh, fail_ok=True)
    if not pods_status:
        container_helper.remove_app(app_name="stx-openstack", applied_timeout=600)
        container_helper.apply_app(app_name="stx-openstack", applied_timeout=3600,
                                   check_interval=30, wait_for_alarm_gone=False)

View File

@ -0,0 +1,46 @@
###
#
# Copyright (c) 2021 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
# Controller Node Basic Provisioning Check:
# display all the devices (sudo vgdisplay),
# display the disk list for controller-0 (system host-disk-list),
# use grep to obtain each device's uuid and path and collect them in lists,
# then verify each device is reported by 'system host-disk-partition-show'.
#
# Author(s): Yong.Fu <yongx.fu@intel.com>
#
###
from pytest import mark
from consts.auth import Tenant
from keywords import storage_helper
from utils import cli
from utils.clients.ssh import ControllerClient
@mark.installation
def test_controller_node_basic_provisioning_check():
    """
    140-Controller-Node-Basic-Provisioning-Check.robot

    Verify basic provisioning on controller-0: the cgts-vg volume group is
    present, and 'system host-disk-partition-show' reports 'Partition not
    found' for every whole-disk uuid and device path.

    Returns:
        None
    """
    # Local import: this file's import block is otherwise project-local.
    from itertools import chain
    host = 'controller-0'
    con_ssh = ControllerClient.get_active_controller()
    assert 'cgts-vg' in con_ssh.exec_sudo_cmd("vgdisplay")[1]
    uuids = storage_helper.get_host_disks(host)
    device_paths = storage_helper.get_host_disks(host, field='device_path')
    # The original duplicated an identical loop for uuids and device paths and
    # re-fetched the auth info on every iteration; do both once.
    auth_info = Tenant.get('admin_platform')
    for disk_id in chain(uuids, device_paths):
        out = cli.system('host-disk-partition-show', '{} {}'.format(host, disk_id),
                         fail_ok=True, ssh_client=con_ssh, auth_info=auth_info)[1]
        assert 'Partition not found on host' in out

View File

@ -0,0 +1,46 @@
###
#
# Copyright (c) 2021 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
# Ifprofile-Create-Show-Delete.
#
# Author(s): Yong.Fu <yongx.fu@intel.com>
#
###
from pytest import mark
from consts.auth import Tenant
from keywords import system_helper
from utils import cli, table_parser
from utils.clients.ssh import ControllerClient
@mark.installation
def test_ifprofile_create_show_delete(no_aio_system):
    """
    143-Ifprofile-Create-Show-Delete.robot

    Create a data ifprofile from a compute, verify its show output contains
    a port config, an interface config and a uuid, then delete it.

    Args:
        no_aio_system: session fixture that skips on AIO systems.
    """
    ssh = ControllerClient.get_active_controller()
    auth = Tenant.get('admin_platform')
    # All hosts must be operationally enabled before working with profiles.
    assert 'disable' not in system_helper.get_hosts(field='operational')
    compute = system_helper.get_computes()[0]
    cli.system('ifprofile-add', 'data {}'.format(compute), ssh_client=ssh, auth_info=auth)
    table_ = table_parser.table(cli.system('ifprofile-show', 'data',
                                           ssh_client=ssh, auth_info=auth)[1])
    # Each field must be present and non-empty in the profile's show output.
    for field in ('port config', 'interface config', 'uuid'):
        assert table_parser.get_value_two_col_table(table_, field)[0]
    cli.system('ifprofile-delete', 'data', ssh_client=ssh, auth_info=auth)

View File

@ -0,0 +1,41 @@
###
#
# Copyright (c) 2021 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
# Verify Invalid Values To Modify Controller FS.
# Ensure Input Values To Reduce Or Increase More Than The
# Size Of Total File System Partition Are Rejected.
#
# Author(s): Yong.Fu <yongx.fu@intel.com>
#
###
import subprocess
from pytest import mark
from keywords import storage_helper
@mark.installation
def test_invalid_values_controllerfs():
    """
    165-Invalid-Values-Controllerfs.robot

    Verify that shrinking the database controllerfs below its current size
    and growing it beyond the configured disk size are both rejected.
    """
    filename = "qemu_setup.yaml"
    # Pull the size configured under controller-0 out of the qemu setup file.
    cmd = "cat utils/test_files/{} | grep controller-0 -A 1 | awk -F ' ' '{{print$2}}' | tail -n 1" \
        .format(filename)
    proc = subprocess.Popen(cmd, shell=True, close_fds=True, stdout=subprocess.PIPE)
    maxsize = str(proc.stdout.readline(), encoding="utf-8")
    current = storage_helper.get_controllerfs_values('database')[0]
    # One below the current size must be refused as too small...
    err = storage_helper.modify_controllerfs(**{'database': current - 1}, fail_ok=True)[1]
    assert 'should be bigger' in err
    # ...and the whole-disk value must be refused as too large.
    err = storage_helper.modify_controllerfs(**{'database': int(maxsize)}, fail_ok=True)[1]
    assert 'Rejecting' in err

View File

@ -0,0 +1,61 @@
###
#
# Copyright (c) 2021 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
# Verify that logical volumes can be resized the controller (CLI/GUI).
#
# Author(s): Yong.Fu <yongx.fu@intel.com>
#
###
import time
from pytest import mark
from testfixtures.horizon import admin_home_pg, driver
from keywords import storage_helper
from utils import exceptions
from utils.horizon.pages.admin.platform import systemconfigurationpage
from utils.tis_log import LOG
def wait_for_controllerfs_status():
    """Poll the 'extension' controllerfs until its state is 'available'.

    Returns:
        0 once the state reaches 'available'.

    Raises:
        exceptions.VMTimeout: if the state is not reached within 300 seconds.
    """
    LOG.info("waiting for state: available")
    end_time = time.time() + 300
    while time.time() < end_time:
        # Fetch inside the loop; the original duplicated this call before and
        # inside the loop body.
        current_status = storage_helper.get_controllerfs_values('extension', fields='state')[0]
        if current_status == "available":
            LOG.info("host status has reached available")
            return 0
        time.sleep(30)
    err_msg = "Timed out waiting for state: available."
    raise exceptions.VMTimeout(err_msg)
@mark.installation
def test_logical_volume_resize(admin_home_pg):
    """
    107-Logical-Volume-Resize.robot

    Resize the 'extension' controller filesystem once via the CLI and once
    via the Horizon System Configuration page, verifying the new size and
    waiting for the filesystem to return to the available state each time.

    Args:
        admin_home_pg: logged-in Horizon admin home page fixture.

    Returns:
        None
    """
    fs_size = storage_helper.get_controllerfs_list(fs_name='extension')[0]
    # Grow by one unit via the CLI.  NOTE(review): unit presumed GiB — confirm.
    next_size = fs_size + 1
    storage_helper.modify_controllerfs(**{'extension': next_size})
    assert next_size == storage_helper.get_controllerfs_list(fs_name='extension')[0]
    wait_for_controllerfs_status()
    LOG.info("Go to System Configuration Page")
    systemconfiguration_pg = systemconfigurationpage.SystemConfigurationPage(admin_home_pg.driver)
    systemconfiguration_pg.go_to_target_page()
    systemconfiguration_pg.go_to_controller_filesystem_tab()
    # Grow by one more unit through the Horizon edit-filesystem form.
    size = next_size + 1
    systemconfiguration_pg.edit_filesystem(extension=size)
    assert size == storage_helper.get_controllerfs_list(fs_name='extension')[0]
    wait_for_controllerfs_status()

View File

@ -0,0 +1,30 @@
###
#
# Copyright (c) 2021 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
# Verify Gib Column Present.
#
# Author(s): Yong.Fu <yongx.fu@intel.com>
#
###
from pytest import mark
from keywords import system_helper, storage_helper
@mark.installation
def test_validate_modified_system_host_disk_list(no_aio_system):
    """
    123-Validate-Modified-System-Host-Disk-List.robot

    Verify the host disk list exposes a non-empty available_gib column for
    a compute node.

    Args:
        no_aio_system: session fixture that skips on AIO systems.
    """
    first_compute = system_helper.get_computes()[0]
    available_gib = storage_helper.get_host_disks(first_compute, field='available_gib')
    assert available_gib

View File

@ -0,0 +1,39 @@
###
#
# Copyright (c) 2021 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
# This test case validates that the partition remains unchanged because
# the input does not include the required parameter.
#
# Author(s): Yong.Fu <yongx.fu@intel.com>
#
###
from pytest import mark
from consts.auth import Tenant
from keywords import storage_helper, system_helper
from utils import cli
from utils.clients.ssh import ControllerClient
@mark.installation
def test_validate_partition_modify_operations():
    """
    110-Validate-Partition-Modify-Operations.robot

    Run host-disk-partition-modify without any update parameter and verify
    the command fails, leaving the partition unchanged.
    """
    ssh = ControllerClient.get_active_controller()
    # Partitions live on controller-0 on AIO systems, on a worker otherwise.
    host = 'controller-0' if system_helper.is_aio_system() else system_helper.get_computes()[0]
    part_uuid = storage_helper.get_host_partitions(host)[0]
    code, msg = cli.system('host-disk-partition-modify', "{} {}".format(host, part_uuid),
                           fail_ok=True, ssh_client=ssh,
                           auth_info=Tenant.get('admin_platform'))
    assert code != 0
    assert 'No update parameters specified, partition is unchanged.' == msg

View File

@ -0,0 +1,50 @@
###
#
# Copyright (c) 2021 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
# Verify Import profile and apply to multiple nodes.
#
# Author(s): Yong.Fu <yongx.fu@intel.com>
#
###
import subprocess
from pytest import mark
from keywords import system_helper, host_helper
def get_qemu_info(target, virsh_name):
    """Return the first line of 'virsh dumpxml <virsh_name>' matching *target*.

    Parameter renamed from the misspelled 'tagert'; the only caller in this
    module passes it positionally, so the rename is safe here.

    Args:
        target: substring to grep for in the domain XML (e.g. 'vcpu').
        virsh_name: libvirt domain name.

    Returns:
        The first matching output line as str ('' if nothing matched).
    """
    cmd = "virsh dumpxml {} | grep {}".format(virsh_name, target)
    obj = subprocess.Popen(cmd, shell=True, close_fds=True, stdout=subprocess.PIPE)
    return str(obj.stdout.readline(), encoding="utf-8")
@mark.installation
def test_verify_profile_imported_in_compute(no_aio_system):
    """
    215-Verify-Profile-Imported-In-Compute.robot

    For every compute, compare the cpu/core/memory figures reported inside
    the guest against the values of its backing qemu/libvirt domain.

    Args:
        no_aio_system: session fixture that skips on AIO systems.
    """
    for compute in system_helper.get_computes():
        # Resolve the libvirt domain name that backs this compute.
        lookup = "virsh list | grep {} | awk '{{print$2}}'".format(compute)
        proc = subprocess.Popen(lookup, shell=True, close_fds=True, stdout=subprocess.PIPE)
        domain = str(proc.stdout.readline(), encoding="utf-8").replace('\n', '')
        # Values as the hypervisor sees them.
        vcpu = get_qemu_info('vcpu', domain)
        cores = get_qemu_info('cores', domain)
        current_memory = get_qemu_info('currentMemory', domain)
        # Values as the guest OS sees them, checked in the same order.
        with host_helper.ssh_to_host(compute) as host_ssh:
            cpu = host_ssh.exec_cmd("lscpu | grep 'CPU(s):' | head -1 | awk '{{print$2}}'")[1]
            assert cpu in vcpu
            core = host_ssh.exec_cmd("lscpu | grep 'Core(s)' | awk '{{print$4}}'")[1]
            assert core in cores
            memory = host_ssh.exec_sudo_cmd("dmidecode | grep 'Size:.*MB'| awk '{{print$2}}'")[1]
            assert memory in current_memory or str(int(memory)*1024) in current_memory

View File

@ -447,7 +447,9 @@ class SystemConfigurationPage(basepage.BasePage):
if cancel:
edit_form.cancel()
else:
edit_form.submit()
edit_form._submit_element.click()
edit_form.driver.switch_to_alert().accept()
edit_form.wait_till_spinner_disappears()
def edit_storage_pool(self, tier_name, cinder_pool=None, glance_pool=None,
ephemeral_pool=None, object_pool=None, cancel=False):

View File

@ -0,0 +1,32 @@
configuration_0:
controller-0:
controller_0_partition_a: 200
controller_0_partition_b: 200
controller_0_memory_size: 10240
controller_0_system_cores: 4
controller-0-compute-0:
controller_0_compute_0_partition_a: 200
controller_0_compute_0_partition_b: 200
controller_0_compute_0_memory_size: 16384
controller_0_compute_0_system_cores: 4
controller-0-compute-1:
controller_0_compute_1_partition_a: 200
controller_0_compute_1_partition_b: 200
controller_0_compute_1_memory_size: 16384
controller_0_compute_1_system_cores: 4
configuration_1:
controller-1:
controller_1_partition_a: 200
controller_1_partition_b: 200
controller_1_memory_size: 10240
controller_1_system_cores: 4
controller-1-compute-0:
controller_1_compute_0_partition_a: 200
controller_1_compute_0_partition_b: 200
controller_1_compute_0_memory_size: 3072
controller_1_compute_0_system_cores: 2
general_system_configurations:
os_system_memory: 1024
disk_space_allocated_to_os: 20
os_system_cores: 2
default_mount_point: '/'