From 6256b0d106ad97507373f1287d9ed3d292dcea6e Mon Sep 17 00:00:00 2001 From: Tao Liu Date: Tue, 11 Dec 2018 12:13:44 -0500 Subject: [PATCH] Change compute node to worker node personality This update replaced the compute personality & subfunction to worker, and updated internal and customer visible references. In addition, the compute-huge package has been renamed to worker-utils as it contains various scripts/services that used to affine running tasks or interface IRQ to specific CPUs. The worker_reserved.conf is now installed to /etc/platform. The cpu function 'VM' has also been renamed to 'Application'. Tests Performed: Non-containerized deployment AIO-SX: Sanity and Nightly automated test suite AIO-DX: Sanity and Nightly automated test suite 2+2 System: Sanity and Nightly automated test suite 2+2 System: Horizon Patch Orchestration Kubernetes deployment: AIO-SX: Create, delete, reboot and rebuild instances 2+2+2 System: worker nodes are unlock enable and no alarms Story: 2004022 Task: 27013 Change-Id: I0e0be6b3a6f25f7fb8edf64ea4326854513aa396 Signed-off-by: Tao Liu --- centos_iso_image.inc | 14 +- centos_pkg_dirs | 4 +- compute-huge/PKG-INFO | 13 - compute-huge/centos/build_srpm.data | 3 - .../compute-huge/compute-huge-goenabled.sh | 24 -- computeconfig/PKG-INFO | 13 - computeconfig/centos/build_srpm.data | 2 - computeconfig/centos/computeconfig.spec | 85 ------- config-gate/centos/config-gate.spec | 18 +- config-gate/files/Makefile | 4 +- config-gate/files/wait_for_config_init.sh | 4 +- ...init.sh => wait_for_worker_config_init.sh} | 6 +- ...ate.service => worker-config-gate.service} | 6 +- .../controllerconfig/backup_restore.py | 14 +- .../controllerconfig/sysinv_api.py | 22 +- .../controllerconfig/utils.py | 2 +- .../controllerconfig/scripts/install_clone.py | 24 +- .../src/bin/puppet-manifest-apply.sh | 6 +- .../src/hieradata/controller.yaml | 2 +- .../hieradata/{compute.yaml => worker.yaml} | 2 +- .../src/manifests/{compute.pp => worker.pp} | 4 +- 
.../modules/openstack/manifests/ceilometer.pp | 4 +- .../modules/openstack/manifests/neutron.pp | 2 +- .../lib/facter/disable_compute_services.rb | 7 - .../lib/facter/disable_worker_services.rb | 7 + .../src/modules/platform/manifests/compute.pp | 12 +- .../src/modules/platform/manifests/config.pp | 6 +- .../modules/platform/manifests/kubernetes.pp | 2 +- .../src/modules/platform/manifests/mtce.pp | 2 +- .../modules/platform/manifests/postgresql.pp | 2 +- ...rved.conf.erb => worker_reserved.conf.erb} | 12 +- .../src/mtce/templates/mtc_ini.erb | 2 +- .../cgtsclient/common/constants.py | 2 +- .../cgts-client/cgtsclient/v1/iHost_shell.py | 2 +- .../cgts-client/cgtsclient/v1/icpu.py | 18 +- .../cgtsclient/v1/imemory_shell.py | 8 +- .../cgtsclient/v1/iprofile_shell.py | 4 +- .../cgtsclient/v1/pci_device_shell.py | 2 +- sysinv/sysinv/sysinv/sysinv/agent/manager.py | 18 +- sysinv/sysinv/sysinv/sysinv/agent/node.py | 40 ++- sysinv/sysinv/sysinv/sysinv/agent/rpcapi.py | 2 +- .../sysinv/sysinv/api/controllers/v1/cpu.py | 10 +- .../sysinv/api/controllers/v1/cpu_utils.py | 34 +-- .../sysinv/sysinv/api/controllers/v1/disk.py | 6 +- .../sysinv/sysinv/api/controllers/v1/host.py | 180 ++++++------- .../sysinv/api/controllers/v1/interface.py | 4 +- .../sysinv/sysinv/api/controllers/v1/lvg.py | 18 +- .../sysinv/api/controllers/v1/memory.py | 2 +- .../sysinv/api/controllers/v1/partition.py | 10 +- .../sysinv/api/controllers/v1/pci_device.py | 4 +- .../sysinv/api/controllers/v1/profile.py | 20 +- .../sysinv/sysinv/api/controllers/v1/pv.py | 14 +- .../api/controllers/v1/sdn_controller.py | 4 +- .../sysinv/api/controllers/v1/storage_ceph.py | 2 +- .../sysinv/api/controllers/v1/system.py | 2 +- .../sysinv/sysinv/api/controllers/v1/utils.py | 6 +- sysinv/sysinv/sysinv/sysinv/common/ceph.py | 2 +- .../sysinv/sysinv/sysinv/common/constants.py | 24 +- sysinv/sysinv/sysinv/sysinv/common/health.py | 2 +- .../sysinv/sysinv/common/service_parameter.py | 20 +- 
.../sysinv/common/storage_backend_conf.py | 2 +- sysinv/sysinv/sysinv/sysinv/common/utils.py | 24 +- .../sysinv/sysinv/conductor/kube_app.py | 4 +- .../sysinv/sysinv/sysinv/conductor/manager.py | 132 +++++----- .../sysinv/sysinv/conductor/openstack.py | 2 +- .../sysinv/sysinv/sysinv/conductor/rpcapi.py | 14 +- sysinv/sysinv/sysinv/sysinv/db/api.py | 2 +- .../migrate_repo/versions/001_init.py | 2 +- .../migrate_repo/versions/035_system_type.py | 2 +- .../sysinv/sysinv/db/sqlalchemy/models.py | 2 +- sysinv/sysinv/sysinv/sysinv/helm/neutron.py | 2 +- sysinv/sysinv/sysinv/sysinv/helm/nova.py | 4 +- sysinv/sysinv/sysinv/sysinv/puppet/ceph.py | 4 +- sysinv/sysinv/sysinv/sysinv/puppet/device.py | 2 +- .../sysinv/sysinv/sysinv/puppet/interface.py | 16 +- .../sysinv/sysinv/sysinv/puppet/kubernetes.py | 2 +- sysinv/sysinv/sysinv/sysinv/puppet/ldap.py | 2 +- sysinv/sysinv/sysinv/sysinv/puppet/neutron.py | 2 +- sysinv/sysinv/sysinv/sysinv/puppet/nova.py | 6 +- sysinv/sysinv/sysinv/sysinv/puppet/ovs.py | 4 +- .../sysinv/sysinv/sysinv/puppet/platform.py | 12 +- sysinv/sysinv/sysinv/sysinv/puppet/storage.py | 2 +- .../sysinv/sysinv/tests/api/test_interface.py | 236 +++++++++--------- .../tests/api/test_interface_network.py | 96 +++---- .../sysinv/sysinv/tests/api/test_profile.py | 50 ++-- .../sysinv/tests/conductor/test_manager.py | 18 +- .../sysinv/tests/conductor/test_rpcapi.py | 2 +- .../sysinv/tests/events_for_testing.yaml | 22 +- .../sysinv/tests/puppet/test_interface.py | 144 +++++------ {compute-huge => worker-utils}/.gitignore | 2 +- worker-utils/centos/build_srpm.data | 3 + .../centos/worker-utils.spec | 12 +- .../worker-utils}/LICENSE | 0 .../worker-utils}/Makefile | 8 +- .../worker-utils}/affine-interrupts.sh | 2 +- .../worker-utils}/affine-platform.sh | 6 +- .../worker-utils}/affine-platform.sh.service | 2 +- .../worker-utils}/cpumap_functions.sh | 14 +- .../cpumap_functions_unit_test.sh | 0 .../worker-utils}/ps-sched.sh | 0 .../worker-utils}/set-cpu-wakeup-latency.sh 
| 4 +- .../worker-utils}/task_affinity_functions.sh | 6 +- .../worker-utils}/topology | 0 .../worker-utils}/topology.py | 0 worker-utils/worker-utils/worker-goenabled.sh | 24 ++ .../worker-utils/worker_reserved.conf | 45 +--- {computeconfig => workerconfig}/.gitignore | 2 +- workerconfig/PKG-INFO | 13 + workerconfig/centos/build_srpm.data | 2 + workerconfig/centos/workerconfig.spec | 85 +++++++ .../workerconfig}/LICENSE | 0 .../workerconfig}/Makefile | 8 +- .../workerconfig}/config_goenabled_check.sh | 0 .../workerconfig/worker_config | 42 ++-- .../workerconfig/worker_services | 38 +-- .../workerconfig-combined.service | 6 +- .../workerconfig/workerconfig.service | 8 +- 117 files changed, 946 insertions(+), 988 deletions(-) delete mode 100644 compute-huge/PKG-INFO delete mode 100644 compute-huge/centos/build_srpm.data delete mode 100644 compute-huge/compute-huge/compute-huge-goenabled.sh delete mode 100644 computeconfig/PKG-INFO delete mode 100644 computeconfig/centos/build_srpm.data delete mode 100644 computeconfig/centos/computeconfig.spec rename config-gate/files/{wait_for_compute_config_init.sh => wait_for_worker_config_init.sh} (61%) rename config-gate/files/{compute-config-gate.service => worker-config-gate.service} (55%) rename puppet-manifests/src/hieradata/{compute.yaml => worker.yaml} (98%) rename puppet-manifests/src/manifests/{compute.pp => worker.pp} (94%) delete mode 100644 puppet-manifests/src/modules/platform/lib/facter/disable_compute_services.rb create mode 100644 puppet-manifests/src/modules/platform/lib/facter/disable_worker_services.rb rename puppet-manifests/src/modules/platform/templates/{compute_reserved.conf.erb => worker_reserved.conf.erb} (90%) rename {compute-huge => worker-utils}/.gitignore (71%) create mode 100644 worker-utils/centos/build_srpm.data rename compute-huge/centos/compute-huge.spec => worker-utils/centos/worker-utils.spec (74%) rename {compute-huge/compute-huge => worker-utils/worker-utils}/LICENSE (100%) rename 
{compute-huge/compute-huge => worker-utils/worker-utils}/Makefile (81%) rename {compute-huge/compute-huge => worker-utils/worker-utils}/affine-interrupts.sh (99%) rename {compute-huge/compute-huge => worker-utils/worker-utils}/affine-platform.sh (98%) rename {compute-huge/compute-huge => worker-utils/worker-utils}/affine-platform.sh.service (92%) rename {compute-huge/compute-huge => worker-utils/worker-utils}/cpumap_functions.sh (97%) rename {compute-huge/compute-huge => worker-utils/worker-utils}/cpumap_functions_unit_test.sh (100%) rename {compute-huge/compute-huge => worker-utils/worker-utils}/ps-sched.sh (100%) rename {compute-huge/compute-huge => worker-utils/worker-utils}/set-cpu-wakeup-latency.sh (97%) rename {compute-huge/compute-huge => worker-utils/worker-utils}/task_affinity_functions.sh (99%) rename {compute-huge/compute-huge => worker-utils/worker-utils}/topology (100%) rename {compute-huge/compute-huge => worker-utils/worker-utils}/topology.py (100%) create mode 100644 worker-utils/worker-utils/worker-goenabled.sh rename compute-huge/compute-huge/compute_reserved.conf => worker-utils/worker-utils/worker_reserved.conf (57%) rename {computeconfig => workerconfig}/.gitignore (71%) create mode 100644 workerconfig/PKG-INFO create mode 100644 workerconfig/centos/build_srpm.data create mode 100644 workerconfig/centos/workerconfig.spec rename {computeconfig/computeconfig => workerconfig/workerconfig}/LICENSE (100%) rename {computeconfig/computeconfig => workerconfig/workerconfig}/Makefile (52%) rename {computeconfig/computeconfig => workerconfig/workerconfig}/config_goenabled_check.sh (100%) rename computeconfig/computeconfig/compute_config => workerconfig/workerconfig/worker_config (92%) rename computeconfig/computeconfig/compute_services => workerconfig/workerconfig/worker_services (85%) rename computeconfig/computeconfig/computeconfig-combined.service => workerconfig/workerconfig/workerconfig-combined.service (73%) rename 
computeconfig/computeconfig/computeconfig.service => workerconfig/workerconfig/workerconfig.service (65%) diff --git a/centos_iso_image.inc b/centos_iso_image.inc index 2d9159bfac..0695828b5a 100644 --- a/centos_iso_image.inc +++ b/centos_iso_image.inc @@ -2,13 +2,13 @@ # If these have dependencies, they will be pulled in automatically # -# compute-huge -compute-huge +# worker-utils +worker-utils -# computeconfig -computeconfig -computeconfig-standalone -computeconfig-subfunction +# workerconfig +workerconfig +workerconfig-standalone +workerconfig-subfunction # configutilities configutilities @@ -30,7 +30,7 @@ sysinv # config-gate config-gate -config-gate-compute +config-gate-worker # puppet-manifests puppet-manifests diff --git a/centos_pkg_dirs b/centos_pkg_dirs index 83e3a8c1bc..499c18faa1 100644 --- a/centos_pkg_dirs +++ b/centos_pkg_dirs @@ -1,5 +1,5 @@ -compute-huge -computeconfig +worker-utils +workerconfig configutilities controllerconfig storageconfig diff --git a/compute-huge/PKG-INFO b/compute-huge/PKG-INFO deleted file mode 100644 index 6a911af24a..0000000000 --- a/compute-huge/PKG-INFO +++ /dev/null @@ -1,13 +0,0 @@ -Metadata-Version: 1.1 -Name: compute-huge -Version: 1.0 -Summary: Initial compute node hugepages and reserved cpus configuration -Home-page: -Author: Windriver -Author-email: info@windriver.com -License: Apache-2.0 - -Description: Initial compute node hugepages and reserved cpus configuration - - -Platform: UNKNOWN diff --git a/compute-huge/centos/build_srpm.data b/compute-huge/centos/build_srpm.data deleted file mode 100644 index 7e36f63cc1..0000000000 --- a/compute-huge/centos/build_srpm.data +++ /dev/null @@ -1,3 +0,0 @@ -SRC_DIR="compute-huge" -COPY_LIST="$SRC_DIR/LICENSE" -TIS_PATCH_VER=10 diff --git a/compute-huge/compute-huge/compute-huge-goenabled.sh b/compute-huge/compute-huge/compute-huge-goenabled.sh deleted file mode 100644 index fc909a00ca..0000000000 --- a/compute-huge/compute-huge/compute-huge-goenabled.sh +++ /dev/null @@ 
-1,24 +0,0 @@ -#!/bin/bash -# -# Copyright (c) 2014,2016 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - -# -# compute-huge.sh "goenabled" check. -# -# If a problem was detected during configuration of huge pages and compute -# resources then the board is not allowed to enable. -# -COMPUTE_HUGE_GOENABLED="/var/run/compute_huge_goenabled" - -source "/etc/init.d/log_functions.sh" -source "/usr/bin/tsconfig" - -if [ -e ${VOLATILE_COMPUTE_CONFIG_COMPLETE} -a ! -f ${COMPUTE_HUGE_GOENABLED} ]; then - log_error "Compute manifest CPU configuration check failed. Failing goenabled check." - exit 1 -fi - -exit 0 diff --git a/computeconfig/PKG-INFO b/computeconfig/PKG-INFO deleted file mode 100644 index 9b1ac69c21..0000000000 --- a/computeconfig/PKG-INFO +++ /dev/null @@ -1,13 +0,0 @@ -Metadata-Version: 1.1 -Name: computeconfig -Version: 1.0 -Summary: Initial compute node configuration -Home-page: -Author: Windriver -Author-email: info@windriver.com -License: Apache-2.0 - -Description: Initial compute node configuration - - -Platform: UNKNOWN diff --git a/computeconfig/centos/build_srpm.data b/computeconfig/centos/build_srpm.data deleted file mode 100644 index edabe97e4d..0000000000 --- a/computeconfig/centos/build_srpm.data +++ /dev/null @@ -1,2 +0,0 @@ -SRC_DIR="computeconfig" -TIS_PATCH_VER=11 diff --git a/computeconfig/centos/computeconfig.spec b/computeconfig/centos/computeconfig.spec deleted file mode 100644 index a58a1bad5a..0000000000 --- a/computeconfig/centos/computeconfig.spec +++ /dev/null @@ -1,85 +0,0 @@ -Summary: computeconfig -Name: computeconfig -Version: 1.0 -Release: %{tis_patch_ver}%{?_tis_dist} -License: Apache-2.0 -Group: base -Packager: Wind River -URL: unknown -Source0: %{name}-%{version}.tar.gz - -%define debug_package %{nil} - -Requires: systemd - -%description -Initial compute node configuration - -%package -n computeconfig-standalone -Summary: computeconfig -Group: base - -%description -n computeconfig-standalone -Initial 
compute node configuration - -%package -n computeconfig-subfunction -Summary: computeconfig -Group: base - -%description -n computeconfig-subfunction -Initial compute node configuration - -%define initddir /etc/init.d/ -%define goenableddir /etc/goenabled.d/ -%define systemddir /etc/systemd/system/ - -%prep -%setup - -%build - -%install -make install INITDDIR=%{buildroot}%{initddir} GOENABLEDDIR=%{buildroot}%{goenableddir} SYSTEMDDIR=%{buildroot}%{systemddir} - -%post -n computeconfig-standalone -if [ ! -e $D%{systemddir}/computeconfig.service ]; then - cp $D%{systemddir}/config/computeconfig-standalone.service $D%{systemddir}/computeconfig.service -else - cmp -s $D%{systemddir}/config/computeconfig-standalone.service $D%{systemddir}/computeconfig.service - if [ $? -ne 0 ]; then - rm -f $D%{systemddir}/computeconfig.service - cp $D%{systemddir}/config/computeconfig-standalone.service $D%{systemddir}/computeconfig.service - fi -fi -systemctl enable computeconfig.service - - -%post -n computeconfig-subfunction -if [ ! -e $D%{systemddir}/computeconfig.service ]; then - cp $D%{systemddir}/config/computeconfig-combined.service $D%{systemddir}/computeconfig.service -else - cmp -s $D%{systemddir}/config/computeconfig-combined.service $D%{systemddir}/computeconfig.service - if [ $? 
-ne 0 ]; then - rm -f $D%{systemddir}/computeconfig.service - cp $D%{systemddir}/config/computeconfig-combined.service $D%{systemddir}/computeconfig.service - fi -fi -systemctl enable computeconfig.service - -%clean - -%files -%defattr(-,root,root,-) -%doc LICENSE -%{initddir}/* - -%files -n computeconfig-standalone -%defattr(-,root,root,-) -%dir %{systemddir}/config -%{systemddir}/config/computeconfig-standalone.service -%{goenableddir}/* - -%files -n computeconfig-subfunction -%defattr(-,root,root,-) -%dir %{systemddir}/config -%{systemddir}/config/computeconfig-combined.service diff --git a/config-gate/centos/config-gate.spec b/config-gate/centos/config-gate.spec index a15b4d38e4..b395b3e146 100644 --- a/config-gate/centos/config-gate.spec +++ b/config-gate/centos/config-gate.spec @@ -15,12 +15,12 @@ Requires: systemd %description Startup configuration gate -%package -n %{name}-compute -Summary: config-gate-compute +%package -n %{name}-worker +Summary: config-gate-worker Group: base -%description -n %{name}-compute -Startup compute configuration gate +%description -n %{name}-worker +Startup worker configuration gate %define local_etc_systemd /etc/systemd/system/ @@ -35,8 +35,8 @@ make install SBINDIR=%{buildroot}%{_sbindir} SYSTEMDDIR=%{buildroot}%{local_etc_ %post systemctl enable config.service -%post -n %{name}-compute -systemctl enable compute-config-gate.service +%post -n %{name}-worker +systemctl enable worker-config-gate.service %clean @@ -46,7 +46,7 @@ systemctl enable compute-config-gate.service %{_sbindir}/wait_for_config_init.sh %{local_etc_systemd}/config.service -%files -n %{name}-compute +%files -n %{name}-worker %defattr(-,root,root,-) -%{_sbindir}/wait_for_compute_config_init.sh -%{local_etc_systemd}/compute-config-gate.service +%{_sbindir}/wait_for_worker_config_init.sh +%{local_etc_systemd}/worker-config-gate.service diff --git a/config-gate/files/Makefile b/config-gate/files/Makefile index 6aa2736e7f..f513812f6a 100644 --- 
a/config-gate/files/Makefile +++ b/config-gate/files/Makefile @@ -9,6 +9,6 @@ install: install -d -m 755 $(SBINDIR) install -d -m 755 $(SYSTEMDDIR) install -p -D -m 555 wait_for_config_init.sh $(SBINDIR)/wait_for_config_init.sh - install -p -D -m 555 wait_for_compute_config_init.sh $(SBINDIR)/wait_for_compute_config_init.sh + install -p -D -m 555 wait_for_worker_config_init.sh $(SBINDIR)/wait_for_worker_config_init.sh install -p -D -m 444 config.service $(SYSTEMDDIR)/config.service - install -p -D -m 444 compute-config-gate.service $(SYSTEMDDIR)/compute-config-gate.service + install -p -D -m 444 worker-config-gate.service $(SYSTEMDDIR)/worker-config-gate.service diff --git a/config-gate/files/wait_for_config_init.sh b/config-gate/files/wait_for_config_init.sh index 7f22c31363..670ec99b85 100644 --- a/config-gate/files/wait_for_config_init.sh +++ b/config-gate/files/wait_for_config_init.sh @@ -14,8 +14,8 @@ case $nodetype in controller) SERVICE=controllerconfig.service ;; - compute) - SERVICE=computeconfig.service + worker) + SERVICE=workerconfig.service ;; storage) SERVICE=storageconfig.service diff --git a/config-gate/files/wait_for_compute_config_init.sh b/config-gate/files/wait_for_worker_config_init.sh similarity index 61% rename from config-gate/files/wait_for_compute_config_init.sh rename to config-gate/files/wait_for_worker_config_init.sh index 6a7b771cc6..9f3b18d417 100644 --- a/config-gate/files/wait_for_compute_config_init.sh +++ b/config-gate/files/wait_for_worker_config_init.sh @@ -1,13 +1,13 @@ #!/bin/bash # -# Copyright (c) 2016 Wind River Systems, Inc. +# Copyright (c) 2016-2018 Wind River Systems, Inc. 
# # SPDX-License-Identifier: Apache-2.0 # -# Wait for compute config service +# Wait for worker config service -SERVICE=computeconfig.service +SERVICE=workerconfig.service while :; do systemctl status $SERVICE |grep -q running diff --git a/config-gate/files/compute-config-gate.service b/config-gate/files/worker-config-gate.service similarity index 55% rename from config-gate/files/compute-config-gate.service rename to config-gate/files/worker-config-gate.service index aef64474c9..62ed6f75c7 100644 --- a/config-gate/files/compute-config-gate.service +++ b/config-gate/files/worker-config-gate.service @@ -1,11 +1,11 @@ [Unit] -Description=TIS compute config gate -After=sw-patch.service computeconfig.service +Description=STX worker config gate +After=sw-patch.service workerconfig.service Before=serial-getty@ttyS0.service getty@tty1.service [Service] Type=oneshot -ExecStart=/usr/sbin/wait_for_compute_config_init.sh +ExecStart=/usr/sbin/wait_for_worker_config_init.sh ExecStop= ExecReload= RemainAfterExit=yes diff --git a/controllerconfig/controllerconfig/controllerconfig/backup_restore.py b/controllerconfig/controllerconfig/controllerconfig/backup_restore.py index 5e10b542d6..356c1cfc37 100644 --- a/controllerconfig/controllerconfig/controllerconfig/backup_restore.py +++ b/controllerconfig/controllerconfig/controllerconfig/backup_restore.py @@ -1202,8 +1202,8 @@ def overwrite_iscsi_target_config(): def restore_complete(): """ Restore proper ISCSI configuration file after cinder restore. - Enable compute functionality for AIO system. - :return: True if compute-config-complete is executed + Enable worker functionality for AIO system. + :return: True if worker-config-complete is executed """ if utils.get_system_type() == sysinv_constants.TIS_AIO_BUILD: if not os.path.isfile(restore_system_ready): @@ -1223,21 +1223,21 @@ def restore_complete(): # we use use that. overwrite_iscsi_target_config() - print("\nApplying compute manifests for %s. 
" % + print("\nApplying worker manifests for %s. " % (utils.get_controller_hostname())) print("Node will reboot on completion.") - sysinv.do_compute_config_complete(utils.get_controller_hostname()) + sysinv.do_worker_config_complete(utils.get_controller_hostname()) # show in-progress log on console every 30 seconds # until self reboot or timeout os.remove(restore_system_ready) time.sleep(30) for i in range(1, 10): - print("compute manifest apply in progress ... ") + print("worker manifest apply in progress ... ") time.sleep(30) - raise RestoreFail("Timeout running compute manifests, " + raise RestoreFail("Timeout running worker manifests, " "reboot did not occur") else: @@ -1655,7 +1655,7 @@ def restore_system(backup_file, include_storage_reinstall=False, clone=False): print(textwrap.fill( "Failed to lock at least one node. " + "Please lock the unlocked controller-1 or " + - "compute nodes manually.", 80 + "worker nodes manually.", 80 )) if not clone: diff --git a/controllerconfig/controllerconfig/controllerconfig/sysinv_api.py b/controllerconfig/controllerconfig/controllerconfig/sysinv_api.py index fa2ad9e43a..b0d963db9d 100644 --- a/controllerconfig/controllerconfig/controllerconfig/sysinv_api.py +++ b/controllerconfig/controllerconfig/controllerconfig/sysinv_api.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2014-2017 Wind River Systems, Inc. +# Copyright (c) 2014-2018 Wind River Systems, Inc. 
# # SPDX-License-Identifier: Apache-2.0 # @@ -23,7 +23,7 @@ API_VERSION = 1 HOST_PERSONALITY_NOT_SET = "" HOST_PERSONALITY_UNKNOWN = "unknown" HOST_PERSONALITY_CONTROLLER = "controller" -HOST_PERSONALITY_COMPUTE = "compute" +HOST_PERSONALITY_WORKER = "worker" HOST_PERSONALITY_STORAGE = "storage" # Host Administrative State Constants @@ -87,8 +87,8 @@ class Host(object): # Set personality if host_data['personality'] == "controller": self.personality = HOST_PERSONALITY_CONTROLLER - elif host_data['personality'] == "compute": - self.personality = HOST_PERSONALITY_COMPUTE + elif host_data['personality'] == "worker": + self.personality = HOST_PERSONALITY_WORKER elif host_data['personality'] == "storage": self.personality = HOST_PERSONALITY_STORAGE else: @@ -334,8 +334,8 @@ def get_hosts(admin_token, region_name, personality=None, personality == HOST_PERSONALITY_CONTROLLER): host_list.append(Host(host['hostname'], host)) - elif (host['personality'] == "compute" and - personality == HOST_PERSONALITY_COMPUTE): + elif (host['personality'] == "worker" and + personality == HOST_PERSONALITY_WORKER): host_list.append(Host(host['hostname'], host)) elif (host['personality'] == "storage" and @@ -537,24 +537,24 @@ def get_host_data(hostname): return None -def do_compute_config_complete(hostname): - """ enable compute functionality """ +def do_worker_config_complete(hostname): + """ enable worker functionality """ try: with openstack.OpenStack() as client: hosts = get_hosts(client.admin_token, client.conf['region_name']) for host in hosts: if hostname == host.name: - # Create/apply compute manifests + # Create/apply worker manifests values = { 'action': "subfunction_config" } patch = dict_to_patch(values) - LOG.info("Applying compute manifests: {} [{}]" + LOG.info("Applying worker manifests: {} [{}]" .format(host, patch)) client.sysinv.ihost.update(host.uuid, patch) except Exception as e: - LOG.exception("compute_config_complete failed") + LOG.exception("worker_config_complete 
failed") raise e diff --git a/controllerconfig/controllerconfig/controllerconfig/utils.py b/controllerconfig/controllerconfig/controllerconfig/utils.py index 00fbfc51cc..9b71b9c759 100644 --- a/controllerconfig/controllerconfig/controllerconfig/utils.py +++ b/controllerconfig/controllerconfig/controllerconfig/utils.py @@ -325,7 +325,7 @@ def lag_mode_to_str(lag_mode): def is_combined_load(): - return 'compute' in tsconfig.subfunctions + return 'worker' in tsconfig.subfunctions def get_system_type(): diff --git a/controllerconfig/controllerconfig/scripts/install_clone.py b/controllerconfig/controllerconfig/scripts/install_clone.py index 8f652567c2..2e42c07202 100755 --- a/controllerconfig/controllerconfig/scripts/install_clone.py +++ b/controllerconfig/controllerconfig/scripts/install_clone.py @@ -184,27 +184,27 @@ def update_db(archive_dir, backup_name): shutil.rmtree(tmpdir, ignore_errors=True) -def config_compute(): +def config_worker(): """ - Enable compute functionality for AIO system. - :return: True if compute-config-complete is executed + Enable worker functionality for AIO system. + :return: True if worker-config-complete is executed """ if utils.get_system_type() == si_const.TIS_AIO_BUILD: - console_log("Applying compute manifests for {}. " + console_log("Applying worker manifests for {}. " "Node will reboot on completion." .format(utils.get_controller_hostname())) - sysinv.do_compute_config_complete(utils.get_controller_hostname()) + sysinv.do_worker_config_complete(utils.get_controller_hostname()) time.sleep(30) - # compute-config-complete has no logs to console. So, wait + # worker-config-complete has no logs to console. So, wait # for some time before showing the login prompt. 
for i in range(1, 10): - console_log("compute-config in progress..") + console_log("worker-config in progress..") time.sleep(30) - console_log("Timed out on do_compute_config_complete") - raise CloneFail("Timed out on do_compute_config_complete") + console_log("Timed out on do_worker_config_complete") + raise CloneFail("Timed out on do_worker_config_complete") return True else: - # compute_config_complete is not needed. + # worker_config_complete is not needed. return False @@ -302,8 +302,8 @@ if os.path.exists(INI_FILE): console_log("Images archive installed from [%s]" % clone_name) finalize_install() set_result(clone.OK) - if not config_compute(): - # do cleanup if compute_config_complete is not required + if not config_worker(): + # do cleanup if worker_config_complete is not required cleanup() elif last_result == clone.OK: # Installation completed successfully before last reboot diff --git a/puppet-manifests/src/bin/puppet-manifest-apply.sh b/puppet-manifests/src/bin/puppet-manifest-apply.sh index 29b6852a8c..84cd4ebee2 100755 --- a/puppet-manifests/src/bin/puppet-manifest-apply.sh +++ b/puppet-manifests/src/bin/puppet-manifest-apply.sh @@ -53,11 +53,11 @@ mkdir -p ${PUPPET_TMP}/hieradata cp /etc/puppet/hieradata/global.yaml ${PUPPET_TMP}/hieradata/global.yaml cp /etc/puppet/hieradata/${PERSONALITY}.yaml ${PUPPET_TMP}/hieradata/personality.yaml -# When the compute node is first booted and goes online, sysinv-agent reports +# When the worker node is first booted and goes online, sysinv-agent reports # host CPU inventory which triggers the first runtime manifest apply that updates # the grub. At this time, copying the host file failed due to a timing issue that -# has not yet been fully understood. Subsequent retries worked. -if [ "${PERSONALITY}" = "compute" ]; then +# has not yet been fully understood. Subsequent retries worked. 
+if [ "${PERSONALITY}" = "worker" ]; then n=0 until [ $n -ge 3 ]; do cp -f ${HIERADATA}/${HOST}.yaml ${PUPPET_TMP}/hieradata/host.yaml && break diff --git a/puppet-manifests/src/hieradata/controller.yaml b/puppet-manifests/src/hieradata/controller.yaml index 4a8900d4f2..c7e1336342 100644 --- a/puppet-manifests/src/hieradata/controller.yaml +++ b/puppet-manifests/src/hieradata/controller.yaml @@ -46,7 +46,7 @@ CONFIG_ADMIN_PROJECT_DOMAIN_NAME: Default # mtce -platform::mtce::agent::params::compute_boot_timeout: 720 +platform::mtce::agent::params::worker_boot_timeout: 720 platform::mtce::agent::params::controller_boot_timeout: 1200 platform::mtce::agent::params::heartbeat_period: 100 platform::mtce::agent::params::heartbeat_failure_action: 'fail' diff --git a/puppet-manifests/src/hieradata/compute.yaml b/puppet-manifests/src/hieradata/worker.yaml similarity index 98% rename from puppet-manifests/src/hieradata/compute.yaml rename to puppet-manifests/src/hieradata/worker.yaml index 041d2a4f4f..f1945232a3 100644 --- a/puppet-manifests/src/hieradata/compute.yaml +++ b/puppet-manifests/src/hieradata/worker.yaml @@ -1,4 +1,4 @@ -# compute specific configuration data +# worker specific configuration data --- # vswitch diff --git a/puppet-manifests/src/manifests/compute.pp b/puppet-manifests/src/manifests/worker.pp similarity index 94% rename from puppet-manifests/src/manifests/compute.pp rename to puppet-manifests/src/manifests/worker.pp index efd30f229e..57b3a33587 100644 --- a/puppet-manifests/src/manifests/compute.pp +++ b/puppet-manifests/src/manifests/worker.pp @@ -1,5 +1,5 @@ # -# puppet manifest for compute hosts +# puppet manifest for worker nodes # Exec { @@ -48,7 +48,7 @@ include ::openstack::nova::placement include ::openstack::ceilometer include ::openstack::ceilometer::polling -class { '::platform::config::compute::post': +class { '::platform::config::worker::post': stage => post, } diff --git a/puppet-manifests/src/modules/openstack/manifests/ceilometer.pp 
b/puppet-manifests/src/modules/openstack/manifests/ceilometer.pp index fd7586586d..58587011a3 100644 --- a/puppet-manifests/src/modules/openstack/manifests/ceilometer.pp +++ b/puppet-manifests/src/modules/openstack/manifests/ceilometer.pp @@ -227,7 +227,7 @@ class openstack::ceilometer::polling ( $central_namespace = false } - if (str2bool($::disable_compute_services) or + if (str2bool($::disable_worker_services) or $::platform::kubernetes::params::enabled) { $agent_enable = false $compute_namespace = false @@ -238,7 +238,7 @@ class openstack::ceilometer::polling ( } else { $agent_enable = true - if str2bool($::is_compute_subfunction) { + if str2bool($::is_worker_subfunction) { $pmon_target = "/etc/ceilometer/ceilometer-polling-compute.conf.pmon" $compute_namespace = true } else { diff --git a/puppet-manifests/src/modules/openstack/manifests/neutron.pp b/puppet-manifests/src/modules/openstack/manifests/neutron.pp index 2df8553432..ac07b5cd7f 100644 --- a/puppet-manifests/src/modules/openstack/manifests/neutron.pp +++ b/puppet-manifests/src/modules/openstack/manifests/neutron.pp @@ -195,7 +195,7 @@ class openstack::neutron::agents include ::platform::kubernetes::params - if (str2bool($::disable_compute_services) or + if (str2bool($::disable_worker_services) or $::platform::kubernetes::params::enabled) { $pmon_ensure = absent diff --git a/puppet-manifests/src/modules/platform/lib/facter/disable_compute_services.rb b/puppet-manifests/src/modules/platform/lib/facter/disable_compute_services.rb deleted file mode 100644 index 250c1b13f3..0000000000 --- a/puppet-manifests/src/modules/platform/lib/facter/disable_compute_services.rb +++ /dev/null @@ -1,7 +0,0 @@ -# Returns true if compute services should be disabled - -Facter.add("disable_compute_services") do - setcode do - File.exist?('/var/run/.disable_compute_services') - end -end diff --git a/puppet-manifests/src/modules/platform/lib/facter/disable_worker_services.rb 
b/puppet-manifests/src/modules/platform/lib/facter/disable_worker_services.rb new file mode 100644 index 0000000000..8c40aa1e7d --- /dev/null +++ b/puppet-manifests/src/modules/platform/lib/facter/disable_worker_services.rb @@ -0,0 +1,7 @@ +# Returns true if worker services should be disabled + +Facter.add("disable_worker_services") do + setcode do + File.exist?('/var/run/.disable_worker_services') + end +end diff --git a/puppet-manifests/src/modules/platform/manifests/compute.pp b/puppet-manifests/src/modules/platform/manifests/compute.pp index f150c5afb0..51588a0765 100644 --- a/puppet-manifests/src/modules/platform/manifests/compute.pp +++ b/puppet-manifests/src/modules/platform/manifests/compute.pp @@ -1,19 +1,19 @@ class platform::compute::params ( - $compute_cpu_list = '', + $worker_cpu_list = '', $platform_cpu_list = '', $reserved_vswitch_cores = '', $reserved_platform_cores = '', - $compute_base_reserved = '', + $worker_base_reserved = '', $compute_vswitch_reserved = '', ) { } class platform::compute::config inherits ::platform::compute::params { - file { "/etc/nova/compute_reserved.conf": + file { "/etc/platform/worker_reserved.conf": ensure => 'present', replace => true, - content => template('platform/compute_reserved.conf.erb') + content => template('platform/worker_reserved.conf.erb') } } @@ -88,7 +88,7 @@ class platform::compute::grub::audit } } - file { "/var/run/compute_huge_goenabled": + file { "/var/run/worker_goenabled": ensure => $ensure, owner => 'root', group => 'root', @@ -276,7 +276,7 @@ class platform::compute::pmqos ( $hight_wakeup_cpus = '', ) { - if str2bool($::is_compute_subfunction) and str2bool($::is_lowlatency_subfunction) { + if str2bool($::is_worker_subfunction) and str2bool($::is_lowlatency_subfunction) { $script = "/usr/bin/set-cpu-wakeup-latency.sh" diff --git a/puppet-manifests/src/modules/platform/manifests/config.pp b/puppet-manifests/src/modules/platform/manifests/config.pp index 77eb0fd35d..a705548b28 100644 --- 
a/puppet-manifests/src/modules/platform/manifests/config.pp +++ b/puppet-manifests/src/modules/platform/manifests/config.pp @@ -289,13 +289,13 @@ class platform::config::controller::post } } -class platform::config::compute::post +class platform::config::worker::post { - file { "/etc/platform/.initial_compute_config_complete": + file { "/etc/platform/.initial_worker_config_complete": ensure => present, } - file { "/var/run/.compute_config_complete": + file { "/var/run/.worker_config_complete": ensure => present, } } diff --git a/puppet-manifests/src/modules/platform/manifests/kubernetes.pp b/puppet-manifests/src/modules/platform/manifests/kubernetes.pp index 964e864184..8e20b313b5 100644 --- a/puppet-manifests/src/modules/platform/manifests/kubernetes.pp +++ b/puppet-manifests/src/modules/platform/manifests/kubernetes.pp @@ -285,7 +285,7 @@ class platform::kubernetes::worker } if $enabled { - file { "/var/run/.disable_compute_services": + file { "/var/run/.disable_worker_services": ensure => file, replace => no, } diff --git a/puppet-manifests/src/modules/platform/manifests/mtce.pp b/puppet-manifests/src/modules/platform/manifests/mtce.pp index b38087ef0e..162dcd6c47 100644 --- a/puppet-manifests/src/modules/platform/manifests/mtce.pp +++ b/puppet-manifests/src/modules/platform/manifests/mtce.pp @@ -8,7 +8,7 @@ class platform::mtce::params ( $auth_user_domain = undef, $auth_project_domain = undef, $auth_region = undef, - $compute_boot_timeout = undef, + $worker_boot_timeout = undef, $controller_boot_timeout = undef, $heartbeat_degrade_threshold = undef, $heartbeat_failure_threshold = undef, diff --git a/puppet-manifests/src/modules/platform/manifests/postgresql.pp b/puppet-manifests/src/modules/platform/manifests/postgresql.pp index 371ed42c02..22739eac31 100644 --- a/puppet-manifests/src/modules/platform/manifests/postgresql.pp +++ b/puppet-manifests/src/modules/platform/manifests/postgresql.pp @@ -67,7 +67,7 @@ class platform::postgresql::server ( # work_mem 512 
MB since some ceilometer queries entail extensive # sorting as well as hash joins and hash based aggregation. # checkpoint_segments increased to reduce frequency of checkpoints - if str2bool($::is_compute_subfunction) or str2bool($::is_virtual) { + if str2bool($::is_worker_subfunction) or str2bool($::is_virtual) { # AIO or virtual box # 700 connections needs about 80MB shared buffer # Leave work_mem as the default for vbox and AIO diff --git a/puppet-manifests/src/modules/platform/templates/compute_reserved.conf.erb b/puppet-manifests/src/modules/platform/templates/worker_reserved.conf.erb similarity index 90% rename from puppet-manifests/src/modules/platform/templates/compute_reserved.conf.erb rename to puppet-manifests/src/modules/platform/templates/worker_reserved.conf.erb index 429fded4cf..f482b48af1 100755 --- a/puppet-manifests/src/modules/platform/templates/compute_reserved.conf.erb +++ b/puppet-manifests/src/modules/platform/templates/worker_reserved.conf.erb @@ -5,7 +5,7 @@ # # - This file is managed by Puppet. DO NOT EDIT. ################################################################################ -# COMPUTE Node configuration parameters for reserved memory and physical cores +# WORKER Node configuration parameters for reserved memory and physical cores # used by Base software and VSWITCH. These are resources that libvirt cannot use. # @@ -16,7 +16,7 @@ # validity against the actual number of logical CPU instances in the system. # ################################################################################ -COMPUTE_CPU_LIST=<%= @compute_cpu_list %> +WORKER_CPU_LIST=<%= @worker_cpu_list %> ################################################################################ # @@ -32,10 +32,10 @@ PLATFORM_CPU_LIST=<%= @platform_cpu_list %> # # Example: To reserve 1500MB and 1 core on NUMA node0, and 1500MB and 1 core # on NUMA node1, the variable must be specified as follows. 
-# COMPUTE_BASE_MEMORY=("node0:1500MB:1" "node1:1500MB:1") +# WORKER_BASE_MEMORY=("node0:1500MB:1" "node1:1500MB:1") # ################################################################################ -COMPUTE_BASE_RESERVED=<%= @compute_base_reserved %> +WORKER_BASE_RESERVED=<%= @worker_base_reserved %> ################################################################################ # @@ -68,7 +68,7 @@ COMPUTE_VSWITCH_CORES=<%= @reserved_vswitch_cores %> # # Example: To reserve 1 core on NUMA node0, the variable must be specified # as follows. -# COMPUTE_PLATFORM_CORES=("node0:0") +# WORKER_PLATFORM_CORES=("node0:0") # ################################################################################ -COMPUTE_PLATFORM_CORES=<%= @reserved_platform_cores %> +WORKER_PLATFORM_CORES=<%= @reserved_platform_cores %> diff --git a/puppet-modules-wrs/puppet-mtce/src/mtce/templates/mtc_ini.erb b/puppet-modules-wrs/puppet-mtce/src/mtce/templates/mtc_ini.erb index cbe312b50e..c4496e6a0f 100644 --- a/puppet-modules-wrs/puppet-mtce/src/mtce/templates/mtc_ini.erb +++ b/puppet-modules-wrs/puppet-mtce/src/mtce/templates/mtc_ini.erb @@ -45,7 +45,7 @@ heartbeat_failure_action = <%= @heartbeat_failure_action %> mnfa_threshold = <%= @mnfa_threshold %> [timeouts] -compute_boot_timeout = <%= @compute_boot_timeout %> ; The max time (seconds) that Mtce waits for the mtcAlive +worker_boot_timeout = <%= @worker_boot_timeout %> ; The max time (seconds) that Mtce waits for the mtcAlive controller_boot_timeout = <%= @controller_boot_timeout %> ; message after which it will time out and fail the host. ; Multi-Node Failure Avoidance (MNFA) Lifecycle Timer. 
diff --git a/sysinv/cgts-client/cgts-client/cgtsclient/common/constants.py b/sysinv/cgts-client/cgts-client/cgtsclient/common/constants.py index 47a68eaa26..7f4fefa768 100755 --- a/sysinv/cgts-client/cgts-client/cgtsclient/common/constants.py +++ b/sysinv/cgts-client/cgts-client/cgtsclient/common/constants.py @@ -45,7 +45,7 @@ SB_STATE_CONFIGURING = 'configuring' SB_TASK_NONE = None SB_TASK_RECONFIG_CONTROLLER = 'reconfig-controller' SB_TASK_PROVISION_STORAGE = 'provision-storage' -SB_TASK_RECONFIG_COMPUTE = 'reconfig-compute' +SB_TASK_RECONFIG_WORKER = 'reconfig-worker' SB_TASK_RESIZE_CEPH_MON_LV = 'resize-ceph-mon-lv' SB_TASK_ADD_OBJECT_GATEWAY = 'add-object-gateway' diff --git a/sysinv/cgts-client/cgts-client/cgtsclient/v1/iHost_shell.py b/sysinv/cgts-client/cgts-client/cgtsclient/v1/iHost_shell.py index 9dd88a3bf2..b24c02002e 100755 --- a/sysinv/cgts-client/cgts-client/cgtsclient/v1/iHost_shell.py +++ b/sysinv/cgts-client/cgts-client/cgtsclient/v1/iHost_shell.py @@ -92,7 +92,7 @@ def do_host_upgrade_list(cc, args): help='Hostname of the host') @utils.arg('-p', '--personality', metavar='', - choices=['controller', 'compute', 'storage', 'network', 'profile'], + choices=['controller', 'worker', 'storage', 'network', 'profile'], help='Personality or type of host [REQUIRED]') @utils.arg('-s', '--subfunctions', metavar='', diff --git a/sysinv/cgts-client/cgts-client/cgtsclient/v1/icpu.py b/sysinv/cgts-client/cgts-client/cgtsclient/v1/icpu.py index 2af53b6ef0..a15b68f46c 100644 --- a/sysinv/cgts-client/cgts-client/cgtsclient/v1/icpu.py +++ b/sysinv/cgts-client/cgts-client/cgtsclient/v1/icpu.py @@ -20,24 +20,24 @@ CREATION_ATTRIBUTES = ['ihost_uuid', 'inode_uuid', 'cpu', 'core', 'thread', PLATFORM_CPU_TYPE = "Platform" VSWITCH_CPU_TYPE = "Vswitch" SHARED_CPU_TYPE = "Shared" -VMS_CPU_TYPE = "VMs" +APPLICATION_CPU_TYPE = "Applications" NONE_CPU_TYPE = "None" CPU_TYPE_LIST = [PLATFORM_CPU_TYPE, VSWITCH_CPU_TYPE, - SHARED_CPU_TYPE, VMS_CPU_TYPE, + SHARED_CPU_TYPE, 
APPLICATION_CPU_TYPE, NONE_CPU_TYPE] PLATFORM_CPU_TYPE_FORMAT = _("Platform") VSWITCH_CPU_TYPE_FORMAT = _("vSwitch") SHARED_CPU_TYPE_FORMAT = _("Shared") -VMS_CPU_TYPE_FORMAT = _("VMs") +APPLICATION_CPU_TYPE_FORMAT = _("Applications") NONE_CPU_TYPE_FORMAT = _("None") CPU_TYPE_FORMATS = {PLATFORM_CPU_TYPE: PLATFORM_CPU_TYPE_FORMAT, VSWITCH_CPU_TYPE: VSWITCH_CPU_TYPE_FORMAT, SHARED_CPU_TYPE: SHARED_CPU_TYPE_FORMAT, - VMS_CPU_TYPE: VMS_CPU_TYPE_FORMAT, + APPLICATION_CPU_TYPE: APPLICATION_CPU_TYPE_FORMAT, NONE_CPU_TYPE: NONE_CPU_TYPE_FORMAT} @@ -106,19 +106,19 @@ def check_core_functions(personality, icpus): platform_cores += 1 elif allocated_function == VSWITCH_CPU_TYPE: vswitch_cores += 1 - elif allocated_function == VMS_CPU_TYPE: + elif allocated_function == APPLICATION_CPU_TYPE: vm_cores += 1 error_string = "" if platform_cores == 0: error_string = ("There must be at least one core for %s." % PLATFORM_CPU_TYPE_FORMAT) - elif personality == 'compute' and vswitch_cores == 0: + elif personality == 'worker' and vswitch_cores == 0: error_string = ("There must be at least one core for %s." % VSWITCH_CPU_TYPE_FORMAT) - elif personality == 'compute' and vm_cores == 0: + elif personality == 'worker' and vm_cores == 0: error_string = ("There must be at least one core for %s." 
% - VMS_CPU_TYPE_FORMAT) + APPLICATION_CPU_TYPE_FORMAT) return error_string @@ -191,7 +191,7 @@ def restructure_host_cpu_data(host): cpufunction.socket_cores_number[s] = number_of_cores[f][s] else: if (f == PLATFORM_CPU_TYPE or (hasattr(host, 'subfunctions') - and 'compute' in host.subfunctions)): + and 'worker' in host.subfunctions)): if f != NONE_CPU_TYPE: host.core_assignment.append(cpufunction) for s in range(0, len(host.nodes)): diff --git a/sysinv/cgts-client/cgts-client/cgtsclient/v1/imemory_shell.py b/sysinv/cgts-client/cgts-client/cgtsclient/v1/imemory_shell.py index b20498bc7f..601f198f29 100644 --- a/sysinv/cgts-client/cgts-client/cgtsclient/v1/imemory_shell.py +++ b/sysinv/cgts-client/cgts-client/cgtsclient/v1/imemory_shell.py @@ -39,11 +39,11 @@ def _print_imemory_show(imemory): 'vSwitch Huge Pages: Size (MiB)', ' Total', ' Available', - 'VM Pages (4K): Total', - 'VM Huge Pages (2M): Total', + 'Application Pages (4K): Total', + 'Application Huge Pages (2M): Total', ' Total Pending', ' Available', - 'VM Huge Pages (1G): Total', + 'Application Huge Pages (1G): Total', ' Total Pending', ' Available', 'uuid', 'ihost_uuid', 'inode_uuid', @@ -157,7 +157,7 @@ def do_host_memory_list(cc, args): metavar='<1G hugepages number>', help='The number of 1G vm huge pages for the numa node') def do_host_memory_modify(cc, args): - """Modify platform reserved and/or libvirt vm huge page memory attributes for compute nodes.""" + """Modify platform reserved and/or application huge page memory attributes for worker nodes.""" rwfields = ['platform_reserved_mib', 'vm_hugepages_nr_2M_pending', diff --git a/sysinv/cgts-client/cgts-client/cgtsclient/v1/iprofile_shell.py b/sysinv/cgts-client/cgts-client/cgtsclient/v1/iprofile_shell.py index f8dce4238b..563dc0ec63 100644 --- a/sysinv/cgts-client/cgts-client/cgtsclient/v1/iprofile_shell.py +++ b/sysinv/cgts-client/cgts-client/cgtsclient/v1/iprofile_shell.py @@ -177,7 +177,7 @@ def get_cpuprofile_data(cc, iprofile): 
iprofile.platform_cores = get_core_list_str(iprofile, icpu_utils.PLATFORM_CPU_TYPE) iprofile.vswitch_cores = get_core_list_str(iprofile, icpu_utils.VSWITCH_CPU_TYPE) iprofile.shared_cores = get_core_list_str(iprofile, icpu_utils.SHARED_CPU_TYPE) - iprofile.vms_cores = get_core_list_str(iprofile, icpu_utils.VMS_CPU_TYPE) + iprofile.vms_cores = get_core_list_str(iprofile, icpu_utils.APPLICATION_CPU_TYPE) def get_core_list_str(iprofile, function): @@ -204,7 +204,7 @@ def do_cpuprofile_list(cc, args): profile.shared_cores = get_core_list_str(profile, icpu_utils.SHARED_CPU_TYPE) profile.vms_cores = get_core_list_str(profile, - icpu_utils.VMS_CPU_TYPE) + icpu_utils.APPLICATION_CPU_TYPE) field_labels = ['uuid', 'name', 'processors', 'phy cores per proc', 'hyperthreading', diff --git a/sysinv/cgts-client/cgts-client/cgtsclient/v1/pci_device_shell.py b/sysinv/cgts-client/cgts-client/cgtsclient/v1/pci_device_shell.py index 14572e680e..8e2329a405 100644 --- a/sysinv/cgts-client/cgts-client/cgtsclient/v1/pci_device_shell.py +++ b/sysinv/cgts-client/cgts-client/cgtsclient/v1/pci_device_shell.py @@ -94,7 +94,7 @@ def do_host_device_list(cc, args): metavar='', help='The enabled status of the device') def do_host_device_modify(cc, args): - """Modify device availability for compute nodes.""" + """Modify device availability for worker nodes.""" rwfields = ['enabled', 'name'] diff --git a/sysinv/sysinv/sysinv/sysinv/agent/manager.py b/sysinv/sysinv/sysinv/sysinv/agent/manager.py index 55d39a6a7b..d7a8ed280d 100644 --- a/sysinv/sysinv/sysinv/sysinv/agent/manager.py +++ b/sysinv/sysinv/sysinv/sysinv/agent/manager.py @@ -181,9 +181,9 @@ class AgentManager(service.PeriodicService): def _update_interface_irq_affinity(self, interface_list): cpus = {} platform_cpulist = '0' - with open('/etc/nova/compute_reserved.conf', 'r') as infile: + with open('/etc/platform/worker_reserved.conf', 'r') as infile: for line in infile: - if "COMPUTE_PLATFORM_CORES" in line: + if "WORKER_PLATFORM_CORES" in 
line: val = line.split("=") cores = val[1].strip('\n')[1:-1] for n in cores.split(): @@ -863,7 +863,7 @@ class AgentManager(service.PeriodicService): LOG.exception("Sysinv Agent exception updating ilvg conductor.") pass - if constants.COMPUTE in self.subfunctions_list_get(): + if constants.WORKER in self.subfunctions_list_get(): platform_interfaces = [] # retrieve the mgmt and infra interfaces and associated numa nodes try: @@ -932,8 +932,8 @@ class AgentManager(service.PeriodicService): return: Bool whether subfunctions configuration is completed. """ if (constants.CONTROLLER in subfunctions_list and - constants.COMPUTE in subfunctions_list): - if not os.path.exists(tsc.INITIAL_COMPUTE_CONFIG_COMPLETE): + constants.WORKER in subfunctions_list): + if not os.path.exists(tsc.INITIAL_WORKER_CONFIG_COMPLETE): self._subfunctions_configured = False return False @@ -1011,8 +1011,8 @@ class AgentManager(service.PeriodicService): if constants.CONTROLLER in subfunctions: if not os.path.isfile(tsc.INITIAL_CONTROLLER_CONFIG_COMPLETE): return False - if constants.COMPUTE in subfunctions: - if not os.path.isfile(tsc.INITIAL_COMPUTE_CONFIG_COMPLETE): + if constants.WORKER in subfunctions: + if not os.path.isfile(tsc.INITIAL_WORKER_CONFIG_COMPLETE): return False if constants.STORAGE in subfunctions: if not os.path.isfile(tsc.INITIAL_STORAGE_CONFIG_COMPLETE): @@ -1131,7 +1131,7 @@ class AgentManager(service.PeriodicService): subfunctions_list = self.subfunctions_list_get() if ((constants.CONTROLLER in subfunctions_list) and - (constants.COMPUTE in subfunctions_list)): + (constants.WORKER in subfunctions_list)): if self.subfunctions_configured(subfunctions_list) and \ not self._wait_for_nova_lvg(icontext, rpcapi, self._ihost_uuid): @@ -1499,7 +1499,7 @@ class AgentManager(service.PeriodicService): for subfunction in self.subfunctions_list_get(): # We need to find the subfunction that matches the personality - # being requested. e.g. 
in AIO systems if we request a compute + # being requested. e.g. in AIO systems if we request a worker # personality we should apply the manifest with that # personality if subfunction in personalities: diff --git a/sysinv/sysinv/sysinv/sysinv/agent/node.py b/sysinv/sysinv/sysinv/sysinv/agent/node.py index 37ea2fd222..a17dd50e1b 100644 --- a/sysinv/sysinv/sysinv/sysinv/agent/node.py +++ b/sysinv/sysinv/sysinv/sysinv/agent/node.py @@ -43,10 +43,10 @@ SIZE_1G_MB = int(SIZE_1G_KB / SIZE_KB) # Defines the minimum size of memory for a controller node in megabyte units CONTROLLER_MIN_MB = 6000 -# Defines the minimum size of memory for a compute node in megabyte units +# Defines the minimum size of memory for a worker node in megabyte units COMPUTE_MIN_MB = 1600 -# Defines the minimum size of memory for a secondary compute node in megabyte +# Defines the minimum size of memory for a secondary worker node in megabyte # units COMPUTE_MIN_NON_0_MB = 500 @@ -300,19 +300,19 @@ class NodeOperator(object): imemory = [] - initial_compute_config_completed = \ - os.path.exists(tsc.INITIAL_COMPUTE_CONFIG_COMPLETE) + initial_worker_config_completed = \ + os.path.exists(tsc.INITIAL_WORKER_CONFIG_COMPLETE) # check if it is initial report before the huge pages are allocated - initial_report = not initial_compute_config_completed + initial_report = not initial_worker_config_completed - # do not send report if the initial compute config is completed and - # compute config has not finished, i.e.during subsequent + # do not send report if the initial worker config is completed and + # worker config has not finished, i.e.during subsequent # reboot before the manifest allocates the huge pages - compute_config_completed = \ - os.path.exists(tsc.VOLATILE_COMPUTE_CONFIG_COMPLETE) - if (initial_compute_config_completed and - not compute_config_completed): + worker_config_completed = \ + os.path.exists(tsc.VOLATILE_WORKER_CONFIG_COMPLETE) + if (initial_worker_config_completed and + not 
worker_config_completed): return imemory for node in range(self.num_nodes): @@ -461,14 +461,14 @@ class NodeOperator(object): LOG.error("Failed to execute (%s) OS error (%d)", cmd, e.errno) - # need to multiply total_mb by 1024 to match compute_huge + # need to multiply total_mb by 1024 node_total_kb = total_hp_mb * SIZE_KB + free_kb + pss_mb * SIZE_KB - # Read base memory from compute_reserved.conf + # Read base memory from worker_reserved.conf base_mem_mb = 0 - with open('/etc/nova/compute_reserved.conf', 'r') as infile: + with open('/etc/platform/worker_reserved.conf', 'r') as infile: for line in infile: - if "COMPUTE_BASE_RESERVED" in line: + if "WORKER_BASE_RESERVED" in line: val = line.split("=") base_reserves = val[1].strip('\n')[1:-1] for reserve in base_reserves.split(): @@ -585,19 +585,13 @@ class NodeOperator(object): return imemory def inodes_get_imemory(self): - '''Enumerate logical memory topology based on: - if CONF.compute_hugepages: - self._inode_get_memory_hugepages() - else: - self._inode_get_memory_nonhugepages() - + '''Collect logical memory topology :param self :returns list of memory nodes and attributes ''' imemory = [] - # if CONF.compute_hugepages: - if os.path.isfile("/etc/nova/compute_reserved.conf"): + if os.path.isfile("/etc/platform/worker_reserved.conf"): imemory = self._inode_get_memory_hugepages() else: imemory = self._inode_get_memory_nonhugepages() diff --git a/sysinv/sysinv/sysinv/sysinv/agent/rpcapi.py b/sysinv/sysinv/sysinv/sysinv/agent/rpcapi.py index 9879bbd604..e854d9243b 100644 --- a/sysinv/sysinv/sysinv/sysinv/agent/rpcapi.py +++ b/sysinv/sysinv/sysinv/sysinv/agent/rpcapi.py @@ -75,7 +75,7 @@ class AgentAPI(sysinv.openstack.common.rpc.proxy.RpcProxy): :returns: none ... uses asynchronous cast(). """ # fanout / broadcast message to all inventory agents - # to change systemname on all nodes ... standby controller and compute nodes + # to change systemname on all nodes ... 
standby controller and worker nodes LOG.debug("AgentApi.configure_isystemname: fanout_cast: sending systemname to agent") retval = self.fanout_cast(context, self.make_msg('configure_isystemname', systemname=systemname)) diff --git a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/cpu.py b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/cpu.py index 79e6a429a7..dd5ba0afbd 100644 --- a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/cpu.py +++ b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/cpu.py @@ -481,8 +481,8 @@ def _check_host(ihost): elif ihost.administrative != constants.ADMIN_LOCKED and not \ utils.is_host_simplex_controller(ihost): raise wsme.exc.ClientSideError(_('Host must be locked.')) - if constants.COMPUTE not in ihost.subfunctions: - raise wsme.exc.ClientSideError(_('Can only modify compute node cores.')) + if constants.WORKER not in ihost.subfunctions: + raise wsme.exc.ClientSideError(_('Can only modify worker node cores.')) def _update_vswitch_cpu_counts(host, cpu, counts, capabilities=None): @@ -511,7 +511,7 @@ def _update_vswitch_cpu_counts(host, cpu, counts, capabilities=None): count *= 2 counts[s][constants.VSWITCH_FUNCTION] = count # let the remaining values grow/shrink dynamically - counts[s][constants.VM_FUNCTION] = 0 + counts[s][constants.APPLICATION_FUNCTION] = 0 counts[s][constants.NO_FUNCTION] = 0 return counts @@ -543,7 +543,7 @@ def _update_shared_cpu_counts(host, cpu, counts, capabilities=None): count *= 2 counts[s][constants.SHARED_FUNCTION] = count # let the remaining values grow/shrink dynamically - counts[s][constants.VM_FUNCTION] = 0 + counts[s][constants.APPLICATION_FUNCTION] = 0 counts[s][constants.NO_FUNCTION] = 0 return counts @@ -573,7 +573,7 @@ def _update_platform_cpu_counts(host, cpu, counts, capabilities=None): count *= 2 counts[s][constants.PLATFORM_FUNCTION] = count # let the remaining values grow/shrink dynamically - counts[s][constants.VM_FUNCTION] = 0 + counts[s][constants.APPLICATION_FUNCTION] = 0 
counts[s][constants.NO_FUNCTION] = 0 return counts diff --git a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/cpu_utils.py b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/cpu_utils.py index df09a6e857..3b2722866c 100644 --- a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/cpu_utils.py +++ b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/cpu_utils.py @@ -15,7 +15,7 @@ CORE_FUNCTIONS = [ constants.PLATFORM_FUNCTION, constants.VSWITCH_FUNCTION, constants.SHARED_FUNCTION, - constants.VM_FUNCTION, + constants.APPLICATION_FUNCTION, constants.NO_FUNCTION ] @@ -64,7 +64,7 @@ class CpuProfile(object): cur_processor.vswitch += 1 elif cpu.allocated_function == constants.SHARED_FUNCTION: cur_processor.shared += 1 - elif cpu.allocated_function == constants.VM_FUNCTION: + elif cpu.allocated_function == constants.APPLICATION_FUNCTION: cur_processor.vms += 1 self.number_of_cpu = len(self.processors) @@ -108,12 +108,12 @@ class HostCpuProfile(CpuProfile): if platform_cores == 0: error_string = "There must be at least one core for %s." % \ constants.PLATFORM_FUNCTION - elif constants.COMPUTE in self.subfunctions and vswitch_cores == 0: + elif constants.WORKER in self.subfunctions and vswitch_cores == 0: error_string = "There must be at least one core for %s." % \ constants.VSWITCH_FUNCTION - elif constants.COMPUTE in self.subfunctions and vm_cores == 0: + elif constants.WORKER in self.subfunctions and vm_cores == 0: error_string = "There must be at least one core for %s." % \ - constants.VM_FUNCTION + constants.APPLICATION_FUNCTION return error_string @@ -140,12 +140,12 @@ def check_profile_core_functions(personality, profile): if platform_cores == 0: error_string = "There must be at least one core for %s." % \ constants.PLATFORM_FUNCTION - elif constants.COMPUTE in personality and vswitch_cores == 0: + elif constants.WORKER in personality and vswitch_cores == 0: error_string = "There must be at least one core for %s." 
% \ constants.VSWITCH_FUNCTION - elif constants.COMPUTE in personality and vm_cores == 0: + elif constants.WORKER in personality and vm_cores == 0: error_string = "There must be at least one core for %s." % \ - constants.VM_FUNCTION + constants.APPLICATION_FUNCTION return error_string @@ -162,26 +162,26 @@ def check_core_functions(personality, icpus): vswitch_cores += 1 elif allocated_function == constants.SHARED_FUNCTION: shared_cores += 1 - elif allocated_function == constants.VM_FUNCTION: + elif allocated_function == constants.APPLICATION_FUNCTION: vm_cores += 1 error_string = "" if platform_cores == 0: error_string = "There must be at least one core for %s." % \ constants.PLATFORM_FUNCTION - elif constants.COMPUTE in personality and vswitch_cores == 0: + elif constants.WORKER in personality and vswitch_cores == 0: error_string = "There must be at least one core for %s." % \ constants.VSWITCH_FUNCTION - elif constants.COMPUTE in personality and vm_cores == 0: + elif constants.WORKER in personality and vm_cores == 0: error_string = "There must be at least one core for %s." % \ - constants.VM_FUNCTION + constants.APPLICATION_FUNCTION return error_string def get_default_function(host): """Return the default function to be assigned to cpus on this host""" - if constants.COMPUTE in host.subfunctions: - return constants.VM_FUNCTION + if constants.WORKER in host.subfunctions: + return constants.APPLICATION_FUNCTION return constants.PLATFORM_FUNCTION @@ -265,14 +265,14 @@ def check_core_allocations(host, cpu_counts, func): total_shared_cores += shared_cores if func.lower() == constants.PLATFORM_FUNCTION.lower(): if ((constants.CONTROLLER in host.subfunctions) and - (constants.COMPUTE in host.subfunctions)): + (constants.WORKER in host.subfunctions)): if total_platform_cores < 2: return "%s must have at least two cores." % \ constants.PLATFORM_FUNCTION elif total_platform_cores == 0: return "%s must have at least one core." 
% \ constants.PLATFORM_FUNCTION - if constants.COMPUTE in (host.subfunctions or host.personality): + if constants.WORKER in (host.subfunctions or host.personality): if func.lower() == constants.VSWITCH_FUNCTION.lower(): if host.hyperthreading: total_physical_cores = total_vswitch_cores / 2 @@ -287,7 +287,7 @@ def check_core_allocations(host, cpu_counts, func): reserved_for_vms = len(host.cpus) - total_platform_cores - total_vswitch_cores if reserved_for_vms <= 0: return "There must be at least one unused core for %s." % \ - constants. VM_FUNCTION + constants.APPLICATION_FUNCTION else: if total_platform_cores != len(host.cpus): return "All logical cores must be reserved for platform use" diff --git a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/disk.py b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/disk.py index ec5d82cef8..9508d6f895 100644 --- a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/disk.py +++ b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/disk.py @@ -403,12 +403,12 @@ class DiskController(rest.RestController): def _semantic_checks_format(idisk): ihost_uuid = idisk.get('ihost_uuid') - # Check the disk belongs to a controller or compute host. + # Check the disk belongs to a controller or worker host. ihost = pecan.request.dbapi.ihost_get(ihost_uuid) - if ihost.personality not in [constants.CONTROLLER, constants.COMPUTE]: + if ihost.personality not in [constants.CONTROLLER, constants.WORKER]: raise wsme.exc.ClientSideError( _("ERROR: Host personality must be a one of %s, %s]") % - (constants.CONTROLLER, constants.COMPUTE)) + (constants.CONTROLLER, constants.WORKER)) # Check disk is not the rootfs disk. 
capabilities = idisk['capabilities'] diff --git a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/host.py b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/host.py index d2a1ece01f..c6420a9677 100644 --- a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/host.py +++ b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/host.py @@ -243,7 +243,7 @@ class HostStatesController(rest.RestController): rank = 1 elif function.lower() == constants.VSWITCH_FUNCTION.lower(): rank = 2 - elif function.lower() == constants.VM_FUNCTION.lower(): + elif function.lower() == constants.APPLICATION_FUNCTION.lower(): rank = 3 else: rank = 4 @@ -535,7 +535,7 @@ class Host(base.APIBase): "Represent install state extra information if there is any" iscsi_initiator_name = wtypes.text - "The iscsi initiator name (only used for compute hosts)" + "The iscsi initiator name (only used for worker hosts)" def __init__(self, **kwargs): self.fields = objects.host.fields.keys() @@ -786,10 +786,10 @@ class Host(base.APIBase): bookmark=True) ] # Don't expose the vsc_controllers field if we are not configured with - # the nuage_vrs vswitch or we are not a compute node. + # the nuage_vrs vswitch or we are not a worker node. 
vswitch_type = utils.get_vswitch_type() if (vswitch_type != constants.VSWITCH_TYPE_NUAGE_VRS or - uhost.personality != constants.COMPUTE): + uhost.personality != constants.WORKER): uhost.vsc_controllers = wtypes.Unset uhost.peers = None @@ -1273,7 +1273,7 @@ class HostController(rest.RestController): ihost_dict.get('personality') not in [constants.STORAGE, constants.CONTROLLER]): raise wsme.exc.ClientSideError(_( - "Host-add Rejected: Cannot add a compute host without " + "Host-add Rejected: Cannot add a worker host without " "specifying a mgmt_ip when static address allocation is " "configured.")) @@ -1672,7 +1672,7 @@ class HostController(rest.RestController): rank = 0 elif host.personality == constants.STORAGE: rank = 1 - elif host.personality == constants.COMPUTE: + elif host.personality == constants.WORKER: rank = 2 else: rank = 3 @@ -2334,7 +2334,7 @@ class HostController(rest.RestController): if (ihost.hostname and ihost.personality and ihost.invprovision and ihost.invprovision == constants.PROVISIONED and - (constants.COMPUTE in ihost.subfunctions)): + (constants.WORKER in ihost.subfunctions)): # wait for VIM signal return @@ -2511,7 +2511,7 @@ class HostController(rest.RestController): # If this is a simplex system skip this check, there's no other nodes if simplex: pass - elif rpc_ihost.personality == constants.COMPUTE: + elif rpc_ihost.personality == constants.WORKER: self._check_personality_load(constants.CONTROLLER, new_target_load) self._check_personality_load(constants.STORAGE, new_target_load) elif rpc_ihost.personality == constants.STORAGE: @@ -2601,7 +2601,7 @@ class HostController(rest.RestController): elif upgrade.state == constants.UPGRADE_ABORTING_ROLLBACK: if rpc_ihost.hostname == constants.CONTROLLER_0_HOSTNAME: # Before we downgrade controller-0 during a rollback/reinstall - # we check that all other compute/storage nodes are locked and + # we check that all other worker/storage nodes are locked and # offline. 
We also disable the storage monitor on controller-1 # and set a flag on controller-1 to indicate we are in a # rollback. When controller-0 comes up it will check for this @@ -2620,7 +2620,7 @@ class HostController(rest.RestController): else: # Enforce downgrade order if rpc_ihost.personality == constants.CONTROLLER: - self._check_personality_load(constants.COMPUTE, + self._check_personality_load(constants.WORKER, new_target_load) self._check_personality_load(constants.STORAGE, new_target_load) @@ -2628,11 +2628,11 @@ class HostController(rest.RestController): self._check_host_load(constants.CONTROLLER_0_HOSTNAME, new_target_load) elif rpc_ihost.personality == constants.STORAGE: - self._check_personality_load(constants.COMPUTE, + self._check_personality_load(constants.WORKER, new_target_load) if rpc_ihost.hostname == constants.STORAGE_0_HOSTNAME: self._check_storage_downgrade(new_target_load) - # else we should be a compute node, no need to check other nodes + # else we should be a worker node, no need to check other nodes # Check upgrade state if rpc_ihost.hostname in [constants.CONTROLLER_0_HOSTNAME, @@ -2684,12 +2684,12 @@ class HostController(rest.RestController): def _semantic_check_rollback(self): hosts = pecan.request.dbapi.ihost_get_list() for host in hosts: - if host.personality not in [constants.COMPUTE, constants.STORAGE]: + if host.personality not in [constants.WORKER, constants.STORAGE]: continue if host.administrative != constants.ADMIN_LOCKED or \ host.availability != constants.AVAILABILITY_OFFLINE: raise wsme.exc.ClientSideError( - _("All compute and storage hosts must be locked and " + _("All worker and storage hosts must be locked and " "offline before this operation can proceed")) def _check_personality_load(self, personality, load): @@ -2910,7 +2910,7 @@ class HostController(rest.RestController): def _validate_hostname(self, hostname, personality): - if personality and personality == constants.COMPUTE: + if personality and personality == 
constants.WORKER: # Fix of invalid hostnames err_tl = 'Name restricted to at most 255 characters.' err_ic = 'Name may only contain letters, ' \ @@ -2920,9 +2920,9 @@ class HostController(rest.RestController): raise wsme.exc.ClientSideError(_(err_ic)) if len(hostname) > 255: raise wsme.exc.ClientSideError(_(err_tl)) - non_compute_hosts = ([constants.CONTROLLER_0_HOSTNAME, + non_worker_hosts = ([constants.CONTROLLER_0_HOSTNAME, constants.CONTROLLER_1_HOSTNAME]) - if (hostname and (hostname in non_compute_hosts) or + if (hostname and (hostname in non_worker_hosts) or hostname.startswith(constants.STORAGE_HOSTNAME)): raise wsme.exc.ClientSideError( @@ -2951,8 +2951,8 @@ class HostController(rest.RestController): (hostname, personality))) @staticmethod - def _check_compute(patched_ihost, hostupdate=None): - # Check for valid compute node setup + def _check_worker(patched_ihost, hostupdate=None): + # Check for valid worker node setup hostname = patched_ihost.get('hostname') or "" if not hostname: @@ -2960,12 +2960,12 @@ class HostController(rest.RestController): _("Host %s of personality %s, must be provisioned with a hostname." % (patched_ihost.get('uuid'), patched_ihost.get('personality')))) - non_compute_hosts = ([constants.CONTROLLER_0_HOSTNAME, + non_worker_hosts = ([constants.CONTROLLER_0_HOSTNAME, constants.CONTROLLER_1_HOSTNAME]) - if (hostname in non_compute_hosts or + if (hostname in non_worker_hosts or hostname.startswith(constants.STORAGE_HOSTNAME)): raise wsme.exc.ClientSideError( - _("Hostname %s is not allowed for personality 'compute'. " + _("Hostname %s is not allowed for personality 'worker'. " "Please check hostname and personality." % hostname)) def _controller_storage_node_setup(self, patched_ihost, hostupdate=None): @@ -3248,7 +3248,7 @@ class HostController(rest.RestController): data_interface_configured = True if not data_interface_configured: - msg = _("Can not unlock a compute host without data interfaces. 
" + msg = _("Can not unlock a worker host without data interfaces. " "Add at least one data interface before re-attempting " "this command.") raise wsme.exc.ClientSideError(msg) @@ -3275,7 +3275,7 @@ class HostController(rest.RestController): address_count += len(addresses) if address_count > 1: - msg = _("Can not unlock a compute host with multiple data " + msg = _("Can not unlock a worker host with multiple data " "addresses while in SDN mode.") raise wsme.exc.ClientSideError(msg) @@ -3292,7 +3292,7 @@ class HostController(rest.RestController): # Check whether the vsc_controllers have been configured if not ihost['vsc_controllers']: raise wsme.exc.ClientSideError( - _("Can not unlock compute host %s without " + _("Can not unlock worker host %s without " "vsc_controllers. Action: Configure " "vsc_controllers for this host prior to unlock." % ihost['hostname'])) @@ -3316,7 +3316,7 @@ class HostController(rest.RestController): self.routes._check_reachable_gateway( route['interface_id'], route) except exception.RouteGatewayNotReachable: - msg = _("Can not unlock a compute host with routes that are " + msg = _("Can not unlock a worker host with routes that are " "not reachable via a local IP address. Add an IP " "address in the same subnet as each route gateway " "address before re-attempting this command.") @@ -3364,7 +3364,7 @@ class HostController(rest.RestController): section=section) neutron_parameters = neutron_parameters + parm_list except NoResultFound: - msg = _("Cannot unock a compute host without %s->%s " + msg = _("Cannot unock a worker host without %s->%s " ",SDN service parameters being configured. " "Add appropriate service parameters before " "re-attempting this command." % @@ -3389,7 +3389,7 @@ class HostController(rest.RestController): found = True break if not found: - msg = _("Cannot unlock a compute host without " + msg = _("Cannot unlock a worker host without " "\"%s\" SDN service parameter configured. 
" "Add service parameter before re-attempting " "this command." % sdn_param) @@ -3598,7 +3598,7 @@ class HostController(rest.RestController): (hostupdate.displayid, action)) # Semantic Check: Auto-Provision: Reset, Reboot or Power-On case - if ((cutils.host_has_function(ihost, constants.COMPUTE)) and + if ((cutils.host_has_function(ihost, constants.WORKER)) and (ihost['administrative'] == constants.ADMIN_LOCKED) and ((patched_ihost['action'] == constants.RESET_ACTION) or (patched_ihost['action'] == constants.REBOOT_ACTION) or @@ -3756,10 +3756,10 @@ class HostController(rest.RestController): """ # Don't expose the vsc_controllers field if we are not configured with - # the nuage_vrs vswitch or we are not a compute node. + # the nuage_vrs vswitch or we are not a worker node. vswitch_type = utils.get_vswitch_type() if (vswitch_type != constants.VSWITCH_TYPE_NUAGE_VRS or - ihost['personality'] != constants.COMPUTE): + ihost['personality'] != constants.WORKER): raise wsme.exc.ClientSideError( _("The vsc_controllers property is not applicable to this " "host.")) @@ -4037,8 +4037,8 @@ class HostController(rest.RestController): def _semantic_check_nova_local_storage(ihost_uuid, personality): """ Perform semantic checking for nova local storage - :param ihost_uuid: uuid of host with compute functionality - :param personality: personality of host with compute functionality + :param ihost_uuid: uuid of host with worker functionality + :param personality: personality of host with worker functionality """ # query volume groups @@ -4055,7 +4055,7 @@ class HostController(rest.RestController): if nova_local_storage_lvg: if nova_local_storage_lvg.vg_state == constants.LVG_DEL: raise wsme.exc.ClientSideError( - _("A host with compute functionality requires a " + _("A host with worker functionality requires a " "nova-local volume group prior to being enabled. It is " "currently set to be removed on unlock. 
Please update " "the storage settings for the host.")) @@ -4073,7 +4073,7 @@ class HostController(rest.RestController): if not lvg_has_pvs: raise wsme.exc.ClientSideError( - _("A host with compute functionality requires a " + _("A host with worker functionality requires a " "nova-local volume group prior to being enabled." "The nova-local volume group does not contain any " "physical volumes in the adding or provisioned " @@ -4087,18 +4087,18 @@ class HostController(rest.RestController): constants.LVG_NOVA_BACKING_IMAGE, constants.LVG_NOVA_BACKING_REMOTE]: raise wsme.exc.ClientSideError( - _("A host with compute functionality and a " + _("A host with worker functionality and a " "nova-local volume group requires that a valid " "instance backing is configured. ")) else: - # This method is only called with hosts that have a compute + # This method is only called with hosts that have a worker # subfunction and is locked or if subfunction_config action is # being called. Without a nova-local volume group, prevent # unlocking. if personality == constants.CONTROLLER: - host_description = 'controller with compute functionality' + host_description = 'controller with worker functionality' else: - host_description = 'compute' + host_description = 'worker' msg = _('A %s requires a nova-local volume group prior to being ' 'enabled. Please update the storage settings for the ' @@ -4109,7 +4109,7 @@ class HostController(rest.RestController): @staticmethod def _semantic_check_restore_complete(ihost): """ - During a restore procedure, checks compute nodes can be unlocked + During a restore procedure, checks worker nodes can be unlocked only after running "config_controller --restore-complete" """ if os.path.isfile(tsc.RESTORE_SYSTEM_FLAG): @@ -4123,13 +4123,13 @@ class HostController(rest.RestController): @staticmethod def _semantic_check_cgts_storage(ihost_uuid, personality): """ - Perform semantic checking for cgts storage on compute hosts. 
- CGTS VG on computes used for kubernetes docker lv only at this time. - :param ihost_uuid: uuid of host with compute functionality - :param personality: personality of host with compute functionality + Perform semantic checking for cgts storage on worker hosts. + CGTS VG on workers used for kubernetes docker lv only at this time. + :param ihost_uuid: uuid of host with worker functionality + :param personality: personality of host with worker functionality """ - if personality != constants.COMPUTE: + if personality != constants.WORKER: return # query volume groups @@ -4145,7 +4145,7 @@ class HostController(rest.RestController): if cgts_local_storage_lvg.vg_state == constants.LVG_DEL: raise wsme.exc.ClientSideError( _("With kubernetes configured, " - "a compute host requires a " + "a worker host requires a " "cgts volume group prior to being enabled. It is " "currently set to be removed on unlock. Please update " "the storage settings for the host.")) @@ -4165,19 +4165,19 @@ class HostController(rest.RestController): if not lvg_has_pvs: raise wsme.exc.ClientSideError( _("With kubernetes configured, " - "a compute host requires a " + "a worker host requires a " "cgts volume group prior to being enabled." "The cgts volume group does not contain any " "physical volumes in the adding or provisioned " "state.")) else: - # This method is only called with hosts that have a compute + # This method is only called with hosts that have a worker # subfunction and is locked or if subfunction_config action is # being called. Without a cgts volume group, prevent # unlocking. msg = _('With kubernetes configured, ' - 'a compute host requires a cgts volume group prior to being ' + 'a worker host requires a cgts volume group prior to being ' 'enabled. 
Please update the storage settings for the ' 'host.') @@ -4494,19 +4494,19 @@ class HostController(rest.RestController): if backend.task == constants.SB_TASK_PROVISION_STORAGE: if HostController._check_provisioned_storage_hosts(): api.storage_backend_update(backend.uuid, { - 'task': constants.SB_TASK_RECONFIG_COMPUTE + 'task': constants.SB_TASK_RECONFIG_WORKER }) - # update manifest for all online/enabled compute nodes - # live apply new ceph manifest for all compute nodes that + # update manifest for all online/enabled worker nodes + # live apply new ceph manifest for all worker nodes that # are online/enabled. The rest will pickup when unlock LOG.info( - 'Apply new Ceph manifest to provisioned compute nodes.' + 'Apply new Ceph manifest to provisioned worker nodes.' ) - pecan.request.rpcapi.config_compute_for_ceph( + pecan.request.rpcapi.config_worker_for_ceph( pecan.request.context ) # mark all tasks completed after updating the manifests for - # all compute nodes. + # all worker nodes. api.storage_backend_update(backend.uuid, {'task': None}) elif backend.task == constants.SB_TASK_RESIZE_CEPH_MON_LV: @@ -4633,8 +4633,8 @@ class HostController(rest.RestController): # check the subfunctions are updated properly LOG.info("hostupdate.ihost_patch.subfunctions %s" % hostupdate.ihost_patch['subfunctions']) - elif hostupdate.ihost_patch['personality'] == constants.COMPUTE: - self._check_compute(hostupdate.ihost_patch, hostupdate) + elif hostupdate.ihost_patch['personality'] == constants.WORKER: + self._check_worker(hostupdate.ihost_patch, hostupdate) else: LOG.error("Unexpected personality: %s" % hostupdate.ihost_patch['personality']) @@ -4660,12 +4660,12 @@ class HostController(rest.RestController): "Host %s must be deleted and re-added in order to change " "the subfunctions." 
% hostupdate.ihost_orig['hostname'])) - if hostupdate.ihost_patch['personality'] == constants.COMPUTE: - valid_subfunctions = (constants.COMPUTE, + if hostupdate.ihost_patch['personality'] == constants.WORKER: + valid_subfunctions = (constants.WORKER, constants.LOWLATENCY) elif hostupdate.ihost_patch['personality'] == constants.CONTROLLER: valid_subfunctions = (constants.CONTROLLER, - constants.COMPUTE, + constants.WORKER, constants.LOWLATENCY) elif hostupdate.ihost_patch['personality'] == constants.STORAGE: # Comparison is expecting a list @@ -4679,11 +4679,11 @@ class HostController(rest.RestController): ("%s subfunctions %s contains unsupported values. Allowable: %s." % (hostupdate.displayid, subfunctions_set, valid_subfunctions))) - if hostupdate.ihost_patch['personality'] == constants.COMPUTE: - if constants.COMPUTE not in subfunctions_set: + if hostupdate.ihost_patch['personality'] == constants.WORKER: + if constants.WORKER not in subfunctions_set: # Automatically add it subfunctions_list = list(subfunctions_set) - subfunctions_list.insert(0, constants.COMPUTE) + subfunctions_list.insert(0, constants.WORKER) subfunctions = ','.join(subfunctions_list) LOG.info("%s update subfunctions=%s" % @@ -4732,10 +4732,10 @@ class HostController(rest.RestController): if not personality: return - if personality == constants.COMPUTE and utils.is_aio_duplex_system(): - if utils.get_compute_count() >= constants.AIO_DUPLEX_MAX_COMPUTES: + if personality == constants.WORKER and utils.is_aio_duplex_system(): + if utils.get_worker_count() >= constants.AIO_DUPLEX_MAX_WORKERS: msg = _("All-in-one Duplex is restricted to " - "%s computes.") % constants.AIO_DUPLEX_MAX_COMPUTES + "%s workers.") % constants.AIO_DUPLEX_MAX_WORKERS raise wsme.exc.ClientSideError(msg) else: return @@ -4883,8 +4883,8 @@ class HostController(rest.RestController): if personality == constants.CONTROLLER: self.check_unlock_controller(hostupdate, force_unlock) - if 
cutils.host_has_function(hostupdate.ihost_patch, constants.COMPUTE): - self.check_unlock_compute(hostupdate) + if cutils.host_has_function(hostupdate.ihost_patch, constants.WORKER): + self.check_unlock_worker(hostupdate) elif personality == constants.STORAGE: self.check_unlock_storage(hostupdate) @@ -4956,8 +4956,8 @@ class HostController(rest.RestController): subfunctions_set = \ set(hostupdate.ihost_patch[constants.SUBFUNCTIONS].split(',')) - if constants.COMPUTE in subfunctions_set: - self.check_lock_compute(hostupdate) + if constants.WORKER in subfunctions_set: + self.check_lock_worker(hostupdate) hostupdate.notify_vim = True hostupdate.notify_mtce = True @@ -5081,9 +5081,9 @@ class HostController(rest.RestController): if utils.get_https_enabled(): self._semantic_check_tpm_config(hostupdate.ihost_orig) - def check_unlock_compute(self, hostupdate): - """Check semantics on host-unlock of a compute.""" - LOG.info("%s ihost check_unlock_compute" % hostupdate.displayid) + def check_unlock_worker(self, hostupdate): + """Check semantics on host-unlock of a worker.""" + LOG.info("%s ihost check_unlock_worker" % hostupdate.displayid) ihost = hostupdate.ihost_orig if ihost['invprovision'] is None: raise wsme.exc.ClientSideError( @@ -5093,7 +5093,7 @@ class HostController(rest.RestController): # Check whether a restore was properly completed self._semantic_check_restore_complete(ihost) - # Disable compute unlock checks in a kubernetes config + # Disable worker unlock checks in a kubernetes config if not utils.is_kubernetes_config(): # sdn configuration check self._semantic_check_sdn_attributes(ihost) @@ -5142,7 +5142,7 @@ class HostController(rest.RestController): # calculate the VM 4K huge pages for nova self._update_vm_4k_pages(ihost) - if cutils.is_virtual() or cutils.is_virtual_compute(ihost): + if cutils.is_virtual() or cutils.is_virtual_worker(ihost): mib_platform_reserved_no_io = mib_reserved required_platform = \ constants.PLATFORM_CORE_MEMORY_RESERVED_MIB_VBOX @@ 
-5236,7 +5236,7 @@ class HostController(rest.RestController): personality=constants.STORAGE) except Exception: raise wsme.exc.ClientSideError( - _("Can not unlock a compute node until at " + _("Can not unlock a worker node until at " "least one storage node is unlocked and enabled.")) is_storage_host_unlocked = False if storage_nodes: @@ -5250,7 +5250,7 @@ class HostController(rest.RestController): if not is_storage_host_unlocked: raise wsme.exc.ClientSideError( - _("Can not unlock a compute node until at " + _("Can not unlock a worker node until at " "least one storage node is unlocked and enabled.")) # Local Storage checks @@ -5435,7 +5435,7 @@ class HostController(rest.RestController): elif to_host_load_id == upgrade.from_load: # On CPE loads we must abort before we swact back to the old load # Any VMs on the active controller will be lost during the swact - if constants.COMPUTE in to_host.subfunctions: + if constants.WORKER in to_host.subfunctions: raise wsme.exc.ClientSideError( _("Upgrading: %s must be using load %s before this " "operation can proceed. Currently using load %s.") % @@ -5493,7 +5493,7 @@ class HostController(rest.RestController): "Standby controller must be in available status.") % (ihost_ctr.hostname)) - if constants.COMPUTE in ihost_ctr.subfunctions: + if constants.WORKER in ihost_ctr.subfunctions: if (ihost_ctr.subfunction_oper != constants.OPERATIONAL_ENABLED): raise wsme.exc.ClientSideError( @@ -5659,10 +5659,10 @@ class HostController(rest.RestController): "and replication is lost. This may result in data loss. 
") raise wsme.exc.ClientSideError(msg) - def check_lock_compute(self, hostupdate, force=False): - """Pre lock semantic checks for compute""" + def check_lock_worker(self, hostupdate, force=False): + """Pre lock semantic checks for worker""" - LOG.info("%s host check_lock_compute" % hostupdate.displayid) + LOG.info("%s host check_lock_worker" % hostupdate.displayid) if force: return @@ -5692,7 +5692,7 @@ class HostController(rest.RestController): # Allow AIO-DX lock of controller-1 return raise wsme.exc.ClientSideError( - _("Rejected: Can not lock %s with compute function " + _("Rejected: Can not lock %s with worker function " "at this upgrade stage '%s'.") % (hostupdate.displayid, upgrade_state)) @@ -5703,17 +5703,17 @@ class HostController(rest.RestController): if hostname == constants.CONTROLLER_0_HOSTNAME: return raise wsme.exc.ClientSideError( - _("Rejected: Can not lock %s with compute function " + _("Rejected: Can not lock %s with worker function " "at this upgrade stage '%s'.") % (hostupdate.displayid, upgrade_state)) def check_unlock_interfaces(self, hostupdate): """Semantic check for interfaces on host-unlock.""" ihost = hostupdate.ihost_patch - if ihost['personality'] in [constants.CONTROLLER, constants.COMPUTE, + if ihost['personality'] in [constants.CONTROLLER, constants.WORKER, constants.STORAGE]: # Check if there is an infra interface on - # controller/compute/storage + # controller/worker/storage ihost_iinterfaces = \ pecan.request.dbapi.iinterface_get_by_ihost(ihost['uuid']) @@ -5754,7 +5754,7 @@ class HostController(rest.RestController): raise wsme.exc.ClientSideError(msg) # Check if there is an management interface on - # controller/compute/storage + # controller/worker/storage ihost_iinterfaces = pecan.request.dbapi.iinterface_get_by_ihost( ihost['uuid']) network = pecan.request.dbapi.network_get_by_type( @@ -5796,7 +5796,7 @@ class HostController(rest.RestController): # management and infrastrucutre interfaces via DHCP. 
This # 'check' updates the 'imtu' value based on what will be served # via DHCP. - if ihost['personality'] in [constants.COMPUTE, constants.STORAGE]: + if ihost['personality'] in [constants.WORKER, constants.STORAGE]: host_list = pecan.request.dbapi.ihost_get_by_personality( personality=constants.CONTROLLER) interface_list_active = [] @@ -5938,7 +5938,7 @@ class HostController(rest.RestController): ihost_obj['hostname']) pecan.request.rpcapi.configure_ihost(pecan.request.context, ihost_obj, - do_compute_apply=True) + do_worker_apply=True) @staticmethod def _stage_reboot(hostupdate): @@ -6186,7 +6186,7 @@ class HostController(rest.RestController): def _create_node(host, xml_node, personality, is_dynamic_ip): host_node = et.SubElement(xml_node, 'host') et.SubElement(host_node, 'personality').text = personality - if personality == constants.COMPUTE: + if personality == constants.WORKER: et.SubElement(host_node, 'hostname').text = host.hostname et.SubElement(host_node, 'subfunctions').text = host.subfunctions diff --git a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/interface.py b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/interface.py index d1bec4391c..8153a78a68 100644 --- a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/interface.py +++ b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/interface.py @@ -979,9 +979,9 @@ def _check_network_type_validity(networktypelist): def _check_network_type_and_host_type(ihost, networktypelist): for nt in DATA_NETWORK_TYPES: if (nt in networktypelist and - constants.COMPUTE not in ihost['subfunctions']): + constants.WORKER not in ihost['subfunctions']): msg = _("The '%s' network type is only supported on nodes " - "supporting compute functions" % nt) + "supporting worker functions" % nt) raise wsme.exc.ClientSideError(msg) if (constants.NETWORK_TYPE_OAM in networktypelist and diff --git a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/lvg.py b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/lvg.py index 2d2a01c293..70b72273b8 
100644 --- a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/lvg.py +++ b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/lvg.py @@ -513,32 +513,32 @@ def _check_host(lvg): raise wsme.exc.ClientSideError(_("Volume group operations not allowed " "on hosts with personality: %s") % constants.STORAGE) - elif (constants.COMPUTE not in ihost.subfunctions and + elif (constants.WORKER not in ihost.subfunctions and lvg['lvm_vg_name'] == constants.LVG_NOVA_LOCAL): raise wsme.exc.ClientSideError(_("%s can only be added to a host which " "has a %s subfunction.") % (constants.LVG_NOVA_LOCAL, - constants.COMPUTE)) - elif (ihost.personality == constants.COMPUTE and + constants.WORKER)) + elif (ihost.personality == constants.WORKER and lvg['lvm_vg_name'] == constants.LVG_CGTS_VG and not utils.is_kubernetes_config()): raise wsme.exc.ClientSideError(_("%s can not be provisioned for %s " "hosts.") % (constants.LVG_CGTS_VG, - constants.COMPUTE)) - elif (ihost.personality in [constants.COMPUTE, constants.STORAGE] and + constants.WORKER)) + elif (ihost.personality in [constants.WORKER, constants.STORAGE] and lvg['lvm_vg_name'] == constants.LVG_CINDER_VOLUMES): raise wsme.exc.ClientSideError(_("%s can only be provisioned for %s " "hosts.") % (constants.LVG_CINDER_VOLUMES, constants.CONTROLLER)) - if (constants.COMPUTE in ihost['subfunctions'] and + if (constants.WORKER in ihost['subfunctions'] and lvg['lvm_vg_name'] == constants.LVG_NOVA_LOCAL and (ihost['administrative'] != constants.ADMIN_LOCKED or ihost['ihost_action'] == constants.UNLOCK_ACTION)): raise wsme.exc.ClientSideError(_("Host must be locked")) if utils.is_kubernetes_config(): - if (ihost.personality == constants.COMPUTE and + if (ihost.personality == constants.WORKER and lvg['lvm_vg_name'] == constants.LVG_CGTS_VG and (ihost['administrative'] != constants.ADMIN_LOCKED or ihost['ihost_action'] == constants.UNLOCK_ACTION)): @@ -662,7 +662,7 @@ def _check(op, lvg): raise wsme.exc.ClientSideError( _("Can't modify the volume 
group: %s. There are currently " "%d instance volumes present in the volume group. " - "Terminate or migrate all instances from the compute to " + "Terminate or migrate all instances from the worker to " "allow volume group madifications." % (lvg['lvm_vg_name'], lvg['lvm_cur_lv'] - 1))) @@ -683,7 +683,7 @@ def _check(op, lvg): raise wsme.exc.ClientSideError( _("Can't delete volume group: %s. There are currently %d " "instance volumes present in the volume group. Terminate" - " or migrate all instances from the compute to allow " + " or migrate all instances from the worker to allow " "volume group deletion." % (lvg['lvm_vg_name'], lvg['lvm_cur_lv'] - 1))) else: diff --git a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/memory.py b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/memory.py index a9f7afa22a..a7f713485c 100644 --- a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/memory.py +++ b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/memory.py @@ -559,7 +559,7 @@ def _check_memory(rpc_port, ihost, platform_reserved_mib=None, required_platform_reserved, max_platform_reserved)) - if cutils.is_virtual() or cutils.is_virtual_compute(ihost): + if cutils.is_virtual() or cutils.is_virtual_worker(ihost): LOG.warn(msg_platform_over) else: raise wsme.exc.ClientSideError(msg_platform_over) diff --git a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/partition.py b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/partition.py index 75bb8da14d..a951423902 100644 --- a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/partition.py +++ b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/partition.py @@ -336,16 +336,16 @@ class PartitionController(rest.RestController): def _check_host(partition, ihost, idisk): """Semantic checks for valid host""" - # Partitions should only be created on computes/controllers. + # Partitions should only be created on workers/controllers. 
if not ihost.personality: raise wsme.exc.ClientSideError(_("Host %s has uninitialized " "personality.") % ihost.hostname) - elif ihost.personality not in [constants.CONTROLLER, constants.COMPUTE]: + elif ihost.personality not in [constants.CONTROLLER, constants.WORKER]: raise wsme.exc.ClientSideError(_("Host personality must be a one of " "[%s, %s]") % (constants.CONTROLLER, - constants.COMPUTE)) + constants.WORKER)) # The disk must be present on the specified host. if ihost['id'] != idisk['forihostid']: @@ -656,8 +656,8 @@ def _create(partition, iprofile=None, applyprofile=None): # Check if this host has been provisioned. If so, attempt an in-service # action. If not, we'll just stage the DB changes to and let the unlock # apply the manifest changes - # - PROVISIONED: standard controller/compute (after config_controller) - # - PROVISIONING: AIO (after config_controller) and before compute + # - PROVISIONED: standard controller/worker (after config_controller) + # - PROVISIONING: AIO (after config_controller) and before worker # configuration if (ihost.invprovision in [constants.PROVISIONED, constants.PROVISIONING] and diff --git a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/pci_device.py b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/pci_device.py index ec3cfc7590..84d65c18bd 100755 --- a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/pci_device.py +++ b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/pci_device.py @@ -294,8 +294,8 @@ def _check_host(host): elif host.administrative != constants.ADMIN_LOCKED and not \ utils.is_host_simplex_controller(host): raise wsme.exc.ClientSideError(_('Host must be locked.')) - if constants.COMPUTE not in host.subfunctions: - raise wsme.exc.ClientSideError(_('Can only modify compute node cores.')) + if constants.WORKER not in host.subfunctions: + raise wsme.exc.ClientSideError(_('Can only modify worker node cores.')) def _check_field(field): diff --git a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/profile.py 
b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/profile.py index 38da3fc9ad..3c918e3378 100644 --- a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/profile.py +++ b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/profile.py @@ -898,7 +898,7 @@ class ProfileController(rest.RestController): if 'profiletype' in profile_dict and profile_dict['profiletype']: profiletype = profile_dict['profiletype'] if profiletype == constants.PROFILE_TYPE_STORAGE: - if constants.COMPUTE in from_ihost.subfunctions: + if constants.WORKER in from_ihost.subfunctions: # combo has no ceph profiletype = constants.PROFILE_TYPE_LOCAL_STORAGE LOG.info("No ceph backend for stor profile, assuming " @@ -1136,7 +1136,7 @@ def _create_cpu_profile(profile_name, profile_node): self.processor_index = p_index self.core_index = c_index self.thread_index = t_index - self.core_function = constants.VM_FUNCTION + self.core_function = constants.APPLICATION_FUNCTION # The xml is validated against schema. # Validations that are covered by the schema are not checked below. @@ -1750,7 +1750,7 @@ def _create_localstorage_profile(profile_name, profile_node): """ values = dict(recordtype="profile", hostname=profile_name, - subfunctions=constants.COMPUTE) + subfunctions=constants.WORKER) disks = profile_node.findall('disk') all_ilvg_nodes = profile_node.findall('lvg') # should only be ONE ? @@ -2179,7 +2179,7 @@ def _create_device_profile(device, pv_type, iprofile_id): def localstorageprofile_copy_data(host, profile): """Create nova-local storage profile from host data - All computes will have nova local storage and is independent of + All workers will have nova local storage and is independent of the Cinder backend. Controller nodes in the small footprint scenario will always be @@ -2189,7 +2189,7 @@ def localstorageprofile_copy_data(host, profile): A storage node should be the only host with a stor profile (idisks + istors). 
- A compute will only have a local stor profile + A worker will only have a local stor profile (idisks + ipvs + ilvgs). A combo controller should have a local stor profile @@ -2467,7 +2467,7 @@ def cpuprofile_apply_to_host(host, profile): elif core_idx < vm_core_start: new_func = constants.SHARED_FUNCTION elif core_idx < vm_core_end: - new_func = constants.VM_FUNCTION + new_func = constants.APPLICATION_FUNCTION if new_func != hcpu.allocated_function: values = {'allocated_function': new_func} @@ -2949,10 +2949,10 @@ def check_localstorageprofile_applicable(host, profile): """ subfunctions = host.subfunctions - if constants.COMPUTE not in subfunctions: + if constants.WORKER not in subfunctions: raise wsme.exc.ClientSideError(_("%s with subfunctions: %s " "profile %s: Local storage profiles are applicable only to " - "hosts with 'compute' subfunction." % + "hosts with 'worker' subfunction." % (host.hostname, host.subfunctions, profile.hostname))) if not profile.disks: @@ -3143,8 +3143,8 @@ def memoryprofile_applicable(host, profile): LOG.warn("Host nodes %s not same as profile nodes=%s" % (len(host.nodes), len(profile.nodes))) return False - if constants.COMPUTE not in host.subfunctions: - LOG.warn("Profile cannot be applied to non-compute host") + if constants.WORKER not in host.subfunctions: + LOG.warn("Profile cannot be applied to non-worker host") return False return True diff --git a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/pv.py b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/pv.py index ba020595da..e33210beef 100644 --- a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/pv.py +++ b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/pv.py @@ -477,12 +477,12 @@ def _check_host(pv, ihost, op): if utils.is_kubernetes_config(): if (ilvg.lvm_vg_name == constants.LVG_CGTS_VG): if (ihost['personality'] != constants.CONTROLLER and - ihost['personality'] != constants.COMPUTE): + ihost['personality'] != constants.WORKER): raise wsme.exc.ClientSideError( _("Physical 
volume operations for %s are only " "supported on %s and %s hosts" % (constants.LVG_CGTS_VG, - constants.COMPUTE, + constants.WORKER, constants.CONTROLLER))) elif (ilvg.lvm_vg_name == constants.LVG_CGTS_VG): if ihost['personality'] != constants.CONTROLLER: @@ -492,17 +492,17 @@ def _check_host(pv, ihost, op): constants.CONTROLLER)) # semantic check: host must be locked for a nova-local change on - # a host with a compute subfunction (compute or AIO) - if (constants.COMPUTE in ihost['subfunctions'] and + # a host with a worker subfunction (worker or AIO) + if (constants.WORKER in ihost['subfunctions'] and ilvg.lvm_vg_name == constants.LVG_NOVA_LOCAL and (ihost['administrative'] != constants.ADMIN_LOCKED or ihost['ihost_action'] == constants.UNLOCK_ACTION)): raise wsme.exc.ClientSideError(_("Host must be locked")) # semantic check: host must be locked for a CGTS change on - # a compute host. + # a worker host. if utils.is_kubernetes_config(): - if (ihost['personality'] == constants.COMPUTE and + if (ihost['personality'] == constants.WORKER and ilvg.lvm_vg_name == constants.LVG_CGTS_VG and (ihost['administrative'] != constants.ADMIN_LOCKED or ihost['ihost_action'] == constants.UNLOCK_ACTION)): @@ -599,7 +599,7 @@ def _check_lvg(op, pv): raise wsme.exc.ClientSideError(msg) elif op == "delete": - # Possible Kubernetes issue, do we want to allow this on compute nodes? + # Possible Kubernetes issue, do we want to allow this on worker nodes? 
if (ilvg.lvm_vg_name == constants.LVG_CGTS_VG): raise wsme.exc.ClientSideError( _("Physical volumes cannot be removed from the cgts-vg volume " diff --git a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/sdn_controller.py b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/sdn_controller.py index 1a89d27ed4..140a90837b 100644 --- a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/sdn_controller.py +++ b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/sdn_controller.py @@ -205,7 +205,7 @@ class SDNControllerController(rest.RestController): # Clear any existing OVSDB manager alarm, corresponding # to this SDN controller. We need to clear this alarm # for all hosts on which it is set, i.e. all unlocked - # compute nodes. + # worker nodes. key = "sdn-controller=%s" % uuid obj = fm_api.FaultAPIs() @@ -220,7 +220,7 @@ class SDNControllerController(rest.RestController): # Clear any existing Openflow Controller alarm, corresponding # to this SDN controller. We need need to clear this alarm - # for all hosts on which it is set, i.e. all unlocked computes. + # for all hosts on which it is set, i.e. all unlocked workers. sdn_controller = objects.sdn_controller.get_by_uuid( pecan.request.context, uuid) uri = "%s://%s" % (sdn_controller.transport, diff --git a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/storage_ceph.py b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/storage_ceph.py index d525478af6..ebdc0fbb8e 100644 --- a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/storage_ceph.py +++ b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/storage_ceph.py @@ -733,7 +733,7 @@ def _apply_backend_changes(op, sb_obj): def _apply_nova_specific_changes(sb_obj, old_sb_obj=None): """If the backend's services have been modified and nova has been either - added or (re)moved, set the hosts with compute functionality and a + added or (re)moved, set the hosts with worker functionality and a certain nova-local instance backing to Config out-of-date. 
""" services = api_helper.getListFromServices(sb_obj.as_dict()) diff --git a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/system.py b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/system.py index b2447d0f2d..4679303cc0 100644 --- a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/system.py +++ b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/system.py @@ -282,7 +282,7 @@ class SystemController(rest.RestController): raise wsme.exc.ClientSideError( _("Host {} must be locked.".format(h['hostname']))) elif (h['administrative'] != constants.ADMIN_LOCKED and - constants.COMPUTE in h['subfunctions'] and + constants.WORKER in h['subfunctions'] and not api_utils.is_host_active_controller(h) and not api_utils.is_host_simplex_controller(h)): raise wsme.exc.ClientSideError( diff --git a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/utils.py b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/utils.py index d52de1ee40..8106f1011a 100644 --- a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/utils.py +++ b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/utils.py @@ -317,7 +317,7 @@ class SystemHelper(object): @staticmethod def get_product_build(): active_controller = HostHelper.get_active_controller() - if constants.COMPUTE in active_controller.subfunctions: + if constants.WORKER in active_controller.subfunctions: return constants.TIS_AIO_BUILD return constants.TIS_STD_BUILD @@ -413,10 +413,10 @@ def is_aio_kubernetes(dbapi=None): is_kubernetes_config(dbapi) -def get_compute_count(dbapi=None): +def get_worker_count(dbapi=None): if not dbapi: dbapi = pecan.request.dbapi - return len(dbapi.ihost_get_by_personality(constants.COMPUTE)) + return len(dbapi.ihost_get_by_personality(constants.WORKER)) class SBApiHelper(object): diff --git a/sysinv/sysinv/sysinv/sysinv/common/ceph.py b/sysinv/sysinv/sysinv/sysinv/common/ceph.py index 371701bbdd..f0ca9bbce9 100644 --- a/sysinv/sysinv/sysinv/sysinv/common/ceph.py +++ b/sysinv/sysinv/sysinv/sysinv/common/ceph.py @@ -649,7 +649,7 @@ class 
CephApiOperator(object): inventory_monitor_names = [] ihosts = db_api.ihost_get_list() for ihost in ihosts: - if ihost['personality'] == constants.COMPUTE: + if ihost['personality'] == constants.WORKER: continue capabilities = ihost['capabilities'] if 'stor_function' in capabilities: diff --git a/sysinv/sysinv/sysinv/sysinv/common/constants.py b/sysinv/sysinv/sysinv/sysinv/common/constants.py index aa18db9511..3b7454988e 100644 --- a/sysinv/sysinv/sysinv/sysinv/common/constants.py +++ b/sysinv/sysinv/sysinv/sysinv/common/constants.py @@ -102,9 +102,9 @@ CONFIG_ACTIONS = [SUBFUNCTION_CONFIG_ACTION, # Personalities CONTROLLER = 'controller' STORAGE = 'storage' -COMPUTE = 'compute' +WORKER = 'worker' -PERSONALITIES = [CONTROLLER, STORAGE, COMPUTE] +PERSONALITIES = [CONTROLLER, STORAGE, WORKER] # SUBFUNCTION FEATURES SUBFUNCTIONS = 'subfunctions' @@ -114,7 +114,7 @@ LOWLATENCY = 'lowlatency' PLATFORM_FUNCTION = "Platform" VSWITCH_FUNCTION = "Vswitch" SHARED_FUNCTION = "Shared" -VM_FUNCTION = "VMs" +APPLICATION_FUNCTION = "Applications" NO_FUNCTION = "None" # Host Personality Sub-Types @@ -223,8 +223,8 @@ COMBINED_NODE_CONTROLLER_MEMORY_RESERVED_MIB_XEOND = 7000 # Max number of physical cores in a xeon-d cpu NUMBER_CORES_XEOND = 8 -# Max number of computes that can be added to an AIO duplex system -AIO_DUPLEX_MAX_COMPUTES = 4 +# Max number of workers that can be added to an AIO duplex system +AIO_DUPLEX_MAX_WORKERS = 4 # Network overhead for DHCP or vrouter, assume 100 networks * 40 MB each NETWORK_METADATA_OVERHEAD_MIB = 4000 @@ -244,7 +244,7 @@ NEUTRON_PROVIDERNET_FLAT = "flat" NEUTRON_PROVIDERNET_VXLAN = "vxlan" NEUTRON_PROVIDERNET_VLAN = "vlan" -# Supported compute node vswitch types +# Supported worker node vswitch types VSWITCH_TYPE_OVS_DPDK = "ovs-dpdk" VSWITCH_TYPE_NUAGE_VRS = "nuage_vrs" @@ -420,7 +420,7 @@ SB_TASK_APPLY_CONFIG_FILE = 'applying-config-file' SB_TASK_RECONFIG_CONTROLLER = 'reconfig-controller' SB_TASK_PROVISION_STORAGE = 'provision-storage' 
SB_TASK_PROVISION_SERVICES = 'provision-services' -SB_TASK_RECONFIG_COMPUTE = 'reconfig-compute' +SB_TASK_RECONFIG_WORKER = 'reconfig-worker' SB_TASK_RESIZE_CEPH_MON_LV = 'resize-ceph-mon-lv' SB_TASK_ADD_OBJECT_GATEWAY = 'add-object-gateway' SB_TASK_RESTORE = 'restore' @@ -1003,7 +1003,7 @@ SERVICE_PARAM_SECTION_PLATFORM_MAINTENANCE = 'maintenance' SERVICE_PARAM_SECTION_PLATFORM_SYSINV = 'sysinv' SERVICE_PARAM_NAME_SYSINV_FIREWALL_RULES_ID = 'firewall_rules_id' -SERVICE_PARAM_PLAT_MTCE_COMPUTE_BOOT_TIMEOUT = 'compute_boot_timeout' +SERVICE_PARAM_PLAT_MTCE_WORKER_BOOT_TIMEOUT = 'worker_boot_timeout' SERVICE_PARAM_PLAT_MTCE_CONTROLLER_BOOT_TIMEOUT = 'controller_boot_timeout' SERVICE_PARAM_PLAT_MTCE_HBS_PERIOD = 'heartbeat_period' SERVICE_PARAM_PLAT_MTCE_HBS_FAILURE_ACTION = 'heartbeat_failure_action' @@ -1012,7 +1012,7 @@ SERVICE_PARAM_PLAT_MTCE_HBS_DEGRADE_THRESHOLD = 'heartbeat_degrade_threshold' SERVICE_PARAM_PLAT_MTCE_MNFA_THRESHOLD = 'mnfa_threshold' SERVICE_PARAM_PLAT_MTCE_MNFA_TIMEOUT = 'mnfa_timeout' -SERVICE_PARAM_PLAT_MTCE_COMPUTE_BOOT_TIMEOUT_DEFAULT = 720 +SERVICE_PARAM_PLAT_MTCE_WORKER_BOOT_TIMEOUT_DEFAULT = 720 SERVICE_PARAM_PLAT_MTCE_CONTROLLER_BOOT_TIMEOUT_DEFAULT = 1200 SERVICE_PARAM_PLAT_MTCE_HBS_PERIOD_DEFAULT = 100 SERVICE_PARAM_PLAT_MTCE_HBS_FAILURE_ACTION_DEFAULT = 'fail' @@ -1286,11 +1286,11 @@ WARN_CINDER_ON_ROOT_WITH_CEPH = 2 WARNING_ROOT_PV_CINDER_LVM_MSG = ( "Warning: All deployed VMs must be booted from Cinder volumes and " "not use ephemeral or swap disks. See Titanium Cloud System Engineering " - "Guidelines for more details on supported compute configurations.") + "Guidelines for more details on supported worker configurations.") WARNING_ROOT_PV_CINDER_CEPH_MSG = ( - "Warning: This compute must have instance_backing set to 'remote' " + "Warning: This worker must have instance_backing set to 'remote' " "or use a secondary disk for local storage. 
See Titanium Cloud System " - "Engineering Guidelines for more details on supported compute configurations.") + "Engineering Guidelines for more details on supported worker configurations.") PV_WARNINGS = {WARN_CINDER_ON_ROOT_WITH_LVM: WARNING_ROOT_PV_CINDER_LVM_MSG, WARN_CINDER_ON_ROOT_WITH_CEPH: WARNING_ROOT_PV_CINDER_CEPH_MSG} diff --git a/sysinv/sysinv/sysinv/sysinv/common/health.py b/sysinv/sysinv/sysinv/sysinv/common/health.py index 8ed8a065e7..1713132250 100755 --- a/sysinv/sysinv/sysinv/sysinv/common/health.py +++ b/sysinv/sysinv/sysinv/sysinv/common/health.py @@ -336,7 +336,7 @@ class Health(object): # If we are running on CPE we don't want any instances running # on controller-1 before we start the upgrade, otherwise the # databases will be out of sync after we lock controller-1 - if constants.COMPUTE in controller_1.subfunctions: + if constants.WORKER in controller_1.subfunctions: success, running_instances = self._check_running_instances( controller_1) output += \ diff --git a/sysinv/sysinv/sysinv/sysinv/common/service_parameter.py b/sysinv/sysinv/sysinv/sysinv/common/service_parameter.py index fcf38c8b33..092fac3b40 100644 --- a/sysinv/sysinv/sysinv/sysinv/common/service_parameter.py +++ b/sysinv/sysinv/sysinv/sysinv/common/service_parameter.py @@ -244,7 +244,7 @@ def _validate_ip_address(name, value): def _validate_emc_vnx_iscsi_initiators(name, value): """Check if iscsi_initiators value is valid. 
An example of valid iscsi_initiators string: - {"compute-0": ["10.0.0.1", "10.0.0.2"], "compute-1": ["10.0.0.3"]} + {"worker-0": ["10.0.0.1", "10.0.0.2"], "worker-1": ["10.0.0.3"]} """ try: iscsi_initiators = json.loads(value) @@ -527,10 +527,10 @@ def _emc_vnx_destroy_data_san_address(data_san_addr_param, data_san_db): raise wsme.exc.ClientSideError(msg) -def _validate_compute_boot_timeout(name, value): +def _validate_worker_boot_timeout(name, value): _validate_range(name, value, - SERVICE_PARAM_PLAT_MTCE_COMPUTE_BOOT_TIMEOUT_MIN, - SERVICE_PARAM_PLAT_MTCE_COMPUTE_BOOT_TIMEOUT_MAX) + SERVICE_PARAM_PLAT_MTCE_WORKER_BOOT_TIMEOUT_MIN, + SERVICE_PARAM_PLAT_MTCE_WORKER_BOOT_TIMEOUT_MAX) def _validate_controller_boot_timeout(name, value): @@ -1353,7 +1353,7 @@ CINDER_HPELEFTHAND_PARAMETER_RESOURCE = { # Maintenance Service Parameters PLATFORM_MTCE_PARAMETER_MANDATORY = [ - constants.SERVICE_PARAM_PLAT_MTCE_COMPUTE_BOOT_TIMEOUT, + constants.SERVICE_PARAM_PLAT_MTCE_WORKER_BOOT_TIMEOUT, constants.SERVICE_PARAM_PLAT_MTCE_CONTROLLER_BOOT_TIMEOUT, constants.SERVICE_PARAM_PLAT_MTCE_HBS_PERIOD, constants.SERVICE_PARAM_PLAT_MTCE_HBS_FAILURE_ACTION, @@ -1365,8 +1365,8 @@ PLATFORM_MTCE_PARAMETER_MANDATORY = [ PLATFORM_SYSINV_PARAMETER_PROTECTED = ['firewall_rules_id'] -SERVICE_PARAM_PLAT_MTCE_COMPUTE_BOOT_TIMEOUT_MIN = 720 -SERVICE_PARAM_PLAT_MTCE_COMPUTE_BOOT_TIMEOUT_MAX = 1800 +SERVICE_PARAM_PLAT_MTCE_WORKER_BOOT_TIMEOUT_MIN = 720 +SERVICE_PARAM_PLAT_MTCE_WORKER_BOOT_TIMEOUT_MAX = 1800 SERVICE_PARAM_PLAT_MTCE_CONTROLLER_BOOT_TIMEOUT_MIN = 1200 SERVICE_PARAM_PLAT_MTCE_CONTROLLER_BOOT_TIMEOUT_MAX = 1800 SERVICE_PARAM_PLAT_MTCE_HBS_PERIOD_MIN = 100 @@ -1385,8 +1385,8 @@ SERVICE_PARAM_PLAT_MTCE_MNFA_TIMEOUT_MIN = 100 SERVICE_PARAM_PLAT_MTCE_MNFA_TIMEOUT_MAX = 86400 PLATFORM_MTCE_PARAMETER_VALIDATOR = { - constants.SERVICE_PARAM_PLAT_MTCE_COMPUTE_BOOT_TIMEOUT: - _validate_compute_boot_timeout, + constants.SERVICE_PARAM_PLAT_MTCE_WORKER_BOOT_TIMEOUT: + _validate_worker_boot_timeout, 
constants.SERVICE_PARAM_PLAT_MTCE_CONTROLLER_BOOT_TIMEOUT: _validate_controller_boot_timeout, constants.SERVICE_PARAM_PLAT_MTCE_HBS_PERIOD: @@ -1404,7 +1404,7 @@ PLATFORM_MTCE_PARAMETER_VALIDATOR = { } PLATFORM_MTCE_PARAMETER_RESOURCE = { - constants.SERVICE_PARAM_PLAT_MTCE_COMPUTE_BOOT_TIMEOUT: 'platform::mtce::params::compute_boot_timeout', + constants.SERVICE_PARAM_PLAT_MTCE_WORKER_BOOT_TIMEOUT: 'platform::mtce::params::worker_boot_timeout', constants.SERVICE_PARAM_PLAT_MTCE_CONTROLLER_BOOT_TIMEOUT: 'platform::mtce::params::controller_boot_timeout', constants.SERVICE_PARAM_PLAT_MTCE_HBS_PERIOD: 'platform::mtce::params::heartbeat_period', constants.SERVICE_PARAM_PLAT_MTCE_HBS_FAILURE_ACTION: 'platform::mtce::params::heartbeat_failure_action', diff --git a/sysinv/sysinv/sysinv/sysinv/common/storage_backend_conf.py b/sysinv/sysinv/sysinv/sysinv/common/storage_backend_conf.py index a044d12a93..7be59a29b4 100644 --- a/sysinv/sysinv/sysinv/sysinv/common/storage_backend_conf.py +++ b/sysinv/sysinv/sysinv/sysinv/common/storage_backend_conf.py @@ -262,7 +262,7 @@ class StorageBackendConfig(object): return False # if both controllers are reconfigured and 1st pair storage nodes - # are provisioned, the task will be either reconfig_compute or none + # are provisioned, the task will be either reconfig_worker or none return True @staticmethod diff --git a/sysinv/sysinv/sysinv/sysinv/common/utils.py b/sysinv/sysinv/sysinv/sysinv/common/utils.py index f216eaca07..e558b36337 100644 --- a/sysinv/sysinv/sysinv/sysinv/common/utils.py +++ b/sysinv/sysinv/sysinv/sysinv/common/utils.py @@ -879,12 +879,12 @@ def is_virtual(): return bool(result == 'true') -def is_virtual_compute(ihost): - if not(os.path.isdir("/etc/sysinv/.virtual_compute_nodes")): +def is_virtual_worker(ihost): + if not(os.path.isdir("/etc/sysinv/.virtual_worker_nodes")): return False try: ip = ihost['mgmt_ip'] - return os.path.isfile("/etc/sysinv/.virtual_compute_nodes/%s" % ip) + return 
os.path.isfile("/etc/sysinv/.virtual_worker_nodes/%s" % ip) except AttributeError: return False @@ -913,9 +913,9 @@ def get_minimum_platform_reserved_memory(ihost, numa_node): reserved = 0 if numa_node is None: return reserved - if is_virtual() or is_virtual_compute(ihost): + if is_virtual() or is_virtual_worker(ihost): # minimal memory requirements for VirtualBox - if host_has_function(ihost, constants.COMPUTE): + if host_has_function(ihost, constants.WORKER): if numa_node == 0: reserved += 1200 if host_has_function(ihost, constants.CONTROLLER): @@ -923,7 +923,7 @@ def get_minimum_platform_reserved_memory(ihost, numa_node): else: reserved += 500 else: - if host_has_function(ihost, constants.COMPUTE): + if host_has_function(ihost, constants.WORKER): # Engineer 2G per numa node for disk IO RSS overhead reserved += constants.DISK_IO_RESIDENT_SET_SIZE_MIB return reserved @@ -939,10 +939,10 @@ def get_required_platform_reserved_memory(ihost, numa_node, low_core=False): required_reserved = 0 if numa_node is None: return required_reserved - if is_virtual() or is_virtual_compute(ihost): + if is_virtual() or is_virtual_worker(ihost): # minimal memory requirements for VirtualBox required_reserved += constants.DISK_IO_RESIDENT_SET_SIZE_MIB_VBOX - if host_has_function(ihost, constants.COMPUTE): + if host_has_function(ihost, constants.WORKER): if numa_node == 0: required_reserved += \ constants.PLATFORM_CORE_MEMORY_RESERVED_MIB_VBOX @@ -957,11 +957,11 @@ def get_required_platform_reserved_memory(ihost, numa_node, low_core=False): required_reserved += \ constants.DISK_IO_RESIDENT_SET_SIZE_MIB_VBOX else: - if host_has_function(ihost, constants.COMPUTE): + if host_has_function(ihost, constants.WORKER): # Engineer 2G per numa node for disk IO RSS overhead required_reserved += constants.DISK_IO_RESIDENT_SET_SIZE_MIB if numa_node == 0: - # Engineer 2G for compute to give some headroom; + # Engineer 2G for worker to give some headroom; # typically requires 650 MB PSS 
required_reserved += \ constants.PLATFORM_CORE_MEMORY_RESERVED_MIB @@ -1005,7 +1005,7 @@ def get_primary_network_type(interface): have 1 primary network type. The additional network type can only be 'data' and is used as a placeholder to indicate that there is at least one VLAN based neutron provider network associated to the interface. This - information is used to determine whether the vswitch on the compute needs + information is used to determine whether the vswitch on the worker needs to control the interface or not. This function examines the list of network types, discards the secondary type (if any) and returns the primary network type. @@ -1215,7 +1215,7 @@ def get_personalities(host_obj): def is_cpe(host_obj): return (host_has_function(host_obj, constants.CONTROLLER) and - host_has_function(host_obj, constants.COMPUTE)) + host_has_function(host_obj, constants.WORKER)) def output_to_dict(output): diff --git a/sysinv/sysinv/sysinv/sysinv/conductor/kube_app.py b/sysinv/sysinv/sysinv/sysinv/conductor/kube_app.py index 4a86b79876..4806b566fe 100644 --- a/sysinv/sysinv/sysinv/sysinv/conductor/kube_app.py +++ b/sysinv/sysinv/sysinv/sysinv/conductor/kube_app.py @@ -584,7 +584,7 @@ class AppOperator(object): # Get controller host(s) controller_hosts =\ self._dbapi.ihost_get_by_personality(constants.CONTROLLER) - if constants.COMPUTE in controller_hosts[0].subfunctions: + if constants.WORKER in controller_hosts[0].subfunctions: # AIO system labels = controller_labels_set.union(compute_labels_set) if op == constants.LABEL_ASSIGN_OP: @@ -594,7 +594,7 @@ class AppOperator(object): else: # Standard system compute_hosts =\ - self._dbapi.ihost_get_by_personality(constants.COMPUTE) + self._dbapi.ihost_get_by_personality(constants.WORKER) if op == constants.LABEL_ASSIGN_OP: self._assign_host_labels(controller_hosts, controller_labels_set) self._assign_host_labels(compute_hosts, compute_labels_set) diff --git a/sysinv/sysinv/sysinv/sysinv/conductor/manager.py 
b/sysinv/sysinv/sysinv/sysinv/conductor/manager.py index ea1b071424..b205aa5ed1 100644 --- a/sysinv/sysinv/sysinv/sysinv/conductor/manager.py +++ b/sysinv/sysinv/sysinv/sysinv/conductor/manager.py @@ -347,13 +347,13 @@ class ConductorManager(service.PeriodicService): # At this point we are swacting to controller-0 which has just been # downgraded. - # Before downgrading controller-0 all storage/compute nodes were locked + # Before downgrading controller-0 all storage/worker nodes were locked # The database of the from_load is not aware of this, so we set the # state in the database to match the state of the system. This does not # actually lock the nodes. hosts = self.dbapi.ihost_get_list() for host in hosts: - if host.personality not in [constants.COMPUTE, constants.STORAGE]: + if host.personality not in [constants.WORKER, constants.STORAGE]: continue self.dbapi.ihost_update(host.uuid, { 'administrative': constants.ADMIN_LOCKED}) @@ -455,8 +455,8 @@ class ConductorManager(service.PeriodicService): }, {'service': constants.SERVICE_TYPE_PLATFORM, 'section': constants.SERVICE_PARAM_SECTION_PLATFORM_MAINTENANCE, - 'name': constants.SERVICE_PARAM_PLAT_MTCE_COMPUTE_BOOT_TIMEOUT, - 'value': constants.SERVICE_PARAM_PLAT_MTCE_COMPUTE_BOOT_TIMEOUT_DEFAULT, + 'name': constants.SERVICE_PARAM_PLAT_MTCE_WORKER_BOOT_TIMEOUT, + 'value': constants.SERVICE_PARAM_PLAT_MTCE_WORKER_BOOT_TIMEOUT_DEFAULT, }, {'service': constants.SERVICE_TYPE_PLATFORM, 'section': constants.SERVICE_PARAM_SECTION_PLATFORM_MAINTENANCE, @@ -968,18 +968,18 @@ class ConductorManager(service.PeriodicService): sw_version = target_load.software_version if (host.personality == constants.CONTROLLER and - constants.COMPUTE in tsc.subfunctions): + constants.WORKER in tsc.subfunctions): if constants.LOWLATENCY in host.subfunctions: pxe_config = "pxe-smallsystem_lowlatency-install-%s" % sw_version else: pxe_config = "pxe-smallsystem-install-%s" % sw_version elif host.personality == constants.CONTROLLER: pxe_config = 
"pxe-controller-install-%s" % sw_version - elif host.personality == constants.COMPUTE: + elif host.personality == constants.WORKER: if constants.LOWLATENCY in host.subfunctions: - pxe_config = "pxe-compute_lowlatency-install-%s" % sw_version + pxe_config = "pxe-worker_lowlatency-install-%s" % sw_version else: - pxe_config = "pxe-compute-install-%s" % sw_version + pxe_config = "pxe-worker-install-%s" % sw_version elif host.personality == constants.STORAGE: pxe_config = "pxe-storage-install-%s" % sw_version @@ -1419,13 +1419,13 @@ class ConductorManager(service.PeriodicService): % (host.hostname, ceph_mon_gib)) self.dbapi.ceph_mon_create(values) - def config_compute_for_ceph(self, context): + def config_worker_for_ceph(self, context): """ - configure compute nodes for adding ceph + configure worker nodes for adding ceph :param context: :return: none """ - personalities = [constants.COMPUTE] + personalities = [constants.WORKER] config_uuid = self._config_update_hosts(context, personalities) config_dict = { "personalities": personalities, @@ -1437,7 +1437,7 @@ class ConductorManager(service.PeriodicService): """Update the remotelogging configuration""" personalities = [constants.CONTROLLER, - constants.COMPUTE, + constants.WORKER, constants.STORAGE] config_uuid = self._config_update_hosts(context, personalities) @@ -1449,7 +1449,7 @@ class ConductorManager(service.PeriodicService): self._config_apply_runtime_manifest(context, config_uuid, config_dict) config_dict = { - "personalities": [constants.COMPUTE, constants.STORAGE], + "personalities": [constants.WORKER, constants.STORAGE], "classes": ['platform::remotelogging::runtime'], } self._config_apply_runtime_manifest(context, config_uuid, config_dict) @@ -1457,8 +1457,8 @@ class ConductorManager(service.PeriodicService): def get_magnum_cluster_count(self, context): return self._openstack.get_magnum_cluster_count() - def _configure_compute_host(self, context, host): - """Configure a compute host with the supplied data. 
+ def _configure_worker_host(self, context, host): + """Configure a worker host with the supplied data. Does the following tasks: - Create or update entries in address table @@ -1472,7 +1472,7 @@ class ConductorManager(service.PeriodicService): # Only update the config if the host is running the same version as # the active controller. if self.host_load_matches_sw_version(host): - # Only generate the config files if the compute host is unlocked. + # Only generate the config files if the worker host is unlocked. if (host.administrative == constants.ADMIN_UNLOCKED or host.action == constants.FORCE_UNLOCK_ACTION or host.action == constants.UNLOCK_ACTION): @@ -1574,8 +1574,8 @@ class ConductorManager(service.PeriodicService): elif host.hostname == constants.CONTROLLER_1_HOSTNAME: self.controller_1_posted = False - def _unconfigure_compute_host(self, host, is_cpe=False): - """Unconfigure a compute host. + def _unconfigure_worker_host(self, host, is_cpe=False): + """Unconfigure a worker host. Does the following tasks: - Remove the puppet hiera data configuration for host @@ -1605,12 +1605,12 @@ class ConductorManager(service.PeriodicService): self._remove_pxe_config(host) def configure_ihost(self, context, host, - do_compute_apply=False): + do_worker_apply=False): """Configure a host. :param context: an admin context. :param host: a host object. - :param do_compute_apply: configure the compute subfunctions of the host. + :param do_worker_apply: configure the worker subfunctions of the host. 
""" LOG.debug("configure_ihost %s" % host.hostname) @@ -1623,8 +1623,8 @@ class ConductorManager(service.PeriodicService): if host.personality == constants.CONTROLLER: self._configure_controller_host(context, host) - elif host.personality == constants.COMPUTE: - self._configure_compute_host(context, host) + elif host.personality == constants.WORKER: + self._configure_worker_host(context, host) elif host.personality == constants.STORAGE: self._configure_storage_host(context, host) else: @@ -1632,10 +1632,10 @@ class ConductorManager(service.PeriodicService): "Invalid method call: unsupported personality: %s") % host.personality) - if do_compute_apply: + if do_worker_apply: # Apply the manifests immediately puppet_common.puppet_apply_manifest(host.mgmt_ip, - constants.COMPUTE, + constants.WORKER, do_reboot=True) return host @@ -1659,8 +1659,8 @@ class ConductorManager(service.PeriodicService): for personality in personalities: if personality == constants.CONTROLLER: self._unconfigure_controller_host(ihost_obj) - elif personality == constants.COMPUTE: - self._unconfigure_compute_host(ihost_obj, is_cpe) + elif personality == constants.WORKER: + self._unconfigure_worker_host(ihost_obj, is_cpe) elif personality == constants.STORAGE: self._unconfigure_storage_host(ihost_obj) else: @@ -2493,7 +2493,7 @@ class ConductorManager(service.PeriodicService): """Return the initial number of reserved logical cores for platform use. This can be overridden later by the end user.""" cpus = 0 - if cutils.host_has_function(ihost, constants.COMPUTE) and node == 0: + if cutils.host_has_function(ihost, constants.WORKER) and node == 0: cpus += 1 if not hyperthreading else 2 if cutils.host_has_function(ihost, constants.CONTROLLER): cpus += 1 if not hyperthreading else 2 @@ -2503,7 +2503,7 @@ class ConductorManager(service.PeriodicService): cpu_count, hyperthreading): """Return the initial number of reserved logical cores for vswitch use. 
This can be overridden later by the end user.""" - if cutils.host_has_function(ihost, constants.COMPUTE) and node == 0: + if cutils.host_has_function(ihost, constants.WORKER) and node == 0: physical_cores = (cpu_count / 2) if hyperthreading else cpu_count system_mode = self.dbapi.isystem_get_one().system_mode if system_mode == constants.SYSTEM_MODE_SIMPLEX: @@ -2999,7 +2999,7 @@ class ConductorManager(service.PeriodicService): # a physical volume in the nova-local volume group cinder_device = None if (cutils.host_has_function(ihost, constants.CONTROLLER) and - cutils.host_has_function(ihost, constants.COMPUTE)): + cutils.host_has_function(ihost, constants.WORKER)): if lvm_config: cinder_device = cutils._get_cinder_device(self.dbapi, @@ -4228,11 +4228,11 @@ class ConductorManager(service.PeriodicService): kubernetes_config = utils.is_kubernetes_config(self.dbapi) - if (cutils.host_has_function(ihost, constants.COMPUTE) and not + if (cutils.host_has_function(ihost, constants.WORKER) and not kubernetes_config): if availability == constants.VIM_SERVICES_ENABLED: # report to nova the host aggregate groupings now that - # the compute node is available + # the worker node is available LOG.info("AGG iplatform available for ihost= %s imsg= %s" % (ihost_uuid, imsg_dict)) # AGG10 noted 13secs in vbox between nova manifests applied and @@ -4361,7 +4361,7 @@ class ConductorManager(service.PeriodicService): # Create the host entry in neutron to allow for data interfaces to # be configured on a combined node if (constants.CONTROLLER in subfunctions and - constants.COMPUTE in subfunctions): + constants.WORKER in subfunctions): try: ihost = self.dbapi.ihost_get(ihost_uuid) except exception.ServerNotFound: @@ -4642,7 +4642,7 @@ class ConductorManager(service.PeriodicService): return if upgrade.state == constants.UPGRADE_ACTIVATING: - personalities = [constants.CONTROLLER, constants.COMPUTE] + personalities = [constants.CONTROLLER, constants.WORKER] all_manifests_applied = True hosts 
= self.dbapi.ihost_get_list() @@ -4671,7 +4671,7 @@ class ConductorManager(service.PeriodicService): # In CPE upgrades, after swacting to controller-1, we need to clear # the VIM upgrade flag on Controller-0 to allow VMs to be migrated # to controller-1. - if constants.COMPUTE in tsc.subfunctions: + if constants.WORKER in tsc.subfunctions: try: controller_0 = self.dbapi.ihost_get_by_hostname( constants.CONTROLLER_0_HOSTNAME) @@ -5285,7 +5285,7 @@ class ConductorManager(service.PeriodicService): """Update the NTP configuration""" if service_change: personalities = [constants.CONTROLLER, - constants.COMPUTE, + constants.WORKER, constants.STORAGE] else: personalities = [constants.CONTROLLER] @@ -5294,7 +5294,7 @@ class ConductorManager(service.PeriodicService): def update_ptp_config(self, context): """Update the PTP configuration""" personalities = [constants.CONTROLLER, - constants.COMPUTE, + constants.WORKER, constants.STORAGE] self._config_update_hosts(context, personalities) @@ -5310,7 +5310,7 @@ class ConductorManager(service.PeriodicService): """ # update manifest files and notify agents to apply timezone files - personalities = [constants.COMPUTE, + personalities = [constants.WORKER, constants.STORAGE] config_uuid = self._config_update_hosts(context, personalities) @@ -5338,7 +5338,7 @@ class ConductorManager(service.PeriodicService): # update manifest files and notifiy agents to apply them personalities = [constants.CONTROLLER, - constants.COMPUTE, + constants.WORKER, constants.STORAGE] config_uuid = self._config_update_hosts(context, personalities) @@ -5381,7 +5381,7 @@ class ConductorManager(service.PeriodicService): self._config_update_hosts(context, [constants.CONTROLLER], reboot=True) - config_uuid = self._config_update_hosts(context, [constants.COMPUTE], + config_uuid = self._config_update_hosts(context, [constants.WORKER], reboot=False) extoam = self.dbapi.iextoam_get_one() @@ -5389,9 +5389,9 @@ class ConductorManager(service.PeriodicService): 
self._update_hosts_file('oamcontroller', extoam.oam_floating_ip, active=False) - # make changes to the computes + # make changes to the workers config_dict = { - "personalities": [constants.COMPUTE], + "personalities": [constants.WORKER], "classes": ['openstack::nova::compute::runtime'] } self._config_apply_runtime_manifest(context, config_uuid, config_dict) @@ -5401,7 +5401,7 @@ class ConductorManager(service.PeriodicService): LOG.info("update_user_config") personalities = [constants.CONTROLLER, - constants.COMPUTE, + constants.WORKER, constants.STORAGE] config_uuid = self._config_update_hosts(context, personalities) @@ -5723,7 +5723,7 @@ class ConductorManager(service.PeriodicService): def config_update_nova_local_backed_hosts(self, context, instance_backing): hosts_uuid = self.hosts_with_nova_local(instance_backing) if hosts_uuid: - personalities = [constants.CONTROLLER, constants.COMPUTE] + personalities = [constants.CONTROLLER, constants.WORKER] self._config_update_hosts(context, personalities, host_uuids=hosts_uuid, @@ -5734,8 +5734,8 @@ class ConductorManager(service.PeriodicService): hosts_uuid = [] hosts = self.dbapi.ihost_get_list() for host in hosts: - if ((host.personality and host.personality == constants.COMPUTE) or - (host.subfunctions and constants.COMPUTE in host.subfunctions)): + if ((host.personality and host.personality == constants.WORKER) or + (host.subfunctions and constants.WORKER in host.subfunctions)): ilvgs = self.dbapi.ilvg_get_by_ihost(host['uuid']) for lvg in ilvgs: if (lvg['lvm_vg_name'] == constants.LVG_NOVA_LOCAL and @@ -6256,8 +6256,8 @@ class ConductorManager(service.PeriodicService): self.dbapi, target=constants.SB_TYPE_CEPH_EXTERNAL) if ceph_conf: - # For NOVA, if nova.conf needs to be updated on compute nodes, the - # task should be set to what? constants.SB_TASK_RECONFIG_COMPUTE? + # For NOVA, if nova.conf needs to be updated on worker nodes, the + # task should be set to what? constants.SB_TASK_RECONFIG_WORKER? 
config_done = True active_controller = utils.HostHelper.get_active_controller(self.dbapi) @@ -6846,7 +6846,7 @@ class ConductorManager(service.PeriodicService): LOG.info("update_infra_config") personalities = [constants.CONTROLLER, - constants.COMPUTE, + constants.WORKER, constants.STORAGE] config_uuid = self._config_update_hosts(context, personalities, @@ -6885,9 +6885,9 @@ class ConductorManager(service.PeriodicService): self._config_apply_runtime_manifest(context, config_uuid, config_dict) - if constants.COMPUTE in host.subfunctions: + if constants.WORKER in host.subfunctions: config_dict = { - 'personalities': [constants.COMPUTE], + 'personalities': [constants.WORKER], 'host_uuids': host.uuid, 'classes': ['openstack::nova::compute::runtime'] } @@ -6911,8 +6911,8 @@ class ConductorManager(service.PeriodicService): config_uuid = self._config_update_hosts(context, personalities, reboot=True) else: - # compute hosts must be rebooted following service reconfig - self._config_update_hosts(context, [constants.COMPUTE], + # worker hosts must be rebooted following service reconfig + self._config_update_hosts(context, [constants.WORKER], reboot=True) # controller hosts will actively apply the manifests config_uuid = self._config_update_hosts(context, @@ -6933,7 +6933,7 @@ class ConductorManager(service.PeriodicService): elif service == constants.SERVICE_TYPE_NOVA: config_uuid = self._config_update_hosts(context, [constants.CONTROLLER, - constants.COMPUTE]) + constants.WORKER]) else: # All other services personalities = [constants.CONTROLLER] @@ -6990,7 +6990,7 @@ class ConductorManager(service.PeriodicService): multipath_state_changed = self._multipath_update_state() if multipath_state_changed: self._config_update_hosts(context, - [constants.CONTROLLER, constants.COMPUTE], + [constants.CONTROLLER, constants.WORKER], reboot=True) elif service == constants.SERVICE_TYPE_PLATFORM: @@ -7009,7 +7009,7 @@ class ConductorManager(service.PeriodicService): } 
self._config_apply_runtime_manifest(context, config_uuid, config_dict) - personalities = [constants.COMPUTE] + personalities = [constants.WORKER] config_uuid = self._config_update_hosts(context, personalities) config_dict = { "personalities": personalities, @@ -7192,7 +7192,7 @@ class ConductorManager(service.PeriodicService): # Apply Neutron manifest on Controller(this # will update the SNAT rules for the SDN controllers) - self._config_update_hosts(context, [constants.COMPUTE], reboot=True) + self._config_update_hosts(context, [constants.WORKER], reboot=True) config_uuid = self._config_update_hosts(context, [constants.CONTROLLER]) @@ -7218,7 +7218,7 @@ class ConductorManager(service.PeriodicService): config_uuid = self._config_update_hosts(context, personalities) self._config_apply_runtime_manifest(context, config_uuid, config_dict) - personalities = [constants.COMPUTE] + personalities = [constants.WORKER] self._config_update_hosts(context, personalities, reboot=True) def update_vswitch_type(self, context): @@ -7241,7 +7241,7 @@ class ConductorManager(service.PeriodicService): if tsc.system_type == constants.TIS_AIO_BUILD: personalities = [constants.CONTROLLER] else: - personalities = [constants.COMPUTE] + personalities = [constants.WORKER] self._config_update_hosts(context, personalities, reboot=True) @@ -7276,13 +7276,13 @@ class ConductorManager(service.PeriodicService): def update_cpu_config(self, context, host_uuid): """Update the cpu assignment configuration on a host""" - # only apply the manifest on the host that has compute sub function + # only apply the manifest on the host that has worker sub function host = self.dbapi.ihost_get(host_uuid) - if constants.COMPUTE in host.subfunctions: + if constants.WORKER in host.subfunctions: force = (not utils.is_host_simplex_controller(host)) LOG.info("update_cpu_config, host uuid: (%s), force: (%s)", host_uuid, str(force)) - personalities = [constants.CONTROLLER, constants.COMPUTE] + personalities = 
[constants.CONTROLLER, constants.WORKER] config_uuid = self._config_update_hosts(context, personalities, host_uuids=[host_uuid]) @@ -7992,7 +7992,7 @@ class ConductorManager(service.PeriodicService): # We will allow controller nodes to re-generate manifests # when in an "provisioning" state. This will allow for # example the ntp configuration to be changed on an CPE - # node before the "compute_config_complete" has been + # node before the "worker_config_complete" has been # executed. if (force or host.invprovision == constants.PROVISIONED or @@ -8872,7 +8872,7 @@ class ConductorManager(service.PeriodicService): to_load = self.dbapi.load_get(upgrade.to_load) to_version = to_load.software_version - personalities = [constants.CONTROLLER, constants.COMPUTE] + personalities = [constants.CONTROLLER, constants.WORKER] config_uuid = self._config_update_hosts(context, personalities) self.dbapi.software_upgrade_update( @@ -8902,7 +8902,7 @@ class ConductorManager(service.PeriodicService): self._config_apply_runtime_manifest(context, config_uuid, config_dict) config_dict = { - "personalities": [constants.COMPUTE], + "personalities": [constants.WORKER], "classes": ['openstack::nova::compute::runtime'] } self._config_apply_runtime_manifest(context, config_uuid, config_dict) @@ -9752,7 +9752,7 @@ class ConductorManager(service.PeriodicService): ceph_conf_file = os.path.join(constants.CEPH_CONF_PATH, ceph_conf_filename) - personalities = [constants.CONTROLLER, constants.COMPUTE] + personalities = [constants.CONTROLLER, constants.WORKER] config_uuid = self._config_update_hosts(context, personalities) config_dict = { 'personalities': personalities, @@ -9900,7 +9900,7 @@ class ConductorManager(service.PeriodicService): # Should only be applicable to the single controller that is up # when the dc role is configured, but add personalities anyway. 
personalities = [constants.CONTROLLER, - constants.COMPUTE, + constants.WORKER, constants.STORAGE] config_uuid = self._config_update_hosts(context, personalities) diff --git a/sysinv/sysinv/sysinv/sysinv/conductor/openstack.py b/sysinv/sysinv/sysinv/sysinv/conductor/openstack.py index fa09bf457b..4e9a7b4219 100644 --- a/sysinv/sysinv/sysinv/sysinv/conductor/openstack.py +++ b/sysinv/sysinv/sysinv/sysinv/conductor/openstack.py @@ -464,7 +464,7 @@ class OpenStackOperator(object): # # can query it from do_aggregate_list # ('Name', 'Availability Zone'); anyways it doesnt - # allow duplicates on Name. can be done prior to compute nodes? + # allow duplicates on Name. can be done prior to worker nodes? # # # On unlock, check whether exists: metadata is a key/value pair # 2. nova aggregate-set-metadata provider_physnet0 \ diff --git a/sysinv/sysinv/sysinv/sysinv/conductor/rpcapi.py b/sysinv/sysinv/sysinv/sysinv/conductor/rpcapi.py index 044c3566b2..e95fe10314 100644 --- a/sysinv/sysinv/sysinv/sysinv/conductor/rpcapi.py +++ b/sysinv/sysinv/sysinv/sysinv/conductor/rpcapi.py @@ -100,7 +100,7 @@ class ConductorAPI(sysinv.openstack.common.rpc.proxy.RpcProxy): ihost_obj=ihost_obj)) def configure_ihost(self, context, host, - do_compute_apply=False): + do_worker_apply=False): """Synchronously, have a conductor configure an ihost. Does the following tasks: @@ -110,12 +110,12 @@ class ConductorAPI(sysinv.openstack.common.rpc.proxy.RpcProxy): :param context: request context. :param host: an ihost object. - :param do_compute_apply: apply the newly created compute manifests. + :param do_worker_apply: apply the newly created worker manifests. 
""" return self.call(context, self.make_msg('configure_ihost', host=host, - do_compute_apply=do_compute_apply)) + do_worker_apply=do_worker_apply)) # TODO(CephPoolsDecouple): remove def configure_osd_pools(self, context, ceph_backend=None, new_pool_size=None, new_pool_min_size=None): @@ -788,13 +788,13 @@ class ConductorAPI(sysinv.openstack.common.rpc.proxy.RpcProxy): """ return self.call(context, self.make_msg('update_lvm_config')) - def config_compute_for_ceph(self, context): - """Synchronously, have the conductor update the compute configuration + def config_worker_for_ceph(self, context): + """Synchronously, have the conductor update the worker configuration for adding ceph. :param context: request context. """ - return self.call(context, self.make_msg('config_compute_for_ceph')) + return self.call(context, self.make_msg('config_worker_for_ceph')) def update_drbd_config(self, context): """Synchronously, have the conductor update the drbd configuration. @@ -876,7 +876,7 @@ class ConductorAPI(sysinv.openstack.common.rpc.proxy.RpcProxy): services=services)) def config_update_nova_local_backed_hosts(self, context, instance_backing): - """Synchronously, have the conductor set the hosts with compute + """Synchronously, have the conductor set the hosts with worker functionality and with a certain nova-local instance backing to config out-of-date. diff --git a/sysinv/sysinv/sysinv/sysinv/db/api.py b/sysinv/sysinv/sysinv/sysinv/db/api.py index 8d049dcb51..769ea3e37a 100644 --- a/sysinv/sysinv/sysinv/sysinv/db/api.py +++ b/sysinv/sysinv/sysinv/sysinv/db/api.py @@ -188,7 +188,7 @@ class Connection(object): sort_key=None, sort_dir=None): """Return a list of servers by personality. :param personality: The personality of the server - e.g. controller or compute + e.g. 
controller or worker returns: A server """ diff --git a/sysinv/sysinv/sysinv/sysinv/db/sqlalchemy/migrate_repo/versions/001_init.py b/sysinv/sysinv/sysinv/sysinv/db/sqlalchemy/migrate_repo/versions/001_init.py index a2913a53c2..b009044d95 100644 --- a/sysinv/sysinv/sysinv/sysinv/db/sqlalchemy/migrate_repo/versions/001_init.py +++ b/sysinv/sysinv/sysinv/sysinv/db/sqlalchemy/migrate_repo/versions/001_init.py @@ -47,7 +47,7 @@ def upgrade(migrate_engine): name='recordtypeEnum') personalityEnum = Enum('controller', - 'compute', + 'worker', 'network', 'storage', 'profile', diff --git a/sysinv/sysinv/sysinv/sysinv/db/sqlalchemy/migrate_repo/versions/035_system_type.py b/sysinv/sysinv/sysinv/sysinv/db/sqlalchemy/migrate_repo/versions/035_system_type.py index fae626eb3f..408f30e226 100644 --- a/sysinv/sysinv/sysinv/sysinv/db/sqlalchemy/migrate_repo/versions/035_system_type.py +++ b/sysinv/sysinv/sysinv/sysinv/db/sqlalchemy/migrate_repo/versions/035_system_type.py @@ -13,7 +13,7 @@ from sysinv.common import constants def _populate_system_type(system_table): - if constants.COMPUTE in tsconfig.subfunctions: + if constants.WORKER in tsconfig.subfunctions: s_type = constants.TIS_AIO_BUILD else: s_type = constants.TIS_STD_BUILD diff --git a/sysinv/sysinv/sysinv/sysinv/db/sqlalchemy/models.py b/sysinv/sysinv/sysinv/sysinv/db/sqlalchemy/models.py index 87c311baeb..beff9361ea 100644 --- a/sysinv/sysinv/sysinv/sysinv/db/sqlalchemy/models.py +++ b/sysinv/sysinv/sysinv/sysinv/db/sqlalchemy/models.py @@ -126,7 +126,7 @@ class ihost(Base): name='invprovisionStateEnum') invPersonalityEnum = Enum('controller', - 'compute', + 'worker', 'network', 'storage', 'profile', diff --git a/sysinv/sysinv/sysinv/sysinv/helm/neutron.py b/sysinv/sysinv/sysinv/sysinv/helm/neutron.py index a495ce3b71..e9c277dc0c 100644 --- a/sysinv/sysinv/sysinv/sysinv/helm/neutron.py +++ b/sysinv/sysinv/sysinv/sysinv/helm/neutron.py @@ -153,7 +153,7 @@ class NeutronHelm(openstack.OpenstackBaseHelm): for host in hosts: 
if (host.invprovision == constants.PROVISIONED): - if constants.COMPUTE in utils.get_personalities(host): + if constants.WORKER in utils.get_personalities(host): hostname = str(host.hostname) host_neutron = { diff --git a/sysinv/sysinv/sysinv/sysinv/helm/nova.py b/sysinv/sysinv/sysinv/sysinv/helm/nova.py index 8bed939eef..138488374e 100644 --- a/sysinv/sysinv/sysinv/sysinv/helm/nova.py +++ b/sysinv/sysinv/sysinv/sysinv/helm/nova.py @@ -269,7 +269,7 @@ class NovaHelm(openstack.OpenstackBaseHelm): host_cpus = self._get_host_cpu_list(host, threads=True) if host_cpus: vm_cpus = self._get_host_cpu_list( - host, function=constants.VM_FUNCTION, threads=True) + host, function=constants.APPLICATION_FUNCTION, threads=True) vm_cpu_list = [c.cpu for c in vm_cpus] vm_cpu_fmt = "\"%s\"" % utils.format_range_set(vm_cpu_list) default_config.update({'vcpu_pin_set': vm_cpu_fmt}) @@ -399,7 +399,7 @@ class NovaHelm(openstack.OpenstackBaseHelm): for host in hosts: if (host.invprovision == constants.PROVISIONED): - if constants.COMPUTE in utils.get_personalities(host): + if constants.WORKER in utils.get_personalities(host): hostname = str(host.hostname) default_config = {} diff --git a/sysinv/sysinv/sysinv/sysinv/puppet/ceph.py b/sysinv/sysinv/sysinv/sysinv/puppet/ceph.py index 46f65d7c0c..05695e733c 100644 --- a/sysinv/sysinv/sysinv/sysinv/puppet/ceph.py +++ b/sysinv/sysinv/sysinv/sysinv/puppet/ceph.py @@ -170,9 +170,9 @@ class CephPuppet(openstack.OpenstackBasePuppet): config.update(self._get_ceph_mon_config(host)) config.update(self._get_ceph_osd_config(host)) - # if it is a compute node and on an secondary region, + # if it is a worker node and on an secondary region, # check if ceph mon configuration is required - if constants.COMPUTE in host.subfunctions and self._region_config(): + if constants.WORKER in host.subfunctions and self._region_config(): from sysinv.conductor import openstack op = openstack.OpenStackOperator(self.dbapi) if self._is_ceph_mon_required(host, op): diff 
--git a/sysinv/sysinv/sysinv/sysinv/puppet/device.py b/sysinv/sysinv/sysinv/sysinv/puppet/device.py index fc5b009bbd..e17b05f09b 100644 --- a/sysinv/sysinv/sysinv/sysinv/puppet/device.py +++ b/sysinv/sysinv/sysinv/sysinv/puppet/device.py @@ -57,7 +57,7 @@ class DevicePuppet(base.BasePuppet): } def get_host_config(self, host): - if constants.COMPUTE not in host.subfunctions: + if constants.WORKER not in host.subfunctions: # configuration only required for compute hosts return {} diff --git a/sysinv/sysinv/sysinv/sysinv/puppet/interface.py b/sysinv/sysinv/sysinv/sysinv/puppet/interface.py index 28efde57c8..272c5ef71c 100644 --- a/sysinv/sysinv/sysinv/sysinv/puppet/interface.py +++ b/sysinv/sysinv/sysinv/sysinv/puppet/interface.py @@ -270,7 +270,7 @@ class InterfacePuppet(base.BasePuppet): # deal with this in a later commit. pnets = {} if (self.openstack and - constants.COMPUTE in utils.get_personalities(host)): + constants.WORKER in utils.get_personalities(host)): pnets = self.openstack.get_providernetworksdict(quiet=True) return pnets @@ -286,19 +286,19 @@ def is_data_network_type(iface): def is_controller(context): """ Determine we are creating a manifest for a controller node; regardless of - whether it has a compute subfunction or not. + whether it has a worker subfunction or not. """ return bool(context['personality'] == constants.CONTROLLER) -def is_compute_subfunction(context): +def is_worker_subfunction(context): """ - Determine if we are creating a manifest for a compute node or a compute + Determine if we are creating a manifest for a worker node or a worker subfunction. 
""" - if context['personality'] == constants.COMPUTE: + if context['personality'] == constants.WORKER: return True - if constants.COMPUTE in context['subfunctions']: + if constants.WORKER in context['subfunctions']: return True return False @@ -662,7 +662,7 @@ def needs_interface_config(context, iface): """ if is_platform_interface(context, iface): return True - elif not is_compute_subfunction(context): + elif not is_worker_subfunction(context): return False elif is_data_interface(context, iface): if not is_dpdk_compatible(context, iface): @@ -1141,7 +1141,7 @@ def generate_driver_config(context, config): """ Generate custom configuration for driver specific parameters. """ - if is_compute_subfunction(context): + if is_worker_subfunction(context): generate_mlx4_core_options(context, config) diff --git a/sysinv/sysinv/sysinv/sysinv/puppet/kubernetes.py b/sysinv/sysinv/sysinv/sysinv/puppet/kubernetes.py index a916277d95..6b103f05a8 100644 --- a/sysinv/sysinv/sysinv/sysinv/puppet/kubernetes.py +++ b/sysinv/sysinv/sysinv/sysinv/puppet/kubernetes.py @@ -65,7 +65,7 @@ class KubernetesPuppet(base.BasePuppet): def get_host_config(self, host): config = {} - if host.personality != constants.COMPUTE: + if host.personality != constants.WORKER: return config if self._kubernetes_enabled(): diff --git a/sysinv/sysinv/sysinv/sysinv/puppet/ldap.py b/sysinv/sysinv/sysinv/sysinv/puppet/ldap.py index 2a22e74b69..92e25665e6 100644 --- a/sysinv/sysinv/sysinv/sysinv/puppet/ldap.py +++ b/sysinv/sysinv/sysinv/sysinv/puppet/ldap.py @@ -64,7 +64,7 @@ class LdapPuppet(base.BasePuppet): bind_anonymous = True if host.personality != constants.CONTROLLER: - # if storage/compute, use bind anonymously + # if storage/worker, use bind anonymously bind_anonymous = True return { 'platform::ldap::params::ldapserver_remote': ldapserver_remote, diff --git a/sysinv/sysinv/sysinv/sysinv/puppet/neutron.py b/sysinv/sysinv/sysinv/sysinv/puppet/neutron.py index c0ad9175d9..c09c2596e5 100644 --- 
a/sysinv/sysinv/sysinv/sysinv/puppet/neutron.py +++ b/sysinv/sysinv/sysinv/sysinv/puppet/neutron.py @@ -157,7 +157,7 @@ class NeutronPuppet(openstack.OpenstackBasePuppet): def get_host_config(self, host): if (constants.CONTROLLER not in utils.get_personalities(host) and - constants.COMPUTE not in utils.get_personalities(host)): + constants.WORKER not in utils.get_personalities(host)): return {} device_mappings = [] diff --git a/sysinv/sysinv/sysinv/sysinv/puppet/nova.py b/sysinv/sysinv/sysinv/sysinv/puppet/nova.py index ca1e04f6c5..e16edd7f03 100644 --- a/sysinv/sysinv/sysinv/sysinv/puppet/nova.py +++ b/sysinv/sysinv/sysinv/sysinv/puppet/nova.py @@ -114,7 +114,7 @@ class NovaPuppet(openstack.OpenstackBasePuppet): raise exception.SysinvException('Failed to generate nova rsa key') # Generate an ecdsa key for the system, which will be used on all - # controller/compute nodes. When external ssh connections to the + # controller/worker nodes. When external ssh connections to the # controllers are made, this key will be stored in the known_hosts file # and allow connections after the controller swacts. The ecdsa key # has precedence over the rsa key, which is why we use ecdsa. 
@@ -340,7 +340,7 @@ class NovaPuppet(openstack.OpenstackBasePuppet): def get_host_config(self, host): config = {} - if constants.COMPUTE in host.subfunctions: + if constants.WORKER in host.subfunctions: # nova storage and compute configuration is required for hosts # with a compute function only config.update(self._get_compute_config(host)) @@ -569,7 +569,7 @@ class NovaPuppet(openstack.OpenstackBasePuppet): def _get_vcpu_pin_set(self, host): vm_cpus = self._get_host_cpu_list( - host, function=constants.VM_FUNCTION, threads=True) + host, function=constants.APPLICATION_FUNCTION, threads=True) cpu_list = [c.cpu for c in vm_cpus] return "\"%s\"" % utils.format_range_set(cpu_list) diff --git a/sysinv/sysinv/sysinv/sysinv/puppet/ovs.py b/sysinv/sysinv/sysinv/sysinv/puppet/ovs.py index 683cdfc06f..a77ee06f76 100644 --- a/sysinv/sysinv/sysinv/sysinv/puppet/ovs.py +++ b/sysinv/sysinv/sysinv/sysinv/puppet/ovs.py @@ -19,7 +19,7 @@ class OVSPuppet(base.BasePuppet): def get_host_config(self, host): config = {} - if (constants.COMPUTE in utils.get_personalities(host) and + if (constants.WORKER in utils.get_personalities(host) and self._vswitch_type() == constants.VSWITCH_TYPE_OVS_DPDK): config.update(self._get_cpu_config(host)) config.update(self._get_memory_config(host)) @@ -346,7 +346,7 @@ class OVSPuppet(base.BasePuppet): def _get_virtual_config(self, host): config = {} - if utils.is_virtual() or utils.is_virtual_compute(host): + if utils.is_virtual() or utils.is_virtual_worker(host): config.update({ 'platform::vswitch::params::iommu_enabled': False, 'platform::vswitch::params::hugepage_dir': '/mnt/huge-2048kB', diff --git a/sysinv/sysinv/sysinv/sysinv/puppet/platform.py b/sysinv/sysinv/sysinv/sysinv/puppet/platform.py index c33117d26a..a471f54986 100644 --- a/sysinv/sysinv/sysinv/sysinv/puppet/platform.py +++ b/sysinv/sysinv/sysinv/sysinv/puppet/platform.py @@ -526,14 +526,14 @@ class PlatformPuppet(base.BasePuppet): def _get_host_cpu_config(self, host): config = {} - if 
constants.COMPUTE in utils.get_personalities(host): + if constants.WORKER in utils.get_personalities(host): host_cpus = self._get_host_cpu_list(host, threads=True) if not host_cpus: return config # Define the full range of CPUs for the compute host max_cpu = max(host_cpus, key=operator.attrgetter('cpu')) - compute_cpu_list = "\"0-%d\"" % max_cpu.cpu + worker_cpu_list = "\"0-%d\"" % max_cpu.cpu platform_cpus_no_threads = self._get_platform_cpu_list(host) vswitch_cpus_no_threads = self._get_vswitch_cpu_list(host) @@ -620,8 +620,8 @@ class PlatformPuppet(base.BasePuppet): platform_cpu_list, platform_cpu_list) config.update({ - 'platform::compute::params::compute_cpu_list': - compute_cpu_list, + 'platform::compute::params::worker_cpu_list': + worker_cpu_list, 'platform::compute::params::platform_cpu_list': platform_cpu_list_with_quotes, 'platform::compute::params::reserved_vswitch_cores': @@ -635,7 +635,7 @@ class PlatformPuppet(base.BasePuppet): def _get_host_memory_config(self, host): config = {} - if constants.COMPUTE in utils.get_personalities(host): + if constants.WORKER in utils.get_personalities(host): host_memory = self.dbapi.imemory_get_by_ihost(host.id) memory_numa_list = utils.get_numa_index_list(host_memory) @@ -716,7 +716,7 @@ class PlatformPuppet(base.BasePuppet): vm_1G = "\"%s\"" % ','.join([str(i) for i in vm_1G_pages]) config.update({ - 'platform::compute::params::compute_base_reserved': + 'platform::compute::params::worker_base_reserved': platform_reserved_memory, 'platform::compute::params::compute_vswitch_reserved': vswitch_reserved_memory, diff --git a/sysinv/sysinv/sysinv/sysinv/puppet/storage.py b/sysinv/sysinv/sysinv/sysinv/puppet/storage.py index bff876b13f..41ecd4915c 100644 --- a/sysinv/sysinv/sysinv/sysinv/puppet/storage.py +++ b/sysinv/sysinv/sysinv/sysinv/puppet/storage.py @@ -205,7 +205,7 @@ class StoragePuppet(base.BasePuppet): # LVM Global Filter is driven by: # - cgts-vg PVs : controllers and all storage # - cinder-volumes PVs: 
controllers - # - nova-local PVs : controllers and all computes + # - nova-local PVs : controllers and all workers # Go through the PVs and pvs = self.dbapi.ipv_get_by_ihost(host.id) diff --git a/sysinv/sysinv/sysinv/sysinv/tests/api/test_interface.py b/sysinv/sysinv/sysinv/sysinv/tests/api/test_interface.py index 880912cc55..2313cbe2d0 100644 --- a/sysinv/sysinv/sysinv/sysinv/tests/api/test_interface.py +++ b/sysinv/sysinv/sysinv/sysinv/tests/api/test_interface.py @@ -258,7 +258,7 @@ class InterfaceTestCase(base.FunctionalTest): if personality == constants.CONTROLLER: self.controller = host else: - self.compute = host + self.worker = host return def _create_ethernet(self, ifname=None, networktype=None, ifclass=None, @@ -377,10 +377,10 @@ class InterfaceTestCase(base.FunctionalTest): self.profile['interfaces'].append(interface) return interface - def _create_compute_bond(self, ifname, networktype=None, ifclass=None, + def _create_worker_bond(self, ifname, networktype=None, ifclass=None, providernetworks=None, expect_errors=False): return self._create_bond(ifname, networktype, ifclass, providernetworks, - self.compute, expect_errors) + self.worker, expect_errors) def _create_vlan(self, ifname, networktype, ifclass, vlan_id, lower_iface=None, providernetworks=None, host=None, @@ -424,12 +424,12 @@ class InterfaceTestCase(base.FunctionalTest): self.profile['interfaces'].append(interface) return interface - def _create_compute_vlan(self, ifname, networktype, ifclass, vlan_id, + def _create_worker_vlan(self, ifname, networktype, ifclass, vlan_id, lower_iface=None, providernetworks=None, host=None, expect_errors=False): return self._create_vlan(ifname, networktype, ifclass, vlan_id, lower_iface, - providernetworks, self.compute, expect_errors) + providernetworks, self.worker, expect_errors) def _post_and_check_success(self, ndict): response = self.post_json('%s' % self._get_path(), ndict) @@ -491,7 +491,7 @@ class InterfaceTestCase(base.FunctionalTest): 
'interface_networks': []} self.system = None self.controller = None - self.compute = None + self.worker = None self._setup_configuration() def test_interface(self): @@ -583,56 +583,56 @@ class InterfaceComputeEthernet(InterfaceTestCase): def _setup_configuration(self): # Setup a sample configuration where the personality is set to a - # compute and all interfaces are ethernet interfaces. + # worker and all interfaces are ethernet interfaces. self._create_host(constants.CONTROLLER, admin=constants.ADMIN_UNLOCKED) self._create_ethernet('oam', constants.NETWORK_TYPE_OAM) self._create_ethernet('mgmt', constants.NETWORK_TYPE_MGMT) self._create_ethernet('infra', constants.NETWORK_TYPE_INFRA) - self._create_host(constants.COMPUTE, constants.COMPUTE, + self._create_host(constants.WORKER, constants.WORKER, mgmt_mac='01:02.03.04.05.C0', mgmt_ip='192.168.24.12', admin=constants.ADMIN_LOCKED) self._create_ethernet('mgmt', constants.NETWORK_TYPE_MGMT, - host=self.compute) + host=self.worker) self._create_ethernet('infra', constants.NETWORK_TYPE_INFRA, - host=self.compute) + host=self.worker) self._create_ethernet('data', constants.NETWORK_TYPE_DATA, constants.INTERFACE_CLASS_DATA, - 'group0-data0', host=self.compute) + 'group0-data0', host=self.worker) self._create_ethernet('sriov', constants.NETWORK_TYPE_PCI_SRIOV, constants.INTERFACE_CLASS_PCI_SRIOV, - 'group0-data1', host=self.compute) + 'group0-data1', host=self.worker) self._create_ethernet('pthru', constants.NETWORK_TYPE_PCI_PASSTHROUGH, constants.INTERFACE_CLASS_PCI_PASSTHROUGH, - 'group0-ext0', host=self.compute) + 'group0-ext0', host=self.worker) port, iface = ( self._create_ethernet('slow', constants.NETWORK_TYPE_DATA, constants.INTERFACE_CLASS_DATA, - 'group0-ext1', host=self.compute)) + 'group0-ext1', host=self.worker)) port['dpdksupport'] = False port, iface = ( self._create_ethernet('mlx4', constants.NETWORK_TYPE_DATA, constants.INTERFACE_CLASS_DATA, - 'group0-ext2', host=self.compute)) + 'group0-ext2', 
host=self.worker)) port['driver'] = 'mlx4_core' port, iface = ( self._create_ethernet('mlx5', constants.NETWORK_TYPE_DATA, constants.INTERFACE_CLASS_DATA, - 'group0-ext3', host=self.compute)) + 'group0-ext3', host=self.worker)) port['driver'] = 'mlx5_core' def setUp(self): super(InterfaceComputeEthernet, self).setUp() - def test_compute_ethernet_profile(self): - self._create_and_apply_profile(self.compute) + def test_worker_ethernet_profile(self): + self._create_and_apply_profile(self.worker) class InterfaceComputeVlanOverEthernet(InterfaceTestCase): @@ -652,32 +652,32 @@ class InterfaceComputeVlanOverEthernet(InterfaceTestCase): constants.INTERFACE_CLASS_PLATFORM, 3, iface) # Setup a sample configuration where the personality is set to a - # compute and all interfaces are vlan interfaces over ethernet + # worker and all interfaces are vlan interfaces over ethernet # interfaces. - self._create_host(constants.COMPUTE, admin=constants.ADMIN_LOCKED) + self._create_host(constants.WORKER, admin=constants.ADMIN_LOCKED) port, iface = self._create_ethernet( - 'pxeboot', constants.NETWORK_TYPE_PXEBOOT, host=self.compute) - self._create_compute_vlan('mgmt', constants.NETWORK_TYPE_MGMT, + 'pxeboot', constants.NETWORK_TYPE_PXEBOOT, host=self.worker) + self._create_worker_vlan('mgmt', constants.NETWORK_TYPE_MGMT, constants.INTERFACE_CLASS_PLATFORM, 2, iface) - self._create_compute_vlan('infra', constants.NETWORK_TYPE_INFRA, + self._create_worker_vlan('infra', constants.NETWORK_TYPE_INFRA, constants.INTERFACE_CLASS_PLATFORM, 3) - self._create_compute_vlan('data', constants.INTERFACE_CLASS_DATA, + self._create_worker_vlan('data', constants.INTERFACE_CLASS_DATA, constants.NETWORK_TYPE_DATA, 5, providernetworks='group0-ext0') self._create_ethernet('sriov', constants.NETWORK_TYPE_PCI_SRIOV, constants.INTERFACE_CLASS_PCI_SRIOV, - 'group0-data0', host=self.compute) + 'group0-data0', host=self.worker) self._create_ethernet('pthru', constants.NETWORK_TYPE_PCI_PASSTHROUGH, 
constants.INTERFACE_CLASS_PCI_PASSTHROUGH, - 'group0-data1', host=self.compute) + 'group0-data1', host=self.worker) def setUp(self): super(InterfaceComputeVlanOverEthernet, self).setUp() - def test_compute_vlan_over_ethernet_profile(self): - self._create_and_apply_profile(self.compute) + def test_worker_vlan_over_ethernet_profile(self): + self._create_and_apply_profile(self.worker) class InterfaceComputeBond(InterfaceTestCase): @@ -691,28 +691,28 @@ class InterfaceComputeBond(InterfaceTestCase): self._create_bond('infra', constants.NETWORK_TYPE_INFRA) # Setup a sample configuration where the personality is set to a - # compute and all interfaces are aggregated ethernet interfaces. - self._create_host(constants.COMPUTE, admin=constants.ADMIN_LOCKED) - self._create_compute_bond('mgmt', constants.NETWORK_TYPE_MGMT) - self._create_compute_bond('infra', constants.NETWORK_TYPE_INFRA) - self._create_compute_bond('data', + # worker and all interfaces are aggregated ethernet interfaces. + self._create_host(constants.WORKER, admin=constants.ADMIN_LOCKED) + self._create_worker_bond('mgmt', constants.NETWORK_TYPE_MGMT) + self._create_worker_bond('infra', constants.NETWORK_TYPE_INFRA) + self._create_worker_bond('data', constants.NETWORK_TYPE_DATA, constants.INTERFACE_CLASS_DATA, providernetworks='group0-data0') self._create_ethernet('sriov', constants.NETWORK_TYPE_PCI_SRIOV, constants.INTERFACE_CLASS_PCI_SRIOV, - 'group0-ext0', host=self.compute) + 'group0-ext0', host=self.worker) self._create_ethernet('pthru', constants.NETWORK_TYPE_PCI_PASSTHROUGH, constants.INTERFACE_CLASS_PCI_PASSTHROUGH, - 'group0-ext1', host=self.compute) + 'group0-ext1', host=self.worker) def setUp(self): super(InterfaceComputeBond, self).setUp() - def test_compute_bond_profile(self): - self._create_and_apply_profile(self.compute) + def test_worker_bond_profile(self): + self._create_and_apply_profile(self.worker) class InterfaceComputeVlanOverBond(InterfaceTestCase): @@ -729,40 +729,40 @@ class 
InterfaceComputeVlanOverBond(InterfaceTestCase): constants.INTERFACE_CLASS_PLATFORM, 3, bond) # Setup a sample configuration where the personality is set to a - # compute and all interfaces are vlan interfaces over aggregated + # worker and all interfaces are vlan interfaces over aggregated # ethernet interfaces. - self._create_host(constants.COMPUTE, admin=constants.ADMIN_LOCKED) - bond = self._create_compute_bond('pxeboot', + self._create_host(constants.WORKER, admin=constants.ADMIN_LOCKED) + bond = self._create_worker_bond('pxeboot', constants.NETWORK_TYPE_PXEBOOT, constants.INTERFACE_CLASS_PLATFORM) - self._create_compute_vlan('mgmt', constants.NETWORK_TYPE_MGMT, + self._create_worker_vlan('mgmt', constants.NETWORK_TYPE_MGMT, constants.INTERFACE_CLASS_PLATFORM, 2, bond) - self._create_compute_vlan('infra', constants.NETWORK_TYPE_INFRA, + self._create_worker_vlan('infra', constants.NETWORK_TYPE_INFRA, constants.INTERFACE_CLASS_PLATFORM, 3, bond) - bond2 = self._create_compute_bond('bond2', constants.NETWORK_TYPE_NONE) - self._create_compute_vlan('data', + bond2 = self._create_worker_bond('bond2', constants.NETWORK_TYPE_NONE) + self._create_worker_vlan('data', constants.NETWORK_TYPE_DATA, constants.INTERFACE_CLASS_DATA, 5, bond2, providernetworks='group0-ext0') - self._create_compute_bond('bond3', constants.NETWORK_TYPE_NONE) + self._create_worker_bond('bond3', constants.NETWORK_TYPE_NONE) self._create_ethernet('sriov', constants.NETWORK_TYPE_PCI_SRIOV, constants.INTERFACE_CLASS_PCI_SRIOV, - 'group0-data0', host=self.compute) + 'group0-data0', host=self.worker) self._create_ethernet('pthru', constants.NETWORK_TYPE_PCI_PASSTHROUGH, constants.INTERFACE_CLASS_PCI_PASSTHROUGH, - 'group0-data1', host=self.compute) + 'group0-data1', host=self.worker) def setUp(self): super(InterfaceComputeVlanOverBond, self).setUp() - def test_compute_vlan_over_bond_profile(self): - self._create_and_apply_profile(self.compute) + def test_worker_vlan_over_bond_profile(self): + 
self._create_and_apply_profile(self.worker) class InterfaceComputeVlanOverDataEthernet(InterfaceTestCase): @@ -776,44 +776,44 @@ class InterfaceComputeVlanOverDataEthernet(InterfaceTestCase): self._create_ethernet('infra', constants.NETWORK_TYPE_INFRA) # Setup a sample configuration where the personality is set to a - # compute and all interfaces are vlan interfaces over data ethernet + # worker and all interfaces are vlan interfaces over data ethernet # interfaces. - self._create_host(constants.COMPUTE, admin=constants.ADMIN_LOCKED) + self._create_host(constants.WORKER, admin=constants.ADMIN_LOCKED) port, iface = ( self._create_ethernet('data', constants.NETWORK_TYPE_DATA, constants.INTERFACE_CLASS_DATA, - 'group0-data0', host=self.compute)) + 'group0-data0', host=self.worker)) self._create_ethernet('mgmt', constants.NETWORK_TYPE_MGMT, - host=self.compute) + host=self.worker) self._create_ethernet('infra', constants.NETWORK_TYPE_INFRA, - host=self.compute) - self._create_compute_vlan('data2', constants.NETWORK_TYPE_DATA, + host=self.worker) + self._create_worker_vlan('data2', constants.NETWORK_TYPE_DATA, constants.INTERFACE_CLASS_DATA, 5, iface, providernetworks='group0-ext0') self._create_ethernet('sriov', constants.NETWORK_TYPE_PCI_SRIOV, constants.INTERFACE_CLASS_PCI_SRIOV, - 'group0-ext1', host=self.compute) + 'group0-ext1', host=self.worker) self._create_ethernet('pthru', constants.NETWORK_TYPE_PCI_PASSTHROUGH, constants.INTERFACE_CLASS_PCI_PASSTHROUGH, - 'group0-ext2', host=self.compute) + 'group0-ext2', host=self.worker) def setUp(self): super(InterfaceComputeVlanOverDataEthernet, self).setUp() - def test_compute_vlan_over_data_ethernet_profile(self): - self._create_and_apply_profile(self.compute) + def test_worker_vlan_over_data_ethernet_profile(self): + self._create_and_apply_profile(self.worker) class InterfaceCpeEthernet(InterfaceTestCase): def _setup_configuration(self): # Setup a sample configuration where the personality is set to a - # controller 
with a compute subfunction and all interfaces are + # controller with a worker subfunction and all interfaces are # ethernet interfaces. - self._create_host(constants.CONTROLLER, constants.COMPUTE, + self._create_host(constants.CONTROLLER, constants.WORKER, admin=constants.ADMIN_LOCKED) self._create_ethernet('oam', constants.NETWORK_TYPE_OAM) self._create_ethernet('mgmt', constants.NETWORK_TYPE_MGMT) @@ -853,9 +853,9 @@ class InterfaceCpeVlanOverEthernet(InterfaceTestCase): def _setup_configuration(self): # Setup a sample configuration where the personality is set to a - # controller with a compute subfunction and all interfaces are + # controller with a worker subfunction and all interfaces are # vlan interfaces over ethernet interfaces. - self._create_host(constants.CONTROLLER, constants.COMPUTE, + self._create_host(constants.CONTROLLER, constants.WORKER, admin=constants.ADMIN_LOCKED) port, iface = self._create_ethernet( 'pxeboot', constants.NETWORK_TYPE_PXEBOOT) @@ -886,10 +886,10 @@ class InterfaceCpeBond(InterfaceTestCase): def _setup_configuration(self): # Setup a sample configuration where the personality is set to a - # controller with a compute subfunction and all interfaces are + # controller with a worker subfunction and all interfaces are # aggregated ethernet interfaces. self._create_host(constants.CONTROLLER, - subfunction=constants.COMPUTE, + subfunction=constants.WORKER, admin=constants.ADMIN_LOCKED) self._create_bond('oam', constants.NETWORK_TYPE_OAM) self._create_bond('mgmt', constants.NETWORK_TYPE_MGMT) @@ -915,9 +915,9 @@ class InterfaceCpeVlanOverBond(InterfaceTestCase): def _setup_configuration(self): # Setup a sample configuration where the personality is set to a - # controller with a compute subfunction and all interfaces are + # controller with a worker subfunction and all interfaces are # vlan interfaces over aggregated ethernet interfaces. 
- self._create_host(constants.CONTROLLER, constants.COMPUTE, + self._create_host(constants.CONTROLLER, constants.WORKER, admin=constants.ADMIN_LOCKED) bond = self._create_bond('pxeboot', constants.NETWORK_TYPE_PXEBOOT) self._create_vlan('oam', constants.NETWORK_TYPE_OAM, @@ -950,9 +950,9 @@ class InterfaceCpeVlanOverDataEthernet(InterfaceTestCase): def _setup_configuration(self): # Setup a sample configuration where the personality is set to a - # controller with a compute subfunction and all interfaces are + # controller with a worker subfunction and all interfaces are # vlan interfaces over data ethernet interfaces. - self._create_host(constants.CONTROLLER, constants.COMPUTE, + self._create_host(constants.CONTROLLER, constants.WORKER, admin=constants.ADMIN_LOCKED) port, iface = ( self._create_ethernet('data', @@ -1007,7 +1007,7 @@ class TestPatch(InterfaceTestCase): def setUp(self): super(TestPatch, self).setUp() self._create_host(constants.CONTROLLER) - self._create_host(constants.COMPUTE, admin=constants.ADMIN_LOCKED) + self._create_host(constants.WORKER, admin=constants.ADMIN_LOCKED) def test_modify_ifname(self): interface = dbutils.create_test_interface(forihostid='1') @@ -1031,10 +1031,10 @@ class TestPatch(InterfaceTestCase): data_bond = self._create_bond('data', constants.NETWORK_TYPE_DATA, constants.INTERFACE_CLASS_DATA, providernetworks='group0-data0', - host=self.compute) + host=self.worker) port, new_ethernet = self._create_ethernet( - 'new', constants.NETWORK_TYPE_NONE, host=self.compute) + 'new', constants.NETWORK_TYPE_NONE, host=self.worker) # Modify AE interface to add another port uses = ','.join(data_bond['uses']) patch_result = self.patch_dict_json( @@ -1047,10 +1047,10 @@ class TestPatch(InterfaceTestCase): # MTU (%s) using this interface def test_mtu_smaller_than_users(self): port, lower_interface = self._create_ethernet( - 'pxeboot', constants.NETWORK_TYPE_PXEBOOT, host=self.compute) + 'pxeboot', constants.NETWORK_TYPE_PXEBOOT, 
host=self.worker) dbutils.create_test_interface( forihostid='2', - ihost_uuid=self.compute.uuid, + ihost_uuid=self.worker.uuid, ifname='data0', networktype=constants.NETWORK_TYPE_DATA, ifclass=constants.INTERFACE_CLASS_DATA, @@ -1071,10 +1071,10 @@ class TestPatch(InterfaceTestCase): # interface ___ def test_vlan_mtu_smaller_than_users(self): port, lower_interface = self._create_ethernet( - 'pxeboot', constants.NETWORK_TYPE_PXEBOOT, host=self.compute) + 'pxeboot', constants.NETWORK_TYPE_PXEBOOT, host=self.worker) upper = dbutils.create_test_interface( forihostid='2', - ihost_uuid=self.compute.uuid, + ihost_uuid=self.worker.uuid, ifname='data0', networktype=constants.NETWORK_TYPE_DATA, ifclass=constants.INTERFACE_CLASS_DATA, @@ -1110,24 +1110,24 @@ class TestPost(InterfaceTestCase): def setUp(self): super(TestPost, self).setUp() self._create_host(constants.CONTROLLER) - self._create_host(constants.COMPUTE, admin=constants.ADMIN_LOCKED) + self._create_host(constants.WORKER, admin=constants.ADMIN_LOCKED) # Expected error: The oam network type is only supported on controller nodes - def test_invalid_oam_on_compute(self): + def test_invalid_oam_on_worker(self): self._create_ethernet('oam', constants.NETWORK_TYPE_OAM, constants.INTERFACE_CLASS_PLATFORM, - host=self.compute, expect_errors=True) + host=self.worker, expect_errors=True) # Expected error: The pci-passthrough, pci-sriov network types are only # valid on Ethernet interfaces def test_invalid_iftype_for_pci_network_type(self): self._create_bond('pthru', constants.NETWORK_TYPE_PCI_PASSTHROUGH, ifclass=constants.INTERFACE_CLASS_PCI_PASSTHROUGH, - host=self.compute, expect_errors=True) + host=self.worker, expect_errors=True) # Expected error: The ___ network type is only supported on nodes supporting - # compute functions - def test_invalid_network_type_on_noncompute(self): + # worker functions + def test_invalid_network_type_on_nonworker(self): self._create_ethernet('data0', constants.NETWORK_TYPE_DATA, 
ifclass=constants.INTERFACE_CLASS_DATA, providernetworks='group0-ext0', @@ -1164,11 +1164,11 @@ class TestPost(InterfaceTestCase): self._create_ethernet('data0', constants.NETWORK_TYPE_DATA, ifclass=constants.INTERFACE_CLASS_DATA, providernetworks='group0-data0', - host=self.compute) + host=self.worker) self._create_ethernet('data0', constants.NETWORK_TYPE_DATA, ifclass=constants.INTERFACE_CLASS_DATA, providernetworks='group0-ext0', - host=self.compute, + host=self.worker, expect_errors=True) def test_ipv4_mode_valid(self): @@ -1187,7 +1187,7 @@ class TestPost(InterfaceTestCase): # mgmt, infra, data, data-vrs interfaces def test_ipv4_mode_networktype_invalid(self): ndict = dbutils.post_get_test_interface( - ihost_uuid=self.compute.uuid, + ihost_uuid=self.worker.uuid, ifname='name', networktype=constants.NETWORK_TYPE_PCI_PASSTHROUGH, ifclass=constants.INTERFACE_CLASS_PCI_PASSTHROUGH, @@ -1244,7 +1244,7 @@ class TestPost(InterfaceTestCase): # Expected error: IPv4 address pool name not specified def test_ipv4_mode_no_pool_invalid(self): ndict = dbutils.post_get_test_interface( - ihost_uuid=self.compute.uuid, + ihost_uuid=self.worker.uuid, ifname='name', networktype=constants.NETWORK_TYPE_MGMT, networks=['1'], @@ -1257,7 +1257,7 @@ class TestPost(InterfaceTestCase): # Expected error: IPv6 address pool name not specified def test_ipv6_mode_no_pool_invalid(self): ndict = dbutils.post_get_test_interface( - ihost_uuid=self.compute.uuid, + ihost_uuid=self.worker.uuid, ifname='name', networktype=constants.NETWORK_TYPE_MGMT, networks=['1'], @@ -1271,7 +1271,7 @@ class TestPost(InterfaceTestCase): # Expected error: Address pool IP family does not match requested family def test_ipv4_pool_family_mismatch_invalid(self): ndict = dbutils.post_get_test_interface( - ihost_uuid=self.compute.uuid, + ihost_uuid=self.worker.uuid, ifname='name', networktype=constants.NETWORK_TYPE_MGMT, networks=['1'], @@ -1286,7 +1286,7 @@ class TestPost(InterfaceTestCase): # Expected error: Address pool 
IP family does not match requested family def test_ipv6_pool_family_mismatch_invalid(self): ndict = dbutils.post_get_test_interface( - ihost_uuid=self.compute.uuid, + ihost_uuid=self.worker.uuid, ifname='name', networktype=constants.NETWORK_TYPE_MGMT, networks=['1'], @@ -1302,7 +1302,7 @@ class TestPost(InterfaceTestCase): # 'vlan' or 'ethernet'. def test_aemode_invalid_iftype(self): ndict = dbutils.post_get_test_interface( - ihost_uuid=self.compute.uuid, + ihost_uuid=self.worker.uuid, providernetworks='group0-data0', ifname='name', networktype=constants.NETWORK_TYPE_DATA, @@ -1316,7 +1316,7 @@ class TestPost(InterfaceTestCase): # in ___ mode should not specify a Tx Hash Policy. def test_aemode_no_txhash(self): ndict = dbutils.post_get_test_interface( - ihost_uuid=self.compute.uuid, + ihost_uuid=self.worker.uuid, providernetworks='group0-data0', ifname='name', networktype=constants.NETWORK_TYPE_DATA, @@ -1330,7 +1330,7 @@ class TestPost(InterfaceTestCase): # 'aggregated ethernet' must have a Tx Hash Policy of 'layer2'. 
def test_aemode_invalid_txhash(self): ndict = dbutils.post_get_test_interface( - ihost_uuid=self.compute.uuid, + ihost_uuid=self.worker.uuid, ifname='name', networktype=constants.NETWORK_TYPE_DATA, ifclass=constants.INTERFACE_CLASS_DATA, @@ -1343,7 +1343,7 @@ class TestPost(InterfaceTestCase): # in 'balanced' or '802.3ad' mode require a valid Tx Hash Policy def test_aemode_invalid_txhash_none(self): ndict = dbutils.post_get_test_interface( - ihost_uuid=self.compute.uuid, + ihost_uuid=self.worker.uuid, providernetworks='group0-data0', ifname='name', networktype=constants.NETWORK_TYPE_DATA, @@ -1354,7 +1354,7 @@ class TestPost(InterfaceTestCase): self._post_and_check_failure(ndict) ndict = dbutils.post_get_test_interface( - ihost_uuid=self.compute.uuid, + ihost_uuid=self.worker.uuid, providernetworks='group0-data0', ifname='name', networktype=constants.NETWORK_TYPE_DATA, @@ -1368,7 +1368,7 @@ class TestPost(InterfaceTestCase): # 'aggregated ethernet' must be in mode '802.3ad' def test_aemode_invalid_mgmt(self): ndict = dbutils.post_get_test_interface( - ihost_uuid=self.compute.uuid, + ihost_uuid=self.worker.uuid, providernetworks='group0-data0', ifname='name', networktype=constants.NETWORK_TYPE_MGMT, @@ -1384,7 +1384,7 @@ class TestPost(InterfaceTestCase): # '802.3ad'. def test_aemode_invalid_data(self): ndict = dbutils.post_get_test_interface( - ihost_uuid=self.compute.uuid, + ihost_uuid=self.worker.uuid, providernetworks='group0-data0', ifname='name', networktype=constants.NETWORK_TYPE_DATA, @@ -1408,7 +1408,7 @@ class TestPost(InterfaceTestCase): def test_aemode_invalid_infra(self): ndict = dbutils.post_get_test_interface( - ihost_uuid=self.compute.uuid, + ihost_uuid=self.worker.uuid, ifname='name', networktype=constants.NETWORK_TYPE_INFRA, networks=['2'], @@ -1422,7 +1422,7 @@ class TestPost(InterfaceTestCase): # on controller. 
def test_no_infra_on_controller(self): ndict = dbutils.post_get_test_interface( - ihost_uuid=self.compute.uuid, + ihost_uuid=self.worker.uuid, ifname='name', networktype=constants.NETWORK_TYPE_INFRA, networks=['2'], @@ -1457,14 +1457,14 @@ class TestPost(InterfaceTestCase): # Expected message: Interface eth0 is already used by another AE interface # bond0 def test_create_bond_invalid_overlap_ae(self): - bond_iface = self._create_compute_bond('bond0', + bond_iface = self._create_worker_bond('bond0', constants.NETWORK_TYPE_DATA, constants.INTERFACE_CLASS_DATA, providernetworks='group0-data0') port, iface1 = self._create_ethernet() ndict = dbutils.post_get_test_interface( - ihost_uuid=self.compute.uuid, + ihost_uuid=self.worker.uuid, providernetworks='group0-ext1', ifname='bond1', networktype=constants.NETWORK_TYPE_DATA, @@ -1477,7 +1477,7 @@ class TestPost(InterfaceTestCase): # Expected message: VLAN id must be between 1 and 4094. def test_create_invalid_vlan_id(self): - self._create_compute_vlan('vlan0', constants.NETWORK_TYPE_DATA, + self._create_worker_vlan('vlan0', constants.NETWORK_TYPE_DATA, ifclass=constants.INTERFACE_CLASS_DATA, vlan_id=4095, providernetworks='group0-ext0', @@ -1486,7 +1486,7 @@ class TestPost(InterfaceTestCase): # Expected message: Interface eth0 is already used by another VLAN # interface vlan0 def test_create_bond_invalid_overlap_vlan(self): - vlan_iface = self._create_compute_vlan( + vlan_iface = self._create_worker_vlan( 'vlan0', constants.NETWORK_TYPE_DATA, ifclass=constants.INTERFACE_CLASS_DATA, @@ -1494,7 +1494,7 @@ class TestPost(InterfaceTestCase): port, iface1 = self._create_ethernet() ndict = dbutils.post_get_test_interface( - ihost_uuid=self.compute.uuid, + ihost_uuid=self.worker.uuid, providernetworks='group0-ext1', ifname='bond0', networktype=constants.NETWORK_TYPE_DATA, @@ -1507,14 +1507,14 @@ class TestPost(InterfaceTestCase): # Expected message: Can only have one interface for vlan type. 
def test_create_vlan_invalid_uses(self): - bond_iface = self._create_compute_bond('bond0', + bond_iface = self._create_worker_bond('bond0', constants.NETWORK_TYPE_DATA, constants.INTERFACE_CLASS_DATA, providernetworks='group0-data0') port, iface1 = self._create_ethernet() ndict = dbutils.post_get_test_interface( - ihost_uuid=self.compute.uuid, + ihost_uuid=self.worker.uuid, providernetworks='group0-ext1', ifname='bond1', networktype=constants.NETWORK_TYPE_DATA, @@ -1528,11 +1528,11 @@ class TestPost(InterfaceTestCase): # Expected message: VLAN interfaces cannot be created over existing VLAN # interfaces def test_create_invalid_vlan_over_vlan(self): - vlan_iface = self._create_compute_vlan( + vlan_iface = self._create_worker_vlan( 'vlan1', constants.NETWORK_TYPE_DATA, constants.INTERFACE_CLASS_DATA, 1, providernetworks='group0-ext0') - self._create_compute_vlan('vlan2', + self._create_worker_vlan('vlan2', constants.NETWORK_TYPE_DATA, constants.INTERFACE_CLASS_DATA, vlan_id=2, @@ -1543,10 +1543,10 @@ class TestPost(InterfaceTestCase): # Expected message: data VLAN cannot be created over a LAG interface with # network type pxeboot def test_create_data_vlan_over_pxeboot_lag(self): - bond_iface = self._create_compute_bond( + bond_iface = self._create_worker_bond( 'pxeboot', constants.NETWORK_TYPE_PXEBOOT, constants.INTERFACE_CLASS_PLATFORM) - self._create_compute_vlan( + self._create_worker_vlan( 'vlan2', constants.NETWORK_TYPE_DATA, constants.INTERFACE_CLASS_DATA, 2, lower_iface=bond_iface, providernetworks='group0-ext1', @@ -1555,10 +1555,10 @@ class TestPost(InterfaceTestCase): # Expected message: data VLAN cannot be created over a LAG interface with # network type mgmt def test_create_data_vlan_over_mgmt_lag(self): - bond_iface = self._create_compute_bond( + bond_iface = self._create_worker_bond( 'mgmt', constants.NETWORK_TYPE_MGMT, constants.INTERFACE_CLASS_PLATFORM) - self._create_compute_vlan( + self._create_worker_vlan( 'vlan2', constants.NETWORK_TYPE_DATA, 
constants.INTERFACE_CLASS_DATA, 2, lower_iface=bond_iface, providernetworks='group0-ext1', @@ -1567,10 +1567,10 @@ class TestPost(InterfaceTestCase): # Expected message: mgmt VLAN cannot be created over a LAG interface with # network type data def test_create_mgmt_vlan_over_data_lag(self): - bond_iface = self._create_compute_bond( + bond_iface = self._create_worker_bond( 'data', constants.NETWORK_TYPE_DATA, constants.INTERFACE_CLASS_DATA, providernetworks='group0-ext1') - self._create_compute_vlan( + self._create_worker_vlan( 'mgmt', constants.NETWORK_TYPE_MGMT, constants.INTERFACE_CLASS_PLATFORM, 2, lower_iface=bond_iface, providernetworks='group0-ext1', @@ -1579,7 +1579,7 @@ class TestPost(InterfaceTestCase): # Expected message: # Provider network(s) not supported for non-data interfaces. def test_create_nondata_provider_network(self): - self._create_compute_bond( + self._create_worker_bond( 'pxeboot', constants.NETWORK_TYPE_PXEBOOT, constants.INTERFACE_CLASS_PLATFORM, providernetworks='group0-data0', expect_errors=True) @@ -1608,7 +1608,7 @@ class TestPost(InterfaceTestCase): networktype=[constants.NETWORK_TYPE_MGMT, constants.NETWORK_TYPE_DATA], providernetworks='group0-data0', - host=self.compute, + host=self.worker, expect_errors=True) # Expected message: @@ -1619,14 +1619,14 @@ class TestPost(InterfaceTestCase): networktype=[constants.NETWORK_TYPE_DATA, constants.NETWORK_TYPE_PXEBOOT], providernetworks='group0-data0', - host=self.compute, + host=self.worker, expect_errors=True) class TestCpePost(InterfaceTestCase): def setUp(self): super(TestCpePost, self).setUp() - self._create_host(constants.CONTROLLER, constants.COMPUTE, + self._create_host(constants.CONTROLLER, constants.WORKER, admin=constants.ADMIN_LOCKED) # Expected message: @@ -1790,7 +1790,7 @@ class TestCpePost(InterfaceTestCase): class TestCpePatch(InterfaceTestCase): def setUp(self): super(TestCpePatch, self).setUp() - self._create_host(constants.CONTROLLER, constants.COMPUTE, + 
self._create_host(constants.CONTROLLER, constants.WORKER, admin=constants.ADMIN_LOCKED) def test_create_invalid_infra_data_ethernet(self): diff --git a/sysinv/sysinv/sysinv/sysinv/tests/api/test_interface_network.py b/sysinv/sysinv/sysinv/sysinv/tests/api/test_interface_network.py index d55abbd819..29fb2c7a1b 100644 --- a/sysinv/sysinv/sysinv/sysinv/tests/api/test_interface_network.py +++ b/sysinv/sysinv/sysinv/sysinv/tests/api/test_interface_network.py @@ -28,13 +28,13 @@ class InterfaceNetworkTestCase(base.FunctionalTest): subfunctions=constants.CONTROLLER, invprovision=constants.PROVISIONED, ) - self.compute = dbutils.create_test_ihost( + self.worker = dbutils.create_test_ihost( id='2', uuid=None, forisystemid=self.system.id, - hostname='compute-0', - personality=constants.COMPUTE, - subfunctions=constants.COMPUTE, + hostname='worker-0', + personality=constants.WORKER, + subfunctions=constants.WORKER, mgmt_mac='01:02.03.04.05.C0', mgmt_ip='192.168.24.12', invprovision=constants.PROVISIONED, @@ -114,73 +114,73 @@ class InterfaceNetworkCreateTestCase(InterfaceNetworkTestCase): controller_interface = dbutils.create_test_interface( ifname='enp0s8', forihostid=self.controller.id) - compute_interface = dbutils.create_test_interface( + worker_interface = dbutils.create_test_interface( ifname='enp0s8', - forihostid=self.compute.id) + forihostid=self.worker.id) controller_interface_network = dbutils.post_get_test_interface_network( interface_uuid=controller_interface.uuid, network_uuid=self.mgmt_network.uuid) self._post_and_check(controller_interface_network, expect_errors=False) - compute_interface_network = dbutils.post_get_test_interface_network( - interface_uuid=compute_interface.uuid, + worker_interface_network = dbutils.post_get_test_interface_network( + interface_uuid=worker_interface.uuid, network_uuid=self.mgmt_network.uuid) - self._post_and_check(compute_interface_network, expect_errors=False) + self._post_and_check(worker_interface_network, 
expect_errors=False) def test_create_infra_interface_network(self): controller_interface = dbutils.create_test_interface( ifname='enp0s8', forihostid=self.controller.id) - compute_interface = dbutils.create_test_interface( + worker_interface = dbutils.create_test_interface( ifname='enp0s8', - forihostid=self.compute.id) + forihostid=self.worker.id) controller_interface_network = dbutils.post_get_test_interface_network( interface_uuid=controller_interface.uuid, network_uuid=self.infra_network.uuid) self._post_and_check(controller_interface_network, expect_errors=False) - compute_interface_network = dbutils.post_get_test_interface_network( - interface_uuid=compute_interface.uuid, + worker_interface_network = dbutils.post_get_test_interface_network( + interface_uuid=worker_interface.uuid, network_uuid=self.infra_network.uuid) - self._post_and_check(compute_interface_network, expect_errors=False) + self._post_and_check(worker_interface_network, expect_errors=False) def test_create_oam_interface_network(self): controller_interface = dbutils.create_test_interface( ifname='enp0s8', forihostid=self.controller.id) - compute_interface = dbutils.create_test_interface( + worker_interface = dbutils.create_test_interface( ifname='enp0s8', - forihostid=self.compute.id) + forihostid=self.worker.id) controller_interface_network = dbutils.post_get_test_interface_network( interface_uuid=controller_interface.uuid, network_uuid=self.oam_network.uuid) self._post_and_check(controller_interface_network, expect_errors=False) - compute_interface_network = dbutils.post_get_test_interface_network( - interface_uuid=compute_interface.uuid, + worker_interface_network = dbutils.post_get_test_interface_network( + interface_uuid=worker_interface.uuid, network_uuid=self.oam_network.uuid) - self._post_and_check(compute_interface_network, expect_errors=False) + self._post_and_check(worker_interface_network, expect_errors=False) def test_create_pxeboot_interface_network(self): controller_interface = 
dbutils.create_test_interface( ifname='enp0s8', forihostid=self.controller.id) - compute_interface = dbutils.create_test_interface( + worker_interface = dbutils.create_test_interface( ifname='enp0s8', - forihostid=self.compute.id) + forihostid=self.worker.id) controller_interface_network = dbutils.post_get_test_interface_network( interface_uuid=controller_interface.uuid, network_uuid=self.pxeboot_network.uuid) self._post_and_check(controller_interface_network, expect_errors=False) - compute_interface_network = dbutils.post_get_test_interface_network( - interface_uuid=compute_interface.uuid, + worker_interface_network = dbutils.post_get_test_interface_network( + interface_uuid=worker_interface.uuid, network_uuid=self.pxeboot_network.uuid) - self._post_and_check(compute_interface_network, expect_errors=False) + self._post_and_check(worker_interface_network, expect_errors=False) def test_create_mgmt_infra_interface_network(self): controller_interface = dbutils.create_test_interface( @@ -190,11 +190,11 @@ class InterfaceNetworkCreateTestCase(InterfaceNetworkTestCase): interface_id=controller_interface.id, network_id=self.mgmt_network.id) - compute_interface = dbutils.create_test_interface( + worker_interface = dbutils.create_test_interface( ifname='enp0s8', - forihostid=self.compute.id) + forihostid=self.worker.id) dbutils.create_test_interface_network( - interface_id=compute_interface.id, + interface_id=worker_interface.id, network_id=self.mgmt_network.id) controller_interface_network = dbutils.post_get_test_interface_network( @@ -202,10 +202,10 @@ class InterfaceNetworkCreateTestCase(InterfaceNetworkTestCase): network_uuid=self.infra_network.uuid) self._post_and_check(controller_interface_network, expect_errors=False) - compute_interface_network = dbutils.post_get_test_interface_network( - interface_uuid=compute_interface.uuid, + worker_interface_network = dbutils.post_get_test_interface_network( + interface_uuid=worker_interface.uuid, 
network_uuid=self.infra_network.uuid) - self._post_and_check(compute_interface_network, expect_errors=False) + self._post_and_check(worker_interface_network, expect_errors=False) # Expected error: # You cannot assign a network of type 'oam' to an interface @@ -218,11 +218,11 @@ class InterfaceNetworkCreateTestCase(InterfaceNetworkTestCase): interface_id=controller_interface.id, network_id=self.mgmt_network.id) - compute_interface = dbutils.create_test_interface( + worker_interface = dbutils.create_test_interface( ifname='enp0s8', - forihostid=self.compute.id) + forihostid=self.worker.id) dbutils.create_test_interface_network( - interface_id=compute_interface.id, + interface_id=worker_interface.id, network_id=self.mgmt_network.id) controller_interface_network = dbutils.post_get_test_interface_network( @@ -230,10 +230,10 @@ class InterfaceNetworkCreateTestCase(InterfaceNetworkTestCase): network_uuid=self.oam_network.uuid) self._post_and_check(controller_interface_network, expect_errors=True) - compute_interface_network = dbutils.post_get_test_interface_network( - interface_uuid=compute_interface.uuid, + worker_interface_network = dbutils.post_get_test_interface_network( + interface_uuid=worker_interface.uuid, network_uuid=self.oam_network.uuid) - self._post_and_check(compute_interface_network, expect_errors=True) + self._post_and_check(worker_interface_network, expect_errors=True) # Expected error: # You cannot assign a network of type 'pxeboot' to an interface @@ -246,11 +246,11 @@ class InterfaceNetworkCreateTestCase(InterfaceNetworkTestCase): interface_id=controller_interface.id, network_id=self.mgmt_network.id) - compute_interface = dbutils.create_test_interface( + worker_interface = dbutils.create_test_interface( ifname='enp0s8', - forihostid=self.compute.id) + forihostid=self.worker.id) dbutils.create_test_interface_network( - interface_id=compute_interface.id, + interface_id=worker_interface.id, network_id=self.mgmt_network.id) controller_interface_network = 
dbutils.post_get_test_interface_network( @@ -258,10 +258,10 @@ class InterfaceNetworkCreateTestCase(InterfaceNetworkTestCase): network_uuid=self.pxeboot_network.uuid) self._post_and_check(controller_interface_network, expect_errors=True) - compute_interface_network = dbutils.post_get_test_interface_network( - interface_uuid=compute_interface.uuid, + worker_interface_network = dbutils.post_get_test_interface_network( + interface_uuid=worker_interface.uuid, network_uuid=self.pxeboot_network.uuid) - self._post_and_check(compute_interface_network, expect_errors=True) + self._post_and_check(worker_interface_network, expect_errors=True) # Expected error: # Interface network with interface ID '%s' and @@ -274,11 +274,11 @@ class InterfaceNetworkCreateTestCase(InterfaceNetworkTestCase): interface_id=controller_interface.id, network_id=self.mgmt_network.id) - compute_interface = dbutils.create_test_interface( + worker_interface = dbutils.create_test_interface( ifname='enp0s8', - forihostid=self.compute.id) + forihostid=self.worker.id) dbutils.create_test_interface_network( - interface_id=compute_interface.id, + interface_id=worker_interface.id, network_id=self.mgmt_network.id) controller_interface_network = dbutils.post_get_test_interface_network( @@ -286,7 +286,7 @@ class InterfaceNetworkCreateTestCase(InterfaceNetworkTestCase): network_uuid=self.mgmt_network.uuid) self._post_and_check(controller_interface_network, expect_errors=True) - compute_interface_network = dbutils.post_get_test_interface_network( - interface_uuid=compute_interface.uuid, + worker_interface_network = dbutils.post_get_test_interface_network( + interface_uuid=worker_interface.uuid, network_uuid=self.mgmt_network.uuid) - self._post_and_check(compute_interface_network, expect_errors=True) + self._post_and_check(worker_interface_network, expect_errors=True) diff --git a/sysinv/sysinv/sysinv/sysinv/tests/api/test_profile.py b/sysinv/sysinv/sysinv/sysinv/tests/api/test_profile.py index 
bbd2c48e61..a9b758b2c7 100644 --- a/sysinv/sysinv/sysinv/sysinv/tests/api/test_profile.py +++ b/sysinv/sysinv/sysinv/sysinv/tests/api/test_profile.py @@ -41,13 +41,13 @@ class ProfileTestCase(base.FunctionalTest): subfunctions=constants.CONTROLLER, invprovision=constants.PROVISIONED, ) - self.compute = dbutils.create_test_ihost( + self.worker = dbutils.create_test_ihost( id='2', uuid=None, forisystemid=self.system.id, - hostname='compute-0', - personality=constants.COMPUTE, - subfunctions=constants.COMPUTE, + hostname='worker-0', + personality=constants.WORKER, + subfunctions=constants.WORKER, mgmt_mac='01:02.03.04.05.C0', mgmt_ip='192.168.24.12', invprovision=constants.PROVISIONED, @@ -76,27 +76,27 @@ class ProfileTestCase(base.FunctionalTest): hugepages_configured=True, forinodeid=self.ctrlcpu.forinodeid)) - self.compnode = self.dbapi.inode_create(self.compute.id, + self.compnode = self.dbapi.inode_create(self.worker.id, dbutils.get_test_node(id=2)) self.compcpu = self.dbapi.icpu_create( - self.compute.id, + self.worker.id, dbutils.get_test_icpu(id=5, cpu=3, forinodeid=self.compnode.id, - forihostid=self.compute.id)) + forihostid=self.worker.id)) self.compmemory = self.dbapi.imemory_create( - self.compute.id, + self.worker.id, dbutils.get_test_imemory(id=2, Hugepagesize=constants.MIB_1G, forinodeid=self.compcpu.forinodeid)) self.disk = self.dbapi.idisk_create( - self.compute.id, + self.worker.id, dbutils.get_test_idisk(device_node='/dev/sdb', device_type=constants.DEVICE_TYPE_HDD)) self.lvg = self.dbapi.ilvg_create( - self.compute.id, + self.worker.id, dbutils.get_test_lvg(lvm_vg_name=constants.LVG_NOVA_LOCAL)) self.pv = self.dbapi.ipv_create( - self.compute.id, + self.worker.id, dbutils.get_test_pv(lvm_vg_name=constants.LVG_NOVA_LOCAL, disk_or_part_uuid=self.disk.uuid)) @@ -129,13 +129,13 @@ class ProfileCreateTestCase(ProfileTestCase): def test_create_memory_success(self): self.profile["profiletype"] = constants.PROFILE_TYPE_MEMORY - self.profile["ihost_uuid"] 
= self.compute.uuid + self.profile["ihost_uuid"] = self.worker.uuid response = self.post_json('%s' % self._get_path(), self.profile) self.assertEqual(http_client.OK, response.status_int) def test_create_storage_success(self): self.profile["profiletype"] = constants.PROFILE_TYPE_STORAGE - self.profile["ihost_uuid"] = self.compute.uuid + self.profile["ihost_uuid"] = self.worker.uuid response = self.post_json('%s' % self._get_path(), self.profile) self.assertEqual(http_client.OK, response.status_int) @@ -176,7 +176,7 @@ class ProfileDeleteTestCase(ProfileTestCase): def test_delete_storage_success(self): self.profile["profiletype"] = constants.PROFILE_TYPE_STORAGE - self.profile["ihost_uuid"] = self.compute.uuid + self.profile["ihost_uuid"] = self.worker.uuid post_response = self.post_json('%s' % self._get_path(), self.profile) profile_data = self.get_json('%s' % self._get_path()) storprofile_data = self.get_json( @@ -227,7 +227,7 @@ class ProfileShowTestCase(ProfileTestCase): def test_show_storage_success(self): self.profile["profiletype"] = constants.PROFILE_TYPE_STORAGE - self.profile["ihost_uuid"] = self.compute.uuid + self.profile["ihost_uuid"] = self.worker.uuid self.post_json('%s' % self._get_path(), self.profile) list_data = self.get_json('%s' % self._get_path()) profile_uuid = list_data['iprofiles'][0]['uuid'] @@ -272,7 +272,7 @@ class ProfileListTestCase(ProfileTestCase): def test_list_storage_success(self): self.profile["profiletype"] = constants.PROFILE_TYPE_STORAGE - self.profile["ihost_uuid"] = self.compute.uuid + self.profile["ihost_uuid"] = self.worker.uuid post_response = self.post_json('%s' % self._get_path(), self.profile) list_data = self.get_json('%s' % self._get_path()) self.assertEqual(post_response.json['uuid'], @@ -296,7 +296,7 @@ class ProfileApplyTestCase(ProfileTestCase): self.assertEqual(http_client.OK, result.status_int) hostcpu_r = self.get_json( - '/ihosts/%s/icpus' % self.compute.uuid) + '/ihosts/%s/icpus' % self.worker.uuid) profile_r 
= self.get_json( '%s/icpus' % self._get_path(profile_uuid)) self.assertEqual(hostcpu_r['icpus'][0]['allocated_function'], @@ -306,20 +306,20 @@ class ProfileApplyTestCase(ProfileTestCase): def test_apply_memory_success(self, mock_is_virtual): mock_is_virtual.return_value = True self.profile["profiletype"] = constants.PROFILE_TYPE_MEMORY - self.profile["ihost_uuid"] = self.compute.uuid + self.profile["ihost_uuid"] = self.worker.uuid response = self.post_json('%s' % self._get_path(), self.profile) self.assertEqual(http_client.OK, response.status_int) list_data = self.get_json('%s' % self._get_path()) profile_uuid = list_data['iprofiles'][0]['uuid'] - result = self.patch_dict_json('/ihosts/%s' % self.compute.id, + result = self.patch_dict_json('/ihosts/%s' % self.worker.id, headers=HEADER, action=constants.APPLY_PROFILE_ACTION, iprofile_uuid=profile_uuid) self.assertEqual(http_client.OK, result.status_int) hostmem_r = self.get_json( - '/ihosts/%s/imemorys' % self.compute.uuid) + '/ihosts/%s/imemorys' % self.worker.uuid) profile_r = self.get_json( '%s/imemorys' % self._get_path(profile_uuid)) self.assertEqual(hostmem_r['imemorys'][0]['platform_reserved_mib'], @@ -331,7 +331,7 @@ class ProfileApplyTestCase(ProfileTestCase): def test_apply_storage_success(self): self.profile["profiletype"] = constants.PROFILE_TYPE_LOCAL_STORAGE - self.profile["ihost_uuid"] = self.compute.uuid + self.profile["ihost_uuid"] = self.worker.uuid response = self.post_json('%s' % self._get_path(), self.profile) self.assertEqual(http_client.OK, response.status_int) @@ -346,21 +346,21 @@ class ProfileApplyTestCase(ProfileTestCase): self.delete('/ilvgs/%s' % self.lvg.uuid) # Apply storage profile - result = self.patch_dict_json('/ihosts/%s' % self.compute.id, + result = self.patch_dict_json('/ihosts/%s' % self.worker.id, headers=HEADER, action=constants.APPLY_PROFILE_ACTION, iprofile_uuid=profile_uuid) self.assertEqual(http_client.OK, result.status_int) hostdisk_r = self.get_json( - 
'/ihosts/%s/idisks' % self.compute.uuid) + '/ihosts/%s/idisks' % self.worker.uuid) profile_r = self.get_json( '%s/idisks' % self._get_path(profile_uuid)) self.assertEqual(hostdisk_r['idisks'][0]['device_path'], profile_r['idisks'][0]['device_path']) hostpv_r = self.get_json( - '/ihosts/%s/ipvs' % self.compute.uuid) + '/ihosts/%s/ipvs' % self.worker.uuid) profile_r = self.get_json( '%s/ipvs' % self._get_path(profile_uuid)) self.assertEqual(hostpv_r['ipvs'][1]['pv_type'], @@ -370,7 +370,7 @@ class ProfileApplyTestCase(ProfileTestCase): profile_r['ipvs'][0]['lvm_pv_name']) hostlvg_r = self.get_json( - '/ihosts/%s/ilvgs' % self.compute.uuid) + '/ihosts/%s/ilvgs' % self.worker.uuid) profile_r = self.get_json( '%s/ilvgs' % self._get_path(profile_uuid)) self.assertEqual(hostlvg_r['ilvgs'][0]['lvm_vg_name'], diff --git a/sysinv/sysinv/sysinv/sysinv/tests/conductor/test_manager.py b/sysinv/sysinv/sysinv/sysinv/tests/conductor/test_manager.py index 8960820ff0..c937aa9ee0 100644 --- a/sysinv/sysinv/sysinv/sysinv/tests/conductor/test_manager.py +++ b/sysinv/sysinv/sysinv/sysinv/tests/conductor/test_manager.py @@ -101,7 +101,7 @@ class ManagerTestCase(base.DbTestCase): 'mgmt_ip': '1.2.3.4', 'hostname': 'newhost', 'invprovision': 'unprovisioned', - 'personality': 'compute', + 'personality': 'worker', 'administrative': 'locked', 'operational': 'disabled', 'availability': 'not-installed', @@ -126,7 +126,7 @@ class ManagerTestCase(base.DbTestCase): ihost['mgmt_ip'] = '1.2.3.4' ihost['hostname'] = 'newhost' ihost['invprovision'] = 'unprovisioned' - ihost['personality'] = 'compute' + ihost['personality'] = 'worker' ihost['administrative'] = 'locked' ihost['operational'] = 'disabled' ihost['availability'] = 'not-installed' @@ -142,7 +142,7 @@ class ManagerTestCase(base.DbTestCase): self.assertEqual(res['mgmt_ip'], '1.2.3.4') self.assertEqual(res['hostname'], 'newhost') self.assertEqual(res['invprovision'], 'unprovisioned') - self.assertEqual(res['personality'], 'compute') + 
self.assertEqual(res['personality'], 'worker') self.assertEqual(res['administrative'], 'locked') self.assertEqual(res['operational'], 'disabled') self.assertEqual(res['availability'], 'not-installed') @@ -179,7 +179,7 @@ class ManagerTestCase(base.DbTestCase): # IOError: [Errno 13] Permission denied: '/tmp/dnsmasq.hosts' self.skipTest("Skipping to prevent failure notification on Jenkins") with open(self.dnsmasq_hosts_file, 'w') as f: - f.write("dhcp-host=08:00:27:0a:fa:fa,compute-1,192.168.204.25,2h\n") + f.write("dhcp-host=08:00:27:0a:fa:fa,worker-1,192.168.204.25,2h\n") ihost = self._create_test_ihost() @@ -187,7 +187,7 @@ class ManagerTestCase(base.DbTestCase): ihost['mgmt_ip'] = '1.2.3.4' ihost['hostname'] = 'newhost' ihost['invprovision'] = 'unprovisioned' - ihost['personality'] = 'compute' + ihost['personality'] = 'worker' ihost['administrative'] = 'locked' ihost['operational'] = 'disabled' ihost['availability'] = 'not-installed' @@ -202,7 +202,7 @@ class ManagerTestCase(base.DbTestCase): with open(self.dnsmasq_hosts_file, 'r') as f: self.assertEqual( f.readline(), - "dhcp-host=08:00:27:0a:fa:fa,compute-1,192.168.204.25,2h\n") + "dhcp-host=08:00:27:0a:fa:fa,worker-1,192.168.204.25,2h\n") self.assertEqual( f.readline(), "dhcp-host=00:11:22:33:44:55,newhost,1.2.3.4,2h\n") @@ -215,7 +215,7 @@ class ManagerTestCase(base.DbTestCase): self.skipTest("Skipping to prevent failure notification on Jenkins") with open(self.dnsmasq_hosts_file, 'w') as f: f.write("dhcp-host=00:11:22:33:44:55,oldhost,1.2.3.4,2h\n") - f.write("dhcp-host=08:00:27:0a:fa:fa,compute-1,192.168.204.25,2h\n") + f.write("dhcp-host=08:00:27:0a:fa:fa,worker-1,192.168.204.25,2h\n") ihost = self._create_test_ihost() @@ -223,7 +223,7 @@ class ManagerTestCase(base.DbTestCase): ihost['mgmt_ip'] = '1.2.3.42' ihost['hostname'] = 'newhost' ihost['invprovision'] = 'unprovisioned' - ihost['personality'] = 'compute' + ihost['personality'] = 'worker' ihost['administrative'] = 'locked' ihost['operational'] = 
'disabled' ihost['availability'] = 'not-installed' @@ -241,7 +241,7 @@ class ManagerTestCase(base.DbTestCase): "dhcp-host=00:11:22:33:44:55,newhost,1.2.3.42,2h\n") self.assertEqual( f.readline(), - "dhcp-host=08:00:27:0a:fa:fa,compute-1,192.168.204.25,2h\n") + "dhcp-host=08:00:27:0a:fa:fa,worker-1,192.168.204.25,2h\n") def test_configure_ihost_no_hostname(self): # Test skipped to prevent error message in Jenkins. Error thrown is: diff --git a/sysinv/sysinv/sysinv/sysinv/tests/conductor/test_rpcapi.py b/sysinv/sysinv/sysinv/sysinv/tests/conductor/test_rpcapi.py index a33187da68..c6b8e6f5f3 100644 --- a/sysinv/sysinv/sysinv/sysinv/tests/conductor/test_rpcapi.py +++ b/sysinv/sysinv/sysinv/sysinv/tests/conductor/test_rpcapi.py @@ -94,4 +94,4 @@ class RPCAPITestCase(base.DbTestCase): self._test_rpcapi('configure_ihost', 'call', host=self.fake_ihost, - do_compute_apply=False) + do_worker_apply=False) diff --git a/sysinv/sysinv/sysinv/sysinv/tests/events_for_testing.yaml b/sysinv/sysinv/sysinv/sysinv/tests/events_for_testing.yaml index adcc9bb335..2a759b4b79 100644 --- a/sysinv/sysinv/sysinv/sysinv/tests/events_for_testing.yaml +++ b/sysinv/sysinv/sysinv/sysinv/tests/events_for_testing.yaml @@ -392,12 +392,12 @@ 200.012: Type: Alarm - Description: controller function has in-service failure while compute services remain healthy. + Description: controller function has in-service failure while worker services remain healthy. Entity_Instance_ID: host= Severity: major Proposed_Repair_Action: |- Lock and then Unlock host to recover. - Avoid using 'Force Lock' action as that will impact compute services running on this host, + Avoid using 'Force Lock' action as that will impact worker services running on this host, If lock action fails then contact next level of support to investigate and recover. 
Maintenance_Action: "degrade - requires manual action" Inhibit_Alarms: false @@ -408,10 +408,10 @@ 200.013: Type: Alarm - Description: compute service of the only available controller is not poperational. Auto-recovery is disabled. Deggrading host instead. + Description: worker service of the only available controller is not poperational. Auto-recovery is disabled. Deggrading host instead. Entity_Instance_ID: host= Severity: major - Proposed_Repair_Action: Enable second controller and Switch Activity (Swact) over to it as soon as possible. Then Lock and Unlock host to recover its local compute service. + Proposed_Repair_Action: Enable second controller and Switch Activity (Swact) over to it as soon as possible. Then Lock and Unlock host to recover its local worker service. Maintenance_Action: "degrade - requires manual action" Inhibit_Alarms: false Alarm_Type: operational-violation @@ -674,8 +674,8 @@ # --------------------------------------------------------------------------- 270.001: Type: Alarm - Description: "Host compute services failure[, reason = ]" - Entity_Instance_ID: host=.services=compute + Description: "Host worker services failure[, reason = ]" + Entity_Instance_ID: host=.services=worker Severity: critical Proposed_Repair_Action: Wait for host services recovery to complete; if problem persists contact next level of support Maintenance_Action: @@ -687,7 +687,7 @@ 270.101: Type: Log - Description: "Host compute services failure[, reason = ]" + Description: "Host worker services failure[, reason = ]" Entity_Instance_ID: tenant=.instance= Severity: critical Alarm_Type: equipment @@ -696,7 +696,7 @@ 270.102: Type: Log - Description: Host compute services enabled + Description: Host worker services enabled Entity_Instance_ID: tenant=.instance= Severity: critical Alarm_Type: equipment @@ -705,7 +705,7 @@ 270.103: Type: Log - Description: Host compute services disabled + Description: Host worker services disabled Entity_Instance_ID: tenant=.instance= 
Severity: critical Alarm_Type: equipment @@ -774,10 +774,10 @@ 300.004: Type: Alarm - Description: No enabled compute host with connectivity to provider network. + Description: No enabled worker host with connectivity to provider network. Entity_Instance_ID: host=.providernet= Severity: major - Proposed_Repair_Action: Enable compute hosts with required provider network connectivity. + Proposed_Repair_Action: Enable worker hosts with required provider network connectivity. Maintenance_Action: Inhibit_Alarms: Alarm_Type: operational-violation diff --git a/sysinv/sysinv/sysinv/sysinv/tests/puppet/test_interface.py b/sysinv/sysinv/sysinv/sysinv/tests/puppet/test_interface.py index 68749a33fc..b095dd78d3 100644 --- a/sysinv/sysinv/sysinv/sysinv/tests/puppet/test_interface.py +++ b/sysinv/sysinv/sysinv/sysinv/tests/puppet/test_interface.py @@ -504,29 +504,29 @@ class InterfaceTestCase(BaseTestCase): self.assertEqual(index[constants.NETWORK_TYPE_OAM], str(self.oam_gateway_address.ip)) - def test_is_compute_subfunction_true(self): - self.host['personality'] = constants.COMPUTE - self.host['subfunctions'] = constants.COMPUTE + def test_is_worker_subfunction_true(self): + self.host['personality'] = constants.WORKER + self.host['subfunctions'] = constants.WORKER self._update_context() - self.assertTrue(interface.is_compute_subfunction(self.context)) + self.assertTrue(interface.is_worker_subfunction(self.context)) - def test_is_compute_subfunction_true_cpe(self): + def test_is_worker_subfunction_true_cpe(self): self.host['personality'] = constants.CONTROLLER - self.host['subfunctions'] = constants.COMPUTE + self.host['subfunctions'] = constants.WORKER self._update_context() - self.assertTrue(interface.is_compute_subfunction(self.context)) + self.assertTrue(interface.is_worker_subfunction(self.context)) - def test_is_compute_subfunction_false(self): + def test_is_worker_subfunction_false(self): self.host['personality'] = constants.STORAGE self.host['subfunctions'] = 
constants.STORAGE self._update_context() - self.assertFalse(interface.is_compute_subfunction(self.context)) + self.assertFalse(interface.is_worker_subfunction(self.context)) - def test_is_compute_subfunction_false_cpe(self): + def test_is_worker_subfunction_false_cpe(self): self.host['personality'] = constants.CONTROLLER self.host['subfunctions'] = constants.CONTROLLER self._update_context() - self.assertFalse(interface.is_compute_subfunction(self.context)) + self.assertFalse(interface.is_worker_subfunction(self.context)) def test_is_pci_interface_true(self): self.iface['ifclass'] = constants.INTERFACE_CLASS_PCI_SRIOV @@ -670,10 +670,10 @@ class InterfaceTestCase(BaseTestCase): self.context, self.iface) self.assertEqual(method, 'manual') - def test_get_interface_address_method_for_pxeboot_compute(self): + def test_get_interface_address_method_for_pxeboot_worker(self): self.iface['ifclass'] = constants.INTERFACE_CLASS_PLATFORM self.iface['networktype'] = constants.NETWORK_TYPE_PXEBOOT - self.host['personality'] = constants.COMPUTE + self.host['personality'] = constants.WORKER self._update_context() method = interface.get_interface_address_method( self.context, self.iface) @@ -697,10 +697,10 @@ class InterfaceTestCase(BaseTestCase): self.context, self.iface) self.assertEqual(method, 'static') - def test_get_interface_address_method_for_mgmt_compute(self): + def test_get_interface_address_method_for_mgmt_worker(self): self.iface['ifclass'] = constants.INTERFACE_CLASS_PLATFORM self.iface['networktype'] = constants.NETWORK_TYPE_MGMT - self.host['personality'] = constants.COMPUTE + self.host['personality'] = constants.WORKER self._update_context() method = interface.get_interface_address_method( self.context, self.iface) @@ -724,10 +724,10 @@ class InterfaceTestCase(BaseTestCase): self.context, self.iface) self.assertEqual(method, 'static') - def test_get_interface_address_method_for_infra_compute(self): + def test_get_interface_address_method_for_infra_worker(self): 
self.iface['ifclass'] = constants.INTERFACE_CLASS_PLATFORM self.iface['networktype'] = constants.NETWORK_TYPE_INFRA - self.host['personality'] = constants.COMPUTE + self.host['personality'] = constants.WORKER self._update_context() method = interface.get_interface_address_method( self.context, self.iface) @@ -889,84 +889,84 @@ class InterfaceTestCase(BaseTestCase): needed = interface.needs_interface_config(self.context, self.iface) self.assertFalse(needed) - def test_needs_interface_config_data_slow_compute(self): + def test_needs_interface_config_data_slow_worker(self): self.iface['ifclass'] = constants.INTERFACE_CLASS_DATA self.iface['networktype'] = constants.NETWORK_TYPE_DATA - self.host['personality'] = constants.COMPUTE + self.host['personality'] = constants.WORKER self.port['dpdksupport'] = False self._update_context() needed = interface.needs_interface_config(self.context, self.iface) self.assertTrue(needed) - def test_needs_interface_config_data_mlx4_compute(self): + def test_needs_interface_config_data_mlx4_worker(self): self.iface['ifclass'] = constants.INTERFACE_CLASS_DATA self.iface['networktype'] = constants.NETWORK_TYPE_DATA - self.host['personality'] = constants.COMPUTE + self.host['personality'] = constants.WORKER self.port['driver'] = interface.DRIVER_MLX_CX3 self._update_context() needed = interface.needs_interface_config(self.context, self.iface) self.assertTrue(needed) - def test_needs_interface_config_data_mlx5_compute(self): + def test_needs_interface_config_data_mlx5_worker(self): self.iface['ifclass'] = constants.INTERFACE_CLASS_DATA self.iface['networktype'] = constants.NETWORK_TYPE_DATA - self.host['personality'] = constants.COMPUTE + self.host['personality'] = constants.WORKER self.port['driver'] = interface.DRIVER_MLX_CX4 self._update_context() needed = interface.needs_interface_config(self.context, self.iface) self.assertTrue(needed) - def test_needs_interface_config_sriov_compute(self): + def 
test_needs_interface_config_sriov_worker(self): self.iface['ifclass'] = constants.INTERFACE_CLASS_PCI_SRIOV self.iface['networktype'] = constants.NETWORK_TYPE_PCI_SRIOV - self.host['personality'] = constants.COMPUTE + self.host['personality'] = constants.WORKER self._update_context() needed = interface.needs_interface_config(self.context, self.iface) self.assertTrue(needed) - def test_needs_interface_config_pthru_compute(self): + def test_needs_interface_config_pthru_worker(self): self.iface['ifclass'] = constants.INTERFACE_CLASS_PCI_PASSTHROUGH self.iface['networktype'] = constants.NETWORK_TYPE_PCI_PASSTHROUGH - self.host['personality'] = constants.COMPUTE + self.host['personality'] = constants.WORKER self._update_context() needed = interface.needs_interface_config(self.context, self.iface) self.assertTrue(needed) - def test_needs_interface_config_data_cpe_compute(self): + def test_needs_interface_config_data_cpe_worker(self): self.iface['ifclass'] = constants.INTERFACE_CLASS_DATA self.iface['networktype'] = constants.NETWORK_TYPE_DATA self.host['personality'] = constants.CONTROLLER - self.host['subfunctions'] = constants.COMPUTE + self.host['subfunctions'] = constants.WORKER self.port['dpdksupport'] = True self._update_context() needed = interface.needs_interface_config(self.context, self.iface) self.assertFalse(needed) - def test_needs_interface_config_data_slow_cpe_compute(self): + def test_needs_interface_config_data_slow_cpe_worker(self): self.iface['ifclass'] = constants.INTERFACE_CLASS_DATA self.iface['networktype'] = constants.NETWORK_TYPE_DATA self.host['personality'] = constants.CONTROLLER - self.host['subfunctions'] = constants.COMPUTE + self.host['subfunctions'] = constants.WORKER self.port['dpdksupport'] = False self._update_context() needed = interface.needs_interface_config(self.context, self.iface) self.assertTrue(needed) - def test_needs_interface_config_data_mlx4_cpe_compute(self): + def test_needs_interface_config_data_mlx4_cpe_worker(self): 
self.iface['ifclass'] = constants.INTERFACE_CLASS_DATA self.iface['networktype'] = constants.NETWORK_TYPE_DATA self.host['personality'] = constants.CONTROLLER - self.host['subfunctions'] = constants.COMPUTE + self.host['subfunctions'] = constants.WORKER self.port['driver'] = interface.DRIVER_MLX_CX3 self._update_context() needed = interface.needs_interface_config(self.context, self.iface) self.assertTrue(needed) - def test_needs_interface_config_data_mlx5_cpe_compute(self): + def test_needs_interface_config_data_mlx5_cpe_worker(self): self.iface['ifclass'] = constants.INTERFACE_CLASS_DATA self.iface['networktype'] = constants.NETWORK_TYPE_DATA self.host['personality'] = constants.CONTROLLER - self.host['subfunctions'] = constants.COMPUTE + self.host['subfunctions'] = constants.WORKER self.port['driver'] = interface.DRIVER_MLX_CX4 self._update_context() needed = interface.needs_interface_config(self.context, self.iface) @@ -981,20 +981,20 @@ class InterfaceTestCase(BaseTestCase): needed = interface.needs_interface_config(self.context, self.iface) self.assertFalse(needed) - def test_needs_interface_config_sriov_cpe_compute(self): + def test_needs_interface_config_sriov_cpe_worker(self): self.iface['ifclass'] = constants.INTERFACE_CLASS_PCI_SRIOV self.iface['networktype'] = constants.NETWORK_TYPE_PCI_SRIOV self.host['personality'] = constants.CONTROLLER - self.host['subfunctions'] = constants.COMPUTE + self.host['subfunctions'] = constants.WORKER self._update_context() needed = interface.needs_interface_config(self.context, self.iface) self.assertTrue(needed) - def test_needs_interface_config_pthru_cpe_compute(self): + def test_needs_interface_config_pthru_cpe_worker(self): self.iface['ifclass'] = constants.INTERFACE_CLASS_PCI_PASSTHROUGH self.iface['networktype'] = constants.NETWORK_TYPE_PCI_PASSTHROUGH self.host['personality'] = constants.CONTROLLER - self.host['subfunctions'] = constants.COMPUTE + self.host['subfunctions'] = constants.WORKER self._update_context() 
needed = interface.needs_interface_config(self.context, self.iface) self.assertTrue(needed) @@ -1192,10 +1192,10 @@ class InterfaceTestCase(BaseTestCase): print(expected) self.assertEqual(expected, config) - def test_get_compute_ethernet_config_mgmt(self): + def test_get_worker_ethernet_config_mgmt(self): self.iface['ifclass'] = constants.INTERFACE_CLASS_PLATFORM self.iface['networktype'] = constants.NETWORK_TYPE_MGMT - self.host['personality'] = constants.COMPUTE + self.host['personality'] = constants.WORKER for network in self.networks: if network['type'] == constants.NETWORK_TYPE_MGMT: net_id = network['id'] @@ -1213,10 +1213,10 @@ class InterfaceTestCase(BaseTestCase): print(expected) self.assertEqual(expected, config) - def test_get_compute_ethernet_config_infra(self): + def test_get_worker_ethernet_config_infra(self): self.iface['ifclass'] = constants.INTERFACE_CLASS_PLATFORM self.iface['networktype'] = constants.NETWORK_TYPE_INFRA - self.host['personality'] = constants.COMPUTE + self.host['personality'] = constants.WORKER for network in self.networks: if network['type'] == constants.NETWORK_TYPE_INFRA: net_id = network['id'] @@ -1234,10 +1234,10 @@ class InterfaceTestCase(BaseTestCase): print(expected) self.assertEqual(expected, config) - def test_get_compute_ethernet_config_pci_sriov(self): + def test_get_worker_ethernet_config_pci_sriov(self): self.iface['ifclass'] = constants.INTERFACE_CLASS_PCI_SRIOV self.iface['networktype'] = constants.NETWORK_TYPE_PCI_SRIOV - self.host['personality'] = constants.COMPUTE + self.host['personality'] = constants.WORKER self._update_context() config = interface.get_interface_network_config( self.context, self.iface) @@ -1251,10 +1251,10 @@ class InterfaceTestCase(BaseTestCase): print(expected) self.assertEqual(expected, config) - def test_get_compute_ethernet_config_pci_pthru(self): + def test_get_worker_ethernet_config_pci_pthru(self): self.iface['ifclass'] = constants.INTERFACE_CLASS_PCI_PASSTHROUGH 
self.iface['networktype'] = constants.NETWORK_TYPE_PCI_PASSTHROUGH - self.host['personality'] = constants.COMPUTE + self.host['personality'] = constants.WORKER self._update_context() config = interface.get_interface_network_config( self.context, self.iface) @@ -1268,11 +1268,11 @@ class InterfaceTestCase(BaseTestCase): print(expected) self.assertEqual(expected, config) - def test_get_compute_ethernet_config_data_slow(self): + def test_get_worker_ethernet_config_data_slow(self): self.iface['ifclass'] = constants.INTERFACE_CLASS_DATA self.iface['networktype'] = constants.NETWORK_TYPE_DATA self.port['dpdksupport'] = False - self.host['personality'] = constants.COMPUTE + self.host['personality'] = constants.WORKER self._update_context() config = interface.get_interface_network_config( self.context, self.iface) @@ -1284,10 +1284,10 @@ class InterfaceTestCase(BaseTestCase): print(expected) self.assertEqual(expected, config) - def test_get_compute_ethernet_config_data_slow_as_bond_slave(self): + def test_get_worker_ethernet_config_data_slow_as_bond_slave(self): bond = self._create_bond_test("data1", constants.INTERFACE_CLASS_DATA, constants.NETWORK_TYPE_DATA) - self.host['personality'] = constants.COMPUTE + self.host['personality'] = constants.WORKER self._update_context() lower_ifname = bond['uses'][0] lower_iface = self.context['interfaces'][lower_ifname] @@ -1305,11 +1305,11 @@ class InterfaceTestCase(BaseTestCase): print(expected) self.assertEqual(expected, config) - def test_get_compute_ethernet_config_data_slow_bridge(self): + def test_get_worker_ethernet_config_data_slow_bridge(self): self.iface['ifclass'] = constants.INTERFACE_CLASS_DATA self.iface['networktype'] = constants.NETWORK_TYPE_DATA self.port['dpdksupport'] = False - self.host['personality'] = constants.COMPUTE + self.host['personality'] = constants.WORKER self._update_context() avp_config, bridge_config = interface.get_bridged_network_config( self.context, self.iface) @@ -1453,10 +1453,10 @@ class 
InterfaceTestCase(BaseTestCase): class InterfaceHostTestCase(BaseTestCase): def _setup_configuration(self): - # Personality is set to compute to avoid issues due to missing OAM + # Personality is set to worker to avoid issues due to missing OAM # interface in this empty/dummy configuration self._create_test_common() - self._create_test_host(constants.COMPUTE) + self._create_test_host(constants.WORKER) def _update_context(self): # ensure DB entries are updated prior to updating the context which @@ -1572,7 +1572,7 @@ class InterfaceHostTestCase(BaseTestCase): def test_needs_interface_config(self): expected_configured = (self.expected_platform_interfaces + [self.expected_bmc_interface]) - if interface.is_compute_subfunction(self.context): + if interface.is_worker_subfunction(self.context): expected_configured += (self.expected_pci_interfaces + self.expected_slow_interfaces + self.expected_mlx_interfaces) @@ -1670,9 +1670,9 @@ class InterfaceControllerVlanOverEthernet(InterfaceHostTestCase): class InterfaceComputeEthernet(InterfaceHostTestCase): def _setup_configuration(self): # Setup a sample configuration where the personality is set to a - # compute and all interfaces are ethernet interfaces. + # worker and all interfaces are ethernet interfaces. self._create_test_common() - self._create_test_host(constants.COMPUTE) + self._create_test_host(constants.WORKER) self._create_ethernet_test('mgmt', None, constants.NETWORK_TYPE_MGMT) self._create_ethernet_test('infra', None, constants.NETWORK_TYPE_INFRA) self._create_ethernet_test('data', constants.INTERFACE_CLASS_DATA, @@ -1710,10 +1710,10 @@ class InterfaceComputeEthernet(InterfaceHostTestCase): class InterfaceComputeVlanOverEthernet(InterfaceHostTestCase): def _setup_configuration(self): # Setup a sample configuration where the personality is set to a - # compute and all interfaces are vlan interfaces over ethernet + # worker and all interfaces are vlan interfaces over ethernet # interfaces. 
self._create_test_common() - self._create_test_host(constants.COMPUTE) + self._create_test_host(constants.WORKER) port, iface = self._create_ethernet_test( 'pxeboot', None, constants.NETWORK_TYPE_PXEBOOT) self._create_vlan_test('mgmt', None, constants.NETWORK_TYPE_MGMT, 2, @@ -1739,8 +1739,8 @@ class InterfaceComputeBond(InterfaceHostTestCase): def _setup_configuration(self): # Setup a sample configuration where the personality is set to a self._create_test_common() - # compute and all interfaces are aggregated ethernet interfaces. - self._create_test_host(constants.COMPUTE) + # worker and all interfaces are aggregated ethernet interfaces. + self._create_test_host(constants.WORKER) self._create_bond_test('mgmt', None, constants.NETWORK_TYPE_MGMT) self._create_bond_test('infra', None, constants.NETWORK_TYPE_INFRA) self._create_bond_test('data', constants.INTERFACE_CLASS_DATA, @@ -1768,10 +1768,10 @@ class InterfaceComputeBond(InterfaceHostTestCase): class InterfaceComputeVlanOverBond(InterfaceHostTestCase): def _setup_configuration(self): # Setup a sample configuration where the personality is set to a - # compute and all interfaces are vlan interfaces over ethernet + # worker and all interfaces are vlan interfaces over ethernet # interfaces. self._create_test_common() - self._create_test_host(constants.COMPUTE) + self._create_test_host(constants.WORKER) bond = self._create_bond_test('pxeboot', None, constants.NETWORK_TYPE_PXEBOOT) self._create_vlan_test('oam', None, constants.NETWORK_TYPE_OAM, 1, bond) @@ -1937,10 +1937,10 @@ class InterfaceCpeVlanOverBond(InterfaceHostTestCase): class InterfaceCpeComputeEthernet(InterfaceHostTestCase): def _setup_configuration(self): # Setup a sample configuration where the personality is set to a - # controller with a compute subfunction and all interfaces are + # controller with a worker subfunction and all interfaces are # ethernet interfaces. 
self._create_test_common() - self._create_test_host(constants.CONTROLLER, constants.COMPUTE) + self._create_test_host(constants.CONTROLLER, constants.WORKER) self._create_ethernet_test('oam', None, constants.NETWORK_TYPE_OAM) self._create_ethernet_test('mgmt', None, constants.NETWORK_TYPE_MGMT) self._create_ethernet_test('infra', None, constants.NETWORK_TYPE_INFRA) @@ -1979,10 +1979,10 @@ class InterfaceCpeComputeEthernet(InterfaceHostTestCase): class InterfaceCpeComputeVlanOverEthernet(InterfaceHostTestCase): def _setup_configuration(self): # Setup a sample configuration where the personality is set to a - # controller with a compute subfunction and all interfaces are + # controller with a worker subfunction and all interfaces are # vlan interfaces over ethernet interfaces. self._create_test_common() - self._create_test_host(constants.CONTROLLER, constants.COMPUTE) + self._create_test_host(constants.CONTROLLER, constants.WORKER) port, iface = self._create_ethernet_test( 'pxeboot', None, constants.NETWORK_TYPE_PXEBOOT) self._create_vlan_test('oam', None, constants.NETWORK_TYPE_OAM, 1, iface) @@ -2008,10 +2008,10 @@ class InterfaceCpeComputeVlanOverEthernet(InterfaceHostTestCase): class InterfaceCpeComputeBond(InterfaceHostTestCase): def _setup_configuration(self): # Setup a sample configuration where the personality is set to a - # controller with a compute subfunction and all interfaces are + # controller with a worker subfunction and all interfaces are # aggregated ethernet interfaces. 
self._create_test_common() - self._create_test_host(constants.CONTROLLER, constants.COMPUTE) + self._create_test_host(constants.CONTROLLER, constants.WORKER) self._create_bond_test('oam', None, constants.NETWORK_TYPE_OAM) self._create_bond_test('mgmt', None, constants.NETWORK_TYPE_MGMT) self._create_bond_test('infra', None, constants.NETWORK_TYPE_INFRA) @@ -2038,10 +2038,10 @@ class InterfaceCpeComputeBond(InterfaceHostTestCase): class InterfaceCpeComputeVlanOverBond(InterfaceHostTestCase): def _setup_configuration(self): # Setup a sample configuration where the personality is set to a - # controller with a compute subfunction and all interfaces are + # controller with a worker subfunction and all interfaces are # vlan interfaces over aggregated ethernet interfaces. self._create_test_common() - self._create_test_host(constants.CONTROLLER, constants.COMPUTE) + self._create_test_host(constants.CONTROLLER, constants.WORKER) bond = self._create_bond_test('pxeboot', None, constants.NETWORK_TYPE_PXEBOOT) self._create_vlan_test('oam', None, constants.NETWORK_TYPE_OAM, 1, bond) diff --git a/compute-huge/.gitignore b/worker-utils/.gitignore similarity index 71% rename from compute-huge/.gitignore rename to worker-utils/.gitignore index 115c07f04e..06c1a0e807 100644 --- a/compute-huge/.gitignore +++ b/worker-utils/.gitignore @@ -3,4 +3,4 @@ .distro/centos7/rpmbuild/SRPMS .distro/centos7/rpmbuild/BUILD .distro/centos7/rpmbuild/BUILDROOT -.distro/centos7/rpmbuild/SOURCES/compute-huge*tar.gz +.distro/centos7/rpmbuild/SOURCES/worker-utils*tar.gz diff --git a/worker-utils/centos/build_srpm.data b/worker-utils/centos/build_srpm.data new file mode 100644 index 0000000000..8a107e9769 --- /dev/null +++ b/worker-utils/centos/build_srpm.data @@ -0,0 +1,3 @@ +SRC_DIR="worker-utils" +COPY_LIST="$SRC_DIR/LICENSE" +TIS_PATCH_VER=1 diff --git a/compute-huge/centos/compute-huge.spec b/worker-utils/centos/worker-utils.spec similarity index 74% rename from compute-huge/centos/compute-huge.spec 
rename to worker-utils/centos/worker-utils.spec index 93aa5c953b..560b41e361 100644 --- a/compute-huge/centos/compute-huge.spec +++ b/worker-utils/centos/worker-utils.spec @@ -1,5 +1,5 @@ -Summary: Initial compute node hugepages and reserved cpus configuration -Name: compute-huge +Summary: Initial worker node resource reservation and misc. utilities +Name: worker-utils Version: 1.0 Release: %{tis_patch_ver}%{?_tis_dist} License: Apache-2.0 @@ -15,11 +15,11 @@ Requires: python Requires: /bin/systemctl %description -Initial compute node hugepages and reserved cpus configuration +Initial worker node resource reservation and misc. utilities %define local_bindir /usr/bin/ %define local_etc_initd /etc/init.d/ -%define local_etc_nova /etc/nova/ +%define local_etc_platform /etc/platform/ %define local_etc_goenabledd /etc/goenabled.d/ %define debug_package %{nil} @@ -34,7 +34,7 @@ make make install BINDIR=%{buildroot}%{local_bindir} \ INITDDIR=%{buildroot}%{local_etc_initd} \ GOENABLEDDIR=%{buildroot}%{local_etc_goenabledd} \ - NOVACONFDIR=%{buildroot}%{local_etc_nova} \ + PLATFORMCONFDIR=%{buildroot}%{local_etc_platform} \ SYSTEMDDIR=%{buildroot}%{_unitdir} %post @@ -50,6 +50,6 @@ rm -rf $RPM_BUILD_ROOT %{local_bindir}/* %{local_etc_initd}/* %{local_etc_goenabledd}/* -%config(noreplace) %{local_etc_nova}/compute_reserved.conf +%config(noreplace) %{local_etc_platform}/worker_reserved.conf %{_unitdir}/affine-platform.sh.service diff --git a/compute-huge/compute-huge/LICENSE b/worker-utils/worker-utils/LICENSE similarity index 100% rename from compute-huge/compute-huge/LICENSE rename to worker-utils/worker-utils/LICENSE diff --git a/compute-huge/compute-huge/Makefile b/worker-utils/worker-utils/Makefile similarity index 81% rename from compute-huge/compute-huge/Makefile rename to worker-utils/worker-utils/Makefile index 3f99622535..97ab6c607b 100644 --- a/compute-huge/compute-huge/Makefile +++ b/worker-utils/worker-utils/Makefile @@ -5,7 +5,7 @@ BINDIR ?= /usr/bin INITDDIR ?= 
/etc/init.d/ GOENABLEDDIR ?= /etc/goenabled.d/ -NOVACONFDIR ?= /etc/nova +PLATFORMCONFDIR ?= /etc/platform SYSTEMDDIR ?= /usr/lib/systemd/system/ all: @@ -15,7 +15,7 @@ install: install -d -m 755 $(BINDIR) install -d -m 755 $(INITDDIR) install -d -m 755 $(GOENABLEDDIR) - install -d -m 755 $(NOVACONFDIR) + install -d -m 755 $(PLATFORMCONFDIR) install -d -m 755 $(SYSTEMDDIR) install -p -D -m 755 affine-platform.sh $(INITDDIR)/affine-platform.sh install -p -D -m 755 cpumap_functions.sh $(INITDDIR)/cpumap_functions.sh @@ -26,6 +26,6 @@ install: install -p -D -m 755 affine-interrupts.sh $(BINDIR)/affine-interrupts.sh install -p -D -m 755 set-cpu-wakeup-latency.sh $(BINDIR)/set-cpu-wakeup-latency.sh install -p -D -m 755 topology $(BINDIR)/topology - install -p -D -m 755 compute_reserved.conf $(NOVACONFDIR)/compute_reserved.conf - install -p -D -m 755 compute-huge-goenabled.sh $(GOENABLEDDIR)/compute-huge-goenabled.sh + install -p -D -m 755 worker_reserved.conf $(PLATFORMCONFDIR)/worker_reserved.conf + install -p -D -m 755 worker-goenabled.sh $(GOENABLEDDIR)/worker-goenabled.sh install -p -D -m 664 affine-platform.sh.service $(SYSTEMDDIR)/affine-platform.sh.service diff --git a/compute-huge/compute-huge/affine-interrupts.sh b/worker-utils/worker-utils/affine-interrupts.sh similarity index 99% rename from compute-huge/compute-huge/affine-interrupts.sh rename to worker-utils/worker-utils/affine-interrupts.sh index 8fe5066d3d..6a7c228e24 100644 --- a/compute-huge/compute-huge/affine-interrupts.sh +++ b/worker-utils/worker-utils/affine-interrupts.sh @@ -1,7 +1,7 @@ #!/bin/bash ################################################################################ # Copyright (c) 2015-2016 Wind River Systems, Inc. 
-# +# # SPDX-License-Identifier: Apache-2.0 # ################################################################################ diff --git a/compute-huge/compute-huge/affine-platform.sh b/worker-utils/worker-utils/affine-platform.sh similarity index 98% rename from compute-huge/compute-huge/affine-platform.sh rename to worker-utils/worker-utils/affine-platform.sh index 3f5033ed94..13f46163f2 100755 --- a/compute-huge/compute-huge/affine-platform.sh +++ b/worker-utils/worker-utils/affine-platform.sh @@ -1,7 +1,7 @@ #!/bin/bash ################################################################################ # Copyright (c) 2013 Wind River Systems, Inc. -# +# # SPDX-License-Identifier: Apache-2.0 # ################################################################################ @@ -24,7 +24,7 @@ LOG_DEBUG=1 . /etc/platform/platform.conf ################################################################################ -# Affine all running tasks to the CPULIST provided in the first parameter. +# Affine all running tasks to the CPULIST provided in the first parameter. 
################################################################################ function affine_tasks { local CPULIST=$1 @@ -64,7 +64,7 @@ function affine_tasks { for i in ${irqs[@]}; do /bin/bash -c "[[ -e /proc/irq/${i} ]] && echo ${CPULIST} > /proc/irq/${i}/smp_affinity_list" 2>/dev/null done - if [[ "$subfunction" == *"compute,lowlatency" ]]; then + if [[ "$subfunction" == *"worker,lowlatency" ]]; then # Affine work queues to platform cores echo ${PLATFORM_COREMASK} > /sys/devices/virtual/workqueue/cpumask echo ${PLATFORM_COREMASK} > /sys/bus/workqueue/devices/writeback/cpumask diff --git a/compute-huge/compute-huge/affine-platform.sh.service b/worker-utils/worker-utils/affine-platform.sh.service similarity index 92% rename from compute-huge/compute-huge/affine-platform.sh.service rename to worker-utils/worker-utils/affine-platform.sh.service index 7ab9bbe89a..f124182bcc 100644 --- a/compute-huge/compute-huge/affine-platform.sh.service +++ b/worker-utils/worker-utils/affine-platform.sh.service @@ -1,7 +1,7 @@ [Unit] Description=Titanium Cloud Affine Platform After=syslog.service network.service dbus.service sw-patch.service -Before=computeconfig.service +Before=workerconfig.service [Service] Type=oneshot diff --git a/compute-huge/compute-huge/cpumap_functions.sh b/worker-utils/worker-utils/cpumap_functions.sh similarity index 97% rename from compute-huge/compute-huge/cpumap_functions.sh rename to worker-utils/worker-utils/cpumap_functions.sh index 2825f7d56c..3202fc3b69 100644 --- a/compute-huge/compute-huge/cpumap_functions.sh +++ b/worker-utils/worker-utils/cpumap_functions.sh @@ -1,7 +1,7 @@ #!/bin/bash ################################################################################ -# Copyright (c) 2013-2015 Wind River Systems, Inc. -# +# Copyright (c) 2013-2018 Wind River Systems, Inc. 
+# # SPDX-License-Identifier: Apache-2.0 # ################################################################################ @@ -106,7 +106,7 @@ function cpulist_to_cpumap { ################################################################################ # Converts a CPUMAP (e.g., 0x00FF00FF) to a CPULIST (e.g., 0-7,16-23). The # CPUMAP is expected in hexidecimal (base=10) form without the leading "0x" -# characters. +# characters. # ################################################################################ function cpumap_to_cpulist { @@ -118,7 +118,7 @@ function cpumap_to_cpulist { for((i=0; i < NR_CPUS; i++)) do ## Since 'bc' does not support any bitwise operators this expression: - ## if (CPUMAP & (1 << CPUID)) + ## if (CPUMAP & (1 << CPUID)) ## has to be rewritten like this: ## if (CPUMAP % (2**(CPUID+1)) > ((2**(CPUID)) - 1)) ## @@ -239,8 +239,8 @@ function any_in_list { function get_platform_cpu_list { ## Define platform cpulist based on engineering a number of cores and ## whether this is a combo or not, and include SMT siblings. - if [[ $subfunction = *compute* ]]; then - RESERVE_CONF="/etc/nova/compute_reserved.conf" + if [[ $subfunction = *worker* ]]; then + RESERVE_CONF="/etc/platform/worker_reserved.conf" [[ -e ${RESERVE_CONF} ]] && source ${RESERVE_CONF} if [ -n "$PLATFORM_CPU_LIST" ];then echo "$PLATFORM_CPU_LIST" @@ -265,7 +265,7 @@ function get_platform_cpu_list { function get_vswitch_cpu_list { ## Define default avp cpulist based on engineered number of platform cores, ## engineered avp cores, and include SMT siblings. 
- if [[ $subfunction = *compute* ]]; then + if [[ $subfunction = *worker* ]]; then VSWITCH_CONF="/etc/vswitch/vswitch.conf" [[ -e ${VSWITCH_CONF} ]] && source ${VSWITCH_CONF} if [ -n "$VSWITCH_CPU_LIST" ];then diff --git a/compute-huge/compute-huge/cpumap_functions_unit_test.sh b/worker-utils/worker-utils/cpumap_functions_unit_test.sh similarity index 100% rename from compute-huge/compute-huge/cpumap_functions_unit_test.sh rename to worker-utils/worker-utils/cpumap_functions_unit_test.sh diff --git a/compute-huge/compute-huge/ps-sched.sh b/worker-utils/worker-utils/ps-sched.sh similarity index 100% rename from compute-huge/compute-huge/ps-sched.sh rename to worker-utils/worker-utils/ps-sched.sh diff --git a/compute-huge/compute-huge/set-cpu-wakeup-latency.sh b/worker-utils/worker-utils/set-cpu-wakeup-latency.sh similarity index 97% rename from compute-huge/compute-huge/set-cpu-wakeup-latency.sh rename to worker-utils/worker-utils/set-cpu-wakeup-latency.sh index 04f4a0fbba..0efa13ec06 100644 --- a/compute-huge/compute-huge/set-cpu-wakeup-latency.sh +++ b/worker-utils/worker-utils/set-cpu-wakeup-latency.sh @@ -51,7 +51,7 @@ for CPU_NUM in $(expand_sequence "$CPU_LIST" " "); do log_debug "Failed to get PM QoS latency limits for CPU ${CPU_NUM}" fi - # Select appropriate CPU wakeup latency based on "low" or "high" policy + # Select appropriate CPU wakeup latency based on "low" or "high" policy case "${POLICY}" in "low") # Get first sleep state for "low" policy @@ -62,7 +62,7 @@ for CPU_NUM in $(expand_sequence "$CPU_LIST" " "); do fi ;; "high") - # Get deepest sleep state for "high" policy + # Get deepest sleep state for "high" policy if [ ${#LIMITS[@]} -eq 0 ]; then LATENCY=1000 else diff --git a/compute-huge/compute-huge/task_affinity_functions.sh b/worker-utils/worker-utils/task_affinity_functions.sh similarity index 99% rename from compute-huge/compute-huge/task_affinity_functions.sh rename to worker-utils/worker-utils/task_affinity_functions.sh index 
ae097aff78..96f7912145 100755 --- a/compute-huge/compute-huge/task_affinity_functions.sh +++ b/worker-utils/worker-utils/task_affinity_functions.sh @@ -86,7 +86,7 @@ function audit_and_reaffine { # The following function is used to verify that any sleeping management tasks # that are on non-platform cores can be migrated to platform cores as soon as # they are scheduled. It can be invoked either manually or from goenableCompute -# script as a scheduled job (with a few minute delay) if desired. +# script as a scheduled job (with a few minute delay) if desired. # The induced tasks migration should be done after all VMs have been restored # following a host reboot in AIO, hence the delay. ################################################################################ @@ -157,7 +157,7 @@ function affine_tasks_to_all_cores { # The following function can be called by any platform service that needs to # temporarily make use of idle VM cores to run a short-duration, service # critical and cpu intensive operation in AIO. For instance, sm can levearage -# the idle cores to speed up swact activity. +# the idle cores to speed up swact activity. # # At the end of the operation, regarless of the result, the service must be # calling function affine_tasks_to_platform_cores to re-affine platform tasks @@ -268,7 +268,7 @@ function affine_tasks_to_platform_cores { # mask(s). if [ "${pid_affinity_mask}" == "${affinity_mask}" ]; then count=$(($count+1)) - # log_debug "Affining pid $pid to platform cores..." + # log_debug "Affining pid $pid to platform cores..." taskset --all-tasks --pid --cpu-list ${PLATFORM_CPUS} $pid &> /dev/null rc=$? 
[[ $rc -ne 0 ]] && log_error "Failed to set CPU affinity for pid $pid, rc=$rc" diff --git a/compute-huge/compute-huge/topology b/worker-utils/worker-utils/topology similarity index 100% rename from compute-huge/compute-huge/topology rename to worker-utils/worker-utils/topology diff --git a/compute-huge/compute-huge/topology.py b/worker-utils/worker-utils/topology.py similarity index 100% rename from compute-huge/compute-huge/topology.py rename to worker-utils/worker-utils/topology.py diff --git a/worker-utils/worker-utils/worker-goenabled.sh b/worker-utils/worker-utils/worker-goenabled.sh new file mode 100644 index 0000000000..35f6c13329 --- /dev/null +++ b/worker-utils/worker-utils/worker-goenabled.sh @@ -0,0 +1,24 @@ +#!/bin/bash +# +# Copyright (c) 2014,2016 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + +# +# worker "goenabled" check. +# +# If a problem was detected during configuration of worker +# resources then the board is not allowed to enable. +# +WORKER_GOENABLED="/var/run/worker_goenabled" + +source "/etc/init.d/log_functions.sh" +source "/usr/bin/tsconfig" + +if [ -e ${VOLATILE_WORKER_CONFIG_COMPLETE} -a ! -f ${WORKER_GOENABLED} ]; then + log_error "Worker manifest CPU configuration check failed. Failing goenabled check." + exit 1 +fi + +exit 0 diff --git a/compute-huge/compute-huge/compute_reserved.conf b/worker-utils/worker-utils/worker_reserved.conf similarity index 57% rename from compute-huge/compute-huge/compute_reserved.conf rename to worker-utils/worker-utils/worker_reserved.conf index e3337bd1ca..6123411aa3 100644 --- a/compute-huge/compute-huge/compute_reserved.conf +++ b/worker-utils/worker-utils/worker_reserved.conf @@ -1,21 +1,13 @@ ################################################################################ -# Copyright (c) 2013-2015 Wind River Systems, Inc. -# +# Copyright (c) 2018 Wind River Systems, Inc. 
+# # SPDX-License-Identifier: Apache-2.0 # ################################################################################ -# COMPUTE Node configuration parameters for reserved memory and physical cores +# WORKER Node configuration parameters for reserved memory and physical cores # used by Base software and VSWITCH. These are resources that libvirt cannot use. # -################################################################################ -# -# Enable compute-huge.sh console debug logs (uncomment) -# -################################################################################ -LOG_DEBUG=1 - - ################################################################################ # # List of logical CPU instances available in the system. This value is used @@ -23,19 +15,19 @@ LOG_DEBUG=1 # validity against the actual number of logical CPU instances in the system. # ################################################################################ -COMPUTE_CPU_LIST="0-1" +WORKER_CPU_LIST="0-1" ################################################################################ # # List of Base software resources reserved per numa node. Each array element # consists of a 3-tuple formatted as: ::. -# +# # Example: To reserve 1500MB and 1 core on NUMA node0, and 1500MB and 1 core # on NUMA node1, the variable must be specified as follows. 
-# COMPUTE_BASE_MEMORY=("node0:1500MB:1" "node1:1500MB:1") +# WORKER_BASE_RESERVED=("node0:1500MB:1" "node1:1500MB:1") # ################################################################################ -COMPUTE_BASE_RESERVED=("node0:8000MB:1" "node1:2000MB:0" "node2:2000MB:0" "node3:2000MB:0") +WORKER_BASE_RESERVED=("node0:8000MB:1" "node1:2000MB:0" "node2:2000MB:0" "node3:2000MB:0") ################################################################################ # @@ -46,10 +38,10 @@ COMPUTE_BASE_RESERVED=("node0:8000MB:1" "node1:2000MB:0" "node2:2000MB:0" "node3 # # For example, to request 256 x 2MB HugeTLB pages on NUMA node0 and node1 the # variable must be specified as follows. -# COMPUTE_VSWITCH_MEMORY=("node0:2048kB:256" "node1:2048kB:256") +# WORKER_VSWITCH_MEMORY=("node0:2048kB:256" "node1:2048kB:256") # ################################################################################ -COMPUTE_VSWITCH_MEMORY=("node0:1048576kB:1" "node1:1048576kB:1" "node2:1048576kB:1" "node3:1048576kB:1") +WORKER_VSWITCH_MEMORY=("node0:1048576kB:1" "node1:1048576kB:1" "node2:1048576kB:1" "node3:1048576kB:1") ################################################################################ # @@ -57,22 +49,7 @@ COMPUTE_VSWITCH_MEMORY=("node0:1048576kB:1" "node1:1048576kB:1" "node2:1048576kB # # Example: To reserve 2 cores on NUMA node0, and 2 cores on NUMA node1, the # variable must be specified as follows. -# COMPUTE_VSWITCH_CORES=("node0:2" "node1:2") +# WORKER_VSWITCH_CORES=("node0:2" "node1:2") # ################################################################################ -COMPUTE_VSWITCH_CORES=("node0:2" "node1:0" "node2:0" "node3:0") - -################################################################################ -# -# List of HugeTLB memory descriptors to configure for Libvirt. Each array element -# consists of a 3-tuple descriptor formatted as: ::. -# The NUMA node specified must exist and the HugeTLB pagesize must be a valid -# value such as 2048kB or 1048576kB.
-# -# For example, to request 256 x 2MB HugeTLB pages on NUMA node0 and node1 the -# variable must be specified as follows. -# COMPUTE_VM_MEMORY_2M=("node0:2048kB:256" "node1:2048kB:256") -# -################################################################################ -COMPUTE_VM_MEMORY_2M=() -COMPUTE_VM_MEMORY_1G=() +WORKER_VSWITCH_CORES=("node0:2" "node1:0" "node2:0" "node3:0") diff --git a/computeconfig/.gitignore b/workerconfig/.gitignore similarity index 71% rename from computeconfig/.gitignore rename to workerconfig/.gitignore index 87be545b53..6ecc09a72a 100644 --- a/computeconfig/.gitignore +++ b/workerconfig/.gitignore @@ -3,4 +3,4 @@ .distro/centos7/rpmbuild/SRPMS .distro/centos7/rpmbuild/BUILD .distro/centos7/rpmbuild/BUILDROOT -.distro/centos7/rpmbuild/SOURCES/computeconfig*tar.gz +.distro/centos7/rpmbuild/SOURCES/workerconfig*tar.gz diff --git a/workerconfig/PKG-INFO b/workerconfig/PKG-INFO new file mode 100644 index 0000000000..5b1f0068bd --- /dev/null +++ b/workerconfig/PKG-INFO @@ -0,0 +1,13 @@ +Metadata-Version: 1.1 +Name: workerconfig +Version: 1.0 +Summary: Initial worker node configuration +Home-page: +Author: Windriver +Author-email: info@windriver.com +License: Apache-2.0 + +Description: Initial worker node configuration + + +Platform: UNKNOWN diff --git a/workerconfig/centos/build_srpm.data b/workerconfig/centos/build_srpm.data new file mode 100644 index 0000000000..5367b32dd1 --- /dev/null +++ b/workerconfig/centos/build_srpm.data @@ -0,0 +1,2 @@ +SRC_DIR="workerconfig" +TIS_PATCH_VER=11 diff --git a/workerconfig/centos/workerconfig.spec b/workerconfig/centos/workerconfig.spec new file mode 100644 index 0000000000..3ae0d0456f --- /dev/null +++ b/workerconfig/centos/workerconfig.spec @@ -0,0 +1,85 @@ +Summary: workerconfig +Name: workerconfig +Version: 1.0 +Release: %{tis_patch_ver}%{?_tis_dist} +License: Apache-2.0 +Group: base +Packager: Wind River +URL: unknown +Source0: %{name}-%{version}.tar.gz + +%define debug_package %{nil} + 
+Requires: systemd + +%description +Initial worker node configuration + +%package -n workerconfig-standalone +Summary: workerconfig +Group: base + +%description -n workerconfig-standalone +Initial worker node configuration + +%package -n workerconfig-subfunction +Summary: workerconfig +Group: base + +%description -n workerconfig-subfunction +Initial worker node configuration + +%define initddir /etc/init.d/ +%define goenableddir /etc/goenabled.d/ +%define systemddir /etc/systemd/system/ + +%prep +%setup + +%build + +%install +make install INITDDIR=%{buildroot}%{initddir} GOENABLEDDIR=%{buildroot}%{goenableddir} SYSTEMDDIR=%{buildroot}%{systemddir} + +%post -n workerconfig-standalone +if [ ! -e $D%{systemddir}/workerconfig.service ]; then + cp $D%{systemddir}/config/workerconfig-standalone.service $D%{systemddir}/workerconfig.service +else + cmp -s $D%{systemddir}/config/workerconfig-standalone.service $D%{systemddir}/workerconfig.service + if [ $? -ne 0 ]; then + rm -f $D%{systemddir}/workerconfig.service + cp $D%{systemddir}/config/workerconfig-standalone.service $D%{systemddir}/workerconfig.service + fi +fi +systemctl enable workerconfig.service + + +%post -n workerconfig-subfunction +if [ ! -e $D%{systemddir}/workerconfig.service ]; then + cp $D%{systemddir}/config/workerconfig-combined.service $D%{systemddir}/workerconfig.service +else + cmp -s $D%{systemddir}/config/workerconfig-combined.service $D%{systemddir}/workerconfig.service + if [ $? 
-ne 0 ]; then + rm -f $D%{systemddir}/workerconfig.service + cp $D%{systemddir}/config/workerconfig-combined.service $D%{systemddir}/workerconfig.service + fi +fi +systemctl enable workerconfig.service + +%clean + +%files +%defattr(-,root,root,-) +%doc LICENSE +%{initddir}/* + +%files -n workerconfig-standalone +%defattr(-,root,root,-) +%dir %{systemddir}/config +%{systemddir}/config/workerconfig-standalone.service +%{goenableddir}/* + +%files -n workerconfig-subfunction +%defattr(-,root,root,-) +%dir %{systemddir}/config +%{systemddir}/config/workerconfig-combined.service diff --git a/computeconfig/computeconfig/LICENSE b/workerconfig/workerconfig/LICENSE similarity index 100% rename from computeconfig/computeconfig/LICENSE rename to workerconfig/workerconfig/LICENSE diff --git a/computeconfig/computeconfig/Makefile b/workerconfig/workerconfig/Makefile similarity index 52% rename from computeconfig/computeconfig/Makefile rename to workerconfig/workerconfig/Makefile index 6deaa1f5ec..16f29935e9 100644 --- a/computeconfig/computeconfig/Makefile +++ b/workerconfig/workerconfig/Makefile @@ -11,8 +11,8 @@ install: install -d -m 755 $(GOENABLEDDIR) install -d -m 755 $(SYSTEMDDIR) install -d -m 755 $(SYSTEMDDIR)/config - install -p -D -m 700 compute_config $(INITDDIR)/compute_config - install -p -D -m 700 compute_services $(INITDDIR)/compute_services + install -p -D -m 700 worker_config $(INITDDIR)/worker_config + install -p -D -m 700 worker_services $(INITDDIR)/worker_services install -p -D -m 755 config_goenabled_check.sh $(GOENABLEDDIR)/config_goenabled_check.sh - install -p -D -m 664 computeconfig.service $(SYSTEMDDIR)/config/computeconfig-standalone.service - install -p -D -m 664 computeconfig-combined.service $(SYSTEMDDIR)/config/computeconfig-combined.service + install -p -D -m 664 workerconfig.service $(SYSTEMDDIR)/config/workerconfig-standalone.service + install -p -D -m 664 workerconfig-combined.service $(SYSTEMDDIR)/config/workerconfig-combined.service diff 
--git a/computeconfig/computeconfig/config_goenabled_check.sh b/workerconfig/workerconfig/config_goenabled_check.sh similarity index 100% rename from computeconfig/computeconfig/config_goenabled_check.sh rename to workerconfig/workerconfig/config_goenabled_check.sh diff --git a/computeconfig/computeconfig/compute_config b/workerconfig/workerconfig/worker_config similarity index 92% rename from computeconfig/computeconfig/compute_config rename to workerconfig/workerconfig/worker_config index 7e84f30b97..cdcaaa1186 100644 --- a/computeconfig/computeconfig/compute_config +++ b/workerconfig/workerconfig/worker_config @@ -10,8 +10,8 @@ # ### BEGIN INIT INFO -# Provides: compute_config -# Short-Description: Compute node config agent +# Provides: worker_config +# Short-Description: Worker node config agent # Default-Start: 2 3 4 5 # Default-Stop: 0 1 6 ### END INIT INFO @@ -23,10 +23,10 @@ PLATFORM_DIR=/opt/platform CONFIG_DIR=$CONFIG_PATH VOLATILE_CONFIG_PASS="/var/run/.config_pass" VOLATILE_CONFIG_FAIL="/var/run/.config_fail" -LOGFILE="/var/log/compute_config.log" +LOGFILE="/var/log/worker_config.log" IMA_POLICY=/etc/ima.policy -# Copy of /opt/platform required for compute_services +# Copy of /opt/platform required for worker_services VOLATILE_PLATFORM_PATH=$VOLATILE_PATH/cpe_upgrade_opt_platform DELAY_SEC=600 @@ -108,7 +108,7 @@ wait_for_controller_services() fi # Not running Let's wait a couple of seconds and check again sleep 2 - done + done return 1 } @@ -120,8 +120,8 @@ start() function=`echo "$subfunction" | cut -f 2 -d','` - if [ "$nodetype" != "compute" -a "$function" != "compute" ] ; then - logger -t $0 -p warn "exiting because this is not compute host" + if [ "$nodetype" != "worker" -a "$function" != "worker" ] ; then + logger -t $0 -p warn "exiting because this is not worker node" exit 0 fi @@ -147,7 +147,7 @@ start() # we are in chroot installer environment exit 0 fi - echo "Configuring compute node..." + echo "Configuring worker node..." 
###### SECURITY PROFILE (EXTENDED) ################# # If we are in Extended Security Profile mode, # @@ -155,11 +155,11 @@ start() # IMA Policy so that all configuration operations # # can be measured and appraised # # # - # N.B: Only run for compute nodetype since for AIO # + # N.B: Only run for worker nodetype since for AIO # # controllerconfig would have already enabled IMA # # policy # ##################################################### - if [ "$nodetype" = "compute" -a "${security_profile}" = "extended" ] + if [ "$nodetype" = "worker" -a "${security_profile}" = "extended" ] then IMA_LOAD_PATH=/sys/kernel/security/ima/policy if [ -f ${IMA_LOAD_PATH} ]; then @@ -195,7 +195,7 @@ start() if [ -e "${PLATFORM_SIMPLEX_FLAG}" ] then echo "Wait for the controller services" - wait_for_controller_services + wait_for_controller_services if [ $? -ne 0 ] then fatal_error "Controller services are not ready" @@ -236,7 +236,7 @@ start() fi fi - if [ "$nodetype" = "compute" ] + if [ "$nodetype" = "worker" ] then # Check whether our installed load matches the active controller CONTROLLER_UUID=`curl -sf http://controller/feed/rel-${SW_VERSION}/install_uuid` @@ -273,12 +273,12 @@ start() fi fi - # Upgrade related checks for controller-1 in combined controller/compute + # Upgrade related checks for controller-1 in combined controller/worker if [ "$nodetype" = "controller" -a "$HOST" = "controller-1" ] then # Check controller activity. # Prior to the final compile of R5 the service check below had been - # against platform-nfs-ip. However, there was a compute + # against platform-nfs-ip. However, there was a worker # subfunction configuration failure when an AIO-DX system controller # booted up while there was no pingable backup controller. Seems the # platform-nfs-ip service was not always reaching the enabled-active @@ -308,12 +308,12 @@ start() # than controller-0. 
# This controller is not active and is running a higher # release than the mate controller, so do not launch - # any of the compute services (they will not work with + # any of the worker services (they will not work with # a lower version of the controller services). - echo "Disabling compute services until controller activated" - touch $VOLATILE_DISABLE_COMPUTE_SERVICES + echo "Disabling worker services until controller activated" + touch $VOLATILE_DISABLE_WORKER_SERVICES - # Copy $PLATFORM_DIR into a temporary location for the compute_services script to + # Copy $PLATFORM_DIR into a temporary location for the worker_services script to # access. This is only required for CPE upgrades rm -rf $VOLATILE_PLATFORM_PATH mkdir -p $VOLATILE_PLATFORM_PATH @@ -329,7 +329,7 @@ start() else # Controller-1 (CPE) is active and is rebooting. This is probably a DOR. Since this # could happen during an upgrade, we will copy $PLATFORM_DIR into a temporary - # location for the compute_services script to access in case of a future swact. + # location for the worker_services script to access in case of a future swact. rm -rf $VOLATILE_PLATFORM_PATH mkdir -p $VOLATILE_PLATFORM_PATH cp -Rp $PLATFORM_DIR/* $VOLATILE_PLATFORM_PATH/ @@ -340,7 +340,7 @@ start() HOST_HIERA=${PUPPET_PATH}/hieradata/${IPADDR}.yaml if [ -f ${HOST_HIERA} ]; then echo "$0: Running puppet manifest apply" - puppet-manifest-apply.sh ${PUPPET_PATH}/hieradata ${IPADDR} compute + puppet-manifest-apply.sh ${PUPPET_PATH}/hieradata ${IPADDR} worker RC=$? 
if [ $RC -ne 0 ]; then @@ -365,7 +365,7 @@ start() then fatal_error "Unable to mount NFS filesystems (RC:$RC)" fi - + touch $VOLATILE_CONFIG_PASS } diff --git a/computeconfig/computeconfig/compute_services b/workerconfig/workerconfig/worker_services similarity index 85% rename from computeconfig/computeconfig/compute_services rename to workerconfig/workerconfig/worker_services index e1b7fab318..8f44bf0a82 100644 --- a/computeconfig/computeconfig/compute_services +++ b/workerconfig/workerconfig/worker_services @@ -1,6 +1,6 @@ #!/bin/bash # -# Copyright (c) 2016-2016 Wind River Systems, Inc. +# Copyright (c) 2016-2018 Wind River Systems, Inc. # # SPDX-License-Identifier: Apache-2.0 # @@ -9,12 +9,12 @@ # by the /usr/local/sbin/sm-notification python script, if we are in a small # footprint system (CPE) # -# During a swact to, the script will delete the $VOLATILE_DISABLE_COMPUTE_SERVICES -# flag and re-apply the compute manifests. +# During a swact to, the script will delete the $VOLATILE_DISABLE_WORKER_SERVICES +# flag and re-apply the worker manifests. # During a swact away from (downgrades), the script re-create the -# $VOLATILE_DISABLE_COMPUTE_SERVICES flag and re-apply the compute manifests. +# $VOLATILE_DISABLE_WORKER_SERVICES flag and re-apply the worker manifests. 
# -# This script should only re-apply the compute manifests if; +# This script should only re-apply the worker manifests if; # - It is running on a CPE (small footprint) system # - It is controller-1 # - Controller-0 has not yet been upgraded @@ -28,12 +28,12 @@ VOLATILE_CONFIG_PASS="/var/run/.config_pass" VOLATILE_CONFIG_FAIL="/var/run/.config_fail" -IN_PROGRESS="/var/run/.compute_services_in_progress" +IN_PROGRESS="/var/run/.worker_services_in_progress" -TEMP_MATE_ETC_DIR="$VOLATILE_PATH/etc_platform_compute" -TEMP_PUPPET_DIR="$VOLATILE_PATH/puppet_compute" +TEMP_MATE_ETC_DIR="$VOLATILE_PATH/etc_platform_worker" +TEMP_PUPPET_DIR="$VOLATILE_PATH/puppet_worker" -# Copy of /opt/platform populate by compute_config +# Copy of /opt/platform populated by worker_config VOLATILE_PLATFORM_PATH=$VOLATILE_PATH/cpe_upgrade_opt_platform # Process id and full filename of this executable @@ -81,18 +81,18 @@ init() # This script should only be called if we are in a CPE system sub_function=`echo "$subfunction" | cut -f 2 -d','` - if [ $sub_function != "compute" ] ; then + if [ $sub_function != "worker" ] ; then logger -t $NAME -p local1.error "Exiting because this is not CPE host" end_exec fi - # Exit if called while the config compute success flag file is not present + # Exit if called while the config worker success flag file is not present if [ ! -f $VOLATILE_CONFIG_PASS ] ; then logger -t $NAME -p local1.info "Exiting due to non-presence of $VOLATILE_CONFIG_PASS file" end_exec fi - # Exit if called while the config compute failure flag file is present + # Exit if called while the config worker failure flag file is present if [ -f $VOLATILE_CONFIG_FAIL ] ; then logger -t $NAME -p local1.info "Exiting due to presence of $VOLATILE_CONFIG_FAIL file" end_exec @@ -110,7 +110,7 @@ init() end_exec fi - # The platform filesystem was mounted in compute_config and copied in a temp + # The platform filesystem was mounted in worker_config and copied in a temp # location if [ !
-f $VOLATILE_PLATFORM_PATH/config/${SW_VERSION}/hosts ] ; then logger -t $NAME -p local1.error "Error accessing $VOLATILE_PLATFORM_PATH" @@ -158,12 +158,12 @@ init() end_exec fi - # Update the VOLATILE_DISABLE_COMPUTE_SERVICES flag and stop nova-compute if in "stop" + # Update the VOLATILE_DISABLE_WORKER_SERVICES flag and stop nova-compute if in "stop" if [ $action_to_perform == "stop" ] ; then logger -t $NAME -p local1.info "Disabling compute services" - # Set the compute services disable flag used by the manifest - touch $VOLATILE_DISABLE_COMPUTE_SERVICES + # Set the worker services disable flag used by the manifest + touch $VOLATILE_DISABLE_WORKER_SERVICES # Stop nova-compute logger -t $NAME -p local1.info "Stopping nova-compute" @@ -171,15 +171,15 @@ init() else logger -t $NAME -p local1.info "Enabling compute services" - # Clear the compute services disable flag used by the manifest - rm $VOLATILE_DISABLE_COMPUTE_SERVICES + # Clear the worker services disable flag used by the manifest + rm $VOLATILE_DISABLE_WORKER_SERVICES fi # Apply the puppet manifest HOST_HIERA=${TEMP_PUPPET_DIR}/hieradata/${IPADDR}.yaml if [ -f ${HOST_HIERA} ]; then echo "$0: Running puppet manifest apply" - puppet-manifest-apply.sh ${TEMP_PUPPET_DIR}/hieradata ${IPADDR} compute + puppet-manifest-apply.sh ${TEMP_PUPPET_DIR}/hieradata ${IPADDR} worker RC=$? 
if [ $RC -ne 0 ]; then diff --git a/computeconfig/computeconfig/computeconfig-combined.service b/workerconfig/workerconfig/workerconfig-combined.service similarity index 73% rename from computeconfig/computeconfig/computeconfig-combined.service rename to workerconfig/workerconfig/workerconfig-combined.service index d9307fa728..6aefb66888 100644 --- a/computeconfig/computeconfig/computeconfig-combined.service +++ b/workerconfig/workerconfig/workerconfig-combined.service @@ -1,8 +1,8 @@ [Unit] -Description=computeconfig service +Description=workerconfig service After=syslog.target network.service remote-fs.target After=sw-patch.service -After=affine-platform.sh.service compute-huge.sh.service +After=affine-platform.sh.service After=controllerconfig.service config.service After=goenabled.service After=sysinv-agent.service @@ -10,7 +10,7 @@ After=network-online.target [Service] Type=simple -ExecStart=/etc/init.d/compute_config start +ExecStart=/etc/init.d/worker_config start ExecStop= ExecReload= StandardOutput=syslog+console diff --git a/computeconfig/computeconfig/computeconfig.service b/workerconfig/workerconfig/workerconfig.service similarity index 65% rename from computeconfig/computeconfig/computeconfig.service rename to workerconfig/workerconfig/workerconfig.service index d65bf01982..97e6a1af38 100644 --- a/computeconfig/computeconfig/computeconfig.service +++ b/workerconfig/workerconfig/workerconfig.service @@ -1,17 +1,17 @@ [Unit] -Description=computeconfig service +Description=workerconfig service After=syslog.target network.service remote-fs.target After=sw-patch.service -After=affine-platform.sh.service compute-huge.sh.service +After=affine-platform.sh.service After=opt-platform.service After=sysinv-agent.service After=network-online.target -Before=config.service compute-config-gate.service +Before=config.service worker-config-gate.service Before=goenabled.service [Service] Type=simple -ExecStart=/etc/init.d/compute_config start 
+ExecStart=/etc/init.d/worker_config start ExecStop= ExecReload= StandardOutput=syslog+console