Change compute node to worker node personality

Update misc compute references to worker

Tests Performed:
Non-containerized deployment:
AIO-SX: Sanity and Nightly automated test suite
AIO-DX: Sanity and Nightly automated test suite
2+2 System: Sanity and Nightly automated test suite
2+2 System: Horizon Patch Orchestration

Kubernetes deployment:
AIO-SX: Create, delete, reboot and rebuild instances
2+2+2 System: worker nodes unlock and enable with no alarms

Story: 2004022
Task: 27013

Depends-On: https://review.openstack.org/#/c/624452/

Change-Id: I7b2de5c7202f2e86b55f5343c8d79d50e3a072a2
Signed-off-by: Tao Liu <tao.liu@windriver.com>
Tao Liu 2018-12-12 09:35:28 -05:00
parent 3ce422d5a2
commit 397e40a89f
36 changed files with 688 additions and 688 deletions

@@ -217,8 +217,8 @@ forbidden (403), badMethod (405), overLimit (413), itemNotFound (404)
 "controller-apply-type": "serial",
 "current-phase-completion-percentage": 100,
 "uuid": "c1971c42-b494-4ff0-8abf-dbde17929972",
-"compute-apply-type": "serial",
-"max-parallel-compute-hosts": 2,
+"worker-apply-type": "serial",
+"max-parallel-worker-hosts": 2,
 "alarm-restrictions": "strict",
 "current-phase": "build",
 "apply-phase": {
@@ -537,7 +537,7 @@ forbidden (403), badMethod (405), overLimit (413), itemNotFound (404)
 "timeout": 4621,
 "total-steps": 6,
 "inprogress": false,
-"stage-name": "sw-patch-compute-hosts"
+"stage-name": "sw-patch-worker-hosts"
 },
 {
 "start-date-time": "",
@@ -643,7 +643,7 @@ forbidden (403), badMethod (405), overLimit (413), itemNotFound (404)
 "timeout": 4621,
 "total-steps": 6,
 "inprogress": false,
-"stage-name": "sw-patch-compute-hosts"
+"stage-name": "sw-patch-worker-hosts"
 },
 {
 "start-date-time": "",
@@ -745,7 +745,7 @@ forbidden (403), badMethod (405), overLimit (413), itemNotFound (404)
 "timeout": 4621,
 "total-steps": 6,
 "inprogress": false,
-"stage-name": "sw-patch-compute-hosts"
+"stage-name": "sw-patch-worker-hosts"
 },
 {
 "start-date-time": "",
@@ -847,7 +847,7 @@ forbidden (403), badMethod (405), overLimit (413), itemNotFound (404)
 "timeout": 4621,
 "total-steps": 6,
 "inprogress": false,
-"stage-name": "sw-patch-compute-hosts"
+"stage-name": "sw-patch-worker-hosts"
 }
 ],
 "current-stage": 0
@@ -963,8 +963,8 @@ forbidden (403), badMethod (405), overLimit (413)
 "controller-apply-type", "plain", "xsd:string", "The apply type for controller hosts: ``serial`` or ``ignore``."
 "storage-apply-type", "plain", "xsd:string", "The apply type for storage hosts: ``serial``, ``parallel`` or ``ignore``."
-"compute-apply-type", "plain", "xsd:string", "The apply type for compute hosts: ``serial``, ``parallel`` or ``ignore``."
-"max-parallel-compute-hosts (Optional)", "plain", "xsd:integer", "The maximum number of compute hosts to patch in parallel; only applicable if ``compute-apply-type = parallel``. Default value is ``2``."
+"worker-apply-type", "plain", "xsd:string", "The apply type for worker hosts: ``serial``, ``parallel`` or ``ignore``."
+"max-parallel-worker-hosts (Optional)", "plain", "xsd:integer", "The maximum number of worker hosts to patch in parallel; only applicable if ``worker-apply-type = parallel``. Default value is ``2``."
 "swift-apply-type", "plain", "xsd:string", "The apply type for swift hosts: ``serial``, ``parallel`` or ``ignore``."
 "default-instance-action", "plain", "xsd:string", "The default instance action: ``stop-start`` or ``migrate``."
 "alarm-restrictions (Optional)", "plain", "xsd:string", "The strictness of alarm checks: ``strict`` or ``relaxed``."
@@ -974,7 +974,7 @@ forbidden (403), badMethod (405), overLimit (413)
 {
 "controller-apply-type": "serial",
 "default-instance-action": "stop-start",
-"compute-apply-type": "serial",
+"worker-apply-type": "serial",
 "storage-apply-type": "serial",
 "swift-apply-type": "ignore",
 "alarm-restrictions": "strict"
@@ -987,8 +987,8 @@ forbidden (403), badMethod (405), overLimit (413)
 "controller-apply-type": "serial",
 "current-phase-completion-percentage": 0,
 "uuid": "15dc4b63-ae5f-48ca-b76e-ec367ef817f5",
-"compute-apply-type": "serial",
-"max-parallel-compute-hosts": 2,
+"worker-apply-type": "serial",
+"max-parallel-worker-hosts": 2,
 "alarm-restrictions": "strict",
 "current-phase": "build",
 "apply-phase": {
@@ -1143,8 +1143,8 @@ forbidden (403), badMethod (405), overLimit (413)
 "controller-apply-type": "serial",
 "current-phase-completion-percentage": 0,
 "uuid": "15dc4b63-ae5f-48ca-b76e-ec367ef817f5",
-"compute-apply-type": "serial",
-"max-parallel-compute-hosts": 2,
+"worker-apply-type": "serial",
+"max-parallel-worker-hosts": 2,
 "alarm-restrictions": "strict",
 "current-phase": "apply",
 "apply-phase": {
@@ -1265,7 +1265,7 @@ forbidden (403), badMethod (405), overLimit (413)
 "timeout": 1921,
 "total-steps": 3,
 "inprogress": false,
-"stage-name": "sw-patch-compute-hosts"
+"stage-name": "sw-patch-worker-hosts"
 },
 {
 "start-date-time": "",
@@ -1319,7 +1319,7 @@ forbidden (403), badMethod (405), overLimit (413)
 "timeout": 1921,
 "total-steps": 3,
 "inprogress": false,
-"stage-name": "sw-patch-compute-hosts"
+"stage-name": "sw-patch-worker-hosts"
 },
 {
 "start-date-time": "",
@@ -1373,7 +1373,7 @@ forbidden (403), badMethod (405), overLimit (413)
 "timeout": 1921,
 "total-steps": 3,
 "inprogress": false,
-"stage-name": "sw-patch-compute-hosts"
+"stage-name": "sw-patch-worker-hosts"
 },
 {
 "start-date-time": "",
@@ -1427,7 +1427,7 @@ forbidden (403), badMethod (405), overLimit (413)
 "timeout": 1921,
 "total-steps": 3,
 "inprogress": false,
-"stage-name": "sw-patch-compute-hosts"
+"stage-name": "sw-patch-worker-hosts"
 }
 ],
 "current-stage": 0
@@ -1550,8 +1550,8 @@ forbidden (403), badMethod (405), overLimit (413), itemNotFound (404)
 "current-phase-completion-percentage": 100,
 "uuid": "ac9b953a-caf1-4abe-8d53-498b598e6731",
 "name": "sw-upgrade",
-"compute-apply-type": "serial",
-"max-parallel-compute-hosts": 2,
+"worker-apply-type": "serial",
+"max-parallel-worker-hosts": 2,
 "current-phase": "build",
 "apply-phase": {
 "start-date-time": "",
@@ -1735,7 +1735,7 @@ forbidden (403), badMethod (405), overLimit (413), itemNotFound (404)
 "timeout": 3721,
 "total-steps": 5,
 "inprogress": false,
-"stage-name": "sw-upgrade-compute-hosts"
+"stage-name": "sw-upgrade-worker-hosts"
 },
 {
 "start-date-time": "",
@@ -1821,7 +1821,7 @@ forbidden (403), badMethod (405), overLimit (413), itemNotFound (404)
 "timeout": 3721,
 "total-steps": 5,
 "inprogress": false,
-"stage-name": "sw-upgrade-compute-hosts"
+"stage-name": "sw-upgrade-worker-hosts"
 }
 ],
 "current-stage": 0
@@ -1925,14 +1925,14 @@ forbidden (403), badMethod (405), overLimit (413)
 :widths: 20, 20, 20, 60

 "storage-apply-type", "plain", "xsd:string", "The apply type for storage hosts: ``serial``, ``parallel`` or ``ignore``."
-"compute-apply-type", "plain", "xsd:string", "The apply type for compute hosts: ``serial``, ``parallel`` or ``ignore``."
-"max-parallel-compute-hosts (Optional)", "plain", "xsd:integer", "The maximum number of compute hosts to upgrade in parallel; only applicable if ``compute-apply-type = parallel``. Default value is ``2``."
+"worker-apply-type", "plain", "xsd:string", "The apply type for worker hosts: ``serial``, ``parallel`` or ``ignore``."
+"max-parallel-worker-hosts (Optional)", "plain", "xsd:integer", "The maximum number of worker hosts to upgrade in parallel; only applicable if ``worker-apply-type = parallel``. Default value is ``2``."
 "alarm-restrictions (Optional)", "plain", "xsd:string", "The strictness of alarm checks: ``strict`` or ``relaxed``."

 ::

 {
-"compute-apply-type": "serial",
+"worker-apply-type": "serial",
 "storage-apply-type": "serial",
 "alarm-restrictions": "relaxed"
 }
@@ -1945,8 +1945,8 @@ forbidden (403), badMethod (405), overLimit (413)
 "current-phase-completion-percentage": 0,
 "uuid": "ac9b953a-caf1-4abe-8d53-498b598e6731",
 "name": "sw-upgrade",
-"compute-apply-type": "serial",
-"max-parallel-compute-hosts": 2,
+"worker-apply-type": "serial",
+"max-parallel-worker-hosts": 2,
 "current-phase": "build",
 "apply-phase": {
 "start-date-time": "",
@@ -2090,8 +2090,8 @@ forbidden (403), badMethod (405), overLimit (413)
 "current-phase-completion-percentage": 0,
 "uuid": "ac9b953a-caf1-4abe-8d53-498b598e6731",
 "name": "sw-upgrade",
-"compute-apply-type": "serial",
-"max-parallel-compute-hosts": 2,
+"worker-apply-type": "serial",
+"max-parallel-worker-hosts": 2,
 "current-phase": "apply",
 "apply-phase": {
 "start-date-time": "2017-01-10 16:19:12",
@@ -2275,7 +2275,7 @@ forbidden (403), badMethod (405), overLimit (413)
 "timeout": 3721,
 "total-steps": 5,
 "inprogress": false,
-"stage-name": "sw-upgrade-compute-hosts"
+"stage-name": "sw-upgrade-worker-hosts"
 },
 {
 "start-date-time": "",
@@ -2361,7 +2361,7 @@ forbidden (403), badMethod (405), overLimit (413)
 "timeout": 3721,
 "total-steps": 5,
 "inprogress": false,
-"stage-name": "sw-upgrade-compute-hosts"
+"stage-name": "sw-upgrade-worker-hosts"
 }
 ],
 "current-stage": 0

@@ -230,7 +230,7 @@ snapshot will start with the number of vCPUs specified by the flavor.
 CAVEATS
 =======
-It is possible for the scale-up operation to fail if the compute node has
+It is possible for the scale-up operation to fail if the worker node has
 already allocated all of its resources to other guests. If this happens,
 the system will not do any automatic migration to try to free up resources.
 Manual action will be required to free up resources.

@@ -43,7 +43,7 @@ Titanium Cloud Setup
 If support is indicated, then as soon as the VM's Titanium Cloud
 Guest-Client daemon registers with the Titanium Cloud Compute
-Services on the compute node host, heartbeating will be enabled.
+Services on the worker node host, heartbeating will be enabled.

 a) Create a new flavor:
@@ -129,14 +129,14 @@ VM Setup
 Configuring Guest Heartbeat & Application Health Check
 ------------------------------------------------------
 The Guest-Client within your VM will register with the Titanium Cloud
-Compute Services on the compute node host. Part of that registration
+Compute Services on the worker node host. Part of that registration
 process is the specification of a heartbeat interval and a corrective
 action for a failed/unhealthy VM. The values of heartbeat interval and
 corrective action come from the guest_heartbeat.conf file and is located
 in /etc/guest-client/heartbeat directory by default.

 Guest heartbeat works on a challenge response model. The Titanium
-Server Compute Services on the compute node host will challenge the
+Server Compute Services on the worker node host will challenge the
 Guest-Client daemon with a message each interval. The Guest-Client
 must respond prior to the next interval with a message indicating good
 health. If the Titanium Cloud Compute Services does not receive a valid
@@ -147,7 +147,7 @@ VM Setup
 specific scripts and processes, to register for heartbeating. Each script
 or process can specify its own heartbeat interval, and its own corrective
 action to be taken against the VM as a whole. On ill health the Guest-Client
-reports ill health to the Titanium Cloud Compute Services on the compute node
+reports ill health to the Titanium Cloud Compute Services on the worker node
 host on the next challenge, and provoke the corrective action.

 This mechanism allows for detection of a failed or hung QEMU/KVM instance,
@@ -162,7 +162,7 @@ VM Setup
 /etc/guest-client/heartbeat/guest_heartbeat.conf:

 ## This specifies the interval between heartbeats in milliseconds between the
 ## guest-client heartbeat and the Titanium Cloud Compute Services on the
-## compute node host.
+## worker node host.
 HB_INTERVAL=1000

 The corrective action defaults to 'reboot' and can be overridden by the
@@ -171,7 +171,7 @@ VM Setup
 /etc/guest-client/heartbeat/guest_heartbeat.conf:

 ## This specifies the corrective action against the VM in the case of a
 ## heartbeat failure between the guest-client and Titanium Cloud Compute
-## Services on the compute node host and also when the health script
+## Services on the worker node host and also when the health script
 ## configured below fails.
 ##
 ## Your options are:
@@ -251,7 +251,7 @@ VM Setup
 While post-notification handlers are running, or waiting to be run,
 the Titanium Cloud will not be able to declare the action complete.
 Keep in mind that many events that offer a post notification will
-require the VM's Guest-Client to reconnect to the compute host, and
+require the VM's Guest-Client to reconnect to the worker host, and
 that may be further delayed while the VM is rebooted as in a cold
 migration. When post-notification is finally triggered, it is subject
 to a timeout as well. If the timeout is reached, the event will be
@@ -259,7 +259,7 @@ VM Setup
 NOTE: A post-event notification that follows a reboot, as in the
 cold_migrate_end event, is a special case. It will be triggered as
-soon as the local heartbeat server reconnects with the compute host,
+soon as the local heartbeat server reconnects with the worker host,
 and likely before any processes have a chance to register a handler.
 The only handler guaranteed to see such a notification is a script
 directly registered by the Guest-Client itself via guest_heartbeat.conf.

@@ -32,12 +32,12 @@
 ## This specifies the interval between heartbeats in milliseconds between the
 ## guest-client heartbeat and the Titanium Cloud Compute Services on the
-## compute node host.
+## worker node host.
 HB_INTERVAL=1000

 ## This specifies the corrective action against the VM in the case of a
 ## heartbeat failure between the guest-client and Titanium Cloud Compute
-## Services on the compute node host and also when the health script
+## Services on the worker node host and also when the health script
 ## configured below fails.
 ##
 ## Your options are:

@@ -14,11 +14,11 @@ mode = passive ; Monitoring mode: passive (default or if mode field i
 ; passive: process death monitoring (default: always)
 ; active: heartbeat monitoring, i.e. request / response messaging
 ; ignore: do not monitor or stop monitoring process
-subfunction = compute ; Optional label.
+subfunction = worker ; Optional label.
 ; Manage this process in the context of a combo host subfunction
-; Choices: compute or storage.
+; Choices: worker or storage.
 ; when specified pmond will wait for
-; /var/run/.compute_config_complete or
+; /var/run/.worker_config_complete or
 ; /var/run/.storage_config_complete
 ; ... before managing this process with the specified subfunction
 ; Excluding this label will cause this process to be managed by default on startup

@@ -1,3 +1,3 @@
 SRC_DIR="src"
-TIS_PATCH_VER=140
+TIS_PATCH_VER=141
 BUILD_IS_SLOW=5

@@ -105,7 +105,7 @@ Requires: libpthread.so.0()(64bit)
 %description -n mtce-guestServer
 Maintenance Guest Server assists in VM guest
-heartbeat control and failure reporting at the compute level.
+heartbeat control and failure reporting at the worker level.

 %define local_dir /usr/local
 %define local_bindir %{local_dir}/bin

@@ -9,9 +9,9 @@ orchestration under VIM (system management) control.
 a. image.inc and filter_out packaging files are modified to exclude the
 heartbeat daemon from being packaged on the controller.
-b. because the heartbeat daemon is still packaged on the compute
+b. because the heartbeat daemon is still packaged on the worker
 heartbeat_init script is modified to prevent the heartbeat
-daemon from being spawned on the compute host.
+daemon from being spawned on the worker host.

 2. Compute Function: Heartbeats the guest and reports failures.
@@ -37,7 +37,7 @@ orchestration under VIM (system management) control.
 Behavioral Executive Summary:

-The guestServer daemon (on the compute) listens for (using inotify) 'uuid'
+The guestServer daemon (on the worker) listens for (using inotify) 'uuid'
 UNIX named heartbeat communication channels that nova:libvirt creates and
 opens in /var/lib/libvirt/qemu whenever an instance is created. Example:
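
(The README's own example listing is truncated in this view.) As a separate
illustration only, not code from this commit, here is a rough Python sketch of
the same inotify idea using the third-party watchdog package; the directory
and handler behavior are assumptions:

from watchdog.events import FileSystemEventHandler
from watchdog.observers import Observer

QEMU_DIR = '/var/lib/libvirt/qemu'  # per-instance channels appear here

class ChannelHandler(FileSystemEventHandler):
    def on_created(self, event):
        # A newly created 'uuid'-named channel suggests a new instance
        # to start heartbeating with.
        print('new heartbeat channel candidate:', event.src_path)

observer = Observer()
observer.schedule(ChannelHandler(), QEMU_DIR)  # non-recursive watch
observer.start()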

@@ -418,7 +418,7 @@ int _socket_init ( void )
 /* Get a socket that listens to the controller's FLOATING IP */
 /* This is the socket that the guestAgent receives events from
- * the guestServer from the compute on */
+ * the guestServer from the worker on */
 _ctrl.sock.agent_rx_float_sock = new msgClassRx(hostInv.hostBase.my_float_ip.c_str(), guest_config.agent_rx_port, IPPROTO_UDP);
 rc = _ctrl.sock.agent_rx_float_sock->return_status;
 if ( rc )
@@ -430,7 +430,7 @@ int _socket_init ( void )
 /* Get a socket that listens to the controller's LOCAL IP */
 /* This is the socket that the guestAgent receives events from
- * the guestServer from the compute on */
+ * the guestServer from the worker on */
 _ctrl.sock.agent_rx_local_sock = new msgClassRx(hostInv.hostBase.my_local_ip.c_str(), guest_config.agent_rx_port, IPPROTO_UDP);
 rc = _ctrl.sock.agent_rx_local_sock->return_status;
 if ( rc )
@@ -509,7 +509,7 @@ int daemon_init ( string iface, string nodetype )
 *
 * Description: Messaging interface capable of building command specific
 *              messages and sending them to the guestServer daemon on
-*              the specified compute host.
+*              the specified worker host.
 *
 * TODO: setup acknowledge mechanism using guestHost
 *
@@ -1116,8 +1116,8 @@ void guestHostClass::run_fsm ( string hostname )
 guest_host_ptr = guestHostClass::getHost ( hostname );
 if ( guest_host_ptr != NULL )
 {
-    /* This FSM is only run on computes */
-    if (( guest_host_ptr->hosttype & COMPUTE_TYPE ) == COMPUTE_TYPE)
+    /* This FSM is only run on workers */
+    if (( guest_host_ptr->hosttype & WORKER_TYPE ) == WORKER_TYPE)
     {
         flog ("%s FSM\n", hostname.c_str() );
     }

@@ -123,7 +123,7 @@ typedef struct
 {
 /** Guest Services Messaging Agent Receive (from guestServer) Socket
 *
-* Note: This socket supports receiving from the computes specifying
+* Note: This socket supports receiving from the workers specifying
 * either the floating or local IP */
 int agent_rx_port ;
 msgClassSock* agent_rx_float_sock ;

@@ -1326,7 +1326,7 @@ void guestHostClass::memDumpAllState ( void )
 for ( struct guest_host * ptr = guest_head ; ptr != NULL ; ptr = ptr->next )
 {
     memDumpNodeState ( ptr->hostname );
-    if ( (ptr->hosttype & COMPUTE_TYPE) == COMPUTE_TYPE)
+    if ( (ptr->hosttype & WORKER_TYPE) == WORKER_TYPE)
     {
         mem_log_info_inst ( ptr );
     }

@@ -15,11 +15,11 @@ mode = passive ; Monitoring mode: passive (default) or active
 ; passive: process death monitoring (default: always)
 ; active : heartbeat monitoring, i.e. request / response messaging
 ; ignore : do not monitor or stop monitoring
-subfunction = compute ; Optional label.
+subfunction = worker ; Optional label.
 ; Manage this process in the context of a combo host subfunction
-; Choices: compute or storage.
+; Choices: worker or storage.
 ; when specified pmond will wait for
-; /var/run/.compute_config_complete or
+; /var/run/.worker_config_complete or
 ; /var/run/.storage_config_complete
 ; ... before managing this process with the specified subfunction
 ; Excluding this label will cause this process to be managed by default on startup

@@ -64,8 +64,8 @@ class Strategy(object):
     controller_apply_type = None
     storage_apply_type = None
     swift_apply_type = None
-    compute_apply_type = None
-    max_parallel_compute_hosts = None
+    worker_apply_type = None
+    max_parallel_worker_hosts = None
     default_instance_action = None
     alarm_restrictions = None
     current_phase = None
@@ -157,9 +157,9 @@ def _get_strategy_object_from_response(response):
     strategy.controller_apply_type = strategy_data['controller-apply-type']
     strategy.storage_apply_type = strategy_data['storage-apply-type']
     strategy.swift_apply_type = strategy_data['swift-apply-type']
-    strategy.compute_apply_type = strategy_data['compute-apply-type']
-    strategy.max_parallel_compute_hosts = \
-        strategy_data['max-parallel-compute-hosts']
+    strategy.worker_apply_type = strategy_data['worker-apply-type']
+    strategy.max_parallel_worker_hosts = \
+        strategy_data['max-parallel-worker-hosts']
     strategy.default_instance_action = strategy_data['default-instance-action']
     strategy.alarm_restrictions = strategy_data['alarm-restrictions']
     strategy.current_phase = strategy_data['current-phase']
@@ -211,8 +211,8 @@ def get_strategy(token_id, url, strategy_name, strategy_uuid):
 def create_strategy(token_id, url, strategy_name, controller_apply_type,
-                    storage_apply_type, swift_apply_type, compute_apply_type,
-                    max_parallel_compute_hosts,
+                    storage_apply_type, swift_apply_type, worker_apply_type,
+                    max_parallel_worker_hosts,
                     default_instance_action, alarm_restrictions, **kwargs):
     """
     Software Update - Create Strategy
@@ -234,10 +234,10 @@ def create_strategy(token_id, url, strategy_name, controller_apply_type,
     if 'complete_upgrade' in kwargs and kwargs['complete_upgrade']:
         api_cmd_payload['complete-upgrade'] = True
     api_cmd_payload['storage-apply-type'] = storage_apply_type
-    api_cmd_payload['compute-apply-type'] = compute_apply_type
-    if max_parallel_compute_hosts is not None:
-        api_cmd_payload['max-parallel-compute-hosts'] = \
-            max_parallel_compute_hosts
+    api_cmd_payload['worker-apply-type'] = worker_apply_type
+    if max_parallel_worker_hosts is not None:
+        api_cmd_payload['max-parallel-worker-hosts'] = \
+            max_parallel_worker_hosts
     api_cmd_payload['alarm-restrictions'] = alarm_restrictions
     response = rest_api.request(token_id, "POST", api_cmd, api_cmd_headers,

@@ -50,13 +50,13 @@ def process_main(argv=sys.argv[1:]):  # pylint: disable=dangerous-default-value
             sw_update.APPLY_TYPE_IGNORE],
         help='defaults to serial')
     sw_patch_create_strategy_cmd.add_argument(
-        '--compute-apply-type', default=sw_update.APPLY_TYPE_SERIAL,
+        '--worker-apply-type', default=sw_update.APPLY_TYPE_SERIAL,
         choices=[sw_update.APPLY_TYPE_SERIAL, sw_update.APPLY_TYPE_PARALLEL,
                  sw_update.APPLY_TYPE_IGNORE],
         help='defaults to serial')
     sw_patch_create_strategy_cmd.add_argument(
-        '--max-parallel-compute-hosts', type=int, choices=range(2, 101),
-        help='maximum compute hosts to patch in parallel')
+        '--max-parallel-worker-hosts', type=int, choices=range(2, 101),
+        help='maximum worker hosts to patch in parallel')
     sw_patch_create_strategy_cmd.add_argument(
         '--instance-action', default=sw_update.INSTANCE_ACTION_STOP_START,
         choices=[sw_update.INSTANCE_ACTION_MIGRATE,
@@ -109,13 +109,13 @@ def process_main(argv=sys.argv[1:]):  # pylint: disable=dangerous-default-value
             sw_update.APPLY_TYPE_IGNORE],
         help='defaults to serial')
     sw_upgrade_create_strategy_cmd.add_argument(
-        '--compute-apply-type', default=sw_update.APPLY_TYPE_SERIAL,
+        '--worker-apply-type', default=sw_update.APPLY_TYPE_SERIAL,
         choices=[sw_update.APPLY_TYPE_SERIAL, sw_update.APPLY_TYPE_PARALLEL,
                  sw_update.APPLY_TYPE_IGNORE],
         help='defaults to serial')
     sw_upgrade_create_strategy_cmd.add_argument(
-        '--max-parallel-compute-hosts', type=int, choices=range(2, 11),
-        help='maximum compute hosts to upgrade in parallel')
+        '--max-parallel-worker-hosts', type=int, choices=range(2, 11),
+        help='maximum worker hosts to upgrade in parallel')
     # Disable support for --start-upgrade as it was not completed
     # sw_upgrade_create_strategy_cmd.add_argument(
     #     '--start-upgrade', action='store_true',
@@ -227,8 +227,8 @@ def process_main(argv=sys.argv[1:]):  # pylint: disable=dangerous-default-value
         sw_update.STRATEGY_NAME_SW_PATCH,
         args.controller_apply_type,
         args.storage_apply_type, sw_update.APPLY_TYPE_IGNORE,
-        args.compute_apply_type,
-        args.max_parallel_compute_hosts,
+        args.worker_apply_type,
+        args.max_parallel_worker_hosts,
         args.instance_action,
         args.alarm_restrictions)
@@ -281,8 +281,8 @@ def process_main(argv=sys.argv[1:]):  # pylint: disable=dangerous-default-value
         sw_update.STRATEGY_NAME_SW_UPGRADE,
         sw_update.APPLY_TYPE_IGNORE,
         args.storage_apply_type, sw_update.APPLY_TYPE_IGNORE,
-        args.compute_apply_type,
-        args.max_parallel_compute_hosts,
+        args.worker_apply_type,
+        args.max_parallel_worker_hosts,
         None, args.alarm_restrictions,
         # start_upgrade=args.start_upgrade,
         complete_upgrade=args.complete_upgrade
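
As a reference for the renamed flags, a minimal stand-alone sketch (an
assumption-based illustration, not part of this commit) of how they behave
under argparse; the flag names, choices and defaults mirror the sw-patch
options in the diff above:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--worker-apply-type', default='serial',
                    choices=['serial', 'parallel', 'ignore'])
parser.add_argument('--max-parallel-worker-hosts', type=int,
                    choices=range(2, 101))

args = parser.parse_args(['--worker-apply-type', 'parallel',
                          '--max-parallel-worker-hosts', '4'])
# argparse maps dashes to underscores, yielding the attribute names the
# diff uses later (args.worker_apply_type, args.max_parallel_worker_hosts).
assert args.worker_apply_type == 'parallel'
assert args.max_parallel_worker_hosts == 4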

@@ -111,10 +111,10 @@ def _display_strategy(strategy, details=False):
     _print(2, "strategy-uuid", strategy.uuid)
     _print(2, "controller-apply-type", strategy.controller_apply_type)
     _print(2, "storage-apply-type", strategy.storage_apply_type)
-    _print(2, "compute-apply-type", strategy.compute_apply_type)
-    if APPLY_TYPE_PARALLEL == strategy.compute_apply_type:
-        _print(2, "max-parallel-compute-hosts",
-               strategy.max_parallel_compute_hosts)
+    _print(2, "worker-apply-type", strategy.worker_apply_type)
+    if APPLY_TYPE_PARALLEL == strategy.worker_apply_type:
+        _print(2, "max-parallel-worker-hosts",
+               strategy.max_parallel_worker_hosts)
     _print(2, "default-instance-action", strategy.default_instance_action)
     _print(2, "alarm-restrictions", strategy.alarm_restrictions)
     _print(2, "current-phase", strategy.current_phase)
@@ -162,8 +162,8 @@ def create_strategy(os_auth_uri, os_project_name, os_project_domain_name,
                     os_username, os_password, os_user_domain_name,
                     os_region_name, os_interface,
                     strategy_name, controller_apply_type,
-                    storage_apply_type, swift_apply_type, compute_apply_type,
-                    max_parallel_compute_hosts,
+                    storage_apply_type, swift_apply_type, worker_apply_type,
+                    max_parallel_worker_hosts,
                     default_instance_action, alarm_restrictions, **kwargs):
     """
     Software Update - Create Strategy
@@ -181,8 +181,8 @@ def create_strategy(os_auth_uri, os_project_name, os_project_domain_name,
         strategy_name,
         controller_apply_type,
         storage_apply_type, swift_apply_type,
-        compute_apply_type,
-        max_parallel_compute_hosts,
+        worker_apply_type,
+        max_parallel_worker_hosts,
         default_instance_action,
         alarm_restrictions,
         **kwargs)

@@ -46,8 +46,8 @@ function _swmanager()
     local createopts="
         --controller-apply-type
         --storage-apply-type
-        --compute-apply-type
-        --max-parallel-compute-hosts
+        --worker-apply-type
+        --max-parallel-worker-hosts
         --instance-action
         --alarm-restrictions
     "
@@ -57,11 +57,11 @@ function _swmanager()
             COMPREPLY=($(compgen -W "serial ignore" -- ${cur}))
             return 0
             ;;
-        --storage-apply-type|--compute-apply-type)
+        --storage-apply-type|--worker-apply-type)
             COMPREPLY=($(compgen -W "serial parallel ignore" -- ${cur}))
             return 0
             ;;
-        --max-parallel-compute-hosts)
+        --max-parallel-worker-hosts)
            COMPREPLY=( $(compgen -- ${cur}))
            return 0
            ;;
@@ -122,8 +122,8 @@ function _swmanager()
     create)
         local createopts="
             --storage-apply-type
-            --compute-apply-type
-            --max-parallel-compute-hosts
+            --worker-apply-type
+            --max-parallel-worker-hosts
             --alarm-restrictions
         "
         local createopt=${prev}
@@ -132,11 +132,11 @@ function _swmanager()
            COMPREPLY=($(compgen -W "serial parallel ignore" -- ${cur}))
            return 0
            ;;
-       --compute-apply-type)
+       --worker-apply-type)
            COMPREPLY=($(compgen -W "serial parallel ignore" -- ${cur}))
            return 0
            ;;
-       --max-parallel-compute-hosts)
+       --max-parallel-worker-hosts)
            COMPREPLY=( $(compgen -- ${cur}))
            return 0
            ;;

@@ -385,7 +385,7 @@ class NFVIComputeAPI(nfvi.api.v1.NFVIComputeAPI):
         self._auto_accept_action_requests = False

     def _host_supports_nova_compute(self, personality):
-        return (('compute' in personality) and
+        return (('worker' in personality) and
                 (self._directory.get_service_info(
                     OPENSTACK_SERVICE.NOVA) is not None))
@@ -401,7 +401,7 @@ class NFVIComputeAPI(nfvi.api.v1.NFVIComputeAPI):
         try:
             future.set_timeouts(config.CONF.get('nfvi-timeouts', None))

-            # Only applies to compute hosts
+            # Only applies to worker hosts
             if not self._host_supports_nova_compute(host_personality):
                 response['completed'] = True
                 response['reason'] = ''
@@ -758,7 +758,7 @@ class NFVIComputeAPI(nfvi.api.v1.NFVIComputeAPI):
         try:
             future.set_timeouts(config.CONF.get('nfvi-timeouts', None))

-            # The following only applies to compute hosts
+            # The following only applies to worker hosts
             if self._host_supports_nova_compute(host_personality):
                 response['reason'] = 'failed to get openstack token from ' \
                                      'keystone'
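
A tiny illustrative sketch (assuming comma-separated personality strings; not
code from this commit) of why the renamed substring test above also matches
combined-personality hosts:

def host_supports_nova_compute(personality, nova_service_info):
    # Hypothetical stand-alone form of the check above.
    return ('worker' in personality) and (nova_service_info is not None)

# An AIO controller carries the worker subfunction, so it still qualifies.
assert host_supports_nova_compute('controller,worker', object())
assert not host_supports_nova_compute('controller', object())
assert not host_supports_nova_compute('worker', None)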

@@ -215,7 +215,7 @@ class NFVIGuestAPI(nfvi.api.v1.NFVIGuestAPI):
         self._guest_services_action_notify_callbacks = list()

     def _host_supports_nova_compute(self, personality):
-        return (('compute' in personality) and
+        return (('worker' in personality) and
                 (self._openstack_directory.get_service_info(
                     OPENSTACK_SERVICE.NOVA) is not None))
@@ -829,7 +829,7 @@ class NFVIGuestAPI(nfvi.api.v1.NFVIGuestAPI):
         try:
             future.set_timeouts(config.CONF.get('nfvi-timeouts', None))

-            # The following only applies to compute hosts
+            # The following only applies to worker hosts
             if self._host_supports_nova_compute(host_personality):
                 response['reason'] = 'failed to get platform token from ' \
                                      'keystone'

@@ -50,9 +50,9 @@ def host_state(host_uuid, host_name, host_personality, host_sub_functions,
     nfvi_data['data_ports_oper'] = 'n/a'
     nfvi_data['data_ports_avail'] = 'n/a'

-    if 'compute' != host_personality and 'compute' in host_sub_functions:
+    if 'worker' != host_personality and 'worker' in host_sub_functions:
         if sub_function_oper_state is not None:
-            nfvi_data['subfunction_name'] = 'compute'
+            nfvi_data['subfunction_name'] = 'worker'
             nfvi_data['subfunction_oper'] = sub_function_oper_state
             nfvi_data['subfunction_avail'] = sub_function_avail_status
@@ -65,12 +65,12 @@ def host_state(host_uuid, host_name, host_personality, host_sub_functions,
         return (host_admin_state, host_oper_state, host_avail_status,
                 nfvi_data)

-    if 'compute' != host_personality and 'compute' in host_sub_functions:
+    if 'worker' != host_personality and 'worker' in host_sub_functions:
         if nfvi.objects.v1.HOST_OPER_STATE.ENABLED != sub_function_oper_state:
             return (host_admin_state, sub_function_oper_state,
                     sub_function_avail_status, nfvi_data)

-    if 'compute' == host_personality or 'compute' in host_sub_functions:
+    if 'worker' == host_personality or 'worker' in host_sub_functions:
         if data_port_fault_handling_enabled:
             if data_port_oper_state is not None:
                 if data_port_avail_status in \
@@ -122,7 +122,7 @@ class NFVIInfrastructureAPI(nfvi.api.v1.NFVIInfrastructureAPI):
         # TODO(bwensley): This check will disappear once kubernetes is the
         # default
         if os.path.isfile('/etc/kubernetes/admin.conf'):
-            return ('compute' in personality or 'controller' in personality)
+            return ('worker' in personality or 'controller' in personality)
         else:
             return False
@@ -166,7 +166,7 @@ class NFVIInfrastructureAPI(nfvi.api.v1.NFVIInfrastructureAPI):
         self._host_listener = None

     def _host_supports_nova_compute(self, personality):
-        return (('compute' in personality) and
+        return (('worker' in personality) and
                 (self._openstack_directory.get_service_info(
                     OPENSTACK_SERVICE.NOVA) is not None))
@@ -1548,7 +1548,7 @@ class NFVIInfrastructureAPI(nfvi.api.v1.NFVIInfrastructureAPI):
         try:
             future.set_timeouts(config.CONF.get('nfvi-timeouts', None))

-            # Only applies to compute hosts
+            # Only applies to worker hosts
             if not self._host_supports_nova_compute(host_personality):
                 response['completed'] = True
                 return
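
A small hedged sketch (again assuming comma-separated personality and
subfunction strings; not code from this commit) of the renamed AIO subfunction
condition used in host_state above:

def reports_worker_subfunction(host_personality, host_sub_functions):
    # True only for combined (AIO) hosts: the personality is not plain
    # 'worker', but the host still carries the 'worker' subfunction.
    return ('worker' != host_personality
            and 'worker' in host_sub_functions)

assert reports_worker_subfunction('controller', 'controller,worker')
assert not reports_worker_subfunction('worker', 'worker')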

@@ -86,7 +86,7 @@ class NFVINetworkAPI(nfvi.api.v1.NFVINetworkAPI):
         return self._signature

     def _host_supports_neutron(self, personality):
-        return (('compute' in personality or 'controller' in personality) and
+        return (('worker' in personality or 'controller' in personality) and
                 (self._directory.get_service_info(
                     OPENSTACK_SERVICE.NEUTRON) is not None))

@@ -10,7 +10,7 @@ cat > create_serial.txt << EOF
 {
 "controller-apply-type": "serial",
 "default-instance-action": "stop-start",
-"compute-apply-type": "serial",
+"worker-apply-type": "serial",
 "storage-apply-type": "serial",
 "swift-apply-type": "ignore",
 "alarm-restrictions": "relaxed"
@@ -22,8 +22,8 @@ cat > create_parallel.txt << EOF
 {
 "controller-apply-type": "serial",
 "default-instance-action": "migrate",
-"compute-apply-type": "parallel",
-"max-parallel-compute-hosts": "3",
+"worker-apply-type": "parallel",
+"max-parallel-worker-hosts": "3",
 "storage-apply-type": "parallel",
 "swift-apply-type": "ignore",
 "alarm-restrictions": "relaxed"
@@ -79,12 +79,12 @@ TOKEN_ID=`openstack token issue | grep "| id |" | cut -f3 -d'|' | tr -d
 Create strategy
 ---------------
 cat > create_serial.txt << EOF
-{ "compute-apply-type": "serial", "storage-apply-type": "serial", "alarm-restrictions": "relaxed" }
+{ "worker-apply-type": "serial", "storage-apply-type": "serial", "alarm-restrictions": "relaxed" }
 EOF
 curl -i -X POST -H "Accept: application/json" -H "X-Auth-Token: ${TOKEN_ID}" -H "Content-Type: application/json" http://192.168.204.2:4545/api/orchestration/sw-upgrade/strategy -d @create_serial.txt
 cat > create_parallel.txt << EOF
-{ "compute-apply-type": "parallel", "max-parallel-compute-hosts": "3", "storage-apply-type": "parallel", "alarm-restrictions": "relaxed" }
+{ "worker-apply-type": "parallel", "max-parallel-worker-hosts": "3", "storage-apply-type": "parallel", "alarm-restrictions": "relaxed" }
 EOF
 curl -i -X POST -H "Accept: application/json" -H "X-Auth-Token: ${TOKEN_ID}" -H "Content-Type: application/json" http://192.168.204.2:4545/api/orchestration/sw-upgrade/strategy -d @create_parallel.txt

@@ -345,13 +345,13 @@ do
 7)
 echo "${TODAY}: ${CMD_COUNT}. Add Hosts (compute40, compute41, compute42, compute43, compute44, compute45, compute46)"
-system host-add --hostname compute40 --personality compute --mgmt_mac 5a:ec:8b:20:02:65 --bm_mac 58:20:B2:0A:6A:72 --bm_ip 10.32.254.71 --bm_type ilo4 --bm_username hpadmin --bm_password HPinvent2016 2>&1 >/dev/null
-system host-add --hostname compute41 --personality compute --mgmt_mac 5a:ec:8b:20:02:66 --bm_mac 58:20:B2:0A:6A:73 --bm_ip 10.32.254.72 --bm_type ilo4 --bm_username hpadmin --bm_password HPinvent2016 2>&1 >/dev/null
-system host-add --hostname compute42 --personality compute --mgmt_mac 5a:ec:8b:20:02:69 --bm_mac 58:20:b2:0b:73:30 --bm_ip 10.32.254.73 --bm_type ilo4 --bm_username hpadmin --bm_password HPinvent2016 2>&1 >/dev/null
-system host-add --hostname compute43 --personality compute --mgmt_mac 5a:ec:8b:20:02:70 --bm_mac 58:20:b2:0b:73:31 --bm_ip 10.32.254.74 --bm_type ilo4 --bm_username hpadmin --bm_password HPinvent2016 2>&1 >/dev/null
-system host-add --hostname compute44 --personality compute --mgmt_mac 5a:ec:8b:20:02:6d --bm_mac 58:20:b2:0b:9a:6a --bm_ip 10.32.254.75 --bm_type ilo4 --bm_username hpadmin --bm_password HPinvent2016 2>&1 >/dev/null
-system host-add --hostname compute45 --personality compute --mgmt_mac 5a:ec:8b:20:02:6f --bm_mac 58:20:B2:0B:DA:06 --bm_ip 10.32.254.76 --bm_type ilo4 --bm_username hpadmin --bm_password HPinvent2016 2>&1 >/dev/null
-system host-add --hostname compute46 --personality compute --mgmt_mac 5a:ec:8b:20:02:71 --bm_mac 58:20:B2:0A:68:38 --bm_ip 10.32.254.77 --bm_type ilo4 --bm_username hpadmin --bm_password HPinvent2016 2>&1 >/dev/null
+system host-add --hostname compute40 --personality worker --mgmt_mac 5a:ec:8b:20:02:65 --bm_mac 58:20:B2:0A:6A:72 --bm_ip 10.32.254.71 --bm_type ilo4 --bm_username hpadmin --bm_password HPinvent2016 2>&1 >/dev/null
+system host-add --hostname compute41 --personality worker --mgmt_mac 5a:ec:8b:20:02:66 --bm_mac 58:20:B2:0A:6A:73 --bm_ip 10.32.254.72 --bm_type ilo4 --bm_username hpadmin --bm_password HPinvent2016 2>&1 >/dev/null
+system host-add --hostname compute42 --personality worker --mgmt_mac 5a:ec:8b:20:02:69 --bm_mac 58:20:b2:0b:73:30 --bm_ip 10.32.254.73 --bm_type ilo4 --bm_username hpadmin --bm_password HPinvent2016 2>&1 >/dev/null
+system host-add --hostname compute43 --personality worker --mgmt_mac 5a:ec:8b:20:02:70 --bm_mac 58:20:b2:0b:73:31 --bm_ip 10.32.254.74 --bm_type ilo4 --bm_username hpadmin --bm_password HPinvent2016 2>&1 >/dev/null
+system host-add --hostname compute44 --personality worker --mgmt_mac 5a:ec:8b:20:02:6d --bm_mac 58:20:b2:0b:9a:6a --bm_ip 10.32.254.75 --bm_type ilo4 --bm_username hpadmin --bm_password HPinvent2016 2>&1 >/dev/null
+system host-add --hostname compute45 --personality worker --mgmt_mac 5a:ec:8b:20:02:6f --bm_mac 58:20:B2:0B:DA:06 --bm_ip 10.32.254.76 --bm_type ilo4 --bm_username hpadmin --bm_password HPinvent2016 2>&1 >/dev/null
+system host-add --hostname compute46 --personality worker --mgmt_mac 5a:ec:8b:20:02:71 --bm_mac 58:20:B2:0A:68:38 --bm_ip 10.32.254.77 --bm_type ilo4 --bm_username hpadmin --bm_password HPinvent2016 2>&1 >/dev/null
 echo "${TODAY}: ${CMD_COUNT}. Added Hosts (compute40, compute41, compute42, compute43, compute44, compute45, compute46)"
 sleep 30
 echo "${TODAY}: ${CMD_COUNT}. Delete Hosts (compute40, compute41, compute42, compute43, compute44, compute45, compute46)"

@@ -199,9 +199,9 @@ class TestInstance(testcase.NFVTestCase):
 if host_name.startswith('controller'):
     personality = HOST_PERSONALITY.CONTROLLER
     if cpe:
-        personality = personality + ',' + HOST_PERSONALITY.COMPUTE
+        personality = personality + ',' + HOST_PERSONALITY.WORKER
 elif host_name.startswith('compute'):
-    personality = HOST_PERSONALITY.COMPUTE
+    personality = HOST_PERSONALITY.WORKER
 elif host_name.startswith('storage'):
     personality = HOST_PERSONALITY.STORAGE
 else:

File diff suppressed because it is too large.

@ -255,9 +255,9 @@ class TestSwUpgradeStrategy(testcase.NFVTestCase):
if host_name.startswith('controller'): if host_name.startswith('controller'):
personality = HOST_PERSONALITY.CONTROLLER personality = HOST_PERSONALITY.CONTROLLER
if cpe: if cpe:
personality = personality + ',' + HOST_PERSONALITY.COMPUTE personality = personality + ',' + HOST_PERSONALITY.WORKER
elif host_name.startswith('compute'): elif host_name.startswith('compute'):
personality = HOST_PERSONALITY.COMPUTE personality = HOST_PERSONALITY.WORKER
elif host_name.startswith('storage'): elif host_name.startswith('storage'):
personality = HOST_PERSONALITY.STORAGE personality = HOST_PERSONALITY.STORAGE
else: else:
@ -316,8 +316,8 @@ class TestSwUpgradeStrategy(testcase.NFVTestCase):
def create_sw_upgrade_strategy(self, def create_sw_upgrade_strategy(self,
storage_apply_type=SW_UPDATE_APPLY_TYPE.IGNORE, storage_apply_type=SW_UPDATE_APPLY_TYPE.IGNORE,
compute_apply_type=SW_UPDATE_APPLY_TYPE.IGNORE, worker_apply_type=SW_UPDATE_APPLY_TYPE.IGNORE,
max_parallel_compute_hosts=10, max_parallel_worker_hosts=10,
alarm_restrictions=SW_UPDATE_ALARM_RESTRICTION.STRICT, alarm_restrictions=SW_UPDATE_ALARM_RESTRICTION.STRICT,
start_upgrade=False, start_upgrade=False,
complete_upgrade=False, complete_upgrade=False,
@ -329,8 +329,8 @@ class TestSwUpgradeStrategy(testcase.NFVTestCase):
strategy = SwUpgradeStrategy( strategy = SwUpgradeStrategy(
uuid=str(uuid.uuid4()), uuid=str(uuid.uuid4()),
storage_apply_type=storage_apply_type, storage_apply_type=storage_apply_type,
compute_apply_type=compute_apply_type, worker_apply_type=worker_apply_type,
max_parallel_compute_hosts=max_parallel_compute_hosts, max_parallel_worker_hosts=max_parallel_worker_hosts,
alarm_restrictions=alarm_restrictions, alarm_restrictions=alarm_restrictions,
start_upgrade=start_upgrade, start_upgrade=start_upgrade,
complete_upgrade=complete_upgrade, complete_upgrade=complete_upgrade,
@ -341,9 +341,9 @@ class TestSwUpgradeStrategy(testcase.NFVTestCase):
@mock.patch('nfv_vim.strategy._strategy.get_local_host_name', @mock.patch('nfv_vim.strategy._strategy.get_local_host_name',
fake_host_name_controller_1) fake_host_name_controller_1)
def test_sw_upgrade_strategy_compute_stages_ignore(self): def test_sw_upgrade_strategy_worker_stages_ignore(self):
""" """
Test the sw_upgrade strategy add compute strategy stages: Test the sw_upgrade strategy add worker strategy stages:
- ignore apply - ignore apply
Verify: Verify:
- stages not created - stages not created
@ -364,19 +364,19 @@ class TestSwUpgradeStrategy(testcase.NFVTestCase):
['test_instance_0', 'test_instance_1'], ['test_instance_0', 'test_instance_1'],
[nfvi.objects.v1.INSTANCE_GROUP_POLICY.ANTI_AFFINITY]) [nfvi.objects.v1.INSTANCE_GROUP_POLICY.ANTI_AFFINITY])
compute_hosts = [] worker_hosts = []
for host in self._host_table.values(): for host in self._host_table.values():
if HOST_PERSONALITY.COMPUTE in host.personality: if HOST_PERSONALITY.WORKER in host.personality:
compute_hosts.append(host) worker_hosts.append(host)
# Sort compute hosts so the order of the steps is deterministic # Sort worker hosts so the order of the steps is deterministic
sorted_compute_hosts = sorted(compute_hosts, key=lambda host: host.name) sorted_worker_hosts = sorted(worker_hosts, key=lambda host: host.name)
strategy = self.create_sw_upgrade_strategy( strategy = self.create_sw_upgrade_strategy(
compute_apply_type=SW_UPDATE_APPLY_TYPE.IGNORE worker_apply_type=SW_UPDATE_APPLY_TYPE.IGNORE
) )
success, reason = strategy._add_compute_strategy_stages( success, reason = strategy._add_worker_strategy_stages(
compute_hosts=sorted_compute_hosts, worker_hosts=sorted_worker_hosts,
reboot=True) reboot=True)
assert success is True, "Strategy creation failed" assert success is True, "Strategy creation failed"
@@ -392,9 +392,9 @@ class TestSwUpgradeStrategy(testcase.NFVTestCase):
@mock.patch('nfv_vim.strategy._strategy.get_local_host_name',
fake_host_name_controller_1)
-def test_sw_upgrade_strategy_compute_stages_parallel_migrate_anti_affinity(self):
+def test_sw_upgrade_strategy_worker_stages_parallel_migrate_anti_affinity(self):
"""
-Test the sw_upgrade strategy add compute strategy stages:
+Test the sw_upgrade strategy add worker strategy stages:
- parallel apply
- migrate instance action
Verify:
@@ -417,20 +417,20 @@ class TestSwUpgradeStrategy(testcase.NFVTestCase):
['test_instance_0', 'test_instance_1'],
[nfvi.objects.v1.INSTANCE_GROUP_POLICY.ANTI_AFFINITY])
-compute_hosts = []
+worker_hosts = []
for host in self._host_table.values():
-if HOST_PERSONALITY.COMPUTE in host.personality:
+if HOST_PERSONALITY.WORKER in host.personality:
-compute_hosts.append(host)
+worker_hosts.append(host)
-# Sort compute hosts so the order of the steps is deterministic
+# Sort worker hosts so the order of the steps is deterministic
-sorted_compute_hosts = sorted(compute_hosts, key=lambda host: host.name)
+sorted_worker_hosts = sorted(worker_hosts, key=lambda host: host.name)
strategy = self.create_sw_upgrade_strategy(
-compute_apply_type=SW_UPDATE_APPLY_TYPE.PARALLEL,
+worker_apply_type=SW_UPDATE_APPLY_TYPE.PARALLEL,
-max_parallel_compute_hosts=2
+max_parallel_worker_hosts=2
)
-strategy._add_compute_strategy_stages(
+strategy._add_worker_strategy_stages(
-compute_hosts=sorted_compute_hosts,
+worker_hosts=sorted_worker_hosts,
reboot=True)
apply_phase = strategy.apply_phase.as_dict()
@@ -438,7 +438,7 @@ class TestSwUpgradeStrategy(testcase.NFVTestCase):
expected_results = {
'total_stages': 3,
'stages': [
-{'name': 'sw-upgrade-compute-hosts',
+{'name': 'sw-upgrade-worker-hosts',
'total_steps': 5,
'steps': [
{'name': 'query-alarms'},
@@ -451,7 +451,7 @@ class TestSwUpgradeStrategy(testcase.NFVTestCase):
{'name': 'system-stabilize'}
]
},
-{'name': 'sw-upgrade-compute-hosts',
+{'name': 'sw-upgrade-worker-hosts',
'total_steps': 7,
'steps': [
{'name': 'query-alarms'},
@@ -467,7 +467,7 @@ class TestSwUpgradeStrategy(testcase.NFVTestCase):
{'name': 'system-stabilize'}
]
},
-{'name': 'sw-upgrade-compute-hosts',
+{'name': 'sw-upgrade-worker-hosts',
'total_steps': 7,
'steps': [
{'name': 'query-alarms'},
@@ -491,9 +491,9 @@ class TestSwUpgradeStrategy(testcase.NFVTestCase):
@mock.patch('nfv_vim.strategy._strategy.get_local_host_name',
fake_host_name_controller_1)
-def test_sw_upgrade_strategy_compute_stages_parallel_migrate_ten_hosts(self):
+def test_sw_upgrade_strategy_worker_stages_parallel_migrate_ten_hosts(self):
"""
-Test the sw_upgrade strategy add compute strategy stages:
+Test the sw_upgrade strategy add worker strategy stages:
- parallel apply
- migrate instance action
Verify:
@@ -520,20 +520,20 @@ class TestSwUpgradeStrategy(testcase.NFVTestCase):
self.create_instance('small', "test_instance_8", 'compute-8')
self.create_instance('small', "test_instance_9", 'compute-9')
-compute_hosts = []
+worker_hosts = []
for host in self._host_table.values():
-if HOST_PERSONALITY.COMPUTE in host.personality:
+if HOST_PERSONALITY.WORKER in host.personality:
-compute_hosts.append(host)
+worker_hosts.append(host)
-# Sort compute hosts so the order of the steps is deterministic
+# Sort worker hosts so the order of the steps is deterministic
-sorted_compute_hosts = sorted(compute_hosts, key=lambda host: host.name)
+sorted_worker_hosts = sorted(worker_hosts, key=lambda host: host.name)
strategy = self.create_sw_upgrade_strategy(
-compute_apply_type=SW_UPDATE_APPLY_TYPE.PARALLEL,
+worker_apply_type=SW_UPDATE_APPLY_TYPE.PARALLEL,
-max_parallel_compute_hosts=3
+max_parallel_worker_hosts=3
)
-strategy._add_compute_strategy_stages(
+strategy._add_worker_strategy_stages(
-compute_hosts=sorted_compute_hosts,
+worker_hosts=sorted_worker_hosts,
reboot=True)
apply_phase = strategy.apply_phase.as_dict()
@@ -541,7 +541,7 @@ class TestSwUpgradeStrategy(testcase.NFVTestCase):
expected_results = {
'total_stages': 4,
'stages': [
-{'name': 'sw-upgrade-compute-hosts',
+{'name': 'sw-upgrade-worker-hosts',
'total_steps': 5,
'steps': [
{'name': 'query-alarms'},
@@ -554,7 +554,7 @@ class TestSwUpgradeStrategy(testcase.NFVTestCase):
{'name': 'system-stabilize'}
]
},
-{'name': 'sw-upgrade-compute-hosts',
+{'name': 'sw-upgrade-worker-hosts',
'total_steps': 7,
'steps': [
{'name': 'query-alarms'},
@@ -572,7 +572,7 @@ class TestSwUpgradeStrategy(testcase.NFVTestCase):
{'name': 'system-stabilize'}
]
},
-{'name': 'sw-upgrade-compute-hosts',
+{'name': 'sw-upgrade-worker-hosts',
'total_steps': 7,
'steps': [
{'name': 'query-alarms'},
@@ -590,7 +590,7 @@ class TestSwUpgradeStrategy(testcase.NFVTestCase):
{'name': 'system-stabilize'}
]
},
-{'name': 'sw-upgrade-compute-hosts',
+{'name': 'sw-upgrade-worker-hosts',
'total_steps': 7,
'steps': [
{'name': 'query-alarms'},
@@ -615,9 +615,9 @@ class TestSwUpgradeStrategy(testcase.NFVTestCase):
@mock.patch('nfv_vim.strategy._strategy.get_local_host_name',
fake_host_name_controller_1)
-def test_sw_upgrade_strategy_compute_stages_parallel_migrate_fifty_hosts(self):
+def test_sw_upgrade_strategy_worker_stages_parallel_migrate_fifty_hosts(self):
"""
-Test the sw_upgrade strategy add compute strategy stages:
+Test the sw_upgrade strategy add worker strategy stages:
- parallel apply
- migrate instance action
Verify:
@@ -637,20 +637,20 @@ class TestSwUpgradeStrategy(testcase.NFVTestCase):
self.create_host_aggregate('aggregate-2',
["compute-%02d" % x for x in range(25, 50)])
-compute_hosts = []
+worker_hosts = []
for host in self._host_table.values():
-if HOST_PERSONALITY.COMPUTE in host.personality:
+if HOST_PERSONALITY.WORKER in host.personality:
-compute_hosts.append(host)
+worker_hosts.append(host)
-# Sort compute hosts so the order of the steps is deterministic
+# Sort worker hosts so the order of the steps is deterministic
-sorted_compute_hosts = sorted(compute_hosts, key=lambda host: host.name)
+sorted_worker_hosts = sorted(worker_hosts, key=lambda host: host.name)
strategy = self.create_sw_upgrade_strategy(
-compute_apply_type=SW_UPDATE_APPLY_TYPE.PARALLEL,
+worker_apply_type=SW_UPDATE_APPLY_TYPE.PARALLEL,
-max_parallel_compute_hosts=5
+max_parallel_worker_hosts=5
)
-strategy._add_compute_strategy_stages(
+strategy._add_worker_strategy_stages(
-compute_hosts=sorted_compute_hosts,
+worker_hosts=sorted_worker_hosts,
reboot=True)
apply_phase = strategy.apply_phase.as_dict()
@@ -683,7 +683,7 @@ class TestSwUpgradeStrategy(testcase.NFVTestCase):
expected_results = {
'total_stages': 13,
'stages': [
-{'name': 'sw-upgrade-compute-hosts',
+{'name': 'sw-upgrade-worker-hosts',
'total_steps': 5,
'steps': [
{'name': 'query-alarms'},
@@ -701,7 +701,7 @@ class TestSwUpgradeStrategy(testcase.NFVTestCase):
for x in range(1, len(stage_hosts)):
expected_results['stages'].append(
-{'name': 'sw-upgrade-compute-hosts',
+{'name': 'sw-upgrade-worker-hosts',
'total_steps': 7,
'steps': [
{'name': 'query-alarms'},
@@ -724,9 +724,9 @@ class TestSwUpgradeStrategy(testcase.NFVTestCase):
@mock.patch('nfv_vim.strategy._strategy.get_local_host_name',
fake_host_name_controller_1)
-def test_sw_upgrade_strategy_compute_stages_serial_migrate(self):
+def test_sw_upgrade_strategy_worker_stages_serial_migrate(self):
"""
-Test the sw_upgrade strategy add compute strategy stages:
+Test the sw_upgrade strategy add worker strategy stages:
- serial apply
- migrate instance action
Verify:
@@ -748,18 +748,18 @@ class TestSwUpgradeStrategy(testcase.NFVTestCase):
['test_instance_0', 'test_instance_1'],
[nfvi.objects.v1.INSTANCE_GROUP_POLICY.ANTI_AFFINITY])
-compute_hosts = []
+worker_hosts = []
for host in self._host_table.values():
-if HOST_PERSONALITY.COMPUTE in host.personality:
+if HOST_PERSONALITY.WORKER in host.personality:
-compute_hosts.append(host)
+worker_hosts.append(host)
-# Sort compute hosts so the order of the steps is deterministic
+# Sort worker hosts so the order of the steps is deterministic
-sorted_compute_hosts = sorted(compute_hosts, key=lambda host: host.name)
+sorted_worker_hosts = sorted(worker_hosts, key=lambda host: host.name)
strategy = self.create_sw_upgrade_strategy(
-compute_apply_type=SW_UPDATE_APPLY_TYPE.SERIAL
+worker_apply_type=SW_UPDATE_APPLY_TYPE.SERIAL
)
-strategy._add_compute_strategy_stages(compute_hosts=sorted_compute_hosts,
+strategy._add_worker_strategy_stages(worker_hosts=sorted_worker_hosts,
reboot=True)
apply_phase = strategy.apply_phase.as_dict()
@@ -767,7 +767,7 @@ class TestSwUpgradeStrategy(testcase.NFVTestCase):
expected_results = {
'total_stages': 4,
'stages': [
-{'name': 'sw-upgrade-compute-hosts',
+{'name': 'sw-upgrade-worker-hosts',
'total_steps': 5,
'steps': [
{'name': 'query-alarms'},
@@ -780,7 +780,7 @@ class TestSwUpgradeStrategy(testcase.NFVTestCase):
{'name': 'system-stabilize'}
]
},
-{'name': 'sw-upgrade-compute-hosts',
+{'name': 'sw-upgrade-worker-hosts',
'total_steps': 5,
'steps': [
{'name': 'query-alarms'},
@@ -793,7 +793,7 @@ class TestSwUpgradeStrategy(testcase.NFVTestCase):
{'name': 'system-stabilize'}
]
},
-{'name': 'sw-upgrade-compute-hosts',
+{'name': 'sw-upgrade-worker-hosts',
'total_steps': 6,
'steps': [
{'name': 'query-alarms'},
@@ -808,7 +808,7 @@ class TestSwUpgradeStrategy(testcase.NFVTestCase):
{'name': 'system-stabilize'}
]
},
-{'name': 'sw-upgrade-compute-hosts',
+{'name': 'sw-upgrade-worker-hosts',
'total_steps': 6,
'steps': [
{'name': 'query-alarms'},
@@ -831,9 +831,9 @@ class TestSwUpgradeStrategy(testcase.NFVTestCase):
@mock.patch('nfv_vim.strategy._strategy.get_local_host_name',
fake_host_name_controller_1)
-def test_sw_upgrade_strategy_compute_stages_serial_migrate_locked_instance(self):
+def test_sw_upgrade_strategy_worker_stages_serial_migrate_locked_instance(self):
"""
-Test the sw_upgrade strategy add compute strategy stages:
+Test the sw_upgrade strategy add worker strategy stages:
- serial apply
- migrate instance action
- locked instance in instance group
@@ -857,19 +857,19 @@ class TestSwUpgradeStrategy(testcase.NFVTestCase):
['test_instance_0', 'test_instance_1'],
[nfvi.objects.v1.INSTANCE_GROUP_POLICY.ANTI_AFFINITY])
-compute_hosts = []
+worker_hosts = []
for host in self._host_table.values():
-if HOST_PERSONALITY.COMPUTE in host.personality:
+if HOST_PERSONALITY.WORKER in host.personality:
-compute_hosts.append(host)
+worker_hosts.append(host)
-# Sort compute hosts so the order of the steps is deterministic
+# Sort worker hosts so the order of the steps is deterministic
-sorted_compute_hosts = sorted(compute_hosts, key=lambda host: host.name)
+sorted_worker_hosts = sorted(worker_hosts, key=lambda host: host.name)
strategy = self.create_sw_upgrade_strategy(
-compute_apply_type=SW_UPDATE_APPLY_TYPE.SERIAL
+worker_apply_type=SW_UPDATE_APPLY_TYPE.SERIAL
)
-success, reason = strategy._add_compute_strategy_stages(
+success, reason = strategy._add_worker_strategy_stages(
-compute_hosts=sorted_compute_hosts,
+worker_hosts=sorted_worker_hosts,
reboot=True)
assert success is False, "Strategy creation did not fail"
@@ -1280,7 +1280,7 @@ class TestSwUpgradeStrategy(testcase.NFVTestCase):
strategy = self.create_sw_upgrade_strategy(
storage_apply_type=SW_UPDATE_APPLY_TYPE.SERIAL,
-compute_apply_type=SW_UPDATE_APPLY_TYPE.SERIAL,
+worker_apply_type=SW_UPDATE_APPLY_TYPE.SERIAL,
start_upgrade=True,
complete_upgrade=True
)
@@ -1367,7 +1367,7 @@ class TestSwUpgradeStrategy(testcase.NFVTestCase):
'timeout': 7200}
]
},
-{'name': 'sw-upgrade-compute-hosts',
+{'name': 'sw-upgrade-worker-hosts',
'total_steps': 5,
'steps': [
{'name': 'query-alarms'},
@@ -1381,7 +1381,7 @@ class TestSwUpgradeStrategy(testcase.NFVTestCase):
'timeout': 60}
]
},
-{'name': 'sw-upgrade-compute-hosts',
+{'name': 'sw-upgrade-worker-hosts',
'total_steps': 6,
'steps': [
{'name': 'query-alarms'},
@@ -1435,7 +1435,7 @@ class TestSwUpgradeStrategy(testcase.NFVTestCase):
'compute-0')
strategy = self.create_sw_upgrade_strategy(
-compute_apply_type=SW_UPDATE_APPLY_TYPE.SERIAL,
+worker_apply_type=SW_UPDATE_APPLY_TYPE.SERIAL,
nfvi_upgrade=nfvi.objects.v1.Upgrade(
UPGRADE_STATE.UPGRADING_CONTROLLERS,
'12.01',
@@ -1483,7 +1483,7 @@ class TestSwUpgradeStrategy(testcase.NFVTestCase):
'timeout': 14400}
]
},
-{'name': 'sw-upgrade-compute-hosts',
+{'name': 'sw-upgrade-worker-hosts',
'total_steps': 5,
'steps': [
{'name': 'query-alarms'},
@@ -1497,7 +1497,7 @@ class TestSwUpgradeStrategy(testcase.NFVTestCase):
'timeout': 60}
]
},
-{'name': 'sw-upgrade-compute-hosts',
+{'name': 'sw-upgrade-worker-hosts',
'total_steps': 6,
'steps': [
{'name': 'query-alarms'},
@@ -1543,7 +1543,7 @@ class TestSwUpgradeStrategy(testcase.NFVTestCase):
'compute-1')
strategy = self.create_sw_upgrade_strategy(
-compute_apply_type=SW_UPDATE_APPLY_TYPE.SERIAL,
+worker_apply_type=SW_UPDATE_APPLY_TYPE.SERIAL,
nfvi_upgrade=nfvi.objects.v1.Upgrade(
UPGRADE_STATE.DATA_MIGRATION_COMPLETE,
'12.01',
@@ -1588,7 +1588,7 @@ class TestSwUpgradeStrategy(testcase.NFVTestCase):
'compute-1')
strategy = self.create_sw_upgrade_strategy(
-compute_apply_type=SW_UPDATE_APPLY_TYPE.SERIAL
+worker_apply_type=SW_UPDATE_APPLY_TYPE.SERIAL
)
fake_upgrade_obj = SwUpgrade()
@@ -1629,7 +1629,7 @@ class TestSwUpgradeStrategy(testcase.NFVTestCase):
'compute-1')
strategy = self.create_sw_upgrade_strategy(
-compute_apply_type=SW_UPDATE_APPLY_TYPE.SERIAL,
+worker_apply_type=SW_UPDATE_APPLY_TYPE.SERIAL,
nfvi_upgrade=nfvi.objects.v1.Upgrade(
UPGRADE_STATE.DATA_MIGRATION_COMPLETE,
'12.01',
@@ -1674,7 +1674,7 @@ class TestSwUpgradeStrategy(testcase.NFVTestCase):
'compute-1')
strategy = self.create_sw_upgrade_strategy(
-compute_apply_type=SW_UPDATE_APPLY_TYPE.SERIAL,
+worker_apply_type=SW_UPDATE_APPLY_TYPE.SERIAL,
nfvi_upgrade=nfvi.objects.v1.Upgrade(
UPGRADE_STATE.UPGRADING_CONTROLLERS,
'12.01',
@@ -1721,7 +1721,7 @@ class TestSwUpgradeStrategy(testcase.NFVTestCase):
'compute-1')
strategy = self.create_sw_upgrade_strategy(
-compute_apply_type=SW_UPDATE_APPLY_TYPE.SERIAL,
+worker_apply_type=SW_UPDATE_APPLY_TYPE.SERIAL,
nfvi_upgrade=nfvi.objects.v1.Upgrade(
UPGRADE_STATE.UPGRADING_CONTROLLERS,
'12.01',
@@ -1744,10 +1744,10 @@ class TestSwUpgradeStrategy(testcase.NFVTestCase):
@mock.patch('nfv_vim.strategy._strategy.get_local_host_name',
fake_host_name_controller_1)
-def test_sw_upgrade_strategy_build_complete_locked_compute(self):
+def test_sw_upgrade_strategy_build_complete_locked_worker(self):
"""
Test the sw_upgrade strategy build_complete:
-- locked compute host
+- locked worker host
Verify:
- build fails
"""
@@ -1767,7 +1767,7 @@ class TestSwUpgradeStrategy(testcase.NFVTestCase):
'compute-1')
strategy = self.create_sw_upgrade_strategy(
-compute_apply_type=SW_UPDATE_APPLY_TYPE.SERIAL,
+worker_apply_type=SW_UPDATE_APPLY_TYPE.SERIAL,
nfvi_upgrade=nfvi.objects.v1.Upgrade(
UPGRADE_STATE.UPGRADING_CONTROLLERS,
'12.01',
@@ -1783,7 +1783,7 @@ class TestSwUpgradeStrategy(testcase.NFVTestCase):
expected_results = {
'total_stages': 0,
'result': 'failed',
-'result_reason': 'all compute hosts must be unlocked-enabled-available'
+'result_reason': 'all worker hosts must be unlocked-enabled-available'
}
validate_phase(build_phase, expected_results)

View File

@@ -108,10 +108,10 @@ class SwUpdateStrategyData(wsme_types.Base):
name='storage-apply-type')
swift_apply_type = wsme_types.wsattr(SwUpdateApplyTypes,
name='swift-apply-type')
-compute_apply_type = wsme_types.wsattr(SwUpdateApplyTypes,
+worker_apply_type = wsme_types.wsattr(SwUpdateApplyTypes,
-name='compute-apply-type')
+name='worker-apply-type')
-max_parallel_compute_hosts = wsme_types.wsattr(
+max_parallel_worker_hosts = wsme_types.wsattr(
-int, name='max-parallel-compute-hosts')
+int, name='max-parallel-worker-hosts')
default_instance_action = wsme_types.wsattr(SwUpdateInstanceActionTypes,
name='default-instance-action')
alarm_restrictions = wsme_types.wsattr(SwUpdateAlarmRestrictionTypes,
@@ -135,10 +135,10 @@ class SwPatchStrategyCreateData(wsme_types.Base):
name='storage-apply-type')
swift_apply_type = wsme_types.wsattr(SwUpdateApplyTypes, mandatory=False,
name='swift-apply-type')
-compute_apply_type = wsme_types.wsattr(SwUpdateApplyTypes, mandatory=True,
+worker_apply_type = wsme_types.wsattr(SwUpdateApplyTypes, mandatory=True,
-name='compute-apply-type')
+name='worker-apply-type')
-max_parallel_compute_hosts = wsme_types.wsattr(
+max_parallel_worker_hosts = wsme_types.wsattr(
-int, mandatory=False, name='max-parallel-compute-hosts')
+int, mandatory=False, name='max-parallel-worker-hosts')
default_instance_action = wsme_types.wsattr(SwUpdateInstanceActionTypes,
mandatory=True,
name='default-instance-action')
@@ -154,10 +154,10 @@ class SwUpgradeStrategyCreateData(wsme_types.Base):
"""
storage_apply_type = wsme_types.wsattr(SwUpdateApplyTypes, mandatory=True,
name='storage-apply-type')
-compute_apply_type = wsme_types.wsattr(SwUpdateApplyTypes, mandatory=True,
+worker_apply_type = wsme_types.wsattr(SwUpdateApplyTypes, mandatory=True,
-name='compute-apply-type')
+name='worker-apply-type')
-max_parallel_compute_hosts = wsme_types.wsattr(
+max_parallel_worker_hosts = wsme_types.wsattr(
-int, mandatory=False, name='max-parallel-compute-hosts')
+int, mandatory=False, name='max-parallel-worker-hosts')
# Disable support for start-upgrade as it was not completed
# start_upgrade = wsme_types.wsattr(
# bool, mandatory=False, default=False, name='start-upgrade')
@@ -243,9 +243,9 @@ class SwUpdateStrategyQueryData(wsme_types.Base):
strategy.controller_apply_type = strategy_data['controller_apply_type']
strategy.storage_apply_type = strategy_data['storage_apply_type']
strategy.swift_apply_type = strategy_data['swift_apply_type']
-strategy.compute_apply_type = strategy_data['compute_apply_type']
+strategy.worker_apply_type = strategy_data['worker_apply_type']
-strategy.max_parallel_compute_hosts = \
+strategy.max_parallel_worker_hosts = \
-strategy_data['max_parallel_compute_hosts']
+strategy_data['max_parallel_worker_hosts']
strategy.default_instance_action = strategy_data['default_instance_action']
strategy.alarm_restrictions = strategy_data['alarm_restrictions']
strategy.state = strategy_data['state']
@@ -470,16 +470,16 @@ class SwPatchStrategyAPI(SwUpdateStrategyAPI):
rpc_request.swift_apply_type = SW_UPDATE_APPLY_TYPE.IGNORE
else:
rpc_request.swift_apply_type = request_data.swift_apply_type
-rpc_request.compute_apply_type = request_data.compute_apply_type
+rpc_request.worker_apply_type = request_data.worker_apply_type
-if wsme_types.Unset != request_data.max_parallel_compute_hosts:
+if wsme_types.Unset != request_data.max_parallel_worker_hosts:
-if request_data.max_parallel_compute_hosts < MIN_PARALLEL_HOSTS \
+if request_data.max_parallel_worker_hosts < MIN_PARALLEL_HOSTS \
-or request_data.max_parallel_compute_hosts > \
+or request_data.max_parallel_worker_hosts > \
MAX_PARALLEL_PATCH_HOSTS:
return pecan.abort(
httplib.BAD_REQUEST,
-"Invalid value for max-parallel-compute-hosts")
+"Invalid value for max-parallel-worker-hosts")
-rpc_request.max_parallel_compute_hosts = \
+rpc_request.max_parallel_worker_hosts = \
-request_data.max_parallel_compute_hosts
+request_data.max_parallel_worker_hosts
rpc_request.default_instance_action = request_data.default_instance_action
rpc_request.alarm_restrictions = request_data.alarm_restrictions
vim_connection = pecan.request.vim.open_connection()
@@ -521,16 +521,16 @@ class SwUpgradeStrategyAPI(SwUpdateStrategyAPI):
rpc_request.controller_apply_type = SW_UPDATE_APPLY_TYPE.SERIAL
rpc_request.storage_apply_type = request_data.storage_apply_type
rpc_request.swift_apply_type = SW_UPDATE_APPLY_TYPE.IGNORE
-rpc_request.compute_apply_type = request_data.compute_apply_type
+rpc_request.worker_apply_type = request_data.worker_apply_type
-if wsme_types.Unset != request_data.max_parallel_compute_hosts:
+if wsme_types.Unset != request_data.max_parallel_worker_hosts:
-if request_data.max_parallel_compute_hosts < MIN_PARALLEL_HOSTS \
+if request_data.max_parallel_worker_hosts < MIN_PARALLEL_HOSTS \
-or request_data.max_parallel_compute_hosts > \
+or request_data.max_parallel_worker_hosts > \
MAX_PARALLEL_UPGRADE_HOSTS:
return pecan.abort(
httplib.BAD_REQUEST,
-"Invalid value for max-parallel-compute-hosts")
+"Invalid value for max-parallel-worker-hosts")
-rpc_request.max_parallel_compute_hosts = \
+rpc_request.max_parallel_worker_hosts = \
-request_data.max_parallel_compute_hosts
+request_data.max_parallel_worker_hosts
rpc_request.default_instance_action = SW_UPDATE_INSTANCE_ACTION.MIGRATE
rpc_request.alarm_restrictions = request_data.alarm_restrictions
# rpc_request.start_upgrade = request_data.start_upgrade
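
As a reviewer aid, a minimal sketch of a strategy create request body after the rename. The field names and allowed values follow the wsme name= attributes above (serial, parallel, ignore; the parallel limit is bounds-checked as shown); the endpoint path in the comment is an assumption for illustration, not part of this change.

import json

# Hypothetical target: POST <vim-api>/api/orchestration/sw-upgrade/strategy
# (path assumed; only the field names below are defined by this diff).
payload = {
    'storage-apply-type': 'serial',
    'worker-apply-type': 'parallel',     # formerly 'compute-apply-type'
    'max-parallel-worker-hosts': 3,      # formerly 'max-parallel-compute-hosts'
    'alarm-restrictions': 'strict',
}
print(json.dumps(payload, indent=2))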

View File

@@ -38,8 +38,8 @@ class SwMgmtDirector(object):
return self._sw_update
def create_sw_patch_strategy(self, controller_apply_type, storage_apply_type,
-swift_apply_type, compute_apply_type,
+swift_apply_type, worker_apply_type,
-max_parallel_compute_hosts,
+max_parallel_worker_hosts,
default_instance_action, alarm_restrictions,
callback):
"""
@@ -58,7 +58,7 @@ class SwMgmtDirector(object):
success, reason = self._sw_update.strategy_build(
strategy_uuid, controller_apply_type,
storage_apply_type, swift_apply_type,
-compute_apply_type, max_parallel_compute_hosts,
+worker_apply_type, max_parallel_worker_hosts,
default_instance_action, alarm_restrictions,
self._ignore_alarms, self._single_controller)
@@ -66,8 +66,8 @@ class SwMgmtDirector(object):
self._sw_update.strategy)
return strategy_uuid, ''
-def create_sw_upgrade_strategy(self, storage_apply_type, compute_apply_type,
+def create_sw_upgrade_strategy(self, storage_apply_type, worker_apply_type,
-max_parallel_compute_hosts,
+max_parallel_worker_hosts,
alarm_restrictions, start_upgrade,
complete_upgrade, callback):
"""
@@ -85,7 +85,7 @@ class SwMgmtDirector(object):
self._sw_update = objects.SwUpgrade()
success, reason = self._sw_update.strategy_build(
strategy_uuid, storage_apply_type,
-compute_apply_type, max_parallel_compute_hosts,
+worker_apply_type, max_parallel_worker_hosts,
alarm_restrictions, start_upgrade,
complete_upgrade, self._ignore_alarms)
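
For orientation, a minimal caller-side sketch of the renamed upgrade entry point with the argument roles spelled out. The get_sw_mgmt_director() accessor and the callback signature are assumptions for illustration; the constants are the ones used in the tests and RPC handler elsewhere in this change.

from nfv_vim import directors
from nfv_vim import objects

def _on_strategy_created(*args, **kwargs):
    # Illustrative no-op; the real callback is wired by the RPC layer.
    pass

director = directors.get_sw_mgmt_director()  # assumed accessor name
strategy_uuid, reason = director.create_sw_upgrade_strategy(
    objects.SW_UPDATE_APPLY_TYPE.SERIAL,      # storage_apply_type
    objects.SW_UPDATE_APPLY_TYPE.PARALLEL,    # worker_apply_type (renamed)
    2,                                        # max_parallel_worker_hosts (renamed)
    objects.SW_UPDATE_ALARM_RESTRICTION.STRICT,
    False,                                    # start_upgrade
    False,                                    # complete_upgrade
    _on_strategy_created)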

View File

@@ -61,7 +61,7 @@ def _system_state_query_callback():
for host_data in result_data:
host = host_table.get(host_data['hostname'], None)
if host is not None:
-if objects.HOST_PERSONALITY.COMPUTE in host.personality:
+if objects.HOST_PERSONALITY.WORKER in host.personality:
DLOG.info("Host %s uptime is %s, host_uuid=%s."
% (host_data['hostname'], host_data['uptime'],
host_data['uuid']))

View File

@@ -70,17 +70,17 @@ def vim_sw_update_api_create_strategy(connection, msg):
else:
swift_apply_type = objects.SW_UPDATE_APPLY_TYPE.IGNORE
-if 'parallel' == msg.compute_apply_type:
+if 'parallel' == msg.worker_apply_type:
-compute_apply_type = objects.SW_UPDATE_APPLY_TYPE.PARALLEL
+worker_apply_type = objects.SW_UPDATE_APPLY_TYPE.PARALLEL
-elif 'serial' == msg.compute_apply_type:
+elif 'serial' == msg.worker_apply_type:
-compute_apply_type = objects.SW_UPDATE_APPLY_TYPE.SERIAL
+worker_apply_type = objects.SW_UPDATE_APPLY_TYPE.SERIAL
else:
-compute_apply_type = objects.SW_UPDATE_APPLY_TYPE.IGNORE
+worker_apply_type = objects.SW_UPDATE_APPLY_TYPE.IGNORE
-if msg.max_parallel_compute_hosts is not None:
+if msg.max_parallel_worker_hosts is not None:
-max_parallel_compute_hosts = msg.max_parallel_compute_hosts
+max_parallel_worker_hosts = msg.max_parallel_worker_hosts
else:
-max_parallel_compute_hosts = 2
+max_parallel_worker_hosts = 2
if 'migrate' == msg.default_instance_action:
default_instance_action = objects.SW_UPDATE_INSTANCE_ACTION.MIGRATE
@@ -96,14 +96,14 @@ def vim_sw_update_api_create_strategy(connection, msg):
if 'sw-patch' == msg.sw_update_type:
uuid, reason = sw_mgmt_director.create_sw_patch_strategy(
controller_apply_type, storage_apply_type,
-swift_apply_type, compute_apply_type, max_parallel_compute_hosts,
+swift_apply_type, worker_apply_type, max_parallel_worker_hosts,
default_instance_action,
alarm_restrictions, _vim_sw_update_api_create_strategy_callback)
elif 'sw-upgrade' == msg.sw_update_type:
start_upgrade = msg.start_upgrade
complete_upgrade = msg.complete_upgrade
uuid, reason = sw_mgmt_director.create_sw_upgrade_strategy(
-storage_apply_type, compute_apply_type, max_parallel_compute_hosts,
+storage_apply_type, worker_apply_type, max_parallel_worker_hosts,
alarm_restrictions,
start_upgrade, complete_upgrade,
_vim_sw_update_api_create_strategy_callback)

View File

@@ -204,9 +204,9 @@ class DisableHostTask(state_machine.StateTask):
self._host_reference = weakref.ref(host)
-if objects.HOST_PERSONALITY.COMPUTE in self._host.personality and \
+if objects.HOST_PERSONALITY.WORKER in self._host.personality and \
self._host.is_force_lock():
-# When a compute host is being disabled due to a force lock, we
+# When a worker host is being disabled due to a force lock, we
# want it to be rebooted. To do this we need to indicate that
# the host services disable failed.
notify_host_services_task = NotifyHostServicesDisableFailedTaskWork

View File

@@ -1,5 +1,5 @@
#
-# Copyright (c) 2015-2016 Wind River Systems, Inc.
+# Copyright (c) 2015-2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
@@ -31,7 +31,7 @@ class HostPersonality(object):
CONTROLLER = Constant('controller')
STORAGE = Constant('storage')
SWIFT = Constant('swift')
-COMPUTE = Constant('compute')
+WORKER = Constant('worker')
@six.add_metaclass(Singleton)
@@ -758,8 +758,8 @@ class Host(ObjectData):
self._host_service_state[service] = host_service_state
-# Host services logs and alarms only apply to compute hosts
+# Host services logs and alarms only apply to worker hosts
-if 'compute' in self.personality:
+if 'worker' in self.personality:
host_service_state_overall = \
self.host_service_state_aggregate()
if (HOST_SERVICE_STATE.ENABLED ==

View File

@@ -31,8 +31,8 @@ class SwPatch(SwUpdate):
self._nfvi_sw_patch_hosts = list()
def strategy_build(self, strategy_uuid, controller_apply_type,
-storage_apply_type, swift_apply_type, compute_apply_type,
+storage_apply_type, swift_apply_type, worker_apply_type,
-max_parallel_compute_hosts,
+max_parallel_worker_hosts,
default_instance_action, alarm_restrictions,
ignore_alarms,
single_controller):
@@ -47,7 +47,7 @@ class SwPatch(SwUpdate):
self._strategy = strategy.SwPatchStrategy(
strategy_uuid, controller_apply_type, storage_apply_type,
-swift_apply_type, compute_apply_type, max_parallel_compute_hosts,
+swift_apply_type, worker_apply_type, max_parallel_worker_hosts,
default_instance_action,
alarm_restrictions, ignore_alarms,
single_controller)

View File

@@ -28,7 +28,7 @@ class SwUpgrade(SwUpdate):
strategy_data=strategy_data)
def strategy_build(self, strategy_uuid, storage_apply_type,
-compute_apply_type, max_parallel_compute_hosts,
+worker_apply_type, max_parallel_worker_hosts,
alarm_restrictions, start_upgrade,
complete_upgrade, ignore_alarms):
"""
@@ -41,8 +41,8 @@ class SwUpgrade(SwUpdate):
return False, reason
self._strategy = strategy.SwUpgradeStrategy(
-strategy_uuid, storage_apply_type, compute_apply_type,
+strategy_uuid, storage_apply_type, worker_apply_type,
-max_parallel_compute_hosts,
+max_parallel_worker_hosts,
alarm_restrictions, start_upgrade, complete_upgrade, ignore_alarms)
self._strategy.sw_update_obj = self

View File

@@ -21,8 +21,8 @@ class APIRequestCreateSwUpdateStrategy(RPCMessage):
controller_apply_type = None
storage_apply_type = None
swift_apply_type = None
-compute_apply_type = None
+worker_apply_type = None
-max_parallel_compute_hosts = None
+max_parallel_worker_hosts = None
default_instance_action = None
alarm_restrictions = None
@@ -37,8 +37,8 @@ class APIRequestCreateSwUpdateStrategy(RPCMessage):
msg['controller_apply_type'] = self.controller_apply_type
msg['storage_apply_type'] = self.storage_apply_type
msg['swift_apply_type'] = self.swift_apply_type
-msg['compute_apply_type'] = self.compute_apply_type
+msg['worker_apply_type'] = self.worker_apply_type
-msg['max_parallel_compute_hosts'] = self.max_parallel_compute_hosts
+msg['max_parallel_worker_hosts'] = self.max_parallel_worker_hosts
msg['default_instance_action'] = self.default_instance_action
msg['alarm_restrictions'] = self.alarm_restrictions
@@ -47,9 +47,9 @@ class APIRequestCreateSwUpdateStrategy(RPCMessage):
self.controller_apply_type = msg.get('controller_apply_type', None)
self.storage_apply_type = msg.get('storage_apply_type', None)
self.swift_apply_type = msg.get('swift_apply_type', None)
-self.compute_apply_type = msg.get('compute_apply_type', None)
+self.worker_apply_type = msg.get('worker_apply_type', None)
-self.max_parallel_compute_hosts = msg.get(
+self.max_parallel_worker_hosts = msg.get(
-'max_parallel_compute_hosts', None)
+'max_parallel_worker_hosts', None)
self.default_instance_action = msg.get('default_instance_action', None)
self.alarm_restrictions = msg.get('alarm_restrictions', None)

View File

@@ -52,16 +52,16 @@ class SwUpdateStrategy(strategy.Strategy):
"""
def __init__(self, uuid, strategy_name, controller_apply_type,
storage_apply_type,
-swift_apply_type, compute_apply_type,
+swift_apply_type, worker_apply_type,
-max_parallel_compute_hosts, default_instance_action,
+max_parallel_worker_hosts, default_instance_action,
alarm_restrictions,
ignore_alarms):
super(SwUpdateStrategy, self).__init__(uuid, strategy_name)
self._controller_apply_type = controller_apply_type
self._storage_apply_type = storage_apply_type
self._swift_apply_type = swift_apply_type
-self._compute_apply_type = compute_apply_type
+self._worker_apply_type = worker_apply_type
-self._max_parallel_compute_hosts = max_parallel_compute_hosts
+self._max_parallel_worker_hosts = max_parallel_worker_hosts
self._default_instance_action = default_instance_action
self._alarm_restrictions = alarm_restrictions
self._ignore_alarms = ignore_alarms
@@ -159,9 +159,9 @@ class SwUpdateStrategy(strategy.Strategy):
return host_lists, ''
-def _create_compute_host_lists(self, compute_hosts, reboot):
+def _create_worker_host_lists(self, worker_hosts, reboot):
"""
-Create host lists for updating compute hosts
+Create host lists for updating worker hosts
"""
from nfv_vim import tables
@@ -180,17 +180,17 @@ class SwUpdateStrategy(strategy.Strategy):
"""
Calculate limit for each host aggregate
"""
-# Use the ratio of the max parallel compute hosts to the total
+# Use the ratio of the max parallel worker hosts to the total
-# number of compute hosts to limit the number of hosts in each
+# number of worker hosts to limit the number of hosts in each
# aggregate that will be patched at the same time. If there
# are multiple aggregates, that will help us select hosts
# from more than one aggregate for each stage.
host_table = tables.tables_get_host_table()
-num_compute_hosts = host_table.total_by_personality(
+num_worker_hosts = host_table.total_by_personality(
-HOST_PERSONALITY.COMPUTE)
+HOST_PERSONALITY.WORKER)
aggregate_ratio = \
-float(self._max_parallel_compute_hosts) / num_compute_hosts
+float(self._max_parallel_worker_hosts) / num_worker_hosts
-# Limit the ratio to half the compute hosts in an aggregate
+# Limit the ratio to half the worker hosts in an aggregate
if aggregate_ratio > 0.5:
aggregate_ratio = 0.5
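
To make the aggregate limit concrete, a small standalone sketch of the ratio computed above. It reflects only what this hunk shows; the elided code that applies the ratio to each aggregate is not reproduced here.

def worker_aggregate_ratio(max_parallel_worker_hosts, num_worker_hosts):
    # Ratio of the max parallel worker hosts to the total worker host
    # count, capped at half the hosts in an aggregate (as above).
    ratio = float(max_parallel_worker_hosts) / num_worker_hosts
    return min(ratio, 0.5)

# e.g. 5 parallel hosts out of 50 workers -> 0.1, so roughly 10% of each
# aggregate is eligible for the same stage; 40 of 50 is capped at 0.5.
print(worker_aggregate_ratio(5, 50))   # 0.1
print(worker_aggregate_ratio(40, 50))  # 0.5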
@@ -242,12 +242,12 @@ class SwUpdateStrategy(strategy.Strategy):
instance_table = tables.tables_get_instance_table()
instance_group_table = tables.tables_get_instance_group_table()
-if SW_UPDATE_APPLY_TYPE.IGNORE != self._compute_apply_type:
+if SW_UPDATE_APPLY_TYPE.IGNORE != self._worker_apply_type:
-for host in compute_hosts:
+for host in worker_hosts:
-if HOST_PERSONALITY.COMPUTE not in host.personality:
+if HOST_PERSONALITY.WORKER not in host.personality:
-DLOG.error("Host inventory personality compute mismatch "
+DLOG.error("Host inventory personality worker mismatch "
"detected for host %s." % host.name)
-reason = 'host inventory personality compute mismatch detected'
+reason = 'host inventory personality worker mismatch detected'
return None, reason
# Do not allow reboots if there are locked instances that
@@ -269,21 +269,21 @@ class SwUpdateStrategy(strategy.Strategy):
host_lists = list()
-if SW_UPDATE_APPLY_TYPE.SERIAL == self._compute_apply_type:
+if SW_UPDATE_APPLY_TYPE.SERIAL == self._worker_apply_type:
host_with_instances_lists = list()
-# handle the computes with no instances first
+# handle the workers with no instances first
-for host in compute_hosts:
+for host in worker_hosts:
if not instance_table.exist_on_host(host.name):
host_lists.append([host])
else:
host_with_instances_lists.append([host])
-# then add computes with instances
+# then add workers with instances
if host_with_instances_lists:
host_lists += host_with_instances_lists
-elif SW_UPDATE_APPLY_TYPE.PARALLEL == self._compute_apply_type:
+elif SW_UPDATE_APPLY_TYPE.PARALLEL == self._worker_apply_type:
policies = [INSTANCE_GROUP_POLICY.ANTI_AFFINITY,
INSTANCE_GROUP_POLICY.ANTI_AFFINITY_BEST_EFFORT]
@@ -291,26 +291,26 @@ class SwUpdateStrategy(strategy.Strategy):
host_aggregate_limit = {}
calculate_host_aggregate_limits()
controller_list = list()
-host_lists.append([])  # start with empty list of computes
+host_lists.append([])  # start with empty list of workers
-for host in compute_hosts:
+for host in worker_hosts:
if HOST_PERSONALITY.CONTROLLER in host.personality:
# have to swact the controller so put it in its own list
controller_list.append([host])
continue
elif not reboot:
-# parallel no-reboot can group all computes together
+# parallel no-reboot can group all workers together
host_lists[0].append(host)
continue
elif not instance_table.exist_on_host(host.name):
-# group the computes with no instances together
+# group the workers with no instances together
host_lists[0].append(host)
continue
# find the first list that can add this host else create a new list
for idx in range(1, len(host_lists), 1):
host_list = host_lists[idx]
-if len(host_list) >= self._max_parallel_compute_hosts:
+if len(host_list) >= self._max_parallel_worker_hosts:
# this list is full - don't add the host
continue
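
A condensed sketch of the parallel grouping walk above, assuming the elided remainder of the loop simply places a host in the first list with room and opens a new list otherwise; the real code additionally weighs anti-affinity policies and the per-aggregate limits, which this sketch omits.

def group_for_parallel_apply(worker_hosts, max_parallel_worker_hosts,
                             has_instances, is_controller, reboot):
    host_lists = [[]]          # index 0: hosts that can all go together
    controller_list = []
    for host in worker_hosts:
        if is_controller(host):
            controller_list.append([host])   # swact: its own stage
        elif not reboot or not has_instances(host):
            host_lists[0].append(host)       # no-reboot / no instances
        else:
            for host_list in host_lists[1:]:
                if len(host_list) < max_parallel_worker_hosts:
                    host_list.append(host)   # first list with room
                    break
            else:
                host_lists.append([host])    # all full: start a new list
    return controller_list + host_lists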
@@ -337,21 +337,21 @@ class SwUpdateStrategy(strategy.Strategy):
DLOG.verbose("Compute apply type set to ignore.")
# Drop empty lists and enforce a maximum number of hosts to be updated
-# at once (only required list of computes with no instances, as we
+# at once (only required list of workers with no instances, as we
-# enforced the limit for compute hosts with instances above).
+# enforced the limit for worker hosts with instances above).
sized_host_lists = list()
for host_list in host_lists:
# drop empty host lists
if not host_list:
continue
-if self._max_parallel_compute_hosts < len(host_list):
+if self._max_parallel_worker_hosts < len(host_list):
start = 0
-end = self._max_parallel_compute_hosts
+end = self._max_parallel_worker_hosts
while start < len(host_list):
sized_host_lists.append(host_list[start:end])
start = end
-end += self._max_parallel_compute_hosts
+end += self._max_parallel_worker_hosts
else:
sized_host_lists.append(host_list)
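
The slicing loop above is fixed-size chunking; a minimal standalone equivalent with a usage check:

def chunk_hosts(host_list, max_parallel_worker_hosts):
    # Equivalent to the while loop above: split an oversized host list
    # into slices of at most max_parallel_worker_hosts entries.
    return [host_list[start:start + max_parallel_worker_hosts]
            for start in range(0, len(host_list), max_parallel_worker_hosts)]

assert chunk_hosts(['w0', 'w1', 'w2', 'w3', 'w4'], 2) == \
    [['w0', 'w1'], ['w2', 'w3'], ['w4']]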
@@ -424,8 +424,8 @@ class SwUpdateStrategy(strategy.Strategy):
self._controller_apply_type = data['controller_apply_type']
self._storage_apply_type = data['storage_apply_type']
self._swift_apply_type = data['swift_apply_type']
-self._compute_apply_type = data['compute_apply_type']
+self._worker_apply_type = data['worker_apply_type']
-self._max_parallel_compute_hosts = data['max_parallel_compute_hosts']
+self._max_parallel_worker_hosts = data['max_parallel_worker_hosts']
self._default_instance_action = data['default_instance_action']
self._alarm_restrictions = data['alarm_restrictions']
self._ignore_alarms = data['ignore_alarms']
@@ -450,8 +450,8 @@ class SwUpdateStrategy(strategy.Strategy):
data['controller_apply_type'] = self._controller_apply_type
data['storage_apply_type'] = self._storage_apply_type
data['swift_apply_type'] = self._swift_apply_type
-data['compute_apply_type'] = self._compute_apply_type
+data['worker_apply_type'] = self._worker_apply_type
-data['max_parallel_compute_hosts'] = self._max_parallel_compute_hosts
+data['max_parallel_worker_hosts'] = self._max_parallel_worker_hosts
data['default_instance_action'] = self._default_instance_action
data['alarm_restrictions'] = self._alarm_restrictions
data['ignore_alarms'] = self._ignore_alarms
@@ -469,8 +469,8 @@ class SwPatchStrategy(SwUpdateStrategy):
Software Patch - Strategy
"""
def __init__(self, uuid, controller_apply_type, storage_apply_type,
-swift_apply_type, compute_apply_type,
+swift_apply_type, worker_apply_type,
-max_parallel_compute_hosts, default_instance_action,
+max_parallel_worker_hosts, default_instance_action,
alarm_restrictions,
ignore_alarms,
single_controller):
@@ -480,8 +480,8 @@ class SwPatchStrategy(SwUpdateStrategy):
controller_apply_type,
storage_apply_type,
swift_apply_type,
-compute_apply_type,
+worker_apply_type,
-max_parallel_compute_hosts,
+max_parallel_worker_hosts,
default_instance_action,
alarm_restrictions,
ignore_alarms)
@@ -572,7 +572,7 @@ class SwPatchStrategy(SwUpdateStrategy):
local_host_name = get_local_host_name()
for host in controllers:
-if HOST_PERSONALITY.COMPUTE not in host.personality:
+if HOST_PERSONALITY.WORKER not in host.personality:
if local_host_name == host.name:
local_host = host
else:
@@ -713,18 +713,18 @@ class SwPatchStrategy(SwUpdateStrategy):
return True, ''
-def _add_compute_strategy_stages(self, compute_hosts, reboot):
+def _add_worker_strategy_stages(self, worker_hosts, reboot):
"""
-Add compute software patch strategy stages
+Add worker software patch strategy stages
"""
from nfv_vim import tables
from nfv_vim import strategy
-if SW_UPDATE_APPLY_TYPE.IGNORE != self._compute_apply_type:
+if SW_UPDATE_APPLY_TYPE.IGNORE != self._worker_apply_type:
-# When using a single controller/compute host, only allow the
+# When using a single controller/worker host, only allow the
# stop/start instance action.
if self._single_controller:
-for host in compute_hosts:
+for host in worker_hosts:
if HOST_PERSONALITY.CONTROLLER in host.personality and \
SW_UPDATE_INSTANCE_ACTION.STOP_START != \
self._default_instance_action:
@@ -734,7 +734,7 @@ class SwPatchStrategy(SwUpdateStrategy):
'controller configuration'
return False, reason
-host_lists, reason = self._create_compute_host_lists(compute_hosts, reboot)
+host_lists, reason = self._create_worker_host_lists(worker_hosts, reboot)
if host_lists is None:
return False, reason
@@ -757,7 +757,7 @@ class SwPatchStrategy(SwUpdateStrategy):
hosts_to_reboot = [x for x in host_list if x.is_locked()]
stage = strategy.StrategyStage(
-strategy.STRATEGY_STAGE_NAME.SW_PATCH_COMPUTE_HOSTS)
+strategy.STRATEGY_STAGE_NAME.SW_PATCH_WORKER_HOSTS)
stage.add_step(strategy.QueryAlarmsStep(
True, ignore_alarms=self._ignore_alarms))
@@ -774,9 +774,9 @@ class SwPatchStrategy(SwUpdateStrategy):
if SW_UPDATE_INSTANCE_ACTION.MIGRATE == \
self._default_instance_action:
if SW_UPDATE_APPLY_TYPE.PARALLEL == \
-self._compute_apply_type:
+self._worker_apply_type:
# Disable host services before migrating to ensure
-# instances do not migrate to compute hosts in the
+# instances do not migrate to worker hosts in the
# same set of hosts.
if host_list[0].host_service_configured(
HOST_SERVICES.COMPUTE):
@@ -874,9 +874,9 @@ class SwPatchStrategy(SwUpdateStrategy):
return
for host in host_table.values():
-if HOST_PERSONALITY.COMPUTE in host.personality and \
+if HOST_PERSONALITY.WORKER in host.personality and \
HOST_PERSONALITY.CONTROLLER not in host.personality:
-# Allow patch orchestration when compute hosts are available,
+# Allow patch orchestration when worker hosts are available,
# locked or powered down.
if not ((host.is_unlocked() and host.is_enabled() and
host.is_available()) or
@@ -885,14 +885,14 @@ class SwPatchStrategy(SwUpdateStrategy):
(host.is_locked() and host.is_disabled() and
host.is_online())):
DLOG.warn(
-"All compute hosts must be unlocked-enabled-available, "
+"All worker hosts must be unlocked-enabled-available, "
"locked-disabled-online or locked-disabled-offline, "
"can't apply software patches.")
self._state = strategy.STRATEGY_STATE.BUILD_FAILED
self.build_phase.result = \
strategy.STRATEGY_PHASE_RESULT.FAILED
self.build_phase.result_reason = (
-'all compute hosts must be unlocked-enabled-available, '
+'all worker hosts must be unlocked-enabled-available, '
'locked-disabled-online or locked-disabled-offline')
self.sw_update_obj.strategy_build_complete(
False, self.build_phase.result_reason)
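
The state gate above reduces to a small predicate; a sketch assuming host objects expose the is_* accessors used in the hunk, with the locked-disabled-offline clause inferred from the warning text since that part of the condition is elided:

def patch_orchestration_state_ok(host):
    # A worker host may be patch-orchestrated only when it is
    # unlocked-enabled-available, locked-disabled-online, or
    # locked-disabled-offline (powered down).
    return ((host.is_unlocked() and host.is_enabled() and
             host.is_available()) or
            (host.is_locked() and host.is_disabled() and
             host.is_offline()) or
            (host.is_locked() and host.is_disabled() and
             host.is_online()))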
@ -925,8 +925,8 @@ class SwPatchStrategy(SwUpdateStrategy):
storage_hosts_no_reboot = list() storage_hosts_no_reboot = list()
swift_hosts = list() swift_hosts = list()
swift_hosts_no_reboot = list() swift_hosts_no_reboot = list()
compute_hosts = list() worker_hosts = list()
compute_hosts_no_reboot = list() worker_hosts_no_reboot = list()
for sw_patch_host in self.nfvi_sw_patch_hosts: for sw_patch_host in self.nfvi_sw_patch_hosts:
host = host_table.get(sw_patch_host.name, None) host = host_table.get(sw_patch_host.name, None)
@ -982,13 +982,13 @@ class SwPatchStrategy(SwUpdateStrategy):
# Separate if check to handle CPE where host has multiple # Separate if check to handle CPE where host has multiple
# personality disorder. # personality disorder.
if HOST_PERSONALITY.COMPUTE in sw_patch_host.personality: if HOST_PERSONALITY.WORKER in sw_patch_host.personality:
# Ignore compute hosts that are powered down # Ignore worker hosts that are powered down
if not host.is_offline(): if not host.is_offline():
if sw_patch_host.requires_reboot: if sw_patch_host.requires_reboot:
compute_hosts.append(host) worker_hosts.append(host)
else: else:
compute_hosts_no_reboot.append(host) worker_hosts_no_reboot.append(host)
STRATEGY_CREATION_COMMANDS = [ STRATEGY_CREATION_COMMANDS = [
(self._add_controller_strategy_stages, (self._add_controller_strategy_stages,
@ -1003,10 +1003,10 @@ class SwPatchStrategy(SwUpdateStrategy):
swift_hosts_no_reboot, False), swift_hosts_no_reboot, False),
(self._add_swift_strategy_stages, (self._add_swift_strategy_stages,
swift_hosts, True), swift_hosts, True),
(self._add_compute_strategy_stages, (self._add_worker_strategy_stages,
compute_hosts_no_reboot, False), worker_hosts_no_reboot, False),
(self._add_compute_strategy_stages, (self._add_worker_strategy_stages,
compute_hosts, True) worker_hosts, True)
] ]
for add_strategy_stages_function, host_list, reboot in \ for add_strategy_stages_function, host_list, reboot in \
@@ -1097,8 +1097,8 @@ class SwUpgradeStrategy(SwUpdateStrategy):
     """
     Software Upgrade - Strategy
     """
-    def __init__(self, uuid, storage_apply_type, compute_apply_type,
-                 max_parallel_compute_hosts,
+    def __init__(self, uuid, storage_apply_type, worker_apply_type,
+                 max_parallel_worker_hosts,
                  alarm_restrictions, start_upgrade, complete_upgrade,
                  ignore_alarms):
         super(SwUpgradeStrategy, self).__init__(
@@ -1107,8 +1107,8 @@ class SwUpgradeStrategy(SwUpdateStrategy):
             SW_UPDATE_APPLY_TYPE.SERIAL,
             storage_apply_type,
             SW_UPDATE_APPLY_TYPE.IGNORE,
-            compute_apply_type,
-            max_parallel_compute_hosts,
+            worker_apply_type,
+            max_parallel_worker_hosts,
             SW_UPDATE_INSTANCE_ACTION.MIGRATE,
             alarm_restrictions,
             ignore_alarms)
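Editor note: after the rename, callers constructing an upgrade strategy pass the worker keywords in place of the compute ones. A hypothetical call site (argument values are illustrative only, not defaults):

    # Hypothetical call site; values are examples only.
    sw_upgrade = SwUpgradeStrategy(
        uuid='00000000-0000-0000-0000-000000000001',
        storage_apply_type='serial',
        worker_apply_type='parallel',
        max_parallel_worker_hosts=2,
        alarm_restrictions='strict',
        start_upgrade=False,
        complete_upgrade=False,
        ignore_alarms=[])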
@@ -1232,7 +1232,7 @@ class SwUpgradeStrategy(SwUpdateStrategy):
         controller_1_host = None

         for host in controllers:
-            if HOST_PERSONALITY.COMPUTE in host.personality:
+            if HOST_PERSONALITY.WORKER in host.personality:
                 DLOG.warn("Cannot apply software upgrades to CPE configuration.")
                 reason = 'cannot apply software upgrades to CPE configuration'
                 return False, reason
@@ -1331,14 +1331,14 @@ class SwUpgradeStrategy(SwUpdateStrategy):
         return True, ''

-    def _add_compute_strategy_stages(self, compute_hosts, reboot):
+    def _add_worker_strategy_stages(self, worker_hosts, reboot):
         """
-        Add compute software upgrade strategy stages
+        Add worker software upgrade strategy stages
         """
         from nfv_vim import tables
         from nfv_vim import strategy

-        host_lists, reason = self._create_compute_host_lists(compute_hosts, reboot)
+        host_lists, reason = self._create_worker_host_lists(worker_hosts, reboot)
         if host_lists is None:
             return False, reason
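Editor note: the renamed _create_worker_host_lists helper decides the batching: serial apply yields one host per stage, parallel apply groups hosts up to the configured maximum. A simplified stand-in, assuming plain lists and the apply-type strings used elsewhere in this change (the real helper also accounts for instance placement and reboot requirements):

    # Simplified, hypothetical stand-in for _create_worker_host_lists.
    def create_worker_host_lists(worker_hosts, apply_type, max_parallel):
        if apply_type == 'serial':
            return [[host] for host in worker_hosts]
        if apply_type == 'parallel':
            return [worker_hosts[i:i + max_parallel]
                    for i in range(0, len(worker_hosts), max_parallel)]
        return []  # 'ignore'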
@@ -1361,7 +1361,7 @@ class SwUpgradeStrategy(SwUpdateStrategy):
             # Computes with no instances
             if 0 == len(instance_list):
                 stage = strategy.StrategyStage(
-                    strategy.STRATEGY_STAGE_NAME.SW_UPGRADE_COMPUTE_HOSTS)
+                    strategy.STRATEGY_STAGE_NAME.SW_UPGRADE_WORKER_HOSTS)
                 stage.add_step(strategy.QueryAlarmsStep(
                     True, ignore_alarms=self._ignore_alarms))
                 stage.add_step(strategy.LockHostsStep(host_list))
@@ -1373,14 +1373,14 @@ class SwUpgradeStrategy(SwUpdateStrategy):

             # Computes with instances
             stage = strategy.StrategyStage(
-                strategy.STRATEGY_STAGE_NAME.SW_UPGRADE_COMPUTE_HOSTS)
+                strategy.STRATEGY_STAGE_NAME.SW_UPGRADE_WORKER_HOSTS)
             stage.add_step(strategy.QueryAlarmsStep(
                 True, ignore_alarms=self._ignore_alarms))

-            if SW_UPDATE_APPLY_TYPE.PARALLEL == self._compute_apply_type:
+            if SW_UPDATE_APPLY_TYPE.PARALLEL == self._worker_apply_type:
                 # Disable host services before migrating to ensure
-                # instances do not migrate to compute hosts in the
+                # instances do not migrate to worker hosts in the
                 # same set of hosts.
                 if host_list[0].host_service_configured(
                         HOST_SERVICES.COMPUTE):
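Editor note: in the parallel branch above, the compute host service is disabled on the whole batch before instances are migrated, so migrations cannot land on peers in the same batch. A sketch of the resulting step order; only QueryAlarmsStep and LockHostsStep appear in this diff, so the other step names and their arguments are reconstructions, not confirmed by this change:

    # Assumed continuation of the parallel branch (step names other than
    # QueryAlarmsStep/LockHostsStep are reconstructions).
    stage.add_step(strategy.DisableHostServicesStep(host_list))
    stage.add_step(strategy.MigrateInstancesStep(instance_list))
    stage.add_step(strategy.LockHostsStep(host_list))
    stage.add_step(strategy.UpgradeHostsStep(host_list))
    stage.add_step(strategy.UnlockHostsStep(host_list))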
@@ -1502,7 +1502,7 @@ class SwUpgradeStrategy(SwUpdateStrategy):
         controller_hosts = list()
         storage_hosts = list()
-        compute_hosts = list()
+        worker_hosts = list()

         if self.nfvi_upgrade is None:
             # Start upgrade
@@ -1516,8 +1516,8 @@ class SwUpgradeStrategy(SwUpdateStrategy):
                 elif HOST_PERSONALITY.STORAGE in host.personality:
                     storage_hosts.append(host)
-                elif HOST_PERSONALITY.COMPUTE in host.personality:
-                    compute_hosts.append(host)
+                elif HOST_PERSONALITY.WORKER in host.personality:
+                    worker_hosts.append(host)
         else:
             # Only hosts not yet upgraded will be upgraded
             to_load = self.nfvi_upgrade.to_release
@@ -1532,16 +1532,16 @@ class SwUpgradeStrategy(SwUpdateStrategy):
                 elif HOST_PERSONALITY.STORAGE in host.personality:
                     storage_hosts.append(host)
-                elif HOST_PERSONALITY.COMPUTE in host.personality:
-                    compute_hosts.append(host)
+                elif HOST_PERSONALITY.WORKER in host.personality:
+                    worker_hosts.append(host)

         STRATEGY_CREATION_COMMANDS = [
             (self._add_controller_strategy_stages,
              controller_hosts, True),
             (self._add_storage_strategy_stages,
              storage_hosts, True),
-            (self._add_compute_strategy_stages,
-             compute_hosts, True)
+            (self._add_worker_strategy_stages,
+             worker_hosts, True)
         ]

         for add_strategy_stages_function, host_list, reboot in \
@@ -23,12 +23,12 @@ class StrategyStageNames(Constants):
     SW_PATCH_CONTROLLERS = Constant('sw-patch-controllers')
     SW_PATCH_STORAGE_HOSTS = Constant('sw-patch-storage-hosts')
     SW_PATCH_SWIFT_HOSTS = Constant('sw-patch-swift-hosts')
-    SW_PATCH_COMPUTE_HOSTS = Constant('sw-patch-compute-hosts')
+    SW_PATCH_WORKER_HOSTS = Constant('sw-patch-worker-hosts')
     SW_UPGRADE_QUERY = Constant('sw-upgrade-query')
     SW_UPGRADE_START = Constant('sw-upgrade-start')
     SW_UPGRADE_CONTROLLERS = Constant('sw-upgrade-controllers')
     SW_UPGRADE_STORAGE_HOSTS = Constant('sw-upgrade-storage-hosts')
-    SW_UPGRADE_COMPUTE_HOSTS = Constant('sw-upgrade-compute-hosts')
+    SW_UPGRADE_WORKER_HOSTS = Constant('sw-upgrade-worker-hosts')
     SW_UPGRADE_COMPLETE = Constant('sw-upgrade-complete')
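Editor note: the string values of these constants are the stage-name strings reported when a strategy is queried, so this rename is externally visible to API clients. A minimal sketch of the constant pattern, with the Constant/Constants helpers reduced to plain string attributes (a hypothetical simplification, not the real implementation):

    # Hypothetical simplification of the Constant/Constants helpers.
    class StrategyStageNames(object):
        SW_PATCH_WORKER_HOSTS = 'sw-patch-worker-hosts'
        SW_UPGRADE_WORKER_HOSTS = 'sw-upgrade-worker-hosts'

    STRATEGY_STAGE_NAME = StrategyStageNames()
    assert STRATEGY_STAGE_NAME.SW_PATCH_WORKER_HOSTS == 'sw-patch-worker-hosts'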