Merge remote-tracking branch 'gerrit/master' into f/centos8

Change-Id: If1a58a93b71d8e91ddd115a53b39069db0308b7f
Signed-off-by: Shuicheng Lin <shuicheng.lin@intel.com>
This commit is contained in:
Shuicheng Lin 2020-05-21 19:57:17 +08:00
commit a39415ff84
98 changed files with 7152 additions and 3227 deletions

View File

@ -9,12 +9,14 @@
- openstack-tox-linters
- stx-distcloud-tox-pep8
- stx-distcloud-tox-py27
- stx-distcloud-tox-py36
- stx-distcloud-tox-pylint
gate:
jobs:
- openstack-tox-linters
- stx-distcloud-tox-pep8
- stx-distcloud-tox-py27
- stx-distcloud-tox-py36
- stx-distcloud-tox-pylint
post:
jobs:
@ -33,6 +35,19 @@
tox_envlist: py27
tox_extra_args: -c distributedcloud/tox.ini
- job:
name: stx-distcloud-tox-py36
parent: tox
description: Run py36 for distcloud
required-projects:
- starlingx/fault
- starlingx/nfv
- starlingx/update
- starlingx/config
vars:
tox_envlist: py36
tox_extra_args: -c distributedcloud/tox.ini
- job:
name: stx-distcloud-tox-pylint
parent: tox

View File

@ -1,4 +1,4 @@
====================================================
====================================================
Dcmanager API v1
====================================================
@ -93,6 +93,7 @@ internalServerError (500), serviceUnavailable (503)
"compute_sync_status (Optional)", "plain", "xsd:string", "The compute sync status of the subcloud."
"network_sync_status (Optional)", "plain", "xsd:string", "The network sync status of the subcloud."
"patching_sync_status (Optional)", "plain", "xsd:string", "The patching sync status of the subcloud."
"group_id (Optional)", "plain", "xsd:int", "The unique identifier for the subcloud group for this subcloud."
::
@ -132,6 +133,7 @@ internalServerError (500), serviceUnavailable (503)
"endpoint_type": "patching"
},
"created-at": u"2018-02-25 19:06:35.208505",
"group_id": 1,
"management-gateway-ip": u"192.168.204.1",
"management-end-ip": u"192.168.204.100",
"id": 1,
@ -171,6 +173,7 @@ internalServerError (500), serviceUnavailable (503)
"endpoint_type": "patching"
},
"created-at": "2018-02-25 19:06:35.208505",
"group_id": 1,
"management-gateway-ip": "192.168.205.1",
"management-end-ip": "192.168.205.100",
"id": 2,
@ -181,12 +184,15 @@ internalServerError (500), serviceUnavailable (503)
This operation does not accept a request body.
********************
******************
Creates a subcloud
********************
******************
.. rest_method:: POST /v1.0/subclouds
Accepts Content-Type multipart/form-data.
**Normal response codes**
200
@ -203,13 +209,13 @@ serviceUnavailable (503)
:header: "Parameter", "Style", "Type", "Description"
:widths: 20, 20, 20, 60
"name", "plain", "xsd:string", "The name for the subcloud. Must be a unique name."
"description (Optional)", "plain", "xsd:string", "The description of the subcloud."
"location (Optional)", "plain", "xsd:string", "The location of the subcloud."
"management-subnet", "plain", "xsd:string", "Management subnet for subcloud in CIDR format. Must be unique."
"management-start-ip", "plain", "xsd:string", "Start of management IP address range for subcloud."
"management-end-ip", "plain", "xsd:string", "End of management IP address range for subcloud."
"systemcontroller-gateway-ip", "plain", "xsd:string", "Systemcontroller gateway IP Address."
"bootstrap-address", "plain", "xsd:string", "An OAM IP address of the subcloud controller-0."
"sysadmin_password", "plain", "xsd:string", "The sysadmin password of the subcloud. Must be base64 encoded."
"bmc_password (optional)", "plain", "xsd:string", "The BMC password of the subcloud. Must be base64 encoded."
"bootstrap_values", "plain", "xsd:string", "The content of a file containing the bootstrap overrides such as subcloud name, management and OAM subnet."
"install_values (Optional)", "plain", "xsd:string", "The content of a file containing install variables such as subcloud bootstrap interface and BMC information."
"deploy_config (Optional)", "plain", "xsd:string", "The content of a file containing the resource definitions describing the desired subcloud configuration."
"group_id", "plain", "xsd:int", "Id of the subcloud group. Defaults to 1."
**Response parameters**
@ -227,6 +233,7 @@ serviceUnavailable (503)
"management-start-ip (Optional)", "plain", "xsd:string", "Start of management IP address range for subcloud."
"management-end-ip (Optional)", "plain", "xsd:string", "End of management IP address range for subcloud."
"systemcontroller-gateway-ip (Optional)", "plain", "xsd:string", "Systemcontroller gateway IP Address."
"group_id (Optional)", "plain", "xsd:int", "Id of the subcloud group."
::
@ -238,6 +245,7 @@ serviceUnavailable (503)
"management-subnet": "192.168.205.0/24",
"management-gateway-ip": "192.168.205.1",
"management-end-ip": "192.168.205.160",
"group_id": 1,
"description": "new subcloud"
}
@ -253,6 +261,7 @@ serviceUnavailable (503)
"availability-status": "offline",
"systemcontroller-gateway-ip": "192.168.204.102",
"location": None,
"group_id": 1,
"management-subnet": "192.168.205.0/24",
"management-gateway-ip": "192.168.205.1",
"management-end-ip": "192.168.205.160",
@ -306,6 +315,7 @@ internalServerError (500), serviceUnavailable (503)
"compute_sync_status (Optional)", "plain", "xsd:string", "The compute sync status of the subcloud."
"network_sync_status (Optional)", "plain", "xsd:string", "The network sync status of the subcloud."
"patching_sync_status (Optional)", "plain", "xsd:string", "The patching sync status of the subcloud."
"group_id (Optional)", "plain", "xsd:int", "Id of the subcloud group."
::
@ -344,6 +354,7 @@ internalServerError (500), serviceUnavailable (503)
],
"management-gateway-ip": "192.168.204.1",
"management-end-ip": "192.168.204.100",
"group_id": 1,
"id": 1,
"name": "subcloud6"
}
@ -397,6 +408,7 @@ internalServerError (500), serviceUnavailable (503)
"network_sync_status (Optional)", "plain", "xsd:string", "The network sync status of the subcloud."
"patching_sync_status (Optional)", "plain", "xsd:string", "The patching sync status of the subcloud."
"oam_floating_ip (Optional)", "plain", "xsd:string", "OAM Floating IP of the subcloud."
"group_id (Optional)", "plain", "xsd:int", "Id of the subcloud group."
::
@ -435,6 +447,7 @@ internalServerError (500), serviceUnavailable (503)
],
"management-gateway-ip": "192.168.204.1",
"management-end-ip": "192.168.204.100",
"group_id": 1,
"id": 1,
"name": "subcloud6",
"oam_floating_ip" "10.10.10.12"
@ -476,6 +489,7 @@ serviceUnavailable (503)
"description (Optional)", "plain", "xsd:string", "The description of the subcloud."
"location (Optional)", "plain", "xsd:string", "The location of the subcloud."
"management-state (Optional)", "plain", "xsd:string", "The management-state of the subcloud, ``managed`` or ``unmanaged``. The subcloud must be online before this can be modified to managed."
"group_id (Optional)", "plain", "xsd:int", "Id of the subcloud group. The group must exist."
**Response parameters**
@ -493,6 +507,7 @@ serviceUnavailable (503)
"management-start-ip (Optional)", "plain", "xsd:string", "Start of management IP address range for subcloud."
"management-end-ip (Optional)", "plain", "xsd:string", "End of management IP address range for subcloud."
"systemcontroller-gateway-ip (Optional)", "plain", "xsd:string", "Systemcontroller gateway IP Address."
"group_id (Optional)", "plain", "xsd:int", "Id of the subcloud group."
::
@ -500,6 +515,7 @@ serviceUnavailable (503)
"description": "new description",
"location": "new location",
"management-state": "managed"
"group_id": 2,
}
::
@ -517,6 +533,7 @@ serviceUnavailable (503)
"management-subnet": "192.168.204.0/24",
"management-gateway-ip": "192.168.204.1",
"management-end-ip": "192.168.204.100",
"group_id": 2,
"id": 1,
"name": "subcloud6"
}
@ -541,6 +558,341 @@ Deletes a specific subcloud
This operation does not accept a request body.
----------------
Subcloud Groups
----------------
Subcloud Groups are a logical grouping managed by a central System Controller.
Subclouds in a group can be updated in parallel when applying patches or
software upgrades.
**************************
Lists all subcloud groups
**************************
.. rest_method:: GET /v1.0/subcloud-groups
**Normal response codes**
200
**Error response codes**
itemNotFound (404), badRequest (400), unauthorized (401), forbidden
(403), badMethod (405), HTTPUnprocessableEntity (422),
internalServerError (500), serviceUnavailable (503)
**Response parameters**
.. csv-table::
:header: "Parameter", "Style", "Type", "Description"
:widths: 20, 20, 20, 60
"subcloud_groups (Optional)", "plain", "xsd:list", "The list of subcloud groups."
"id (Optional)", "plain", "xsd:int", "The unique identifier for this object."
"name (Optional)", "plain", "xsd:string", "The unique name for the subcloud group."
"description (Optional)", "plain", "xsd:string", "The description of the subcloud group."
"update_apply_type (Optional)", "plain", "xsd:string", "The method for applying an update. ```serial``` or ```parallel```."
"max_parallel_subclouds (Optional)", "plain", "xsd:int", "The maximum number of subclouds to update in parallel."
"created_at (Optional)", "plain", "xsd:dateTime", "The time when the object was created."
"updated_at (Optional)", "plain", "xsd:dateTime", "The time when the object was last updated."
::
{
"subcloud_groups": [
{
"update_apply_type": "parallel",
"description": "Default Subcloud Group",
"updated-at": null,
"created-at": null,
"max_parallel_subclouds": 2,
"id": 1,
"name": "Default"
},
]
}
This operation does not accept a request body.
*************************
Creates a subcloud group
*************************
.. rest_method:: POST /v1.0/subcloud-groups
**Normal response codes**
200
**Error response codes**
badRequest (400), unauthorized (401), forbidden (403), badMethod (405),
HTTPUnprocessableEntity (422), internalServerError (500),
serviceUnavailable (503)
**Request parameters**
.. csv-table::
:header: "Parameter", "Style", "Type", "Description"
:widths: 20, 20, 20, 60
"name (Optional)", "plain", "xsd:string", "The name for the subcloud group. Must be unique."
"description (Optional)", "plain", "xsd:string", "The description of the subcloud group."
"update_apply_type (Optional)", "plain", "xsd:string", "The method for applying an update. Must be ```serial``` or ```parallel```."
"max_parallel_subclouds (Optional)", "plain", "xsd:int", "The maximum number of subclouds to update in parallel. Must be greater than 0."
**Response parameters**
.. csv-table::
:header: "Parameter", "Style", "Type", "Description"
:widths: 20, 20, 20, 60
"id (Optional)", "plain", "xsd:int", "The unique identifier for this object."
"name (Optional)", "plain", "xsd:string", "The unique name for the subcloud group."
"description (Optional)", "plain", "xsd:string", "The description of the subcloud group."
"update_apply_type (Optional)", "plain", "xsd:string", "The method for applying an update. ```serial``` or ```parallel```."
"max_parallel_subclouds (Optional)", "plain", "xsd:int", "The maximum number of subclouds to update in parallel."
::
{
"name": "GroupX",
"description": "A new group",
"update_apply_type": "parallel",
"max_parallel_subclouds": 3
}
::
{
"id": 2,
"name": "GroupX",
"description": "A new group",
"update_apply_type": "parallel",
"max_parallel_subclouds": "3",
"updated-at": null,
"created-at": "2020-04-08 15:15:10.750592",
}
******************************************************
Shows information about a specific subcloud group
******************************************************
.. rest_method:: GET /v1.0/subcloud-groups/{subcloud-group}
**Normal response codes**
200
**Error response codes**
itemNotFound (404), badRequest (400), unauthorized (401), forbidden
(403), badMethod (405), HTTPUnprocessableEntity (422),
internalServerError (500), serviceUnavailable (503)
**Request parameters**
.. csv-table::
:header: "Parameter", "Style", "Type", "Description"
:widths: 20, 20, 20, 60
"subcloud-group", "URI", "xsd:string", "The subcloud group reference, name or id."
**Response parameters**
.. csv-table::
:header: "Parameter", "Style", "Type", "Description"
:widths: 20, 20, 20, 60
"id (Optional)", "plain", "xsd:int", "The unique identifier for this object."
"name (Optional)", "plain", "xsd:string", "The name provisioned for the subcloud group."
"description (Optional)", "plain", "xsd:string", "The description for the subcloud group."
"max_parallel_subclouds (Optional)", "plain", "xsd:int", "The maximum number of subclouds to update in parallel."
"update_apply_type (Optional)", "plain", "xsd:string", "The update apply type for the subcloud group: ```serial``` or ```parallel```."
"created_at (Optional)", "plain", "xsd:dateTime", "The time when the object was created."
"updated_at (Optional)", "plain", "xsd:dateTime", "The time when the object was last updated."
::
{
"id": 2,
"name": "GroupX",
"description": "A new group",
"max_parallel_subclouds": 3,
"update_apply_type": "parallel",
"created-at": "2020-04-08 15:15:10.750592",
"updated-at": null
}
This operation does not accept a request body.
******************************************************
Shows subclouds that are part of a subcloud group
******************************************************
.. rest_method:: GET /v1.0/subcloud-groups/{subcloud-group}/subclouds
**Normal response codes**
200
**Error response codes**
itemNotFound (404), badRequest (400), unauthorized (401), forbidden
(403), badMethod (405), HTTPUnprocessableEntity (422),
internalServerError (500), serviceUnavailable (503)
**Request parameters**
.. csv-table::
:header: "Parameter", "Style", "Type", "Description"
:widths: 20, 20, 20, 60
"subcloud-group", "URI", "xsd:string", "The subcloud group reference, name or id."
**Response parameters**
.. csv-table::
:header: "Parameter", "Style", "Type", "Description"
:widths: 20, 20, 20, 60
"subclouds (Optional)", "plain", "xsd:list", "The list of subclouds."
"id (Optional)", "plain", "xsd:int", "The unique identifier for a subcloud."
"group_id (Optional)", "plain", "xsd:int", "The unique identifier for the subcloud group."
"created_at (Optional)", "plain", "xsd:dateTime", "The time when the object was created."
"updated_at (Optional)", "plain", "xsd:dateTime", "The time when the object was last updated."
"name (Optional)", "plain", "xsd:string", "The name provisioned for the subcloud."
"management-state (Optional)", "plain", "xsd:string", "Management state of the subcloud."
"management-start-ip (Optional)", "plain", "xsd:string", "Start of management IP address range for subcloud."
"software-version (Optional)", "plain", "xsd:string", "Software version for subcloud."
"availability-status (Optional)", "plain", "xsd:string", "Availability status of the subcloud."
"systemcontroller-gateway-ip (Optional)", "plain", "xsd:string", "Systemcontroller gateway IP Address."
"location (Optional)", "plain", "xsd:string", "The location provisioned for the subcloud."
"openstack-installed (Optional)", "plain", "xsd:boolean", "Whether openstack is installed on the subcloud."
"management-subnet (Optional)", "plain", "xsd:string", "Management subnet for subcloud in CIDR format."
"management-gateway-ip (Optional)", "plain", "xsd:string", "Management gateway IP for subcloud."
"management-end-ip (Optional)", "plain", "xsd:string", "End of management IP address range for subcloud."
"description (Optional)", "plain", "xsd:string", "The description provisioned for the subcloud."
::
{
"subclouds": [
{
"deploy-status": "complete",
"id": 1,
"group_id": 2,
"created-at": "2020-04-13 13:16:21.903294",
"updated-at": "2020-04-13 13:36:27.494056",
"name": "subcloud1",
"management-state": "unmanaged",
"management-start-ip": "192.168.101.2",
"software-version": "20.01",
"availability-status": "offline",
"systemcontroller-gateway-ip": "192.168.204.101",
"location": "YOW",
"openstack-installed": false,
"management-subnet": "192.168.101.0/24",
"management-gateway-ip": "192.168.101.1",
"management-end-ip": "192.168.101.50",
"description": "Ottawa Site"
}
]
}
This operation does not accept a request body.
***********************************
Modifies a specific subcloud group
***********************************
.. rest_method:: PATCH /v1.0/subcloud-groups/{subcloud-group}
The attributes of a subcloud group which are modifiable:
- name
- description
- update_apply_type
- max_parallel_subclouds
**Normal response codes**
200
**Error response codes**
badRequest (400), unauthorized (401), forbidden (403), badMethod (405),
HTTPUnprocessableEntity (422), internalServerError (500),
serviceUnavailable (503)
**Request parameters**
.. csv-table::
:header: "Parameter", "Style", "Type", "Description"
:widths: 20, 20, 20, 60
"subcloud-group", "URI", "xsd:string", "The subcloud group reference, name or id."
"name (Optional)", "plain", "xsd:string", "The name of the subcloud group. Must be unique."
"description (Optional)", "plain", "xsd:string", "The description of the subcloud group."
"update_apply_type (Optional)", "plain", "xsd:string", "The update apply type for the subcloud group. Either ```serial``` or ```parallel```."
"max_parallel_subclouds (Optional)", "plain", "xsd:int", "The number of subclouds to update in parallel. Must be greater than 0."
**Response parameters**
.. csv-table::
:header: "Parameter", "Style", "Type", "Description"
:widths: 20, 20, 20, 60
"id (Optional)", "plain", "xsd:int", "The unique identifier for this object."
"name (Optional)", "plain", "xsd:string", "The name provisioned for the subcloud group."
"description (Optional)", "plain", "xsd:string", "The description for the subcloud group."
"created_at (Optional)", "plain", "xsd:dateTime", "The time when the object was created."
"updated_at (Optional)", "plain", "xsd:dateTime", "The time when the object was last updated."
::
{
"description": "new description",
"update_apply_type": "serial",
"max_parallel_subclouds": 5
}
::
{
"id": 2,
"name": "GroupX",
"description": "new description",
"update_apply_type": "serial",
"max_parallel_subclouds": 5,
"created-at": "2020-04-08 15:15:10.750592",
"updated-at": "2020-04-08 15:21:01.527101"
}
**********************************
Deletes a specific subcloud group
**********************************
.. rest_method:: DELETE /v1.0/subcloud-groups/{subcloud-group}
**Normal response codes**
204
**Request parameters**
.. csv-table::
:header: "Parameter", "Style", "Type", "Description"
:widths: 20, 20, 20, 60
"subcloud-group", "URI", "xsd:string", "The subcloud group reference, name or id."
This operation does not accept a request body.
----------------
Subcloud Alarms
----------------
@ -1143,5 +1495,97 @@ Delete per subcloud patch options
This operation does not accept a request body.
----------------
Subcloud Deploy
----------------
These APIs allow for the display and upload of the deployment manager common
files which include deploy playbook, deploy overrides, and deploy helm charts.
**************************
Show Subcloud Deploy Files
**************************
.. rest_method:: GET /v1.0/subcloud-deploy
**Normal response codes**
200
**Error response codes**
badRequest (400), unauthorized (401), forbidden
(403), badMethod (405), HTTPUnprocessableEntity (422),
internalServerError (500), serviceUnavailable (503)
**Response parameters**
.. csv-table::
:header: "Parameter", "Style", "Type", "Description"
:widths: 20, 20, 20, 60
"subcloud_deploy", "plain", "xsd:dict", "The dictionary of subcloud deploy files."
"deploy_chart", "plain", "xsd:string", "The file name of the deployment manager helm charts."
"deploy_playbook", "plain", "xsd:string", "The file name of the deployment manager playbook."
"deploy_overrides", "plain", "xsd:string", "The file name of the deployment manager overrides."
::
{
"subcloud_deploy":
{
"deploy_chart": "deployment-manager.tgz",
"deploy_playbook": "deployment-manager-playbook.yaml",
"deploy_overrides": "deployment-manager-overrides-subcloud.yaml"
}
}
This operation does not accept a request body.
****************************
Upload Subcloud Deploy Files
****************************
.. rest_method:: POST /v1.0/subcloud-deploy
Accepts Content-Type multipart/form-data.
**Normal response codes**
200
**Error response codes**
badRequest (400), unauthorized (401), forbidden (403), badMethod (405),
HTTPUnprocessableEntity (422), internalServerError (500),
serviceUnavailable (503)
**Request parameters**
.. csv-table::
:header: "Parameter", "Style", "Type", "Description"
:widths: 20, 20, 20, 60
"deploy_chart", "plain", "xsd:string", "The content of a file containing the deployment manager helm charts."
"deploy_playbook", "plain", "xsd:string", "The content of a file containing the deployment manager playbook."
"deploy_overrides", "plain", "xsd:string", "The content of a file containing the deployment manager overrides."
**Response parameters**
.. csv-table::
:header: "Parameter", "Style", "Type", "Description"
:widths: 20, 20, 20, 60
"deploy_chart", "plain", "xsd:string", "The file name of the deployment manager helm charts."
"deploy_playbook", "plain", "xsd:string", "The file name of the deployment manager playbook."
"deploy_overrides", "plain", "xsd:string", "The file name of the deployment manager overrides."
::
{
"deploy_chart": "deployment-manager.tgz",
"deploy_playbook": "deployment-manager-playbook.yaml",
"deploy_overrides": "deployment-manager-overrides-subcloud.yaml"
}

View File

@ -1,4 +1,4 @@
SRC_DIR="."
COPY_LIST="$FILES_BASE/*"
TIS_PATCH_VER=1
TIS_PATCH_VER=3

View File

@ -21,13 +21,14 @@ Source2: dcmanager-manager.service
Source3: dcorch-api.service
Source4: dcorch-engine.service
Source5: dcorch-sysinv-api-proxy.service
Source6: dcorch-snmp.service
Source7: dcorch-identity-api-proxy.service
Source8: dcdbsync-api.service
Source9: dcdbsync-openstack-api.service
Source10: dcmanager.conf
Source11: dcorch.conf
Source12: dcdbsync.conf
Source6: dcorch-identity-api-proxy.service
Source7: dcdbsync-api.service
Source8: dcdbsync-openstack-api.service
Source9: dcmanager.conf
Source10: dcorch.conf
Source11: dcdbsync.conf
Source12: clean-dcorch
Source13: dcmanager-audit.service
BuildArch: noarch
@ -72,6 +73,7 @@ Distributed Cloud provides configuration and management of distributed clouds
# DC Common
%package dccommon
Summary: DC common module
Requires: python-kubernetes
%description dccommon
Distributed Cloud Common Module
@ -128,7 +130,8 @@ install -d -m 755 %{buildroot}%{_sysconfdir}/dcmanager/
# install systemd unit files
install -p -D -m 644 %{SOURCE1} %{buildroot}%{_unitdir}/dcmanager-api.service
install -p -D -m 644 %{SOURCE2} %{buildroot}%{_unitdir}/dcmanager-manager.service
install -p -D -m 644 %{SOURCE10} %{buildroot}%{_tmpfilesdir}
install -p -D -m 644 %{SOURCE13} %{buildroot}%{_unitdir}/dcmanager-audit.service
install -p -D -m 644 %{SOURCE9} %{buildroot}%{_tmpfilesdir}
# install default config files
cd %{_builddir}/%{pypi_name}-%{version} && oslo-config-generator --config-file ./dcmanager/config-generator.conf --output-file %{_builddir}/%{pypi_name}-%{version}%{_sysconfdir}/dcmanager/dcmanager.conf.sample
install -p -D -m 640 %{_builddir}/%{pypi_name}-%{version}%{_sysconfdir}/dcmanager/dcmanager.conf.sample %{buildroot}%{_sysconfdir}/dcmanager/dcmanager.conf
@ -141,9 +144,8 @@ install -d -m 755 %{buildroot}%{_sysconfdir}/dcorch/
install -p -D -m 644 %{SOURCE3} %{buildroot}%{_unitdir}/dcorch-api.service
install -p -D -m 644 %{SOURCE4} %{buildroot}%{_unitdir}/dcorch-engine.service
install -p -D -m 644 %{SOURCE5} %{buildroot}%{_unitdir}/dcorch-sysinv-api-proxy.service
install -p -D -m 644 %{SOURCE6} %{buildroot}%{_unitdir}/dcorch-snmp.service
install -p -D -m 644 %{SOURCE7} %{buildroot}%{_unitdir}/dcorch-identity-api-proxy.service
install -p -D -m 644 %{SOURCE11} %{buildroot}%{_tmpfilesdir}
install -p -D -m 644 %{SOURCE6} %{buildroot}%{_unitdir}/dcorch-identity-api-proxy.service
install -p -D -m 644 %{SOURCE10} %{buildroot}%{_tmpfilesdir}
# install ocf scripts
install -d -m 755 ${RPM_BUILD_ROOT}/usr/lib/ocf/resource.d/openstack
@ -158,10 +160,10 @@ install -d -m 755 %{buildroot}/var/log/dcdbsync
install -d -m 755 %{buildroot}/var/cache/dcdbsync
install -d -m 755 %{buildroot}%{_sysconfdir}/dcdbsync/
# install systemd unit files
install -p -D -m 644 %{SOURCE8} %{buildroot}%{_unitdir}/dcdbsync-api.service
install -p -D -m 644 %{SOURCE7} %{buildroot}%{_unitdir}/dcdbsync-api.service
# install systemd unit files for optional second instance
install -p -D -m 644 %{SOURCE9} %{buildroot}%{_unitdir}/dcdbsync-openstack-api.service
install -p -D -m 644 %{SOURCE12} %{buildroot}%{_tmpfilesdir}
install -p -D -m 644 %{SOURCE8} %{buildroot}%{_unitdir}/dcdbsync-openstack-api.service
install -p -D -m 644 %{SOURCE11} %{buildroot}%{_tmpfilesdir}
# install default config files
cd %{_builddir}/%{pypi_name}-%{version} && oslo-config-generator --config-file ./dcdbsync/config-generator.conf --output-file %{_builddir}/%{pypi_name}-%{version}%{_sysconfdir}/dcdbsync/dcdbsync.conf.sample
install -p -D -m 640 %{_builddir}/%{pypi_name}-%{version}%{_sysconfdir}/dcdbsync/dcdbsync.conf.sample %{buildroot}%{_sysconfdir}/dcdbsync/dcdbsync.conf
@ -169,6 +171,9 @@ install -p -D -m 640 %{_builddir}/%{pypi_name}-%{version}%{_sysconfdir}/dcdbsync
# install ansible overrides dir
install -d -m 600 ${RPM_BUILD_ROOT}/opt/dc/ansible
# install dcorch cleaner
install -m 755 -D -p %{SOURCE12} %{buildroot}/%{_bindir}/clean-dcorch
%files dccommon
%license LICENSE
%{python3_sitelib}/dccommon*
@ -178,10 +183,11 @@ install -d -m 600 ${RPM_BUILD_ROOT}/opt/dc/ansible
%files dcmanager
%license LICENSE
%{python3_sitelib}/dcmanager*
%{python3_sitelib}/distributedcloud-*.egg-info
%exclude %{python3_sitelib}/dcmanager/tests
%{_bindir}/dcmanager-api
%{_unitdir}/dcmanager-api.service
%{_bindir}/dcmanager-audit
%{_unitdir}/dcmanager-audit.service
%{_bindir}/dcmanager-manager
%{_unitdir}/dcmanager-manager.service
%{_bindir}/dcmanager-manage
@ -199,7 +205,6 @@ install -d -m 600 ${RPM_BUILD_ROOT}/opt/dc/ansible
%files dcorch
%license LICENSE
%{python3_sitelib}/dcorch*
%{python3_sitelib}/distributedcloud-*.egg-info
%exclude %{python3_sitelib}/dcorch/tests
%{_bindir}/dcorch-api
%{_unitdir}/dcorch-api.service
@ -209,8 +214,7 @@ install -d -m 600 ${RPM_BUILD_ROOT}/opt/dc/ansible
%{_unitdir}/dcorch-sysinv-api-proxy.service
%{_unitdir}/dcorch-identity-api-proxy.service
%{_bindir}/dcorch-manage
%{_bindir}/dcorch-snmp
%{_unitdir}/dcorch-snmp.service
%{_bindir}/clean-dcorch
%{_tmpfilesdir}/dcorch.conf
%dir %attr(0755,root,root) %{_localstatedir}/log/dcorch
%dir %attr(0755,root,root) %{_localstatedir}/cache/dcorch
@ -224,7 +228,6 @@ install -d -m 600 ${RPM_BUILD_ROOT}/opt/dc/ansible
%files dcdbsync
%license LICENSE
%{python3_sitelib}/dcdbsync*
%{python3_sitelib}/distributedcloud-*.egg-info
%exclude %{python3_sitelib}/dcdbsync/tests
%{_bindir}/dcdbsync-api
%{_unitdir}/dcdbsync-api.service

View File

@ -0,0 +1,23 @@
#!/bin/bash
#
# Wrapper script to run dcorch-manage db_clean on active controller only.
#
# Usage: clean-dcorch [AGE_IN_DAYS]
#   AGE_IN_DAYS must be a non-negative integer; anything else (including
#   no argument) falls back to the default of 3.

# Pattern for a valid age argument: digits only.
re='^[0-9]+$'

function is_active_pgserver()
{
    # We are on the active controller iff the postgres service is
    # enabled-active on this host according to service management.
    /usr/bin/sm-query service postgres | grep -q enabled-active
    return $?
}

if is_active_pgserver
then
    # Use the caller-supplied age when it is a plain integer,
    # otherwise fall back to the default of 3 days.
    if [[ $1 =~ $re ]]
    then
        age=$1
    else
        age=3
    fi
    # Run at reduced priority so cleanup never competes with services.
    /usr/bin/nice -n 2 /usr/bin/dcorch-manage db_clean "$age" >> /var/log/dcorch/dcorch-clean.log
fi

View File

@ -0,0 +1,12 @@
# systemd unit for the DC Manager Audit service.
[Unit]
Description=DC Manager Audit Service
# Ordering: start after logging, networking, and the dcmanager-manager
# service this audit works alongside.
After=syslog-ng.service network-online.target dcmanager-manager.service
# After= is ordering-only; Wants= is required to actually pull in
# network-online.target so the ordering takes effect.
Wants=network-online.target

[Service]
Type=simple
User=root
ExecStart=/usr/bin/dcmanager-audit --config-file /etc/dcmanager/dcmanager.conf
# Restart automatically if the audit process dies unexpectedly.
Restart=on-failure

[Install]
WantedBy=multi-user.target

View File

@ -1,14 +0,0 @@
[Unit]
Description=DC Manager SNMP Service
After=syslog.target network.target mysqld.service
[Service]
Type=simple
# TODO(Bart): what user to use?
User=root
ExecStart=/usr/bin/dcorch-snmp --config-file /etc/dcorch/dcorch.conf
Restart=on-failure
[Install]
WantedBy=multi-user.target

View File

@ -21,7 +21,7 @@ SECONDS_IN_HOUR = 3600
KS_ENDPOINT_ADMIN = "admin"
KS_ENDPOINT_INTERNAL = "internal"
KS_ENDPOINT_DEFAULT = KS_ENDPOINT_INTERNAL
KS_ENDPOINT_DEFAULT = KS_ENDPOINT_ADMIN
ENDPOINT_TYPE_IDENTITY_OS = "identity_openstack"
@ -36,3 +36,6 @@ SW_UPDATE_DEFAULT_TITLE = "all clouds default"
USER_HEADER_VALUE = "distcloud"
USER_HEADER = {'User-Header': USER_HEADER_VALUE}
ADMIN_USER_NAME = "admin"
ADMIN_PROJECT_NAME = "admin"

View File

@ -50,8 +50,7 @@ class FmClient(base.DriverBase):
"""
try:
LOG.info("get_alarm_summary region %s" %
self.region_name)
LOG.debug("get_alarm_summary region %s" % self.region_name)
alarms = self.fm.alarm.summary()
except Exception as e:
LOG.error("get_alarm_summary exception={}".format(e))

View File

@ -23,6 +23,7 @@ from oslo_log import log
import requests
from requests_toolbelt import MultipartEncoder
from dccommon import consts
from dccommon.drivers import base
LOG = log.getLogger(__name__)
@ -41,9 +42,10 @@ class PatchingClient(base.DriverBase):
def __init__(self, region, session):
# Get an endpoint and token.
self.endpoint = session.get_endpoint(service_type='patching',
region_name=region,
interface='internal')
self.endpoint = session.get_endpoint(
service_type='patching',
region_name=region,
interface=consts.KS_ENDPOINT_ADMIN)
self.token = session.get_token()
def query(self, state=None, release=None,):

View File

@ -31,6 +31,7 @@ from oslo_log import log
from sysinv.common import constants as sysinv_constants
from dccommon import consts
from dccommon.drivers import base
from dccommon import exceptions
@ -66,9 +67,10 @@ class SysinvClient(base.DriverBase):
# The sysinv client doesn't support a session, so we need to
# get an endpoint and token.
endpoint = session.get_endpoint(service_type='platform',
region_name=region,
interface='internal')
endpoint = session.get_endpoint(
service_type='platform',
region_name=region,
interface=consts.KS_ENDPOINT_ADMIN)
token = session.get_token()
self.sysinv_client = client.Client(API_VERSION,
@ -158,6 +160,10 @@ class SysinvClient(base.DriverBase):
"""Get a list of loads."""
return self.sysinv_client.load.list()
def get_upgrades(self):
"""Get a list of upgrades."""
return self.sysinv_client.upgrade.list()
def get_applications(self):
"""Get a list of containerized applications"""

View File

@ -30,6 +30,7 @@ from dccommon import exceptions
LOG = log.getLogger(__name__)
STRATEGY_NAME_FW_UPDATE = 'fw-update'
STRATEGY_NAME_SW_PATCH = 'sw-patch'
STRATEGY_NAME_SW_UPGRADE = 'sw-upgrade'
@ -75,9 +76,10 @@ class VimClient(base.DriverBase):
try:
# The nfv_client doesn't support a session, so we need to
# get an endpoint and token.
self.endpoint = session.get_endpoint(service_type='nfv',
region_name=region,
interface='internal')
self.endpoint = session.get_endpoint(
service_type='nfv',
region_name=region,
interface=consts.KS_ENDPOINT_ADMIN)
self.token = session.get_token()
except exceptions.ServiceUnavailable:

View File

@ -90,7 +90,7 @@ class EndpointCache(object):
name='keystone', type='identity')
sc_auth_url = self.keystone_client.endpoints.list(
service=identity_service[0].id,
interface=consts.KS_ENDPOINT_INTERNAL,
interface=consts.KS_ENDPOINT_ADMIN,
region=region_name)
try:
sc_auth_url = sc_auth_url[0].url
@ -116,6 +116,11 @@ class EndpointCache(object):
region_name=region_name)
self.external_auth_url = sc_auth_url
@staticmethod
def _is_central_cloud(region_id):
central_cloud_regions = [consts.CLOUD_0, consts.VIRTUAL_MASTER_CLOUD]
return region_id in central_cloud_regions
@staticmethod
def _get_endpoint_from_keystone(self):
service_id_name_map = {}
@ -126,9 +131,15 @@ class EndpointCache(object):
region_service_endpoint_map = {}
for endpoint in self.keystone_client.endpoints.list():
endpoint_dict = endpoint.to_dict()
if endpoint_dict['interface'] != consts.KS_ENDPOINT_INTERNAL:
continue
region_id = endpoint_dict['region']
# within central cloud, use internal endpoints
if EndpointCache._is_central_cloud(region_id) and \
endpoint_dict['interface'] != consts.KS_ENDPOINT_INTERNAL:
continue
# Otherwise should always use admin endpoints
elif endpoint_dict['interface'] != consts.KS_ENDPOINT_ADMIN:
continue
service_id = endpoint_dict['service_id']
url = endpoint_dict['url']
service_name = service_id_name_map[service_id]

View File

@ -81,11 +81,6 @@ class OAMAddressesNotFound(NotFound):
message = _("OAM Addresses Not Found")
class TrapDestAlreadyExists(Conflict):
message = _("TrapDest in region=%(region_name)s ip_address=%(ip_address)s "
"community=%(community)s already exists")
class TrapDestNotFound(NotFound):
message = _("Trapdest in region=%(region_name)s with ip_address "
"%(ip_address)s not found")

View File

@ -0,0 +1,147 @@
#
# Copyright (c) 2020 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from kubernetes import client
from kubernetes.client import Configuration
from kubernetes.client.rest import ApiException
from kubernetes import config
from oslo_log import log as logging
from six.moves import http_client as httplib
LOG = logging.getLogger(__name__)
KUBE_CONFIG_PATH = '/etc/kubernetes/admin.conf'
CERT_MANAGER_GROUP = 'cert-manager.io'
CERT_MANAGER_VERSION = 'v1alpha2'
CERT_MANAGER_CERTIFICATE = 'certificates'
class KubeOperator(object):
    """Thin helper around the kubernetes Python client.

    API clients are built lazily from the admin kubeconfig and cached on
    the instance.  Provides convenience wrappers for Secrets and for
    cert-manager Certificate custom resources.
    """

    def __init__(self):
        # Clients are created on first use; see _get_kubernetesclient_*().
        self._kube_client_batch = None
        self._kube_client_core = None
        self._kube_client_custom_objects = None

    def _load_kube_config(self):
        """Load the admin kubeconfig used to reach the local cluster."""
        config.load_kube_config(KUBE_CONFIG_PATH)

        # Workaround: Turn off SSL/TLS verification.
        # NOTE(review): this replaces the *default* Configuration, so it
        # affects every client created after this call, not just this
        # instance — confirm this is intentional.
        c = Configuration()
        c.verify_ssl = False
        Configuration.set_default(c)

    def _get_kubernetesclient_batch(self):
        """Return a cached BatchV1Api client, creating it if needed."""
        if not self._kube_client_batch:
            self._load_kube_config()
            self._kube_client_batch = client.BatchV1Api()
        return self._kube_client_batch

    def _get_kubernetesclient_core(self):
        """Return a cached CoreV1Api client, creating it if needed."""
        if not self._kube_client_core:
            self._load_kube_config()
            self._kube_client_core = client.CoreV1Api()
        return self._kube_client_core

    def _get_kubernetesclient_custom_objects(self):
        """Return a cached CustomObjectsApi client, creating it if needed."""
        if not self._kube_client_custom_objects:
            self._load_kube_config()
            self._kube_client_custom_objects = client.CustomObjectsApi()
        return self._kube_client_custom_objects

    def kube_get_secret(self, name, namespace):
        """Fetch a Secret by name.

        :param name: secret name
        :param namespace: namespace to look in
        :return: the Secret object, or None if it does not exist
        :raises: ApiException for any API error other than 404
        """
        c = self._get_kubernetesclient_core()
        try:
            return c.read_namespaced_secret(name, namespace)
        except ApiException as e:
            if e.status == httplib.NOT_FOUND:
                return None
            else:
                LOG.error("Failed to get Secret %s under "
                          "Namespace %s: %s" % (name, namespace, e.body))
                raise
        except Exception as e:
            LOG.error("Kubernetes exception in kube_get_secret: %s" % e)
            raise

    def kube_delete_secret(self, name, namespace, **kwargs):
        """Delete a Secret; a missing Secret is only logged as a warning.

        Extra keyword arguments are passed through as the delete body.
        """
        body = {}
        if kwargs:
            body.update(kwargs)

        c = self._get_kubernetesclient_core()
        try:
            c.delete_namespaced_secret(name, namespace, body)
        except ApiException as e:
            if e.status == httplib.NOT_FOUND:
                # Fix: LOG.warn is a deprecated alias of LOG.warning.
                LOG.warning("Secret %s under Namespace %s "
                            "not found." % (name, namespace))
            else:
                LOG.error("Failed to clean up Secret %s under "
                          "Namespace %s: %s" % (name, namespace, e.body))
                raise
        except Exception as e:
            LOG.error("Kubernetes exception in kube_delete_secret: %s" % e)
            raise

    def get_cert_manager_certificate(self, namespace, name):
        """Fetch a cert-manager Certificate custom resource.

        :return: the Certificate object, or None if it does not exist
        :raises: ApiException for any API error other than 404
        """
        custom_object_api = self._get_kubernetesclient_custom_objects()

        try:
            cert = custom_object_api.get_namespaced_custom_object(
                CERT_MANAGER_GROUP,
                CERT_MANAGER_VERSION,
                namespace,
                CERT_MANAGER_CERTIFICATE,
                name)
        except ApiException as e:
            if e.status == httplib.NOT_FOUND:
                return None
            else:
                LOG.error("Fail to access %s:%s. %s" % (namespace, name, e))
                raise
        else:
            return cert

    def apply_cert_manager_certificate(self, namespace, name, body):
        """Create the Certificate, or patch it if it already exists."""
        custom_object_api = self._get_kubernetesclient_custom_objects()

        cert = self.get_cert_manager_certificate(namespace, name)
        if cert:
            custom_object_api.patch_namespaced_custom_object(
                CERT_MANAGER_GROUP,
                CERT_MANAGER_VERSION,
                namespace,
                CERT_MANAGER_CERTIFICATE,
                name,
                body
            )
        else:
            custom_object_api.create_namespaced_custom_object(
                CERT_MANAGER_GROUP,
                CERT_MANAGER_VERSION,
                namespace,
                CERT_MANAGER_CERTIFICATE,
                body)

    def delete_cert_manager_certificate(self, namespace, name):
        """Delete the Certificate; a missing resource is not an error."""
        custom_object_api = self._get_kubernetesclient_custom_objects()

        try:
            custom_object_api.delete_namespaced_custom_object(
                CERT_MANAGER_GROUP,
                CERT_MANAGER_VERSION,
                namespace,
                CERT_MANAGER_CERTIFICATE,
                name,
                {}
            )
        except ApiException as e:
            if e.status != httplib.NOT_FOUND:
                LOG.error("Fail to delete %s:%s. %s" % (namespace, name, e))
                raise

View File

@ -13,7 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
#
# Copyright (c) 2017 Wind River Systems, Inc.
# Copyright (c) 2017-2020 Wind River Systems, Inc.
#
# The right to copy, distribute, modify, or otherwise make use
# of this software may be licensed only pursuant to the terms
@ -21,7 +21,9 @@
#
from dcmanager.api.controllers import restcomm
from dcorch.rpc import client as dcorch_rpc_client
from dcmanager.common import consts
from dcmanager.db import api as db_api
from oslo_log import log as logging
from pecan import expose
@ -35,7 +37,6 @@ class SubcloudAlarmController(object):
def __init__(self, *args, **kwargs):
super(SubcloudAlarmController, self).__init__(*args, **kwargs)
self.dcorch_rpc_client = dcorch_rpc_client.EngineClient()
# to do the version compatibility for future purpose
def _determine_version_cap(self, target):
@ -50,9 +51,9 @@ class SubcloudAlarmController(object):
def _get_alarm_aggregates(self):
summary = []
context = restcomm.extract_context_from_environ()
alarms = self.dcorch_rpc_client.get_alarm_summary(context)
alarms = db_api.subcloud_alarms_get_all(context)
for alarm in alarms:
alarm_dict = {'region_name': alarm['region_name'],
alarm_dict = {'region_name': alarm['name'],
'uuid': alarm['uuid'],
'critical_alarms': alarm['critical_alarms'],
'major_alarms': alarm['major_alarms'],
@ -71,11 +72,10 @@ class SubcloudAlarmController(object):
def _get_alarm_summary(self):
alarms = self._get_alarm_aggregates()
summary = {'critical': 0,
'degraded': 0,
'ok': 0,
'unreachable': 0}
for alarm in alarms:
summary = {consts.ALARM_CRITICAL_STATUS: 0,
consts.ALARM_DEGRADED_STATUS: 0,
consts.ALARM_OK_STATUS: 0}
for alarm in alarms['alarm_summary']:
summary[alarm['cloud_status']] += 1
return summary

View File

@ -19,15 +19,15 @@
# of this software may be licensed only pursuant to the terms
# of an applicable Wind River license agreement.
#
import pecan
from dcmanager.api.controllers.v1 import alarm_manager
from dcmanager.api.controllers.v1 import subcloud_deploy
from dcmanager.api.controllers.v1 import subcloud_group
from dcmanager.api.controllers.v1 import subclouds
from dcmanager.api.controllers.v1 import sw_update_options
from dcmanager.api.controllers.v1 import sw_update_strategy
import pecan
class Controller(object):
@ -41,11 +41,15 @@ class Controller(object):
sub_controllers = dict()
if minor_version == '0':
sub_controllers["subclouds"] = subclouds.SubcloudsController
sub_controllers["subcloud-deploy"] = subcloud_deploy.\
SubcloudDeployController
sub_controllers["alarms"] = alarm_manager.SubcloudAlarmController
sub_controllers["sw-update-strategy"] = \
sw_update_strategy.SwUpdateStrategyController
sub_controllers["sw-update-options"] = \
sw_update_options.SwUpdateOptionsController
sub_controllers["subcloud-groups"] = \
subcloud_group.SubcloudGroupsController
for name, ctrl in sub_controllers.items():
setattr(self, name, ctrl)

View File

@ -0,0 +1,119 @@
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Copyright (c) 2020 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
import os
from oslo_config import cfg
from oslo_log import log as logging
import http.client as httpclient
import pecan
from pecan import expose
from pecan import request
from dcmanager.common import consts
from dcmanager.common.i18n import _
from dcmanager.common import utils
import tsconfig.tsconfig as tsc
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
LOCK_NAME = 'SubcloudDeployController'
class SubcloudDeployController(object):
    """API controller for uploading and retrieving subcloud deploy files."""

    def __init__(self):
        super(SubcloudDeployController, self).__init__()

    @staticmethod
    def _upload_files(dir_path, file_option, file_item, binary):
        """Store an uploaded deploy file under dir_path.

        Any previously stored file for the same option is removed first.

        :param dir_path: destination directory (created if missing)
        :param file_option: deploy file option name, used as filename prefix
        :param file_item: uploaded file field from the POST request
        :param binary: write in binary mode when True
        """
        prefix = file_option + '_'
        # create the version directory if it does not exist
        if not os.path.isdir(dir_path):
            os.mkdir(dir_path, 0o755)
        else:
            # check if the file exists, if so remove it
            filename = utils.get_filename_by_prefix(dir_path, prefix)
            if filename is not None:
                os.remove(dir_path + '/' + filename)

        # upload the new file
        file_item.file.seek(0, os.SEEK_SET)
        contents = file_item.file.read()
        fn = os.path.join(dir_path, prefix + os.path.basename(
            file_item.filename))
        if binary:
            # Fix: close the file handle (leaked by the original code).
            with open(fn, 'wb') as dst:
                dst.write(contents)
        else:
            # Fix: close the descriptor returned by os.open (leaked by the
            # original code).
            dst = os.open(fn, os.O_WRONLY | os.O_CREAT)
            try:
                os.write(dst, contents)
            finally:
                os.close(dst)

    @expose(generic=True, template='json')
    def index(self):
        # Route the request to specific methods with parameters
        pass

    @utils.synchronized(LOCK_NAME)
    @index.when(method='POST', template='json')
    def post(self):
        """Upload the full set of deploy files for the active release.

        Every option in DEPLOY_COMMON_FILE_OPTIONS must be present in the
        request; the chart file is stored in binary mode.

        :return: mapping of file option to uploaded filename
        """
        deploy_dicts = dict()
        for f in consts.DEPLOY_COMMON_FILE_OPTIONS:
            if f not in request.POST:
                pecan.abort(httpclient.BAD_REQUEST,
                            _("Missing required file for %s") % f)

            file_item = request.POST[f]
            filename = getattr(file_item, 'filename', '')
            if not filename:
                pecan.abort(httpclient.BAD_REQUEST,
                            _("No %s file uploaded" % f))

            dir_path = tsc.DEPLOY_PATH
            # Only the helm chart is a binary payload.
            binary = False
            if f == consts.DEPLOY_CHART:
                binary = True
            try:
                self._upload_files(dir_path, f, file_item, binary)
            except Exception as e:
                pecan.abort(httpclient.INTERNAL_SERVER_ERROR,
                            _("Failed to upload %s file: %s" % (f, e)))
            deploy_dicts.update({f: filename})

        return deploy_dicts

    @index.when(method='GET', template='json')
    def get(self):
        """Get the subcloud deploy files that has been uploaded and stored"""
        deploy_dicts = dict()
        for f in consts.DEPLOY_COMMON_FILE_OPTIONS:
            dir_path = tsc.DEPLOY_PATH
            filename = None
            if os.path.isdir(dir_path):
                prefix = f + '_'
                filename = utils.get_filename_by_prefix(dir_path, prefix)
                if filename is not None:
                    # Strip the option prefix before reporting the name.
                    filename = filename.replace(prefix, '', 1)
            deploy_dicts.update({f: filename})
        return dict(subcloud_deploy=deploy_dicts)

View File

@ -0,0 +1,313 @@
# Copyright (c) 2017 Ericsson AB.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Copyright (c) 2020 Wind River Systems, Inc.
#
# The right to copy, distribute, modify, or otherwise make use
# of this software may be licensed only pursuant to the terms
# of an applicable Wind River license agreement.
#
from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_log import log as logging
from oslo_messaging import RemoteError
import http.client as httpclient
import pecan
from pecan import expose
from pecan import request
from dcmanager.api.controllers import restcomm
from dcmanager.common import consts
from dcmanager.common import exceptions
from dcmanager.common.i18n import _
from dcmanager.db import api as db_api
from dcmanager.rpc import client as rpc_client
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
SUPPORTED_GROUP_APPLY_TYPES = [
consts.SUBCLOUD_APPLY_TYPE_PARALLEL,
consts.SUBCLOUD_APPLY_TYPE_SERIAL
]
# validation constants for Subcloud Group
MAX_SUBCLOUD_GROUP_NAME_LEN = 255
MAX_SUBCLOUD_GROUP_DESCRIPTION_LEN = 255
MIN_SUBCLOUD_GROUP_MAX_PARALLEL_SUBCLOUDS = 1
MAX_SUBCLOUD_GROUP_MAX_PARALLEL_SUBCLOUDS = 100
class SubcloudGroupsController(object):
    """API controller for CRUD operations on subcloud groups."""

    def __init__(self):
        super(SubcloudGroupsController, self).__init__()
        self.rpc_client = rpc_client.ManagerClient()

    @expose(generic=True, template='json')
    def index(self):
        # Route the request to specific methods with parameters
        pass

    def _get_subcloud_list_for_group(self, context, group_id):
        """Return {'subclouds': [...]} for all subclouds in the group."""
        subcloud_list = []
        subclouds = db_api.subcloud_get_for_group(context, group_id)
        for subcloud in subclouds:
            subcloud_dict = db_api.subcloud_db_model_to_dict(subcloud)
            subcloud_list.append(subcloud_dict)

        result = dict()
        result['subclouds'] = subcloud_list
        return result

    def _get_subcloud_group_list(self, context):
        """Return {'subcloud_groups': [...]} for all subcloud groups."""
        groups = db_api.subcloud_group_get_all(context)
        subcloud_group_list = []
        for group in groups:
            group_dict = db_api.subcloud_group_db_model_to_dict(group)
            subcloud_group_list.append(group_dict)

        result = dict()
        result['subcloud_groups'] = subcloud_group_list
        return result

    def _get_by_ref(self, context, group_ref):
        # Handle getting a group by either name, or ID
        group = None
        if group_ref.isdigit():
            # Lookup subcloud group as an ID
            try:
                group = db_api.subcloud_group_get(context, group_ref)
            except exceptions.SubcloudGroupNotFound:
                return None
        else:
            # Lookup subcloud group as a name
            try:
                group = db_api.subcloud_group_get_by_name(context, group_ref)
            except exceptions.SubcloudGroupNameNotFound:
                return None
        return group

    @index.when(method='GET', template='json')
    def get(self, group_ref=None, subclouds=False):
        """Get details about subcloud group.

        :param group_ref: ID or name of subcloud group
        :param subclouds: when truthy, return the group's subclouds instead
        """
        context = restcomm.extract_context_from_environ()

        if group_ref is None:
            # List of subcloud groups requested
            return self._get_subcloud_group_list(context)

        group = self._get_by_ref(context, group_ref)
        if group is None:
            pecan.abort(httpclient.NOT_FOUND, _('Subcloud Group not found'))
        if subclouds:
            # Return only the subclouds for this subcloud group
            return self._get_subcloud_list_for_group(context, group.id)
        subcloud_group_dict = db_api.subcloud_group_db_model_to_dict(group)
        return subcloud_group_dict

    def _validate_name(self, name):
        # Reject post and update operations for name that:
        # - attempt to set to None
        # - attempt to set to a number
        # - attempt to set to the Default subcloud group
        # - exceed the max length
        if not name:
            return False
        if name.isdigit():
            return False
        if name == consts.DEFAULT_SUBCLOUD_GROUP_NAME:
            return False
        if len(name) >= MAX_SUBCLOUD_GROUP_NAME_LEN:
            return False
        return True

    def _validate_description(self, description):
        # Reject an empty description or one exceeding the max length.
        if not description:
            return False
        if len(description) >= MAX_SUBCLOUD_GROUP_DESCRIPTION_LEN:
            return False
        return True

    def _validate_update_apply_type(self, update_apply_type):
        # Must be one of the supported apply types (parallel/serial).
        if not update_apply_type:
            return False
        if update_apply_type not in SUPPORTED_GROUP_APPLY_TYPES:
            return False
        return True

    def _validate_max_parallel_subclouds(self, max_parallel_str):
        # Must be an integer within the supported [min, max] range.
        if not max_parallel_str:
            return False
        try:
            # Check the value is an integer
            val = int(max_parallel_str)
        except ValueError:
            return False

        # We do not support less than min or greater than max
        if val < MIN_SUBCLOUD_GROUP_MAX_PARALLEL_SUBCLOUDS:
            return False
        if val > MAX_SUBCLOUD_GROUP_MAX_PARALLEL_SUBCLOUDS:
            return False
        return True

    @index.when(method='POST', template='json')
    def post(self):
        """Create a new subcloud group."""
        context = restcomm.extract_context_from_environ()

        # NOTE(review): eval() on an untrusted request body executes
        # arbitrary Python — this should be json.loads(request.body).
        # Flagged here rather than silently changed.
        payload = eval(request.body)
        if not payload:
            pecan.abort(httpclient.BAD_REQUEST, _('Body required'))
        name = payload.get('name')
        description = payload.get('description')
        update_apply_type = payload.get('update_apply_type')
        max_parallel_subclouds = payload.get('max_parallel_subclouds')

        # Validate payload
        if not self._validate_name(name):
            pecan.abort(httpclient.BAD_REQUEST, _('Invalid group name'))
        if not self._validate_description(description):
            pecan.abort(httpclient.BAD_REQUEST, _('Invalid group description'))
        if not self._validate_update_apply_type(update_apply_type):
            pecan.abort(httpclient.BAD_REQUEST,
                        _('Invalid group update_apply_type'))
        if not self._validate_max_parallel_subclouds(max_parallel_subclouds):
            pecan.abort(httpclient.BAD_REQUEST,
                        _('Invalid group max_parallel_subclouds'))
        try:
            group_ref = db_api.subcloud_group_create(context,
                                                     name,
                                                     description,
                                                     update_apply_type,
                                                     max_parallel_subclouds)
            return db_api.subcloud_group_db_model_to_dict(group_ref)
        except db_exc.DBDuplicateEntry:
            LOG.info("Group create failed. Group %s already exists" % name)
            pecan.abort(httpclient.BAD_REQUEST,
                        _('A subcloud group with this name already exists'))
        except RemoteError as e:
            pecan.abort(httpclient.UNPROCESSABLE_ENTITY, e.value)
        except Exception as e:
            # TODO(abailey) add support for GROUP already exists (409)
            LOG.exception(e)
            pecan.abort(httpclient.INTERNAL_SERVER_ERROR,
                        _('Unable to create subcloud group'))

    @index.when(method='PATCH', template='json')
    def patch(self, group_ref):
        """Update a subcloud group.

        :param group_ref: ID or name of subcloud group to update
        """
        context = restcomm.extract_context_from_environ()
        if group_ref is None:
            pecan.abort(httpclient.BAD_REQUEST,
                        _('Subcloud Group Name or ID required'))
        # NOTE(review): eval() on an untrusted request body executes
        # arbitrary Python — this should be json.loads(request.body).
        # Flagged here rather than silently changed.
        payload = eval(request.body)
        if not payload:
            pecan.abort(httpclient.BAD_REQUEST, _('Body required'))

        group = self._get_by_ref(context, group_ref)
        if group is None:
            pecan.abort(httpclient.NOT_FOUND, _('Subcloud Group not found'))

        name = payload.get('name')
        description = payload.get('description')
        update_apply_type = payload.get('update_apply_type')
        max_parallel_str = payload.get('max_parallel_subclouds')

        if not (name or description or update_apply_type or max_parallel_str):
            pecan.abort(httpclient.BAD_REQUEST, _('nothing to update'))

        # Check value is not None or empty before calling validate
        if name:
            if not self._validate_name(name):
                pecan.abort(httpclient.BAD_REQUEST,
                            _('Invalid group name'))
            # Special case. Default group name cannot be changed
            if group.id == consts.DEFAULT_SUBCLOUD_GROUP_ID:
                pecan.abort(httpclient.BAD_REQUEST,
                            _('Default group name cannot be changed'))
        if description:
            if not self._validate_description(description):
                pecan.abort(httpclient.BAD_REQUEST,
                            _('Invalid group description'))
        if update_apply_type:
            if not self._validate_update_apply_type(update_apply_type):
                pecan.abort(httpclient.BAD_REQUEST,
                            _('Invalid group update_apply_type'))
        if max_parallel_str:
            if not self._validate_max_parallel_subclouds(max_parallel_str):
                pecan.abort(httpclient.BAD_REQUEST,
                            _('Invalid group max_parallel_subclouds'))

        try:
            updated_group = db_api.subcloud_group_update(
                context,
                group.id,
                name=name,
                description=description,
                update_apply_type=update_apply_type,
                max_parallel_subclouds=max_parallel_str)
            return db_api.subcloud_group_db_model_to_dict(updated_group)
        except RemoteError as e:
            pecan.abort(httpclient.UNPROCESSABLE_ENTITY, e.value)
        except Exception as e:
            # additional exceptions.
            LOG.exception(e)
            pecan.abort(httpclient.INTERNAL_SERVER_ERROR,
                        _('Unable to update subcloud group'))

    @index.when(method='delete', template='json')
    def delete(self, group_ref):
        """Delete the subcloud group."""
        context = restcomm.extract_context_from_environ()

        if group_ref is None:
            pecan.abort(httpclient.BAD_REQUEST,
                        _('Subcloud Group Name or ID required'))
        group = self._get_by_ref(context, group_ref)
        if group is None:
            pecan.abort(httpclient.NOT_FOUND, _('Subcloud Group not found'))

        if group.name == consts.DEFAULT_SUBCLOUD_GROUP_NAME:
            pecan.abort(httpclient.BAD_REQUEST,
                        _('Default Subcloud Group may not be deleted'))

        # a subcloud group may not be deleted if it is use by any subclouds
        # Fix: this check previously lived inside the try block below, where
        # the HTTP 400 raised by pecan.abort() was caught by the broad
        # "except Exception" handler and converted into a 500 error.
        subclouds = db_api.subcloud_get_for_group(context, group.id)
        if len(subclouds) > 0:
            pecan.abort(httpclient.BAD_REQUEST,
                        _('Subcloud Group not empty'))
        try:
            db_api.subcloud_group_destroy(context, group.id)
        except RemoteError as e:
            pecan.abort(httpclient.UNPROCESSABLE_ENTITY, e.value)
        except Exception as e:
            LOG.exception(e)
            pecan.abort(httpclient.INTERNAL_SERVER_ERROR,
                        _('Unable to delete subcloud group'))
        # This should return nothing
        return None

View File

@ -18,11 +18,13 @@
# SPDX-License-Identifier: Apache-2.0
#
import base64
import keyring
from netaddr import AddrFormatError
from netaddr import IPAddress
from netaddr import IPNetwork
from netaddr import IPRange
import os
from oslo_config import cfg
from oslo_log import log as logging
from oslo_messaging import RemoteError
@ -32,16 +34,14 @@ import pecan
from pecan import expose
from pecan import request
from controllerconfig.common.exceptions import ValidateFail
from controllerconfig.utils import validate_address_str
from controllerconfig.utils import validate_network_str
from dccommon.drivers.openstack.keystone_v3 import KeystoneClient
from dccommon.drivers.openstack.sysinv_v1 import SysinvClient
from dccommon import exceptions as dccommon_exceptions
from keystoneauth1 import exceptions as keystone_exceptions
import tsconfig.tsconfig as tsc
from dcmanager.api.controllers import restcomm
from dcmanager.common import consts
from dcmanager.common import exceptions
@ -52,6 +52,7 @@ from dcmanager.db import api as db_api
from dcmanager.rpc import client as rpc_client
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
# System mode
@ -61,6 +62,18 @@ SYSTEM_MODE_DUPLEX_DIRECT = "duplex-direct"
LOCK_NAME = 'SubcloudsController'
BOOTSTRAP_VALUES = 'bootstrap_values'
INSTALL_VALUES = 'install_values'
SUBCLOUD_ADD_MANDATORY_FILE = [
BOOTSTRAP_VALUES,
]
SUBCLOUD_ADD_GET_FILE_CONTENTS = [
BOOTSTRAP_VALUES,
INSTALL_VALUES,
]
class SubcloudsController(object):
VERSION_ALIASES = {
@ -81,6 +94,67 @@ class SubcloudsController(object):
# Route the request to specific methods with parameters
pass
def _validate_group_id(self, context, group_id):
try:
# The DB API will raise an exception if the group_id is invalid
db_api.subcloud_group_get(context, group_id)
except Exception as e:
LOG.exception(e)
pecan.abort(400, _("Invalid group_id"))
@staticmethod
def _get_common_deploy_files(payload):
for f in consts.DEPLOY_COMMON_FILE_OPTIONS:
filename = None
dir_path = tsc.DEPLOY_PATH
if os.path.isdir(dir_path):
filename = utils.get_filename_by_prefix(dir_path, f + '_')
if filename is None:
pecan.abort(400, _("Missing required deploy file for %s") % f)
payload.update({f: os.path.join(dir_path, filename)})
def _upload_deploy_config_file(self, request, payload):
if consts.DEPLOY_CONFIG in request.POST:
file_item = request.POST[consts.DEPLOY_CONFIG]
filename = getattr(file_item, 'filename', '')
if not filename:
pecan.abort(400, _("No %s file uploaded" %
consts.DEPLOY_CONFIG))
file_item.file.seek(0, os.SEEK_SET)
contents = file_item.file.read()
# the deploy config needs to upload to the override location
fn = os.path.join(consts.ANSIBLE_OVERRIDES_PATH, payload['name']
+ '_' + os.path.basename(filename))
try:
dst = os.open(fn, os.O_WRONLY | os.O_CREAT)
os.write(dst, contents)
except Exception:
msg = _("Failed to upload %s file" % consts.DEPLOY_CONFIG)
LOG.exception(msg)
pecan.abort(400, msg)
payload.update({consts.DEPLOY_CONFIG: fn})
self._get_common_deploy_files(payload)
@staticmethod
def _get_request_data(request):
payload = dict()
for f in SUBCLOUD_ADD_MANDATORY_FILE:
if f not in request.POST:
pecan.abort(400, _("Missing required file for %s") % f)
for f in SUBCLOUD_ADD_GET_FILE_CONTENTS:
if f in request.POST:
file_item = request.POST[f]
file_item.file.seek(0, os.SEEK_SET)
data = yaml.safe_load(file_item.file.read().decode('utf8'))
if f == BOOTSTRAP_VALUES:
payload.update(data)
else:
payload.update({f: data})
del request.POST[f]
payload.update(request.POST)
return payload
def _validate_subcloud_config(self,
context,
name,
@ -91,7 +165,8 @@ class SubcloudsController(object):
external_oam_subnet_str,
external_oam_gateway_address_str,
external_oam_floating_address_str,
systemcontroller_gateway_ip_str):
systemcontroller_gateway_ip_str,
group_id):
"""Check whether subcloud config is valid."""
# Validate the name
@ -116,28 +191,28 @@ class SubcloudsController(object):
management_subnet = None
try:
management_subnet = validate_network_str(
management_subnet = utils.validate_network_str(
management_subnet_str,
minimum_size=MIN_MANAGEMENT_SUBNET_SIZE,
existing_networks=subcloud_subnets)
except ValidateFail as e:
except exceptions.ValidateFail as e:
LOG.exception(e)
pecan.abort(400, _("management_subnet invalid: %s") % e)
# Parse/validate the start/end addresses
management_start_ip = None
try:
management_start_ip = validate_address_str(
management_start_ip = utils.validate_address_str(
management_start_ip_str, management_subnet)
except ValidateFail as e:
except exceptions.ValidateFail as e:
LOG.exception(e)
pecan.abort(400, _("management_start_address invalid: %s") % e)
management_end_ip = None
try:
management_end_ip = validate_address_str(
management_end_ip = utils.validate_address_str(
management_end_ip_str, management_subnet)
except ValidateFail as e:
except exceptions.ValidateFail as e:
LOG.exception(e)
pecan.abort(400, _("management_end_address invalid: %s") % e)
@ -156,9 +231,9 @@ class SubcloudsController(object):
# Parse/validate the gateway
try:
validate_address_str(
utils.validate_address_str(
management_gateway_ip_str, management_subnet)
except ValidateFail as e:
except exceptions.ValidateFail as e:
LOG.exception(e)
pecan.abort(400, _("management_gateway_address invalid: %s") % e)
@ -185,9 +260,9 @@ class SubcloudsController(object):
management_address_pool.prefix)
systemcontroller_subnet = IPNetwork(systemcontroller_subnet_str)
try:
validate_address_str(
utils.validate_address_str(
systemcontroller_gateway_ip_str, systemcontroller_subnet)
except ValidateFail as e:
except exceptions.ValidateFail as e:
LOG.exception(e)
pecan.abort(400,
_("systemcontroller_gateway_address invalid: %s") % e)
@ -207,28 +282,29 @@ class SubcloudsController(object):
MIN_OAM_SUBNET_SIZE = 3
oam_subnet = None
try:
oam_subnet = validate_network_str(
oam_subnet = utils.validate_network_str(
external_oam_subnet_str,
minimum_size=MIN_OAM_SUBNET_SIZE,
existing_networks=subcloud_subnets)
except ValidateFail as e:
except exceptions.ValidateFail as e:
LOG.exception(e)
pecan.abort(400, _("external_oam_subnet invalid: %s") % e)
# Parse/validate the addresses
try:
validate_address_str(
utils.validate_address_str(
external_oam_gateway_address_str, oam_subnet)
except ValidateFail as e:
except exceptions.ValidateFail as e:
LOG.exception(e)
pecan.abort(400, _("oam_gateway_address invalid: %s") % e)
try:
validate_address_str(
utils.validate_address_str(
external_oam_floating_address_str, oam_subnet)
except ValidateFail as e:
except exceptions.ValidateFail as e:
LOG.exception(e)
pecan.abort(400, _("oam_floating_address invalid: %s") % e)
self._validate_group_id(context, group_id)
@staticmethod
def _validate_install_values(payload):
@ -236,6 +312,13 @@ class SubcloudsController(object):
bmc_password = payload.get('bmc_password')
if not bmc_password:
pecan.abort(400, _('subcloud bmc_password required'))
try:
bmc_password = base64.b64decode(bmc_password).decode('utf-8')
except Exception:
msg = _('Failed to decode subcloud bmc_password, verify'
' the password is base64 encoded')
LOG.exception(msg)
pecan.abort(400, msg)
payload['install_values'].update({'bmc_password': bmc_password})
for k in install_consts.MANDATORY_INSTALL_VALUES:
@ -288,8 +371,8 @@ class SubcloudsController(object):
network_str = (install_values['network_address'] + '/' +
str(install_values['network_mask']))
try:
network = validate_network_str(network_str, 1)
except ValidateFail as e:
network = utils.validate_network_str(network_str, 1)
except exceptions.ValidateFail as e:
LOG.exception(e)
pecan.abort(400, _("network address invalid: %s") % e)
@ -365,6 +448,38 @@ class SubcloudsController(object):
LOG.error(message)
return None
def _add_subcloud_to_database(self, context, payload):
try:
db_api.subcloud_get_by_name(context, payload['name'])
except exceptions.SubcloudNameNotFound:
pass
else:
raise exceptions.BadRequest(
resource='subcloud',
msg='Subcloud with that name already exists')
# Subcloud is added with software version that matches system
# controller.
software_version = tsc.SW_VERSION
# if group_id has been omitted from payload, use 'Default'.
group_id = payload.get('group_id',
consts.DEFAULT_SUBCLOUD_GROUP_ID)
subcloud = db_api.subcloud_create(
context,
payload['name'],
payload.get('description'),
payload.get('location'),
software_version,
payload['management_subnet'],
payload['management_gateway_address'],
payload['management_start_address'],
payload['management_end_address'],
payload['systemcontroller_gateway_address'],
consts.DEPLOY_STATE_NONE,
False,
group_id)
return subcloud
@index.when(method='GET', template='json')
def get(self, subcloud_ref=None, detail=None):
"""Get details about subcloud.
@ -494,13 +609,16 @@ class SubcloudsController(object):
context = restcomm.extract_context_from_environ()
if subcloud_ref is None:
payload = yaml.safe_load(request.body)
payload = self._get_request_data(request)
if not payload:
pecan.abort(400, _('Body required'))
name = payload.get('name')
if not name:
pecan.abort(400, _('name required'))
system_mode = payload.get('system_mode')
if not system_mode:
pecan.abort(400, _('system_mode required'))
@ -545,6 +663,18 @@ class SubcloudsController(object):
payload.get('sysadmin_password')
if not sysadmin_password:
pecan.abort(400, _('subcloud sysadmin_password required'))
try:
payload['sysadmin_password'] = base64.b64decode(
sysadmin_password).decode('utf-8')
except Exception:
msg = _('Failed to decode subcloud sysadmin_password, '
'verify the password is base64 encoded')
LOG.exception(msg)
pecan.abort(400, msg)
# If a subcloud group is not passed, use the default
group_id = payload.get('group_id',
consts.DEFAULT_SUBCLOUD_GROUP_ID)
self._validate_subcloud_config(context,
name,
@ -555,19 +685,28 @@ class SubcloudsController(object):
external_oam_subnet,
external_oam_gateway_ip,
external_oam_floating_ip,
systemcontroller_gateway_ip)
systemcontroller_gateway_ip,
group_id)
if 'install_values' in payload:
self._validate_install_values(payload)
# Upload the deploy config files if it is included in the request
# It has a dependency on the subcloud name, and it is called after
# the name has been validated
self._upload_deploy_config_file(request, payload)
try:
# Add the subcloud details to the database
subcloud = self._add_subcloud_to_database(context, payload)
# Ask dcmanager-manager to add the subcloud.
# It will do all the real work...
return self.rpc_client.add_subcloud(context, payload)
self.rpc_client.add_subcloud(context, payload)
return db_api.subcloud_db_model_to_dict(subcloud)
except RemoteError as e:
pecan.abort(422, e.value)
except Exception as e:
LOG.exception(e)
except Exception:
LOG.exception("Unable to create subcloud %s" % name)
pecan.abort(500, _('Unable to create subcloud'))
else:
pecan.abort(400, _('Invalid request'))
@ -609,8 +748,9 @@ class SubcloudsController(object):
management_state = payload.get('management-state')
description = payload.get('description')
location = payload.get('location')
group_id = payload.get('group_id')
if not (management_state or description or location):
if not (management_state or description or location or group_id):
pecan.abort(400, _('nothing to update'))
# Syntax checking
@ -619,12 +759,19 @@ class SubcloudsController(object):
consts.MANAGEMENT_MANAGED]:
pecan.abort(400, _('Invalid management-state'))
# Verify the group_id is valid
if group_id:
try:
db_api.subcloud_group_get(context, group_id)
except exceptions.SubcloudGroupNotFound:
pecan.abort(400, _('Invalid group-id'))
try:
# Inform dcmanager-manager that subcloud has been updated.
# It will do all the real work...
subcloud = self.rpc_client.update_subcloud(
context, subcloud_id, management_state=management_state,
description=description, location=location)
description=description, location=location, group_id=group_id)
return subcloud
except RemoteError as e:
pecan.abort(422, e.value)

View File

@ -38,6 +38,11 @@ from dcmanager.rpc import client as rpc_client
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
SUPPORTED_STRATEGY_TYPES = [
consts.SW_UPDATE_TYPE_UPGRADE,
consts.SW_UPDATE_TYPE_PATCH
]
class SwUpdateStrategyController(object):
@ -118,7 +123,7 @@ class SwUpdateStrategyController(object):
strategy_type = payload.get('type')
if not strategy_type:
pecan.abort(400, _('type required'))
if strategy_type not in consts.SW_UPDATE_TYPE_PATCH:
if strategy_type not in SUPPORTED_STRATEGY_TYPES:
pecan.abort(400, _('type invalid'))
subcloud_apply_type = payload.get('subcloud-apply-type')

View File

@ -0,0 +1,56 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Copyright (c) 2020 Wind River Inc.
#
# The right to copy, distribute, modify, or otherwise make use
# of this software may be licensed only pursuant to the terms
# of an applicable Wind River license agreement.
#
from dcmanager.common import consts
from dcmanager.db import api as db_api
from oslo_log import log as logging
LOG = logging.getLogger(__name__)
class AlarmAggregation(object):
    """Aggregate per-subcloud alarm counts and persist them in the db."""

    def __init__(self, context):
        # Admin context used for all database writes.
        self.context = context

    def update_alarm_summary(self, name, fm_client):
        """Fetch the alarm summary for one subcloud and store it.

        Any failure is logged and swallowed so a single bad subcloud
        cannot break the caller's audit loop.
        """
        LOG.debug("Updating alarm summary for %s" % name)
        try:
            summary = fm_client.get_alarm_summary()[0]
            updates = self._set_cloud_status({
                'critical_alarms': summary.critical,
                'major_alarms': summary.major,
                'minor_alarms': summary.minor,
                'warnings': summary.warnings,
            })
            db_api.subcloud_alarms_update(self.context, name, updates)
        except Exception as e:
            LOG.error('Failed to update alarms for %s error: %s' % (name, e))

    def _set_cloud_status(self, alarm_dict):
        """Derive 'cloud_status' from the alarm counts and add it in place."""
        if alarm_dict.get('critical_alarms') > 0:
            alarm_dict['cloud_status'] = consts.ALARM_CRITICAL_STATUS
        elif (alarm_dict.get('major_alarms') > 0 or
                alarm_dict.get('minor_alarms') > 0):
            alarm_dict['cloud_status'] = consts.ALARM_DEGRADED_STATUS
        else:
            alarm_dict['cloud_status'] = consts.ALARM_OK_STATUS
        return alarm_dict

View File

@ -0,0 +1,93 @@
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright (c) 2020 Wind River Systems, Inc.
#
# The right to copy, distribute, modify, or otherwise make use
# of this software may be licensed only pursuant to the terms
# of an applicable Wind River license agreement.
#
import six
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging
from oslo_service import service
from dcmanager.audit.subcloud_audit_manager import SubcloudAuditManager
from dcmanager.common import consts
from dcmanager.common.i18n import _
from dcmanager.common import messaging as rpc_messaging
from dcmanager.common import scheduler
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class DCManagerAuditService(service.Service):
    """Lifecycle manager for a running audit service."""

    def __init__(self):
        super(DCManagerAuditService, self).__init__()
        self.host = cfg.CONF.host
        # To be used by the sw update manager to trigger the patch audit
        self.rpc_api_version = consts.RPC_API_VERSION
        self.topic = consts.TOPIC_DC_MANAGER_AUDIT
        # Declared here but assigned in start(), which runs after the
        # fork when spawning multiple worker processes.
        self.TG = None
        self.target = None
        self._rpc_server = None
        self.subcloud_audit_manager = None

    def start(self):
        """Create the audit manager and begin serving RPC requests."""
        self.init_tgm()
        self.init_audit_managers()
        self.target = oslo_messaging.Target(version=self.rpc_api_version,
                                            server=self.host,
                                            topic=self.topic)
        self._rpc_server = rpc_messaging.get_rpc_server(self.target, self)
        self._rpc_server.start()
        super(DCManagerAuditService, self).start()

    def init_tgm(self):
        """Create the thread group manager for audit green threads."""
        self.TG = scheduler.ThreadGroupManager()

    def init_audit_managers(self):
        """Create the subcloud audit manager and launch its audit loop."""
        self.subcloud_audit_manager = SubcloudAuditManager()
        # Audit availability of all subclouds.
        # Note this will run in a separate green thread
        self.TG.start(self.subcloud_audit_manager.periodic_subcloud_audit)

    def _stop_rpc_server(self):
        """Stop the RPC server so no new requests are accepted."""
        # Stop RPC connection to prevent new requests
        LOG.debug(_("Attempting to stop engine service..."))
        try:
            self._rpc_server.stop()
            self._rpc_server.wait()
            LOG.info('Engine service stopped successfully')
        except Exception as ex:
            LOG.error('Failed to stop engine service: %s',
                      six.text_type(ex))

    def stop(self):
        """Stop RPC handling, tear down green threads, and shut down."""
        self._stop_rpc_server()
        self.TG.stop()
        # Terminate the engine process
        LOG.info("All threads were gone, terminating engine")
        super(DCManagerAuditService, self).stop()

View File

@ -0,0 +1,339 @@
# Copyright 2017 Ericsson AB.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright (c) 2017-2020 Wind River Systems, Inc.
#
# The right to copy, distribute, modify, or otherwise make use
# of this software may be licensed only pursuant to the terms
# of an applicable Wind River license agreement.
#
import eventlet
from keystoneauth1 import exceptions as keystone_exceptions
from oslo_config import cfg
from oslo_log import log as logging
from sysinv.common import constants as sysinv_constants
from dccommon import consts as dccommon_consts
from dccommon.drivers.openstack.sdk_platform import OpenStackDriver
from dcmanager.audit import alarm_aggregation
from dcmanager.common import consts
from dcmanager.common import context
from dcmanager.common import exceptions
from dcmanager.common.i18n import _
from dcmanager.common import manager
from dcmanager.common import scheduler
from dcmanager.db import api as db_api
from dcmanager.rpc import client as dcmanager_rpc_client
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
# We will update the state of each subcloud in the dcorch about once per hour.
# Calculate how many iterations that will be.
SUBCLOUD_STATE_UPDATE_ITERATIONS = \
dccommon_consts.SECONDS_IN_HOUR / CONF.scheduler.subcloud_audit_interval
class SubcloudAuditManager(manager.Manager):
    """Manages tasks related to audits.

    A single green thread runs periodic_subcloud_audit(); each pass of the
    loop fans out one worker green thread per auditable subcloud, waits for
    them all, then sleeps until the next audit interval.
    """

    def __init__(self, *args, **kwargs):
        LOG.debug(_('SubcloudAuditManager initialization...'))

        super(SubcloudAuditManager, self).__init__(
            service_name="subcloud_audit_manager")
        # Admin context used for all db reads and RPC notifications.
        self.context = context.get_admin_context()
        # RPC client used to push availability/state changes to dcmanager.
        self.dcmanager_rpc_client = dcmanager_rpc_client.ManagerClient()
        # Keeps track of greenthreads we create to do work.
        self.thread_group_manager = scheduler.ThreadGroupManager(
            thread_pool_size=100)
        # Track workers created for each subcloud.
        self.subcloud_workers = dict()
        # Number of audits since last subcloud state update
        self.audit_count = 0
        # Helper that stores per-subcloud alarm summaries in the db.
        self.alarm_aggr = alarm_aggregation.AlarmAggregation(self.context)

    def periodic_subcloud_audit(self):
        """Audit availability of subclouds.

        Runs forever in its own green thread; exits only on GreenletExit.
        """
        # Blanket catch all exceptions in the audit so that the audit
        # does not die.
        while True:
            try:
                eventlet.greenthread.sleep(
                    CONF.scheduler.subcloud_audit_interval)
                self._periodic_subcloud_audit_loop()
            except eventlet.greenlet.GreenletExit:
                # We have been told to exit
                return
            except Exception:
                LOG.exception("Error in periodic subcloud audit loop")

    def _periodic_subcloud_audit_loop(self):
        """Audit availability of subclouds loop.

        One pass: decide whether a full state refresh is due, then spawn a
        worker green thread per subcloud and wait for all of them.
        """
        # We will be running in our own green thread here.
        LOG.info('Triggered subcloud audit.')
        self.audit_count += 1

        # Determine whether to trigger a state update to each subcloud.
        # SUBCLOUD_STATE_UPDATE_ITERATIONS is sized so this happens roughly
        # once per hour (see module-level calculation).
        if self.audit_count >= SUBCLOUD_STATE_UPDATE_ITERATIONS:
            update_subcloud_state = True
        else:
            update_subcloud_state = False

        openstack_installed = False
        # The feature of syncing openstack resources to the subclouds was not
        # completed, therefore, auditing the openstack application is disabled
        # Determine whether OpenStack is installed in central cloud
        # os_client = OpenStackDriver(region_name=consts.DEFAULT_REGION_NAME,
        #                             thread_name='dcmanager-audit')
        # sysinv_client = os_client.sysinv_client
        # This could be optimized in the future by attempting to get just the
        # one application. However, sysinv currently treats this as a failure
        # if the application is not installed and generates warning logs, so it
        # would require changes to handle this gracefully.
        # apps = sysinv_client.get_applications()
        # for app in apps:
        #     if app.name == sysinv_constants.HELM_APP_OPENSTACK and app.active:
        #         openstack_installed = True
        #         break

        for subcloud in db_api.subcloud_get_all(self.context):
            # Only audit subclouds that are deployed, deploying, or whose
            # deploy failed; anything earlier in the install flow is skipped.
            if (subcloud.deploy_status not in
                    [consts.DEPLOY_STATE_DONE,
                     consts.DEPLOY_STATE_DEPLOYING,
                     consts.DEPLOY_STATE_DEPLOY_FAILED]):
                LOG.debug("Skip subcloud %s audit, deploy_status: %s" %
                          (subcloud.name, subcloud.deploy_status))
                continue

            # Create a new greenthread for each subcloud to allow the audits
            # to be done in parallel. If there are not enough greenthreads
            # in the pool, this will block until one becomes available.
            self.subcloud_workers[subcloud.name] = \
                self.thread_group_manager.start(self._audit_subcloud,
                                                subcloud.name,
                                                update_subcloud_state,
                                                openstack_installed)

        # Wait for all greenthreads to complete
        LOG.info('Waiting for subcloud audits to complete.')
        for thread in self.subcloud_workers.values():
            thread.wait()

        # Clear the list of workers before next audit
        self.subcloud_workers = dict()
        LOG.info('All subcloud audits have completed.')

    def _update_subcloud_availability(self, subcloud_name,
                                      availability_status=None,
                                      update_state_only=False,
                                      audit_fail_count=None):
        """Notify dcmanager of a subcloud availability/state change via RPC.

        Errors are logged and swallowed so the audit of other subclouds
        continues even if dcmanager cannot be reached.
        """
        try:
            self.dcmanager_rpc_client.update_subcloud_availability(
                self.context, subcloud_name, availability_status,
                update_state_only, audit_fail_count)
            LOG.info('Notifying dcmanager, subcloud:%s, availability:%s' %
                     (subcloud_name,
                      availability_status))
        except Exception:
            LOG.exception('Problem informing dcmanager of subcloud '
                          'availability state change, subcloud: %s'
                          % subcloud_name)

    @staticmethod
    def _get_subcloud_availability_status(subcloud_name, sysinv_client):
        """For each subcloud, if at least one service is active in each
        service of servicegroup-list then declare the subcloud online.

        Returns AVAILABILITY_ONLINE or AVAILABILITY_OFFLINE; any failure
        to query service groups is treated as offline.
        """
        avail_to_set = consts.AVAILABILITY_OFFLINE
        svc_groups = None

        # get a list of service groups in the subcloud
        try:
            svc_groups = sysinv_client.get_service_groups()
        except Exception as e:
            LOG.warn('Cannot retrieve service groups for '
                     'subcloud: %s, %s' % (subcloud_name, e))

        if svc_groups:
            active_sgs = []
            inactive_sgs = []

            # Build 2 lists, 1 of active service groups,
            # one with non-active.
            for sg in svc_groups:
                if sg.state != consts.SERVICE_GROUP_STATUS_ACTIVE:
                    inactive_sgs.append(sg.service_group_name)
                else:
                    active_sgs.append(sg.service_group_name)

            # Create a list of service groups that are only present
            # in non-active list
            inactive_only = [sg for sg in inactive_sgs if
                             sg not in active_sgs]

            # An empty inactive only list and a non-empty active list
            # means we're good to go.
            if not inactive_only and active_sgs:
                avail_to_set = \
                    consts.AVAILABILITY_ONLINE
            else:
                LOG.info("Subcloud:%s has non-active "
                         "service groups: %s" %
                         (subcloud_name, inactive_only))
        return avail_to_set

    def _audit_subcloud_openstack_app(self, subcloud_name, sysinv_client,
                                      openstack_installed):
        """Detect install/remove of the openstack app in a subcloud.

        If the active-state of the openstack application has changed since
        the db record was written, tell dcmanager to add or remove the
        openstack endpoint types for this subcloud.
        """
        openstack_installed_current = False
        # get a list of installed apps in the subcloud
        try:
            apps = sysinv_client.get_applications()
        except Exception:
            LOG.exception('Cannot retrieve installed apps for subcloud:%s'
                          % subcloud_name)
            return

        for app in apps:
            if app.name == sysinv_constants.HELM_APP_OPENSTACK \
                    and app.active:
                # audit find openstack app is installed and active in
                # the subcloud
                openstack_installed_current = True
                break

        endpoint_type_list = dccommon_consts.ENDPOINT_TYPES_LIST_OS
        # Notify dcmanager only on a transition (installed <-> removed).
        if openstack_installed_current and not openstack_installed:
            self.dcmanager_rpc_client.update_subcloud_sync_endpoint_type(
                self.context,
                subcloud_name,
                endpoint_type_list,
                openstack_installed_current)
        elif not openstack_installed_current and openstack_installed:
            self.dcmanager_rpc_client.update_subcloud_sync_endpoint_type(
                self.context,
                subcloud_name,
                endpoint_type_list,
                openstack_installed_current)

    def _audit_subcloud(self, subcloud_name, update_subcloud_state,
                        audit_openstack):
        """Audit a single subcloud.

        :param subcloud_name: name of the subcloud to audit
        :param update_subcloud_state: when True, send a state update to
            dcmanager even if nothing changed
        :param audit_openstack: when True, also audit the openstack
            application state in the subcloud
        """
        # Retrieve the subcloud
        try:
            subcloud = db_api.subcloud_get_by_name(self.context, subcloud_name)
        except exceptions.SubcloudNotFound:
            # Possibility subcloud could have been deleted since the list of
            # subclouds to audit was created.
            LOG.info('Ignoring SubcloudNotFound when auditing subcloud %s' %
                     subcloud_name)
            return

        avail_status_current = subcloud.availability_status
        audit_fail_count = subcloud.audit_fail_count

        # Set defaults to None and disabled so we will still set disabled
        # status if we encounter an error.

        sysinv_client = None
        fm_client = None
        avail_to_set = consts.AVAILABILITY_OFFLINE

        try:
            os_client = OpenStackDriver(region_name=subcloud_name,
                                        thread_name='subcloud-audit')
            sysinv_client = os_client.sysinv_client
            fm_client = os_client.fm_client
        except (keystone_exceptions.EndpointNotFound,
                keystone_exceptions.ConnectFailure,
                keystone_exceptions.ConnectTimeout,
                IndexError):
            if avail_status_current == consts.AVAILABILITY_OFFLINE:
                LOG.info("Identity or Platform endpoint for %s not "
                         "found, ignoring for offline "
                         "subcloud." % subcloud_name)
                return
            else:
                # The subcloud will be marked as offline below.
                LOG.error("Identity or Platform endpoint for online "
                          "subcloud: %s not found." % subcloud_name)
        except Exception:
            LOG.exception("Failed to get OS Client for subcloud: %s"
                          % subcloud_name)

        # Check availability of the subcloud
        if sysinv_client:
            avail_to_set = self._get_subcloud_availability_status(
                subcloud_name, sysinv_client)

        if avail_to_set == consts.AVAILABILITY_OFFLINE:
            # Cap the fail count so it cannot grow without bound.
            if audit_fail_count < consts.AVAIL_FAIL_COUNT_MAX:
                audit_fail_count = audit_fail_count + 1

            if (avail_status_current == consts.AVAILABILITY_ONLINE) and \
                    (audit_fail_count < consts.AVAIL_FAIL_COUNT_TO_ALARM):
                # Do not set offline until we have failed audit
                # the requisite number of times
                avail_to_set = consts.AVAILABILITY_ONLINE
        else:
            # In the case of a one off blip, we may need to set the
            # fail count back to 0
            audit_fail_count = 0

        if avail_to_set != avail_status_current:

            if avail_to_set == consts.AVAILABILITY_ONLINE:
                audit_fail_count = 0

            LOG.info('Setting new availability status: %s '
                     'on subcloud: %s' %
                     (avail_to_set, subcloud_name))
            self._update_subcloud_availability(
                subcloud_name,
                availability_status=avail_to_set,
                audit_fail_count=audit_fail_count)

        elif audit_fail_count != subcloud.audit_fail_count:
            # Availability unchanged but fail count moved; persist it.
            self._update_subcloud_availability(
                subcloud_name,
                availability_status=None,
                audit_fail_count=audit_fail_count)

        elif update_subcloud_state:
            # Nothing has changed, but we want to send a state update for this
            # subcloud as an audit.
            self._update_subcloud_availability(
                subcloud_name,
                availability_status=avail_status_current,
                update_state_only=True)
            # NOTE(review): audit_count is shared by all per-subcloud worker
            # threads; resetting it here affects the whole audit pass —
            # confirm this is intended.
            self.audit_count = 0

        if avail_to_set == consts.AVAILABILITY_ONLINE:
            # If subcloud is online, get alarm summary and store in db,
            if fm_client:
                self.alarm_aggr.update_alarm_summary(subcloud_name, fm_client)

            # Audit openstack application in the subcloud
            if audit_openstack and sysinv_client:
                self._audit_subcloud_openstack_app(
                    subcloud_name, sysinv_client, subcloud.openstack_installed)

View File

@ -0,0 +1,63 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Copyright (c) 2020 Wind River Systems, Inc.
#
# The right to copy, distribute, modify, or otherwise make use
# of this software may be licensed only pursuant to the terms
# of an applicable Wind River license agreement.
#
"""
DC Manager Audit Service.
"""
import eventlet
eventlet.monkey_patch()
from oslo_config import cfg
from oslo_i18n import _lazy
from oslo_log import log as logging
from oslo_service import service
from dcmanager.common import config
from dcmanager.common import messaging
_lazy.enable_lazy()
config.register_options()
config.register_keystone_options()
LOG = logging.getLogger('dcmanager.audit')
CONF = cfg.CONF
def main():
    """Entry point for the dcmanager-audit service."""
    logging.register_options(CONF)
    CONF(project='dcmanager', prog='dcmanager-audit')
    logging.setup(cfg.CONF, 'dcmanager-audit')
    logging.set_defaults()
    messaging.setup()

    # Imported here so config and logging are initialized before the
    # service module (and its transitive imports) are loaded.
    from dcmanager.audit import service as audit

    audit_service = audit.DCManagerAuditService()
    launcher = service.launch(
        cfg.CONF, audit_service, workers=CONF.audit_workers)

    LOG.info("Configuration:")
    cfg.CONF.log_opt_values(LOG, logging.INFO)

    launcher.wait()
if __name__ == '__main__':
main()

View File

@ -11,7 +11,7 @@
# License for the specific language governing permissions and limitations
# under the License.
#
# Copyright (c) 2017 Wind River Systems, Inc.
# Copyright (c) 2017-2020 Wind River Systems, Inc.
#
# The right to copy, distribute, modify, or otherwise make use
# of this software may be licensed only pursuant to the terms
@ -112,7 +112,7 @@ scheduler_opts = [
default=True,
help='boolean value for enable/disable periodic tasks'),
cfg.IntOpt('subcloud_audit_interval',
default=180,
default=20,
help='periodic time interval for subcloud audit'),
cfg.IntOpt('patch_audit_interval',
default=10,
@ -122,6 +122,8 @@ scheduler_opts = [
common_opts = [
cfg.IntOpt('workers', default=1,
help='number of workers'),
cfg.IntOpt('audit_workers', default=1,
help='number of audit workers'),
cfg.StrOpt('host',
default='localhost',
help='hostname of the machine')

View File

@ -12,7 +12,7 @@
# License for the specific language governing permissions and limitations
# under the License.
#
# Copyright (c) 2017-2019 Wind River Systems, Inc.
# Copyright (c) 2017-2020 Wind River Systems, Inc.
#
# The right to copy, distribute, modify, or otherwise make use
# of this software may be licensed only pursuant to the terms
@ -23,7 +23,9 @@ RPC_API_VERSION = "1.0"
TOPIC_DC_MANAGER = "dcmanager"
PATCH_VAULT_DIR = "/opt/patch-vault"
TOPIC_DC_MANAGER_AUDIT = "dcmanager-audit"
PATCH_VAULT_DIR = "/opt/dc-vault/patches"
# Well known region names
SYSTEM_CONTROLLER_NAME = "SystemController"
@ -54,7 +56,8 @@ SERVICE_GROUP_STATUS_ACTIVE = "active"
AVAIL_FAIL_COUNT_TO_ALARM = 2
AVAIL_FAIL_COUNT_MAX = 9999
# Software update type
# Software update strategy types
SW_UPDATE_TYPE_FIRMWARE = "firmware"
SW_UPDATE_TYPE_PATCH = "patch"
SW_UPDATE_TYPE_UPGRADE = "upgrade"
@ -77,6 +80,13 @@ SW_UPDATE_ACTION_ABORT = "abort"
SUBCLOUD_APPLY_TYPE_PARALLEL = "parallel"
SUBCLOUD_APPLY_TYPE_SERIAL = "serial"
# Values for the Default Subcloud Group
DEFAULT_SUBCLOUD_GROUP_ID = 1
DEFAULT_SUBCLOUD_GROUP_NAME = 'Default'
DEFAULT_SUBCLOUD_GROUP_DESCRIPTION = 'Default Subcloud Group'
DEFAULT_SUBCLOUD_GROUP_UPDATE_APPLY_TYPE = SUBCLOUD_APPLY_TYPE_PARALLEL
DEFAULT_SUBCLOUD_GROUP_MAX_PARALLEL_SUBCLOUDS = 2
# Strategy step states
STRATEGY_STATE_INITIAL = "initial"
STRATEGY_STATE_UPDATING_PATCHES = "updating patches"
@ -87,8 +97,19 @@ STRATEGY_STATE_COMPLETE = "complete"
STRATEGY_STATE_ABORTED = "aborted"
STRATEGY_STATE_FAILED = "failed"
STRATEGY_STATE_INSTALLING_LICENSE = "installing license"
STRATEGY_STATE_IMPORTING_LOAD = "importing load"
STRATEGY_STATE_STARTING = "starting"
STRATEGY_STATE_LOCKING_CONTROLLER = "locking controller"
STRATEGY_STATE_UPGRADING_SIMPLEX = "upgrading simplex"
STRATEGY_STATE_MIGRATING_DATA = "migrating data"
STRATEGY_STATE_UNLOCKING_CONTROLLER = "unlocking controller"
STRATEGY_STATE_ACTIVATING = "activating"
# Subcloud deploy status states
DEPLOY_STATE_NONE = 'not-deployed'
DEPLOY_STATE_PRE_DEPLOY = 'pre-deploy'
DEPLOY_STATE_DEPLOY_PREP_FAILED = 'deploy-prep-failed'
DEPLOY_STATE_PRE_INSTALL = 'pre-install'
DEPLOY_STATE_PRE_INSTALL_FAILED = 'pre-install-failed'
DEPLOY_STATE_INSTALLING = 'installing'
@ -98,3 +119,25 @@ DEPLOY_STATE_BOOTSTRAP_FAILED = 'bootstrap-failed'
DEPLOY_STATE_DEPLOYING = 'deploying'
DEPLOY_STATE_DEPLOY_FAILED = 'deploy-failed'
DEPLOY_STATE_DONE = 'complete'
# Alarm aggregation
ALARMS_DISABLED = "disabled"
ALARM_OK_STATUS = "OK"
ALARM_DEGRADED_STATUS = "degraded"
ALARM_CRITICAL_STATUS = "critical"
# subcloud deploy file options
ANSIBLE_OVERRIDES_PATH = '/opt/dc/ansible'
DEPLOY_PLAYBOOK = "deploy_playbook"
DEPLOY_OVERRIDES = "deploy_overrides"
DEPLOY_CHART = "deploy_chart"
DEPLOY_CONFIG = 'deploy_config'
DEPLOY_COMMON_FILE_OPTIONS = [
DEPLOY_PLAYBOOK,
DEPLOY_OVERRIDES,
DEPLOY_CHART
]
# Active load state
LOAD_STATE_ACTIVE = 'active'

View File

@ -65,6 +65,12 @@ class BadRequest(DCManagerException):
message = _('Bad %(resource)s request: %(msg)s')
class ValidateFail(DCManagerException):
    """Raised when input validation fails.

    Unlike the other exceptions in this module, the message text is
    supplied by the caller rather than taken from a class-level template.
    """
    def __init__(self, message):
        self.message = message
        super(ValidateFail, self).__init__()
class NotFound(DCManagerException):
message = _("Not found")
@ -94,6 +100,14 @@ class InvalidConfigurationOption(DCManagerException):
"%(opt_value)s")
class InvalidParameterValue(DCManagerException):
message = _("%(err)s")
class SubcloudAlreadyExists(Conflict):
message = _("Subcloud with region_name=%(region_name)s already exists")
class SubcloudNotFound(NotFound):
message = _("Subcloud with id %(subcloud_id)s doesn't exist.")
@ -124,6 +138,22 @@ class SubcloudPatchOptsNotFound(NotFound):
"defaults will be used.")
class SubcloudGroupNotFound(NotFound):
message = _("Subcloud Group with id %(group_id)s doesn't exist.")
class SubcloudGroupNameNotFound(NotFound):
message = _("Subcloud Group with name %(name)s doesn't exist.")
class SubcloudGroupNameViolation(DCManagerException):
message = _("Default Subcloud Group name cannot be changed or reused.")
class SubcloudGroupDefaultNotDeletable(DCManagerException):
message = _("Default Subcloud Group %(group_id)s may not be deleted.")
class ConnectionRefused(DCManagerException):
message = _("Connection to the service endpoint is refused")

View File

@ -10,7 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
#
# Copyright (c) 2017 Wind River Systems, Inc.
# Copyright (c) 2017-2020 Wind River Systems, Inc.
#
# The right to copy, distribute, modify, or otherwise make use
# of this software may be licensed only pursuant to the terms

View File

@ -22,6 +22,7 @@
import grp
import itertools
import netaddr
import os
import pwd
import six.moves
@ -53,6 +54,59 @@ def get_batch_projects(batch_size, project_list, fillvalue=None):
return six.moves.zip_longest(fillvalue=fillvalue, *args)
def validate_address_str(ip_address_str, network):
    """Validate that a string is a usable IP address within ``network``.

    Returns the parsed netaddr.IPAddress, or raises ValidateFail when the
    string is malformed, has the wrong IP version, is the network or
    broadcast address, or falls outside the subnet.
    """
    try:
        ip_address = netaddr.IPAddress(ip_address_str)
        # Every check below raises, so a flat series of "if"s behaves
        # exactly like the original elif chain.
        if ip_address.version != network.version:
            raise exceptions.ValidateFail(
                "Invalid IP version - must match network version " +
                ip_version_to_string(network.version))
        if ip_address == network:
            raise exceptions.ValidateFail("Cannot use network address")
        if ip_address == network.broadcast:
            raise exceptions.ValidateFail("Cannot use broadcast address")
        if ip_address not in network:
            raise exceptions.ValidateFail(
                "Address must be in subnet %s" % str(network))
        return ip_address
    except netaddr.AddrFormatError:
        raise exceptions.ValidateFail(
            "Invalid address - not a valid IP address")
def ip_version_to_string(ip_version):
    """Return a human-readable name for an IP version (4/6), else "IP"."""
    names = {4: "IPv4", 6: "IPv6"}
    return names.get(ip_version, "IP")
def validate_network_str(network_str, minimum_size,
                         existing_networks=None, multicast=False):
    """Validate a subnet string and return the parsed netaddr.IPNetwork.

    :param network_str: subnet in CIDR form, e.g. "192.168.1.0/24"
    :param minimum_size: minimum number of addresses the subnet must hold
    :param existing_networks: optional iterable of subnets the new subnet
        must not overlap with
    :param multicast: when True, the subnet must be a multicast range
    :raises exceptions.ValidateFail: on any failed check
    """
    try:
        network = netaddr.IPNetwork(network_str)
        if network.size < minimum_size:
            raise exceptions.ValidateFail("Subnet too small - must have at "
                                          "least %d addresses" % minimum_size)
        if network.version == 6 and network.prefixlen < 64:
            raise exceptions.ValidateFail("IPv6 minimum prefix length is 64")
        # Bug fix: these checks used to be branches of one elif chain, so
        # the multicast check was silently skipped whenever
        # existing_networks was supplied. Each check must run independently.
        if existing_networks:
            if any(network.ip in subnet for subnet in existing_networks):
                raise exceptions.ValidateFail("Subnet overlaps with another "
                                              "configured subnet")
        if multicast and not network.is_multicast():
            raise exceptions.ValidateFail("Invalid subnet - must be multicast")
        return network
    except netaddr.AddrFormatError:
        raise exceptions.ValidateFail(
            "Invalid subnet - not a valid IP subnet")
# to do validate the quota limits
def validate_quota_limits(payload):
for resource in payload:
@ -146,3 +200,10 @@ def synchronized(name, external=True, fair=False):
return lockutils.synchronized(name, lock_file_prefix=prefix,
external=external, lock_path=lock_path,
semaphores=None, delay=0.01, fair=fair)
def get_filename_by_prefix(dir_path, prefix):
    """Return the first directory entry starting with prefix, else None.

    os.listdir order is arbitrary, so with multiple matches the result is
    platform dependent (same behavior as before).
    """
    matches = (entry for entry in os.listdir(dir_path)
               if entry.startswith(prefix))
    return next(matches, None)

View File

@ -13,7 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
#
# Copyright (c) 2017-2019 Wind River Systems, Inc.
# Copyright (c) 2017-2020 Wind River Systems, Inc.
#
# The right to copy, distribute, modify, or otherwise make use
# of this software may be licensed only pursuant to the terms
@ -67,7 +67,8 @@ def subcloud_db_model_to_dict(subcloud):
"systemcontroller-gateway-ip":
subcloud.systemcontroller_gateway_ip,
"created-at": subcloud.created_at,
"updated-at": subcloud.updated_at}
"updated-at": subcloud.updated_at,
"group_id": subcloud.group_id}
return result
@ -75,14 +76,14 @@ def subcloud_create(context, name, description, location, software_version,
management_subnet, management_gateway_ip,
management_start_ip, management_end_ip,
systemcontroller_gateway_ip, deploy_status,
openstack_installed):
openstack_installed, group_id):
"""Create a subcloud."""
return IMPL.subcloud_create(context, name, description, location,
software_version,
management_subnet, management_gateway_ip,
management_start_ip, management_end_ip,
systemcontroller_gateway_ip, deploy_status,
openstack_installed)
openstack_installed, group_id)
def subcloud_get(context, subcloud_id):
@ -113,12 +114,13 @@ def subcloud_get_all_with_status(context):
def subcloud_update(context, subcloud_id, management_state=None,
availability_status=None, software_version=None,
description=None, location=None, audit_fail_count=None,
deploy_status=None, openstack_installed=None):
deploy_status=None, openstack_installed=None,
group_id=None):
"""Update a subcloud or raise if it does not exist."""
return IMPL.subcloud_update(context, subcloud_id, management_state,
availability_status, software_version,
description, location, audit_fail_count,
deploy_status, openstack_installed)
deploy_status, openstack_installed, group_id)
def subcloud_destroy(context, subcloud_id):
@ -196,6 +198,67 @@ def subcloud_status_destroy_all(context, subcloud_id):
return IMPL.subcloud_status_destroy_all(context, subcloud_id)
###################
# subcloud_group
def subcloud_group_db_model_to_dict(subcloud_group):
    """Convert subcloud_group db model to dictionary.

    Note the created/updated keys use dashes while the model attributes
    use underscores.
    """
    fields = (("id", "id"),
              ("name", "name"),
              ("description", "description"),
              ("update_apply_type", "update_apply_type"),
              ("max_parallel_subclouds", "max_parallel_subclouds"),
              ("created-at", "created_at"),
              ("updated-at", "updated_at"))
    return {key: getattr(subcloud_group, attr) for key, attr in fields}
def subcloud_group_create(context, name, description, update_apply_type,
max_parallel_subclouds):
"""Create a subcloud_group."""
return IMPL.subcloud_group_create(context,
name,
description,
update_apply_type,
max_parallel_subclouds)
def subcloud_group_get(context, group_id):
"""Retrieve a subcloud_group or raise if it does not exist."""
return IMPL.subcloud_group_get(context, group_id)
def subcloud_group_get_by_name(context, name):
    """Retrieve a subcloud_group by name or raise if it does not exist."""
    return IMPL.subcloud_group_get_by_name(context, name)
def subcloud_group_get_all(context):
"""Retrieve all subcloud groups."""
return IMPL.subcloud_group_get_all(context)
def subcloud_get_for_group(context, group_id):
    """Retrieve the subclouds for a subcloud group.

    Delegates to the backend implementation; presumably raises if the
    group does not exist — confirm against IMPL.subcloud_get_for_group.
    """
    return IMPL.subcloud_get_for_group(context, group_id)
def subcloud_group_update(context, group_id, name, description,
update_apply_type, max_parallel_subclouds):
"""Update the subcloud group or raise if it does not exist."""
return IMPL.subcloud_group_update(context,
group_id,
name,
description,
update_apply_type,
max_parallel_subclouds)
def subcloud_group_destroy(context, group_id):
"""Destroy the subcloud group or raise if it does not exist."""
return IMPL.subcloud_group_destroy(context, group_id)
###################
def sw_update_strategy_db_model_to_dict(sw_update_strategy):
@ -403,3 +466,25 @@ def db_sync(engine, version=None):
def db_version(engine):
"""Display the current database version."""
return IMPL.db_version(engine)
# Alarm Resources
###################
def subcloud_alarms_get(context, name):
    """Retrieve the alarm summary record for a subcloud by name."""
    return IMPL.subcloud_alarms_get(context, name)


def subcloud_alarms_get_all(context, name=None):
    """Retrieve alarm summary records, optionally filtered by name."""
    return IMPL.subcloud_alarms_get_all(context, name=name)


def subcloud_alarms_create(context, name, values):
    """Create an alarm summary record for a subcloud."""
    return IMPL.subcloud_alarms_create(context, name, values)


def subcloud_alarms_update(context, name, values):
    """Update the alarm summary record for a subcloud."""
    return IMPL.subcloud_alarms_update(context, name, values)


def subcloud_alarms_delete(context, name):
    """Delete the alarm summary record for a subcloud."""
    return IMPL.subcloud_alarms_delete(context, name)

View File

@ -13,7 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
#
# Copyright (c) 2017-2019 Wind River Systems, Inc.
# Copyright (c) 2017-2020 Wind River Systems, Inc.
#
# The right to copy, distribute, modify, or otherwise make use
# of this software may be licensed only pursuant to the terms
@ -24,13 +24,20 @@
Implementation of SQLAlchemy backend.
"""
import sqlalchemy
import sys
import threading
from oslo_db import exception as db_exc
from oslo_db.exception import DBDuplicateEntry
from oslo_db.sqlalchemy import enginefacade
from oslo_log import log as logging
from oslo_utils import strutils
from oslo_utils import uuidutils
from sqlalchemy import desc
from sqlalchemy.orm.exc import MultipleResultsFound
from sqlalchemy.orm.exc import NoResultFound
from sqlalchemy.orm import joinedload_all
from dcmanager.common import consts
@ -206,7 +213,7 @@ def subcloud_create(context, name, description, location, software_version,
management_subnet, management_gateway_ip,
management_start_ip, management_end_ip,
systemcontroller_gateway_ip, deploy_status,
openstack_installed):
openstack_installed, group_id):
with write_session() as session:
subcloud_ref = models.Subcloud()
subcloud_ref.name = name
@ -223,6 +230,7 @@ def subcloud_create(context, name, description, location, software_version,
subcloud_ref.deploy_status = deploy_status
subcloud_ref.audit_fail_count = 0
subcloud_ref.openstack_installed = openstack_installed
subcloud_ref.group_id = group_id
session.add(subcloud_ref)
return subcloud_ref
@ -231,7 +239,8 @@ def subcloud_create(context, name, description, location, software_version,
def subcloud_update(context, subcloud_id, management_state=None,
availability_status=None, software_version=None,
description=None, location=None, audit_fail_count=None,
deploy_status=None, openstack_installed=None):
deploy_status=None, openstack_installed=None,
group_id=None):
with write_session() as session:
subcloud_ref = subcloud_get(context, subcloud_id)
if management_state is not None:
@ -250,6 +259,8 @@ def subcloud_update(context, subcloud_id, management_state=None,
subcloud_ref.deploy_status = deploy_status
if openstack_installed is not None:
subcloud_ref.openstack_installed = openstack_installed
if group_id is not None:
subcloud_ref.group_id = group_id
subcloud_ref.save(session)
return subcloud_ref
@ -263,7 +274,6 @@ def subcloud_destroy(context, subcloud_id):
##########################
@require_context
def subcloud_status_get(context, subcloud_id, endpoint_type):
result = model_query(context, models.SubcloudStatus). \
@ -525,6 +535,138 @@ def sw_update_opts_default_destroy(context):
session.delete(sw_update_opts_default_ref)
##########################
# subcloud group
##########################
@require_context
def subcloud_group_get(context, group_id):
    """Fetch a single non-deleted subcloud group by its numeric id.

    :raises SubcloudGroupNotFound: no matching row
    :raises InvalidParameterValue: more than one matching row
    """
    query = model_query(context, models.SubcloudGroup)
    query = query.filter_by(deleted=0).filter_by(id=group_id)
    try:
        group = query.one()
    except NoResultFound:
        raise exception.SubcloudGroupNotFound(group_id=group_id)
    except MultipleResultsFound:
        raise exception.InvalidParameterValue(
            err="Multiple entries found for subcloud group %s" % group_id)
    return group
@require_context
def subcloud_group_get_by_name(context, name):
    """Fetch a single non-deleted subcloud group by its unique name.

    :raises SubcloudGroupNameNotFound: no matching row
    :raises InvalidParameterValue: more than one matching row
    """
    query = model_query(context, models.SubcloudGroup)
    query = query.filter_by(deleted=0).filter_by(name=name)
    try:
        group = query.one()
    except NoResultFound:
        raise exception.SubcloudGroupNameNotFound(name=name)
    except MultipleResultsFound:
        # Should be impossible: 'name' carries a UNIQUE constraint.
        raise exception.InvalidParameterValue(
            err="Multiple entries found for subcloud group %s" % name)
    return group
# This method returns all subclouds for a particular subcloud group
@require_context
def subcloud_get_for_group(context, group_id):
    """Return the non-deleted subclouds of one group, ordered by id."""
    query = model_query(context, models.Subcloud).filter_by(deleted=0)
    query = query.filter_by(group_id=group_id)
    return query.order_by(models.Subcloud.id).all()
@require_context
def subcloud_group_get_all(context):
    """Return every non-deleted subcloud group, ordered by id."""
    query = model_query(context, models.SubcloudGroup).filter_by(deleted=0)
    return query.order_by(models.SubcloudGroup.id).all()
@require_admin_context
def subcloud_group_create(context,
                          name,
                          description,
                          update_apply_type,
                          max_parallel_subclouds):
    """Insert a new subcloud group row and return the model object."""
    group = models.SubcloudGroup()
    group.name = name
    group.description = description
    group.update_apply_type = update_apply_type
    group.max_parallel_subclouds = max_parallel_subclouds
    with write_session() as session:
        session.add(group)
        return group
@require_admin_context
def subcloud_group_update(context,
                          group_id,
                          name=None,
                          description=None,
                          update_apply_type=None,
                          max_parallel_subclouds=None):
    """Update a subcloud group; only non-None arguments are applied.

    :param group_id: id of the group to update
    :raises SubcloudGroupNameViolation: renaming the default group, or
        reusing the reserved default group name for another group
    :return: the updated SubcloudGroup model
    """
    with write_session() as session:
        subcloud_group_ref = subcloud_group_get(context, group_id)
        if name is not None:
            # Do not allow the name of the default group to be edited
            if subcloud_group_ref.id == consts.DEFAULT_SUBCLOUD_GROUP_ID:
                raise exception.SubcloudGroupNameViolation()
            # do not allow another group to use the default group name
            if name == consts.DEFAULT_SUBCLOUD_GROUP_NAME:
                raise exception.SubcloudGroupNameViolation()
            subcloud_group_ref.name = name
        if description is not None:
            subcloud_group_ref.description = description
        if update_apply_type is not None:
            subcloud_group_ref.update_apply_type = update_apply_type
        if max_parallel_subclouds is not None:
            subcloud_group_ref.max_parallel_subclouds = max_parallel_subclouds
        subcloud_group_ref.save(session)
        return subcloud_group_ref
@require_admin_context
def subcloud_group_destroy(context, group_id):
    """Delete a subcloud group; the default group may never be removed.

    :raises SubcloudGroupDefaultNotDeletable: group_id is the default group
    """
    with write_session() as session:
        group = subcloud_group_get(context, group_id)
        if group.id == consts.DEFAULT_SUBCLOUD_GROUP_ID:
            raise exception.SubcloudGroupDefaultNotDeletable(group_id=group_id)
        session.delete(group)
def initialize_subcloud_group_default(engine):
    """Insert the default subcloud group row if it does not already exist.

    Called after migrations complete so the database always contains the
    group with id DEFAULT_SUBCLOUD_GROUP_ID. Idempotent: a duplicate-entry
    error from a pre-existing row is silently ignored.

    :param engine: SQLAlchemy engine bound to the dcmanager database.
    """
    try:
        default_group = {
            "id": consts.DEFAULT_SUBCLOUD_GROUP_ID,
            "name": consts.DEFAULT_SUBCLOUD_GROUP_NAME,
            "description": consts.DEFAULT_SUBCLOUD_GROUP_DESCRIPTION,
            "update_apply_type":
                consts.DEFAULT_SUBCLOUD_GROUP_UPDATE_APPLY_TYPE,
            "max_parallel_subclouds":
                consts.DEFAULT_SUBCLOUD_GROUP_MAX_PARALLEL_SUBCLOUDS,
            "deleted": 0
        }
        meta = sqlalchemy.MetaData(bind=engine)
        subcloud_group = sqlalchemy.Table('subcloud_group', meta, autoload=True)
        try:
            with engine.begin() as conn:
                conn.execute(subcloud_group.insert(), default_group)
            LOG.info("Default Subcloud Group created")
        except DBDuplicateEntry:
            # The default already exists.
            pass
    except Exception:
        # Bug fix: the previous code called LOG.error(msg, ex), which makes
        # the logger attempt msg % (ex,) against a message with no
        # placeholder and fail to format. LOG.exception logs the message
        # plus the full traceback of the active exception.
        LOG.exception("Exception occurred setting up default subcloud group")
##########################
@ -606,13 +748,107 @@ def strategy_step_destroy_all(context):
##########################
def initialize_db_defaults(engine):
    """Seed rows that every deployment expects to exist (idempotent)."""
    # a default value may already exist. If it does not, create it
    initialize_subcloud_group_default(engine)
def db_sync(engine, version=None):
    """Migrate the database to `version` or the most recent version."""
    result = migration.db_sync(engine, version=version)
    # migration.db_sync returns None once the migration has completed;
    # seed the default rows at that point.
    if result is None:
        initialize_db_defaults(engine)
    return result
def db_version(engine):
    """Display the current database version."""
    # Thin wrapper over oslo.db's sqlalchemy-migrate helper.
    return migration.db_version(engine)
##########################
def add_identity_filter(query, value,
                        use_name=None):
    """Adds an identity filter to a query.

    Filters results by 'id' if the supplied value is a valid integer,
    otherwise attempts to filter results by 'uuid', and finally falls
    back to filtering by 'name'.

    :param query: Initial query to add filter to.
    :param value: Value for filtering results by.
    :param use_name: Retained for interface compatibility; it has no
        effect because the non-int, non-uuid path always filters by name.
    :return: Modified query.
    """
    if strutils.is_int_like(value):
        return query.filter_by(id=value)
    if uuidutils.is_uuid_like(value):
        return query.filter_by(uuid=value)
    # The original 'elif use_name:' and 'else:' branches were identical,
    # so the dead branch is collapsed here (behavior unchanged).
    return query.filter_by(name=value)
@require_context
def _subcloud_alarms_get(context, name):
    """Look up the single alarm-summary row matching an identity value.

    :raises SubcloudNotFound: no matching row
    :raises InvalidParameterValue: more than one matching row
    """
    base = model_query(context, models.SubcloudAlarmSummary).filter_by(deleted=0)
    filtered = add_identity_filter(base, name, use_name=True)
    try:
        return filtered.one()
    except NoResultFound:
        raise exception.SubcloudNotFound(region_name=name)
    except MultipleResultsFound:
        raise exception.InvalidParameterValue(
            err="Multiple entries found for subcloud %s" % name)
@require_context
def subcloud_alarms_get(context, name):
    """Return the alarm summary for one subcloud (public wrapper)."""
    return _subcloud_alarms_get(context, name)
@require_context
def subcloud_alarms_get_all(context, name=None):
    """List alarm summaries, newest id first, optionally for one subcloud."""
    query = model_query(context, models.SubcloudAlarmSummary).filter_by(deleted=0)
    if name:
        query = add_identity_filter(query, name, use_name=True)
    ordered = query.order_by(desc(models.SubcloudAlarmSummary.id))
    return ordered.all()
@require_admin_context
def subcloud_alarms_create(context, name, values):
    """Create an alarm-summary row for a subcloud.

    :param context: request context (admin required)
    :param name: subcloud name stored on the row
    :param values: dict of column values; a 'uuid' is generated when absent
    :raises SubcloudAlreadyExists: a row with the same unique name/uuid exists
    :return: the new SubcloudAlarmSummary model
    """
    try:
        with write_session() as session:
            result = models.SubcloudAlarmSummary()
            result.name = name
            if not values.get('uuid'):
                values['uuid'] = uuidutils.generate_uuid()
            result.update(values)
            # session.add() does not touch the database; the duplicate-key
            # error is raised at flush/commit when the session exits. The
            # previous code wrapped only session.add() and therefore could
            # never catch DBDuplicateEntry; the handler now wraps the whole
            # write_session block.
            session.add(result)
        return result
    except db_exc.DBDuplicateEntry:
        raise exception.SubcloudAlreadyExists(region_name=name)
@require_admin_context
def subcloud_alarms_update(context, name, values):
    """Apply values to an existing subcloud alarm summary and persist it."""
    with write_session() as session:
        summary = _subcloud_alarms_get(context, name)
        summary.update(values)
        summary.save(session)
        return summary
@require_admin_context
def subcloud_alarms_delete(context, name):
    """Remove the alarm summary row(s) matching this subcloud name."""
    # Note: this issues a physical DELETE, not the soft 'deleted' flag
    # that the get/list helpers filter on.
    with write_session() as session:
        session.query(models.SubcloudAlarmSummary).\
            filter_by(name=name).delete()

View File

@ -0,0 +1,54 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Copyright (c) 2020 Wind River Inc.
#
# The right to copy, distribute, modify, or otherwise make use
# of this software may be licensed only pursuant to the terms
# of an applicable Wind River license agreement.
#
import sqlalchemy
def upgrade(migrate_engine):
    """Create the 'subcloud_alarms' table (one summary row per subcloud)."""
    meta = sqlalchemy.MetaData()
    meta.bind = migrate_engine

    subcloud_alarms = sqlalchemy.Table(
        'subcloud_alarms', meta,
        sqlalchemy.Column('id', sqlalchemy.Integer,
                          primary_key=True, nullable=False),
        sqlalchemy.Column('uuid', sqlalchemy.String(36), unique=True),
        # one row per subcloud, keyed by its unique name
        sqlalchemy.Column('name', sqlalchemy.String(255), unique=True),
        # per-severity alarm counts reported for the subcloud
        sqlalchemy.Column('critical_alarms', sqlalchemy.Integer),
        sqlalchemy.Column('major_alarms', sqlalchemy.Integer),
        sqlalchemy.Column('minor_alarms', sqlalchemy.Integer),
        sqlalchemy.Column('warnings', sqlalchemy.Integer),
        sqlalchemy.Column('cloud_status', sqlalchemy.String(64)),
        # standard oslo soft-delete / timestamp bookkeeping columns
        sqlalchemy.Column('created_at', sqlalchemy.DateTime),
        sqlalchemy.Column('updated_at', sqlalchemy.DateTime),
        sqlalchemy.Column('deleted_at', sqlalchemy.DateTime),
        sqlalchemy.Column('deleted', sqlalchemy.Integer),
        mysql_engine='InnoDB',
        mysql_charset='utf8'
    )
    subcloud_alarms.create()
def downgrade(migrate_engine):
    # Downgrades are deliberately unsupported for dcmanager migrations.
    raise NotImplementedError('Database downgrade not supported - '
                              'would drop all tables')

View File

@ -0,0 +1,101 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Copyright (c) 2020 Wind River Systems, Inc.
#
# The right to copy, distribute, modify, or otherwise make use
# of this software may be licensed only pursuant to the terms
# of an applicable Wind River license agreement.
#
from migrate.changeset import constraint
import sqlalchemy
from dcmanager.common import consts
# MySQL table options used by this migration.
# Bug fix: ENGINE previously ended with a trailing comma, which made it the
# tuple ('InnoDB',) instead of the string 'InnoDB', so mysql_engine=ENGINE
# received a tuple rather than the engine name.
ENGINE = 'InnoDB'
CHARSET = 'utf8'
def upgrade(migrate_engine):
    """Add the 'subcloud_group' table, seed the default group, and give
    'subclouds' a group_id foreign key defaulting to that group."""
    meta = sqlalchemy.MetaData(bind=migrate_engine)

    # Declare the new subcloud_group table
    subcloud_group = sqlalchemy.Table(
        'subcloud_group', meta,
        sqlalchemy.Column('id', sqlalchemy.Integer,
                          primary_key=True,
                          autoincrement=True,
                          nullable=False),
        sqlalchemy.Column('name', sqlalchemy.String(255), unique=True),
        sqlalchemy.Column('description', sqlalchemy.String(255)),
        sqlalchemy.Column('update_apply_type', sqlalchemy.String(255)),
        sqlalchemy.Column('max_parallel_subclouds', sqlalchemy.Integer),
        # spare columns for future use without another schema migration
        sqlalchemy.Column('reserved_1', sqlalchemy.Text),
        sqlalchemy.Column('reserved_2', sqlalchemy.Text),
        sqlalchemy.Column('created_at', sqlalchemy.DateTime),
        sqlalchemy.Column('updated_at', sqlalchemy.DateTime),
        sqlalchemy.Column('deleted_at', sqlalchemy.DateTime),
        sqlalchemy.Column('deleted', sqlalchemy.Integer, default=0),
        mysql_engine=ENGINE,
        mysql_charset=CHARSET
    )
    subcloud_group.create()

    subclouds = sqlalchemy.Table('subclouds', meta, autoload=True)

    # TODO(abailey) do we want to fix the missing constraint for strategy_steps
    # strat_steps = sqlalchemy.Table('strategy_steps', meta, autoload=True)
    # strat_fkey = constraint.ForeignKeyConstraint(
    #     columns=[strat_steps.c.subcloud_id],
    #     refcolumns=[subclouds.c.id],
    #     name='strat_subcloud_ref')
    # strat_steps.append_constraint(strat_fkey)

    # Create a default subcloud group
    default_group = {
        "id": consts.DEFAULT_SUBCLOUD_GROUP_ID,
        "name": consts.DEFAULT_SUBCLOUD_GROUP_NAME,
        "description": consts.DEFAULT_SUBCLOUD_GROUP_DESCRIPTION,
        "update_apply_type": consts.DEFAULT_SUBCLOUD_GROUP_UPDATE_APPLY_TYPE,
        "max_parallel_subclouds":
            consts.DEFAULT_SUBCLOUD_GROUP_MAX_PARALLEL_SUBCLOUDS,
        "deleted": 0
    }
    # Inserting the GROUP as ID 1,
    # This should increment the pkey to 2
    with migrate_engine.begin() as conn:
        conn.execute(subcloud_group.insert(), default_group)

    # postgres does not increment the subcloud group id sequence
    # after the insert above as part of the migrate.
    # Note: use different SQL syntax if using mysql or sqlite
    if migrate_engine.name == 'postgresql':
        with migrate_engine.begin() as conn:
            conn.execute("ALTER SEQUENCE subcloud_group_id_seq RESTART WITH 2")

    # Add group_id column to subclouds table
    # server_default ensures existing subcloud rows join the default group
    group_id = \
        sqlalchemy.Column('group_id',
                          sqlalchemy.Integer,
                          server_default=str(consts.DEFAULT_SUBCLOUD_GROUP_ID))
    group_id.create(subclouds)
    subcloud_fkey = constraint.ForeignKeyConstraint(
        columns=[subclouds.c.group_id],
        refcolumns=[subcloud_group.c.id],
        name='subclouds_group_ref')
    subclouds.append_constraint(subcloud_fkey)
def downgrade(migrate_engine):
    # Downgrades are deliberately unsupported for dcmanager migrations.
    raise NotImplementedError('Database downgrade is unsupported.')

View File

@ -13,7 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
#
# Copyright (c) 2017 Wind River Systems, Inc.
# Copyright (c) 2017-2020 Wind River Systems, Inc.
#
# The right to copy, distribute, modify, or otherwise make use
# of this software may be licensed only pursuant to the terms
@ -75,6 +75,18 @@ class DCManagerBase(models.ModelBase,
session.commit()
class SubcloudGroup(BASE, DCManagerBase):
    """Represents a subcloud group"""
    __tablename__ = 'subcloud_group'
    # surrogate primary key
    id = Column(Integer, primary_key=True, autoincrement=True, nullable=False)
    # unique group name; lookups by name are unambiguous
    name = Column(String(255), unique=True)
    description = Column(String(255))
    # how updates are applied to this group's subclouds — presumably a
    # serial/parallel strategy value (see consts defaults); TODO confirm
    update_apply_type = Column(String(255))
    # cap on subclouds updated concurrently within the group
    max_parallel_subclouds = Column(Integer)
class Subcloud(BASE, DCManagerBase):
"""Represents a subcloud"""
@ -95,6 +107,11 @@ class Subcloud(BASE, DCManagerBase):
openstack_installed = Column(Boolean, nullable=False, default=False)
systemcontroller_gateway_ip = Column(String(255))
audit_fail_count = Column(Integer)
# multiple subclouds can be in a particular group
group_id = Column(Integer,
ForeignKey('subcloud_group.id'))
group = relationship(SubcloudGroup,
backref=backref('subcloud'))
class SubcloudStatus(BASE, DCManagerBase):
@ -169,3 +186,16 @@ class StrategyStep(BASE, DCManagerBase):
finished_at = Column(DateTime)
subcloud = relationship('Subcloud', backref=backref("strategy_steps",
cascade="all,delete"))
class SubcloudAlarmSummary(BASE, DCManagerBase):
    """Represents a Distributed Cloud subcloud alarm aggregate"""
    __tablename__ = 'subcloud_alarms'
    id = Column(Integer, primary_key=True, nullable=False, autoincrement=True)
    # stable external identifier
    uuid = Column(String(36), unique=True)
    # subcloud this summary belongs to; unique so there is one row per cloud
    name = Column('name', String(255), unique=True)
    # per-severity alarm counts for the subcloud
    critical_alarms = Column('critical_alarms', Integer)
    major_alarms = Column('major_alarms', Integer)
    minor_alarms = Column('minor_alarms', Integer)
    warnings = Column('warnings', Integer)
    # aggregate status string for the cloud (semantics set by the audit
    # code that writes it — not visible here)
    cloud_status = Column('cloud_status', String(64))

View File

@ -56,6 +56,7 @@ class PatchAuditManager(manager.Manager):
self.subcloud_manager = kwargs['subcloud_manager']
# Wait 20 seconds before doing the first audit
self.wait_time_passed = DEFAULT_PATCH_AUDIT_DELAY_SECONDS - 25
self.audit_count = 0
# Used to force an audit on the next interval
_force_audit = False
@ -98,11 +99,19 @@ class PatchAuditManager(manager.Manager):
except Exception as e:
LOG.exception(e)
    def _update_subcloud_sync_status(self, sc_name, sc_endpoint_type, sc_status):
        # Helper that forwards a sync-status change for one subcloud
        # endpoint to the subcloud manager, which owns the DB update.
        self.subcloud_manager.update_subcloud_endpoint_status(
            self.context,
            subcloud_name=sc_name,
            endpoint_type=sc_endpoint_type,
            sync_status=sc_status)
def _periodic_patch_audit_loop(self):
"""Audit patch status of subclouds loop."""
# We are running in our own green thread here.
LOG.info('Triggered patch audit.')
self.audit_count += 1
try:
m_os_ks_client = OpenStackDriver(
@ -119,6 +128,15 @@ class PatchAuditManager(manager.Manager):
regionone_patches = patching_client.query()
LOG.debug("regionone_patches: %s" % regionone_patches)
# Get the active software version in RegionOne as it may be needed
# later for subcloud load audit.
sysinv_client = SysinvClient(
consts.DEFAULT_REGION_NAME, m_os_ks_client.session)
regionone_loads = sysinv_client.get_loads()
for load in regionone_loads:
if load.state == consts.LOAD_STATE_ACTIVE:
regionone_software_version = load.software_version
# Build lists of patches that should be applied or committed in all
# subclouds, based on their state in RegionOne. Check repostate
# (not patchstate) as we only care if the patch has been applied to
@ -191,8 +209,12 @@ class PatchAuditManager(manager.Manager):
LOG.warn('Cannot retrieve loads for subcloud: %s' %
subcloud.name)
continue
subcloud_software_version = None
for load in loads:
installed_loads.append(load.software_version)
if load.state == consts.LOAD_STATE_ACTIVE:
subcloud_software_version = load.software_version
out_of_sync = False
@ -241,16 +263,41 @@ class PatchAuditManager(manager.Manager):
if out_of_sync:
LOG.debug("Subcloud %s is out-of-sync for patching" %
subcloud.name)
self.subcloud_manager.update_subcloud_endpoint_status(
self.context,
subcloud_name=subcloud.name,
endpoint_type=dcorch_consts.ENDPOINT_TYPE_PATCHING,
sync_status=consts.SYNC_STATUS_OUT_OF_SYNC)
self._update_subcloud_sync_status(
subcloud.name, dcorch_consts.ENDPOINT_TYPE_PATCHING,
consts.SYNC_STATUS_OUT_OF_SYNC)
else:
LOG.debug("Subcloud %s is in-sync for patching" %
subcloud.name)
self.subcloud_manager.update_subcloud_endpoint_status(
self.context,
subcloud_name=subcloud.name,
endpoint_type=dcorch_consts.ENDPOINT_TYPE_PATCHING,
sync_status=consts.SYNC_STATUS_IN_SYNC)
self._update_subcloud_sync_status(
subcloud.name, dcorch_consts.ENDPOINT_TYPE_PATCHING,
consts.SYNC_STATUS_IN_SYNC)
# Check subcloud software version every other audit cycle
if self.audit_count % 2 != 0:
LOG.debug('Auditing load of subcloud %s' % subcloud.name)
try:
upgrades = sysinv_client.get_upgrades()
except Exception:
LOG.warn('Cannot retrieve upgrade info for subcloud: %s' %
subcloud.name)
continue
if not upgrades:
# No upgrade in progress
if subcloud_software_version == regionone_software_version:
self._update_subcloud_sync_status(
subcloud.name, dcorch_consts.ENDPOINT_TYPE_LOAD,
consts.SYNC_STATUS_IN_SYNC)
else:
self._update_subcloud_sync_status(
subcloud.name, dcorch_consts.ENDPOINT_TYPE_LOAD,
consts.SYNC_STATUS_OUT_OF_SYNC)
else:
# As upgrade is still in progress, set the subcloud load
# status as out-of-sync.
self._update_subcloud_sync_status(
subcloud.name, dcorch_consts.ENDPOINT_TYPE_LOAD,
consts.SYNC_STATUS_OUT_OF_SYNC)
LOG.info('Patch audit completed.')

File diff suppressed because it is too large Load Diff

View File

@ -34,9 +34,8 @@ from dcmanager.common import context
from dcmanager.common import exceptions
from dcmanager.common.i18n import _
from dcmanager.common import messaging as rpc_messaging
from dcmanager.common import scheduler
from dcmanager.manager.patch_audit_manager import PatchAuditManager
from dcmanager.manager import scheduler
from dcmanager.manager.subcloud_audit_manager import SubcloudAuditManager
from dcmanager.manager.subcloud_manager import SubcloudManager
from dcmanager.manager.sw_update_manager import SwUpdateManager
@ -81,7 +80,6 @@ class DCManagerService(service.Service):
self.target = None
self._rpc_server = None
self.subcloud_manager = None
self.subcloud_audit_manager = None
self.sw_update_manager = None
self.patch_audit_manager = None
@ -89,8 +87,6 @@ class DCManagerService(service.Service):
self.TG = scheduler.ThreadGroupManager()
def init_audit_managers(self):
self.subcloud_audit_manager = SubcloudAuditManager(
subcloud_manager=self.subcloud_manager)
self.patch_audit_manager = PatchAuditManager(
subcloud_manager=self.subcloud_manager)
@ -116,18 +112,9 @@ class DCManagerService(service.Service):
super(DCManagerService, self).start()
if self.periodic_enable:
LOG.info("Adding periodic tasks for the manager to perform")
self.TG.add_timer(cfg.CONF.scheduler.subcloud_audit_interval,
self.subcloud_audit, initial_delay=10)
self.TG.add_timer(cfg.CONF.scheduler.patch_audit_interval,
self.patch_audit, initial_delay=60)
def subcloud_audit(self):
# Audit availability of all subclouds.
# Note this will run in a separate green thread
LOG.debug("Subcloud audit job started at: %s",
time.strftime("%c"))
self.subcloud_audit_manager.periodic_subcloud_audit()
def patch_audit(self):
# Audit patch status of all subclouds.
# Note this will run in a separate green thread
@ -149,13 +136,14 @@ class DCManagerService(service.Service):
@request_context
def update_subcloud(self, context, subcloud_id, management_state=None,
description=None, location=None):
description=None, location=None, group_id=None):
# Updates a subcloud
LOG.info("Handling update_subcloud request for: %s" % subcloud_id)
subcloud = self.subcloud_manager.update_subcloud(context, subcloud_id,
management_state,
description,
location)
location,
group_id)
# If a subcloud has been set to the managed state, trigger the
# patching audit so it can update the sync status ASAP.
if management_state == consts.MANAGEMENT_MANAGED:
@ -188,6 +176,32 @@ class DCManagerService(service.Service):
return
@request_context
def update_subcloud_availability(self, context,
subcloud_name,
availability_status,
update_state_only=False,
audit_fail_count=None):
# Updates subcloud availability
LOG.info("Handling update_subcloud_availability request for: %s" %
subcloud_name)
self.subcloud_manager.update_subcloud_availability(
context,
subcloud_name,
availability_status,
update_state_only,
audit_fail_count)
@request_context
def update_subcloud_sync_endpoint_type(self, context, subcloud_name,
endpoint_type_list,
openstack_installed):
# Updates subcloud sync endpoint type
LOG.info("Handling update_subcloud_sync_endpoint_type request for: %s"
% subcloud_name)
self.subcloud_manager.update_subcloud_sync_endpoint_type(
context, subcloud_name, endpoint_type_list, openstack_installed)
@request_context
def create_sw_update_strategy(self, context, payload):
# Creates a software update strategy

View File

@ -1,413 +0,0 @@
# Copyright 2017 Ericsson AB.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright (c) 2017-2020 Wind River Systems, Inc.
#
# The right to copy, distribute, modify, or otherwise make use
# of this software may be licensed only pursuant to the terms
# of an applicable Wind River license agreement.
#
from keystoneauth1 import exceptions as keystone_exceptions
from oslo_config import cfg
from oslo_log import log as logging
from fm_api import constants as fm_const
from fm_api import fm_api
from sysinv.common import constants as sysinv_constants
from dccommon import consts as dccommon_consts
from dccommon.drivers.openstack.sdk_platform import OpenStackDriver
from dccommon.drivers.openstack.sysinv_v1 import SysinvClient
from dcorch.rpc import client as dcorch_rpc_client
from dcmanager.common import consts
from dcmanager.common import context
from dcmanager.common import exceptions
from dcmanager.common.i18n import _
from dcmanager.common import manager
from dcmanager.db import api as db_api
from dcmanager.manager import scheduler
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
# We will update the state of each subcloud in the dcorch about once per hour.
# Calculate how many iterations that will be.
SUBCLOUD_STATE_UPDATE_ITERATIONS = \
dccommon_consts.SECONDS_IN_HOUR / CONF.scheduler.subcloud_audit_interval
class SubcloudAuditManager(manager.Manager):
"""Manages tasks related to audits."""
def __init__(self, *args, **kwargs):
LOG.debug(_('SubcloudAuditManager initialization...'))
super(SubcloudAuditManager, self).__init__(
service_name="subcloud_audit_manager")
self.context = context.get_admin_context()
self.dcorch_rpc_client = dcorch_rpc_client.EngineClient()
self.fm_api = fm_api.FaultAPIs()
self.subcloud_manager = kwargs['subcloud_manager']
# Keeps track of greenthreads we create to do work.
self.thread_group_manager = scheduler.ThreadGroupManager(
thread_pool_size=100)
# Track workers created for each subcloud.
self.subcloud_workers = dict()
# Number of audits since last subcloud state update
self.audit_count = 0
def periodic_subcloud_audit(self):
"""Audit availability of subclouds."""
# Blanket catch all exceptions in the audit so that the audit
# does not die.
try:
self._periodic_subcloud_audit_loop()
except Exception as e:
LOG.exception(e)
def _periodic_subcloud_audit_loop(self):
"""Audit availability of subclouds loop."""
# We will be running in our own green thread here.
LOG.info('Triggered subcloud audit.')
self.audit_count += 1
# Determine whether to trigger a state update to each subcloud
if self.audit_count >= SUBCLOUD_STATE_UPDATE_ITERATIONS:
update_subcloud_state = True
else:
update_subcloud_state = False
# Determine whether OpenStack is installed in central cloud
os_client = OpenStackDriver(region_name=consts.DEFAULT_REGION_NAME,
region_clients=None)
sysinv_client = SysinvClient(consts.DEFAULT_REGION_NAME,
os_client.keystone_client.session)
# This could be optimized in the future by attempting to get just the
# one application. However, sysinv currently treats this as a failure
# if the application is not installed and generates warning logs, so it
# would require changes to handle this gracefully.
apps = sysinv_client.get_applications()
openstack_installed = False
for app in apps:
if app.name == sysinv_constants.HELM_APP_OPENSTACK and app.active:
openstack_installed = True
break
for subcloud in db_api.subcloud_get_all(self.context):
if (subcloud.deploy_status not in
[consts.DEPLOY_STATE_DONE,
consts.DEPLOY_STATE_DEPLOYING,
consts.DEPLOY_STATE_DEPLOY_FAILED]):
LOG.debug("Skip subcloud %s audit, deploy_status: %s" %
(subcloud.name, subcloud.deploy_status))
continue
# Create a new greenthread for each subcloud to allow the audits
# to be done in parallel. If there are not enough greenthreads
# in the pool, this will block until one becomes available.
self.subcloud_workers[subcloud.name] = \
self.thread_group_manager.start(self._audit_subcloud,
subcloud.name,
update_subcloud_state,
openstack_installed)
# Wait for all greenthreads to complete
LOG.info('Waiting for subcloud audits to complete.')
for thread in self.subcloud_workers.values():
thread.wait()
# Clear the list of workers before next audit
self.subcloud_workers = dict()
LOG.info('All subcloud audits have completed.')
def _audit_subcloud(self, subcloud_name, update_subcloud_state,
audit_openstack):
"""Audit a single subcloud."""
# Retrieve the subcloud
try:
subcloud = db_api.subcloud_get_by_name(self.context, subcloud_name)
except exceptions.SubcloudNotFound:
# Possibility subcloud could have been deleted since the list of
# subclouds to audit was created.
LOG.info('Ignoring SubcloudNotFound when auditing subcloud %s' %
subcloud_name)
return
# For each subcloud, if at least one service is active in
# each service of servicegroup-list then declare the subcloud online.
subcloud_id = subcloud.id
avail_status_current = subcloud.availability_status
audit_fail_count = subcloud.audit_fail_count
# Set defaults to None and disabled so we will still set disabled
# status if we encounter an error.
sysinv_client = None
svc_groups = None
avail_to_set = consts.AVAILABILITY_OFFLINE
try:
os_client = OpenStackDriver(region_name=subcloud_name,
region_clients=None)
sysinv_client = SysinvClient(subcloud_name,
os_client.keystone_client.session)
except (keystone_exceptions.EndpointNotFound,
keystone_exceptions.ConnectFailure,
keystone_exceptions.ConnectTimeout,
IndexError):
if avail_status_current == consts.AVAILABILITY_OFFLINE:
LOG.info("Identity or Platform endpoint for %s not "
"found, ignoring for offline "
"subcloud." % subcloud_name)
return
else:
# The subcloud will be marked as offline below.
LOG.error("Identity or Platform endpoint for online "
"subcloud: %s not found." % subcloud_name)
except Exception as e:
LOG.exception(e)
if sysinv_client:
# get a list of service groups in the subcloud
try:
svc_groups = sysinv_client.get_service_groups()
except Exception as e:
svc_groups = None
LOG.warn('Cannot retrieve service groups for '
'subcloud: %s, %s' % (subcloud_name, e))
if svc_groups:
active_sgs = []
inactive_sgs = []
# Build 2 lists, 1 of active service groups,
# one with non-active.
for sg in svc_groups:
if sg.state != consts.SERVICE_GROUP_STATUS_ACTIVE:
inactive_sgs.append(sg.service_group_name)
else:
active_sgs.append(sg.service_group_name)
# Create a list of service groups that are only present
# in non-active list
inactive_only = [sg for sg in inactive_sgs if
sg not in active_sgs]
# An empty inactive only list and a non-empty active list
# means we're good to go.
if not inactive_only and active_sgs:
avail_to_set = \
consts.AVAILABILITY_ONLINE
else:
LOG.info("Subcloud:%s has non-active "
"service groups: %s" %
(subcloud_name, inactive_only))
if avail_to_set == consts.AVAILABILITY_OFFLINE:
if audit_fail_count < consts.AVAIL_FAIL_COUNT_MAX:
audit_fail_count = audit_fail_count + 1
if (avail_status_current == consts.AVAILABILITY_ONLINE) and \
(audit_fail_count < consts.AVAIL_FAIL_COUNT_TO_ALARM):
# Do not set offline until we have failed audit
# the requisite number of times
avail_to_set = consts.AVAILABILITY_ONLINE
else:
# In the case of a one off blip, we may need to set the
# fail count back to 0
audit_fail_count = 0
if avail_to_set != avail_status_current:
if avail_to_set == consts.AVAILABILITY_ONLINE:
audit_fail_count = 0
LOG.info('Setting new availability status: %s '
'on subcloud: %s' %
(avail_to_set, subcloud_name))
entity_instance_id = "subcloud=%s" % subcloud_name
fault = self.fm_api.get_fault(
fm_const.FM_ALARM_ID_DC_SUBCLOUD_OFFLINE,
entity_instance_id)
if fault and (avail_to_set == consts.AVAILABILITY_ONLINE):
try:
self.fm_api.clear_fault(
fm_const.FM_ALARM_ID_DC_SUBCLOUD_OFFLINE,
entity_instance_id)
except Exception as e:
LOG.exception(e)
elif not fault and \
(avail_to_set == consts.AVAILABILITY_OFFLINE):
try:
fault = fm_api.Fault(
alarm_id=fm_const.FM_ALARM_ID_DC_SUBCLOUD_OFFLINE,
alarm_state=fm_const.FM_ALARM_STATE_SET,
entity_type_id=fm_const.FM_ENTITY_TYPE_SUBCLOUD,
entity_instance_id=entity_instance_id,
severity=fm_const.FM_ALARM_SEVERITY_CRITICAL,
reason_text=('%s is offline' % subcloud_name),
alarm_type=fm_const.FM_ALARM_TYPE_0,
probable_cause=fm_const.ALARM_PROBABLE_CAUSE_29,
proposed_repair_action="Wait for subcloud to "
"become online; if "
"problem persists contact "
"next level of support.",
service_affecting=True)
self.fm_api.set_fault(fault)
except Exception as e:
LOG.exception(e)
try:
updated_subcloud = db_api.subcloud_update(
self.context,
subcloud_id,
management_state=None,
availability_status=avail_to_set,
software_version=None,
description=None, location=None,
audit_fail_count=audit_fail_count)
except exceptions.SubcloudNotFound:
# slim possibility subcloud could have been deleted since
# we found it in db, ignore this benign error.
LOG.info('Ignoring SubcloudNotFound when attempting state'
' update: %s' % subcloud_name)
return
try:
self.dcorch_rpc_client.\
update_subcloud_states(self.context,
subcloud_name,
updated_subcloud.management_state,
avail_to_set)
LOG.info('Notifying dcorch, subcloud:%s management: %s, '
'availability:%s' %
(subcloud_name,
updated_subcloud.management_state,
avail_to_set))
except Exception as e:
LOG.exception(e)
LOG.warn('Problem informing dcorch of subcloud '
'state change, subcloud: %s' % subcloud_name)
if avail_to_set == consts.AVAILABILITY_OFFLINE:
# Subcloud is going offline, set all endpoint statuses to
# unknown.
try:
self.subcloud_manager.update_subcloud_endpoint_status(
self.context,
subcloud_name=subcloud_name,
endpoint_type=None,
sync_status=consts.SYNC_STATUS_UNKNOWN)
except exceptions.SubcloudNotFound:
LOG.info('Ignoring SubcloudNotFound when attempting '
'sync_status update: %s' % subcloud_name)
return
elif audit_fail_count != subcloud.audit_fail_count:
try:
db_api.subcloud_update(self.context, subcloud_id,
management_state=None,
availability_status=None,
software_version=None,
description=None, location=None,
audit_fail_count=audit_fail_count)
except exceptions.SubcloudNotFound:
# slim possibility subcloud could have been deleted since
# we found it in db, ignore this benign error.
LOG.info('Ignoring SubcloudNotFound when attempting '
'audit_fail_count update: %s' % subcloud_name)
return
elif update_subcloud_state:
# Nothing has changed, but we want to send a state update for this
# subcloud as an audit. Get the most up-to-date data.
subcloud = db_api.subcloud_get_by_name(self.context, subcloud_name)
self.dcorch_rpc_client. \
update_subcloud_states(self.context,
subcloud_name,
subcloud.management_state,
subcloud.availability_status)
if audit_openstack and sysinv_client:
# get a list of installed apps in the subcloud
try:
apps = sysinv_client.get_applications()
except Exception as e:
LOG.warn('Cannot retrieve installed apps for '
'subcloud:%s, %s' % (subcloud_name, e))
return
openstack_installed = subcloud.openstack_installed
openstack_installed_current = False
for app in apps:
if app.name == sysinv_constants.HELM_APP_OPENSTACK\
and app.active:
# audit find openstack app is installed and active in
# the subcloud
openstack_installed_current = True
break
dcm_update_func = None
dco_update_func = None
if openstack_installed_current and not openstack_installed:
dcm_update_func = db_api.subcloud_status_create
# TODO(andy.ning): This RPC will block for the duration of the
# initial sync. It needs to be made non-blocking.
dco_update_func = self.dcorch_rpc_client.\
add_subcloud_sync_endpoint_type
elif not openstack_installed_current and openstack_installed:
dcm_update_func = db_api.subcloud_status_delete
dco_update_func = self.dcorch_rpc_client.\
remove_subcloud_sync_endpoint_type
if dcm_update_func and dco_update_func:
endpoint_type_list = dccommon_consts.ENDPOINT_TYPES_LIST_OS
try:
# Notify dcorch to add/remove sync endpoint type list
dco_update_func(self.context, subcloud_name,
endpoint_type_list)
LOG.info('Notifying dcorch, subcloud: %s new sync'
' endpoint: %s' % (subcloud_name,
endpoint_type_list))
# Update subcloud status table by adding/removing
# openstack sync endpoint types.
for endpoint_type in endpoint_type_list:
dcm_update_func(self.context, subcloud_id,
endpoint_type)
# Update openstack_installed of subcloud table
db_api.subcloud_update(
self.context, subcloud_id,
openstack_installed=openstack_installed_current)
except exceptions.SubcloudNotFound:
LOG.info('Ignoring SubcloudNotFound when attempting'
' openstack_installed update: %s'
% subcloud_name)
except Exception as e:
LOG.exception(e)
LOG.warn('Problem informing dcorch of subcloud '
'sync endpoint type change, subcloud: %s'
% subcloud_name)

View File

@ -28,15 +28,17 @@ import keyring
import netaddr
import os
import threading
import time
from oslo_log import log as logging
from oslo_messaging import RemoteError
from tsconfig.tsconfig import CONFIG_PATH
from tsconfig.tsconfig import SW_VERSION
from dccommon import consts as dccommon_consts
from dccommon.drivers.openstack.keystone_v3 import KeystoneClient
from dccommon.drivers.openstack.sysinv_v1 import SysinvClient
from dccommon import kubeoperator
from dcorch.common import consts as dcorch_consts
from dcorch.rpc import client as dcorch_rpc_client
@ -47,6 +49,7 @@ from dcmanager.common import exceptions
from dcmanager.common.i18n import _
from dcmanager.common import manager
from dcmanager.common import utils
from dcmanager.db import api as db_api
from dcmanager.manager.subcloud_install import SubcloudInstall
@ -60,7 +63,6 @@ LOG = logging.getLogger(__name__)
ADDN_HOSTS_DC = 'dnsmasq.addn_hosts_dc'
# Subcloud configuration paths
ANSIBLE_OVERRIDES_PATH = '/opt/dc/ansible'
INVENTORY_FILE_POSTFIX = '_inventory.yml'
ANSIBLE_SUBCLOUD_PLAYBOOK = \
'/usr/share/ansible/stx-ansible/playbooks/bootstrap.yml'
@ -78,6 +80,10 @@ USERS_TO_REPLICATE = [
SERVICES_USER = 'services'
SC_INTERMEDIATE_CERT_DURATION = "87600h"
SC_INTERMEDIATE_CERT_RENEW_BEFORE = "720h"
CERT_NAMESPACE = "dc-cert"
def sync_update_subcloud_endpoint_status(func):
"""Synchronized lock decorator for _update_subcloud_endpoint_status. """
@ -108,6 +114,70 @@ class SubcloudManager(manager.Manager):
self.dcorch_rpc_client = dcorch_rpc_client.EngineClient()
self.fm_api = fm_api.FaultAPIs()
@staticmethod
def _get_subcloud_cert_name(subcloud_name):
cert_name = "%s-adminep-ca-certificate" % subcloud_name
return cert_name
@staticmethod
def _get_subcloud_cert_secret_name(subcloud_name):
secret_name = "%s-adminep-ca-certificate" % subcloud_name
return secret_name
@staticmethod
def _create_intermediate_ca_cert(payload):
subcloud_name = payload["name"]
cert_name = SubcloudManager._get_subcloud_cert_name(subcloud_name)
secret_name = SubcloudManager._get_subcloud_cert_secret_name(
subcloud_name)
cert = {
"apiVersion": "cert-manager.io/v1alpha2",
"kind": "Certificate",
"metadata": {
"namespace": CERT_NAMESPACE,
"name": cert_name
},
"spec": {
"secretName": secret_name,
"duration": SC_INTERMEDIATE_CERT_DURATION,
"renewBefore": SC_INTERMEDIATE_CERT_RENEW_BEFORE,
"issuerRef": {
"kind": "Issuer",
"name": "dc-adminep-root-ca-issuer"
},
"commonName": cert_name,
"isCA": True,
},
}
kube = kubeoperator.KubeOperator()
kube.apply_cert_manager_certificate(CERT_NAMESPACE, cert_name, cert)
for count in range(1, 20):
secret = kube.kube_get_secret(secret_name, CERT_NAMESPACE)
if not hasattr(secret, 'data'):
time.sleep(1)
LOG.debug('Wait for %s ... %s' % (secret_name, count))
continue
data = secret.data
if 'ca.crt' not in data or \
'tls.crt' not in data or 'tls.key' not in data:
# ca cert, certificate and key pair are needed and must exist
# for creating an intermediate ca. If not, certificate is not
# ready yet.
time.sleep(1)
LOG.debug('Wait for %s ... %s' % (secret_name, count))
continue
payload['dc_root_ca_cert'] = data['ca.crt']
payload['sc_ca_cert'] = data['tls.crt']
payload['sc_ca_key'] = data['tls.key']
return
raise Exception("Secret for certificate %s is not ready." % cert_name)
def add_subcloud(self, context, payload):
"""Add subcloud and notify orchestrators.
@ -116,36 +186,11 @@ class SubcloudManager(manager.Manager):
:param payload: subcloud configuration
"""
LOG.info("Adding subcloud %s." % payload['name'])
dcorch_populated = False
try:
subcloud = db_api.subcloud_get_by_name(context, payload['name'])
except exceptions.SubcloudNameNotFound:
pass
else:
raise exceptions.BadRequest(
resource='subcloud',
msg='Subcloud with that name already exists')
subcloud = db_api.subcloud_get_by_name(context, payload['name'])
# Subcloud is added with software version that matches system
# controller.
software_version = SW_VERSION
try:
subcloud = db_api.subcloud_create(
context,
payload['name'],
payload.get('description'),
payload.get('location'),
software_version,
payload['management_subnet'],
payload['management_gateway_address'],
payload['management_start_address'],
payload['management_end_address'],
payload['systemcontroller_gateway_address'],
consts.DEPLOY_STATE_NONE,
False)
except Exception as e:
LOG.exception(e)
raise e
db_api.subcloud_update(
context, subcloud.id,
deploy_status=consts.DEPLOY_STATE_PRE_DEPLOY)
# Populate the subcloud status table with all endpoints
for endpoint in dcorch_consts.ENDPOINT_TYPES_LIST:
@ -156,7 +201,7 @@ class SubcloudManager(manager.Manager):
try:
# Ansible inventory filename for the specified subcloud
ansible_subcloud_inventory_file = os.path.join(
ANSIBLE_OVERRIDES_PATH,
consts.ANSIBLE_OVERRIDES_PATH,
subcloud.name + INVENTORY_FILE_POSTFIX)
# Create a new route to this subcloud on the management interface
@ -195,20 +240,25 @@ class SubcloudManager(manager.Manager):
for service in m_ks_client.services_list:
if service.type == dcorch_consts.ENDPOINT_TYPE_PLATFORM:
endpoint_url = "http://{}:6385/v1".format(endpoint_ip)
endpoint_config.append((service.id, endpoint_url))
if service.type == dcorch_consts.ENDPOINT_TYPE_IDENTITY:
endpoint_url = "http://{}:5000/v3".format(endpoint_ip)
endpoint_config.append((service.id, endpoint_url))
if service.type == dcorch_consts.ENDPOINT_TYPE_PATCHING:
endpoint_url = "http://{}:5491".format(endpoint_ip)
endpoint_config.append((service.id, endpoint_url))
if service.type == dcorch_consts.ENDPOINT_TYPE_FM:
endpoint_url = "http://{}:18002".format(endpoint_ip)
endpoint_config.append((service.id, endpoint_url))
if service.type == dcorch_consts.ENDPOINT_TYPE_NFV:
endpoint_url = "http://{}:4545".format(endpoint_ip)
endpoint_config.append((service.id, endpoint_url))
admin_endpoint_url = "https://{}:6386/v1".format(endpoint_ip)
endpoint_config.append({"id": service.id,
"admin_endpoint_url": admin_endpoint_url})
elif service.type == dcorch_consts.ENDPOINT_TYPE_IDENTITY:
admin_endpoint_url = "https://{}:5001/v3".format(endpoint_ip)
endpoint_config.append({"id": service.id,
"admin_endpoint_url": admin_endpoint_url})
elif service.type == dcorch_consts.ENDPOINT_TYPE_PATCHING:
admin_endpoint_url = "https://{}:5492".format(endpoint_ip)
endpoint_config.append({"id": service.id,
"admin_endpoint_url": admin_endpoint_url})
elif service.type == dcorch_consts.ENDPOINT_TYPE_FM:
admin_endpoint_url = "https://{}:18003".format(endpoint_ip)
endpoint_config.append({"id": service.id,
"admin_endpoint_url": admin_endpoint_url})
elif service.type == dcorch_consts.ENDPOINT_TYPE_NFV:
admin_endpoint_url = "https://{}:4546".format(endpoint_ip)
endpoint_config.append({"id": service.id,
"admin_endpoint_url": admin_endpoint_url})
if len(endpoint_config) < 5:
raise exceptions.BadRequest(
@ -216,17 +266,24 @@ class SubcloudManager(manager.Manager):
msg='Missing service in SystemController')
for endpoint in endpoint_config:
for iface in ['internal', 'admin']:
m_ks_client.keystone_client.endpoints.create(
endpoint[0],
endpoint[1],
interface=iface,
region=subcloud.name)
m_ks_client.keystone_client.endpoints.create(
endpoint["id"],
endpoint['admin_endpoint_url'],
interface=dccommon_consts.KS_ENDPOINT_ADMIN,
region=subcloud.name)
# Inform orchestrator that subcloud has been added
self.dcorch_rpc_client.add_subcloud(
context, subcloud.name, subcloud.software_version)
dcorch_populated = True
# create entry into alarm summary table, will get real values later
alarm_updates = {'critical_alarms': -1,
'major_alarms': -1,
'minor_alarms': -1,
'warnings': -1,
'cloud_status': consts.ALARMS_DISABLED}
db_api.subcloud_alarms_create(context, subcloud.name,
alarm_updates)
# Regenerate the addn_hosts_dc file
self._create_addn_hosts_dc(context)
@ -252,12 +309,19 @@ class SubcloudManager(manager.Manager):
payload['sysadmin_password']
if "deploy_playbook" in payload:
payload['deploy_values'] = dict()
payload['deploy_values']['ansible_become_pass'] = \
payload['sysadmin_password']
payload['deploy_values']['ansible_ssh_pass'] = \
payload['sysadmin_password']
payload['deploy_values']['admin_password'] = \
str(keyring.get_password('CGCS', 'admin'))
payload['deploy_values']['deployment_config'] = \
payload[consts.DEPLOY_CONFIG]
payload['deploy_values']['deployment_manager_chart'] = \
payload[consts.DEPLOY_CHART]
payload['deploy_values']['deployment_manager_overrides'] = \
payload[consts.DEPLOY_OVERRIDES]
del payload['sysadmin_password']
@ -270,6 +334,9 @@ class SubcloudManager(manager.Manager):
self._create_subcloud_inventory(payload,
ansible_subcloud_inventory_file)
# create subcloud intermediate certificate and pass in keys
self._create_intermediate_ca_cert(payload)
# Write this subclouds overrides to file
# NOTE: This file should not be deleted if subcloud add fails
# as it is used for debugging
@ -284,7 +351,7 @@ class SubcloudManager(manager.Manager):
"ansible-playbook", ANSIBLE_SUBCLOUD_INSTALL_PLAYBOOK,
"-i", ansible_subcloud_inventory_file,
"--limit", subcloud.name,
"-e", "@%s" % ANSIBLE_OVERRIDES_PATH + "/" +
"-e", "@%s" % consts.ANSIBLE_OVERRIDES_PATH + "/" +
payload['name'] + '/' + "install_values.yml"
]
@ -298,17 +365,15 @@ class SubcloudManager(manager.Manager):
# which overrides to load
apply_command += [
"-e", str("override_files_dir='%s' region_name=%s") % (
ANSIBLE_OVERRIDES_PATH, subcloud.name)]
consts.ANSIBLE_OVERRIDES_PATH, subcloud.name)]
deploy_command = None
if "deploy_playbook" in payload:
deploy_command = [
"ansible-playbook", ANSIBLE_OVERRIDES_PATH + '/' +
payload['name'] + "_deploy.yml",
"-e", "@%s" % ANSIBLE_OVERRIDES_PATH + "/" +
"ansible-playbook", payload[consts.DEPLOY_PLAYBOOK],
"-e", "@%s" % consts.ANSIBLE_OVERRIDES_PATH + "/" +
payload['name'] + "_deploy_values.yml",
"-i",
ansible_subcloud_inventory_file,
"-i", ansible_subcloud_inventory_file,
"--limit", subcloud.name
]
@ -320,15 +385,13 @@ class SubcloudManager(manager.Manager):
return db_api.subcloud_db_model_to_dict(subcloud)
except Exception as e:
LOG.exception(e)
# If we failed to create the subcloud, clean up anything we may
# have done.
self._remove_subcloud_details(context,
subcloud,
ansible_subcloud_inventory_file,
dcorch_populated)
raise e
except Exception:
LOG.exception("Failed to create subcloud %s" % payload['name'])
# If we failed to create the subcloud, update the
# deployment status
db_api.subcloud_update(
context, subcloud.id,
deploy_status=consts.DEPLOY_STATE_DEPLOY_PREP_FAILED)
@staticmethod
def run_deploy(install_command, apply_command, deploy_command, subcloud,
@ -340,7 +403,8 @@ class SubcloudManager(manager.Manager):
deploy_status=consts.DEPLOY_STATE_PRE_INSTALL)
try:
install = SubcloudInstall(context, subcloud.name)
install.prep(ANSIBLE_OVERRIDES_PATH, payload['install_values'])
install.prep(consts.ANSIBLE_OVERRIDES_PATH,
payload['install_values'])
except Exception as e:
LOG.exception(e)
db_api.subcloud_update(
@ -488,7 +552,7 @@ class SubcloudManager(manager.Manager):
def _write_subcloud_ansible_config(self, context, payload):
"""Create the override file for usage with the specified subcloud"""
overrides_file = os.path.join(ANSIBLE_OVERRIDES_PATH,
overrides_file = os.path.join(consts.ANSIBLE_OVERRIDES_PATH,
payload['name'] + '.yml')
m_ks_client = KeystoneClient()
@ -518,19 +582,17 @@ class SubcloudManager(manager.Manager):
for k, v in payload.items():
if k not in ['deploy_playbook', 'deploy_values',
'install_values']:
'deploy_config', 'deploy_chart',
'deploy_overrides', 'install_values']:
f_out_overrides_file.write("%s: %s\n" % (k, json.dumps(v)))
def _write_deploy_files(self, payload):
"""Create the deploy playbook and value files for the subcloud"""
"""Create the deploy value files for the subcloud"""
deploy_playbook_file = os.path.join(
ANSIBLE_OVERRIDES_PATH, payload['name'] + '_deploy.yml')
deploy_values_file = os.path.join(
ANSIBLE_OVERRIDES_PATH, payload['name'] + '_deploy_values.yml')
consts.ANSIBLE_OVERRIDES_PATH, payload['name'] +
'_deploy_values.yml')
with open(deploy_playbook_file, 'w') as f_out_deploy_playbook_file:
json.dump(payload['deploy_playbook'], f_out_deploy_playbook_file)
with open(deploy_values_file, 'w') as f_out_deploy_values_file:
json.dump(payload['deploy_values'], f_out_deploy_values_file)
@ -558,18 +620,35 @@ class SubcloudManager(manager.Manager):
subcloud.systemcontroller_gateway_ip)),
1)
@staticmethod
def _delete_subcloud_cert(subcloud_name):
cert_name = SubcloudManager._get_subcloud_cert_name(subcloud_name)
secret_name = SubcloudManager._get_subcloud_cert_secret_name(
subcloud_name)
kube = kubeoperator.KubeOperator()
kube.delete_cert_manager_certificate(CERT_NAMESPACE, cert_name)
kube.kube_delete_secret(secret_name, CERT_NAMESPACE)
LOG.info("cert %s and secret %s are deleted" % (cert_name, secret_name))
def _remove_subcloud_details(self, context,
subcloud,
ansible_subcloud_inventory_file,
dcorch_populated=True):
ansible_subcloud_inventory_file):
"""Remove subcloud details from database and inform orchestrators"""
# Inform orchestrators that subcloud has been deleted
if dcorch_populated:
try:
self.dcorch_rpc_client.del_subcloud(context, subcloud.name)
except RemoteError as e:
if "SubcloudNotFound" in e:
pass
try:
self.dcorch_rpc_client.del_subcloud(context, subcloud.name)
except RemoteError as e:
if "SubcloudNotFound" in e:
pass
# delete the associated alarm entry
try:
db_api.subcloud_alarms_delete(context, subcloud.name)
except RemoteError as e:
if "SubcloudNotFound" in e:
pass
# We only delete subcloud endpoints, region and user information
# in the Central Region. The subcloud is already unmanaged and powered
@ -594,6 +673,9 @@ class SubcloudManager(manager.Manager):
# Delete the ansible inventory for the new subcloud
self._delete_subcloud_inventory(ansible_subcloud_inventory_file)
# Delete the subcloud intermediate certificate
SubcloudManager._delete_subcloud_cert(subcloud.name)
# Regenerate the addn_hosts_dc file
self._create_addn_hosts_dc(context)
@ -618,7 +700,7 @@ class SubcloudManager(manager.Manager):
# Ansible inventory filename for the specified subcloud
ansible_subcloud_inventory_file = os.path.join(
ANSIBLE_OVERRIDES_PATH,
consts.ANSIBLE_OVERRIDES_PATH,
subcloud.name + INVENTORY_FILE_POSTFIX)
self._remove_subcloud_details(context,
@ -644,8 +726,13 @@ class SubcloudManager(manager.Manager):
"subcloud %s" % subcloud.name)
LOG.exception(e)
def update_subcloud(self, context, subcloud_id, management_state=None,
description=None, location=None):
def update_subcloud(self,
context,
subcloud_id,
management_state=None,
description=None,
location=None,
group_id=None):
"""Update subcloud and notify orchestrators.
:param context: request context object
@ -653,6 +740,7 @@ class SubcloudManager(manager.Manager):
:param management_state: new management state
:param description: new description
:param location: new location
:param group_id: new subcloud group id
"""
LOG.info("Updating subcloud %s." % subcloud_id)
@ -683,10 +771,12 @@ class SubcloudManager(manager.Manager):
LOG.error("Invalid management_state %s" % management_state)
raise exceptions.InternalError()
subcloud = db_api.subcloud_update(context, subcloud_id,
subcloud = db_api.subcloud_update(context,
subcloud_id,
management_state=management_state,
description=description,
location=location)
location=location,
group_id=group_id)
# Inform orchestrators that subcloud has been updated
if management_state:
@ -951,3 +1041,153 @@ class SubcloudManager(manager.Manager):
self._update_subcloud_endpoint_status(
context, subcloud.name, endpoint_type, sync_status,
alarmable)
def _update_subcloud_state(self, context, subcloud_name,
management_state, availability_status):
try:
self.dcorch_rpc_client.update_subcloud_states(
context, subcloud_name, management_state, availability_status)
LOG.info('Notifying dcorch, subcloud:%s management: %s, '
'availability:%s' %
(subcloud_name,
management_state,
availability_status))
except Exception:
LOG.exception('Problem informing dcorch of subcloud state change,'
'subcloud: %s' % subcloud_name)
def _raise_or_clear_subcloud_status_alarm(self, subcloud_name,
availability_status):
entity_instance_id = "subcloud=%s" % subcloud_name
fault = self.fm_api.get_fault(
fm_const.FM_ALARM_ID_DC_SUBCLOUD_OFFLINE,
entity_instance_id)
if fault and (availability_status == consts.AVAILABILITY_ONLINE):
try:
self.fm_api.clear_fault(
fm_const.FM_ALARM_ID_DC_SUBCLOUD_OFFLINE,
entity_instance_id)
except Exception:
LOG.exception("Failed to clear offline alarm for subcloud: %s",
subcloud_name)
elif not fault and \
(availability_status == consts.AVAILABILITY_OFFLINE):
try:
fault = fm_api.Fault(
alarm_id=fm_const.FM_ALARM_ID_DC_SUBCLOUD_OFFLINE,
alarm_state=fm_const.FM_ALARM_STATE_SET,
entity_type_id=fm_const.FM_ENTITY_TYPE_SUBCLOUD,
entity_instance_id=entity_instance_id,
severity=fm_const.FM_ALARM_SEVERITY_CRITICAL,
reason_text=('%s is offline' % subcloud_name),
alarm_type=fm_const.FM_ALARM_TYPE_0,
probable_cause=fm_const.ALARM_PROBABLE_CAUSE_29,
proposed_repair_action="Wait for subcloud to "
"become online; if "
"problem persists contact "
"next level of support.",
service_affecting=True)
self.fm_api.set_fault(fault)
except Exception:
LOG.exception("Failed to raise offline alarm for subcloud: %s",
subcloud_name)
def update_subcloud_availability(self, context, subcloud_name,
availability_status,
update_state_only=False,
audit_fail_count=None):
try:
subcloud = db_api.subcloud_get_by_name(context, subcloud_name)
except Exception:
LOG.exception("Failed to get subcloud by name: %s" % subcloud_name)
if update_state_only:
# Nothing has changed, but we want to send a state update for this
# subcloud as an audit. Get the most up-to-date data.
self._update_subcloud_state(context, subcloud_name,
subcloud.management_state,
availability_status)
elif availability_status is None:
# only update the audit fail count
try:
db_api.subcloud_update(self.context, subcloud.id,
audit_fail_count=audit_fail_count)
except exceptions.SubcloudNotFound:
# slim possibility subcloud could have been deleted since
# we found it in db, ignore this benign error.
LOG.info('Ignoring SubcloudNotFound when attempting '
'audit_fail_count update: %s' % subcloud_name)
return
else:
self._raise_or_clear_subcloud_status_alarm(subcloud_name,
availability_status)
if availability_status == consts.AVAILABILITY_OFFLINE:
# Subcloud is going offline, set all endpoint statuses to
# unknown.
self._update_subcloud_endpoint_status(
context, subcloud_name, endpoint_type=None,
sync_status=consts.SYNC_STATUS_UNKNOWN)
try:
updated_subcloud = db_api.subcloud_update(
context,
subcloud.id,
availability_status=availability_status,
audit_fail_count=audit_fail_count)
except exceptions.SubcloudNotFound:
# slim possibility subcloud could have been deleted since
# we found it in db, ignore this benign error.
LOG.info('Ignoring SubcloudNotFound when attempting state'
' update: %s' % subcloud_name)
return
# Send dcorch a state update
self._update_subcloud_state(context, subcloud_name,
updated_subcloud.management_state,
availability_status)
def update_subcloud_sync_endpoint_type(self, context,
subcloud_name,
endpoint_type_list,
openstack_installed):
operation = 'add' if openstack_installed else 'remove'
func_switcher = {
'add': (
self.dcorch_rpc_client.add_subcloud_sync_endpoint_type,
db_api.subcloud_status_create
),
'remove': (
self.dcorch_rpc_client.remove_subcloud_sync_endpoint_type,
db_api.subcloud_status_delete
)
}
try:
subcloud = db_api.subcloud_get_by_name(context, subcloud_name)
except Exception:
LOG.exception("Failed to get subcloud by name: %s" % subcloud_name)
try:
# Notify dcorch to add/remove sync endpoint type list
func_switcher[operation][0](self.context, subcloud_name,
endpoint_type_list)
LOG.info('Notifying dcorch, subcloud: %s new sync endpoint: %s' %
(subcloud_name, endpoint_type_list))
# Update subcloud status table by adding/removing openstack sync
# endpoint types
for endpoint_type in endpoint_type_list:
func_switcher[operation][1](self.context, subcloud.id,
endpoint_type)
# Update openstack_installed of subcloud table
db_api.subcloud_update(self.context, subcloud.id,
openstack_installed=openstack_installed)
except Exception:
LOG.exception('Problem informing dcorch of subcloud sync endpoint'
' type change, subcloud: %s' % subcloud_name)

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,370 @@
# Copyright 2017 Ericsson AB.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright (c) 2017-2020 Wind River Systems, Inc.
#
# The right to copy, distribute, modify, or otherwise make use
# of this software may be licensed only pursuant to the terms
# of an applicable Wind River license agreement.
#
import datetime
import threading
import time
from oslo_log import log as logging
from dccommon.drivers.openstack.sdk_platform import OpenStackDriver
from dcmanager.common import consts
from dcmanager.common import context
from dcmanager.common import exceptions
from dcmanager.common import scheduler
from dcmanager.db import api as db_api
from dcmanager.manager.patch_audit_manager import PatchAuditManager
LOG = logging.getLogger(__name__)
class SwUpgradeOrchThread(threading.Thread):
"""SwUpgrade Orchestration Thread
This thread is responsible for executing the upgrade orchestration strategy.
Here is how it works:
- The user creates an update strategy from CLI (or REST API) of 'upgrade'
- This ends up being handled by the SwUpdateManager class, which
runs under the main dcmanager thread. The strategy is created and stored
in the database.
- The user then applies the strategy from the CLI (or REST API). The
SwUpdateManager code updates the state of the strategy in the database.
- The SwUpgradeOrchThread wakes up periodically and checks the database for
a strategy that is in an active state (applying, aborting, etc...). If
so, it executes the strategy, updating the strategy and steps in the
database as it goes, with state and progress information.
"""
def __init__(self, strategy_lock):
super(SwUpgradeOrchThread, self).__init__()
self.context = context.get_admin_context()
self._stop = threading.Event()
# Used to protect strategy when an atomic read/update is required.
self.strategy_lock = strategy_lock
# Keeps track of greenthreads we create to do work.
self.thread_group_manager = scheduler.ThreadGroupManager(
thread_pool_size=100)
# Track worker created for each subcloud.
self.subcloud_workers = dict()
# When an upgrade is initiated, this is the first state
self.starting_state = consts.STRATEGY_STATE_INSTALLING_LICENSE
def stopped(self):
return self._stop.isSet()
def stop(self):
LOG.info("SwUpgradeOrchThread Stopping")
self._stop.set()
def run(self):
self.upgrade_orch()
# Stop any greenthreads that are still running
self.thread_group_manager.stop()
LOG.info("SwUpgradeOrchThread Stopped")
@staticmethod
def get_ks_client(region_name=consts.DEFAULT_REGION_NAME):
"""This will get a cached keystone client (and token)"""
try:
os_client = OpenStackDriver(
region_name=region_name,
region_clients=None)
return os_client.keystone_client
except Exception:
LOG.warn('Failure initializing KeystoneClient')
raise
@staticmethod
def get_region_name(strategy_step):
"""Get the region name for a strategy step"""
if strategy_step.subcloud_id is None:
# This is the SystemController.
return consts.DEFAULT_REGION_NAME
return strategy_step.subcloud.name
def strategy_step_update(self, subcloud_id, state=None, details=None):
"""Update the strategy step in the DB
Sets the start and finished timestamp if necessary, based on state.
"""
started_at = None
finished_at = None
if state == self.starting_state:
started_at = datetime.datetime.now()
elif state in [consts.STRATEGY_STATE_COMPLETE,
consts.STRATEGY_STATE_ABORTED,
consts.STRATEGY_STATE_FAILED]:
finished_at = datetime.datetime.now()
db_api.strategy_step_update(
self.context,
subcloud_id,
state=state,
details=details,
started_at=started_at,
finished_at=finished_at)
def upgrade_orch(self):
while not self.stopped():
try:
LOG.debug('Running upgrade orchestration')
sw_update_strategy = db_api.sw_update_strategy_get(
self.context)
if sw_update_strategy.type == consts.SW_UPDATE_TYPE_UPGRADE:
if sw_update_strategy.state in [
consts.SW_UPDATE_STATE_APPLYING,
consts.SW_UPDATE_STATE_ABORTING]:
self.apply(sw_update_strategy)
elif sw_update_strategy.state == \
consts.SW_UPDATE_STATE_ABORT_REQUESTED:
self.abort(sw_update_strategy)
elif sw_update_strategy.state == \
consts.SW_UPDATE_STATE_DELETING:
self.delete(sw_update_strategy)
except exceptions.NotFound:
# Nothing to do if a strategy doesn't exist
pass
except Exception as e:
# We catch all exceptions to avoid terminating the thread.
LOG.exception(e)
# Wake up every 10 seconds to see if there is work to do.
time.sleep(10)
LOG.info("SwUpgradeOrchThread ended main loop")
def apply(self, sw_update_strategy):
"""Apply an upgrade strategy"""
LOG.info("Applying upgrade strategy")
strategy_steps = db_api.strategy_step_get_all(self.context)
# Figure out which stage we are working on
current_stage = None
stop_after_stage = None
failure_detected = False
abort_detected = False
for strategy_step in strategy_steps:
if strategy_step.state == consts.STRATEGY_STATE_COMPLETE:
# This step is complete
continue
elif strategy_step.state == consts.STRATEGY_STATE_ABORTED:
# This step was aborted
abort_detected = True
continue
elif strategy_step.state == consts.STRATEGY_STATE_FAILED:
failure_detected = True
# This step has failed and needs no further action
if strategy_step.subcloud_id is None:
# Strategy on SystemController failed. We are done.
LOG.info("Stopping strategy due to failure while "
"processing upgrade step on SystemController")
with self.strategy_lock:
db_api.sw_update_strategy_update(
self.context, state=consts.SW_UPDATE_STATE_FAILED)
# Trigger audit to update the sync status for
# each subcloud.
PatchAuditManager.trigger_audit()
return
elif sw_update_strategy.stop_on_failure:
# We have been told to stop on failures
stop_after_stage = strategy_step.stage
current_stage = strategy_step.stage
break
continue
# We have found the first step that isn't complete or failed.
# This is the stage we are working on now.
current_stage = strategy_step.stage
break
else:
# The strategy application is complete
if failure_detected:
LOG.info("Strategy application has failed.")
with self.strategy_lock:
db_api.sw_update_strategy_update(
self.context, state=consts.SW_UPDATE_STATE_FAILED)
elif abort_detected:
LOG.info("Strategy application was aborted.")
with self.strategy_lock:
db_api.sw_update_strategy_update(
self.context, state=consts.SW_UPDATE_STATE_ABORTED)
else:
LOG.info("Strategy application is complete.")
with self.strategy_lock:
db_api.sw_update_strategy_update(
self.context, state=consts.SW_UPDATE_STATE_COMPLETE)
# Trigger audit to update the sync status for each subcloud.
PatchAuditManager.trigger_audit()
return
if stop_after_stage is not None:
work_remaining = False
# We are going to stop after the steps in this stage have finished.
for strategy_step in strategy_steps:
if strategy_step.stage == stop_after_stage:
if strategy_step.state != consts.STRATEGY_STATE_COMPLETE \
and strategy_step.state != \
consts.STRATEGY_STATE_FAILED:
# There is more work to do in this stage
work_remaining = True
break
if not work_remaining:
# We have completed the stage that failed
LOG.info("Stopping strategy due to failure in stage %d" %
stop_after_stage)
with self.strategy_lock:
db_api.sw_update_strategy_update(
self.context, state=consts.SW_UPDATE_STATE_FAILED)
# Trigger audit to update the sync status for each subcloud.
PatchAuditManager.trigger_audit()
return
LOG.info("Working on stage %d" % current_stage)
for strategy_step in strategy_steps:
if strategy_step.stage == current_stage:
region = self.get_region_name(strategy_step)
if strategy_step.state == \
consts.STRATEGY_STATE_INITIAL:
# Don't start upgrading this subcloud if it has been
# unmanaged by the user. If orchestration was already
# started, it will be allowed to complete.
if strategy_step.subcloud_id is not None and \
strategy_step.subcloud.management_state == \
consts.MANAGEMENT_UNMANAGED:
message = ("Subcloud %s is unmanaged." %
strategy_step.subcloud.name)
LOG.warn(message)
self.strategy_step_update(
strategy_step.subcloud_id,
state=consts.STRATEGY_STATE_FAILED,
details=message)
continue
# We are just getting started, enter the first state
self.strategy_step_update(
strategy_step.subcloud_id,
state=consts.STRATEGY_STATE_INSTALLING_LICENSE)
if region in self.subcloud_workers:
# A worker already exists. Let it finish whatever it
# was doing.
LOG.error("Worker should not exist for %s." % region)
else:
# Create a greenthread to do the upgrades
self.subcloud_workers[region] = \
self.thread_group_manager.start(
self.update_subcloud_upgrade,
strategy_step)
elif strategy_step.state == \
consts.STRATEGY_STATE_INSTALLING_LICENSE:
LOG.info("Upgrade install license not yet implemented")
self.strategy_step_update(
strategy_step.subcloud_id,
state=consts.STRATEGY_STATE_FAILED,
details="Upgrade install license not yet implemented")
else:
LOG.error("Unimplemented state %s" % strategy_step.state)
self.strategy_step_update(
strategy_step.subcloud_id,
state=consts.STRATEGY_STATE_FAILED,
details=("Upgrade state not implemented: %s"
% strategy_step.state))
if self.stopped():
LOG.info("Exiting because task is stopped")
return
def update_subcloud_upgrade(self, strategy_step):
"""Upload/Apply/Remove upgrades in this subcloud
Removes the worker reference after the operation is complete.
"""
try:
self.do_update_subcloud_upgrade(strategy_step)
except Exception as e:
LOG.exception(e)
finally:
# The worker is done.
region = self.get_region_name(strategy_step)
if region in self.subcloud_workers:
del self.subcloud_workers[region]
def do_update_subcloud_upgrade(self, strategy_step):
"""Upload/Apply/Remove upgrade in this subcloud"""
if strategy_step.subcloud_id is None:
# This is the SystemController. It is the master so no update
# is necessary.
LOG.info("Skipping upgrade for SystemController")
self.strategy_step_update(
strategy_step.subcloud_id,
state=consts.STRATEGY_STATE_CREATING_STRATEGY)
return
LOG.info("Updating upgrade for subcloud %s" %
strategy_step.subcloud.name)
LOG.error("do_update_subcloud_upgrade not implemented yet")
raise NotImplementedError
def abort(self, sw_update_strategy):
"""Abort an upgrade strategy"""
LOG.info("Aborting upgrade strategy")
# Mark any steps that have not yet started as aborted,
# so we will not run them later.
strategy_steps = db_api.strategy_step_get_all(self.context)
for strategy_step in strategy_steps:
if strategy_step.state == consts.STRATEGY_STATE_INITIAL:
LOG.info("Aborting step for subcloud %s" %
self.get_region_name(strategy_step))
self.strategy_step_update(
strategy_step.subcloud_id,
state=consts.STRATEGY_STATE_ABORTED,
details="")
with self.strategy_lock:
db_api.sw_update_strategy_update(
self.context, state=consts.SW_UPDATE_STATE_ABORTING)
def delete(self, sw_update_strategy):
"""Delete an upgrade strategy"""
LOG.info("Deleting upgrade strategy")
# todo(abailey): determine if we should validate the strategy_steps
# before allowing the delete
# Remove the strategy from the database
try:
db_api.strategy_step_destroy_all(self.context)
db_api.sw_update_strategy_destroy(self.context)
except Exception as e:
LOG.exception(e)
raise e

View File

@ -64,7 +64,7 @@ class ManagerClient(object):
return client.cast(ctxt, method, **kwargs)
def add_subcloud(self, ctxt, payload):
return self.call(ctxt, self.make_msg('add_subcloud',
return self.cast(ctxt, self.make_msg('add_subcloud',
payload=payload))
def delete_subcloud(self, ctxt, subcloud_id):
@ -72,12 +72,13 @@ class ManagerClient(object):
subcloud_id=subcloud_id))
def update_subcloud(self, ctxt, subcloud_id, management_state=None,
description=None, location=None):
description=None, location=None, group_id=None):
return self.call(ctxt, self.make_msg('update_subcloud',
subcloud_id=subcloud_id,
management_state=management_state,
description=description,
location=location))
location=location,
group_id=group_id))
def update_subcloud_endpoint_status(self, ctxt, subcloud_name=None,
endpoint_type=None,
@ -88,6 +89,31 @@ class ManagerClient(object):
endpoint_type=endpoint_type,
sync_status=sync_status))
def update_subcloud_availability(self, ctxt,
subcloud_name,
availability_status,
update_state_only=False,
audit_fail_count=None):
return self.call(
ctxt,
self.make_msg('update_subcloud_availability',
subcloud_name=subcloud_name,
availability_status=availability_status,
update_state_only=update_state_only,
audit_fail_count=audit_fail_count))
def update_subcloud_sync_endpoint_type(self, ctxt, subcloud_id,
subcloud_name,
endpoint_type_list,
openstack_installed):
return self.cast(
ctxt,
self.make_msg('update_subcloud_sync_endpoint_type',
subcloud_id=subcloud_id,
subcloud_name=subcloud_name,
endpoint_type_list=endpoint_type_list,
openstack_installed=openstack_installed))
def create_sw_update_strategy(self, ctxt, payload):
return self.call(ctxt, self.make_msg('create_sw_update_strategy',
payload=payload))

View File

@ -40,18 +40,37 @@ from sqlalchemy.engine import Engine
from sqlalchemy import event
SUBCLOUD_SAMPLE_DATA_0 = [
6, "subcloud-4", "demo subcloud", "Ottawa-Lab-Aisle_3-Rack_C",
"20.01", "managed", "online", "fd01:3::0/64", "fd01:3::1",
"fd01:3::2", "fd01:3::f", "fd01:1::1", 0, "NULL", "NULL",
"2018-05-15 14:45:12.508708", "2018-05-24 10:48:18.090931",
"NULL", 0, "10.10.10.0/24", "10.10.10.1", "10.10.10.12", "testpass"
6, # id
"subcloud-4", # name
"demo subcloud", # description
"Ottawa-Lab-Aisle_3-Rack_C", # location
"20.01", # software-version
"managed", # management-state
"online", # availability-status
"fd01:3::0/64", # management_subnet
"fd01:3::1", # management_gateway_address
"fd01:3::2", # management_start_address
"fd01:3::f", # management_end_address
"fd01:1::1", # systemcontroller_gateway_address
0, # audit-fail-count
"NULL", # reserved-1
"NULL", # reserved-2
"2018-05-15 14:45:12.508708", # created-at
"2018-05-24 10:48:18.090931", # updated-at
"NULL", # deleted-at
0, # deleted
"10.10.10.0/24", # external_oam_subnet
"10.10.10.1", # external_oam_gateway_address
"10.10.10.12", # external_oam_floating_address
"testpass", # sysadmin_password
1 # group_id
]
@event.listens_for(Engine, "connect")
def set_sqlite_pragma(dbapi_connection, connection_record):
cursor = dbapi_connection.cursor()
cursor.execute("PRAGMA foreign_keys=ON")
cursor.execute("PRAGMA foreign_keys=ON;")
cursor.close()

View File

@ -0,0 +1,74 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Copyright (c) 2020 Wind River Systems, Inc.
#
# The right to copy, distribute, modify, or otherwise make use
# of this software may be licensed only pursuant to the terms
# of an applicable Wind River license agreement.
#
import mock
from six.moves import http_client
from dcmanager.api.controllers.v1 import alarm_manager
from dcmanager.tests.unit.api import test_root_controller as testroot
from dcmanager.tests import utils
FAKE_URL = '/v1.0/alarms'
FAKE_TENANT = utils.UUID1
FAKE_ID = '1'
FAKE_HEADERS = {'X-Tenant-Id': FAKE_TENANT, 'X_ROLE': 'admin',
'X-Identity-Status': 'Confirmed'}
class TestSubcloudAlarmController(testroot.DCManagerApiTest):
    """Tests for the alarm summary REST API (GET /v1.0/alarms)."""

    def setUp(self):
        super(TestSubcloudAlarmController, self).setUp()
        self.ctx = utils.dummy_context()

    @mock.patch.object(alarm_manager, 'db_api')
    def test_get_alarms(self, mock_db_api):
        get_url = FAKE_URL
        # Rows as the DB layer returns them (subcloud keyed by 'name').
        alarms_from_db = [{'name': 'subcloud1',
                           'uuid': utils.UUID2,
                           'critical_alarms': 1,
                           'major_alarms': 2,
                           'minor_alarms': 3,
                           'warnings': 0,
                           'cloud_status': 'critical'},
                          {'name': 'subcloud2',
                           'uuid': utils.UUID3,
                           'critical_alarms': 0,
                           'major_alarms': 2,
                           'minor_alarms': 3,
                           'warnings': 4,
                           'cloud_status': 'degraded'}]
        # Expected API payload: same data, but the controller exposes
        # the subcloud under 'region_name' instead of 'name'.
        subcloud_summary = [{'region_name': 'subcloud1',
                             'uuid': utils.UUID2,
                             'critical_alarms': 1,
                             'major_alarms': 2,
                             'minor_alarms': 3,
                             'warnings': 0,
                             'cloud_status': 'critical'},
                            {'region_name': 'subcloud2',
                             'uuid': utils.UUID3,
                             'critical_alarms': 0,
                             'major_alarms': 2,
                             'minor_alarms': 3,
                             'warnings': 4,
                             'cloud_status': 'degraded'}]
        mock_db_api.subcloud_alarms_get_all.return_value = alarms_from_db
        response = self.app.get(get_url, headers=FAKE_HEADERS)
        self.assertEqual(response.content_type, 'application/json')
        self.assertEqual(response.status_code, http_client.OK)
        self.assertEqual(subcloud_summary, response.json.get('alarm_summary'))

View File

@ -0,0 +1,84 @@
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Copyright (c) 2020 Wind River Systems, Inc.
#
# The right to copy, distribute, modify, or otherwise make use
# of this software may be licensed only pursuant to the terms
# of an applicable Wind River license agreement.
#
import mock
from six.moves import http_client
from dcmanager.api.controllers.v1 import subcloud_deploy
from dcmanager.common import consts
from dcmanager.tests.unit.api import test_root_controller as testroot
from dcmanager.tests import utils
FAKE_TENANT = utils.UUID1
FAKE_ID = '1'
FAKE_URL = '/v1.0/subcloud-deploy'
FAKE_HEADERS = {'X-Tenant-Id': FAKE_TENANT, 'X_ROLE': 'admin',
'X-Identity-Status': 'Confirmed'}
class TestSubcloudDeploy(testroot.DCManagerApiTest):
    """Tests for POST /v1.0/subcloud-deploy file uploads."""

    def setUp(self):
        super(TestSubcloudDeploy, self).setUp()
        self.ctx = utils.dummy_context()

    @mock.patch.object(subcloud_deploy.SubcloudDeployController,
                       '_upload_files')
    def test_post_subcloud_deploy(self, mock_upload_files):
        # Uploading every required deploy file option succeeds (200 OK).
        fields = list()
        for opt in consts.DEPLOY_COMMON_FILE_OPTIONS:
            fake_name = opt + "_fake"
            fake_content = "fake content".encode('utf-8')
            fields.append((opt, fake_name, fake_content))
        mock_upload_files.return_value = True
        response = self.app.post(FAKE_URL,
                                 headers=FAKE_HEADERS,
                                 upload_files=fields)
        self.assertEqual(response.status_code, http_client.OK)

    @mock.patch.object(subcloud_deploy.SubcloudDeployController,
                       '_upload_files')
    def test_post_subcloud_deploy_missing_file(self, mock_upload_files):
        # Only a subset of the required files is uploaded -> 400.
        opts = [consts.DEPLOY_PLAYBOOK, consts.DEPLOY_OVERRIDES]
        fields = list()
        for opt in opts:
            fake_name = opt + "_fake"
            fake_content = "fake content".encode('utf-8')
            fields.append((opt, fake_name, fake_content))
        mock_upload_files.return_value = True
        response = self.app.post(FAKE_URL,
                                 headers=FAKE_HEADERS,
                                 upload_files=fields,
                                 expect_errors=True)
        self.assertEqual(response.status_code, http_client.BAD_REQUEST)

    @mock.patch.object(subcloud_deploy.SubcloudDeployController,
                       '_upload_files')
    def test_post_subcloud_deploy_missing_file_name(self, mock_upload_files):
        # Files uploaded with an empty filename -> 400.
        fields = list()
        for opt in consts.DEPLOY_COMMON_FILE_OPTIONS:
            fake_content = "fake content".encode('utf-8')
            fields.append((opt, "", fake_content))
        mock_upload_files.return_value = True
        response = self.app.post(FAKE_URL,
                                 headers=FAKE_HEADERS,
                                 upload_files=fields,
                                 expect_errors=True)
        self.assertEqual(response.status_code, http_client.BAD_REQUEST)

View File

@ -0,0 +1,539 @@
# Copyright (c) 2017 Ericsson AB
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Copyright (c) 2020 Wind River Systems, Inc.
#
# The right to copy, distribute, modify, or otherwise make use
# of this software may be licensed only pursuant to the terms
# of an applicable Wind River license agreement.
#
import mock
from six.moves import http_client
from dcmanager.common import consts
from dcmanager.db.sqlalchemy import api as db_api
from dcmanager.rpc import client as rpc_client
from dcmanager.tests.unit.api import test_root_controller as testroot
from dcmanager.tests.unit.api.v1.controllers.test_subclouds \
import FAKE_SUBCLOUD_DATA
from dcmanager.tests import utils
SAMPLE_SUBCLOUD_GROUP_NAME = 'GroupX'
SAMPLE_SUBCLOUD_GROUP_DESCRIPTION = 'A Group of mystery'
SAMPLE_SUBCLOUD_GROUP_UPDATE_APPLY_TYPE = consts.SUBCLOUD_APPLY_TYPE_SERIAL
SAMPLE_SUBCLOUD_GROUP_MAX_PARALLEL_SUBCLOUDS = 3
# APIMixin can be moved to its own file, once the other
# unit tests are refactored to utilize it
class APIMixin(object):
    """Common scaffolding for dcmanager API resource tests.

    Subclasses supply the API prefix, result key, expected/omitted
    fields, and hooks for creating DB objects and request payloads.
    The Post/Get/Update/Delete mixins drive the API through these
    hooks.
    """

    FAKE_TENANT = utils.UUID1

    # Headers sent with every request: admin role, confirmed identity.
    api_headers = {
        'X-Tenant-Id': FAKE_TENANT,
        'X_ROLE': 'admin',
        'X-Identity-Status': 'Confirmed'
    }

    # subclasses should provide methods
    # get_api_prefix
    # get_result_key

    def setUp(self):
        super(APIMixin, self).setUp()

    def get_api_headers(self):
        # Headers used by all mixin requests.
        return self.api_headers

    def get_single_url(self, uuid):
        # URL of a single resource, e.g. <prefix>/<id-or-name>.
        return '%s/%s' % (self.get_api_prefix(), uuid)

    def get_api_prefix(self):
        raise NotImplementedError

    def get_result_key(self):
        raise NotImplementedError

    def get_expected_api_fields(self):
        raise NotImplementedError

    def get_omitted_api_fields(self):
        raise NotImplementedError

    # base mixin subclass MUST override these methods if the api supports them
    def _create_db_object(self, context):
        raise NotImplementedError

    # base mixin subclass should provide this method for testing of POST
    def get_post_object(self):
        raise NotImplementedError

    def get_update_object(self):
        raise NotImplementedError

    def assert_fields(self, api_object):
        # Verify that expected attributes are returned
        for field in self.get_expected_api_fields():
            self.assertIn(field, api_object)
        # Verify that hidden attributes are not returned
        for field in self.get_omitted_api_fields():
            self.assertNotIn(field, api_object)
#
# --------------------- POST -----------------------------------
#
# An API test will mixin only one of: PostMixin or PostRejectedMixin
# depending on whether or not the API supports a post operation or not
class PostMixin(object):
    """Mixin verifying that the API supports POST (create)."""

    @mock.patch.object(rpc_client, 'ManagerClient')
    def test_create_success(self, mock_client):
        # Test that a POST operation is supported by the API
        ndict = self.get_post_object()
        response = self.app.post_json(self.get_api_prefix(),
                                      ndict,
                                      headers=self.get_api_headers())
        self.assertEqual(response.content_type, 'application/json')
        self.assertEqual(response.status_code, http_client.OK)
        # Returned object must expose the expected fields only.
        self.assert_fields(response.json)
class PostRejectedMixin(object):
    """Mixin verifying that the API rejects POST (create not allowed)."""

    # Test that a POST operation is blocked by the API
    # API should return 400 BAD_REQUEST or FORBIDDEN 403
    @mock.patch.object(rpc_client, 'ManagerClient')
    def test_create_not_allowed(self, mock_client):
        ndict = self.get_post_object()
        # Use the get_api_prefix() hook for consistency with the other
        # mixins (PostMixin, GetMixin) instead of reading the subclass
        # attribute API_PREFIX directly.
        response = self.app.post_json(self.get_api_prefix(),
                                      ndict,
                                      headers=self.get_api_headers(),
                                      expect_errors=True)
        self.assertEqual(response.status_code, http_client.FORBIDDEN)
        self.assertTrue(response.json['error_message'])
        self.assertIn("Operation not permitted.",
                      response.json['error_message'])
# ------ API GET mixin
class GetMixin(object):
    """Mixin verifying GET of the collection and of single resources."""

    # Mixins can override initial_list_size if a table is not empty during
    # DB creation and migration sync
    initial_list_size = 0

    # Performing a GET on this ID should fail. subclass mixins can override
    invalid_id = '123'

    def validate_entry(self, result_item):
        # Each returned item must expose exactly the expected fields.
        self.assert_fields(result_item)

    def validate_list(self, expected_length, results):
        # The collection is returned under the subclass's result key.
        self.assertIn(self.get_result_key(), results)
        result_list = results.get(self.get_result_key())
        self.assertEqual(expected_length, len(result_list))
        for result_item in result_list:
            self.validate_entry(result_item)

    def validate_list_response(self, expected_length, response):
        self.assertEqual(response.content_type, 'application/json')
        self.assertEqual(response.status_code, http_client.OK)
        # validate the list length
        self.validate_list(expected_length, response.json)

    @mock.patch.object(rpc_client, 'ManagerClient')
    def test_initial_list_size(self, mock_client):
        # Test that a GET operation for a list is supported by the API
        response = self.app.get(self.get_api_prefix(),
                                headers=self.get_api_headers())
        # Validate the initial length
        self.validate_list_response(self.initial_list_size, response)
        # Add an entry
        context = utils.dummy_context()
        self._create_db_object(context)
        response = self.app.get(self.get_api_prefix(),
                                headers=self.get_api_headers())
        # List length must have grown by exactly one.
        self.validate_list_response(self.initial_list_size + 1, response)

    @mock.patch.object(rpc_client, 'ManagerClient')
    def test_fail_get_single(self, mock_client):
        # Test that a GET operation for an invalid ID returns the
        # appropriate error results
        response = self.app.get(self.get_single_url(self.invalid_id),
                                headers=self.get_api_headers(),
                                expect_errors=True)
        # Failures will return text rather than json
        self.assertEqual(response.content_type, 'text/plain')
        self.assertEqual(response.status_code, http_client.NOT_FOUND)

    @mock.patch.object(rpc_client, 'ManagerClient')
    def test_get_single(self, mock_client):
        # create a group
        # NOTE(review): relies on the subclass _create_db_object
        # accepting a 'name' keyword (the base hook takes only context).
        context = utils.dummy_context()
        group_name = 'TestGroup'
        db_group = self._create_db_object(context, name=group_name)
        # Test that a GET operation for a valid ID works
        response = self.app.get(self.get_single_url(db_group.id),
                                headers=self.get_api_headers())
        self.assertEqual(response.content_type, 'application/json')
        self.assertEqual(response.status_code, http_client.OK)
        self.validate_entry(response.json)
# ------ API Update Mixin
class UpdateMixin(object):
    """Mixin verifying PATCH (update) of a single resource."""

    def validate_updated_fields(self, sub_dict, full_obj):
        # Every field in the patch must be reflected in the response.
        for key, value in sub_dict.items():
            self.assertEqual(value, full_obj.get(key))

    @mock.patch.object(rpc_client, 'ManagerClient')
    def test_update_success(self, mock_client):
        context = utils.dummy_context()
        single_obj = self._create_db_object(context)
        update_data = self.get_update_object()
        response = self.app.patch_json(self.get_single_url(single_obj.id),
                                       headers=self.get_api_headers(),
                                       params=update_data)
        self.assertEqual(response.content_type, 'application/json')
        self.assertEqual(response.status_code, http_client.OK)
        self.validate_updated_fields(update_data, response.json)

    @mock.patch.object(rpc_client, 'ManagerClient')
    def test_update_empty_changeset(self, mock_client):
        # A patch with no fields at all is rejected as a bad request.
        context = utils.dummy_context()
        single_obj = self._create_db_object(context)
        update_data = {}
        response = self.app.patch_json(self.get_single_url(single_obj.id),
                                       headers=self.get_api_headers(),
                                       params=update_data,
                                       expect_errors=True)
        # Failures will return text rather than json
        self.assertEqual(response.content_type, 'text/plain')
        self.assertEqual(response.status_code, http_client.BAD_REQUEST)
# ------ API Delete Mixin
class DeleteMixin(object):
    """Mixin verifying DELETE of a single resource."""

    @mock.patch.object(rpc_client, 'ManagerClient')
    def test_delete_success(self, mock_client):
        context = utils.dummy_context()
        single_obj = self._create_db_object(context)
        response = self.app.delete(self.get_single_url(single_obj.id),
                                   headers=self.get_api_headers())
        self.assertEqual(response.content_type, 'application/json')
        self.assertEqual(response.status_code, http_client.OK)

    @mock.patch.object(rpc_client, 'ManagerClient')
    def test_double_delete(self, mock_client):
        context = utils.dummy_context()
        single_obj = self._create_db_object(context)
        response = self.app.delete(self.get_single_url(single_obj.id),
                                   headers=self.get_api_headers())
        self.assertEqual(response.content_type, 'application/json')
        self.assertEqual(response.status_code, http_client.OK)
        # delete the same object a second time. this should fail (NOT_FOUND)
        response = self.app.delete(self.get_single_url(single_obj.id),
                                   headers=self.get_api_headers(),
                                   expect_errors=True)
        self.assertEqual(response.content_type, 'text/plain')
        self.assertEqual(response.status_code, http_client.NOT_FOUND)
class SubcloudGroupAPIMixin(APIMixin):
    """Implements the APIMixin hooks for the subcloud-groups API."""

    API_PREFIX = '/v1.0/subcloud-groups'
    RESULT_KEY = 'subcloud_groups'
    # Fields every returned subcloud-group object must expose.
    EXPECTED_FIELDS = ['id',
                       'name',
                       'description',
                       'max_parallel_subclouds',
                       'update_apply_type',
                       'created-at',
                       'updated-at']

    def setUp(self):
        super(SubcloudGroupAPIMixin, self).setUp()
        # NOTE(review): fake_rpc_client is presumably created by the base
        # test class — confirm against testroot.DCManagerApiTest.
        self.fake_rpc_client.some_method = mock.MagicMock()

    def _get_test_subcloud_group_dict(self, **kw):
        """Build a group payload; kw entries override the sample values."""
        # id should not be part of the structure
        group = {
            'name': kw.get('name', SAMPLE_SUBCLOUD_GROUP_NAME),
            'description': kw.get('description',
                                  SAMPLE_SUBCLOUD_GROUP_DESCRIPTION),
            'update_apply_type': kw.get(
                'update_apply_type',
                SAMPLE_SUBCLOUD_GROUP_UPDATE_APPLY_TYPE),
            'max_parallel_subclouds': kw.get(
                'max_parallel_subclouds',
                SAMPLE_SUBCLOUD_GROUP_MAX_PARALLEL_SUBCLOUDS)
        }
        return group

    def _post_get_test_subcloud_group(self, **kw):
        post_body = self._get_test_subcloud_group_dict(**kw)
        return post_body

    # The following methods are required for subclasses of APIMixin
    def get_api_prefix(self):
        return self.API_PREFIX

    def get_result_key(self):
        return self.RESULT_KEY

    def get_expected_api_fields(self):
        return self.EXPECTED_FIELDS

    def get_omitted_api_fields(self):
        # No fields are hidden for subcloud groups.
        return []

    def _create_db_object(self, context, **kw):
        # Insert a group directly through the DB API, bypassing REST.
        creation_fields = self._get_test_subcloud_group_dict(**kw)
        return db_api.subcloud_group_create(context, **creation_fields)

    def get_post_object(self):
        return self._post_get_test_subcloud_group()

    def get_update_object(self):
        update_object = {
            'description': 'Updated description'
        }
        return update_object
# Combine Subcloud Group API with mixins to test post, get, update and delete
class TestSubcloudGroupPost(testroot.DCManagerApiTest,
                            SubcloudGroupAPIMixin,
                            PostMixin):
    """POST validation tests for the subcloud-groups API."""

    def setUp(self):
        super(TestSubcloudGroupPost, self).setUp()

    def verify_post_failure(self, response):
        # Failures will return text rather than json
        self.assertEqual(response.content_type, 'text/plain')
        self.assertEqual(response.status_code, http_client.BAD_REQUEST)

    @mock.patch.object(rpc_client, 'ManagerClient')
    def test_create_with_numerical_name_fails(self, mock_client):
        # A numerical name is not permitted. otherwise the 'get' operations
        # which support getting by either name or ID could become confused
        # if a name for one group was the same as an ID for another.
        ndict = self.get_post_object()
        ndict['name'] = '123'
        response = self.app.post_json(self.get_api_prefix(),
                                      ndict,
                                      headers=self.get_api_headers(),
                                      expect_errors=True)
        self.verify_post_failure(response)

    @mock.patch.object(rpc_client, 'ManagerClient')
    def test_create_with_blank_name_fails(self, mock_client):
        # An empty name is not permitted
        ndict = self.get_post_object()
        ndict['name'] = ''
        response = self.app.post_json(self.get_api_prefix(),
                                      ndict,
                                      headers=self.get_api_headers(),
                                      expect_errors=True)
        self.verify_post_failure(response)

    @mock.patch.object(rpc_client, 'ManagerClient')
    def test_create_with_default_name_fails(self, mock_client):
        # A name that is the same as the 'Default' group is not permitted.
        # This would be a duplicate, and names must be unique.
        ndict = self.get_post_object()
        ndict['name'] = 'Default'
        response = self.app.post_json(self.get_api_prefix(),
                                      ndict,
                                      headers=self.get_api_headers(),
                                      expect_errors=True)
        self.verify_post_failure(response)

    @mock.patch.object(rpc_client, 'ManagerClient')
    def test_create_with_empty_description_fails(self, mock_client):
        # An empty description is considered invalid
        ndict = self.get_post_object()
        ndict['description'] = ''
        response = self.app.post_json(self.get_api_prefix(),
                                      ndict,
                                      headers=self.get_api_headers(),
                                      expect_errors=True)
        self.verify_post_failure(response)

    @mock.patch.object(rpc_client, 'ManagerClient')
    def test_create_with_bad_apply_type(self, mock_client):
        # update_apply_type must be either 'serial' or 'parallel'
        ndict = self.get_post_object()
        ndict['update_apply_type'] = 'something_invalid'
        response = self.app.post_json(self.get_api_prefix(),
                                      ndict,
                                      headers=self.get_api_headers(),
                                      expect_errors=True)
        self.verify_post_failure(response)

    @mock.patch.object(rpc_client, 'ManagerClient')
    def test_create_with_bad_max_parallel_subclouds(self, mock_client):
        # max_parallel_subclouds must be an integer between 1 and 100
        ndict = self.get_post_object()
        # All the entries in bad_values should be considered invalid
        bad_values = [0, 101, -1, 'abc']
        for bad_value in bad_values:
            ndict['max_parallel_subclouds'] = bad_value
            response = self.app.post_json(self.get_api_prefix(),
                                          ndict,
                                          headers=self.get_api_headers(),
                                          expect_errors=True)
            self.verify_post_failure(response)
class TestSubcloudGroupGet(testroot.DCManagerApiTest,
                           SubcloudGroupAPIMixin,
                           GetMixin):
    """GET tests for subcloud groups, including the subclouds sub-resource."""

    def setUp(self):
        super(TestSubcloudGroupGet, self).setUp()
        # Override initial_list_size. Default group is setup during db sync
        self.initial_list_size = 1

    @mock.patch.object(rpc_client, 'ManagerClient')
    def test_get_single_by_name(self, mock_client):
        # create a group
        context = utils.dummy_context()
        # todo(abailey) make this a generic method
        group_name = 'TestGroup'
        self._create_db_object(context, name=group_name)
        # Test that a GET operation for a valid ID works
        # (the API also accepts the group name in place of the ID).
        response = self.app.get(self.get_single_url(group_name),
                                headers=self.get_api_headers())
        self.assertEqual(response.content_type, 'application/json')
        self.assertEqual(response.status_code, http_client.OK)
        self.validate_entry(response.json)

    @mock.patch.object(rpc_client, 'ManagerClient')
    def test_list_subclouds_empty(self, mock_client):
        # API GET on: subcloud-groups/<uuid>/subclouds
        uuid = 1  # The Default Subcloud Group is always ID=1
        url = '%s/%s/subclouds' % (self.get_api_prefix(), uuid)
        response = self.app.get(url,
                                headers=self.get_api_headers())
        # This API returns 'subclouds' rather than 'subcloud-groups'
        self.assertIn('subclouds', response.json)
        # no subclouds exist yet, so this length should be zero
        result_list = response.json.get('subclouds')
        self.assertEqual(0, len(result_list))

    def _create_subcloud_db_object(self, context):
        # Insert a subcloud row based on the shared fake data; group_id
        # falls back to the Default group (1) when not in the fake data.
        creation_fields = {
            'name': FAKE_SUBCLOUD_DATA.get('name'),
            'description': FAKE_SUBCLOUD_DATA.get('description'),
            'location': FAKE_SUBCLOUD_DATA.get('location'),
            'software_version': FAKE_SUBCLOUD_DATA.get('software_version'),
            'management_subnet': FAKE_SUBCLOUD_DATA.get('management_subnet'),
            'management_gateway_ip':
                FAKE_SUBCLOUD_DATA.get('management_gateway_ip'),
            'management_start_ip':
                FAKE_SUBCLOUD_DATA.get('management_start_ip'),
            'management_end_ip': FAKE_SUBCLOUD_DATA.get('management_end_ip'),
            'systemcontroller_gateway_ip':
                FAKE_SUBCLOUD_DATA.get('systemcontroller_gateway_ip'),
            'deploy_status': FAKE_SUBCLOUD_DATA.get('deploy_status'),
            'openstack_installed':
                FAKE_SUBCLOUD_DATA.get('openstack_installed'),
            'group_id': FAKE_SUBCLOUD_DATA.get('group_id', 1)
        }
        return db_api.subcloud_create(context, **creation_fields)

    @mock.patch.object(rpc_client, 'ManagerClient')
    def test_list_subclouds_populated(self, mock_client):
        # subclouds are to Default group by default (unless specified)
        context = utils.dummy_context()
        self._create_subcloud_db_object(context)
        # API GET on: subcloud-groups/<uuid>/subclouds
        uuid = 1  # The Default Subcloud Group is always ID=1
        url = '%s/%s/subclouds' % (self.get_api_prefix(), uuid)
        response = self.app.get(url,
                                headers=self.get_api_headers())
        # This API returns 'subclouds' rather than 'subcloud-groups'
        self.assertIn('subclouds', response.json)
        # the subcloud created earlier will have been queried
        result_list = response.json.get('subclouds')
        self.assertEqual(1, len(result_list))
class TestSubcloudGroupUpdate(testroot.DCManagerApiTest,
                              SubcloudGroupAPIMixin,
                              UpdateMixin):
    """PATCH validation tests for subcloud groups."""

    def setUp(self):
        super(TestSubcloudGroupUpdate, self).setUp()

    @mock.patch.object(rpc_client, 'ManagerClient')
    def test_update_invalid_apply_type(self, mock_client):
        # An unrecognized update_apply_type is rejected.
        context = utils.dummy_context()
        single_obj = self._create_db_object(context)
        update_data = {
            'update_apply_type': 'something_bad'
        }
        response = self.app.patch_json(self.get_single_url(single_obj.id),
                                       headers=self.get_api_headers(),
                                       params=update_data,
                                       expect_errors=True)
        # Failures will return text rather than json
        self.assertEqual(response.content_type, 'text/plain')
        self.assertEqual(response.status_code, http_client.BAD_REQUEST)

    @mock.patch.object(rpc_client, 'ManagerClient')
    def test_update_invalid_max_parallel(self, mock_client):
        # A negative max_parallel_subclouds is rejected.
        context = utils.dummy_context()
        single_obj = self._create_db_object(context)
        update_data = {
            'max_parallel_subclouds': -1
        }
        response = self.app.patch_json(self.get_single_url(single_obj.id),
                                       headers=self.get_api_headers(),
                                       params=update_data,
                                       expect_errors=True)
        # Failures will return text rather than json
        self.assertEqual(response.content_type, 'text/plain')
        self.assertEqual(response.status_code, http_client.BAD_REQUEST)
class TestSubcloudGroupDelete(testroot.DCManagerApiTest,
                              SubcloudGroupAPIMixin,
                              DeleteMixin):
    """DELETE tests for subcloud groups."""

    def setUp(self):
        super(TestSubcloudGroupDelete, self).setUp()

    @mock.patch.object(rpc_client, 'ManagerClient')
    def test_delete_default_fails(self, mock_client):
        # The built-in Default group (ID=1) must not be deletable.
        default_zone_id = 1
        response = self.app.delete(self.get_single_url(default_zone_id),
                                   headers=self.get_api_headers(),
                                   expect_errors=True)
        # Failures will return text rather than json
        self.assertEqual(response.content_type, 'text/plain')
        self.assertEqual(response.status_code, http_client.BAD_REQUEST)

View File

@ -13,13 +13,14 @@
# License for the specific language governing permissions and limitations
# under the License.
#
# Copyright (c) 2017 Wind River Systems, Inc.
# Copyright (c) 2017-2020 Wind River Systems, Inc.
#
# The right to copy, distribute, modify, or otherwise make use
# of this software may be licensed only pursuant to the terms
# of an applicable Wind River license agreement.
#
import base64
import copy
import mock
import six
@ -51,8 +52,7 @@ FAKE_SUBCLOUD_DATA = {"name": "subcloud1",
"external_oam_subnet": "10.10.10.0/24",
"external_oam_gateway_address": "10.10.10.1",
"external_oam_floating_address": "10.10.10.12",
"availability-status": "disabled",
"sysadmin_password": "testpass"}
"availability-status": "disabled"}
FAKE_SUBCLOUD_INSTALL_VALUES = {
"image": "http://192.168.101.2:8080/iso/bootimage.iso",
@ -72,6 +72,11 @@ FAKE_SUBCLOUD_INSTALL_VALUES = {
"boot_device": "/dev/disk/by-path/pci-0000:5c:00.0-scsi-0:1:0:0"
}
FAKE_BOOTSTRAP_VALUE = {
'bootstrap-address': '10.10.10.12',
'sysadmin_password': base64.b64encode('testpass'.encode("utf-8"))
}
class FakeAddressPool(object):
def __init__(self, pool_network, pool_prefix, pool_start, pool_end):
@ -103,45 +108,88 @@ class TestSubclouds(testroot.DCManagerApiTest):
super(TestSubclouds, self).setUp()
self.ctx = utils.dummy_context()
@mock.patch.object(subclouds.SubcloudsController,
'_add_subcloud_to_database')
@mock.patch.object(subclouds.SubcloudsController,
'_upload_deploy_config_file')
@mock.patch.object(subclouds.SubcloudsController,
'_get_request_data')
@mock.patch.object(subclouds.SubcloudsController,
'_get_management_address_pool')
@mock.patch.object(rpc_client, 'ManagerClient')
@mock.patch.object(subclouds, 'db_api')
def test_post_subcloud(self, mock_db_api, mock_rpc_client,
mock_get_management_address_pool):
data = FAKE_SUBCLOUD_DATA
mock_get_management_address_pool,
mock_get_request_data,
mock_upload_deploy_config_file,
mock_add_subcloud_to_database):
management_address_pool = FakeAddressPool('192.168.204.0', 24,
'192.168.204.2',
'192.168.204.100')
mock_get_management_address_pool.return_value = management_address_pool
mock_rpc_client().add_subcloud.return_value = True
response = self.app.post_json(FAKE_URL,
headers=FAKE_HEADERS,
params=data)
fields = list()
for f in subclouds.SUBCLOUD_ADD_MANDATORY_FILE:
fake_name = f + "_fake"
fake_content = "fake content".encode('utf-8')
fields.append((f, fake_name, fake_content))
data = copy.copy(FAKE_SUBCLOUD_DATA)
data.update(FAKE_BOOTSTRAP_VALUE)
mock_get_request_data.return_value = data
mock_upload_deploy_config_file.return_value = True
mock_db_api.subcloud_db_model_to_dict.return_value = data
response = self.app.post(FAKE_URL,
headers=FAKE_HEADERS,
params=FAKE_BOOTSTRAP_VALUE,
upload_files=fields)
mock_add_subcloud_to_database.assert_called_once()
mock_rpc_client().add_subcloud.assert_called_once_with(
mock.ANY,
data)
self.assertEqual(response.status_int, 200)
@mock.patch.object(subclouds.SubcloudsController,
'_add_subcloud_to_database')
@mock.patch.object(subclouds.SubcloudsController,
'_upload_deploy_config_file')
@mock.patch.object(subclouds.SubcloudsController,
'_get_request_data')
@mock.patch.object(subclouds.SubcloudsController,
'_get_management_address_pool')
@mock.patch.object(rpc_client, 'ManagerClient')
@mock.patch.object(subclouds, 'db_api')
def test_post_subcloud_with_install_values(
self, mock_db_api, mock_rpc_client,
mock_get_management_address_pool):
mock_get_management_address_pool,
mock_get_request_data,
mock_upload_deploy_config_file,
mock_add_subcloud_to_database):
data = copy.copy(FAKE_SUBCLOUD_DATA)
install_data = copy.copy(FAKE_SUBCLOUD_INSTALL_VALUES)
data['bmc_password'] = 'bmc_password'
data.update({'install_values': install_data})
management_address_pool = FakeAddressPool('192.168.204.0', 24,
'192.168.204.2',
'192.168.204.100')
mock_get_management_address_pool.return_value = management_address_pool
mock_rpc_client().add_subcloud.return_value = True
response = self.app.post_json(FAKE_URL,
headers=FAKE_HEADERS,
params=data)
fields = list()
for f in subclouds.SUBCLOUD_ADD_GET_FILE_CONTENTS:
fake_name = f + "_fake"
fake_content = "fake content".encode('utf-8')
fields.append((f, fake_name, fake_content))
params = copy.copy(FAKE_BOOTSTRAP_VALUE)
params.update(
{'bmc_password':
base64.b64encode('bmc_password'.encode("utf-8")).decode('utf-8')})
data.update(params)
mock_get_request_data.return_value = data
mock_upload_deploy_config_file.return_value = True
mock_db_api.subcloud_db_model_to_dict.return_value = data
response = self.app.post(FAKE_URL,
headers=FAKE_HEADERS,
params=params,
upload_files=fields)
mock_add_subcloud_to_database.assert_called_once()
self.assertEqual(response.status_int, 200)
@mock.patch.object(subclouds.SubcloudsController,
@ -499,7 +547,8 @@ class TestSubclouds(testroot.DCManagerApiTest):
mock.ANY,
management_state=consts.MANAGEMENT_UNMANAGED,
description=None,
location=None)
location=None,
group_id=None)
self.assertEqual(response.status_int, 200)
@mock.patch.object(rpc_client, 'ManagerClient')

View File

@ -0,0 +1,140 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Copyright (c) 2020 Wind River Systems, Inc.
#
# The right to copy, distribute, modify, or otherwise make use
# of this software may be licensed only pursuant to the terms
# of an applicable Wind River license agreement.
#
import mock
from dccommon.drivers.openstack import sdk_platform as sdk
from dcmanager.audit import alarm_aggregation
from dcmanager.common import exceptions
from dcmanager.tests import base
from dcmanager.tests import utils
class FakeFmAlarmSummarySubcloud1(object):
    """Canned FM alarm-summary counts for subcloud1 (one critical alarm)."""

    def __init__(self):
        # Non-zero critical count drives the 'critical' cloud status case.
        for field, count in (('critical', 1), ('major', 2),
                             ('minor', 3), ('warnings', 4)):
            setattr(self, field, count)
class FakeFmAlarmSummarySubcloud2(object):
    """Canned FM alarm-summary counts for subcloud2 (no critical, one major)."""

    def __init__(self):
        # Zero critical with a major alarm drives the 'degraded' status case.
        for field, count in (('critical', 0), ('major', 1),
                             ('minor', 2), ('warnings', 3)):
            setattr(self, field, count)
class FakeFmAlarmSummarySubcloud3(object):
    """Canned FM alarm-summary counts for subcloud3 (warnings only)."""

    def __init__(self):
        # Only a warning is present, which drives the 'OK' status case.
        for field, count in (('critical', 0), ('major', 0),
                             ('minor', 0), ('warnings', 1)):
            setattr(self, field, count)
class FakeFmClientSubcloud1(object):
    """Fake FM client returning the subcloud1 alarm-summary fixture."""

    def get_alarm_summary(self):
        # The real client returns a list of summaries; one entry suffices.
        return [FakeFmAlarmSummarySubcloud1()]
class FakeFmClientSubcloud2(object):
    """Fake FM client returning the subcloud2 alarm-summary fixture."""

    def get_alarm_summary(self):
        # The real client returns a list of summaries; one entry suffices.
        return [FakeFmAlarmSummarySubcloud2()]
class FakeFmClientSubcloud3(object):
    """Fake FM client returning the subcloud3 alarm-summary fixture."""

    def get_alarm_summary(self):
        # The real client returns a list of summaries; one entry suffices.
        return [FakeFmAlarmSummarySubcloud3()]
class FakeFmClientSubcloud4(object):
    """Fake FM client whose alarm-summary query always fails."""

    def get_alarm_summary(self):
        # Simulates a lookup failure so the aggregation error path (logging
        # instead of a DB update) can be exercised by the tests below.
        raise exceptions.SubcloudNotFound(subcloud_id='subcloud4')
class FakeOpenStackDriver(object):
    """Fake OpenStackDriver exposing a region-specific fake fm_client."""

    def __init__(self, region_name='RegionOne'):
        # Select the fake FM client class for the requested region and
        # instantiate it.  Unknown regions (including the default) raise
        # KeyError, matching the original dict lookup behaviour.
        client_cls = {
            'subcloud1': FakeFmClientSubcloud1,
            'subcloud2': FakeFmClientSubcloud2,
            'subcloud3': FakeFmClientSubcloud3,
            'subcloud4': FakeFmClientSubcloud4,
        }[region_name]
        self.fm_client = client_cls()
class TestAlarmAggregation(base.DCManagerTestCase):
    """Tests for dcmanager alarm aggregation across subclouds."""

    def setUp(self):
        super(TestAlarmAggregation, self).setUp()
        self.ctxt = utils.dummy_context()

    def test_init(self):
        # Construction stores the supplied request context.
        aam = alarm_aggregation.AlarmAggregation(self.ctxt)
        self.assertIsNotNone(aam)
        self.assertEqual(self.ctxt, aam.context)

    @mock.patch.object(alarm_aggregation, 'LOG')
    @mock.patch.object(sdk, 'OpenStackDriver')
    @mock.patch.object(alarm_aggregation, 'db_api')
    def test_update_alarm_summary(self, mock_db_api, mock_openstack_driver,
                                  mock_logging):
        # Route OpenStackDriver construction to the per-region fakes above.
        mock_openstack_driver.side_effect = FakeOpenStackDriver
        aam = alarm_aggregation.AlarmAggregation(self.ctxt)
        # subcloud1: one critical alarm -> overall status 'critical'.
        fake_openstackdriver = FakeOpenStackDriver('subcloud1')
        aam.update_alarm_summary('subcloud1', fake_openstackdriver.fm_client)
        expected_alarm_update = {'critical_alarms': 1,
                                 'major_alarms': 2,
                                 'minor_alarms': 3,
                                 'warnings': 4,
                                 'cloud_status': 'critical'}
        mock_db_api.subcloud_alarms_update.assert_called_with(
            self.ctxt, 'subcloud1', expected_alarm_update)
        # subcloud2: no criticals but a major alarm -> status 'degraded'.
        fake_openstackdriver = FakeOpenStackDriver('subcloud2')
        aam.update_alarm_summary('subcloud2', fake_openstackdriver.fm_client)
        expected_alarm_update = {'critical_alarms': 0,
                                 'major_alarms': 1,
                                 'minor_alarms': 2,
                                 'warnings': 3,
                                 'cloud_status': 'degraded'}
        mock_db_api.subcloud_alarms_update.assert_called_with(
            self.ctxt, 'subcloud2', expected_alarm_update)
        # subcloud3: only a warning -> status 'OK'.
        fake_openstackdriver = FakeOpenStackDriver('subcloud3')
        aam.update_alarm_summary('subcloud3', fake_openstackdriver.fm_client)
        expected_alarm_update = {'critical_alarms': 0,
                                 'major_alarms': 0,
                                 'minor_alarms': 0,
                                 'warnings': 1,
                                 'cloud_status': 'OK'}
        mock_db_api.subcloud_alarms_update.assert_called_with(
            self.ctxt, 'subcloud3', expected_alarm_update)
        # subcloud4: the FM query raises SubcloudNotFound; the failure is
        # expected to be logged rather than raised.
        fake_openstackdriver = FakeOpenStackDriver('subcloud4')
        aam.update_alarm_summary('subcloud4', fake_openstackdriver.fm_client)
        mock_logging.error.assert_called_with('Failed to update alarms for '
                                              'subcloud4 error: Subcloud with '
                                              'id subcloud4 doesn\'t exist.')

View File

@ -0,0 +1,45 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Copyright (c) 2020 Wind River Systems, Inc.
#
# The right to copy, distribute, modify, or otherwise make use
# of this software may be licensed only pursuant to the terms
# of an applicable Wind River license agreement.
#
from dcmanager.audit import service
from dcmanager.common import scheduler
from dcmanager.tests import base
from dcmanager.tests import utils
from oslo_config import cfg
CONF = cfg.CONF
class TestDCManagerAuditService(base.DCManagerTestCase):
    """Basic construction tests for the dcmanager audit service."""

    def setUp(self):
        super(TestDCManagerAuditService, self).setUp()
        self.tenant_id = 'fake_admin'
        # Thread group manager used by the service for background tasks.
        self.thm = scheduler.ThreadGroupManager()
        self.context = utils.dummy_context(user='test_user',
                                           tenant=self.tenant_id)
        self.service_obj = service.DCManagerAuditService()

    def test_init(self):
        # The service defaults to the local host and the audit RPC topic.
        self.assertEqual(self.service_obj.host, 'localhost')
        self.assertEqual(self.service_obj.topic, 'dcmanager-audit')

    def test_init_tgm(self):
        # init_tgm must create the service's thread group (TG).
        self.service_obj.init_tgm()
        self.assertIsNotNone(self.service_obj.TG)

View File

@ -24,19 +24,25 @@ import sys
sys.modules['fm_core'] = mock.Mock()
from dccommon import consts as dccommon_consts
from dcmanager.audit import subcloud_audit_manager
from dcmanager.common import consts
from dcmanager.db.sqlalchemy import api as db_api
from dcmanager.manager import subcloud_audit_manager
from dcmanager.manager import subcloud_manager
# from dcmanager.manager import subcloud_manager
from dcmanager.tests import base
class FakeDCOrchAPI(object):
class FakeDCManagerAPI(object):
def __init__(self):
self.update_subcloud_states = mock.MagicMock()
self.add_subcloud_sync_endpoint_type = mock.MagicMock()
self.update_subcloud_availability = mock.MagicMock()
self.update_subcloud_sync_endpoint_type = mock.MagicMock()
class FakeAlarmAggregation(object):
    """Fake alarm aggregation object recording update_alarm_summary calls."""

    def __init__(self):
        # MagicMock lets the tests assert on calls made by the audit manager.
        self.update_alarm_summary = mock.MagicMock()
class FakeServiceGroup(object):
@ -178,28 +184,35 @@ class FakeSysinvClient(object):
return self.get_applications_result
class FakeFmClient(object):
    """Minimal FM client stand-in; alarm summaries are irrelevant here."""

    def get_alarm_summary(self):
        # Alarm aggregation is mocked separately in these tests, so the
        # summary content does not matter; return nothing.
        return None
class FakeOpenStackDriver(object):
    """Fake OpenStackDriver wiring the fake sysinv and FM clients together."""

    def __init__(self, region_name):
        # Placeholder region/session values for the fake sysinv client;
        # presumably ignored by the fakes — confirm against FakeSysinvClient.
        self.sysinv_client = FakeSysinvClient('fake_region', 'fake_session')
        self.fm_client = FakeFmClient()
class TestAuditManager(base.DCManagerTestCase):
def setUp(self):
super(TestAuditManager, self).setUp()
# Mock the DCOrch API
self.fake_dcorch_api = FakeDCOrchAPI()
p = mock.patch('dcorch.rpc.client.EngineClient')
self.mock_dcorch_api = p.start()
self.mock_dcorch_api.return_value = self.fake_dcorch_api
# Mock the DCManager API
self.fake_dcmanager_api = FakeDCManagerAPI()
p = mock.patch('dcmanager.rpc.client.ManagerClient')
self.mock_dcmanager_api = p.start()
self.mock_dcmanager_api.return_value = self.fake_dcmanager_api
self.addCleanup(p.stop)
# Mock the SysinvClient
self.fake_sysinv_client = FakeSysinvClient('fake_region',
'fake_session')
p = mock.patch.object(subcloud_audit_manager, 'SysinvClient')
self.mock_sysinv_client = p.start()
self.mock_sysinv_client.return_value = self.fake_sysinv_client
self.addCleanup(p.stop)
# Mock the KeystoneClient
# Mock the OpenStackDriver
self.fake_openstack_client = FakeOpenStackDriver('fake_region')
p = mock.patch.object(subcloud_audit_manager, 'OpenStackDriver')
self.mock_openstack_driver = p.start()
self.mock_openstack_driver.return_value = self.fake_openstack_client
self.addCleanup(p.stop)
# Mock the context
@ -208,6 +221,15 @@ class TestAuditManager(base.DCManagerTestCase):
self.mock_context.get_admin_context.return_value = self.ctx
self.addCleanup(p.stop)
# Mock alarm aggregation
self.fake_alarm_aggr = FakeAlarmAggregation()
p = mock.patch.object(subcloud_audit_manager,
'alarm_aggregation')
self.mock_alarm_aggr = p.start()
self.mock_alarm_aggr.AlarmAggregation.return_value = \
self.fake_alarm_aggr
self.addCleanup(p.stop)
@staticmethod
def create_subcloud_static(ctxt, **kwargs):
values = {
@ -222,65 +244,52 @@ class TestAuditManager(base.DCManagerTestCase):
'systemcontroller_gateway_ip': "192.168.204.101",
'deploy_status': "not-deployed",
'openstack_installed': False,
'group_id': 1,
}
values.update(kwargs)
return db_api.subcloud_create(ctxt, **values)
def test_init(self):
sm = subcloud_manager.SubcloudManager()
am = subcloud_audit_manager.SubcloudAuditManager(subcloud_manager=sm)
am = subcloud_audit_manager.SubcloudAuditManager()
self.assertIsNotNone(am)
self.assertEqual('subcloud_audit_manager', am.service_name)
self.assertEqual('localhost', am.host)
self.assertEqual(self.ctx, am.context)
def test_periodic_subcloud_audit(self):
mock_sm = mock.Mock()
am = subcloud_audit_manager.SubcloudAuditManager(
subcloud_manager=mock_sm)
am.periodic_subcloud_audit()
am = subcloud_audit_manager.SubcloudAuditManager()
am._periodic_subcloud_audit_loop()
def test_audit_subcloud_online(self):
subcloud = self.create_subcloud_static(self.ctx, name='subcloud1')
self.assertIsNotNone(subcloud)
mock_sm = mock.Mock()
am = subcloud_audit_manager.SubcloudAuditManager(
subcloud_manager=mock_sm)
# No stx-openstack application
self.fake_sysinv_client.get_application_results = []
am = subcloud_audit_manager.SubcloudAuditManager()
# Audit the subcloud
am._audit_subcloud(subcloud.name, update_subcloud_state=False,
audit_openstack=False)
# Verify the subcloud was set to online
self.fake_dcorch_api.update_subcloud_states.assert_called_with(
self.fake_dcmanager_api.update_subcloud_availability.assert_called_with(
mock.ANY, subcloud.name, consts.AVAILABILITY_ONLINE,
False, 0)
mock.ANY, subcloud.name, consts.MANAGEMENT_UNMANAGED,
consts.AVAILABILITY_ONLINE)
# Verify the openstack endpoints were not added
self.fake_dcorch_api.add_subcloud_sync_endpoint_type.\
# Verify the openstack endpoints were not updated
self.fake_dcmanager_api.update_subcloud_sync_endpoint_type.\
assert_not_called()
# Verify the subcloud openstack_installed was not updated
updated_subcloud = db_api.subcloud_get_by_name(self.ctx, 'subcloud1')
self.assertEqual(updated_subcloud.openstack_installed, False)
# Verify alarm update is called
self.fake_alarm_aggr.update_alarm_summary.assert_called_with(
subcloud.name, self.fake_openstack_client.fm_client)
def test_audit_subcloud_online_no_change(self):
subcloud = self.create_subcloud_static(self.ctx, name='subcloud1')
self.assertIsNotNone(subcloud)
mock_sm = mock.Mock()
am = subcloud_audit_manager.SubcloudAuditManager(
subcloud_manager=mock_sm)
# No stx-openstack application
self.fake_sysinv_client.get_application_results = []
am = subcloud_audit_manager.SubcloudAuditManager()
# Set the subcloud to online
db_api.subcloud_update(
@ -292,27 +301,23 @@ class TestAuditManager(base.DCManagerTestCase):
audit_openstack=False)
# Verify the subcloud state was not updated
self.fake_dcorch_api.update_subcloud_states.assert_not_called()
# Verify the openstack endpoints were not added
self.fake_dcorch_api.add_subcloud_sync_endpoint_type.\
self.fake_dcmanager_api.update_subcloud_availability.\
assert_not_called()
# Verify the subcloud openstack_installed was not updated
updated_subcloud = db_api.subcloud_get_by_name(self.ctx, 'subcloud1')
self.assertEqual(updated_subcloud.openstack_installed, False)
# Verify the openstack endpoints were not added
self.fake_dcmanager_api.update_subcloud_sync_endpoint_type.\
assert_not_called()
# Verify alarm update is called
self.fake_alarm_aggr.update_alarm_summary.assert_called_with(
'subcloud1', self.fake_openstack_client.fm_client)
def test_audit_subcloud_online_no_change_force_update(self):
subcloud = self.create_subcloud_static(self.ctx, name='subcloud1')
self.assertIsNotNone(subcloud)
mock_sm = mock.Mock()
am = subcloud_audit_manager.SubcloudAuditManager(
subcloud_manager=mock_sm)
# No stx-openstack application
self.fake_sysinv_client.get_application_results = []
am = subcloud_audit_manager.SubcloudAuditManager()
# Set the subcloud to online
db_api.subcloud_update(
@ -324,26 +329,24 @@ class TestAuditManager(base.DCManagerTestCase):
audit_openstack=False)
# Verify the subcloud state was updated even though no change
self.fake_dcorch_api.update_subcloud_states.assert_called_with(
mock.ANY, 'subcloud1', consts.MANAGEMENT_UNMANAGED,
consts.AVAILABILITY_ONLINE)
self.fake_dcmanager_api.update_subcloud_availability.assert_called_with(
mock.ANY, subcloud.name, consts.AVAILABILITY_ONLINE,
True, None)
# Verify the openstack endpoints were not added
self.fake_dcorch_api.add_subcloud_sync_endpoint_type.\
# Verify the openstack endpoints were not updated
self.fake_dcmanager_api.update_subcloud_sync_endpoint_type.\
assert_not_called()
# Verify the subcloud openstack_installed was not updated
updated_subcloud = db_api.subcloud_get_by_name(self.ctx, 'subcloud1')
self.assertEqual(updated_subcloud.openstack_installed, False)
# Verify alarm update is called
self.fake_alarm_aggr.update_alarm_summary.assert_called_with(
'subcloud1', self.fake_openstack_client.fm_client)
def test_audit_subcloud_offline(self):
def test_audit_subcloud_go_offline(self):
subcloud = self.create_subcloud_static(self.ctx, name='subcloud1')
self.assertIsNotNone(subcloud)
mock_sm = mock.Mock()
am = subcloud_audit_manager.SubcloudAuditManager(
subcloud_manager=mock_sm)
am = subcloud_audit_manager.SubcloudAuditManager()
# Set the subcloud to online
db_api.subcloud_update(
@ -351,58 +354,161 @@ class TestAuditManager(base.DCManagerTestCase):
availability_status=consts.AVAILABILITY_ONLINE)
# Mark a service group as inactive
self.fake_sysinv_client.get_service_groups_result = \
self.fake_openstack_client.sysinv_client.get_service_groups_result = \
copy.deepcopy(FAKE_SERVICE_GROUPS)
self.fake_sysinv_client.get_service_groups_result[3].state = 'inactive'
self.fake_openstack_client.sysinv_client. \
get_service_groups_result[3].state = 'inactive'
# Audit the subcloud
am._audit_subcloud(subcloud.name, update_subcloud_state=False,
audit_openstack=False)
# Verify the subcloud was not set to offline
self.fake_dcorch_api.update_subcloud_states.assert_not_called()
# Verify the audit fail count was updated
audit_fail_count = 1
self.fake_dcmanager_api.update_subcloud_availability.\
assert_called_with(mock.ANY, subcloud.name,
None, False, audit_fail_count)
# Verify the audit_fail_count was updated
updated_subcloud = db_api.subcloud_get_by_name(self.ctx, 'subcloud1')
self.assertEqual(updated_subcloud.audit_fail_count, 1)
db_api.subcloud_update(self.ctx, subcloud.id,
audit_fail_count=audit_fail_count)
# Audit the subcloud again
am._audit_subcloud(subcloud.name, update_subcloud_state=False,
audit_openstack=False)
audit_fail_count = audit_fail_count + 1
# Verify the subcloud was set to offline
self.fake_dcorch_api.update_subcloud_states.assert_called_with(
mock.ANY, 'subcloud1', consts.MANAGEMENT_UNMANAGED,
consts.AVAILABILITY_OFFLINE)
self.fake_dcmanager_api.update_subcloud_availability.\
assert_called_with(mock.ANY, subcloud.name,
consts.AVAILABILITY_OFFLINE, False,
audit_fail_count)
# Verify the sublcoud availability was updated
updated_subcloud = db_api.subcloud_get_by_name(self.ctx, 'subcloud1')
self.assertEqual(updated_subcloud.availability_status,
consts.AVAILABILITY_OFFLINE)
def test_audit_subcloud_online_with_openstack(self):
# Verify alarm update is called only once
self.fake_alarm_aggr.update_alarm_summary.assert_called_once_with(
subcloud.name, self.fake_openstack_client.fm_client)
def test_audit_subcloud_offline_no_change(self):
subcloud = self.create_subcloud_static(self.ctx, name='subcloud1')
self.assertIsNotNone(subcloud)
mock_sm = mock.Mock()
am = subcloud_audit_manager.SubcloudAuditManager(
subcloud_manager=mock_sm)
am = subcloud_audit_manager.SubcloudAuditManager()
db_api.subcloud_update(self.ctx, subcloud.id,
audit_fail_count=consts.AVAIL_FAIL_COUNT_MAX)
# Mark a service group as inactive
self.fake_openstack_client.sysinv_client.get_service_groups_result = \
copy.deepcopy(FAKE_SERVICE_GROUPS)
self.fake_openstack_client.sysinv_client. \
get_service_groups_result[3].state = 'inactive'
# Audit the subcloud
am._audit_subcloud(subcloud.name, update_subcloud_state=False,
audit_openstack=True)
# Verify the subcloud was set to online
self.fake_dcorch_api.update_subcloud_states.assert_called_with(
mock.ANY, 'subcloud1', consts.MANAGEMENT_UNMANAGED,
consts.AVAILABILITY_ONLINE)
# Verify the subcloud state was not updated
self.fake_dcmanager_api.update_subcloud_availability.\
assert_not_called()
# Verify the openstack endpoints were not updated
self.fake_dcmanager_api.update_subcloud_sync_endpoint_type.\
assert_not_called()
# Verify alarm update is not called
self.fake_alarm_aggr.update_alarm_summary.assert_not_called()
def test_audit_subcloud_online_with_openstack_installed(self):
subcloud = self.create_subcloud_static(self.ctx, name='subcloud1')
self.assertIsNotNone(subcloud)
am = subcloud_audit_manager.SubcloudAuditManager()
# Set the subcloud to online
db_api.subcloud_update(
self.ctx, subcloud.id,
availability_status=consts.AVAILABILITY_ONLINE)
# Audit the subcloud
am._audit_subcloud(subcloud.name, update_subcloud_state=False,
audit_openstack=True)
# Verify the subcloud state was not updated
self.fake_dcmanager_api.update_subcloud_availability.\
assert_not_called()
# Verify the openstack endpoints were added
self.fake_dcorch_api.add_subcloud_sync_endpoint_type.\
assert_called_with(mock.ANY, 'subcloud1',
dccommon_consts.ENDPOINT_TYPES_LIST_OS)
# self.fake_dcmanager_api.update_subcloud_sync_endpoint_type.\
# assert_called_with(mock.ANY, 'subcloud1',
# dccommon_consts.ENDPOINT_TYPES_LIST_OS,
# True)
# Verify the subcloud openstack_installed was updated
updated_subcloud = db_api.subcloud_get_by_name(self.ctx, 'subcloud1')
self.assertEqual(updated_subcloud.openstack_installed, True)
# Verify alarm update is called
self.fake_alarm_aggr.update_alarm_summary.assert_called_once_with(
'subcloud1', self.fake_openstack_client.fm_client)
def test_audit_subcloud_online_with_openstack_removed(self):
subcloud = self.create_subcloud_static(self.ctx, name='subcloud1')
self.assertIsNotNone(subcloud)
am = subcloud_audit_manager.SubcloudAuditManager()
# Set the subcloud to online and openstack installed
db_api.subcloud_update(
self.ctx, subcloud.id,
availability_status=consts.AVAILABILITY_ONLINE,
openstack_installed=True)
# Remove stx-openstack application
FAKE_APPLICATIONS.pop(1)
# Audit the subcloud
am._audit_subcloud(subcloud.name, update_subcloud_state=False,
audit_openstack=True)
# Verify the subcloud state was not updated
self.fake_dcmanager_api.update_subcloud_availability.\
assert_not_called()
# Verify the openstack endpoints were removed
self.fake_dcmanager_api.update_subcloud_sync_endpoint_type.\
assert_called_with(mock.ANY, 'subcloud1',
dccommon_consts.ENDPOINT_TYPES_LIST_OS, False)
# Verify alarm update is called
self.fake_alarm_aggr.update_alarm_summary.assert_called_once_with(
'subcloud1', self.fake_openstack_client.fm_client)
def test_audit_subcloud_online_with_openstack_inactive(self):
subcloud = self.create_subcloud_static(self.ctx, name='subcloud1')
self.assertIsNotNone(subcloud)
am = subcloud_audit_manager.SubcloudAuditManager()
# Set the subcloud to online and openstack installed
db_api.subcloud_update(
self.ctx, subcloud.id,
availability_status=consts.AVAILABILITY_ONLINE,
openstack_installed=True)
# stx-openstack application is not active
FAKE_APPLICATIONS[1].active = False
# Audit the subcloud
am._audit_subcloud(subcloud.name, update_subcloud_state=False,
audit_openstack=True)
# Verify the subcloud state was not updated
self.fake_dcmanager_api.update_subcloud_availability.\
assert_not_called()
# Verify the openstack endpoints were removed
self.fake_dcmanager_api.update_subcloud_sync_endpoint_type.\
assert_called_with(mock.ANY, 'subcloud1',
dccommon_consts.ENDPOINT_TYPES_LIST_OS, False)
# Verify alarm update is called
self.fake_alarm_aggr.update_alarm_summary.assert_called_once_with(
'subcloud1', self.fake_openstack_client.fm_client)

View File

@ -0,0 +1,137 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Copyright (c) 2020 Wind River Systems, Inc.
#
# The right to copy, distribute, modify, or otherwise make use
# of this software may be licensed only pursuant to the terms
# of an applicable Wind River license agreement.
#
import sqlalchemy
from oslo_config import cfg
from oslo_db import exception as db_exception
from oslo_db import options
from dcmanager.common import consts
from dcmanager.common import exceptions as exception
from dcmanager.db import api as api
from dcmanager.db.sqlalchemy import api as db_api
from dcmanager.tests import base
from dcmanager.tests import utils
get_engine = api.get_engine
class DBAPISubcloudAlarm(base.DCManagerTestCase):
    """Tests for the subcloud alarm-summary DB API (subcloud_alarms_*)."""

    def setup_dummy_db(self):
        """Point oslo.db at an in-memory sqlite DB and build the schema."""
        options.cfg.set_defaults(options.database_opts,
                                 sqlite_synchronous=False)
        options.set_defaults(cfg.CONF, connection="sqlite://")
        engine = get_engine()
        db_api.db_sync(engine)
        engine.connect()

    @staticmethod
    def reset_dummy_db():
        """Delete all rows from every table, keeping migration bookkeeping."""
        engine = get_engine()
        meta = sqlalchemy.MetaData()
        meta.reflect(bind=engine)
        for table in reversed(meta.sorted_tables):
            # Keep sqlalchemy-migrate's version table so the schema state
            # remains consistent between tests.
            if table.name == 'migrate_version':
                continue
            engine.execute(table.delete())

    @staticmethod
    def create_subcloud_alarms(ctxt, name):
        """Create an alarm-summary row with 'no audit yet' sentinel values."""
        values = {'critical_alarms': -1,
                  'major_alarms': -1,
                  'minor_alarms': -1,
                  'warnings': -1,
                  'cloud_status': consts.ALARMS_DISABLED}
        return db_api.subcloud_alarms_create(ctxt, name, values)

    def setUp(self):
        super(DBAPISubcloudAlarm, self).setUp()
        self.setup_dummy_db()
        self.addCleanup(self.reset_dummy_db)
        self.ctxt = utils.dummy_context()

    def test_subcloud_alarms_create(self):
        result = self.create_subcloud_alarms(self.ctxt, 'subcloud1')
        self.assertIsNotNone(result)
        self.assertEqual(result['name'], 'subcloud1')
        self.assertEqual(result['cloud_status'], 'disabled')

    def test_subcloud_alarms_create_duplicate(self):
        result = self.create_subcloud_alarms(self.ctxt, 'subcloud1')
        self.assertIsNotNone(result)
        # Creating a second row with the same name must fail.  Use
        # self.ctxt for consistency with the rest of this class
        # (previously self.ctx, which relied on a base-class fixture).
        self.assertRaises(db_exception.DBDuplicateEntry,
                          self.create_subcloud_alarms,
                          self.ctxt, 'subcloud1')

    def test_subcloud_alarms_get(self):
        result = self.create_subcloud_alarms(self.ctxt, 'subcloud1')
        self.assertIsNotNone(result)
        subcloud = db_api.subcloud_alarms_get(self.ctxt, 'subcloud1')
        self.assertIsNotNone(subcloud)
        self.assertEqual(subcloud['name'], 'subcloud1')

    def test_subcloud_alarms_get_not_found(self):
        result = self.create_subcloud_alarms(self.ctxt, 'subcloud1')
        self.assertIsNotNone(result)
        # Fetching an unknown name raises SubcloudNotFound (self.ctxt used
        # for consistency; was self.ctx).
        self.assertRaises(exception.SubcloudNotFound,
                          db_api.subcloud_alarms_get,
                          self.ctxt, 'subcloud2')

    def test_subcloud_alarms_get_all(self):
        result = self.create_subcloud_alarms(self.ctxt, 'subcloud1')
        self.assertIsNotNone(result)
        result = self.create_subcloud_alarms(self.ctxt, 'subcloud2')
        self.assertIsNotNone(result)
        subclouds = db_api.subcloud_alarms_get_all(self.ctxt)
        self.assertEqual(len(subclouds), 2)
        # The most recently created row is returned first.
        self.assertEqual(subclouds[0]['name'], 'subcloud2')
        self.assertEqual(subclouds[1]['name'], 'subcloud1')

    def test_subcloud_alarms_get_one(self):
        result = self.create_subcloud_alarms(self.ctxt, 'subcloud1')
        self.assertIsNotNone(result)
        result = self.create_subcloud_alarms(self.ctxt, 'subcloud2')
        self.assertIsNotNone(result)
        # Passing a name filters the result set down to that subcloud.
        subclouds = db_api.subcloud_alarms_get_all(self.ctxt, 'subcloud1')
        self.assertEqual(subclouds[0]['name'], 'subcloud1')

    def test_subcloud_alarms_update(self):
        result = self.create_subcloud_alarms(self.ctxt, 'subcloud1')
        self.assertIsNotNone(result)
        values = {'critical_alarms': 0,
                  'major_alarms': 1,
                  'minor_alarms': 2,
                  'warnings': 3,
                  'cloud_status': consts.ALARM_DEGRADED_STATUS}
        result = db_api.subcloud_alarms_update(self.ctxt, 'subcloud1', values)
        self.assertIsNotNone(result)
        self.assertEqual(result['major_alarms'], 1)
        # Re-read to confirm the update was persisted.
        subcloud = db_api.subcloud_alarms_get(self.ctxt, 'subcloud1')
        self.assertIsNotNone(subcloud)
        self.assertEqual(subcloud['major_alarms'], 1)

    def test_subcloud_alarms_delete(self):
        result = self.create_subcloud_alarms(self.ctxt, 'subcloud1')
        self.assertIsNotNone(result)
        db_api.subcloud_alarms_delete(self.ctxt, 'subcloud1')
        subclouds = db_api.subcloud_alarms_get_all(self.ctxt)
        self.assertEqual(len(subclouds), 0)

View File

@ -83,6 +83,7 @@ class DBAPISubcloudTest(base.DCManagerTestCase):
'systemcontroller_gateway_ip': "192.168.204.101",
'deploy_status': "not-deployed",
'openstack_installed': False,
'group_id': 1,
}
values.update(kwargs)
return db_api.subcloud_create(ctxt, **values)
@ -102,6 +103,7 @@ class DBAPISubcloudTest(base.DCManagerTestCase):
'systemcontroller_gateway_address'],
'deploy_status': "not-deployed",
'openstack_installed': False,
'group_id': 1,
}
return db_api.subcloud_create(ctxt, **values)

View File

@ -53,8 +53,14 @@ class Subcloud(object):
class Load(object):
def __init__(self, software_version):
def __init__(self, software_version, state):
self.software_version = software_version
self.state = state
class Upgrade(object):
    """Fake upgrade record holding only an upgrade state string."""

    def __init__(self, state):
        # e.g. 'started' — consumed by the patch audit tests below.
        self.state = state
class FakePatchingClientInSync(object):
@ -175,11 +181,47 @@ class FakePatchingClientExtraPatches(object):
class FakeSysinvClientOneLoad(object):
def __init__(self, region, session):
self.loads = [Load('17.07')]
self.loads = [Load('17.07', 'active')]
self.upgrades = []
def get_loads(self):
return self.loads
def get_upgrades(self):
return self.upgrades
class FakeSysinvClientOneLoadUnmatchedSoftwareVersion(object):
    """Fake sysinv client where 'subcloud2' reports an older software load."""

    def __init__(self, region, session):
        self.region = region
        self.loads = [Load('17.07', 'active')]
        self.upgrades = []

    def get_loads(self):
        # subcloud2 deliberately reports a mismatched version (17.06) so the
        # load endpoint is flagged out-of-sync; other regions match.
        if self.region != 'subcloud2':
            return self.loads
        return [Load('17.06', 'active')]

    def get_upgrades(self):
        return self.upgrades
class FakeSysinvClientOneLoadUpgradeInProgress(object):
    """Fake sysinv client where 'subcloud2' has an upgrade in progress."""

    def __init__(self, region, session):
        self.region = region
        self.loads = [Load('17.07', 'active')]
        self.upgrades = []

    def get_loads(self):
        return self.loads

    def get_upgrades(self):
        # subcloud2 reports a started upgrade so the load endpoint is flagged
        # out-of-sync; other regions report no upgrade in progress.
        if self.region != 'subcloud2':
            return self.upgrades
        return [Upgrade('started')]
class TestAuditManager(base.DCManagerTestCase):
def setUp(self):
@ -232,10 +274,18 @@ class TestAuditManager(base.DCManagerTestCase):
subcloud_name='subcloud1',
endpoint_type=dcorch_consts.ENDPOINT_TYPE_PATCHING,
sync_status=consts.SYNC_STATUS_IN_SYNC),
mock.call(mock.ANY,
subcloud_name='subcloud1',
endpoint_type=dcorch_consts.ENDPOINT_TYPE_LOAD,
sync_status=consts.SYNC_STATUS_IN_SYNC),
mock.call(mock.ANY,
subcloud_name='subcloud2',
endpoint_type=dcorch_consts.ENDPOINT_TYPE_PATCHING,
sync_status=consts.SYNC_STATUS_IN_SYNC),
mock.call(mock.ANY,
subcloud_name='subcloud2',
endpoint_type=dcorch_consts.ENDPOINT_TYPE_LOAD,
sync_status=consts.SYNC_STATUS_IN_SYNC),
]
mock_sm.update_subcloud_endpoint_status.assert_has_calls(
expected_calls)
@ -277,22 +327,39 @@ class TestAuditManager(base.DCManagerTestCase):
subcloud_name='subcloud1',
endpoint_type=dcorch_consts.ENDPOINT_TYPE_PATCHING,
sync_status=consts.SYNC_STATUS_OUT_OF_SYNC),
mock.call(mock.ANY,
subcloud_name='subcloud1',
endpoint_type=dcorch_consts.ENDPOINT_TYPE_LOAD,
sync_status=consts.SYNC_STATUS_IN_SYNC),
mock.call(mock.ANY,
subcloud_name='subcloud2',
endpoint_type=dcorch_consts.ENDPOINT_TYPE_PATCHING,
sync_status=consts.SYNC_STATUS_OUT_OF_SYNC),
mock.call(mock.ANY,
subcloud_name='subcloud2',
endpoint_type=dcorch_consts.ENDPOINT_TYPE_LOAD,
sync_status=consts.SYNC_STATUS_IN_SYNC),
mock.call(mock.ANY,
subcloud_name='subcloud3',
endpoint_type=dcorch_consts.ENDPOINT_TYPE_PATCHING,
sync_status=consts.SYNC_STATUS_IN_SYNC),
mock.call(mock.ANY,
subcloud_name='subcloud3',
endpoint_type=dcorch_consts.ENDPOINT_TYPE_LOAD,
sync_status=consts.SYNC_STATUS_IN_SYNC),
mock.call(mock.ANY,
subcloud_name='subcloud4',
endpoint_type=dcorch_consts.ENDPOINT_TYPE_PATCHING,
sync_status=consts.SYNC_STATUS_OUT_OF_SYNC),
mock.call(mock.ANY,
subcloud_name='subcloud4',
endpoint_type=dcorch_consts.ENDPOINT_TYPE_LOAD,
sync_status=consts.SYNC_STATUS_IN_SYNC),
]
mock_sm.update_subcloud_endpoint_status.assert_has_calls(
expected_calls)
@mock.patch.object(patch_audit_manager, 'SysinvClient')
@mock.patch.object(patch_audit_manager, 'db_api')
@mock.patch.object(patch_audit_manager, 'PatchingClient')
@mock.patch.object(patch_audit_manager, 'OpenStackDriver')
@ -301,7 +368,8 @@ class TestAuditManager(base.DCManagerTestCase):
self, mock_context,
mock_openstack_driver,
mock_patching_client,
mock_db_api):
mock_db_api,
mock_sysinv_client):
mock_context.get_admin_context.return_value = self.ctxt
mock_sm = mock.Mock()
am = patch_audit_manager.PatchAuditManager(
@ -349,10 +417,110 @@ class TestAuditManager(base.DCManagerTestCase):
subcloud_name='subcloud1',
endpoint_type=dcorch_consts.ENDPOINT_TYPE_PATCHING,
sync_status=consts.SYNC_STATUS_OUT_OF_SYNC),
mock.call(mock.ANY,
subcloud_name='subcloud1',
endpoint_type=dcorch_consts.ENDPOINT_TYPE_LOAD,
sync_status=consts.SYNC_STATUS_IN_SYNC),
mock.call(mock.ANY,
subcloud_name='subcloud2',
endpoint_type=dcorch_consts.ENDPOINT_TYPE_PATCHING,
sync_status=consts.SYNC_STATUS_OUT_OF_SYNC),
mock.call(mock.ANY,
subcloud_name='subcloud2',
endpoint_type=dcorch_consts.ENDPOINT_TYPE_LOAD,
sync_status=consts.SYNC_STATUS_IN_SYNC),
]
mock_sm.update_subcloud_endpoint_status.assert_has_calls(
expected_calls)
@mock.patch.object(patch_audit_manager, 'SysinvClient')
@mock.patch.object(patch_audit_manager, 'db_api')
@mock.patch.object(patch_audit_manager, 'PatchingClient')
@mock.patch.object(patch_audit_manager, 'OpenStackDriver')
@mock.patch.object(patch_audit_manager, 'context')
def test_periodic_patch_audit_unmatched_software_version(
self, mock_context,
mock_openstack_driver,
mock_patching_client,
mock_db_api,
mock_sysinv_client):
mock_context.get_admin_context.return_value = self.ctxt
mock_sm = mock.Mock()
am = patch_audit_manager.PatchAuditManager(subcloud_manager=mock_sm)
mock_patching_client.side_effect = FakePatchingClientInSync
mock_sysinv_client.side_effect = FakeSysinvClientOneLoadUnmatchedSoftwareVersion
fake_subcloud1 = Subcloud(1, 'subcloud1',
is_managed=True, is_online=True)
fake_subcloud2 = Subcloud(2, 'subcloud2',
is_managed=True, is_online=True)
mock_db_api.subcloud_get_all.return_value = [fake_subcloud1,
fake_subcloud2]
am._periodic_patch_audit_loop()
expected_calls = [
mock.call(mock.ANY,
subcloud_name='subcloud1',
endpoint_type=dcorch_consts.ENDPOINT_TYPE_PATCHING,
sync_status=consts.SYNC_STATUS_IN_SYNC),
mock.call(mock.ANY,
subcloud_name='subcloud1',
endpoint_type=dcorch_consts.ENDPOINT_TYPE_LOAD,
sync_status=consts.SYNC_STATUS_IN_SYNC),
mock.call(mock.ANY,
subcloud_name='subcloud2',
endpoint_type=dcorch_consts.ENDPOINT_TYPE_PATCHING,
sync_status=consts.SYNC_STATUS_IN_SYNC),
mock.call(mock.ANY,
subcloud_name='subcloud2',
endpoint_type=dcorch_consts.ENDPOINT_TYPE_LOAD,
sync_status=consts.SYNC_STATUS_OUT_OF_SYNC),
]
mock_sm.update_subcloud_endpoint_status.assert_has_calls(
expected_calls)
@mock.patch.object(patch_audit_manager, 'SysinvClient')
@mock.patch.object(patch_audit_manager, 'db_api')
@mock.patch.object(patch_audit_manager, 'PatchingClient')
@mock.patch.object(patch_audit_manager, 'OpenStackDriver')
@mock.patch.object(patch_audit_manager, 'context')
def test_periodic_patch_audit_upgrade_in_progress(
self, mock_context,
mock_openstack_driver,
mock_patching_client,
mock_db_api,
mock_sysinv_client):
mock_context.get_admin_context.return_value = self.ctxt
mock_sm = mock.Mock()
am = patch_audit_manager.PatchAuditManager(subcloud_manager=mock_sm)
mock_patching_client.side_effect = FakePatchingClientInSync
mock_sysinv_client.side_effect = FakeSysinvClientOneLoadUpgradeInProgress
fake_subcloud1 = Subcloud(1, 'subcloud1',
is_managed=True, is_online=True)
fake_subcloud2 = Subcloud(2, 'subcloud2',
is_managed=True, is_online=True)
mock_db_api.subcloud_get_all.return_value = [fake_subcloud1,
fake_subcloud2]
am._periodic_patch_audit_loop()
expected_calls = [
mock.call(mock.ANY,
subcloud_name='subcloud1',
endpoint_type=dcorch_consts.ENDPOINT_TYPE_PATCHING,
sync_status=consts.SYNC_STATUS_IN_SYNC),
mock.call(mock.ANY,
subcloud_name='subcloud1',
endpoint_type=dcorch_consts.ENDPOINT_TYPE_LOAD,
sync_status=consts.SYNC_STATUS_IN_SYNC),
mock.call(mock.ANY,
subcloud_name='subcloud2',
endpoint_type=dcorch_consts.ENDPOINT_TYPE_PATCHING,
sync_status=consts.SYNC_STATUS_IN_SYNC),
mock.call(mock.ANY,
subcloud_name='subcloud2',
endpoint_type=dcorch_consts.ENDPOINT_TYPE_LOAD,
sync_status=consts.SYNC_STATUS_OUT_OF_SYNC),
]
mock_sm.update_subcloud_endpoint_status.assert_has_calls(
expected_calls)

View File

@ -22,7 +22,7 @@ import mock
import sys
sys.modules['fm_core'] = mock.Mock()
from dcmanager.manager import scheduler
from dcmanager.common import scheduler
from dcmanager.manager import service
from dcmanager.tests import base
from dcmanager.tests import utils
@ -56,10 +56,8 @@ class TestDCManagerService(base.DCManagerTestCase):
self.service_obj.init_tgm()
self.assertIsNotNone(self.service_obj.TG)
@mock.patch.object(service, 'SubcloudAuditManager')
def test_init_audit_managers(self, mock_audit_manager):
def test_init_audit_managers(self):
self.service_obj.init_audit_managers()
self.assertIsNotNone(self.service_obj.subcloud_audit_manager)
self.assertIsNotNone(self.service_obj.patch_audit_manager)
@mock.patch.object(service, 'SwUpdateManager')
@ -72,29 +70,16 @@ class TestDCManagerService(base.DCManagerTestCase):
@mock.patch.object(service, 'SwUpdateManager')
@mock.patch.object(service, 'SubcloudManager')
@mock.patch.object(service, 'SubcloudAuditManager')
@mock.patch.object(service, 'rpc_messaging')
def test_start(self, mock_rpc, mock_audit_manager, mock_subcloud_manager,
def test_start(self, mock_rpc, mock_subcloud_manager,
mock_sw_update_manager):
self.service_obj.start()
mock_rpc.get_rpc_server.assert_called_once_with(
self.service_obj.target, self.service_obj)
mock_rpc.get_rpc_server().start.assert_called_once_with()
@mock.patch.object(service, 'SubcloudAuditManager')
@mock.patch.object(service, 'PatchAuditManager')
def test_periodic_audit_subclouds(self, mock_patch_audit_manager,
mock_subcloud_audit_manager):
self.service_obj.init_tgm()
self.service_obj.init_audit_managers()
self.service_obj.subcloud_audit()
mock_subcloud_audit_manager().periodic_subcloud_audit.\
assert_called_once_with()
@mock.patch.object(service, 'SubcloudAuditManager')
@mock.patch.object(service, 'PatchAuditManager')
def test_periodic_audit_patches(self, mock_patch_audit_manager,
mock_subcloud_audit_manager):
def test_periodic_audit_patches(self, mock_patch_audit_manager):
self.service_obj.init_tgm()
self.service_obj.init_audit_managers()
self.service_obj.patch_audit()
@ -131,25 +116,24 @@ class TestDCManagerService(base.DCManagerTestCase):
self.service_obj.update_subcloud(
self.context, subcloud_id=1, management_state='testmgmtstatus')
mock_subcloud_manager().update_subcloud.\
assert_called_once_with(self.context, mock.ANY, mock.ANY, mock.ANY,
mock.ANY)
assert_called_once_with(self.context, mock.ANY,
mock.ANY, mock.ANY,
mock.ANY, mock.ANY)
@mock.patch.object(service, 'SwUpdateManager')
@mock.patch.object(service, 'SubcloudManager')
@mock.patch.object(service, 'SubcloudAuditManager')
@mock.patch.object(service, 'rpc_messaging')
def test_stop_rpc_server(self, mock_rpc, mock_audit_manager,
mock_subcloud_manager, mock_sw_update_manager):
def test_stop_rpc_server(self, mock_rpc, mock_subcloud_manager,
mock_sw_update_manager):
self.service_obj.start()
self.service_obj._stop_rpc_server()
mock_rpc.get_rpc_server().stop.assert_called_once_with()
@mock.patch.object(service, 'SwUpdateManager')
@mock.patch.object(service, 'SubcloudManager')
@mock.patch.object(service, 'SubcloudAuditManager')
@mock.patch.object(service, 'rpc_messaging')
def test_stop(self, mock_rpc, mock_audit_manager,
mock_subcloud_manager, mock_sw_update_manager):
def test_stop(self, mock_rpc, mock_subcloud_manager,
mock_sw_update_manager):
self.service_obj.start()
self.service_obj.stop()
mock_rpc.get_rpc_server().stop.assert_called_once_with()

View File

@ -10,7 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
#
# Copyright (c) 2017 Wind River Systems, Inc.
# Copyright (c) 2017-2020 Wind River Systems, Inc.
#
# The right to copy, distribute, modify, or otherwise make use
# of this software may be licensed only pursuant to the terms
@ -27,6 +27,7 @@ sys.modules['fm_core'] = mock.Mock()
import threading
from dccommon import consts as dccommon_consts
from dcmanager.common import consts
from dcmanager.common import exceptions
from dcmanager.db.sqlalchemy import api as db_api
@ -34,13 +35,15 @@ from dcmanager.manager import subcloud_manager
from dcmanager.tests import base
from dcmanager.tests import utils
from dcorch.common import consts as dcorch_consts
from dcorch.rpc import client as dcorch_rpc_client
class FakeDCOrchAPI(object):
def __init__(self):
self.update_subcloud_states = mock.MagicMock()
self.add_subcloud_sync_endpoint_type = mock.MagicMock()
self.remove_subcloud_sync_endpoint_type = mock.MagicMock()
self.del_subcloud = mock.MagicMock()
self.add_subcloud = mock.MagicMock()
class FakeService(object):
@ -88,6 +91,10 @@ FAKE_CONTROLLERS = [
]
class FakeException(Exception):
pass
class Subcloud(object):
def __init__(self, data, is_online):
self.id = data['id']
@ -147,6 +154,7 @@ class TestSubcloudManager(base.DCManagerTestCase):
"systemcontroller_gateway_ip": "192.168.204.101",
'deploy_status': "not-deployed",
'openstack_installed': False,
'group_id': 1,
}
values.update(kwargs)
return db_api.subcloud_create(ctxt, **values)
@ -158,10 +166,10 @@ class TestSubcloudManager(base.DCManagerTestCase):
self.assertEqual('localhost', sm.host)
self.assertEqual(self.ctx, sm.context)
@mock.patch.object(subcloud_manager.SubcloudManager,
'_create_intermediate_ca_cert')
@mock.patch.object(subcloud_manager.SubcloudManager,
'_delete_subcloud_inventory')
@mock.patch.object(dcorch_rpc_client, 'EngineClient')
@mock.patch.object(subcloud_manager, 'context')
@mock.patch.object(subcloud_manager, 'KeystoneClient')
@mock.patch.object(subcloud_manager, 'db_api')
@mock.patch.object(subcloud_manager, 'SysinvClient')
@ -179,15 +187,16 @@ class TestSubcloudManager(base.DCManagerTestCase):
mock_write_subcloud_ansible_config,
mock_create_subcloud_inventory,
mock_create_addn_hosts, mock_sysinv_client,
mock_db_api, mock_keystone_client, mock_context,
mock_dcorch_rpc_client,
mock_delete_subcloud_inventory):
mock_db_api, mock_keystone_client,
mock_delete_subcloud_inventory,
mock_create_intermediate_ca_cert):
values = utils.create_subcloud_dict(base.SUBCLOUD_SAMPLE_DATA_0)
controllers = FAKE_CONTROLLERS
services = FAKE_SERVICES
mock_context.get_admin_context.return_value = self.ctx
mock_db_api.subcloud_get_by_name.side_effect = \
exceptions.SubcloudNameNotFound()
# dcmanager add_subcloud queries the data from the db
fake_subcloud = Subcloud(values, False)
mock_db_api.subcloud_get_by_name.return_value = fake_subcloud
mock_sysinv_client().get_controller_hosts.return_value = controllers
mock_keystone_client().services_list = services
@ -195,18 +204,46 @@ class TestSubcloudManager(base.DCManagerTestCase):
sm = subcloud_manager.SubcloudManager()
sm.add_subcloud(self.ctx, payload=values)
mock_db_api.subcloud_create.assert_called_once()
mock_db_api.subcloud_status_create.assert_called()
mock_sysinv_client().create_route.assert_called()
mock_dcorch_rpc_client().add_subcloud.assert_called_once()
self.fake_dcorch_api.add_subcloud.assert_called_once()
mock_create_addn_hosts.assert_called_once()
mock_create_subcloud_inventory.assert_called_once()
mock_write_subcloud_ansible_config.assert_called_once()
mock_keyring.get_password.assert_called()
mock_thread_start.assert_called_once()
mock_create_intermediate_ca_cert.assert_called_once()
@mock.patch.object(dcorch_rpc_client, 'EngineClient')
@mock.patch.object(subcloud_manager, 'context')
@mock.patch.object(subcloud_manager, 'KeystoneClient')
@mock.patch.object(subcloud_manager, 'db_api')
@mock.patch.object(subcloud_manager, 'SysinvClient')
def test_add_subcloud_deploy_prep_failed(self,
mock_sysinv_client,
mock_db_api,
mock_keystone_client):
values = utils.create_subcloud_dict(base.SUBCLOUD_SAMPLE_DATA_0)
controllers = FAKE_CONTROLLERS
services = FAKE_SERVICES
# dcmanager add_subcloud queries the data from the db
fake_subcloud = Subcloud(values, False)
mock_db_api.subcloud_get_by_name.return_value = fake_subcloud
self.fake_dcorch_api.add_subcloud.side_effect = FakeException('boom')
mock_sysinv_client().get_controller_hosts.return_value = controllers
mock_keystone_client().services_list = services
sm = subcloud_manager.SubcloudManager()
sm.add_subcloud(self.ctx, payload=values)
mock_db_api.subcloud_status_create.assert_called()
mock_sysinv_client().create_route.assert_called()
mock_db_api.subcloud_update.\
assert_called_with(self.ctx,
mock_db_api.subcloud_get_by_name().id,
deploy_status=consts.DEPLOY_STATE_DEPLOY_PREP_FAILED)
@mock.patch.object(subcloud_manager.SubcloudManager,
'_delete_subcloud_cert')
@mock.patch.object(subcloud_manager, 'db_api')
@mock.patch.object(subcloud_manager, 'SysinvClient')
@mock.patch.object(subcloud_manager, 'KeystoneClient')
@ -216,10 +253,8 @@ class TestSubcloudManager(base.DCManagerTestCase):
mock_keystone_client,
mock_sysinv_client,
mock_db_api,
mock_context,
mock_dcorch_rpc_client):
mock_delete_subcloud_cert):
controllers = FAKE_CONTROLLERS
mock_context.get_admin_context.return_value = self.ctx
data = utils.create_subcloud_dict(base.SUBCLOUD_SAMPLE_DATA_0)
fake_subcloud = Subcloud(data, False)
mock_db_api.subcloud_get.return_value = fake_subcloud
@ -230,21 +265,17 @@ class TestSubcloudManager(base.DCManagerTestCase):
mock_keystone_client().delete_region.assert_called_once()
mock_db_api.subcloud_destroy.assert_called_once()
mock_create_addn_hosts.assert_called_once()
mock_delete_subcloud_cert.assert_called_once()
@mock.patch.object(dcorch_rpc_client, 'EngineClient')
@mock.patch.object(subcloud_manager, 'context')
@mock.patch.object(subcloud_manager, 'KeystoneClient')
@mock.patch.object(subcloud_manager, 'db_api')
def test_update_subcloud(self, mock_db_api,
mock_endpoint, mock_context,
mock_dcorch_rpc_client):
mock_context.get_admin_context.return_value = self.ctx
def test_update_subcloud(self, mock_db_api):
data = utils.create_subcloud_dict(base.SUBCLOUD_SAMPLE_DATA_0)
subcloud_result = Subcloud(data, True)
mock_db_api.subcloud_get.return_value = subcloud_result
mock_db_api.subcloud_update.return_value = subcloud_result
sm = subcloud_manager.SubcloudManager()
sm.update_subcloud(self.ctx, data['id'],
sm.update_subcloud(self.ctx,
data['id'],
management_state=consts.MANAGEMENT_MANAGED,
description="subcloud new description",
location="subcloud new location")
@ -253,7 +284,29 @@ class TestSubcloudManager(base.DCManagerTestCase):
data['id'],
management_state=consts.MANAGEMENT_MANAGED,
description="subcloud new description",
location="subcloud new location")
location="subcloud new location",
group_id=None)
@mock.patch.object(subcloud_manager, 'db_api')
def test_update_subcloud_group_id(self, mock_db_api):
data = utils.create_subcloud_dict(base.SUBCLOUD_SAMPLE_DATA_0)
subcloud_result = Subcloud(data, True)
mock_db_api.subcloud_get.return_value = subcloud_result
mock_db_api.subcloud_update.return_value = subcloud_result
sm = subcloud_manager.SubcloudManager()
sm.update_subcloud(self.ctx,
data['id'],
management_state=consts.MANAGEMENT_MANAGED,
description="subcloud new description",
location="subcloud new location",
group_id=2)
mock_db_api.subcloud_update.assert_called_once_with(
mock.ANY,
data['id'],
management_state=consts.MANAGEMENT_MANAGED,
description="subcloud new description",
location="subcloud new location",
group_id=2)
def test_update_subcloud_endpoint_status(self):
# create a subcloud
@ -393,3 +446,136 @@ class TestSubcloudManager(base.DCManagerTestCase):
self.assertIsNotNone(updated_subcloud_status)
self.assertEqual(updated_subcloud_status.sync_status,
consts.SYNC_STATUS_OUT_OF_SYNC)
def test_update_subcloud_availability_go_online(self):
# create a subcloud
subcloud = self.create_subcloud_static(self.ctx, name='subcloud1')
self.assertIsNotNone(subcloud)
self.assertEqual(subcloud.availability_status,
consts.AVAILABILITY_OFFLINE)
sm = subcloud_manager.SubcloudManager()
sm.update_subcloud_availability(self.ctx, subcloud.name,
consts.AVAILABILITY_ONLINE)
updated_subcloud = db_api.subcloud_get_by_name(self.ctx, 'subcloud1')
# Verify the subcloud was set to online
self.assertEqual(updated_subcloud.availability_status,
consts.AVAILABILITY_ONLINE)
# Verify notifying dcorch
self.fake_dcorch_api.update_subcloud_states.assert_called_once_with(
self.ctx, subcloud.name, updated_subcloud.management_state,
consts.AVAILABILITY_ONLINE)
def test_update_subcloud_availability_go_offline(self):
subcloud = self.create_subcloud_static(self.ctx, name='subcloud1')
self.assertIsNotNone(subcloud)
# Set the subcloud to online/managed
db_api.subcloud_update(self.ctx, subcloud.id,
management_state=consts.MANAGEMENT_MANAGED,
availability_status=consts.AVAILABILITY_ONLINE)
sm = subcloud_manager.SubcloudManager()
# create sync statuses for endpoints and set them to in-sync
for endpoint in [dcorch_consts.ENDPOINT_TYPE_PLATFORM,
dcorch_consts.ENDPOINT_TYPE_IDENTITY,
dcorch_consts.ENDPOINT_TYPE_PATCHING,
dcorch_consts.ENDPOINT_TYPE_FM,
dcorch_consts.ENDPOINT_TYPE_NFV]:
db_api.subcloud_status_create(
self.ctx, subcloud.id, endpoint)
sm.update_subcloud_endpoint_status(
self.ctx, subcloud_name=subcloud.name,
endpoint_type=endpoint,
sync_status=consts.SYNC_STATUS_IN_SYNC)
# Audit fails once
audit_fail_count = 1
sm.update_subcloud_availability(self.ctx, subcloud.name,
availability_status=None,
audit_fail_count=audit_fail_count)
# Verify the subclcoud availability was not updated
updated_subcloud = db_api.subcloud_get_by_name(self.ctx, 'subcloud1')
self.assertEqual(updated_subcloud.availability_status,
consts.AVAILABILITY_ONLINE)
# Verify dcorch was not notified
self.fake_dcorch_api.update_subcloud_states.assert_not_called()
# Verify the audit_fail_count was updated
updated_subcloud = db_api.subcloud_get_by_name(self.ctx, 'subcloud1')
self.assertEqual(updated_subcloud.audit_fail_count, audit_fail_count)
# Audit fails again
audit_fail_count = audit_fail_count + 1
sm.update_subcloud_availability(self.ctx, subcloud.name,
consts.AVAILABILITY_OFFLINE,
audit_fail_count=audit_fail_count)
# Verify the subclcoud availability was updated
updated_subcloud = db_api.subcloud_get_by_name(self.ctx, 'subcloud1')
self.assertEqual(updated_subcloud.availability_status,
consts.AVAILABILITY_OFFLINE)
# Verify notifying dcorch
self.fake_dcorch_api.update_subcloud_states.assert_called_once_with(
self.ctx, subcloud.name, updated_subcloud.management_state,
consts.AVAILABILITY_OFFLINE)
# Verify all endpoint statuses set to unknown
for subcloud, subcloud_status in db_api. \
subcloud_get_with_status(self.ctx, subcloud.id):
self.assertIsNotNone(subcloud_status)
self.assertEqual(subcloud_status.sync_status,
consts.SYNC_STATUS_UNKNOWN)
def test_update_subcloud_sync_endpoint_type(self):
subcloud = self.create_subcloud_static(self.ctx, name='subcloud1')
self.assertIsNotNone(subcloud)
sm = subcloud_manager.SubcloudManager()
endpoint_type_list = dccommon_consts.ENDPOINT_TYPES_LIST_OS
# Test openstack app installed
openstack_installed = True
sm.update_subcloud_sync_endpoint_type(self.ctx, subcloud.name,
endpoint_type_list,
openstack_installed)
# Verify notifying dcorch to add subcloud sync endpoint type
self.fake_dcorch_api.add_subcloud_sync_endpoint_type.\
assert_called_once_with(self.ctx, subcloud.name,
endpoint_type_list)
# Verify the subcloud status created for os endpoints
for endpoint in endpoint_type_list:
subcloud_status = db_api.subcloud_status_get(
self.ctx, subcloud.id, endpoint)
self.assertIsNotNone(subcloud_status)
self.assertEqual(subcloud_status.sync_status,
consts.SYNC_STATUS_UNKNOWN)
# Verify the subcloud openstack_installed was updated
updated_subcloud = db_api.subcloud_get_by_name(self.ctx, subcloud.name)
self.assertEqual(updated_subcloud.openstack_installed, True)
# Test openstack app removed
openstack_installed = False
sm.update_subcloud_sync_endpoint_type(self.ctx, subcloud.name,
endpoint_type_list,
openstack_installed)
# Verify notifying dcorch to remove subcloud sync endpoint type
self.fake_dcorch_api.remove_subcloud_sync_endpoint_type.\
assert_called_once_with(self.ctx, subcloud.name,
endpoint_type_list)
# Verify the subcloud status is deleted for os endpoints
for endpoint in endpoint_type_list:
self.assertRaises(exceptions.SubcloudStatusNotFound,
db_api.subcloud_status_get, self.ctx,
subcloud.id, endpoint)
# Verify the subcloud openstack_installed was updated
updated_subcloud = db_api.subcloud_get_by_name(self.ctx, subcloud.name)
self.assertEqual(updated_subcloud.openstack_installed, False)

View File

@ -16,20 +16,23 @@
# of this software may be licensed only pursuant to the terms
# of an applicable Wind River license agreement.
#
import copy
import mock
from os import path as os_path
import threading
from oslo_config import cfg
from oslo_utils import timeutils
from dcorch.common import consts as dcorch_consts
from dcmanager.common import consts
from dcmanager.common import context
from dcmanager.common import exceptions
from dcmanager.manager import patch_orch_thread
from dcmanager.manager import sw_update_manager
from dcmanager.tests import base
from dcmanager.tests import utils
from dcorch.common import consts as dcorch_consts
CONF = cfg.CONF
FAKE_ID = '1'
@ -336,26 +339,47 @@ class SwUpdateStrategy(object):
self.updated_at = timeutils.utcnow()
class FakeFwUpdateOrchThread(object):
def __init__(self):
# Mock methods that are called in normal execution of this thread
self.start = mock.MagicMock()
class FakeSwUpgradeOrchThread(object):
def __init__(self):
# Mock methods that are called in normal execution of this thread
self.start = mock.MagicMock()
class TestSwUpdateManager(base.DCManagerTestCase):
def setUp(self):
super(TestSwUpdateManager, self).setUp()
# Mock the context
self.ctxt = utils.dummy_context()
p = mock.patch.object(context, 'get_admin_context')
self.mock_get_admin_context = p.start()
self.mock_get_admin_context.return_value = self.ctx
self.addCleanup(p.stop)
# Note: mock where an item is used, not where it comes from
self.fake_sw_upgrade_orch_thread = FakeSwUpgradeOrchThread()
p = mock.patch.object(sw_update_manager, 'SwUpgradeOrchThread')
self.mock_sw_upgrade_orch_thread = p.start()
self.mock_sw_upgrade_orch_thread.return_value = \
self.fake_sw_upgrade_orch_thread
self.addCleanup(p.stop)
@mock.patch.object(sw_update_manager, 'PatchOrchThread')
@mock.patch.object(sw_update_manager, 'context')
def test_init(self, mock_context, mock_patch_orch_thread):
mock_context.get_admin_context.return_value = self.ctxt
def test_init(self, mock_patch_orch_thread):
um = sw_update_manager.SwUpdateManager()
self.assertIsNotNone(um)
self.assertEqual('sw_update_manager', um.service_name)
self.assertEqual('localhost', um.host)
@mock.patch.object(sw_update_manager, 'PatchOrchThread')
@mock.patch.object(sw_update_manager, 'context')
@mock.patch.object(sw_update_manager, 'db_api')
def test_create_sw_update_strategy_no_subclouds(
self, mock_db_api, mock_context, mock_patch_orch_thread):
mock_context.get_admin_context.return_value = self.ctxt
self, mock_db_api, mock_patch_orch_thread):
mock_db_api.sw_update_strategy_get.side_effect = \
exceptions.NotFound()
um = sw_update_manager.SwUpdateManager()
@ -373,11 +397,9 @@ class TestSwUpdateManager(base.DCManagerTestCase):
expected_calls)
@mock.patch.object(sw_update_manager, 'PatchOrchThread')
@mock.patch.object(sw_update_manager, 'context')
@mock.patch.object(sw_update_manager, 'db_api')
def test_create_sw_update_strategy_parallel(
self, mock_db_api, mock_context, mock_patch_orch_thread):
mock_context.get_admin_context.return_value = self.ctxt
self, mock_db_api, mock_patch_orch_thread):
mock_db_api.sw_update_strategy_get.side_effect = \
exceptions.NotFound()
@ -452,11 +474,9 @@ class TestSwUpdateManager(base.DCManagerTestCase):
expected_calls)
@mock.patch.object(sw_update_manager, 'PatchOrchThread')
@mock.patch.object(sw_update_manager, 'context')
@mock.patch.object(sw_update_manager, 'db_api')
def test_create_sw_update_strategy_serial(
self, mock_db_api, mock_context, mock_patch_orch_thread):
mock_context.get_admin_context.return_value = self.ctxt
self, mock_db_api, mock_patch_orch_thread):
mock_db_api.sw_update_strategy_get.side_effect = \
exceptions.NotFound()
@ -533,11 +553,9 @@ class TestSwUpdateManager(base.DCManagerTestCase):
expected_calls)
@mock.patch.object(sw_update_manager, 'PatchOrchThread')
@mock.patch.object(sw_update_manager, 'context')
@mock.patch.object(sw_update_manager, 'db_api')
def test_create_sw_update_strategy_unknown_sync_status(
self, mock_db_api, mock_context, mock_patch_orch_thread):
mock_context.get_admin_context.return_value = self.ctxt
self, mock_db_api, mock_patch_orch_thread):
mock_db_api.sw_update_strategy_get.side_effect = \
exceptions.NotFound()
@ -572,11 +590,9 @@ class TestSwUpdateManager(base.DCManagerTestCase):
self.ctxt, payload=FAKE_SW_UPDATE_DATA)
@mock.patch.object(sw_update_manager, 'PatchOrchThread')
@mock.patch.object(sw_update_manager, 'context')
@mock.patch.object(sw_update_manager, 'db_api')
def test_delete_sw_update_strategy(self, mock_db_api, mock_context,
def test_delete_sw_update_strategy(self, mock_db_api,
mock_patch_orch_thread):
mock_context.get_admin_context.return_value = self.ctxt
fake_sw_update_strategy = SwUpdateStrategy(FAKE_ID,
FAKE_SW_UPDATE_DATA)
mock_db_api.sw_update_strategy_get.return_value = \
@ -587,13 +603,11 @@ class TestSwUpdateManager(base.DCManagerTestCase):
mock.ANY, state=consts.SW_UPDATE_STATE_DELETING)
@mock.patch.object(sw_update_manager, 'PatchOrchThread')
@mock.patch.object(sw_update_manager, 'context')
@mock.patch.object(sw_update_manager, 'db_api')
def test_delete_sw_update_strategy_invalid_state(
self, mock_db_api, mock_context, mock_patch_orch_thread):
self, mock_db_api, mock_patch_orch_thread):
data = copy.copy(FAKE_SW_UPDATE_DATA)
data['state'] = consts.SW_UPDATE_STATE_APPLYING
mock_context.get_admin_context.return_value = self.ctxt
fake_sw_update_strategy = SwUpdateStrategy(FAKE_ID,
data)
mock_db_api.sw_update_strategy_get.return_value = \
@ -604,11 +618,9 @@ class TestSwUpdateManager(base.DCManagerTestCase):
self.ctxt)
@mock.patch.object(sw_update_manager, 'PatchOrchThread')
@mock.patch.object(sw_update_manager, 'context')
@mock.patch.object(sw_update_manager, 'db_api')
def test_apply_sw_update_strategy(self, mock_db_api, mock_context,
def test_apply_sw_update_strategy(self, mock_db_api,
mock_patch_orch_thread):
mock_context.get_admin_context.return_value = self.ctxt
fake_sw_update_strategy = SwUpdateStrategy(FAKE_ID,
FAKE_SW_UPDATE_DATA)
mock_db_api.sw_update_strategy_get.return_value = \
@ -619,13 +631,11 @@ class TestSwUpdateManager(base.DCManagerTestCase):
mock.ANY, state=consts.SW_UPDATE_STATE_APPLYING)
@mock.patch.object(sw_update_manager, 'PatchOrchThread')
@mock.patch.object(sw_update_manager, 'context')
@mock.patch.object(sw_update_manager, 'db_api')
def test_apply_sw_update_strategy_invalid_state(
self, mock_db_api, mock_context, mock_patch_orch_thread):
self, mock_db_api, mock_patch_orch_thread):
data = copy.copy(FAKE_SW_UPDATE_DATA)
data['state'] = consts.SW_UPDATE_STATE_APPLYING
mock_context.get_admin_context.return_value = self.ctxt
fake_sw_update_strategy = SwUpdateStrategy(FAKE_ID,
data)
mock_db_api.sw_update_strategy_get.return_value = \
@ -636,13 +646,11 @@ class TestSwUpdateManager(base.DCManagerTestCase):
self.ctxt)
@mock.patch.object(sw_update_manager, 'PatchOrchThread')
@mock.patch.object(sw_update_manager, 'context')
@mock.patch.object(sw_update_manager, 'db_api')
def test_abort_sw_update_strategy(
self, mock_db_api, mock_context, mock_patch_orch_thread):
self, mock_db_api, mock_patch_orch_thread):
data = copy.copy(FAKE_SW_UPDATE_DATA)
data['state'] = consts.SW_UPDATE_STATE_APPLYING
mock_context.get_admin_context.return_value = self.ctxt
fake_sw_update_strategy = SwUpdateStrategy(FAKE_ID,
data)
mock_db_api.sw_update_strategy_get.return_value = \
@ -653,13 +661,11 @@ class TestSwUpdateManager(base.DCManagerTestCase):
mock.ANY, state=consts.SW_UPDATE_STATE_ABORT_REQUESTED)
@mock.patch.object(sw_update_manager, 'PatchOrchThread')
@mock.patch.object(sw_update_manager, 'context')
@mock.patch.object(sw_update_manager, 'db_api')
def test_abort_sw_update_strategy_invalid_state(
self, mock_db_api, mock_context, mock_patch_orch_thread):
self, mock_db_api, mock_patch_orch_thread):
data = copy.copy(FAKE_SW_UPDATE_DATA)
data['state'] = consts.SW_UPDATE_STATE_COMPLETE
mock_context.get_admin_context.return_value = self.ctxt
fake_sw_update_strategy = SwUpdateStrategy(FAKE_ID,
data)
mock_db_api.sw_update_strategy_get.return_value = \
@ -669,32 +675,31 @@ class TestSwUpdateManager(base.DCManagerTestCase):
um.apply_sw_update_strategy,
self.ctxt)
@mock.patch.object(sw_update_manager, 'SysinvClient')
@mock.patch.object(sw_update_manager, 'os')
@mock.patch.object(sw_update_manager, 'PatchingClient')
@mock.patch.object(sw_update_manager, 'threading')
@mock.patch.object(sw_update_manager, 'context')
@mock.patch.object(sw_update_manager, 'db_api')
@mock.patch.object(patch_orch_thread, 'SysinvClient')
@mock.patch.object(os_path, 'isfile')
@mock.patch.object(patch_orch_thread, 'PatchingClient')
@mock.patch.object(threading, 'Thread')
@mock.patch.object(patch_orch_thread, 'db_api')
def test_update_subcloud_patches(
self, mock_db_api, mock_context, mock_threading,
mock_patching_client, mock_os, mock_sysinv_client):
self, mock_db_api, mock_threading,
mock_patching_client, mock_os_path_isfile, mock_sysinv_client):
mock_os.path.isfile.return_value = True
mock_patching_client.side_effect = FakePatchingClientOutOfSync
mock_os_path_isfile.return_value = True
fake_subcloud = Subcloud(1, 'subcloud1',
is_managed=True, is_online=True)
data = copy.copy(FAKE_STRATEGY_STEP_DATA)
data['state'] = consts.STRATEGY_STATE_UPDATING_PATCHES
data['subcloud'] = fake_subcloud
data['subcloud_name'] = 'subcloud1'
mock_context.get_admin_context.return_value = self.ctxt
fake_strategy_step = StrategyStep(**data)
mock_patching_client.side_effect = FakePatchingClientOutOfSync
mock_sysinv_client.side_effect = FakeSysinvClientOneLoad
FakePatchingClientOutOfSync.apply = mock.Mock()
FakePatchingClientOutOfSync.remove = mock.Mock()
FakePatchingClientOutOfSync.upload = mock.Mock()
sw_update_manager.PatchOrchThread.stopped = lambda x: False
pot = sw_update_manager.PatchOrchThread()
mock_strategy_lock = mock.Mock()
pot = sw_update_manager.PatchOrchThread(mock_strategy_lock)
pot.get_ks_client = mock.Mock()
pot.update_subcloud_patches(fake_strategy_step)
@ -717,23 +722,21 @@ class TestSwUpdateManager(base.DCManagerTestCase):
finished_at=mock.ANY,
)
@mock.patch.object(sw_update_manager, 'SysinvClient')
@mock.patch.object(sw_update_manager, 'os')
@mock.patch.object(sw_update_manager, 'PatchingClient')
@mock.patch.object(sw_update_manager, 'threading')
@mock.patch.object(sw_update_manager, 'context')
@mock.patch.object(sw_update_manager, 'db_api')
@mock.patch.object(patch_orch_thread, 'SysinvClient')
@mock.patch.object(os_path, 'isfile')
@mock.patch.object(patch_orch_thread, 'PatchingClient')
@mock.patch.object(threading, 'Thread')
@mock.patch.object(patch_orch_thread, 'db_api')
def test_update_subcloud_patches_bad_committed(
self, mock_db_api, mock_context, mock_threading,
mock_patching_client, mock_os, mock_sysinv_client):
self, mock_db_api, mock_threading,
mock_patching_client, mock_os_path_isfile, mock_sysinv_client):
mock_os.path.isfile.return_value = True
mock_os_path_isfile.return_value = True
fake_subcloud = Subcloud(1, 'subcloud1',
is_managed=True, is_online=True)
data = copy.copy(FAKE_STRATEGY_STEP_DATA)
data['state'] = consts.STRATEGY_STATE_UPDATING_PATCHES
data['subcloud'] = fake_subcloud
mock_context.get_admin_context.return_value = self.ctxt
fake_strategy_step = StrategyStep(**data)
mock_patching_client.side_effect = FakePatchingClientSubcloudCommitted
mock_sysinv_client.side_effect = FakeSysinvClientOneLoad
@ -741,7 +744,8 @@ class TestSwUpdateManager(base.DCManagerTestCase):
FakePatchingClientOutOfSync.remove = mock.Mock()
FakePatchingClientOutOfSync.upload = mock.Mock()
sw_update_manager.PatchOrchThread.stopped = lambda x: False
pot = sw_update_manager.PatchOrchThread()
mock_strategy_lock = mock.Mock()
pot = sw_update_manager.PatchOrchThread(mock_strategy_lock)
pot.get_ks_client = mock.Mock()
pot.update_subcloud_patches(fake_strategy_step)
@ -754,23 +758,21 @@ class TestSwUpdateManager(base.DCManagerTestCase):
finished_at=mock.ANY,
)
@mock.patch.object(sw_update_manager, 'SysinvClient')
@mock.patch.object(sw_update_manager, 'os')
@mock.patch.object(sw_update_manager, 'PatchingClient')
@mock.patch.object(sw_update_manager, 'threading')
@mock.patch.object(sw_update_manager, 'context')
@mock.patch.object(sw_update_manager, 'db_api')
@mock.patch.object(patch_orch_thread, 'SysinvClient')
@mock.patch.object(os_path, 'isfile')
@mock.patch.object(patch_orch_thread, 'PatchingClient')
@mock.patch.object(threading, 'Thread')
@mock.patch.object(patch_orch_thread, 'db_api')
def test_update_subcloud_patches_bad_state(
self, mock_db_api, mock_context, mock_threading,
mock_patching_client, mock_os, mock_sysinv_client):
self, mock_db_api, mock_threading,
mock_patching_client, mock_os_path_isfile, mock_sysinv_client):
mock_os.path.isfile.return_value = True
mock_os_path_isfile.return_value = True
fake_subcloud = Subcloud(1, 'subcloud1',
is_managed=True, is_online=True)
data = copy.copy(FAKE_STRATEGY_STEP_DATA)
data['state'] = consts.STRATEGY_STATE_UPDATING_PATCHES
data['subcloud'] = fake_subcloud
mock_context.get_admin_context.return_value = self.ctxt
fake_strategy_step = StrategyStep(**data)
mock_patching_client.side_effect = FakePatchingClientSubcloudUnknown
mock_sysinv_client.side_effect = FakeSysinvClientOneLoad
@ -778,7 +780,8 @@ class TestSwUpdateManager(base.DCManagerTestCase):
FakePatchingClientOutOfSync.remove = mock.Mock()
FakePatchingClientOutOfSync.upload = mock.Mock()
sw_update_manager.PatchOrchThread.stopped = lambda x: False
pot = sw_update_manager.PatchOrchThread()
mock_strategy_lock = mock.Mock()
pot = sw_update_manager.PatchOrchThread(mock_strategy_lock)
pot.get_ks_client = mock.Mock()
pot.update_subcloud_patches(fake_strategy_step)
@ -791,28 +794,27 @@ class TestSwUpdateManager(base.DCManagerTestCase):
finished_at=mock.ANY,
)
@mock.patch.object(sw_update_manager, 'os')
@mock.patch.object(sw_update_manager, 'PatchingClient')
@mock.patch.object(sw_update_manager, 'threading')
@mock.patch.object(sw_update_manager, 'context')
@mock.patch.object(sw_update_manager, 'db_api')
@mock.patch.object(os_path, 'isfile')
@mock.patch.object(patch_orch_thread, 'PatchingClient')
@mock.patch.object(threading, 'Thread')
@mock.patch.object(patch_orch_thread, 'db_api')
def test_finish(
self, mock_db_api, mock_context, mock_threading,
mock_patching_client, mock_os):
self, mock_db_api, mock_threading,
mock_patching_client, mock_os_path_isfile):
mock_os.path.isfile.return_value = True
mock_os_path_isfile.return_value = True
fake_subcloud = Subcloud(1, 'subcloud1',
is_managed=True, is_online=True)
data = copy.copy(FAKE_STRATEGY_STEP_DATA)
data['state'] = consts.STRATEGY_STATE_UPDATING_PATCHES
data['subcloud'] = fake_subcloud
mock_context.get_admin_context.return_value = self.ctxt
fake_strategy_step = StrategyStep(**data)
mock_patching_client.side_effect = FakePatchingClientFinish
FakePatchingClientFinish.delete = mock.Mock()
FakePatchingClientFinish.commit = mock.Mock()
sw_update_manager.PatchOrchThread.stopped = lambda x: False
pot = sw_update_manager.PatchOrchThread()
mock_strategy_lock = mock.Mock()
pot = sw_update_manager.PatchOrchThread(mock_strategy_lock)
pot.get_ks_client = mock.Mock()
pot.finish(fake_strategy_step)

View File

@ -117,4 +117,5 @@ def create_subcloud_dict(data_list):
'external_oam_subnet': data_list[19],
'external_oam_gateway_address': data_list[20],
'external_oam_floating_address': data_list[21],
'sysadmin_password': data_list[22]}
'sysadmin_password': data_list[22],
'group_id': data_list[23]}

View File

@ -1,85 +0,0 @@
# Copyright (c) 2017 Ericsson AB.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from dcorch.api.controllers import restcomm
from dcorch.db import api as db_api
from dcorch.rpc import client as rpc_client
from oslo_log import log as logging
from pecan import expose
LOG = logging.getLogger(__name__)
class SubcloudAlarmController(object):
VERSION_ALIASES = {
'Newton': '1.0',
}
def __init__(self, *args, **kwargs):
super(SubcloudAlarmController, self).__init__(*args, **kwargs)
self.rpc_client = rpc_client.EngineClient()
# to do the version compatibility for future purpose
def _determine_version_cap(self, target):
version_cap = 1.0
return version_cap
@expose(generic=True, template='json')
def index(self):
# Route the request to specific methods with parameters
pass
def _get_alarm_aggregates(self):
context = restcomm.extract_context_from_environ()
alarms = db_api.subcloud_alarms_get_all(context)
summary = []
for alarm in alarms:
alarm_dict = {'region_name': alarm['region_name'],
'uuid': alarm['uuid'],
'critical_alarms': alarm['critical_alarms'],
'major_alarms': alarm['major_alarms'],
'minor_alarms': alarm['minor_alarms'],
'warnings': alarm['warnings'],
'cloud_status': alarm['cloud_status']}
summary.append(alarm_dict)
return summary
@index.when(method='GET', template='json')
def get(self, project):
"""Get List of alarm summarys
"""
return self._get_alarm_aggregates()
def _get_alarm_summary(self):
context = restcomm.extract_context_from_environ()
alarms = db_api.subcloud_alarms_get_all(context)
summary = {'critical': 0,
'degraded': 0,
'ok': 0,
'unreachable': 0}
for alarm in alarms:
summary[alarm['cloud_status']] += 1
return summary
@index.when(method='summary', template='json')
def summary(self, project):
"""Get an agregate of all subcloud status
:param project: UUID the project.
"""
return self._get_alarm_summary()

View File

@ -14,7 +14,6 @@
# under the License.
from dcorch.api.controllers.v1 import alarm_manager
from dcorch.api.controllers.v1 import subcloud_manager
import pecan
@ -31,8 +30,6 @@ class Controller(object):
if minor_version == '0':
sub_controllers["subclouds"] = subcloud_manager.\
SubcloudController
sub_controllers["alarms"] = alarm_manager.\
SubcloudAlarmController
for name, ctrl in sub_controllers.items():
setattr(self, name, ctrl)

View File

@ -45,7 +45,7 @@ LOG = logging.getLogger(__name__)
patch_opts = [
cfg.StrOpt('patch_vault',
default='/opt/patch-vault/',
default='/opt/dc-vault/patches/',
help='file system for patch storage on SystemController'),
]

View File

@ -20,6 +20,8 @@ from oslo_config import cfg
from oslo_log import log as logging
from dcorch.common import config
from dcorch.common import context
from dcorch.common.i18n import _
from dcorch.db import api
from dcorch import version
@ -40,6 +42,16 @@ def do_db_sync():
api.db_sync(api.get_engine(), CONF.command.version)
def do_db_clean():
"""Purge deleted orch requests, related jobs and resources."""
age_in_days = CONF.command.age_in_days
if age_in_days < 0:
sys.exit(_("Must supply a non-negative value for age."))
ctxt = context.get_admin_context()
api.purge_deleted_records(ctxt, age_in_days)
def add_command_parsers(subparsers):
parser = subparsers.add_parser('db_version')
parser.set_defaults(func=do_db_version)
@ -49,6 +61,12 @@ def add_command_parsers(subparsers):
parser.add_argument('version', nargs='?')
parser.add_argument('current_version', nargs='?')
parser = subparsers.add_parser('db_clean')
parser.set_defaults(func=do_db_clean)
parser.add_argument('age_in_days', type=int,
default=1)
command_opt = cfg.SubCommandOpt('command',
title='Commands',
help='Show available commands.',

View File

@ -1,42 +0,0 @@
#!/usr/bin/env python
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
DC Orchestrators SNMP Server.
"""
from dcorch.common import messaging
from dcorch.snmp import service as snmp_engine
from dcorch.snmp import snmp_config
from oslo_config import cfg
from oslo_log import log as logging
import sys
LOG = logging.getLogger('dcorch.snmp')
def main():
snmp_config.init(sys.argv[1:])
cfg.CONF(project='dcorch', prog='dcorch-snmp')
logging.setup(cfg.CONF, 'dcorch-snmp')
logging.set_defaults()
messaging.setup()
snmp_engine.SNMPService(cfg)
if __name__ == '__main__':
main()

View File

@ -179,24 +179,6 @@ common_opts = [
help='endpoints for which audit is disabled')
]
snmp_server_opts = [
cfg.StrOpt('snmp_ip', default='0.0.0.0',
help='ip to listen on'),
cfg.IntOpt('snmp_port',
default=162,
help='snmp trap port'),
cfg.StrOpt('snmp_comm_str', default='dcorchAlarmAggregator',
help='community string'),
cfg.StrOpt('snmp_sec_area', default='fm-aggregator',
help='security area'),
cfg.IntOpt('delay_time',
default=60,
help='min time between update requests per server'),
cfg.IntOpt('alarm_audit_interval_time',
default=787,
help='interval of periodic updates in seconds')
]
fernet_opts = [
cfg.IntOpt('key_rotation_interval',
default=168,
@ -219,9 +201,6 @@ openstack_cache_opt_group = cfg.OptGroup(name='openstack_cache',
title='Containerized OpenStack'
' Credentials')
snmp_opt_group = cfg.OptGroup(name='snmp',
title='SNMP Options')
fernet_opt_group = cfg.OptGroup(name='fernet',
title='Fernet Options')
@ -234,7 +213,6 @@ def list_opts():
yield openstack_cache_opt_group.name, cache_opts
yield scheduler_opt_group.name, scheduler_opts
yield pecan_group.name, pecan_opts
yield snmp_opt_group.name, snmp_server_opts
yield fernet_opt_group.name, fernet_opts
yield None, global_opts
yield None, common_opts

View File

@ -63,8 +63,6 @@ RPC_API_VERSION = "1.0"
TOPIC_ORCH_ENGINE = "dcorch-engine"
ALARMS_DISABLED = "disabled"
# SyncRequest States
ORCH_REQUEST_NONE = None
ORCH_REQUEST_QUEUED = "queued" # in database, not in thread
@ -128,11 +126,17 @@ ENDPOINT_TYPE_PATCHING = "patching"
ENDPOINT_TYPE_IDENTITY = "identity"
ENDPOINT_TYPE_FM = "faultmanagement"
ENDPOINT_TYPE_NFV = "nfv"
ENDPOINT_TYPE_LOAD = "load"
# platform endpoint types
# All endpoint types
ENDPOINT_TYPES_LIST = [ENDPOINT_TYPE_PLATFORM,
ENDPOINT_TYPE_PATCHING,
ENDPOINT_TYPE_IDENTITY]
ENDPOINT_TYPE_IDENTITY,
ENDPOINT_TYPE_LOAD]
# Dcorch sync endpoint types
SYNC_ENDPOINT_TYPES_LIST = [ENDPOINT_TYPE_PLATFORM,
ENDPOINT_TYPE_IDENTITY]
ENDPOINT_QUOTA_MAPPING = {
ENDPOINT_TYPE_COMPUTE: NOVA_QUOTA_FIELDS,
@ -142,6 +146,7 @@ ENDPOINT_QUOTA_MAPPING = {
# DB sync agent endpoint
DBS_ENDPOINT_INTERNAL = "internal"
DBS_ENDPOINT_ADMIN = "admin"
DBS_ENDPOINT_DEFAULT = DBS_ENDPOINT_INTERNAL
# Do we need separate patch/put operations or could we just use
@ -168,11 +173,6 @@ ACTION_REMOVETENANTACCESS = "removeTenantAccess"
ACTION_EXTRASPECS_POST = "extra_specs"
ACTION_EXTRASPECS_DELETE = "extra_specs_delete"
# Alarm aggregation
ALARM_OK_STATUS = "OK"
ALARM_DEGRADED_STATUS = "degraded"
ALARM_CRITICAL_STATUS = "critical"
# Subcloud initial sync state
INITIAL_SYNC_STATE_NONE = "none"
INITIAL_SYNC_STATE_REQUESTED = "requested"

View File

@ -253,6 +253,10 @@ def orch_request_get(context, orch_request_id):
return IMPL.orch_request_get(context, orch_request_id)
def orch_request_get_most_recent_failed_request(context):
return IMPL.orch_request_get_most_recent_failed_request(context)
def orch_request_get_all(context, orch_job_id=None):
return IMPL.orch_request_get_all(context, orch_job_id=orch_job_id)
@ -296,22 +300,11 @@ def orch_request_delete_by_subcloud(context, region_name):
return IMPL.orch_request_delete_by_subcloud(context, region_name)
# Alarm Resources
def subcloud_alarms_get(context, region_id):
return IMPL.subcloud_alarms_get(context, region_id)
def orch_request_delete_previous_failed_requests(context, delete_timestamp):
return IMPL.orch_request_delete_previous_failed_requests(
context, delete_timestamp)
def subcloud_alarms_get_all(context, region_name=None):
return IMPL.subcloud_alarms_get_all(context, region_name=region_name)
def subcloud_alarms_create(context, region_name, values):
return IMPL.subcloud_alarms_create(context, region_name, values)
def subcloud_alarms_update(context, region_name, values):
return IMPL.subcloud_alarms_update(context, region_name, values)
def subcloud_alarms_delete(context, region_name):
return IMPL.subcloud_alarms_delete(context, region_name)
# Periodic cleanup
def purge_deleted_records(context, age_in_days=1):
return IMPL.purge_deleted_records(context, age_in_days)

View File

@ -25,12 +25,12 @@
Implementation of SQLAlchemy backend.
"""
import datetime
import sys
import threading
from oslo_db import exception as db_exc
from oslo_db.sqlalchemy import enginefacade
# from oslo_db.sqlalchemy import utils as db_utils
from oslo_log import log as logging
from oslo_utils import strutils
@ -43,6 +43,7 @@ from sqlalchemy.orm.exc import MultipleResultsFound
from sqlalchemy.orm.exc import NoResultFound
from sqlalchemy.orm import joinedload_all
from dcorch.common import consts
from dcorch.common import exceptions as exception
from dcorch.common.i18n import _
from dcorch.db.sqlalchemy import migration
@ -789,6 +790,18 @@ def orch_request_get(context, orch_request_id):
return _orch_request_get(context, orch_request_id)
@require_context
def orch_request_get_most_recent_failed_request(context):
query = model_query(context, models.OrchRequest). \
filter_by(deleted=0). \
filter_by(state=consts.ORCH_REQUEST_STATE_FAILED)
try:
return query.order_by(desc(models.OrchRequest.updated_at)).first()
except NoResultFound:
return None
@require_context
def orch_request_get_all(context, orch_job_id=None):
query = model_query(context, models.OrchRequest). \
@ -895,63 +908,54 @@ def orch_request_delete_by_subcloud(context, region_name):
delete()
@require_context
def _subcloud_alarms_get(context, region_id, session=None):
query = model_query(context, models.SubcloudAlarmSummary, session=session). \
filter_by(deleted=0)
query = add_identity_filter(query, region_id, use_region_name=True)
@require_admin_context
def orch_request_delete_previous_failed_requests(context, delete_timestamp):
"""Soft delete orch_request entries.
try:
return query.one()
except NoResultFound:
raise exception.SubcloudNotFound(region_name=region_id)
except MultipleResultsFound:
raise exception.InvalidParameterValue(
err="Multiple entries found for subcloud %s" % region_id)
This is used to soft delete all previously failed requests at
the end of each audit cycle.
"""
LOG.info('Soft deleting failed orch requests at and before %s',
delete_timestamp)
with write_session() as session:
query = session.query(models.OrchRequest). \
filter_by(deleted=0). \
filter_by(state=consts.ORCH_REQUEST_STATE_FAILED). \
filter(models.OrchRequest.updated_at <= delete_timestamp)
@require_context
def subcloud_alarms_get(context, region_id):
return _subcloud_get(context, region_id)
@require_context
def subcloud_alarms_get_all(context, region_name=None):
query = model_query(context, models.SubcloudAlarmSummary). \
filter_by(deleted=0)
if region_name:
query = add_identity_filter(query, region_name, use_region_name=True)
return query.order_by(desc(models.SubcloudAlarmSummary.id)).all()
count = query.update({'deleted': 1,
'deleted_at': timeutils.utcnow()})
LOG.info('%d previously failed sync requests soft deleted', count)
@require_admin_context
def subcloud_alarms_create(context, region_name, values):
def purge_deleted_records(context, age_in_days):
deleted_age = \
timeutils.utcnow() - datetime.timedelta(days=age_in_days)
LOG.info('Purging deleted records older than %s', deleted_age)
with write_session() as session:
result = models.SubcloudAlarmSummary()
result.region_name = region_name
if not values.get('uuid'):
values['uuid'] = uuidutils.generate_uuid()
result.update(values)
try:
session.add(result)
except db_exc.DBDuplicateEntry:
raise exception.SubcloudAlreadyExists(region_name=region_name)
return result
# Purging orch_request table
count = session.query(models.OrchRequest). \
filter_by(deleted=1). \
filter(models.OrchRequest.deleted_at < deleted_age).delete()
LOG.info('%d records were purged from orch_request table.', count)
# Purging orch_job table
subquery = model_query(context, models.OrchRequest.orch_job_id). \
group_by(models.OrchRequest.orch_job_id)
@require_admin_context
def subcloud_alarms_update(context, region_name, values):
with write_session() as session:
result = _subcloud_alarms_get(context, region_name, session)
result.update(values)
result.save(session)
return result
count = session.query(models.OrchJob). \
filter(~models.OrchJob.id.in_(subquery)). \
delete(synchronize_session='fetch')
LOG.info('%d records were purged from orch_job table.', count)
# Purging resource table
subquery = model_query(context, models.OrchJob.resource_id). \
group_by(models.OrchJob.resource_id)
@require_admin_context
def subcloud_alarms_delete(context, region_name):
with write_session() as session:
session.query(models.SubcloudAlarmSummary).\
filter_by(region_name=region_name).delete()
count = session.query(models.Resource). \
filter(~models.Resource.id.in_(subquery)). \
delete(synchronize_session='fetch')
LOG.info('%d records were purged from resource table.', count)

View File

@ -0,0 +1,33 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Copyright (c) 2020 Wind River Inc.
#
# The right to copy, distribute, modify, or otherwise make use
# of this software may be licensed only pursuant to the terms
# of an applicable Wind River license agreement.
#
import sqlalchemy
def upgrade(migrate_engine):
meta = sqlalchemy.MetaData()
meta.bind = migrate_engine
subcloud_alarms = sqlalchemy.Table('subcloud_alarms', meta, autoload=True)
subcloud_alarms.drop()
def downgrade(migrate_engine):
raise NotImplementedError('Database downgrade not supported - '
'would drop all tables')

View File

@ -0,0 +1,42 @@
# Copyright (c) 2020 Wind River Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import MetaData, Table, Index
ORCH_JOB_ID_INDEX_NAME = 'orch_request_orch_job_id_idx'
UPDATED_AT_STATE_INDEX_NAME = 'orch_request_updated_at_state_idx'
DELETED_AT_INDEX_NAME = 'orch_request_deleted_at_idx'
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
orch_request = Table('orch_request', meta, autoload=True)
index = Index(UPDATED_AT_STATE_INDEX_NAME,
orch_request.c.updated_at, orch_request.c.state)
index.create(migrate_engine)
index = Index(DELETED_AT_INDEX_NAME, orch_request.c.deleted_at)
index.create(migrate_engine)
index = Index(ORCH_JOB_ID_INDEX_NAME, orch_request.c.orch_job_id)
index.create(migrate_engine)
def downgrade(migrate_engine):
raise NotImplementedError('Database downgrade not supported - '
'would drop all tables')

View File

@ -186,23 +186,6 @@ class Subcloud(BASE, OrchestratorBase):
default=consts.INITIAL_SYNC_STATE_NONE)
class SubcloudAlarmSummary(BASE, OrchestratorBase):
"""Represents a Distributed Cloud subcloud alarm aggregate"""
__tablename__ = 'subcloud_alarms'
__table_args__ = (
Index('subcloud_alarm_region_name_idx', 'region_name'),
)
id = Column(Integer, primary_key=True, nullable=False, autoincrement=True)
uuid = Column(String(36), unique=True)
region_name = Column('region_name', String(255), unique=True) # keystone
critical_alarms = Column('critical_alarms', Integer)
major_alarms = Column('major_alarms', Integer)
minor_alarms = Column('minor_alarms', Integer)
warnings = Column('warnings', Integer)
cloud_status = Column('cloud_status', String(64))
capabilities = Column(JSONEncodedDict)
class Resource(BASE, OrchestratorBase):
"""Represents a Distributed Cloud Orchestrator Resource"""

View File

@ -1,177 +0,0 @@
# Copyright 2016 Ericsson AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
from dccommon import consts as dccommon_consts
from dccommon import exceptions as dccommon_exceptions
from dcmanager.common import consts as dcm_consts
from dcorch.common import consts
from dcorch.common import context
from dcorch.common import exceptions
from dcorch.common.i18n import _
from dcorch.common import manager
from dcorch.db import api as db_api
from dccommon.drivers.openstack.fm import FmClient
from dccommon.drivers.openstack.keystone_v3 import KeystoneClient
from dccommon.drivers.openstack import sdk_platform as sdk
from dccommon.drivers.openstack.sysinv_v1 import SysinvClient
from oslo_config import cfg
from oslo_log import log as logging
import threading
import time
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class AlarmAggregateManager(manager.Manager):
"""Manages tasks related to alarm aggregation"""
def __init__(self, *args, **kwargs):
LOG.debug(_('AlarmAggregateManager initialization...'))
super(AlarmAggregateManager, self).\
__init__(service_name="alarm_aggregate_manager", *args, **kwargs)
self.context = context.get_admin_context()
self.alarm_update_thread = PeriodicAlarmUpdate(self)
self.alarm_update_thread.start()
def shutdown(self):
self.alarm_update_thread.stop()
self.alarm_update_thread.join()
def enable_snmp(self, ctxt, subcloud_name):
LOG.info("Enabling fm-aggregation trap for region_name=%s" %
subcloud_name)
payload = {"ip_address": CONF.snmp.snmp_ip,
"community": CONF.snmp.snmp_comm_str}
try:
ks_client = KeystoneClient(subcloud_name)
sysinv_client = SysinvClient(subcloud_name, ks_client.session)
fm_client = FmClient(subcloud_name, ks_client.session,
dccommon_consts.KS_ENDPOINT_DEFAULT)
sysinv_client.snmp_trapdest_create(payload)
self.update_alarm_summary(self.context, subcloud_name,
fm_client=fm_client)
except (exceptions.ConnectionRefused, exceptions.NotAuthorized,
exceptions.TimeOut):
LOG.info("snmp_trapdest_create exception Timeout region_name=%s" %
subcloud_name)
pass
except AttributeError:
LOG.info("snmp_trapdest_create AttributeError region_name=%s" %
subcloud_name)
pass
except dccommon_exceptions.TrapDestAlreadyExists:
LOG.info("snmp_trapdest_create TrapDestAlreadyExists "
"region_name=%s payload %s" % (subcloud_name, payload))
pass
except Exception:
LOG.info("snmp_trapdest_create exception region_name=%s" %
subcloud_name)
pass
def update_alarm_summary(self, cntx, region_name, thread_name=None,
fm_client=None):
LOG.info("Updating alarm summary for %s" % region_name)
try:
if fm_client is not None:
alarms = fm_client.get_alarm_summary()
else:
os_client = sdk.OpenStackDriver(region_name=region_name,
thread_name=thread_name)
alarms = os_client.fm_client.get_alarm_summary()
alarm_updates = {'critical_alarms': alarms[0].critical,
'major_alarms': alarms[0].major,
'minor_alarms': alarms[0].minor,
'warnings': alarms[0].warnings}
alarm_updates = self._set_cloud_status(alarm_updates)
db_api.subcloud_alarms_update(self.context, region_name,
alarm_updates)
except Exception:
LOG.error('Failed to update alarms for %s' % region_name)
def _set_cloud_status(self, alarm_dict):
status = consts.ALARM_OK_STATUS
if (alarm_dict.get('major_alarms') > 0) or\
(alarm_dict.get('minor_alarms') > 0):
status = consts.ALARM_DEGRADED_STATUS
if (alarm_dict.get('critical_alarms') > 0):
status = consts.ALARM_CRITICAL_STATUS
alarm_dict['cloud_status'] = status
return alarm_dict
def get_alarm_summary(self, ctxt):
alarms = db_api.subcloud_alarms_get_all(self.context)
summary = []
for alarm in alarms:
alarm_dict = {'region_name': alarm['region_name'],
'uuid': alarm['uuid'],
'critical_alarms': alarm['critical_alarms'],
'major_alarms': alarm['major_alarms'],
'minor_alarms': alarm['minor_alarms'],
'warnings': alarm['warnings'],
'cloud_status': alarm['cloud_status']}
summary.append(alarm_dict)
return summary
class PeriodicAlarmUpdate(threading.Thread):
def __init__(self, parent):
super(PeriodicAlarmUpdate, self).__init__()
self.parent = parent
self.context = context.get_admin_context()
self._stop = threading.Event()
self.interval = CONF.snmp.alarm_audit_interval_time
self.system_last_update = datetime.datetime.now()
def run_updates(self):
while not self.stopped():
delta = (datetime.datetime.now() -
self.system_last_update).total_seconds()
if delta < self.interval:
time.sleep(1.0)
continue
try:
LOG.info('Running alarm summary update sync')
self.system_last_update = datetime.datetime.now()
subclouds = db_api.subcloud_get_all(self.context)
for subcloud in subclouds:
if self.stopped():
break
if subcloud['availability_status'] ==\
dcm_consts.AVAILABILITY_ONLINE:
self.parent.\
update_alarm_summary(self.context,
subcloud['region_name'],
self.name)
except Exception:
pass
time.sleep(1.0)
LOG.info("Periodic Alarm Update Thread Stopped")
def stopped(self):
return self._stop.isSet()
def stop(self):
LOG.info("Periodic Alarm Update Thread Stopping")
self._stop.set()
def run(self):
self.run_updates()

View File

@ -33,11 +33,10 @@ SYNC_FAIL_HOLD_OFF = 60
class InitialSyncManager(object):
"""Manages the initial sync for each subcloud."""
def __init__(self, gsm, fkm, aam, *args, **kwargs):
def __init__(self, gsm, fkm, *args, **kwargs):
super(InitialSyncManager, self).__init__()
self.gsm = gsm
self.fkm = fkm
self.aam = aam
self.context = context.get_admin_context()
# Keeps track of greenthreads we create to do work.
self.thread_group_manager = scheduler.ThreadGroupManager(
@ -143,7 +142,6 @@ class InitialSyncManager(object):
try:
self.gsm.initial_sync(self.context, subcloud_name)
self.fkm.distribute_keys(self.context, subcloud_name)
self.aam.enable_snmp(self.context, subcloud_name)
except Exception as e:
LOG.exception('Initial sync failed for %s: %s', subcloud_name, e)
# We need to try again

View File

@ -28,7 +28,6 @@ from dcorch.common import context
from dcorch.common import exceptions
from dcorch.common.i18n import _
from dcorch.common import messaging as rpc_messaging
from dcorch.engine.alarm_aggregate_manager import AlarmAggregateManager
from dcorch.engine.fernet_key_manager import FernetKeyManager
from dcorch.engine.generic_sync_manager import GenericSyncManager
from dcorch.engine.initial_sync_manager import InitialSyncManager
@ -82,7 +81,6 @@ class EngineService(service.Service):
self._rpc_server = None
self.qm = None
self.gsm = None
self.aam = None
self.fkm = None
self.ism = None
@ -97,14 +95,11 @@ class EngineService(service.Service):
self.gsm = GenericSyncManager()
self.gsm.init_from_db(ctxt)
def init_aam(self):
self.aam = AlarmAggregateManager()
def init_fkm(self):
self.fkm = FernetKeyManager(self.gsm)
def init_ism(self):
self.ism = InitialSyncManager(self.gsm, self.fkm, self.aam)
self.ism = InitialSyncManager(self.gsm, self.fkm)
self.ism.init_actions()
self.TG.start(self.ism.initial_sync_thread)
@ -113,7 +108,6 @@ class EngineService(service.Service):
self.init_tgm()
self.init_qm()
self.init_gsm()
self.init_aam()
self.init_fkm()
self.init_ism()
target = oslo_messaging.Target(version=self.rpc_api_version,
@ -133,7 +127,7 @@ class EngineService(service.Service):
LOG.info("Adding periodic tasks for the engine to perform")
self.TG.add_timer(self.periodic_interval,
self.periodic_sync_audit,
initial_delay=self.periodic_interval / 2)
initial_delay=30)
self.TG.add_timer(CONF.fernet.key_rotation_interval *
dccommon_consts.SECONDS_IN_HOUR,
self.periodic_key_rotation,
@ -264,14 +258,6 @@ class EngineService(service.Service):
def update_subcloud_version(self, ctxt, subcloud_name, sw_version):
self.gsm.update_subcloud_version(ctxt, subcloud_name, sw_version)
@request_context
def update_alarm_summary(self, ctxt, region_name):
self.aam.update_alarm_summary(ctxt, region_name)
@request_context
def get_alarm_summary(self, ctxt):
return self.aam.get_alarm_summary(ctxt)
@request_context
# The sync job info has been written to the DB, alert the sync engine
# that there is work to do.
@ -300,8 +286,6 @@ class EngineService(service.Service):
self._stop_rpc_server()
self.TG.stop()
if self.aam:
self.aam.shutdown()
# Terminate the engine process
LOG.info("All threads were gone, terminating engine")
super(EngineService, self).stop()

View File

@ -48,9 +48,7 @@ class SubCloudEngine(object):
self.subcloud = subcloud
else:
capabilities = {}
endpoint_type_list = dco_consts.ENDPOINT_TYPES_LIST[:]
# patching is handled by dcmanager
endpoint_type_list.remove(dco_consts.ENDPOINT_TYPE_PATCHING)
endpoint_type_list = dco_consts.SYNC_ENDPOINT_TYPES_LIST[:]
capabilities.update({'endpoint_types': endpoint_type_list})
self.subcloud = Subcloud(
context, region_name=name, software_version=version,

View File

@ -61,7 +61,7 @@ class ComputeSyncThread(SyncThread):
if (not self.sc_nova_client and self.sc_admin_session):
self.sc_nova_client = novaclient.Client(
'2.38', session=self.sc_admin_session,
endpoint_type=dccommon_consts.KS_ENDPOINT_INTERNAL,
endpoint_type=dccommon_consts.KS_ENDPOINT_ADMIN,
region_name=self.subcloud_engine.subcloud.region_name)
def initialize(self):

View File

@ -1,4 +1,4 @@
# Copyright 2018 Wind River
# Copyright 2018-2020 Wind River
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -101,17 +101,43 @@ class IdentitySyncThread(SyncThread):
if (not self.sc_dbs_client and self.sc_admin_session):
self.sc_dbs_client = dbsyncclient.Client(
session=self.sc_admin_session,
endpoint_type=consts.DBS_ENDPOINT_INTERNAL,
endpoint_type=consts.DBS_ENDPOINT_ADMIN,
region_name=self.subcloud_engine.subcloud.region_name)
def reinitialize_m_clients(self):
def reauthenticate_m_dbs_client(self):
if self.m_dbs_client and self.admin_session:
self.m_dbs_client.update(session=self.admin_session)
def reinitialize_sc_clients(self):
def reauthenticate_m_ks_client(self):
if self.sc_ks_client and self.sc_admin_session:
self.sc_ks_client.authenticate(
auth_url=self.admin_session.auth.auth_url,
username=self.admin_session.auth._username,
password=self.admin_session.auth._password,
project_name=self.admin_session.auth._project_name,
user_domain_name=self.admin_session.auth._user_domain_name,
project_domain_name=self.admin_session.auth._project_domain_name,
)
def reauthenticate_sc_clients(self):
self.reauthenticate_sc_dbs_client()
self.reauthenticate_sc_ks_client()
def reauthenticate_sc_dbs_client(self):
if self.sc_dbs_client and self.sc_admin_session:
self.sc_dbs_client.update(session=self.sc_admin_session)
def reauthenticate_sc_ks_client(self):
if self.sc_ks_client and self.sc_admin_session:
self.sc_ks_client.authenticate(
auth_url=self.sc_admin_session.auth.auth_url,
username=self.sc_admin_session.auth._username,
password=self.sc_admin_session.auth._password,
project_name=self.sc_admin_session.auth._project_name,
user_domain_name=self.sc_admin_session.auth._user_domain_name,
project_domain_name=self.sc_admin_session.auth._project_domain_name,
)
def initialize(self):
# Subcloud may be enabled a while after being added.
# Keystone endpoints for the subcloud could be added in
@ -131,7 +157,10 @@ class IdentitySyncThread(SyncThread):
extra=self.log_extra)
def _initial_sync_users(self, m_users, sc_users):
# Particularly sync users with same name but different ID
# Particularly sync users with same name but different ID. admin user
# is a special case as the id's will match (as this is forced during
# the subcloud deploy) but the details will not so we still need to
# sync it here.
m_client = self.m_dbs_client.identity_manager
sc_client = self.sc_dbs_client.identity_manager
@ -139,7 +168,9 @@ class IdentitySyncThread(SyncThread):
for sc_user in sc_users:
if (m_user.local_user.name == sc_user.local_user.name and
m_user.domain_id == sc_user.domain_id and
m_user.id != sc_user.id):
(m_user.id != sc_user.id or
sc_user.local_user.name ==
dccommon_consts.ADMIN_USER_NAME)):
user_records = m_client.user_detail(m_user.id)
if not user_records:
LOG.error("No data retrieved from master cloud for"
@ -157,7 +188,7 @@ class IdentitySyncThread(SyncThread):
.format(sc_user.id,
self.subcloud_engine.subcloud.
region_name, str(e)))
self.reinitialize_sc_clients()
self.reauthenticate_sc_dbs_client()
user_ref = sc_client.update_user(sc_user.id,
user_records)
@ -167,8 +198,9 @@ class IdentitySyncThread(SyncThread):
raise exceptions.SyncRequestFailed
# If admin user get synced, the client need to
# re-authenticate.
if sc_user.local_user.name == "admin":
self.reinitialize_sc_clients()
if sc_user.local_user.name == \
dccommon_consts.ADMIN_USER_NAME:
self.reauthenticate_sc_clients()
def _initial_sync_projects(self, m_projects, sc_projects):
# Particularly sync projects with same name but different ID.
@ -197,7 +229,7 @@ class IdentitySyncThread(SyncThread):
.format(sc_project.id,
self.subcloud_engine.subcloud.
region_name, str(e)))
self.reinitialize_sc_clients()
self.reauthenticate_sc_dbs_client()
project_ref = sc_client.update_project(sc_project.id,
project_records)
@ -208,8 +240,46 @@ class IdentitySyncThread(SyncThread):
raise exceptions.SyncRequestFailed
# If admin project get synced, the client need to
# re-authenticate.
if sc_project.name == "admin":
self.reinitialize_sc_clients()
if sc_project.name == dccommon_consts.ADMIN_PROJECT_NAME:
self.reauthenticate_sc_clients()
def _initial_sync_roles(self, m_roles, sc_roles):
# Particularly sync roles with same name but different ID
m_client = self.m_dbs_client.role_manager
sc_client = self.sc_dbs_client.role_manager
for m_role in m_roles:
for sc_role in sc_roles:
if (m_role.name == sc_role.name and
m_role.domain_id == sc_role.domain_id and
m_role.id != sc_role.id):
role_record = m_client.role_detail(m_role.id)
if not role_record:
LOG.error("No data retrieved from master cloud for"
" role {} to update its equivalent in"
" subcloud.".format(m_role.id))
raise exceptions.SyncRequestFailed
# update the role by pushing down the DB records to
# subcloud
try:
role_ref = sc_client.update_role(sc_role.id,
role_record)
# Retry once if unauthorized
except dbsync_exceptions.Unauthorized as e:
LOG.info("Update role {} request failed for {}: {}."
.format(sc_role.id,
self.subcloud_engine.subcloud.
region_name, str(e)))
self.reauthenticate_sc_dbs_client()
role_ref = sc_client.update_role(sc_role.id,
role_record)
if not role_ref:
LOG.error("No role data returned when updating role {}"
" in subcloud {}."
.format(sc_role.id, self.subcloud_engine.
subcloud.region_name))
raise exceptions.SyncRequestFailed
def initial_sync(self):
# Service users and projects are created at deployment time. They exist
@ -266,6 +336,26 @@ class IdentitySyncThread(SyncThread):
self._initial_sync_projects(m_projects, sc_projects)
# get roles from master cloud
m_roles = self.get_master_resources(
consts.RESOURCE_TYPE_IDENTITY_ROLES)
if not m_roles:
LOG.error("No roles returned from {}".
format(dccommon_consts.VIRTUAL_MASTER_CLOUD))
raise exceptions.SyncRequestFailed
# get roles from the subcloud
sc_roles = self.get_subcloud_resources(
consts.RESOURCE_TYPE_IDENTITY_ROLES)
if not sc_roles:
LOG.error("No roles returned from subcloud {}".
format(self.subcloud_engine.subcloud.region_name))
raise exceptions.SyncRequestFailed
self._initial_sync_roles(m_roles, sc_roles)
# Return True if no exceptions
return True
@ -297,12 +387,13 @@ class IdentitySyncThread(SyncThread):
.format(self.subcloud_engine.subcloud.region_name,
str(e)), extra=self.log_extra)
raise exceptions.SyncRequestTimeout
except dbsync_exceptions.Unauthorized as e:
except (dbsync_exceptions.Unauthorized,
keystone_exceptions.Unauthorized) as e:
LOG.info("Request [{}] failed for {}: {}"
.format(request.orch_job.operation_type,
self.subcloud_engine.subcloud.region_name,
str(e)), extra=self.log_extra)
self.reinitialize_sc_clients()
self.reauthenticate_sc_clients()
raise exceptions.SyncRequestFailedRetry
except exceptions.SyncRequestFailed:
raise
@ -1360,7 +1451,6 @@ class IdentitySyncThread(SyncThread):
# All are found
else:
result = True
return result
def _has_same_identity_ids(self, m, sc):
@ -1472,62 +1562,86 @@ class IdentitySyncThread(SyncThread):
# Retrieve master resources from DB or through Keystone.
# users, projects, roles, and token revocation events use
# dbsync client, other resources use keystone client.
try:
if resource_type == consts.RESOURCE_TYPE_IDENTITY_USERS or \
resource_type == consts.RESOURCE_TYPE_IDENTITY_PROJECTS or \
resource_type == consts.RESOURCE_TYPE_IDENTITY_ROLES or \
resource_type == \
consts.RESOURCE_TYPE_IDENTITY_TOKEN_REVOKE_EVENTS or \
resource_type == \
consts.RESOURCE_TYPE_IDENTITY_TOKEN_REVOKE_EVENTS_FOR_USER:
if self.is_resource_handled_by_dbs_client(resource_type):
try:
return self._get_resource_audit_handler(resource_type,
self.m_dbs_client)
return self._get_resource_audit_handler(resource_type,
self.m_ks_client)
except dbsync_exceptions.Unauthorized as e:
LOG.info("Get resource [{}] request failed for {}: {}."
.format(resource_type,
dccommon_consts.VIRTUAL_MASTER_CLOUD,
str(e)), extra=self.log_extra)
# In case of token expires, re-authenticate and retry once
self.reinitialize_m_clients()
return self._get_resource_audit_handler(resource_type,
self.m_dbs_client)
except Exception as e:
LOG.exception(e)
return None
except dbsync_exceptions.Unauthorized as e:
LOG.info("Get master resource [{}] request failed for {}: {}."
.format(resource_type,
dccommon_consts.VIRTUAL_MASTER_CLOUD,
str(e)), extra=self.log_extra)
# Token might be expired, re-authenticate dbsync client
self.reauthenticate_m_dbs_client()
# Retry with re-authenticated dbsync client
return self._get_resource_audit_handler(resource_type,
self.m_dbs_client)
except Exception as e:
LOG.exception(e)
return None
else:
try:
return self._get_resource_audit_handler(resource_type,
self.m_ks_client)
except keystone_exceptions.Unauthorized as e:
LOG.info("Get master resource [{}] request failed for {}: {}."
.format(resource_type,
dccommon_consts.VIRTUAL_MASTER_CLOUD,
str(e)), extra=self.log_extra)
# Token might be expired, re-authenticate ks client
self.reauthenticate_m_ks_client()
# Retry with re-authenticated ks client
return self._get_resource_audit_handler(resource_type,
self.m_ks_client)
except Exception as e:
LOG.exception(e)
return None
def get_subcloud_resources(self, resource_type):
self.initialize_sc_clients()
# Retrieve master resources from DB or through keystone.
# Retrieve subcloud resources from DB or through keystone.
# users, projects, roles, and token revocation events use
# dbsync client, other resources use keystone client.
try:
if resource_type == consts.RESOURCE_TYPE_IDENTITY_USERS or \
resource_type == \
consts.RESOURCE_TYPE_IDENTITY_PROJECTS or \
resource_type == consts.RESOURCE_TYPE_IDENTITY_ROLES or \
resource_type == \
consts.RESOURCE_TYPE_IDENTITY_TOKEN_REVOKE_EVENTS or \
resource_type == \
consts.RESOURCE_TYPE_IDENTITY_TOKEN_REVOKE_EVENTS_FOR_USER:
if self.is_resource_handled_by_dbs_client(resource_type):
try:
return self._get_resource_audit_handler(resource_type,
self.sc_dbs_client)
return self._get_resource_audit_handler(resource_type,
self.sc_ks_client)
except dbsync_exceptions.Unauthorized as e:
LOG.info("Get subcloud resource [{}] request failed for {}: {}."
.format(resource_type,
self.subcloud_engine.subcloud.region_name,
str(e)), extra=self.log_extra)
except dbsync_exceptions.Unauthorized as e:
LOG.info("Get resource [{}] request failed for {}: {}."
.format(resource_type,
self.subcloud_engine.subcloud.region_name,
str(e)), extra=self.log_extra)
# In case of token expires, re-authenticate and retry once
self.reinitialize_sc_clients()
return self._get_resource_audit_handler(resource_type,
self.sc_dbs_client)
except Exception as e:
LOG.exception(e)
return None
# Token might be expired, re-authenticate dbsync client
self.reauthenticate_sc_dbs_client()
# Retry with re-authenticated dbsync client
return self._get_resource_audit_handler(resource_type,
self.sc_dbs_client)
except Exception as e:
LOG.exception(e)
return None
else:
try:
return self._get_resource_audit_handler(resource_type,
self.sc_ks_client)
except keystone_exceptions.Unauthorized as e:
LOG.info("Get subcloud resource [{}] request failed for {}: {}."
.format(resource_type,
self.subcloud_engine.subcloud.region_name,
str(e)), extra=self.log_extra)
# Token might be expired, re-authenticate ks client
self.reauthenticate_sc_ks_client()
# Retry with re-authenticated ks client
return self._get_resource_audit_handler(resource_type,
self.sc_ks_client)
except Exception as e:
LOG.exception(e)
return None
def same_resource(self, resource_type, m_resource, sc_resource):
if (resource_type ==
@ -1674,3 +1788,15 @@ class IdentitySyncThread(SyncThread):
exist = True
break
return exist
@staticmethod
def is_resource_handled_by_dbs_client(resource_type):
if resource_type in [
consts.RESOURCE_TYPE_IDENTITY_USERS,
consts.RESOURCE_TYPE_IDENTITY_PROJECTS,
consts.RESOURCE_TYPE_IDENTITY_ROLES,
consts.RESOURCE_TYPE_IDENTITY_TOKEN_REVOKE_EVENTS,
consts.RESOURCE_TYPE_IDENTITY_TOKEN_REVOKE_EVENTS_FOR_USER
]:
return True
return False

View File

@ -62,7 +62,7 @@ class NetworkSyncThread(SyncThread):
if (not self.sc_neutron_client and self.sc_admin_session):
self.sc_neutron_client = neutronclient.Client(
"2.0", session=self.sc_admin_session,
endpoint_type=dccommon_consts.KS_ENDPOINT_INTERNAL,
endpoint_type=dccommon_consts.KS_ENDPOINT_ADMIN,
region_name=self.subcloud_engine.subcloud.region_name)
def initialize(self):

View File

@ -51,6 +51,8 @@ class SysinvSyncThread(SyncThread):
CERTIFICATE_SIG_NULL = 'NoCertificate'
RESOURCE_UUID_NULL = 'NoResourceUUID'
AVOID_SYNC_CERTIFICATES = ["ssl"]
def __init__(self, subcloud_engine):
super(SysinvSyncThread, self).__init__(subcloud_engine)
@ -353,6 +355,9 @@ class SysinvSyncThread(SyncThread):
extra=self.log_extra)
return
if payload.get('certtype') in self.AVOID_SYNC_CERTIFICATES:
return
if isinstance(payload, dict):
signature = payload.get('signature')
LOG.info("signature from dict={}".format(signature))
@ -393,7 +398,7 @@ class SysinvSyncThread(SyncThread):
return
try:
certificates = s_os_client.sysinv_client.get_certificates()
certificates = self.get_certificates_resources(s_os_client)
cert_to_delete = None
for certificate in certificates:
if certificate.signature == subcloud_rsrc.subcloud_resource_id:
@ -651,7 +656,13 @@ class SysinvSyncThread(SyncThread):
return os_client.sysinv_client.snmp_community_list()
def get_certificates_resources(self, os_client):
return os_client.sysinv_client.get_certificates()
certificate_list = os_client.sysinv_client.get_certificates()
# Filter SSL certificates to avoid sync
filtered_list = [certificate
for certificate in certificate_list
if certificate.certtype not in
self.AVOID_SYNC_CERTIFICATES]
return filtered_list
def get_user_resource(self, os_client):
return os_client.sysinv_client.get_user()

View File

@ -56,7 +56,7 @@ class VolumeSyncThread(SyncThread):
if (not self.sc_cinder_client and self.sc_admin_session):
self.sc_cinder_client = cinderclient.Client(
"3.0", session=self.sc_admin_session,
endpoint_type=dccommon_consts.KS_ENDPOINT_INTERNAL,
endpoint_type=dccommon_consts.KS_ENDPOINT_ADMIN,
region_name=self.subcloud_engine.subcloud.region_name)
def initialize(self):

View File

@ -19,6 +19,7 @@ import threading
from oslo_concurrency import lockutils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import timeutils
from dccommon import consts as dccommon_consts
from dcdbsync.dbsyncclient import client as dbsyncclient
@ -91,6 +92,7 @@ class SyncThread(object):
self.admin_session = None
self.ks_client = None
self.dbs_client = None
self.initial_audit_in_progress = True
def start(self):
if self.status == STATUS_NEW:
@ -206,6 +208,7 @@ class SyncThread(object):
def enable(self):
# Called when DC manager thinks this subcloud is good to go.
self.initialize()
self.initial_audit_in_progress = True
self.wake()
self.run_sync_audit()
@ -281,6 +284,11 @@ class SyncThread(object):
self.sync_status = sync_status
self.subcloud_managed = subcloud_managed
# If initial audit is in progress, do not send the endpoint
# status update to dcmanager
if self.initial_audit_in_progress:
return
self.dcmanager_rpc_client.update_subcloud_endpoint_status(
self.ctxt, self.subcloud_engine.subcloud.region_name,
self.endpoint_type, sync_status)
@ -298,6 +306,7 @@ class SyncThread(object):
states = [
consts.ORCH_REQUEST_QUEUED,
consts.ORCH_REQUEST_IN_PROGRESS,
consts.ORCH_REQUEST_FAILED,
]
sync_requests = orchrequest.OrchRequestList.get_by_attrs(
self.ctxt, self.endpoint_type,
@ -316,7 +325,13 @@ class SyncThread(object):
else:
self.set_sync_status(dcmanager_consts.SYNC_STATUS_IN_SYNC)
if (not sync_requests or not subcloud_enabled or
# Failed orch requests were taken into consideration when reporting
# sync status to the dcmanager. They need to be removed from the
# orch requests list before proceeding.
actual_sync_requests = \
[r for r in sync_requests if r.state != consts.ORCH_REQUEST_STATE_FAILED]
if (not actual_sync_requests or not subcloud_enabled or
self.status == STATUS_TIMEDOUT):
# Either there are no sync requests, or subcloud is disabled,
# or we timed out trying to talk to it.
@ -335,7 +350,7 @@ class SyncThread(object):
# we have work to do.
self.condition.release()
try:
for request in sync_requests:
for request in actual_sync_requests:
if not self.subcloud_engine.is_enabled() or \
self.should_exit():
# Oops, someone disabled the endpoint while
@ -347,9 +362,15 @@ class SyncThread(object):
while retry_count < self.MAX_RETRY:
try:
self.sync_resource(request)
# Sync succeeded, mark the request as
# completed for tracking/debugging purpose
# and tag it for purge when its deleted
# time exceeds the data retention period.
request.state = \
consts.ORCH_REQUEST_STATE_COMPLETED
request.save() # save to DB
request.deleted = 1
request.deleted_at = timeutils.utcnow()
request.save()
break
except exceptions.SyncRequestTimeout:
request.try_count += 1
@ -384,7 +405,7 @@ class SyncThread(object):
except exceptions.EndpointNotReachable:
# Endpoint not reachable, throw away all the sync requests.
LOG.info("EndpointNotReachable, {} sync requests pending"
.format(len(sync_requests)))
.format(len(actual_sync_requests)))
# del sync_requests[:] #This fails due to:
# 'OrchRequestList' object does not support item deletion
self.condition.acquire()
@ -430,6 +451,16 @@ class SyncThread(object):
LOG.debug("{}: starting sync audit".format(self.audit_thread.name),
extra=self.log_extra)
most_recent_failed_request = \
orchrequest.OrchRequest.get_most_recent_failed_request(self.ctxt)
if most_recent_failed_request:
LOG.debug('Most recent failed request id=%s, timestamp=%s',
most_recent_failed_request.id,
most_recent_failed_request.updated_at)
else:
LOG.debug('There are no failed requests.')
total_num_of_audit_jobs = 0
for resource_type in self.audit_resources:
if not self.subcloud_engine.is_enabled() or self.should_exit():
@ -477,8 +508,9 @@ class SyncThread(object):
num_of_audit_jobs += self.audit_find_extra(
resource_type, m_resources, db_resources, sc_resources,
abort_resources)
except Exception as e:
LOG.exception(e)
except Exception:
LOG.exception("Unexpected error while auditing %s",
resource_type)
# Extra resources in subcloud are not impacted by the audit.
@ -487,6 +519,22 @@ class SyncThread(object):
extra=self.log_extra)
total_num_of_audit_jobs += num_of_audit_jobs
if most_recent_failed_request:
# Soft delete all failed requests in the previous sync audit.
try:
orchrequest.OrchRequest.delete_previous_failed_requests(
self.ctxt, most_recent_failed_request.updated_at)
# The sync() thread may have already finished processing
# the sync requests by the time we get here, wake it up
# to send the latest status update.
if not self.initial_audit_in_progress:
self.wake()
except Exception:
# shouldn't get here
LOG.exception("Unexpected error!")
if not total_num_of_audit_jobs:
# todo: if we had an "unable to sync this
# subcloud/endpoint" alarm raised, then clear it
@ -496,6 +544,13 @@ class SyncThread(object):
extra=self.log_extra)
self.post_audit()
# Once initial audit is complete, we wake up the sync thread
# so that it sends a proper sync status update (either in-sync or
# out-of-sync) to dcmanager for that endpoint type.
if self.initial_audit_in_progress:
self.initial_audit_in_progress = False
self.wake()
@lockutils.synchronized(AUDIT_LOCK_NAME)
def post_audit(self):
# reset the cached master resources

View File

@ -35,7 +35,10 @@ class OrchRequest(base.OrchestratorObject, base.VersionedObjectDictCompat):
'api_version': fields.StringField(nullable=True),
'target_region_name': fields.StringField(),
'orch_job_id': fields.IntegerField(),
'orch_job': fields.ObjectField('OrchJob')
'orch_job': fields.ObjectField('OrchJob'),
'updated_at': fields.DateTimeField(nullable=True),
'deleted_at': fields.DateTimeField(nullable=True),
'deleted': fields.IntegerField()
}
def create(self):
@ -86,6 +89,15 @@ class OrchRequest(base.OrchestratorObject, base.VersionedObjectDictCompat):
db_orch_request = db_api.orch_request_get(context, id)
return cls._from_db_object(context, cls(), db_orch_request)
@classmethod
def get_most_recent_failed_request(cls, context):
db_orch_request = \
db_api.orch_request_get_most_recent_failed_request(context)
if db_orch_request:
return cls._from_db_object(context, cls(), db_orch_request)
else:
return None
def save(self):
updates = self.obj_get_changes()
updates.pop('id', None)
@ -98,6 +110,11 @@ class OrchRequest(base.OrchestratorObject, base.VersionedObjectDictCompat):
def delete(self):
db_api.orch_request_destroy(self._context, self.id)
@classmethod
def delete_previous_failed_requests(cls, context, delete_time):
db_api.orch_request_delete_previous_failed_requests(
context, delete_time)
@base.OrchestratorObjectRegistry.register
class OrchRequestList(ovo_base.ObjectListBase, base.OrchestratorObject):

View File

@ -19,7 +19,6 @@
"""Subcloud object."""
from oslo_log import log as logging
from dcorch.common import consts
from dcorch.common import exceptions
from dcorch.db import api as db_api
from dcorch.objects import base
@ -57,14 +56,6 @@ class Subcloud(base.OrchestratorObject, base.VersionedObjectDictCompat):
reason="cannot create a Subcloud object without a "
"region_name set")
try:
# create entry into alarm summary table, we will get real values later
alarm_updates = {'critical_alarms': -1,
'major_alarms': -1,
'minor_alarms': -1,
'warnings': -1,
'cloud_status': consts.ALARMS_DISABLED}
db_api.subcloud_alarms_create(self._context, region_name,
alarm_updates)
db_subcloud = db_api.subcloud_create(
self._context, region_name, updates)
return self._from_db_object(self._context, self, db_subcloud)
@ -99,12 +90,6 @@ class Subcloud(base.OrchestratorObject, base.VersionedObjectDictCompat):
except Exception as e:
LOG.error("Failed to delete orchestration request for %s: %s"
% (self.region_name, e))
# delete the associated alarm entry
try:
db_api.subcloud_alarms_delete(self._context, self.region_name)
except Exception as e:
LOG.error("Failed to delete alarm entry for %s: %s"
% (self.region_name, e))
try:
db_api.subcloud_delete(self._context, self.region_name)
except Exception as e:

View File

@ -100,7 +100,7 @@ class EngineClient(object):
def add_subcloud_sync_endpoint_type(self, ctxt, subcloud_name,
endpoint_type_list):
return self.call(
return self.cast(
ctxt,
self.make_msg('add_subcloud_sync_endpoint_type',
subcloud_name=subcloud_name,
@ -108,7 +108,7 @@ class EngineClient(object):
def remove_subcloud_sync_endpoint_type(self, ctxt, subcloud_name,
endpoint_type_list):
return self.call(
return self.cast(
ctxt,
self.make_msg('remove_subcloud_sync_endpoint_type',
subcloud_name=subcloud_name,
@ -120,15 +120,6 @@ class EngineClient(object):
self.make_msg('update_subcloud_version',
subcloud_name=subcloud_name, sw_version=sw_version))
def update_alarm_summary(self, ctxt, region_name):
return self.cast(
ctxt, self.make_msg('update_alarm_summary',
region_name=region_name))
def get_alarm_summary(self, ctxt):
return self.call(
ctxt, self.make_msg('get_alarm_summary'))
# The sync job info has been written to the DB, alert the sync engine
# that there is work to do.
def sync_request(self, ctxt, endpoint_type):

View File

@ -1,9 +0,0 @@
===============================
snmp
===============================
DC Orchestrator SNMP is an SNMP trap receiver service that handles alarm
events generated in the subclouds. Alarms are throttled and sent by RPC to the
orchestrator engine to update the alarm summary of the subcloud.

View File

@ -1,105 +0,0 @@
# Copyright (c) 2017 Ericsson AB.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import datetime
from dccommon import consts as dccommon_consts
from dcorch.common import context
from dcorch.rpc import client as rpc_client
from multiprocessing import Queue
from oslo_log import log as logging
LOG = logging.getLogger(__name__)
class Controller(object):
    """Aggregates SNMP trap events per subcloud system and decides when to
    request an alarm-summary update over RPC, throttling bursts of traps.

    Throttling: if more than cfg.snmp.throttle_threshold traps arrive
    within cfg.snmp.delay_time seconds and an update was sent within that
    window, the update is deferred until delay_time after the last update.
    handle_delayed_notifications() fires deferred updates when due.
    """

    # Per-system bookkeeping, keyed by system (subcloud) name.
    # NOTE(review): these are class attributes shared by all instances;
    # assumes a single Controller per process — confirm.
    system_throttle_timers = {}   # when a deferred update should fire
    system_last_updates = {}      # timestamp of the last update sent
    system_in_delay = {}          # True while an update is deferred
    system_trap_tstamps = {}      # recent trap timestamps (bounded deque)

    def __init__(self, systems, cfg):
        """systems: iterable of system names to pre-register.
        cfg: oslo config object providing the snmp option group."""
        self.cfg = cfg
        # Fed by the SNMP trap server process.
        self.event_queue = Queue()
        self.rpc_client = rpc_client.EngineClient()
        self.throttle_threshold = self.cfg.snmp.throttle_threshold
        for i in systems:
            self._add_system(i)

    def send_notification(self, system):
        """Send an alarm-summary update request for a system via RPC."""
        LOG.debug("Sending update request for %s" % (system))
        try:
            ctx = context.get_admin_context()
            self.rpc_client.update_alarm_summary(ctx, system)
        except Exception:
            LOG.error('Failed to send update for system %s' % system)
            return
        # Record the send time in UTC to match the trap timestamps taken
        # in handle_trap(); the original used local datetime.now(), which
        # skewed the throttling delta on hosts not running in UTC.
        self.system_last_updates[system] = datetime.datetime.utcnow()

    def _add_system(self, system):
        """Initialize throttle state for a newly seen system."""
        # Arbitrarily distant last update, ensures first trap updates
        self.system_last_updates[system] = datetime.datetime(1989, 3, 9)
        self.system_throttle_timers[system] = None
        self.system_in_delay[system] = False
        self.system_trap_tstamps[system] = collections.deque()

    def handle_trap(self, system, msg):
        """Handle one trap: update immediately, defer the update, or ignore."""
        if system == dccommon_consts.CLOUD_0:
            return
        if not (system in self.system_last_updates):
            self._add_system(system)
        tstamp = datetime.datetime.utcnow()
        self.system_trap_tstamps[system].append(tstamp)
        # we throttle the notification in the following conditions
        # 1. system is already being throttled (ignores notification)
        # 2. If more than throttle_threshold traps have come within
        #    delay_time and we last updated within delay_time
        # otherwise we request an update for the system
        if len(self.system_trap_tstamps[system]) > self.throttle_threshold:
            self.system_trap_tstamps[system].popleft()
        if self.system_in_delay[system]:
            LOG.debug("No action for %s , msg: %s. Already in delay" %
                      (system, msg))
            return
        if self._should_throttle_notification(system, tstamp):
            delta = (tstamp -
                     self.system_last_updates[system]).total_seconds()
            if delta > self.cfg.snmp.delay_time:
                self.send_notification(system)
            else:
                notification_time = self.system_last_updates[system] +\
                    datetime.timedelta(0, self.cfg.snmp.delay_time)
                self.system_throttle_timers[system] = notification_time
                # Mark the system as in delay so subsequent traps are
                # ignored until the deferred update fires.  The original
                # never set this flag, leaving the "Already in delay"
                # branch above unreachable.
                self.system_in_delay[system] = True
        else:
            self.send_notification(system)

    def _should_throttle_notification(self, system, new_trap_tstamp):
        """Return True when the recent trap rate exceeds the threshold.

        True when at least throttle_threshold trap timestamps are recorded
        and the oldest of them falls within delay_time of the newest trap.
        """
        d = self.system_trap_tstamps[system]
        if len(d) < self.throttle_threshold:
            return False
        if d[0] < new_trap_tstamp -\
                datetime.timedelta(0, self.cfg.snmp.delay_time):
            return False
        return True

    def handle_delayed_notifications(self):
        """Fire any deferred updates whose scheduled time has passed."""
        curr_time = datetime.datetime.utcnow()
        for system, notify_time in self.system_throttle_timers.items():
            if notify_time is not None:
                if curr_time > notify_time:
                    self.send_notification(system)
                    self.system_throttle_timers[system] = None
                    self.system_in_delay[system] = False
self.system_in_delay[system] = False

View File

@ -1,56 +0,0 @@
# Copyright (c) 2017 Ericsson AB.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
import threading
import time
LOG = logging.getLogger(__name__)
class QueueMonitor(threading.Thread):
    """Drains the SNMP controller's event queue on a dedicated thread.

    Forwards each (system, msg) event to the controller and lets the
    controller flush delayed notifications on every loop iteration.
    Exits when stop() is called or the SNMP server process dies.
    """

    def __init__(self, controller, snmp_process):
        """controller: Controller owning event_queue / handle_trap().
        snmp_process: the multiprocessing.Process running the trap server."""
        super(QueueMonitor, self).__init__()
        self.snmp_process = snmp_process
        self.controller = controller
        # Use a distinct attribute name: assigning to self._stop would
        # shadow threading.Thread's internal _stop() method on Python 3
        # and break Thread.join().
        self._stop_event = threading.Event()

    def read_queue(self):
        """Consume queued events until stopped or the server process exits."""
        while not self.stopped():
            item = None
            try:
                # Short timeout so stop() and process death are noticed
                # promptly even when no traps arrive.
                item = self.controller.event_queue.get(True, 0.1)
            except Exception:
                item = ""
            if item == "":
                time.sleep(0.1)
            else:
                system = item[0]
                msg = item[1]
                if not (system is None):
                    self.controller.handle_trap(system, msg)
            self.controller.handle_delayed_notifications()
            if not self.snmp_process.is_alive():
                break
        LOG.info("Stopping Queue Management Thread")

    def stopped(self):
        """Return True once stop() has been requested."""
        return self._stop_event.is_set()

    def stop(self):
        """Request the monitor loop to exit."""
        self._stop_event.set()

    def run(self):
        self.read_queue()

View File

@ -1,46 +0,0 @@
# Copyright (c) 2017 Ericsson AB.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from controller import Controller
from multiprocessing import Process
from oslo_log import log as logging
from oslo_service import service
from queue_monitor import QueueMonitor
from snmp_server import SNMPTrapServer
LOG = logging.getLogger(__name__)
class SNMPService(service.Service):
    """Hosts the SNMP trap server (child process) and the queue-monitor
    thread that feeds trap events from it to the controller.
    """

    def __init__(self, cfg):
        super(SNMPService, self).__init__()
        cont = Controller([], cfg.CONF)
        # Run the SNMP listener in a separate process; it communicates
        # back through cont.event_queue.
        self.snmp_server = Process(target=self.launch_SNMP_server,
                                   args=(cont.event_queue, cfg.CONF))
        self.snmp_server.start()
        self.queue_thread = QueueMonitor(cont, self.snmp_server)
        LOG.info('Starting Queue Monitor Thread')
        self.queue_thread.start()
        self.queue_thread.join()

    def launch_SNMP_server(self, q, config):
        """Child-process entry point: run the SNMP trap server.

        NOTE(review): the event queue is passed as the trap server's
        'controller' argument; SNMPTrapServer only calls .put() on it.
        """
        trap_server = SNMPTrapServer(controller=q, cfg=config)
        LOG.info('Starting SNMP Server Thread')
        trap_server.run()

    def end(self):
        """Stop the queue-monitor thread and the SNMP server process."""
        self.queue_thread.stop()
        # multiprocessing.Process has no stop() method; the original
        # self.snmp_server.stop() raised AttributeError here.  Terminate
        # the child process and reap it instead.
        self.snmp_server.terminate()
        self.snmp_server.join()

View File

@ -1,101 +0,0 @@
# Copyright 2016 Ericsson AB
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
File to store all the configurations
"""
from dcorch.common.i18n import _
from dcorch.common import version
from oslo_config import cfg
from oslo_log import log as logging
import sys
LOG = logging.getLogger(__name__)
snmp_server_opts = [
cfg.StrOpt('snmp_ip', default='0.0.0.0',
help='ip to listen on'),
cfg.IntOpt('snmp_port',
default=162,
help='snmp trap port'),
cfg.StrOpt('snmp_comm_str', default='dcorchAlarmAggregator',
help='community string'),
cfg.StrOpt('snmp_sec_area', default='fm-aggregator',
help='security area'),
cfg.StrOpt('auth_strategy', default='keystone',
help=_("The type of authentication to use")),
cfg.IntOpt('delay_time',
default=30,
help='min time (seconds) between update requests per server'),
cfg.IntOpt('alarm_audit_interval_time',
default=787,
help='interval of periodic updates in seconds'),
cfg.IntOpt('throttle_threshold',
default=10,
help='min number alarms over delay_time before throttling')
]
snmp_opt_group = cfg.OptGroup(name='snmp',
title='SNMP Options')
def init(args, **kwargs):
    """Register options with oslo.config/oslo.log and parse the arguments.

    args: command-line arguments (excluding the program name).
    kwargs: forwarded to cfg.CONF (e.g. default_config_files).
    """
    # Register the configuration options
    # cfg.CONF.register_opts(common_opts)
    # ks_session.Session.register_conf_options(cfg.CONF)
    # auth.register_conf_options(cfg.CONF)
    logging.register_options(cfg.CONF)
    register_options()
    cfg.CONF(args=args, project='dc-orch',
             version='%%(prog)s %s' % version.version_info.release_string(),
             **kwargs)
def setup_logging():
    """Sets up the logging options for a log with supplied name."""
    product_name = "dc-orch"
    # Apply the registered logging options under the "dc-orch" product name,
    # then record the program, version and command line at startup.
    logging.setup(cfg.CONF, product_name)
    LOG.info("Logging enabled!")
    LOG.info("%(prog)s version %(version)s",
             {'prog': sys.argv[0],
              'version': version.version_info.release_string()})
    LOG.debug("command line: %s", " ".join(sys.argv))
def reset_service():
    """Re-apply logging setup; invoked on SIGHUP in daemon mode."""
    # Reset worker in case SIGHUP is called.
    # Note that this is called only in case a service is running in
    # daemon mode.
    setup_logging()

    # TODO(joehuang) enforce policy later
    # policy.refresh()
def test_init():
    """Minimal initialization for unit tests: register options and set up
    logging without parsing command-line arguments or config files."""
    # Register the configuration options
    # cfg.CONF.register_opts(common_opts)
    logging.register_options(cfg.CONF)
    register_options()
    setup_logging()
def list_opts():
    """Yield (group name, option list) pairs for oslo option discovery."""
    yield snmp_opt_group.name, snmp_server_opts
def register_options():
    """Register every option group from list_opts() with the global CONF."""
    for group, opts in list_opts():
        cfg.CONF.register_opts(opts, group=group)

View File

@ -1,102 +0,0 @@
# Copyright 2016 Ericsson AB
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from netaddr import IPAddress
from oslo_log import log as logging
from pysnmp.carrier.asynsock.dgram import udp
from pysnmp.carrier.asynsock.dgram import udp6
from pysnmp.entity import config
from pysnmp.entity import engine
from pysnmp.entity.rfc3413 import ntfrcv
import threading
LOG = logging.getLogger(__name__)
class SNMPTrapServer(threading.Thread):
    """Listens for SNMP traps via pysnmp and forwards the originating
    system name to the controller queue.

    Runs the pysnmp transport dispatcher on a dedicated thread; cb_fun()
    is invoked by pysnmp for each received notification.
    """

    def __init__(self, controller, cfg):
        """controller: object exposing put(); here it is the event queue.
        cfg: oslo config providing snmp ip/port/community options."""
        threading.Thread.__init__(self)
        self.controller = controller
        self.cfg = cfg
        self.snmp_engine = engine.SnmpEngine()
        # Monotonic per-trap counter, forwarded with each queued event.
        self.count = 0

        # Transport setup
        ipv4 = True
        if IPAddress(self.cfg.snmp.snmp_ip).version == 6:
            ipv4 = False

        # Transport setup
        if ipv4:
            # UDP over IPv4, first listening interface/port
            config.addSocketTransport(
                self.snmp_engine,
                udp.domainName + (1,),
                udp.UdpTransport().openServerMode((self.cfg.snmp.snmp_ip,
                                                   self.cfg.snmp.snmp_port))
            )
        else:
            # UDP over IPv6, first listening interface/port
            config.addSocketTransport(
                self.snmp_engine,
                udp6.domainName + (1,),
                udp6.Udp6Transport().openServerMode((self.cfg.snmp.snmp_ip,
                                                     self.cfg.snmp.snmp_port))
            )

        # SecurityName <-> CommunityName mapping
        config.addV1System(self.snmp_engine,
                           self.cfg.snmp.snmp_sec_area,
                           self.cfg.snmp.snmp_comm_str)
        ntfrcv.NotificationReceiver(self.snmp_engine, self.cb_fun)

    def cb_fun(self, snmp_engine,
               state_reference,
               context_engine_id, context_name,
               var_binds,
               cb_ctx):
        """pysnmp notification callback: extract the system identifier from
        the trap's varbinds and queue it for the monitor thread."""
        transport_domain, transport_address = \
            self.snmp_engine.msgAndPduDsp.getTransportInfo(state_reference)
        LOG.info('Notification received from %s' % (transport_address[0]))
        # OID of the varbind carrying the system identification string.
        system_oid = '1.3.6.1.4.1.731.1.1.1.1.1.1.4'
        for oid, val in var_binds:
            if str(oid) == system_oid:
                system = ""
                try:
                    system = self.parse_system_line(str(val))
                except Exception:
                    # Malformed system varbind; drop the trap.
                    return
                self.controller.put((system, self.count))
                # Used as a buffer clearing object for the Queue
                # Without this the lock is not released on the payload object
                # and get() returns nothing on the other end
                # leaving 1 item in the queue
                self.controller.put((None, None))
                self.count += 1
        return

    def parse_system_line(self, system_line):
        """Extract the value from the 'key=value' prefix of the varbind
        string (text before the first '.', after the first '=')."""
        line_split = system_line.split('.')
        system_split = line_split[0].split('=')
        return system_split[1]

    def run(self):
        """Run the pysnmp transport dispatcher until stopped or on error."""
        self.snmp_engine.transportDispatcher.jobStarted(1)
        LOG.info('SNMP Transport Dispatcher Job Started')
        try:
            self.snmp_engine.transportDispatcher.runDispatcher()
        except Exception:
            self.snmp_engine.transportDispatcher.closeDispatcher()
            raise

    def stop(self):
        """Finish the dispatcher job and close the transport."""
        self.snmp_engine.transportDispatcher.jobFinished(1)
        self.snmp_engine.transportDispatcher.closeDispatcher()

View File

@ -13,11 +13,13 @@
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import oslo_db
import sqlalchemy
from oslo_config import cfg
from oslo_db import options
from oslo_utils import timeutils
from oslo_utils import uuidutils
from dcorch.common import config
@ -268,6 +270,59 @@ class DBAPIOrchRequestTest(base.OrchestratorTestCase):
self.assertEqual(1, len(orch_requests_sysinv))
self.assertEqual(1, len(orch_requests_flavor))
def test_orch_request_get_most_recent_failed_request(self):
orch_requests = self.create_some_failed_orch_requests()
orts = orch_requests[0].updated_at
orid = orch_requests[0].id
for request in orch_requests:
if request.updated_at > orts:
orid = request.id
most_recent = \
db_api.orch_request_get_most_recent_failed_request(self.ctx)
self.assertIsNotNone(most_recent)
self.assertEqual(orid,
most_recent.id)
def test_orch_request_delete_previous_failed_requests(self):
orch_requests = self.create_some_orch_requests()
total_count = len(orch_requests)
failed_count = 0
for request in orch_requests:
if request.state == consts.ORCH_REQUEST_STATE_FAILED:
failed_count += 1
expected_count = total_count - failed_count
db_api.orch_request_delete_previous_failed_requests(
self.ctx, timeutils.utcnow())
orch_requests = db_api.orch_request_get_all(self.ctx)
self.assertEqual(expected_count, len(orch_requests))
def create_some_failed_orch_requests(self):
# All db apis used in this method have already been verified
orch_requests = []
orch_request1 = self.create_default_sysinv_orch_job()
orch_request2 = self.create_default_sysinv_orch_job()
values = {'state': consts.ORCH_REQUEST_STATE_FAILED,
'try_count': 2}
db_api.orch_request_update(self.ctx,
orch_request1.uuid,
values)
db_api.orch_request_update(self.ctx,
orch_request2.uuid,
values)
orch_requests = db_api.orch_request_get_all(self.ctx)
return orch_requests
def create_some_orch_requests(self):
orch_requests = self.create_some_failed_orch_requests()
orch_requests.append(self.create_default_sysinv_orch_job())
return orch_requests
def create_default_sysinv_orch_job(self):
resource_sysinv = self.create_default_resource(
consts.RESOURCE_TYPE_SYSINV_DNS)
@ -404,3 +459,28 @@ class DBAPIOrchRequestTest(base.OrchestratorTestCase):
self.assertEqual(1, len(orch_requests_attrs))
self.assertEqual(orch_request_compute.id, orch_requests_attrs[0].id)
def test_purge_deleted_records(self):
orch_requests = self.create_some_orch_requests()
total_count = len(orch_requests)
soft_deleted_count = 0
delete_time = timeutils.utcnow() - datetime.timedelta(days=2)
values = {'deleted': 1,
'deleted_at': delete_time}
for request in orch_requests:
if request == consts.ORCH_REQUEST_STATE_FAILED:
db_api.orch_request_update(self.ctx, request.uuid, values)
soft_deleted_count += 1
expected_count = total_count - soft_deleted_count
db_api.purge_deleted_records(self.ctx, 1)
# As each resource in this unit test has a single orch job which
# has a single orch request, the number of resources, orch jobs
# and orch requests after purge must be the same.
orch_requests = db_api.orch_request_get_all(self.ctx)
self.assertEqual(expected_count, len(orch_requests))
orch_jobs = db_api.orch_job_get_all(self.ctx)
self.assertEqual(expected_count, len(orch_jobs))
resources = db_api.resource_get_all(self.ctx)
self.assertEqual(expected_count, len(resources))

View File

@ -49,11 +49,6 @@ class FakeFKM(object):
self.distribute_keys = mock.MagicMock()
class FakeAAM(object):
def __init__(self):
self.enable_snmp = mock.MagicMock()
class TestInitialSyncManager(base.OrchestratorTestCase):
def setUp(self):
super(TestInitialSyncManager, self).setUp()
@ -69,10 +64,9 @@ class TestInitialSyncManager(base.OrchestratorTestCase):
self.mock_context.get_admin_context.return_value = self.ctx
self.addCleanup(p.stop)
# Mock the GSM, FKM and AAM
# Mock the GSM and FKM
self.fake_gsm = FakeGSM(self.ctx)
self.fake_fkm = FakeFKM()
self.fake_aam = FakeAAM()
@staticmethod
def create_subcloud_static(ctxt, name, **kwargs):
@ -85,8 +79,7 @@ class TestInitialSyncManager(base.OrchestratorTestCase):
def test_init(self):
ism = initial_sync_manager.InitialSyncManager(self.fake_gsm,
self.fake_fkm,
self.fake_aam)
self.fake_fkm)
self.assertIsNotNone(ism)
self.assertEqual(self.ctx, ism.context)
@ -110,8 +103,7 @@ class TestInitialSyncManager(base.OrchestratorTestCase):
initial_sync_state=consts.INITIAL_SYNC_STATE_REQUESTED)
ism = initial_sync_manager.InitialSyncManager(self.fake_gsm,
self.fake_fkm,
self.fake_aam)
self.fake_fkm)
# Perform init actions
ism.init_actions()
@ -139,8 +131,7 @@ class TestInitialSyncManager(base.OrchestratorTestCase):
self.assertIsNotNone(subcloud)
ism = initial_sync_manager.InitialSyncManager(self.fake_gsm,
self.fake_fkm,
self.fake_aam)
self.fake_fkm)
# Initial sync the subcloud
ism._initial_sync_subcloud(subcloud.region_name)
@ -150,8 +141,6 @@ class TestInitialSyncManager(base.OrchestratorTestCase):
subcloud.region_name)
self.fake_fkm.distribute_keys.assert_called_with(self.ctx,
subcloud.region_name)
self.fake_aam.enable_snmp.assert_called_with(self.ctx,
subcloud.region_name)
# Verify that the subcloud was enabled
self.fake_gsm.enable_subcloud.assert_called_with(self.ctx,
@ -171,8 +160,7 @@ class TestInitialSyncManager(base.OrchestratorTestCase):
self.assertIsNotNone(subcloud)
ism = initial_sync_manager.InitialSyncManager(self.fake_gsm,
self.fake_fkm,
self.fake_aam)
self.fake_fkm)
# Initial sync the subcloud
ism._initial_sync_subcloud(subcloud.region_name)
@ -193,8 +181,7 @@ class TestInitialSyncManager(base.OrchestratorTestCase):
self.assertIsNotNone(subcloud)
ism = initial_sync_manager.InitialSyncManager(self.fake_gsm,
self.fake_fkm,
self.fake_aam)
self.fake_fkm)
# Force a failure
self.fake_gsm.initial_sync.side_effect = Exception('fake_exception')
@ -226,8 +213,7 @@ class TestInitialSyncManager(base.OrchestratorTestCase):
initial_sync_state=consts.INITIAL_SYNC_STATE_FAILED)
ism = initial_sync_manager.InitialSyncManager(self.fake_gsm,
self.fake_fkm,
self.fake_aam)
self.fake_fkm)
# Reattempt sync success
ism._reattempt_sync('subcloud2')

View File

@ -1,9 +1,11 @@
#!/bin/sh
# OpenStack Distributed Cloud snmp Service (dcorch-snmp)
# OpenStack DC Manager Audit Service (dcmanager-audit)
#
# Description: Manages an OpenStack DC Orchestrator SNMP Service (dcorch-snmp) process as an HA resource
# Description:
# Manages an OpenStack DC Manager Audit Service (dcmanager-audit)
# process as an HA resource
#
# Copyright (c) 2017 Wind River Systems, Inc.
# Copyright (c) 2020 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
@ -26,8 +28,8 @@
# Fill in some defaults if no values are specified
OCF_RESKEY_binary_default="dcorch-snmp"
OCF_RESKEY_config_default="/etc/dcorch/dcorch.conf"
OCF_RESKEY_binary_default="/usr/bin/dcmanager-audit"
OCF_RESKEY_config_default="/etc/dcmanager/dcmanager.conf"
OCF_RESKEY_user_default="root"
OCF_RESKEY_pid_default="$HA_RSCTMP/$OCF_RESOURCE_INSTANCE.pid"
@ -42,14 +44,14 @@ usage() {
cat <<UEND
usage: $0 (start|stop|validate-all|meta-data|status|monitor)
$0 manages an OpenStack Distributed Cloud snmp (dcorch-snmp) process as an HA resource
$0 manages an OpenStack DC Manager Audit service (dcmanager-audit) process as an HA resource
The 'start' operation starts the dorch-snmp service.
The 'stop' operation stops the dorch-snmp service.
The 'start' operation starts the dcmanager-audit service.
The 'stop' operation stops the dcmanager-audit service.
The 'validate-all' operation reports whether the parameters are valid
The 'meta-data' operation reports this RA's meta-data information
The 'status' operation reports whether the dorch-snmp service is running
The 'monitor' operation reports whether the dorch-snmp service seems to be working
The 'status' operation reports whether the dcmanager-audit service is running
The 'monitor' operation reports whether the dcmanager-audit service seems to be working
UEND
}
@ -58,52 +60,52 @@ meta_data() {
cat <<END
<?xml version="1.0"?>
<!DOCTYPE resource-agent SYSTEM "ra-api-1.dtd">
<resource-agent name="dorch-snmp">
<resource-agent name="dcmanager-audit">
<version>1.0</version>
<longdesc lang="en">
Resource agent for the DC Orchestrator SNMP Service (dorch-snmp)
Resource agent for the DC Manager service (dcmanager-audit)
</longdesc>
<shortdesc lang="en">Manages the OpenStack DC Orchestrator SNMP service(dorch-snmp)</shortdesc>
<shortdesc lang="en">Manages the OpenStack DC Manager Audit Service (dcmanager-audit)</shortdesc>
<parameters>
<parameter name="binary" unique="0" required="0">
<longdesc lang="en">
Location of the DC Orchestrator SNMP server binary (dorch-snmp)
Location of the DC Manager Audit Service binary (dcmanager-audit)
</longdesc>
<shortdesc lang="en">DC Orchestrator SNMP server binary (dorch-snmp)</shortdesc>
<shortdesc lang="en">DC Manager Audit Service binary (dcmanager-audit)</shortdesc>
<content type="string" default="${OCF_RESKEY_binary_default}" />
</parameter>
<parameter name="config" unique="0" required="0">
<longdesc lang="en">
Location of the DC Orchestrator SNMP (dorch-snmp) configuration file
Location of the DC Manager Audit Service (dcmanager-audit) configuration file
</longdesc>
<shortdesc lang="en">DC Orchestrator SNMP (dorch-snmp registry) config file</shortdesc>
<shortdesc lang="en">DC Manager Audit Service (dcmanager-audit registry) config file</shortdesc>
<content type="string" default="${OCF_RESKEY_config_default}" />
</parameter>
<parameter name="user" unique="0" required="0">
<longdesc lang="en">
User running DC Orchestrator SNMP (dorch-snmp)
User running DC Manager Audit Service (dcmanager-audit)
</longdesc>
<shortdesc lang="en">DC Orchestrator SNMP (dorch-snmp) user</shortdesc>
<shortdesc lang="en">DC Manager Audit Service (dcmanager-audit) user</shortdesc>
<content type="string" default="${OCF_RESKEY_user_default}" />
</parameter>
<parameter name="pid" unique="0" required="0">
<longdesc lang="en">
The pid file to use for this DC Orchestrator SNMP (dorch-snmp) instance
The pid file to use for this DC Manager Audit Service (dcmanager-audit) instance
</longdesc>
<shortdesc lang="en">DC Orchestrator SNMP (dorch-snmp) pid file</shortdesc>
<shortdesc lang="en">DC Manager Audit Service (dcmanager-audit) pid file</shortdesc>
<content type="string" default="${OCF_RESKEY_pid_default}" />
</parameter>
<parameter name="additional_parameters" unique="0" required="0">
<longdesc lang="en">
Additional parameters to pass on to the OpenStack NovaAPI (dorch-snmp)
Additional parameters to pass on to the dcmanager-audit
</longdesc>
<shortdesc lang="en">Additional parameters for dorch-snmp</shortdesc>
<shortdesc lang="en">Additional parameters for dcmanager-audit</shortdesc>
<content type="string" />
</parameter>
@ -124,7 +126,7 @@ END
#######################################################################
# Functions invoked by resource manager actions
dcorch_snmp_validate() {
dcmanager_audit_validate() {
local rc
check_binary $OCF_RESKEY_binary
@ -154,12 +156,12 @@ dcorch_snmp_validate() {
true
}
dcorch_snmp_status() {
dcmanager_audit_status() {
local pid
local rc
if [ ! -f $OCF_RESKEY_pid ]; then
ocf_log info "DC Orchestrator SNMP (dorch-snmp) is not running"
ocf_log info "DC Manager Audit Service (dcmanager-audit) is not running"
return $OCF_NOT_RUNNING
else
pid=`cat $OCF_RESKEY_pid`
@ -170,16 +172,16 @@ dcorch_snmp_status() {
if [ $rc -eq 0 ]; then
return $OCF_SUCCESS
else
ocf_log info "Old PID file found, but DC Orchestrator SNMP (dorch-snmp) is not running"
ocf_log info "Old PID file found, but DC Manager Audit Service (dcmanager-audit) is not running"
rm -f $OCF_RESKEY_pid
return $OCF_NOT_RUNNING
fi
}
dcorch_snmp_monitor() {
dcmanager_audit_monitor() {
local rc
dcorch_snmp_status
dcmanager_audit_status
rc=$?
# If status returned anything but success, return that immediately
@ -187,26 +189,24 @@ dcorch_snmp_monitor() {
return $rc
fi
# Further verify the service availibility.
ocf_log debug "DC Orchestrator SNMP (dorch-snmp) monitor succeeded"
ocf_log debug "DC Manager Audit Service (dcmanager-audit) monitor succeeded"
return $OCF_SUCCESS
}
dcorch_snmp_start() {
dcmanager_audit_start() {
local rc
dcorch_snmp_status
dcmanager_audit_status
rc=$?
if [ $rc -eq $OCF_SUCCESS ]; then
ocf_log info "DC Orchestrator SNMP (dorch-snmp) already running"
ocf_log info "DC Manager Audit Service (dcmanager-audit) already running"
return $OCF_SUCCESS
fi
# Change the working dir to /, to be sure it's accesible
cd /
# run the actual dorch-snmp daemon. Don't use ocf_run as we're sending the tool's output
# run the actual dcmanager-audit daemon. Don't use ocf_run as we're sending the tool's output
# straight to /dev/null anyway and using ocf_run would break stdout-redirection here.
su ${OCF_RESKEY_user} -s /bin/sh -c "${OCF_RESKEY_binary} --config-file=$OCF_RESKEY_config \
$OCF_RESKEY_additional_parameters"' >> /dev/null 2>&1 & echo $!' > $OCF_RESKEY_pid
@ -214,21 +214,21 @@ dcorch_snmp_start() {
# Spin waiting for the server to come up.
# Let the CRM/LRM time us out if required
while true; do
dcorch_snmp_monitor
dcmanager_audit_monitor
rc=$?
[ $rc -eq $OCF_SUCCESS ] && break
if [ $rc -ne $OCF_NOT_RUNNING ]; then
ocf_log err "DC Orchestrator SNMP (dorch-snmp) start failed"
ocf_log err "DC Manager Audit Service (dcmanager-audit) start failed"
exit $OCF_ERR_GENERIC
fi
sleep 1
done
ocf_log info "DC Orchestrator SNMP (dorch-snmp) started"
ocf_log info "DC Manager Audit Service (dcmanager-audit) started"
return $OCF_SUCCESS
}
dcorch_snmp_confirm_stop() {
dcmanager_audit_confirm_stop() {
local my_bin
local my_processes
@ -242,15 +242,15 @@ dcorch_snmp_confirm_stop() {
fi
}
dcorch_snmp_stop() {
dcmanager_audit_stop() {
local rc
local pid
dcorch_snmp_status
dcmanager_audit_status
rc=$?
if [ $rc -eq $OCF_NOT_RUNNING ]; then
ocf_log info "DC Orchestrator SNMP (dorch-snmp) already stopped"
dcorch_snmp_confirm_stop
ocf_log info "DC Manager Audit Service (dcmanager-audit) already stopped"
dcmanager_audit_confirm_stop
return $OCF_SUCCESS
fi
@ -259,8 +259,8 @@ dcorch_snmp_stop() {
ocf_run kill -s TERM $pid
rc=$?
if [ $rc -ne 0 ]; then
ocf_log err "DC Orchestrator SNMP (dorch-snmp) couldn't be stopped"
dcorch_snmp_confirm_stop
ocf_log err "DC Manager Audit Service (dcmanager-audit) couldn't be stopped"
dcmanager_audit_confirm_stop
exit $OCF_ERR_GENERIC
fi
@ -271,27 +271,27 @@ dcorch_snmp_stop() {
fi
count=0
while [ $count -lt $shutdown_timeout ]; do
dcorch_snmp_status
dcmanager_audit_status
rc=$?
if [ $rc -eq $OCF_NOT_RUNNING ]; then
break
fi
count=`expr $count + 1`
sleep 1
ocf_log debug "DC Orchestrator SNMP (dorch-snmp) still hasn't stopped yet. Waiting ..."
ocf_log debug "DC Manager Audit Service (dcmanager-audit) still hasn't stopped yet. Waiting ..."
done
dcorch_snmp_status
dcmanager_audit_status
rc=$?
if [ $rc -ne $OCF_NOT_RUNNING ]; then
# SIGTERM didn't help either, try SIGKILL
ocf_log info "DC Orchestrator SNMP (dorch-snmp) failed to stop after ${shutdown_timeout}s \
ocf_log info "DC Manager Audit Service (dcmanager-audit) failed to stop after ${shutdown_timeout}s \
using SIGTERM. Trying SIGKILL ..."
ocf_run kill -s KILL $pid
fi
dcorch_snmp_confirm_stop
dcmanager_audit_confirm_stop
ocf_log info "DC Orchestrator SNMP (dorch-snmp) stopped"
ocf_log info "DC Manager Audit Service (dcmanager-audit) stopped"
rm -f $OCF_RESKEY_pid
@ -308,16 +308,15 @@ case "$1" in
esac
# Anything except meta-data and help must pass validation
dcorch_snmp_validate || exit $?
dcmanager_audit_validate || exit $?
# What kind of method was invoked?
case "$1" in
start) dcorch_snmp_start;;
stop) dcorch_snmp_stop;;
status) dcorch_snmp_status;;
monitor) dcorch_snmp_monitor;;
start) dcmanager_audit_start;;
stop) dcmanager_audit_stop;;
status) dcmanager_audit_status;;
monitor) dcmanager_audit_monitor;;
validate-all) ;;
*) usage
exit $OCF_ERR_UNIMPLEMENTED;;
esac

View File

@ -35,12 +35,41 @@ load-plugins=
# R detect Refactor for a "good practice" metric violation
# C detect Convention for coding standard violation
# W detect Warning for stylistic problems, or minor programming issues
# W0102: dangerous-default-value
# W0105: pointless-string-statement
# W0107: unnecessary-pass
# W0123: eval-used
# W0201: attribute-defined-outside-init
# W0211: bad-staticmethod-argument
# W0212: protected-access
# W0221: arguments-differ
# W0223: abstract-method
# W0231: super-init-not-called
# W0235: useless-super-delegation
# W0311: bad-indentation
# W0402: deprecated-module
# W0403: relative-import
# W0603: global-statement
# W0612: unused-variable
# W0613: unused-argument
# W0621: redefined-outer-name
# W0622: redefined-builtin
# W0631: undefined-loop-variable
# W0703: broad-except
# W0706: try-except-raise
# W1113: keyword-arg-before-vararg
# W1201: logging-not-lazy
# W1401: anomalous-backslash-in-string
# E detect Errors for important programming issues (i.e. most probably bug)
# E1101: no-member
# E1102: not-callable
# E1120: no-value-for-parameter (sqlalchemy)
disable=C,R,W,
E1101,E1102,E1120
# E1128: assignment-from-none
disable=C,R,fixme,
W0102,W0105,W0107,W0123,W0201,W0211,W0212,W0221,W0223,W0231,W0235,
W0311,W0402,W0403,W0603,W0612,W0613,W0621,W0622,W0631,W0703,W0706,
W1113,W1201,W1401,
E1101,E1102,E1120,E1128
[REPORTS]

View File

@ -46,5 +46,5 @@ python-cinderclient>=2.1.0 # Apache-2.0
python-novaclient>=7.1.0 # Apache-2.0
python-keystoneclient>=3.8.0 # Apache-2.0
pycrypto>=2.6 # Public Domain
pysnmp>=4.2.3 # BSD
requests_toolbelt
kubernetes # Apache-2.0

View File

@ -29,19 +29,18 @@ packages =
[entry_points]
console_scripts =
dcmanager-api = dcmanager.cmd.api:main
dcmanager-audit = dcmanager.cmd.audit:main
dcmanager-manager = dcmanager.cmd.manager:main
dcmanager-manage = dcmanager.cmd.manage:main
dcorch-api = dcorch.cmd.api:main
dcorch-engine = dcorch.cmd.engine:main
dcorch-manage = dcorch.cmd.manage:main
dcorch-snmp = dcorch.cmd.snmp:main
dcorch-api-proxy = dcorch.cmd.api_proxy:main
dcdbsync-api = dcdbsync.cmd.api:main
oslo.config.opts =
dcorch.common.config = dcorch.common.config:list_opts
dcorch.common.api.api_config = dcorch.api.api_config:list_opts
dcorch.common.snmp.snmp_config = dcorch.snmp.snmp_config:list_opts
dcorch.engine.quota_manager = dcorch.engine.quota_manager:list_opts
dcorch.engine.dcorch_lock = dcorch.engine.dcorch_lock:list_opts
dcmanager.common.config = dcmanager.common.config:list_opts

View File

@ -12,7 +12,6 @@ fmclient_src_dir = {[dc]stx_fault_dir}/python-fmclient/fmclient
fm_api_src_dir = {[dc]stx_fault_dir}/fm-api
sysinv_src_dir = ../../config/sysinv/sysinv/sysinv
tsconfig_src_dir = ../../config/tsconfig/tsconfig
controllerconfig_src_dir = ../../config/controllerconfig/controllerconfig
cgtsclient_src_dir = ../../config/sysinv/cgts-client/cgts-client
cgcs_patch_src_dir = ../../update/cgcs-patch/cgcs-patch
@ -47,7 +46,25 @@ deps = -r{toxinidir}/test-requirements.txt
-e{[dc]tsconfig_src_dir}
-e{[dc]fmclient_src_dir}
-e{[dc]fm_api_src_dir}
-e{[dc]controllerconfig_src_dir}
-e{[dc]cgtsclient_src_dir}
setenv =
CURRENT_CFG_FILE={toxinidir}/.current.cfg
SINGLE_REPO=True
OSLO_LOCK_PATH={toxinidir}
commands =
find {toxinidir} -not -path '{toxinidir}/.tox/*' -name '*.py[c|o]' -delete
python setup.py testr --slowest --testr-args='{posargs}'
[testenv:py36]
basepython = python3.6
deps = -r{toxinidir}/test-requirements.txt
-r{toxinidir}/requirements.txt
keyring
-e{[dc]nfv_client_src_dir}
-e{[dc]sysinv_src_dir}
-e{[dc]tsconfig_src_dir}
-e{[dc]fmclient_src_dir}
-e{[dc]fm_api_src_dir}
-e{[dc]cgtsclient_src_dir}
setenv =
CURRENT_CFG_FILE={toxinidir}/.current.cfg
@ -82,13 +99,12 @@ usedevelop = True
deps = -r{toxinidir}/test-requirements.txt
-r{toxinidir}/requirements.txt
keyring
-e../{[dc]nfv_client_src_dir}
-e../{[dc]sysinv_src_dir}
-e../{[dc]tsconfig_src_dir}
-e../{[dc]fmclient_src_dir}
-e../{[dc]fm_api_src_dir}
-e../{[dc]controllerconfig_src_dir}
-e../{[dc]cgtsclient_src_dir}
-e{[dc]nfv_client_src_dir}
-e{[dc]sysinv_src_dir}
-e{[dc]tsconfig_src_dir}
-e{[dc]fmclient_src_dir}
-e{[dc]fm_api_src_dir}
-e{[dc]cgtsclient_src_dir}
setenv =
CURRENT_CFG_FILE={toxinidir}/.current.cfg
SINGLE_REPO=True
@ -173,7 +189,7 @@ commands =
import_exceptions = dcmanager.common.i18n,dcorch.common.i18n
[testenv:pylint]
basepython = python2.7
basepython = python3
sitepackages = False
deps = {[testenv:py27]deps}
-e{[dc]cgcs_patch_src_dir}