diff --git a/api-ref/source/api-ref-sysinv-v1-config.rst b/api-ref/source/api-ref-sysinv-v1-config.rst index c948d43f4f..86ba3eff39 100644 --- a/api-ref/source/api-ref-sysinv-v1-config.rst +++ b/api-ref/source/api-ref-sysinv-v1-config.rst @@ -326,16 +326,6 @@ itemNotFound (404) "rel": "bookmark" } ], - "iprofile": [ - { - "href": "http://10.10.10.2:6385/v1/iprofile/", - "rel": "self" - }, - { - "href": "http://10.10.10.2:6385/iprofile/", - "rel": "bookmark" - } - ], "servicenodes": [ { "href": "http://10.10.10.2:6385/v1/servicenodes/", @@ -3619,457 +3609,6 @@ This will remove from the interface the datanetwork assigned. This operation does not accept a request body. ---------- -Profiles ---------- - -These APIs allow the create, display and delete of host profiles. This -includes interface profiles, cpu profiles, and volume profiles. NOTE -that the same record is used in the database for both hosts and host -profiles. - -******************** -Lists all profiles -******************** - -.. rest_method:: GET /v1/iprofile - -**Normal response codes** - -200 - -**Error response codes** - -computeFault (400, 500, ...), serviceUnavailable (503), badRequest (400), -unauthorized (401), forbidden (403), badMethod (405), overLimit (413), -itemNotFound (404) - -**Response parameters** - -.. csv-table:: - :header: "Parameter", "Style", "Type", "Description" - :widths: 20, 20, 20, 60 - - "iprofiles (Optional)", "plain", "xsd:list", "The list of profile entities." - "recordtype (Optional)", "plain", "xsd:string", "Indicates that the record is being used for host profile rather than a host." - "hostname (Optional)", "plain", "xsd:string", "The name of the profile." - "uuid (Optional)", "plain", "csapi:UUID", "The universally unique identifier for this object." - "links (Optional)", "plain", "xsd:list", "For convenience, resources contain links to themselves. This allows a client to easily obtain rather than construct resource URIs. The following types of link relations are associated with resources: a self link containing a versioned link to the resource, and a bookmark link containing a permanent link to a resource that is appropriate for long term storage." - "created_at (Optional)", "plain", "xsd:dateTime", "The time when the object was created." - "updated_at (Optional)", "plain", "xsd:dateTime", "The time when the object was last updated." 
- -:: - - { - "iprofiles": [ - { - "uuid": "b6bde724-4fda-4941-ae3f-15abd3d4107b", - "recordtype": "profile", - "task": null, - "reserved": "False", - "mgmt_ip": null, - "links": [ - { - "href": "http://192.168.204.2:6385/v1/iprofile/b6bde724-4fda-4941-ae3f-15abd3d4107b", - "rel": "self" - }, - { - "href": "http://192.168.204.2:6385/iprofile/b6bde724-4fda-4941-ae3f-15abd3d4107b", - "rel": "bookmark" - } - ], - "personality": null, - "created_at": "2014-09-29T13:36:36.760707+00:00", - "hostname": "ifprofile-type-1", - "updated_at": null, - "id": 23, - "ihost_uuid": null, - "profiletype": null, - "location": { - }, - "action": "none", - "profilename": null, - "operational": "disabled", - "administrative": "locked", - "availability": "offline", - "uptime": 0, - "mgmt_mac": null - }, - { - "uuid": "85b8d979-a1d5-4b06-8666-22646d45dcdf", - "recordtype": "profile", - "task": null, - "reserved": "False", - "mgmt_ip": null, - "links": [ - { - "href": "http://192.168.204.2:6385/v1/iprofile/85b8d979-a1d5-4b06-8666-22646d45dcdf", - "rel": "self" - }, - { - "href": "http://192.168.204.2:6385/iprofile/85b8d979-a1d5-4b06-8666-22646d45dcdf", - "rel": "bookmark" - } - ], - "personality": null, - "created_at": "2014-09-29T13:42:40.592612+00:00", - "hostname": "ifprofile-type-2", - "updated_at": null, - "id": 24, - "ihost_uuid": null, - "profiletype": null, - "location": { - }, - "action": "none", - "profilename": null, - "operational": "disabled", - "administrative": "locked", - "availability": "offline", - "uptime": 0, - "mgmt_mac": null - } - ] - } - -This operation does not accept a request body. - -******************************************** -Shows information about a specific profile -******************************************** - -.. rest_method:: GET /v1/iprofile/​{profile_id}​ - -**Normal response codes** - -200 - -**Error response codes** - -computeFault (400, 500, ...), serviceUnavailable (503), badRequest (400), -unauthorized (401), forbidden (403), badMethod (405), overLimit (413), -itemNotFound (404) - -**Request parameters** - -.. csv-table:: - :header: "Parameter", "Style", "Type", "Description" - :widths: 20, 20, 20, 60 - - "profile_id", "URI", "csapi:UUID", "The unique identifier of an existing profile." - -**Response parameters** - -.. csv-table:: - :header: "Parameter", "Style", "Type", "Description" - :widths: 20, 20, 20, 60 - - "recordtype (Optional)", "plain", "xsd:string", "Indicates that the record is being used for host profile rather than a host." - "hostname (Optional)", "plain", "xsd:string", "The name of the profile." - "ports (Optional)", "plain", "xsd:list", "Links to the ports of the profile." - "interfaces (Optional)", "plain", "xsd:list", "Links to the interfaces of the profile." - "idisks (Optional)", "plain", "xsd:list", "Links to the disks of the profile." - "partitions (Optional)", "plain", "xsd:list", "Links to the partitions of the profile." - "istors (Optional)", "plain", "xsd:list", "Links to the physical volume storage resources of the profile." - "ipvs (Optional)", "plain", "xsd:list", "Links to the physical volumes of the profile." - "ilvgs (Optional)", "plain", "xsd:list", "Links to the logical volume group storage resources of the profile." - "inodes (Optional)", "plain", "xsd:list", "Links to the NUMA Nodes of the profile." - "icpus (Optional)", "plain", "xsd:list", "Links to the logical cores (CPUs) of the profile." - "uuid (Optional)", "plain", "csapi:UUID", "The universally unique identifier for this object." 
- "links (Optional)", "plain", "xsd:list", "For convenience, resources contain links to themselves. This allows a client to easily obtain rather than construct resource URIs. The following types of link relations are associated with resources: a self link containing a versioned link to the resource, and a bookmark link containing a permanent link to a resource that is appropriate for long term storage." - "created_at (Optional)", "plain", "xsd:dateTime", "The time when the object was created." - "updated_at (Optional)", "plain", "xsd:dateTime", "The time when the object was last updated." - -:: - - { - "ports" : [ - { - "rel" : "self", - "href" : "http://128.224.151.244:6385/v1/iprofile/85b8d979-a1d5-4b06-8666-22646d45dcdf/ports" - }, - { - "rel" : "bookmark", - "href" : "http://128.224.151.244:6385/iprofile/85b8d979-a1d5-4b06-8666-22646d45dcdf/ports" - } - ], - "operational" : "disabled", - "imemorys" : [ - { - "rel" : "self", - "href" : "http://128.224.151.244:6385/v1/ihosts/85b8d979-a1d5-4b06-8666-22646d45dcdf/imemorys" - }, - { - "rel" : "bookmark", - "href" : "http://128.224.151.244:6385/ihosts/85b8d979-a1d5-4b06-8666-22646d45dcdf/imemorys" - } - ], - "iinterfaces" : [ - { - "rel" : "self", - "href" : "http://128.224.151.244:6385/v1/iprofile/85b8d979-a1d5-4b06-8666-22646d45dcdf/iinterfaces" - }, - { - "rel" : "bookmark", - "href" : "http://128.224.151.244:6385/iprofile/85b8d979-a1d5-4b06-8666-22646d45dcdf/iinterfaces" - } - ], - "personality" : null, - "serialId" : null, - "hostname" : "ifprofile-type-2", - "profilename" : null, - "uuid" : "85b8d979-a1d5-4b06-8666-22646d45dcdf", - "profiletype" : null, - "ihost_uuid" : null, - "created_at" : "2014-09-29T13:42:40.592612+00:00", - "availability" : "offline", - "recordtype" : "profile", - "istors" : [ - { - "rel" : "self", - "href" : "http://128.224.151.244:6385/v1/iprofile/85b8d979-a1d5-4b06-8666-22646d45dcdf/istors" - }, - { - "rel" : "bookmark", - "href" : "http://128.224.151.244:6385/iprofile/85b8d979-a1d5-4b06-8666-22646d45dcdf/istors" - } - ], - "idisks" : [ - { - "rel" : "self", - "href" : "http://128.224.151.244:6385/v1/iprofile/85b8d979-a1d5-4b06-8666-22646d45dcdf/idisks" - }, - { - "rel" : "bookmark", - "href" : "http://128.224.151.244:6385/iprofile/85b8d979-a1d5-4b06-8666-22646d45dcdf/idisks" - } - ], - "uptime" : 0, - "icpus" : [ - { - "rel" : "self", - "href" : "http://128.224.151.244:6385/v1/ihosts/85b8d979-a1d5-4b06-8666-22646d45dcdf/icpus" - }, - { - "rel" : "bookmark", - "href" : "http://128.224.151.244:6385/ihosts/85b8d979-a1d5-4b06-8666-22646d45dcdf/icpus" - } - ], - "id" : 24, - "mgmt_ip" : null, - "links" : [ - { - "rel" : "self", - "href" : "http://128.224.151.244:6385/v1/iprofile/85b8d979-a1d5-4b06-8666-22646d45dcdf" - }, - { - "rel" : "bookmark", - "href" : "http://128.224.151.244:6385/iprofile/85b8d979-a1d5-4b06-8666-22646d45dcdf" - } - ], - "location" : {}, - "inodes" : [ - { - "rel" : "self", - "href" : "http://128.224.151.244:6385/v1/ihosts/85b8d979-a1d5-4b06-8666-22646d45dcdf/inodes" - }, - { - "rel" : "bookmark", - "href" : "http://128.224.151.244:6385/ihosts/85b8d979-a1d5-4b06-8666-22646d45dcdf/inodes" - } - ], - "task" : null, - "mgmt_mac" : null, - "invprovision" : null, - "administrative" : "locked", - "updated_at" : null, - "action" : "none", - "reserved" : "False" - } - -This operation does not accept a request body. - -******************* -Creates a profile -******************* - -.. 
rest_method:: POST /v1/iprofile - -**Normal response codes** - -200 - -**Error response codes** - -badMediaType (415) - -**Request parameters** - -.. csv-table:: - :header: "Parameter", "Style", "Type", "Description" - :widths: 20, 20, 20, 60 - - "profilename (Optional)", "plain", "xsd:string", "The name for the new profile." - "profiletype (Optional)", "plain", "xsd:string", "The type of profile to be created. Valid values are: ``if``, ``cpu`` or ``stor``." - "ihost_uuid (Optional)", "plain", "csapi:UUID", "The UUID of the Host to create the profile based on." - -**Response parameters** - -.. csv-table:: - :header: "Parameter", "Style", "Type", "Description" - :widths: 20, 20, 20, 60 - - "recordtype (Optional)", "plain", "xsd:string", "Indicates that the record is being used for host profile rather than a host." - "hostname (Optional)", "plain", "xsd:string", "The name of the profile." - "ports (Optional)", "plain", "xsd:list", "Links to the ports of the profile." - "interfaces (Optional)", "plain", "xsd:list", "Links to the interfaces of the profile." - "idisks (Optional)", "plain", "xsd:list", "Links to the disks of the profile." - "partitions (Optional)", "plain", "xsd:list", "Links to the partitions of the profile." - "istors (Optional)", "plain", "xsd:list", "Links to the physical volume storage resources of the profile." - "ipvs (Optional)", "plain", "xsd:list", "Links to the physical volumes of the profile." - "ilvgs (Optional)", "plain", "xsd:list", "Links to the logical volume group storage resources of the profile." - "inodes (Optional)", "plain", "xsd:list", "Links to the NUMA Nodes of the profile." - "icpus (Optional)", "plain", "xsd:list", "Links to the logical cores (CPUs) of the profile." - "uuid (Optional)", "plain", "csapi:UUID", "The universally unique identifier for this object." - "links (Optional)", "plain", "xsd:list", "For convenience, resources contain links to themselves. This allows a client to easily obtain rather than construct resource URIs. The following types of link relations are associated with resources: a self link containing a versioned link to the resource, and a bookmark link containing a permanent link to a resource that is appropriate for long term storage." - "created_at (Optional)", "plain", "xsd:dateTime", "The time when the object was created." - "updated_at (Optional)", "plain", "xsd:dateTime", "The time when the object was last updated." 
- -:: - - { - "profilename": "ifprofile-type-1", - "profiletype": "if", - "ihost_uuid": "959f785b-6387-4b98-aa30-bc861061d7a1" - } - -:: - - { - "ports": [ - { - "href": "http://192.168.204.2:6385/v1/iprofile/b6bde724-4fda-4941-ae3f-15abd3d4107b/ports", - "rel": "self" - }, - { - "href": "http://192.168.204.2:6385/iprofile/b6bde724-4fda-4941-ae3f-15abd3d4107b/ports", - "rel": "bookmark" - } - ], - "reserved": "False", - "links": [ - { - "href": "http://192.168.204.2:6385/v1/iprofile/b6bde724-4fda-4941-ae3f-15abd3d4107b", - "rel": "self" - }, - { - "href": "http://192.168.204.2:6385/iprofile/b6bde724-4fda-4941-ae3f-15abd3d4107b", - "rel": "bookmark" - } - ], - "idisks": [ - { - "href": "http://192.168.204.2:6385/v1/iprofile/b6bde724-4fda-4941-ae3f-15abd3d4107b/idisks", - "rel": "self" - }, - { - "href": "http://192.168.204.2:6385/iprofile/b6bde724-4fda-4941-ae3f-15abd3d4107b/idisks", - "rel": "bookmark" - } - ], - "availability": "offline", - "updated_at": null, - "ihost_uuid": null, - "id": 23, - "icpus": [ - { - "href": "http://192.168.204.2:6385/v1/ihosts/b6bde724-4fda-4941-ae3f-15abd3d4107b/icpus", - "rel": "self" - }, - { - "href": "http://192.168.204.2:6385/ihosts/b6bde724-4fda-4941-ae3f-15abd3d4107b/icpus", - "rel": "bookmark" - } - ], - "uptime": 0, - "uuid": "b6bde724-4fda-4941-ae3f-15abd3d4107b", - "mgmt_ip": null, - "hostname": "ifprofile-type-1", - "istors": [ - { - "href": "http://192.168.204.2:6385/v1/iprofile/b6bde724-4fda-4941-ae3f-15abd3d4107b/istors", - "rel": "self" - }, - { - "href": "http://192.168.204.2:6385/iprofile/b6bde724-4fda-4941-ae3f-15abd3d4107b/istors", - "rel": "bookmark" - } - ], - "operational": "disabled", - "location": { - }, - "invprovision": null, - "administrative": "locked", - "personality": null, - "iinterfaces": [ - { - "href": "http://192.168.204.2:6385/v1/iprofile/b6bde724-4fda-4941-ae3f-15abd3d4107b/iinterfaces", - "rel": "self" - }, - { - "href": "http://192.168.204.2:6385/iprofile/b6bde724-4fda-4941-ae3f-15abd3d4107b/iinterfaces", - "rel": "bookmark" - } - ], - "profiletype": null, - "mgmt_mac": null, - "task": null, - "recordtype": "profile", - "created_at": "2014-09-29T13:36:36.760707+00:00", - "action": "none", - "profilename": null, - "serialId": null, - "inodes": [ - { - "href": "http://192.168.204.2:6385/v1/ihosts/b6bde724-4fda-4941-ae3f-15abd3d4107b/inodes", - "rel": "self" - }, - { - "href": "http://192.168.204.2:6385/ihosts/b6bde724-4fda-4941-ae3f-15abd3d4107b/inodes", - "rel": "bookmark" - } - ], - "imemorys": [ - { - "href": "http://192.168.204.2:6385/v1/ihosts/b6bde724-4fda-4941-ae3f-15abd3d4107b/imemorys", - "rel": "self" - }, - { - "href": "http://192.168.204.2:6385/ihosts/b6bde724-4fda-4941-ae3f-15abd3d4107b/imemorys", - "rel": "bookmark" - } - ] - } - -**************************** -Deletes a specific profile -**************************** - -.. rest_method:: DELETE /v1/iprofile/​{profile_id}​ - -**Normal response codes** - -204 - -**Request parameters** - -.. csv-table:: - :header: "Parameter", "Style", "Type", "Description" - :widths: 20, 20, 20, 60 - - "profile_id", "URI", "csapi:UUID", "The unique identifier of an existing profile." - -This operation does not accept a request body. 
- ---- DNS ---- diff --git a/controllerconfig/controllerconfig/upgrade-scripts/95-del-profile-ihost.py b/controllerconfig/controllerconfig/upgrade-scripts/95-del-profile-ihost.py deleted file mode 100755 index 2b69cbee99..0000000000 --- a/controllerconfig/controllerconfig/upgrade-scripts/95-del-profile-ihost.py +++ /dev/null @@ -1,62 +0,0 @@ -#!/usr/bin/env python -# Copyright (c) 2021 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# -# This script will delete hosts which recordtype is a profile. -# This is required because host hardware profiles -# for creating re-usable configuration had been removed from GUI, CLI and -# API endpoinds. Profiles created prior the upgrade should be deleted. - -import psycopg2 -import sys - -from psycopg2.extras import RealDictCursor -from controllerconfig.common import log - -LOG = log.get_logger(__name__) - - -def main(): - action = None - from_release = None - to_release = None - arg = 1 - - while arg < len(sys.argv): - if arg == 1: - from_release = sys.argv[arg] - elif arg == 2: - to_release = sys.argv[arg] # noqa - elif arg == 3: - action = sys.argv[arg] - else: - print("Invalid option %s." % sys.argv[arg]) - return 1 - arg += 1 - - log.configure() - - LOG.debug("%s invoked with from_release = %s to_release = %s action = %s" - % (sys.argv[0], from_release, to_release, action)) - - if action == "migrate": - if from_release == '21.05': - try: - delete_profile_host() - except Exception as ex: - LOG.exception(ex) - return 1 - - -def delete_profile_host(): - conn = psycopg2.connect("dbname=sysinv user=postgres") - with conn: - with conn.cursor(cursor_factory=RealDictCursor) as cur: - cur.execute("delete from i_host where recordtype='profile'") - - LOG.info("Delete profile hosts completed") - - -if __name__ == "__main__": - sys.exit(main()) diff --git a/controllerconfig/controllerconfig/upgrade-scripts/97-reset-config-target.py b/controllerconfig/controllerconfig/upgrade-scripts/97-reset-config-target.py index 3df20302eb..80d60283ab 100755 --- a/controllerconfig/controllerconfig/upgrade-scripts/97-reset-config-target.py +++ b/controllerconfig/controllerconfig/upgrade-scripts/97-reset-config-target.py @@ -54,10 +54,10 @@ def reset_config_target(): conn = psycopg2.connect("dbname=sysinv user=postgres") with conn: with conn.cursor(cursor_factory=RealDictCursor) as cur: - cur.execute("update i_host set config_target=NULL where " - "recordtype!='profile'",) + cur.execute("update i_host set config_target=NULL",) LOG.info("Reset host config_target completed") + if __name__ == "__main__": sys.exit(main()) diff --git a/devstack/lib/config b/devstack/lib/config index b0b33281d8..1f0814033f 100644 --- a/devstack/lib/config +++ b/devstack/lib/config @@ -4,7 +4,7 @@ # # Copyright (C) 2019 Intel Corporation # -# Copyright (c) 2019 Wind River Systems, Inc. +# Copyright (c) 2021 Wind River Systems, Inc. 
# # lib/config # Functions to control the configuration and operation of stx-config @@ -109,7 +109,6 @@ function cleanup_sysinv { sudo rm -f $SYSINV_ETC_GOENABLEDD/sysinv_goenabled_check.sh sudo rm -f $SYSINV_CONF_DIR/policy.json - sudo rm -f $SYSINV_CONF_DIR/profileSchema.xsd sudo rm -f $SYSINV_ETC_MOTDD/10-system sudo rm -f $SYSINV_CONF_DIR/upgrades/delete_load.sh sudo rm -f $STX_OCF_ROOT/resource.d/platform/sysinv-api @@ -249,7 +248,6 @@ function install_sysinv { sudo install -p -D -m 755 $SYSINV_DIR/etc/sysinv/sysinv_goenabled_check.sh $SYSINV_ETC_GOENABLEDD/sysinv_goenabled_check.sh sudo install -d -m 755 $SYSINV_CONF_DIR sudo install -p -D -m 755 $SYSINV_DIR/etc/sysinv/policy.json $SYSINV_CONF_DIR/policy.json - sudo install -p -D -m 640 $SYSINV_DIR/etc/sysinv/profileSchema.xsd $SYSINV_CONF_DIR/profileSchema.xsd sudo install -d -m 755 $SYSINV_ETC_MOTDD sudo install -p -D -m 755 $SYSINV_DIR/etc/sysinv/motd-system $SYSINV_ETC_MOTDD/10-system sudo install -d -m 755 $SYSINV_CONF_DIR/upgrades
diff --git a/releasenotes/notes/release-summary-6738ff2f310f9b57.yaml b/releasenotes/notes/release-summary-6738ff2f310f9b57.yaml index cf958aead6..a8275d2b57 100644 --- a/releasenotes/notes/release-summary-6738ff2f310f9b57.yaml +++ b/releasenotes/notes/release-summary-6738ff2f310f9b57.yaml @@ -29,7 +29,6 @@ features: - Node role and role profiles. - Core and memory (including huge page) assignments. - Network Interfaces and storage assignments. - - Bulk configuration of nodes through system profiles. - | User Interface:
diff --git a/sysinv/sysinv/centos/sysinv.spec b/sysinv/sysinv/centos/sysinv.spec index 2c1329be9b..a652ae0c3f 100644 --- a/sysinv/sysinv/centos/sysinv.spec +++ b/sysinv/sysinv/centos/sysinv.spec @@ -89,7 +89,6 @@ install -p -D -m 755 etc/sysinv/sysinv_goenabled_check.sh %{buildroot}%{local_et install -d -m 755 %{buildroot}%{local_etc_sysinv} install -p -D -m 755 etc/sysinv/policy.json %{buildroot}%{local_etc_sysinv}/policy.json -install -p -D -m 640 etc/sysinv/profileSchema.xsd %{buildroot}%{local_etc_sysinv}/profileSchema.xsd install -p -D -m 644 etc/sysinv/crushmap-storage-model.txt %{buildroot}%{local_etc_sysinv}/crushmap-storage-model.txt install -p -D -m 644 etc/sysinv/crushmap-controller-model.txt %{buildroot}%{local_etc_sysinv}/crushmap-controller-model.txt
diff --git a/sysinv/sysinv/opensuse/sysinv.spec b/sysinv/sysinv/opensuse/sysinv.spec index 4e335af233..9577ec1042 100644 --- a/sysinv/sysinv/opensuse/sysinv.spec +++ b/sysinv/sysinv/opensuse/sysinv.spec @@ -90,7 +90,6 @@ install -p -D -m 755 etc/sysinv/sysinv_goenabled_check.sh %{buildroot}%{local_et install -d -m 755 %{buildroot}%{local_etc_sysinv} install -p -D -m 644 etc/sysinv/policy.json %{buildroot}%{local_etc_sysinv}/policy.json -install -p -D -m 640 etc/sysinv/profileSchema.xsd %{buildroot}%{local_etc_sysinv}/profileSchema.xsd install -p -D -m 644 etc/sysinv/crushmap-storage-model.txt %{buildroot}%{local_etc_sysinv}/crushmap-storage-model.txt install -p -D -m 644 etc/sysinv/crushmap-controller-model.txt %{buildroot}%{local_etc_sysinv}/crushmap-controller-model.txt
diff --git a/sysinv/sysinv/sysinv/etc/sysinv/profileSchema.xsd b/sysinv/sysinv/sysinv/etc/sysinv/profileSchema.xsd deleted file mode 100644 index c8aee238bf..0000000000 --- a/sysinv/sysinv/sysinv/etc/sysinv/profileSchema.xsd +++ /dev/null @@ -1,354 +0,0 @@ [354 deleted lines of XML schema content omitted]
diff --git a/sysinv/sysinv/sysinv/etc/sysinv/sampleProfile.xml b/sysinv/sysinv/sysinv/etc/sysinv/sampleProfile.xml deleted file mode 100644 index a35ac332fd..0000000000 --- a/sysinv/sysinv/sysinv/etc/sysinv/sampleProfile.xml +++ /dev/null @@ -1,339 +0,0 @@ [339 deleted lines of sample profile XML content omitted]
diff --git a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/__init__.py b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/__init__.py index 60e76e9452..48ed47c1e7 100644 --- a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/__init__.py +++ b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/__init__.py @@ -64,7 +64,6 @@ from sysinv.api.controllers.v1 import ntp from sysinv.api.controllers.v1 import partition from sysinv.api.controllers.v1 import pci_device from sysinv.api.controllers.v1 import port -from sysinv.api.controllers.v1 import profile from sysinv.api.controllers.v1 import ptp from sysinv.api.controllers.v1 import pv from sysinv.api.controllers.v1 import registry_image @@ -135,9 +134,6 @@ class V1(base.APIBase): imemory = [link.Link] "Links to the imemory resource" - iprofile = [link.Link] - "Links to the iprofile resource" - iuser = [link.Link] "Links to the iuser resource" @@ -348,14 +344,6 @@ class V1(base.APIBase): bookmark=True) ] - v1.iprofile = [link.Link.make_link('self', pecan.request.host_url, - 'iprofile', ''), - link.Link.make_link('bookmark', - pecan.request.host_url, - 'iprofile', '', - bookmark=True) - ] - v1.iinterfaces = [link.Link.make_link('self', pecan.request.host_url, 'iinterfaces', ''), @@ -896,7 +884,6 @@ class Controller(rest.RestController): ipvs = pv.PVController() idisks = disk.DiskController() partitions = partition.PartitionController() - iprofile = profile.ProfileController() iuser = user.UserController() idns = dns.DNSController() intp = ntp.NTPController()
diff --git a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/cpu.py b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/cpu.py index 7851bdfa24..5e33f8fa11 100644 --- a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/cpu.py +++ b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/cpu.py @@ -15,7 +15,7 @@ # License for the specific language governing permissions and limitations # under the License. # -# Copyright (c) 2013-2019 Wind River Systems, Inc. +# Copyright (c) 2013-2021 Wind River Systems, Inc. 
# @@ -377,7 +377,6 @@ class CPUController(rest.RestController): # only allow patching allocated_function and capabilities # replace ihost_uuid and inode_uuid with corresponding patch_obj = jsonpatch.JsonPatch(patch) - from_profile = False action = None for p in patch_obj: if p['path'] == '/ihost_uuid': @@ -395,9 +394,6 @@ class CPUController(rest.RestController): except exception.SysinvException: p['value'] = None - if p['path'] == '/allocated_function': - from_profile = True - if p['path'] == '/action': value = p['value'] patch.remove(p) @@ -428,8 +424,7 @@ class CPUController(rest.RestController): # Semantic checks ihost = pecan.request.dbapi.ihost_get(cpu.forihostid) _check_host(ihost) - if not from_profile: - _check_cpu(cpu, ihost) + _check_cpu(cpu, ihost) # Update only the fields that have changed try: @@ -463,7 +458,7 @@ class CPUController(rest.RestController): ############## # UTILS ############## -def _update(cpu_uuid, cpu_values, from_profile=False): +def _update(cpu_uuid, cpu_values): # Get CPU cpu = objects.cpu.get_by_uuid( pecan.request.context, cpu_uuid) @@ -471,8 +466,7 @@ def _update(cpu_uuid, cpu_values, from_profile=False): # Semantic checks ihost = pecan.request.dbapi.ihost_get(cpu.forihostid) _check_host(ihost) - if not from_profile: - _check_cpu(cpu, ihost) + _check_cpu(cpu, ihost) # Update cpu pecan.request.dbapi.icpu_update(cpu_uuid, cpu_values) diff --git a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/cpu_utils.py b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/cpu_utils.py index 64031fd4b7..6008adc1ed 100644 --- a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/cpu_utils.py +++ b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/cpu_utils.py @@ -1,4 +1,4 @@ -# Copyright (c) 2013-2020 Wind River Systems, Inc. +# Copyright (c) 2013-2021 Wind River Systems, Inc. 
# # SPDX-License-Identifier: Apache-2.0 # @@ -26,100 +26,6 @@ VSWITCH_MIN_CORES = 0 VSWITCH_MAX_CORES = 8 -class CpuProfile(object): - class CpuConfigure(object): - def __init__(self): - self.platform = 0 - self.vswitch = 0 - self.shared = 0 - self.vms = 0 - self.numa_node = 0 - - # cpus is a list of icpu sorted by numa_node, core and thread - # if not, provide a node list sorted by numa_node (id might not be reliable) - def __init__(self, cpus, nodes=None): - if nodes is not None: - cpus = CpuProfile.sort_cpu_by_numa_node(cpus, nodes) - cores = [] - - self.number_of_cpu = 0 - self.cores_per_cpu = 0 - self.hyper_thread = False - self.processors = [] - cur_processor = None - - for cpu in cpus: - key = '{0}-{1}'.format(cpu.numa_node, cpu.core) - if key not in cores: - cores.append(key) - else: - self.hyper_thread = True - continue - - if cur_processor is None or cur_processor.numa_node != cpu.numa_node: - cur_processor = CpuProfile.CpuConfigure() - cur_processor.numa_node = cpu.numa_node - self.processors.append(cur_processor) - - if cpu.allocated_function == constants.PLATFORM_FUNCTION: - cur_processor.platform += 1 - elif cpu.allocated_function == constants.VSWITCH_FUNCTION: - cur_processor.vswitch += 1 - elif cpu.allocated_function == constants.SHARED_FUNCTION: - cur_processor.shared += 1 - elif cpu.allocated_function == constants.APPLICATION_FUNCTION: - cur_processor.vms += 1 - - self.number_of_cpu = len(self.processors) - self.cores_per_cpu = len(cores) // self.number_of_cpu - - @staticmethod - def sort_cpu_by_numa_node(cpus, nodes): - newlist = [] - for node in nodes: - for cpu in cpus: - if cpu.forinodeid == node.id: - cpu.numa_node = node.numa_node - newlist.append(cpu) - return newlist - - -class HostCpuProfile(CpuProfile): - def __init__(self, subfunctions, cpus, nodes=None): - super(HostCpuProfile, self).__init__(cpus, nodes) - self.subfunctions = subfunctions - - # see if a cpu profile is applicable to this host - def profile_applicable(self, profile): - if self.number_of_cpu == profile.number_of_cpu and \ - self.cores_per_cpu == profile.cores_per_cpu: - return self.check_profile_core_functions(profile) - return False # Profile is not applicable to host - - def check_profile_core_functions(self, profile): - platform_cores = 0 - vswitch_cores = 0 - shared_cores = 0 - vm_cores = 0 - for cpu in profile.processors: - platform_cores += cpu.platform - vswitch_cores += cpu.vswitch - shared_cores += cpu.shared - vm_cores += cpu.vms - - error_string = "" - if platform_cores == 0: - error_string = "There must be at least one core for %s." % \ - constants.PLATFORM_FUNCTION - elif constants.WORKER in self.subfunctions and vswitch_cores == 0: - error_string = "There must be at least one core for %s." % \ - constants.VSWITCH_FUNCTION - elif constants.WORKER in self.subfunctions and vm_cores == 0: - error_string = "There must be at least one core for %s." % \ - constants.APPLICATION_FUNCTION - return error_string - - def lookup_function(s): for f in CORE_FUNCTIONS: if s.lower() == f.lower(): @@ -127,31 +33,6 @@ def lookup_function(s): return s -def check_profile_core_functions(personality, profile): - - platform_cores = 0 - vswitch_cores = 0 - shared_cores = 0 - vm_cores = 0 - for cpu in profile.processors: - platform_cores += cpu.platform - vswitch_cores += cpu.vswitch - shared_cores += cpu.shared - vm_cores += cpu.vms - - error_string = "" - if platform_cores == 0: - error_string = "There must be at least one core for %s." 
% \ - constants.PLATFORM_FUNCTION - elif constants.WORKER in personality and vswitch_cores == 0: - error_string = "There must be at least one core for %s." % \ - constants.VSWITCH_FUNCTION - elif constants.WORKER in personality and vm_cores == 0: - error_string = "There must be at least one core for %s." % \ - constants.APPLICATION_FUNCTION - return error_string - - def check_core_functions(personality, icpus): platform_cores = 0 vswitch_cores = 0 diff --git a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/host.py b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/host.py index 5cf6f7cdbb..b64d00c1eb 100644 --- a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/host.py +++ b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/host.py @@ -67,7 +67,6 @@ from sysinv.api.controllers.v1 import lvg as lvg_api from sysinv.api.controllers.v1 import host_fs as host_fs_api from sysinv.api.controllers.v1 import memory from sysinv.api.controllers.v1 import node as node_api -from sysinv.api.controllers.v1 import profile from sysinv.api.controllers.v1 import pv as pv_api from sysinv.api.controllers.v1 import sensor as sensor_api from sysinv.api.controllers.v1 import sensorgroup @@ -460,9 +459,6 @@ class Host(base.APIBase): isystem_uuid = types.uuid "The UUID of the system this host belongs to" - iprofile_uuid = types.uuid - "The UUID of the iprofile to apply to host" - peers = types.MultiType({dict}) "This peers of this host in the cluster" @@ -567,9 +563,6 @@ class Host(base.APIBase): for k in self.fields: setattr(self, k, kwargs.get(k)) - self.fields.append('iprofile_uuid') - setattr(self, 'iprofile_uuid', kwargs.get('iprofile_uuid', None)) - self.fields.append('peers') setattr(self, 'peers', kwargs.get('peers', None)) @@ -903,7 +896,6 @@ class HostUpdate(object): self.ihost_orig = dict(ihost_orig) self.ihost_patch = dict(ihost_patch) self._delta = list(delta) - self._iprofile_uuid = None self._ihost_val_prenotify = {} self._ihost_val = {} @@ -946,14 +938,6 @@ class HostUpdate(object): def nextstep(self, val): self._nextstep = val - @property - def iprofile_uuid(self): - return self._iprofile_uuid - - @iprofile_uuid.setter - def iprofile_uuid(self, val): - self._iprofile_uuid = val - @property def configure_required(self): return self._configure_required @@ -1502,8 +1486,7 @@ class HostController(rest.RestController): pecan.request.context, ihost_dict['rootfs_device']) controller_ihost = pecan.request.rpcapi.create_ihost( pecan.request.context, ihost_dict) - if 'recordtype' in ihost_dict and \ - ihost_dict['recordtype'] != "profile": + if 'recordtype' in ihost_dict: pecan.request.rpcapi.configure_ihost( pecan.request.context, controller_ihost) @@ -1756,8 +1739,7 @@ class HostController(rest.RestController): " contain(s) a management mac address" " from local network adapters") - self._patch(ihost_obj[0]['uuid'], - changed_paths, None) + self._patch(ihost_obj[0]['uuid'], changed_paths) else: self._do_post(new_host) @@ -1829,34 +1811,28 @@ class HostController(rest.RestController): """ utils.validate_patch(patch) - profile_uuid = None optimizable = 0 optimize_list = ['/uptime', '/location', '/serialid', '/task'] for p in patch: - # Check if this patch contains a profile path = p['path'] - if path == '/iprofile_uuid': - profile_uuid = p['value'] - patch.remove(p) - if path in optimize_list: optimizable += 1 if len(patch) == optimizable: - return self._patch(uuid, patch, profile_uuid) + return self._patch(uuid, patch) elif (pecan.request.user_agent.startswith('mtce') or pecan.request.user_agent.startswith('vim')): 
- return self._patch_sys(uuid, patch, profile_uuid) + return self._patch_sys(uuid, patch) else: - return self._patch_gen(uuid, patch, profile_uuid) + return self._patch_gen(uuid, patch) @cutils.synchronized(LOCK_NAME_SYS) - def _patch_sys(self, uuid, patch, profile_uuid): - return self._patch(uuid, patch, profile_uuid) + def _patch_sys(self, uuid, patch): + return self._patch(uuid, patch) @cutils.synchronized(LOCK_NAME) - def _patch_gen(self, uuid, patch, profile_uuid): - return self._patch(uuid, patch, profile_uuid) + def _patch_gen(self, uuid, patch): + return self._patch(uuid, patch) @staticmethod def _validate_capability_is_not_set(old, new): @@ -1914,7 +1890,7 @@ class HostController(rest.RestController): "name={}, value={}. ").format( capability, new_value)) - def _patch(self, uuid, patch, myprofile_uuid): + def _patch(self, uuid, patch): log_start = cutils.timestamped("ihost_patch_start") patch_obj = jsonpatch.JsonPatch(patch) @@ -2004,8 +1980,6 @@ class HostController(rest.RestController): hostupdate.notify_mtce, hostupdate.skip_notify_mtce)) - hostupdate.iprofile_uuid = myprofile_uuid - if self.stage_action(myaction, hostupdate): LOG.info("%s Action staged: %s" % (hostupdate.displayid, myaction)) @@ -2409,13 +2383,6 @@ class HostController(rest.RestController): ihost = objects.host.get_by_uuid(pecan.request.context, ihost_id) - # Do not allow profiles to be deleted by system host-delete - if ihost['recordtype'] == "profile": - LOG.error("host %s of recordtype %s cannot be deleted via " - "host-delete command." - % (ihost['uuid'], ihost['recordtype'])) - raise exception.HTTPNotFound - if ihost['administrative'] == constants.ADMIN_UNLOCKED: if ihost.hostname is None: host = ihost.uuid @@ -4641,7 +4608,6 @@ class HostController(rest.RestController): constants.VIM_SERVICES_DISABLE_FAILED, constants.VIM_SERVICES_DISABLE_EXTEND, constants.VIM_SERVICES_DELETE_FAILED, - constants.APPLY_PROFILE_ACTION, constants.SUBFUNCTION_CONFIG_ACTION] if action not in valid_actions: @@ -4727,8 +4693,6 @@ class HostController(rest.RestController): self.update_vim_progress_status(action, hostupdate) elif action == constants.VIM_SERVICES_DELETE_FAILED: self.update_vim_progress_status(action, hostupdate) - elif action == constants.APPLY_PROFILE_ACTION: - self._check_apply_profile(hostupdate) elif action == constants.SUBFUNCTION_CONFIG_ACTION: self._check_subfunction_config(hostupdate) self._semantic_check_nova_local_storage( @@ -4783,20 +4747,6 @@ class HostController(rest.RestController): else: return False - @staticmethod - def _check_apply_profile(hostupdate): - ihost = hostupdate.ihost_orig - if (ihost['administrative'] == constants.ADMIN_UNLOCKED and - not utils.is_host_simplex_controller(ihost)): - raise wsme.exc.ClientSideError( - _("Can not apply profile to an 'unlocked' host %s; " - "Please 'Lock' first." 
% hostupdate.displayid)) - - if utils.get_system_mode() == constants.SYSTEM_MODE_SIMPLEX: - raise wsme.exc.ClientSideError(_( - "Applying a profile on a simplex system is not allowed.")) - return True - @staticmethod def check_notify_mtce(action, hostupdate): """Determine whether mtce should be notified of this patch request @@ -6636,8 +6586,6 @@ class HostController(rest.RestController): self._handle_vim_services_delete_failed(hostupdate) hostupdate.nextstep = hostupdate.EXIT_UPDATE_PREVAL rc = False - elif action == constants.APPLY_PROFILE_ACTION: - self._stage_apply_profile_action(hostupdate) elif action == constants.SUBFUNCTION_CONFIG_ACTION: # Not a mtc action; disable mtc checks and config self._stage_subfunction_config(hostupdate) @@ -6655,18 +6603,6 @@ class HostController(rest.RestController): return rc - @staticmethod - def _stage_apply_profile_action(hostupdate): - """Stage apply profile action.""" - LOG.info("%s _stage_apply_profile_action uuid=%s profile_uuid=%s" % - (hostupdate.displayid, - hostupdate.ihost_patch['uuid'], - hostupdate.iprofile_uuid)) - profile.apply_profile(hostupdate.ihost_patch['uuid'], - hostupdate.iprofile_uuid) - hostupdate.notify_mtce = False - hostupdate.configure_required = False - @staticmethod def _check_subfunction_config(hostupdate): """Check subfunction config.""" diff --git a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/interface.py b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/interface.py index 5dc7c1e25e..ab6bac8952 100644 --- a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/interface.py +++ b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/interface.py @@ -698,7 +698,7 @@ def _set_defaults(interface): return interface_merged -def _check_interface_vlan_id(op, interface, ihost, from_profile=False): +def _check_interface_vlan_id(op, interface, ihost): # Check vlan_id if 'vlan_id' in interface.keys() and interface['vlan_id'] is not None: if not str(interface['vlan_id']).isdigit(): @@ -712,7 +712,7 @@ def _check_interface_vlan_id(op, interface, ihost, from_profile=False): return interface -def _check_interface_name(op, interface, ihost, from_profile=False): +def _check_interface_name(op, interface, ihost): ihost_id = interface['forihostid'] ifname = interface['ifname'] iftype = interface['iftype'] @@ -763,7 +763,7 @@ def _check_interface_name(op, interface, ihost, from_profile=False): return interface -def _check_interface_mtu(interface, ihost, from_profile=False): +def _check_interface_mtu(interface, ihost): # Check imtu if 'imtu' in interface.keys() and interface['imtu'] is not None: if not str(interface['imtu']).isdigit(): @@ -774,7 +774,7 @@ def _check_interface_mtu(interface, ihost, from_profile=False): return interface -def _check_interface_sriov(interface, ihost, from_profile=False): +def _check_interface_sriov(interface, ihost): sriov_update = False if 'ifclass' in interface.keys() and not interface['ifclass']: @@ -1116,7 +1116,7 @@ def _check_interface_data(op, interface, ihost, existing_interface, ihost_uuid = interface['ihost_uuid'] # Check interface name for validity - _check_interface_name(op, interface, ihost, existing_interface) + _check_interface_name(op, interface, ihost) if op == "add": this_interface_id = 0 @@ -1477,21 +1477,19 @@ def _allocate_pool_address(interface_id, pool_uuid, address_name=None): interface_id, pool_uuid, address_name) -def _update_ipv6_address_mode(interface, mode=None, pool=None, - from_profile=False): +def _update_ipv6_address_mode(interface, mode=None, pool=None): mode = interface['ipv6_mode'] if 
not mode else mode pool = interface['ipv6_pool'] if not pool else pool utils.update_address_mode(interface, constants.IPV6_FAMILY, mode, pool) - if mode == constants.IPV6_POOL and not from_profile: + if mode == constants.IPV6_POOL: _allocate_pool_address(interface['id'], pool) -def _update_ipv4_address_mode(interface, mode=None, pool=None, - interface_profile=False): +def _update_ipv4_address_mode(interface, mode=None, pool=None): mode = interface['ipv4_mode'] if not mode else mode pool = interface['ipv4_pool'] if not pool else pool utils.update_address_mode(interface, constants.IPV4_FAMILY, mode, pool) - if mode == constants.IPV4_POOL and not interface_profile: + if mode == constants.IPV4_POOL: _allocate_pool_address(interface['id'], pool) @@ -1528,13 +1526,11 @@ def _add_extended_attributes(ihost, interface, attributes): if attributes.get('ipv4_mode'): _update_ipv4_address_mode(interface_data, attributes.get('ipv4_mode'), - attributes.get('ipv4_pool'), - attributes.get('interface_profile')) + attributes.get('ipv4_pool')) if attributes.get('ipv6_mode'): _update_ipv6_address_mode(interface_data, attributes.get('ipv6_mode'), - attributes.get('ipv6_pool'), - attributes.get('interface_profile')) + attributes.get('ipv6_pool')) def _update_ports(op, interface, ihost, ports): @@ -1728,13 +1724,13 @@ def update_upper_interface_macs(ihost, interface): # This method allows creating an interface through a non-HTTP -# request e.g. through profile.py while still passing -# through interface semantic checks and osd configuration +# request while still passing through interface semantic checks and osd +# configuration # Hence, not declared inside a class # # Param: # interface - dictionary of interface values -def _create(interface, from_profile=False): +def _create(interface): # Get host ihostId = interface.get('forihostid') or interface.get('ihost_uuid') ihost = pecan.request.dbapi.ihost_get(ihostId) @@ -1776,21 +1772,19 @@ def _create(interface, from_profile=False): interface.update({'used_by': []}) # Check mtu before setting defaults - interface = _check_interface_mtu(interface, ihost, from_profile=from_profile) + interface = _check_interface_mtu(interface, ihost) # Check vlan_id before setting defaults - interface = _check_interface_vlan_id("add", interface, ihost, from_profile=from_profile) + interface = _check_interface_vlan_id("add", interface, ihost) # Set defaults - before checks to allow for optional attributes - if not from_profile: - interface = _set_defaults(interface) + interface = _set_defaults(interface) # Semantic checks - interface = _check("add", interface, ports=ports, ifaces=uses_if, from_profile=from_profile) + interface = _check("add", interface, ports=ports, ifaces=uses_if) - if not from_profile: - # Select appropriate MAC address from lower interface(s) - interface = set_interface_mac(ihost, interface) + # Select appropriate MAC address from lower interface(s) + interface = set_interface_mac(ihost, interface) new_interface = pecan.request.dbapi.iinterface_create( forihostid, @@ -1845,8 +1839,8 @@ def _create(interface, from_profile=False): return new_interface -def _check(op, interface, ports=None, ifaces=None, from_profile=False, - existing_interface=None, datanetworks=None): +def _check(op, interface, ports=None, ifaces=None, existing_interface=None, + datanetworks=None): # Semantic checks ihost = pecan.request.dbapi.ihost_get(interface['ihost_uuid']).as_dict() @@ -1864,63 +1858,64 @@ def _check(op, interface, ports=None, ifaces=None, from_profile=False, if check_host: 
_check_host(ihost) - if not from_profile: - if ports: - _check_ports(op, interface, ihost, ports) - if ifaces: - interfaces = pecan.request.dbapi.iinterface_get_by_ihost(interface['ihost_uuid']) - if len(ifaces) > 1 and \ - interface['iftype'] == constants.INTERFACE_TYPE_VLAN: - # Can only have one interface associated to vlan interface type + + if ports: + _check_ports(op, interface, ihost, ports) + + if ifaces: + interfaces = pecan.request.dbapi.iinterface_get_by_ihost(interface['ihost_uuid']) + if len(ifaces) > 1 and \ + interface['iftype'] == constants.INTERFACE_TYPE_VLAN: + # Can only have one interface associated to vlan interface type + raise wsme.exc.ClientSideError( + _("Can only have one interface for vlan type. (%s)" % ifaces)) + if interface['iftype'] == constants.INTERFACE_TYPE_ETHERNET: + if len(ifaces) > 1: raise wsme.exc.ClientSideError( - _("Can only have one interface for vlan type. (%s)" % ifaces)) - if interface['iftype'] == constants.INTERFACE_TYPE_ETHERNET: - if len(ifaces) > 1: - raise wsme.exc.ClientSideError( - _("Can only have one lower interface for ethernet type." - "(%s)" % ifaces)) - lower = pecan.request.dbapi.iinterface_get(ifaces[0], - interface['ihost_uuid']) - if not (lower['iftype'] == constants.INTERFACE_TYPE_ETHERNET - and lower['ifclass'] == - constants.INTERFACE_CLASS_PCI_SRIOV): - # Can only have pci_sriov ethernet type lower interface - # associated to ethernet interface type - raise wsme.exc.ClientSideError( - _("Can only use pci-sriov ethernet interface for " - "ethernet type. (%s)" % ifaces)) + _("Can only have one lower interface for ethernet type." + "(%s)" % ifaces)) + lower = pecan.request.dbapi.iinterface_get(ifaces[0], + interface['ihost_uuid']) + if not (lower['iftype'] == constants.INTERFACE_TYPE_ETHERNET + and lower['ifclass'] == + constants.INTERFACE_CLASS_PCI_SRIOV): + # Can only have pci_sriov ethernet type lower interface + # associated to ethernet interface type + raise wsme.exc.ClientSideError( + _("Can only use pci-sriov ethernet interface for " + "ethernet type. 
(%s)" % ifaces)) - for i in ifaces: - for iface in interfaces: - if iface['uuid'] == i or iface['ifname'] == i: - existing_iface = copy.deepcopy(iface) + for i in ifaces: + for iface in interfaces: + if iface['uuid'] == i or iface['ifname'] == i: + existing_iface = copy.deepcopy(iface) - # Get host - ihost = pecan.request.dbapi.ihost_get( - iface.get('forihostid')) + # Get host + ihost = pecan.request.dbapi.ihost_get( + iface.get('forihostid')) - if 'vlan_id' not in iface: - iface['vlan_id'] = None + if 'vlan_id' not in iface: + iface['vlan_id'] = None - if 'aemode' not in iface: - iface['aemode'] = None + if 'aemode' not in iface: + iface['aemode'] = None - if 'txhashpolicy' not in iface: - iface['txhashpolicy'] = None + if 'txhashpolicy' not in iface: + iface['txhashpolicy'] = None - if 'primary_reselect' not in iface: - iface['primary_reselect'] = None + if 'primary_reselect' not in iface: + iface['primary_reselect'] = None - _check_interface_data( - "modify", iface, ihost, existing_iface, datanetworks) + _check_interface_data( + "modify", iface, ihost, existing_iface, datanetworks) - interface = _check_interface_data( - op, interface, ihost, existing_interface, datanetworks) + interface = _check_interface_data( + op, interface, ihost, existing_interface, datanetworks) return interface -def _update(interface_uuid, interface_values, from_profile): +def _update(interface_uuid, interface_values): return objects.interface.get_by_uuid(pecan.request.context, interface_uuid) @@ -1968,21 +1963,20 @@ def _clear_interface_state_fault(hostname, interface_uuid): FM.clear_fault(fm_constants.FM_ALARM_ID_NETWORK_INTERFACE, entity_instance_id) -def _delete(interface, from_profile=False): +def _delete(interface): ihost = pecan.request.dbapi.ihost_get(interface['forihostid']).as_dict() - if not from_profile: - check_host = True - if (cutils.is_aio_simplex_system(pecan.request.dbapi) - and interface['ifclass'] == constants.INTERFACE_CLASS_PCI_SRIOV - and interface['iftype'] == constants.INTERFACE_TYPE_VF): - # user can delete interface SR-IOV VF without host lock in AIO-SX - check_host = False + check_host = True + if (cutils.is_aio_simplex_system(pecan.request.dbapi) + and interface['ifclass'] == constants.INTERFACE_CLASS_PCI_SRIOV + and interface['iftype'] == constants.INTERFACE_TYPE_VF): + # user can delete interface SR-IOV VF without host lock in AIO-SX + check_host = False - if check_host: - _check_host(ihost) + if check_host: + _check_host(ihost) - if not from_profile and interface['iftype'] == 'ethernet' and not interface['uses']: + if interface['iftype'] == 'ethernet' and not interface['uses']: msg = _("Cannot delete a system created ethernet interface") raise wsme.exc.ClientSideError(msg) @@ -2075,3 +2069,5 @@ def _is_interface_address_allowed(interface): elif interface['ifclass'] == constants.INTERFACE_CLASS_PLATFORM: return True return False + +# TODO (pbovina): Move utils methods within InterfaceController class diff --git a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/interface_network.py b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/interface_network.py index 86bb26aef0..e13679e958 100644 --- a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/interface_network.py +++ b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/interface_network.py @@ -16,7 +16,7 @@ # License for the specific language governing permissions and limitations # under the License. # -# Copyright (c) 2013-2018 Wind River Systems, Inc. +# Copyright (c) 2013-2021 Wind River Systems, Inc. 
# import os @@ -175,26 +175,25 @@ class InterfaceNetworkController(rest.RestController): constants.IPV4_DISABLED, None) # Assign an address to the interface - if host.recordtype != "profile": - _update_host_address(host, interface_obj, network_type) - if network_type == constants.NETWORK_TYPE_MGMT: - ethernet_port_mac = None - if not interface_obj.uses: - # Get the ethernet port associated with the interface - interface_ports = pecan.request.dbapi.ethernet_port_get_by_interface( - interface_obj.uuid) - for p in interface_ports: - if p is not None: - ethernet_port_mac = p.mac - break - else: - tmp_interface = interface_obj.as_dict() - ethernet_port_mac = tmp_interface['imac'] - _update_host_mgmt_mac(host, ethernet_port_mac) - cutils.perform_distributed_cloud_config(pecan.request.dbapi, + _update_host_address(host, interface_obj, network_type) + if network_type == constants.NETWORK_TYPE_MGMT: + ethernet_port_mac = None + if not interface_obj.uses: + # Get the ethernet port associated with the interface + interface_ports = pecan.request.dbapi.ethernet_port_get_by_interface( + interface_obj.uuid) + for p in interface_ports: + if p is not None: + ethernet_port_mac = p.mac + break + else: + tmp_interface = interface_obj.as_dict() + ethernet_port_mac = tmp_interface['imac'] + _update_host_mgmt_mac(host, ethernet_port_mac) + cutils.perform_distributed_cloud_config(pecan.request.dbapi, interface_id) - elif network_type == constants.NETWORK_TYPE_OAM: - pecan.request.rpcapi.initialize_oam_config(pecan.request.context, host) + elif network_type == constants.NETWORK_TYPE_OAM: + pecan.request.rpcapi.initialize_oam_config(pecan.request.context, host) return InterfaceNetwork.convert_with_links(result) diff --git a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/lvg.py b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/lvg.py index a38abb326a..4458538949 100644 --- a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/lvg.py +++ b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/lvg.py @@ -16,7 +16,7 @@ # License for the specific language governing permissions and limitations # under the License. # -# Copyright (c) 2013-2020 Wind River Systems, Inc. +# Copyright (c) 2013-2021 Wind River Systems, Inc. 
# import jsonpatch @@ -590,7 +590,7 @@ def _check(op, lvg): return lvg -def _create(lvg, iprofile=None, applyprofile=None): +def _create(lvg): # Get host ihostId = lvg.get('forihostid') or lvg.get('ihost_uuid') ihost = pecan.request.dbapi.ihost_get(ihostId) @@ -611,48 +611,41 @@ def _create(lvg, iprofile=None, applyprofile=None): # See if this volume group already exists ilvgs = pecan.request.dbapi.ilvg_get_all(forihostid=forihostid) lvg_in_db = False - if not iprofile: - for vg in ilvgs: - if vg['lvm_vg_name'] == lvg['lvm_vg_name']: - lvg_in_db = True - # User is adding again so complain - if (vg['vg_state'] == constants.LVG_ADD or - vg['vg_state'] == constants.PROVISIONED): - raise wsme.exc.ClientSideError(_("Volume Group (%s) " - "already present" % - vg['lvm_vg_name'])) + for vg in ilvgs: + if vg['lvm_vg_name'] == lvg['lvm_vg_name']: + lvg_in_db = True + # User is adding again so complain + if (vg['vg_state'] == constants.LVG_ADD or + vg['vg_state'] == constants.PROVISIONED): + raise wsme.exc.ClientSideError(_("Volume Group (%s) " + "already present" % + vg['lvm_vg_name'])) - # Prevent re-adding so that we don't end up in a state where - # the cloud admin has removed a subset of the PVs rendering the - # VG as unusable because of LV corruption - if vg['vg_state'] == constants.LVG_DEL: - # User changed mind and is re-adding - values = {'vg_state': constants.LVG_ADD} - if applyprofile: - # inherit the capabilities, - if 'capabilities' in lvg and lvg['capabilities']: - values['capabilities'] = lvg['capabilities'] - - try: - LOG.info("Update ilvg values: %s" % values) - pecan.request.dbapi.ilvg_update(vg.id, values) - except exception.HTTPNotFound: - msg = _("LVG update failed: host (%s) LVG (%s)" - % (ihost['hostname'], vg['lvm_pv_name'])) - raise wsme.exc.ClientSideError(msg) - ret_lvg = vg - break + # Prevent re-adding so that we don't end up in a state where + # the cloud admin has removed a subset of the PVs rendering the + # VG as unusable because of LV corruption + if vg['vg_state'] == constants.LVG_DEL: + # User changed mind and is re-adding + values = {'vg_state': constants.LVG_ADD} + try: + LOG.info("Update ilvg values: %s" % values) + pecan.request.dbapi.ilvg_update(vg.id, values) + except exception.HTTPNotFound: + msg = _("LVG update failed: host (%s) LVG (%s)" + % (ihost['hostname'], vg['lvm_pv_name'])) + raise wsme.exc.ClientSideError(msg) + ret_lvg = vg + break if not lvg_in_db: # Add the default volume group parameters - if lvg['lvm_vg_name'] == constants.LVG_CINDER_VOLUMES and not iprofile: + if lvg['lvm_vg_name'] == constants.LVG_CINDER_VOLUMES: lvg_caps = lvg['capabilities'] - if (constants.LVG_CINDER_PARAM_LVM_TYPE in lvg_caps) or applyprofile: + if (constants.LVG_CINDER_PARAM_LVM_TYPE in lvg_caps): # defined from create or inherit the capabilities - LOG.info("%s defined from create %s applyprofile=%s" % - (constants.LVG_CINDER_PARAM_LVM_TYPE, lvg_caps, - applyprofile)) + LOG.info("%s defined from create %s" % + (constants.LVG_CINDER_PARAM_LVM_TYPE, lvg_caps)) else: # Default LVM type lvg_caps_dict = { diff --git a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/partition.py b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/partition.py index 7128c67f75..bb010c4590 100644 --- a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/partition.py +++ b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/partition.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2017-2018 Wind River Systems, Inc. +# Copyright (c) 2017-2021 Wind River Systems, Inc. 
# # SPDX-License-Identifier: Apache-2.0 # @@ -630,7 +630,7 @@ def _semantic_checks(operation, partition): return partition -def _create(partition, iprofile=None, applyprofile=None): +def _create(partition): # Reject operation if we are upgrading the system. ihostid = partition.get('forihostid') or partition.get('ihost_uuid') ihost = pecan.request.dbapi.ihost_get(ihostid) @@ -653,8 +653,7 @@ def _create(partition, iprofile=None, applyprofile=None): # Set the status of the new partition if (ihost.invprovision in [constants.PROVISIONED, - constants.PROVISIONING] and - not iprofile): + constants.PROVISIONING]): partition['status'] = constants.PARTITION_CREATE_IN_SVC_STATUS else: partition['status'] = constants.PARTITION_CREATE_ON_UNLOCK_STATUS @@ -678,8 +677,7 @@ def _create(partition, iprofile=None, applyprofile=None): # - PROVISIONING: AIO (after config_controller) and before worker # configuration if (ihost.invprovision in [constants.PROVISIONED, - constants.PROVISIONING] and - not iprofile): + constants.PROVISIONING]): # Instruct puppet to implement the change pecan.request.rpcapi.update_partition_config(pecan.request.context, partition) diff --git a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/profile.py b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/profile.py deleted file mode 100644 index c9cc739cf1..0000000000 --- a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/profile.py +++ /dev/null @@ -1,3229 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# Copyright (c) 2013-2021 Wind River Systems, Inc. 
-# - -import jsonpatch -import pecan -from pecan import expose -from pecan import rest -import six -import wsme -import wsmeext.pecan as wsme_pecan - -from oslo_config import cfg -from oslo_log import log -from sysinv._i18n import _ -from sysinv import objects -from sysinv.api.controllers.v1 import base -from sysinv.api.controllers.v1 import collection -from sysinv.api.controllers.v1 import cpu as cpu_api -from sysinv.api.controllers.v1 import disk as disk_api -from sysinv.api.controllers.v1 import partition as partition_api -from sysinv.api.controllers.v1 import interface as interface_api -from sysinv.api.controllers.v1 import interface_network as ifnet_api -from sysinv.api.controllers.v1 import memory as memory_api -from sysinv.api.controllers.v1 import node as node_api -from sysinv.api.controllers.v1 import storage as storage_api -from sysinv.api.controllers.v1 import lvg as lvg_api -from sysinv.api.controllers.v1 import pv as pv_api -from sysinv.api.controllers.v1 import link -from sysinv.api.controllers.v1 import utils -from sysinv.api.controllers.v1 import cpu_utils -from sysinv.api.controllers.v1 import types -from sysinv.api.controllers.v1 import port as port_api -from sysinv.api.controllers.v1 import ethernet_port as ethernet_port_api -from sysinv.common import ceph -from sysinv.common import constants -from sysinv.common import exception -from sysinv.common import utils as cutils -import xml.etree.ElementTree as et -from lxml import etree -from sysinv.api.controllers.v1 import profile_utils -from oslo_db import exception as dbException -from wsme import types as wtypes -from sysinv.common.storage_backend_conf import StorageBackendConfig - -LOG = log.getLogger(__name__) - -CONF = cfg.CONF -CONF.import_opt('journal_min_size', - 'sysinv.api.controllers.v1.storage', - group='journal') -CONF.import_opt('journal_max_size', - 'sysinv.api.controllers.v1.storage', - group='journal') -CONF.import_opt('journal_default_size', - 'sysinv.api.controllers.v1.storage', - group='journal') - -# Defines the fields that must be copied in/out of interface profiles -INTERFACE_PROFILE_FIELDS = ['ifname', 'iftype', 'imtu', 'networktype', - 'ifclass', 'aemode', 'networks', - 'txhashpolicy', 'forihostid', 'datanetworks', - 'vlan_id', 'ipv4_mode', 'ipv6_mode', - 'ipv4_pool', 'ipv6_pool', - 'sriov_numvfs', 'sriov_vf_driver', 'max_tx_rate', - 'primary_reselect'] - - -class Profile(base.APIBase): - """API representation of a host profile. - - This class enforces type checking and value constraints, and converts - between the internal object model and the API representation - of an ihost. - """ - - _ihost_uuid = None - _profilename = None - - def _get_ihost_uuid(self): - return self._ihost_uuid - - def _set_ihost_uuid(self, value): - if value and self._ihost_uuid != value: - try: - ihost = objects.host.get_by_uuid(pecan.request.context, value) - self._ihost_uuid = ihost.uuid - # NOTE(lucasagomes): Create the node_id attribute on-the-fly - # to satisfy the api -> rpc object - # conversion. 
- # self.host_id = ihost.id - self.forihostid = ihost.id - except exception.NodeNotFound as e: - # Change error code because 404 (NotFound) is inappropriate - # response for a POST request to create a Port - e.code = 400 # BadRequest - raise e - elif value == wtypes.Unset: - self._ihost_uuid = wtypes.Unset - - def _get_profilename(self): - if self.recordtype == 'profile': - return self.hostname - else: - return self._profilename - - def _set_profilename(self, value): - self._profilename = str(value) - - # NOTE: translate 'id' publicly to 'uuid' internally - id = int - - created_at = wtypes.datetime.datetime - updated_at = wtypes.datetime.datetime - - uuid = types.uuid - hostname = wtypes.text - profilename = wsme.wsproperty(wtypes.text, - _get_profilename, - _set_profilename, - mandatory=True) - - profiletype = wtypes.text - "Represent the profiletype of the iprofile - cpu, if, stor, memory" - - recordtype = wtypes.text - "Represent the recordtype of the iprofile" - - invprovision = wtypes.text - "Represent the current (not transition) provision state of the ihost" - - mgmt_mac = wtypes.text - "Represent the provisioned Boot mgmt MAC address of the ihost." - - mgmt_ip = wtypes.text - "Represent the provisioned Boot mgmt IP address of the ihost." - - personality = wtypes.text - "Represent the personality of the ihost" - - # target_provision_state = wtypes.text - # "The user modified desired provision state of the ihost." - - # NOTE: allow arbitrary dicts for driver_info and extra so that drivers - # and vendors can expand on them without requiring API changes. - # NOTE: translate 'driver_info' internally to 'management_configuration' - serialid = wtypes.text - - administrative = wtypes.text - operational = wtypes.text - availability = wtypes.text - - # The 'action' field is used for action based administration compared - # to existing state change administration. - # Actions like 'reset','reboot', and 'reinstall' are now supported - # by this new method along with 'swact', 'lock' and 'unlock'. - action = wtypes.text - - # Maintenance FSM task is just a text string - task = wtypes.text - - reserved = wtypes.text - - ihost_uuid = wsme.wsproperty(types.uuid, - _get_ihost_uuid, - _set_ihost_uuid, - mandatory=True) - "The UUID of the ihost this profile was created from" - - # Host uptime - uptime = int - - # NOTE: properties should use a class to enforce required properties - # current list: arch, cpus, disk, partition, ram, image - location = {wtypes.text: utils.ValidTypes(wtypes.text, six.integer_types)} - - # NOTE: translate 'chassis_id' to a link to the chassis resource - # and accept a chassis uuid when creating an ihost. 
- # (Leaf not ihost) - - links = [link.Link] - "A list containing a self link and associated ihost links" - - iinterfaces = [link.Link] - "Links to the collection of iinterfaces on this ihost" - - ports = [link.Link] - "Links to the collection of ports on this ihost" - - ethernet_ports = [link.Link] - "Links to the collection of ethernet_ports on this ihost" - - inodes = [link.Link] - "Links to the collection of inodes on this ihost" - - icpus = [link.Link] - "Links to the collection of icpus on this ihost" - - imemorys = [link.Link] - "Links to the collection of imemorys on this ihost" - - istors = [link.Link] - "Links to the collection of istors on this ihost" - - ipvs = [link.Link] - "Links to the collection of ipvs on this ihost" - - ilvgs = [link.Link] - "Links to the collection of ilvgs on this ihost" - - idisks = [link.Link] - "Links to the collection of idisks on this ihost" - - partitions = [link.Link] - "Links to the collection of partitions on this ihost" - - boot_device = wtypes.text - rootfs_device = wtypes.text - install_output = wtypes.text - console = wtypes.text - tboot = wtypes.text - - def __init__(self, **kwargs): - self.fields = list(objects.host.fields.keys()) - for k in self.fields: - setattr(self, k, kwargs.get(k)) - - self.fields.append('profilename') - setattr(self, 'profilename', kwargs.get('profilename', None)) - - self.fields.append('profiletype') - setattr(self, 'profiletype', kwargs.get('profiletype', None)) - - self.fields.append('ihost_uuid') - setattr(self, 'ihost_uuid', kwargs.get('ihost_uuid', None)) - - @classmethod - def convert_with_links(cls, rpc_ihost, expand=True): - minimum_fields = ['id', 'uuid', 'hostname', 'personality', - 'administrative', 'operational', 'availability', - 'task', 'action', 'uptime', 'reserved', - 'mgmt_mac', 'mgmt_ip', 'location', 'recordtype', - 'created_at', 'updated_at', 'boot_device', - 'rootfs_device', 'install_output', 'console', - 'tboot', 'profilename', 'profiletype'] - fields = minimum_fields if not expand else None - iProfile = Profile.from_rpc_object(rpc_ihost, fields) - iProfile.profiletype = rpc_ihost.profiletype - iProfile.links = [link.Link.make_link('self', pecan.request.host_url, - 'iprofile', iProfile.uuid), - link.Link.make_link('bookmark', - pecan.request.host_url, - 'iprofile', iProfile.uuid, - bookmark=True) - ] - if expand: - iProfile.iinterfaces = [link.Link.make_link('self', - pecan.request.host_url, - 'iprofile', - iProfile.uuid + "/iinterfaces"), - link.Link.make_link( - 'bookmark', - pecan.request.host_url, - 'iprofile', - iProfile.uuid + "/iinterfaces", - bookmark=True) - ] - - iProfile.ports = [link.Link.make_link('self', - pecan.request.host_url, - 'iprofile', - iProfile.uuid + "/ports"), - link.Link.make_link( - 'bookmark', - pecan.request.host_url, - 'iprofile', - iProfile.uuid + "/ports", - bookmark=True) - ] - - iProfile.ethernet_ports = [link.Link.make_link('self', - pecan.request.host_url, - 'iprofile', - iProfile.uuid + "/ethernet_ports"), - link.Link.make_link( - 'bookmark', - pecan.request.host_url, - 'iprofile', - iProfile.uuid + "/ethernet_ports", - bookmark=True) - ] - - iProfile.inodes = [link.Link.make_link('self', - pecan.request.host_url, - 'ihosts', - iProfile.uuid + "/inodes"), - link.Link.make_link( - 'bookmark', - pecan.request.host_url, - 'ihosts', - iProfile.uuid + "/inodes", - bookmark=True) - ] - - iProfile.icpus = [link.Link.make_link('self', - pecan.request.host_url, - 'ihosts', - iProfile.uuid + "/icpus"), - link.Link.make_link( - 'bookmark', - pecan.request.host_url, - 
'ihosts', - iProfile.uuid + "/icpus", - bookmark=True) - ] - - iProfile.imemorys = [link.Link.make_link('self', - pecan.request.host_url, - 'ihosts', - iProfile.uuid + "/imemorys"), - link.Link.make_link( - 'bookmark', - pecan.request.host_url, - 'ihosts', - iProfile.uuid + "/imemorys", - bookmark=True) - ] - - iProfile.istors = [link.Link.make_link('self', - pecan.request.host_url, - 'iprofile', - iProfile.uuid + "/istors"), - link.Link.make_link( - 'bookmark', - pecan.request.host_url, - 'iprofile', - iProfile.uuid + "/istors", - bookmark=True) - ] - - iProfile.ilvgs = [link.Link.make_link('self', - pecan.request.host_url, - 'iprofile', - iProfile.uuid + "/ilvgs"), - link.Link.make_link( - 'bookmark', - pecan.request.host_url, - 'iprofile', - iProfile.uuid + "/ilvgs", - bookmark=True) - ] - - iProfile.ipvs = [link.Link.make_link('self', - pecan.request.host_url, - 'iprofile', - iProfile.uuid + "/ipvs"), - link.Link.make_link( - 'bookmark', - pecan.request.host_url, - 'iprofile', - iProfile.uuid + "/ipvs", - bookmark=True) - ] - - iProfile.idisks = [link.Link.make_link('self', - pecan.request.host_url, - 'iprofile', - iProfile.uuid + "/idisks"), - link.Link.make_link( - 'bookmark', - pecan.request.host_url, - 'iprofile', - iProfile.uuid + "/idisks", - bookmark=True) - ] - - iProfile.partitions = [ - link.Link.make_link( - 'self', - pecan.request.host_url, - 'iprofile', - iProfile.uuid + "/partitions"), - link.Link.make_link( - 'bookmark', - pecan.request.host_url, - 'iprofile', - iProfile.uuid + "/partitions", - bookmark=True) - ] - - return iProfile - - -class BaseProfile(base.APIBase): - """API representation of a type specific profile. - - This class enforces type checking and value constraints, and converts - between the internal object model and the API representation. 
- """ - uuid = types.uuid - "uuid of the profile" - - profilename = wtypes.text - "name of the profile" - - profiletype = wtypes.text - "type of the profile" - - -class InterfaceProfile(BaseProfile): - - ports = [ethernet_port_api.EthernetPort] - "list of port objects" - - interfaces = [interface_api.Interface] - "list of interface objects" - - -class CpuProfile(BaseProfile): - cpus = [cpu_api.CPU] - "list of cpu objects" - - nodes = [node_api.Node] - "list of node objects" - - -class MemoryProfile(BaseProfile): - memory = [memory_api.Memory] - "list of memory objects" - - nodes = [node_api.Node] - "list of node objects" - - -class StorageProfile(BaseProfile): - disks = [disk_api.Disk] - "list of disk objects" - - partitions = [partition_api.Partition] - "list of partition objects" - - stors = [storage_api.Storage] - "list of storage volume objects" - - pvs = [pv_api.PV] - "list of physical volume objects" - - lvgs = [lvg_api.LVG] - "list of logical volume group objects" - - -class ProfileCollection(collection.Collection): - """API representation of a collection of ihosts.""" - - iprofiles = [Profile] - "A list containing ihosts objects" - - def __init__(self, **kwargs): - self._type = 'iprofiles' - - @classmethod - def convert_with_links(cls, iprofiles, limit, url=None, - expand=False, **kwargs): - collection = ProfileCollection() - collection.iprofiles = [ - Profile.convert_with_links(n, expand) for n in iprofiles] - collection.next = collection.get_next(limit, url=url, **kwargs) - return collection - - -LOCK_NAME = 'ProfileController' - - -class ProfileController(rest.RestController): - """REST controller for iprofiles.""" - - iinterfaces = interface_api.InterfaceController( - from_ihosts=True) - "Expose iinterfaces as a sub-element of iprofiles" - - ports = port_api.PortController(from_ihosts=True) - "Expose ports as a sub-element of iprofiles" - - ethernet_ports = ethernet_port_api.EthernetPortController(from_ihosts=True) - "Expose ethernet_ports as a sub-element of iprofiles" - - inodes = node_api.NodeController(from_ihosts=True) - "Expose inodes as a sub-element of iprofiles" - - icpus = cpu_api.CPUController(from_ihosts=True) - "Expose icpus as a sub-element of iprofiles" - - imemorys = memory_api.MemoryController(from_ihosts=True) - "Expose imemorys as a sub-element of iprofiles" - - istors = storage_api.StorageController(from_ihosts=True) - "Expose istors as a sub-element of iprofiles" - - ilvgs = lvg_api.LVGController(from_ihosts=True) - "Expose ilvgs as a sub-element of iprofiles" - - ipvs = pv_api.PVController(from_ihosts=True) - "Expose ipvs as a sub-element of iprofiles" - - idisks = disk_api.DiskController(from_ihosts=True) - "Expose idisks as a sub-element of iprofiles" - - partitions = partition_api.PartitionController(from_ihosts=True) - "Expose partitions as a sub-element of iprofiles" - - _custom_actions = { - 'detail': ['GET'], - 'ifprofiles_list': ['GET'], - 'cpuprofiles_list': ['GET'], - 'memprofiles_list': ['GET'], - 'storprofiles_list': ['GET'], - 'import_profile': ['POST'], - } - - ############# - # INIT - ############# - def __init__(self, from_chassis=False): - self._from_chassis = from_chassis - - @staticmethod - def _iprofiles_get(chassis_id, marker, limit, sort_key, sort_dir): - limit = utils.validate_limit(limit) - sort_dir = utils.validate_sort_dir(sort_dir) - - marker_obj = None - if marker: - marker_obj = objects.host.get_by_uuid(pecan.request.context, - marker) - - ihosts = pecan.request.dbapi.ihost_get_list( - limit, marker_obj, - recordtype="profile", 
- sort_key=sort_key, - sort_dir=sort_dir) - - # The subqueries required to get the profiletype does not scale well, - # therefore the type is not defined when getting a generic list of - # profiles. The type will only be set on the type specific queries. - for host in ihosts: - host.profiletype = None - - return ihosts - - @staticmethod - def _interface_profile_list(marker, limit, sort_key, sort_dir, session): - limit = utils.validate_limit(limit) - sort_dir = utils.validate_sort_dir(sort_dir) - - marker_obj = None - if marker: - marker_obj = objects.host.get_by_uuid(pecan.request.context, - marker) - - profiles = pecan.request.dbapi.interface_profile_get_list( - limit, marker_obj, - sort_key=sort_key, - sort_dir=sort_dir, - session=session) - - return profiles - - @staticmethod - def _cpu_profile_list(marker, limit, sort_key, sort_dir, session): - limit = utils.validate_limit(limit) - sort_dir = utils.validate_sort_dir(sort_dir) - - marker_obj = None - if marker: - marker_obj = objects.host.get_by_uuid(pecan.request.context, - marker) - - profiles = pecan.request.dbapi.cpu_profile_get_list( - limit, marker_obj, - sort_key=sort_key, - sort_dir=sort_dir, - session=session) - - return profiles - - @staticmethod - def _memory_profile_list(marker, limit, sort_key, sort_dir, session): - limit = utils.validate_limit(limit) - sort_dir = utils.validate_sort_dir(sort_dir) - - marker_obj = None - if marker: - marker_obj = objects.host.get_by_uuid(pecan.request.context, - marker) - - profiles = pecan.request.dbapi.memory_profile_get_list( - limit, marker_obj, - sort_key=sort_key, - sort_dir=sort_dir, - session=session) - - return profiles - - @staticmethod - def _storage_profile_list(marker, limit, sort_key, sort_dir, session): - limit = utils.validate_limit(limit) - sort_dir = utils.validate_sort_dir(sort_dir) - - marker_obj = None - if marker: - marker_obj = objects.host.get_by_uuid(pecan.request.context, - marker) - - profiles = pecan.request.dbapi.storage_profile_get_list( - limit, marker_obj, - sort_key=sort_key, - sort_dir=sort_dir, - session=session) - - return profiles - - ############# - # REQUESTS - ############# - - @wsme_pecan.wsexpose([InterfaceProfile]) - def ifprofiles_list(self, marker=None, limit=None, - sort_key='id', sort_dir='asc'): - """Retrieve a list of interface profiles.""" - - # session is held for the duration of the profile list - session = pecan.request.dbapi.get_session() - - profiles = self._interface_profile_list(marker, limit, - sort_key, sort_dir, session) - - if_profiles = [] - for profile in profiles: - interfaces = [] - ports = [] - - for i in profile.interfaces: - interface = objects.interface.from_db_object(i) - ic = interface_api.Interface.convert_with_links(interface) - interfaces.append(ic) - - for p in profile.ports: - port = objects.ethernet_port.from_db_object(p) - pc = ethernet_port_api.EthernetPort.convert_with_links(port) - ports.append(pc) - - if_profiles.append( - InterfaceProfile(uuid=profile.uuid, - profilename=profile.hostname, - profiletype=constants.PROFILE_TYPE_INTERFACE, - ports=ports, - interfaces=interfaces)) - - LOG.debug("ifprofiles_list response result %s" % if_profiles) - - return if_profiles - - @wsme_pecan.wsexpose([CpuProfile]) - def cpuprofiles_list(self, marker=None, limit=None, - sort_key='id', sort_dir='asc'): - """Retrieve a list of cpu profiles.""" - - # session is held for the duration of the profile list - session = pecan.request.dbapi.get_session() - - profiles = self._cpu_profile_list(marker, limit, - sort_key, sort_dir, 
session) - - cpu_profiles = [] - for profile in profiles: - cpus = [] - nodes = [] - - for c in profile.cpus: - cpu = objects.cpu.from_db_object(c) - cc = cpu_api.CPU.convert_with_links(cpu) - cpus.append(cc) - - for n in profile.nodes: - node = objects.node.from_db_object(n) - nc = node_api.Node.convert_with_links(node) - nodes.append(nc) - - cpu_profiles.append( - CpuProfile(uuid=profile.uuid, - profilename=profile.hostname, - profiletype=constants.PROFILE_TYPE_CPU, - cpus=cpus, - nodes=nodes)) - - LOG.debug("cpuprofiles_list response result %s" % cpu_profiles) - - return cpu_profiles - - @wsme_pecan.wsexpose([MemoryProfile]) - def memprofiles_list(self, marker=None, limit=None, - sort_key='id', sort_dir='asc'): - """Retrieve a list of memory profiles.""" - - # session is held for the duration of the profile list - session = pecan.request.dbapi.get_session() - - profiles = self._memory_profile_list(marker, limit, - sort_key, sort_dir, session) - - memory_profiles = [] - for profile in profiles: - memory = [] - nodes = [] - - for m in profile.memory: - mem = objects.memory.from_db_object(m) - mc = memory_api.Memory.convert_with_links(mem) - memory.append(mc) - - for n in profile.nodes: - node = objects.node.from_db_object(n) - nc = node_api.Node.convert_with_links(node) - nodes.append(nc) - - memory_profiles.append( - MemoryProfile(uuid=profile.uuid, - profilename=profile.hostname, - profiletype=constants.PROFILE_TYPE_MEMORY, - memory=memory, - nodes=nodes)) - - LOG.debug("memprofiles_list response result %s" % memory_profiles) - - return memory_profiles - - @wsme_pecan.wsexpose([StorageProfile]) - def storprofiles_list(self, marker=None, limit=None, - sort_key='id', sort_dir='asc'): - """Retrieve a list of storage profiles.""" - - # session is held for the duration of the profile list - session = pecan.request.dbapi.get_session() - - profiles = self._storage_profile_list(marker, limit, - sort_key, sort_dir, session) - - stor_profiles = [] - for profile in profiles: - disks = [] - partitions = [] - stors = [] - lvgs = [] - pvs = [] - - for d in profile.disks: - disk = objects.disk.from_db_object(d) - dc = disk_api.Disk.convert_with_links(disk) - disks.append(dc) - - for part in profile.partitions: - partition = objects.partition.from_db_object(part) - partc = partition_api.Partition.convert_with_links(partition) - partitions.append(partc) - - for s in profile.stors: - stor = objects.storage.from_db_object(s) - sc = storage_api.Storage.convert_with_links(stor) - stors.append(sc) - - for p in profile.pvs: - pv = objects.pv.from_db_object(p) - pc = pv_api.PV.convert_with_links(pv) - pvs.append(pc) - - for l in profile.lvgs: - lvg = objects.lvg.from_db_object(l) - lc = lvg_api.LVG.convert_with_links(lvg) - lvgs.append(lc) - - profiletype = constants.PROFILE_TYPE_LOCAL_STORAGE \ - if lvgs else constants.PROFILE_TYPE_STORAGE - - stor_profiles.append( - StorageProfile(uuid=profile.uuid, - profilename=profile.hostname, - profiletype=profiletype, - disks=disks, - partitions=partitions, - stors=stors, - lvgs=lvgs, - pvs=pvs)) - - LOG.debug("storprofiles_list response result %s" % stor_profiles) - - return stor_profiles - - @wsme_pecan.wsexpose(ProfileCollection, six.text_type, six.text_type, int, - six.text_type, six.text_type) - def get_all(self, chassis_id=None, marker=None, limit=None, - sort_key='id', sort_dir='asc'): - """Retrieve a list of ihosts.""" - ihosts = self._iprofiles_get( - chassis_id, marker, limit, sort_key, sort_dir) - return ProfileCollection.convert_with_links(ihosts, limit, - 
sort_key=sort_key, - sort_dir=sort_dir) - - @wsme_pecan.wsexpose(ProfileCollection, six.text_type, six.text_type, int, - six.text_type, six.text_type) - def detail(self, chassis_id=None, marker=None, limit=None, - sort_key='id', sort_dir='asc'): - """Retrieve a list of ihosts with detail.""" - # /detail should only work against collections - parent = pecan.request.path.split('/')[:-1][-1] - if parent != "ihosts": - raise exception.HTTPNotFound - - ihosts = self._iprofiles_get( - chassis_id, marker, limit, sort_key, sort_dir) - resource_url = '/'.join(['ihosts', 'detail']) - return ProfileCollection.convert_with_links(ihosts, limit, - url=resource_url, - expand=True, - sort_key=sort_key, - sort_dir=sort_dir) - - @wsme_pecan.wsexpose(Profile, six.text_type) - def get_one(self, uuid): - """Retrieve information about the given ihost.""" - if self._from_chassis: - raise exception.OperationNotPermitted - - rpc_ihost = objects.host.get_by_uuid(pecan.request.context, - uuid) - rpc_ihost.profiletype = _get_profiletype(rpc_ihost) - return Profile.convert_with_links(rpc_ihost) - - @cutils.synchronized(LOCK_NAME) - @wsme_pecan.wsexpose(Profile, body=Profile) - def post(self, iprofile): - """Create a new ihost profile.""" - if self._from_chassis: - raise exception.OperationNotPermitted - - system_mode = utils.get_system_mode() - if system_mode == constants.SYSTEM_MODE_SIMPLEX: - raise wsme.exc.ClientSideError(_( - "Creating a profile on a simplex system is not allowed.")) - - try: - # Ensure recordtype is a profile - profile_dict = iprofile.as_dict() - recordtype_profile = {'recordtype': 'profile'} - profile_dict.update(recordtype_profile) - - # Parent host - ihost_uuid = '' - if 'ihost_uuid' in profile_dict: - ihost_uuid = profile_dict['ihost_uuid'] - - if 'profilename' in profile_dict and profile_dict['profilename']: - profile_dict['hostname'] = profile_dict['profilename'] - del profile_dict['profilename'] - - # Semantic checks - _check_profilename(profile_dict['hostname']) - - from_ihost = pecan.request.dbapi.ihost_get(ihost_uuid) - - # Before proceeding, check if the host is provisioned. - # Adding a profile while the host hasn't been provisioned - # will result in an entry being created in the ihost - # table for this profile, but no corresponding - # entries in the {storage, cpu, interface, etc} tables - if from_ihost.invprovision != constants.PROVISIONED: - raise wsme.exc.ClientSideError(_("Cannot create profile %s " - "until host %s is unlocked for the first time." % - (profile_dict['hostname'], from_ihost.hostname))) - - profile_dict['subfunctions'] = from_ihost.subfunctions - - profiletype = '' - if 'profiletype' in profile_dict and profile_dict['profiletype']: - profiletype = profile_dict['profiletype'] - if profiletype == constants.PROFILE_TYPE_STORAGE: - stor_model = ceph.get_ceph_storage_model() - if constants.WORKER in from_ihost.subfunctions: - # combo has no ceph - profiletype = constants.PROFILE_TYPE_LOCAL_STORAGE - LOG.info("No ceph backend for stor profile, assuming " - "%s" % profiletype) - elif not StorageBackendConfig.has_backend_configured( - pecan.request.dbapi, - constants.CINDER_BACKEND_CEPH - ): - raise wsme.exc.ClientSideError(_("Storage profiles " - "not applicable for %s with non Ceph backend." 
% - from_ihost.hostname)) - elif (from_ihost.personality == constants.CONTROLLER and - stor_model != constants.CEPH_CONTROLLER_MODEL): - raise wsme.exc.ClientSideError(_("Storage profiles " - "not applicable for %s as storage deployment " - "model is: %s" % - (from_ihost.hostname, stor_model))) - - # Create profile - LOG.debug("iprofileihost is: %s " % profile_dict) - new_ihost = pecan.request.dbapi.ihost_create(profile_dict) - - try: - profile_copy_data(from_ihost, new_ihost, profiletype) - except wsme.exc.ClientSideError as cse: - pecan.request.dbapi.ihost_destroy(new_ihost.id) - LOG.exception(cse) - raise cse - except Exception as e: - pecan.request.dbapi.ihost_destroy(new_ihost.id) - LOG.exception(e) - raise wsme.exc.ClientSideError(_("Failed to copy data to profile")) - - except exception.SysinvException as e: - LOG.exception(e) - raise wsme.exc.ClientSideError(_("Invalid data")) - - return iprofile.convert_with_links(new_ihost) - - @cutils.synchronized(LOCK_NAME) - @wsme_pecan.wsexpose(Profile, six.text_type, body=[six.text_type]) - def patch(self, uuid, patch): - """Update an existing iprofile. - """ - - iHost = objects.host.get_by_uuid(pecan.request.context, uuid) - - if iHost['recordtype'] != "profile": - raise wsme.exc.ClientSideError(_("Cannot update " - "non profile record type")) - - iHost_dict = iHost.as_dict() - utils.validate_patch(patch) - patch_obj = jsonpatch.JsonPatch(patch) - - # Prevent auto populated fields from being updated - state_rel_path = ['/uuid', '/id', '/recordtype'] - if any(p['path'] in state_rel_path for p in patch_obj): - raise wsme.exc.ClientSideError(_("The following fields cannot be " - "modified: uuid, id, recordtype")) - - try: - # Update profile - patched_iHost = jsonpatch.apply_patch(iHost_dict, - patch_obj) - except jsonpatch.JsonPatchException as e: - LOG.exception(e) - raise wsme.exc.ClientSideError(_("Patching Error: %s") % e) - - # Semantic checks - _check_profilename(patched_iHost['hostname']) - - # Once the host has been provisioned lock down additional fields - provision_state = [constants.PROVISIONED, constants.PROVISIONING] - if iHost['invprovision'] in provision_state: - state_rel_path = ['/hostname', '/recordtype'] - if any(p['path'] in state_rel_path for p in patch_obj): - raise wsme.exc.ClientSideError( - _("The following fields cannot be modified because this " - "host has been configured: hostname, recordtype ")) - - try: - # Update only the fields that have changed - for field in objects.profile.fields: - if iHost[field] != patched_iHost[field]: - iHost[field] = patched_iHost[field] - - iHost.save() - return Profile.convert_with_links(iHost) - except exception.HTTPNotFound: - msg = _("Profile update failed: %s : patch %s" - % (patched_iHost['hostname'], patch)) - raise wsme.exc.ClientSideError(msg) - - @cutils.synchronized(LOCK_NAME) - @wsme_pecan.wsexpose(None, six.text_type, status_code=204) - def delete(self, ihost_id): - """Delete an ihost profile. 
- """ - - ihost = objects.host.get_by_uuid(pecan.request.context, - ihost_id) - - # Profiles do not require un/configuration or mtc notification - if ihost.recordtype == "profile": - try: - profile_delete_data(ihost) - except wsme.exc.ClientSideError as cse: - LOG.exception(cse) - raise cse - except Exception as e: - LOG.exception(e) - raise wsme.exc.ClientSideError(_("Failed to delete data from profile")) - - pecan.request.dbapi.ihost_destroy(ihost_id) - else: - raise wsme.exc.ClientSideError(_("Delete not allowed - recordtype " - "is not a profile.")) - - @cutils.synchronized(LOCK_NAME) - @expose('json') - def import_profile(self, file): - class ProfileObj(object): # noqa: F823 - display = "" - proc = None - - def __init__(self, display, proc): - self.display = display - self.proc = proc - - results = [] - file = pecan.request.POST['file'] - contents = file.file.read() - try: - # validate against profileschema.xsd - with open('/etc/sysinv/profileSchema.xsd', 'r') as f: - schema_root = etree.XML(f.read()) - - schema = etree.XMLSchema(schema_root) - xmlparser = etree.XMLParser(schema=schema) - - try: - etree.fromstring(contents, xmlparser) - except etree.XMLSchemaError as e: - return [{'result': 'Invalid', - 'type': '', - 'name': '', - 'msg': "Profile is invalid", - 'detail': e.message}] - - root = et.fromstring(contents) - except Exception as e: - LOG.exception(e) - error = six.text_type(e) - return [{'result': 'Invalid', - 'type': '', 'name': '', - 'msg': 'Profile is invalid', - 'detail': error}] - - profile_types = ["cpuProfile", "memoryProfile", "interfaceProfile", - "storageProfile", "localstorageProfile"] - profile_lookup = { - "cpuProfile": ProfileObj("CPU Profile", _create_cpu_profile), - "interfaceProfile": ProfileObj("Interface profile", - _create_if_profile), - "memoryProfile": ProfileObj("Memory profile", _create_mem_profile), - "storageProfile": ProfileObj("Storage profile", - _create_storage_profile), - "localstorageProfile": ProfileObj("Local Storage profile", - _create_localstorage_profile) - } - - hosts = pecan.request.dbapi.ihost_get_list(recordtype=None) - hostnames = [] - for host in hosts: - hostnames.append(host.hostname) - - for profile_node in root: - tag = profile_node.tag - profile_name = profile_node.get("name") - - if tag not in profile_types: - results.append({'result': 'Error', - 'type': 'unknown', - 'name': '', - 'msg': 'error: profile type %s is unrecognizable.' % tag, - 'detail': None}) - else: - object = profile_lookup[tag] - if not profile_name: - results.append({'result': 'Error', - 'type': object.display, - 'name': '', - 'msg': 'error: profile name is missing', - 'detail': None}) - else: - if profile_name not in hostnames: - hostnames.append(profile_name) - - try: - result, msg, detail = \ - object.proc(profile_name, profile_node) - results.append({'result': result, - 'type': object.display, - 'name': profile_name, - 'msg': msg, - 'detail': detail}) - except Exception as e: - results.append({'result': "Error", - 'type': object.display, - 'name': profile_name, - 'msg': _('error: failed to import %s %s.' 
% ( - object.display, profile_name - )), - 'detail': str(e) - }) - - else: - results.append({'result': "Warning", - 'type': object.display, - 'msg': _('warning: %s %s already exists and is not imported.') % - (object.display, profile_name), - 'detail': None}) - return results - - -def _create_cpu_profile(profile_name, profile_node): - class CoreFunction(object): - def __init__(self, p_index, c_index, t_index=0): - self.processor_index = p_index - self.core_index = c_index - self.thread_index = t_index - self.core_function = constants.APPLICATION_FUNCTION - - # The xml is validated against schema. - # Validations that are covered by the schema are not checked below. - values = dict(recordtype="profile", hostname=profile_name) - - processor = profile_node.find('processor') - number_of_cpu = 0 - node = processor.find('numberOfProcessor') - if node is not None: - number_of_cpu = int(node.text) - node = processor.find('coresPerProcessor') - cores_per_cpu = int(node.text) - - hyper_threading = False - node = processor.find('hyperThreading') - if node is not None: - hyper_threading = (node.text == 'true') - - if hyper_threading: - max_thread = 2 - else: - max_thread = 1 - - platform_cores = [[CoreFunction(i, j) for j in range(cores_per_cpu)] for i in range(number_of_cpu)] - platform_core_index = [0 for i in range(number_of_cpu)] - - core_function_list = [{'node_name': 'platformCores', 'node_function': constants.PLATFORM_FUNCTION}, - {'node_name': 'vswitchCores', 'node_function': constants.VSWITCH_FUNCTION}, - {'node_name': 'sharedCores', 'node_function': constants.SHARED_FUNCTION}] - - try: - for core_function in core_function_list: - function_node = profile_node.find(core_function['node_name']) - function_name = core_function['node_function'] - if function_node is None: - continue - for processor_node in function_node.findall('processor'): - p_idx = int(processor_node.get('index')) - if p_idx >= number_of_cpu: - raise profile_utils.InvalidProfileData('Invalid processor index %d. 
' - 'Valid range is 0 to %d (numberOfProcessor - 1)' % - (p_idx, number_of_cpu - 1)) - cores_node = processor_node.get('numberOfCores') - cores = int(cores_node) - count = 0 - for count in range(cores): - platform_cores[p_idx][platform_core_index[p_idx]].core_function = function_name - - platform_core_index[p_idx] = platform_core_index[p_idx] + 1 - if platform_core_index[p_idx] >= cores_per_cpu: - raise profile_utils.InvalidProfileData("Too many core functions assigned to a processor") - - except profile_utils.InvalidProfileData as e: - return "Error", 'error: CPU profile %s is invalid' % profile_name, six.text_type(e) - - try: - ihost = pecan.request.dbapi.ihost_create(values) - except dbException.DBDuplicateEntry as e: - LOG.exception(e) - return "Warning", _('warning: CPU profile %s already exists and is not imported.') % profile_name, None - except Exception as e: - LOG.exception(e) - return "Error", _('error: importing CPU profile %s failed.') % profile_name, six.text_type(e) - - iprofile_id = ihost['id'] - - cpu_idx = 0 - node_idx = 0 - - try: - for cpulist in platform_cores: - ndict = {'numa_node': node_idx} - new_node = pecan.request.dbapi.inode_create(iprofile_id, ndict) - for core in cpulist: - for thread_id in range(max_thread): - cdict = {"cpu": cpu_idx, - "core": core.core_index, - "thread": thread_id, - "allocated_function": core.core_function, - 'forinodeid': new_node['id']} - pecan.request.dbapi.icpu_create(iprofile_id, cdict) - cpu_idx = cpu_idx + 1 - - node_idx = node_idx + 1 - except Exception as exc: - cpuprofile_delete_data(ihost) - pecan.request.dbapi.ihost_destroy(iprofile_id) - LOG.exception(exc) - raise exc - - return "Success", _('CPU profile %s is successfully imported.') % profile_name, None - - -def _create_route(ifUuid, ifId, routes): - # ['interface_uuid', 'network', 'prefix', - # 'gateway', 'metric'] - for r in routes: - r['interface_id'] = ifId - pecan.request.dbapi.route_create(ifId, r) - - -def _create_if_profile(profile_name, profile_node): - ethInterfaces = [] - interfaceNames = [] - detail_msg = None - - try: - for ethIfNode in profile_node.findall("ethernetInterface"): - ethIf = profile_utils.EthInterface(ethIfNode) - ethIf.validate() - if ethIf.name not in interfaceNames: - interfaceNames.append(ethIf.name) - ethInterfaces.append(ethIf) - else: - msg = _('Interface name must be unique (%s)' % ethIf.name) - raise profile_utils.InvalidProfileData(msg) - - aeInterfaces = [] - for aeIfNode in profile_node.findall("aeInterface"): - aeIf = profile_utils.AeInterface(aeIfNode) - if aeIf.name not in interfaceNames: - interfaceNames.append(aeIf.name) - aeInterfaces.append(aeIf) - else: - msg = _('Interface name must be unique (%s)' % aeIf.name) - raise profile_utils.InvalidProfileData(msg) - - vlanInterfaces = [] - for vlanIfNode in profile_node.findall("vlanInterface"): - vlanIf = profile_utils.VlanInterface(vlanIfNode) - if vlanIf.name not in interfaceNames: - interfaceNames.append(vlanIf.name) - vlanInterfaces.append(vlanIf) - else: - msg = _('Interface name must be unique (%s)' % aeIf.name) - raise profile_utils.InvalidProfileData(msg) - - ethIfMap = [] - aeIfMap = {} - vlanMap = [] - allProviderNetworks = [] - - def _verifyProviderNetworks(pnetworks): - for pnet in pnetworks: - if pnet not in allProviderNetworks: - allProviderNetworks.append(pnet) - else: - msg = _('provider network %s is already assigned to the other interface.') % pnet - raise profile_utils.InvalidProfileData(msg) - - cnt_port = True - cnt_pciaddr = True - for ethIf in ethInterfaces: - 
if not ethIf.port: - cnt_port = False - if not ethIf.pciAddress: - cnt_pciaddr = False - ethIfMap.append(ethIf.name) - _verifyProviderNetworks(ethIf.providerNetworks) - - if cnt_pciaddr and cnt_port: - detail_msg = _('Eth port PCI address and name are both provided, ' - 'only PCI address will be used for port matching') - elif cnt_pciaddr: - detail_msg = _('PCI address will be used for port matching') - elif cnt_port: - detail_msg = _('Eth port name will be used for port matching') - else: - raise profile_utils.InvalidProfileData(_('pciAddress must be provided for each Eth port.' - 'Name for each Eth port can be provided as alternative.')) - - for aeIf in aeInterfaces: - aeIfMap[aeIf.name] = aeIf - _verifyProviderNetworks(aeIf.providerNetworks) - - for vlanIf in vlanInterfaces: - vlanMap.append(vlanIf.name) - _verifyProviderNetworks(vlanIf.providerNetworks) - - for ae in aeInterfaces: - ae.validateWithIfNames(interfaceNames) - - for vlan in vlanInterfaces: - vlan.validateWithIfNames(interfaceNames, aeIfMap, vlanMap, ethIfMap) - - except profile_utils.InvalidProfileData as ie: - return "Error", _('error: Interface profile %s is invalid.') % profile_name, six.text_type(ie) - - values = {'recordtype': 'profile', 'hostname': profile_name} - try: - ihost = pecan.request.dbapi.ihost_create(values) - except dbException.DBDuplicateEntry as e: - LOG.exception(e) - return "Warning", _('warning: interface profile %s already exists and is not imported.') % profile_name, None - except Exception as e: - LOG.exception(e) - return "Error", _('error: importing interface profile %s failed.') % profile_name, six.text_type(e) - - iprofile_id = ihost['id'] - try: - # create interfaces in dependency order - # eth-interfaces always go first - newIfList = [] - # TODO: get mtu from eth ports as default mtu - for ethIf in ethInterfaces: - nt, providernets = ethIf.getNetworks() - ipv4_mode = ethIf.ipv4Mode - ipv6_mode = ethIf.ipv6Mode - idict = {'ifname': ethIf.name, - 'iftype': 'ethernet', - 'imtu': ethIf.mtu, - 'networktype': nt, - 'forihostid': iprofile_id, - 'datanetworks': providernets, - 'ipv4_mode': ipv4_mode['mode'], - 'ipv6_mode': ipv6_mode['mode'], - 'ipv4_pool': ipv4_mode['pool'], - 'ipv6_pool': ipv6_mode['pool'], - 'sriov_numvfs': ethIf.virtualFunctions, - 'sriov_vf_driver': ethIf.virtualFunctionDriver, - 'max_tx_rate': ethIf.maxTxRate, - 'interface_profile': True - } - newIf = interface_api._create(idict, from_profile=True) - newIf.ifData = ethIf - newIfList.append(newIf) - ifId = newIf.id - - pdict = { - 'host_id': iprofile_id, - 'interface_id': ifId, - 'name': ethIf.port, - 'pciaddr': ethIf.pciAddress, - 'pclass': ethIf.pclass, - 'pdevice': ethIf.pdevice, - 'mtu': ethIf.mtu - } - - pecan.request.dbapi.ethernet_port_create(iprofile_id, pdict) - - routes = ethIf.routes - _create_route(newIf.uuid, newIf.id, routes) - - for aeIf in aeInterfaces: - nt, providernets = aeIf.getNetworks() - ipv4_mode = aeIf.ipv4Mode['mode'] - ipv6_mode = aeIf.ipv6Mode['mode'] - ipv4_pool = aeIf.ipv4Mode['pool'] - ipv6_pool = aeIf.ipv6Mode['pool'] - idict = {'ifname': aeIf.name, - 'iftype': 'ae', - 'networktype': nt, - 'aemode': aeIf.aeMode, - 'txhashpolicy': aeIf.txPolicy, - 'primary_reselect': aeIf.primary_reselect, - 'forihostid': iprofile_id, - 'datanetworks': providernets, - 'ipv4_mode': ipv4_mode, - 'ipv6_mode': ipv6_mode, - 'ipv4_pool': ipv4_pool, - 'ipv6_pool': ipv6_pool, - 'imtu': aeIf.mtu, - 'sriov_numvfs': ethIf.virtualFunctions, - 'sriov_vf_driver': ethIf.virtualFunctionDriver, - 'max_tx_rate': ethIf.maxTxRate, - 
'interface_profile': True - } - - newIf = interface_api._create(idict, from_profile=True) - newIf.ifData = aeIf - newIfList.append(newIf) - routes = aeIf.routes - _create_route(newIf.uuid, newIf.id, routes) - - for vlanIf in vlanInterfaces: - nt, providernets = vlanIf.getNetworks() - ipv4_mode = vlanIf.ipv4Mode['mode'] - ipv6_mode = vlanIf.ipv6Mode['mode'] - ipv4_pool = vlanIf.ipv4Mode['pool'] - ipv6_pool = vlanIf.ipv6Mode['pool'] - idict = {'ifname': vlanIf.name, - 'iftype': 'vlan', - 'networktype': nt, - 'vlan_id': vlanIf.vlanId, - 'forihostid': iprofile_id, - 'datanetworks': providernets, - 'ipv4_mode': ipv4_mode, - 'ipv6_mode': ipv6_mode, - 'ipv4_pool': ipv4_pool, - 'ipv6_pool': ipv6_pool, - 'imtu': vlanIf.mtu, - 'sriov_numvfs': ethIf.virtualFunctions, - 'sriov_vf_driver': ethIf.virtualFunctionDriver, - 'max_tx_rate': ethIf.maxTxRate, - 'interface_profile': True - } - - newIf = interface_api._create(idict, from_profile=True) - newIf.ifData = vlanIf - newIfList.append(newIf) - routes = vlanIf.routes - _create_route(newIf.uuid, newIf.id, routes) - - # Generate the uses/used_by relationships - ifname_to_if = {} - used_by_list = {} - for i in newIfList: - ifname_to_if[i.ifname] = i - - for i in newIfList: - ifData = i.ifData - if hasattr(ifData, 'usesIf'): - uses_list = ifData.usesIf - for usesif in uses_list: - uuid = ifname_to_if[i.ifname] - if not hasattr(used_by_list, usesif): - used_by_list[usesif] = [uuid] - else: - used_by_list[usesif].append(uuid) - - for i in newIfList: - ifData = i.ifData - if not hasattr(ifData, 'usesIf'): - continue - - uses_uuid_list = [] - uses_list = ifData.usesIf - for usesif in uses_list: - mapIf = ifname_to_if[usesif] - uuid = mapIf.uuid - uses_uuid_list.append(uuid) - - idict = {} - idict['uses'] = uses_uuid_list - if hasattr(used_by_list, i.ifname): - idict['used_by'] = used_by_list[i.ifname] - - try: - pecan.request.dbapi.iinterface_update(i.uuid, idict) - except Exception: - raise wsme.exc.ClientSideError(_("Failed to link interface uses.")) - except Exception as exc: - ihost.ethernet_ports = \ - pecan.request.dbapi.ethernet_port_get_by_host(ihost.uuid) - - ifprofile_delete_data(ihost) - pecan.request.dbapi.ihost_destroy(iprofile_id) - LOG.exception(exc) - raise exc - - return "Success", _('Interface profile %s is successfully imported.') % profile_name, detail_msg - - -def _create_mem_profile(profile_name, profile_node): - class MemoryAssignment(object): - def __init__(self, processor_idx, size): - self.processor_idx = processor_idx - self.size = size - - # The xml is validated against schema. - # Validations that are covered by the schema are not checked below. - values = dict(recordtype="profile", hostname=profile_name) - - node = profile_node.find('numberOfProcessor') - number_of_cpu = int(node.text) - - def get_mem_assignment(profile_node, name): - mem_node = profile_node.find(name) - if node is None: - return - - mem_assignments = [] - processor_indexes = [] - for processor_node in mem_node.findall('processor'): - p_idx = int(processor_node.get('index')) - if p_idx >= number_of_cpu: - msg = _('Invalid processor index {0}. ' - 'Valid range is 0 to {1} (numberOfProcessor - 1)')\ - .format(p_idx, number_of_cpu - 1) - raise profile_utils.InvalidProfileData(msg) - - if p_idx in processor_indexes: - msg = _('Invalid processor index {0}, duplicated. 
').format(p_idx) - raise profile_utils.InvalidProfileData(msg) - - processor_indexes.append(p_idx) - mem_size = int(processor_node.get('size')) - - mem_assignments.append(MemoryAssignment(p_idx, mem_size)) - return mem_assignments - - def get_mem_size(mem_assignments, processor_idx): - for mem_assignment in mem_assignments: - if mem_assignment.processor_idx == processor_idx: - return mem_assignment.size - - return 0 - - try: - platform_reserved = get_mem_assignment(profile_node, "platformReservedMiB") - vm_hp_2m = get_mem_assignment(profile_node, "vmHugePages2M") - vm_hp_1g = get_mem_assignment(profile_node, "vmHugePages1G") - vs_hp_nr = get_mem_assignment(profile_node, "vsHugePagesNr") - vs_hp_sz = get_mem_assignment(profile_node, "vsHugePagesSz") - except profile_utils.InvalidProfileData as e: - return "Error", _('error: CPU profile %s is invalid') % profile_name, six.text_type(e) - - try: - ihost = pecan.request.dbapi.ihost_create(values) - except dbException.DBDuplicateEntry as e: - LOG.exception(e) - return "Warning", _('warning: Memory profile %s already exists and is not imported.') % profile_name, None - except Exception as e: - LOG.exception(e) - return "Error", _('error: Creating memory profile %s failed.') % profile_name, six.text_type(e) - - iprofile_id = ihost['id'] - - node_idx = 0 - - try: - for cpulist in range(number_of_cpu): - ndict = {'numa_node': node_idx} - new_node = pecan.request.dbapi.inode_create(iprofile_id, ndict) - - mdict = {} - mdict['forihostid'] = iprofile_id - mdict['forinodeid'] = new_node['id'] - mdict['platform_reserved_mib'] = get_mem_size(platform_reserved, node_idx) - mdict['vm_hugepages_nr_2M_pending'] = get_mem_size(vm_hp_2m, node_idx) - mdict['vm_hugepages_nr_1G_pending'] = get_mem_size(vm_hp_1g, node_idx) - mdict['vswitch_hugepages_reqd'] = get_mem_size(vs_hp_nr, node_idx) - mdict['vswitch_hugepages_size_mib'] = get_mem_size(vs_hp_sz, node_idx) - pecan.request.dbapi.imemory_create(iprofile_id, mdict) - - node_idx += 1 - except Exception as exc: - memoryprofile_delete_data(ihost) - pecan.request.dbapi.ihost_destroy(iprofile_id) - LOG.exception(exc) - raise exc - - return "Success", _('Memory profile %s is successfully imported.') % profile_name, None - - -def _create_storage_profile(profile_name, profile_node): - if not StorageBackendConfig.has_backend_configured( - pecan.request.dbapi, - constants.CINDER_BACKEND_CEPH - ): - return "Error", _("error: Storage profile can only be imported into " - "a system with Ceph backend."), None - # The xml is validated against schema. - # Validations that are covered by the schema are not checked below. 
- values = dict(recordtype="profile", hostname=profile_name) - - disks = profile_node.findall('disk') - dev_paths = [] - - # Any supported storage functions should be appended here - supportedFuncs = [constants.STOR_FUNCTION_OSD, - constants.STOR_FUNCTION_JOURNAL] - - # Gather the storage tiers and build a map for the OSD create call - tier_map = {} - tiers = pecan.request.dbapi.storage_tier_get_all(type=constants.SB_TIER_TYPE_CEPH) - for t in tiers: - tier_map[t.name] = t - - journal_disks = [] - for disk in disks: - dev_path = disk.get('path') - dev_func = disk.get('volumeFunc') - # Convert from GiB to MiB - dev_size = int(disk.get('size')) * 1024 - journal_size = int(disk.get('journalSize', '0')) * 1024 - tier = disk.get('tier', constants.SB_TIER_DEFAULT_NAMES[ - constants.SB_TIER_TYPE_CEPH]) - if not dev_path: - return "Error", _('error: Storage profile %s is invalid') % \ - profile_name, _('path is empty.') - if dev_func not in supportedFuncs: - return "Error", _('error: Storage profile %s is invalid') % \ - profile_name, \ - _('volumeFunc (%s) is not supported.') % dev_func - if dev_path not in dev_paths: - dev_paths.append(dev_paths) - else: - return "Error", _('error: Storage profile %s is invalid') % profile_name, \ - _('Device %s is duplicated') % dev_path - if journal_size: - if journal_size < CONF.journal.journal_min_size and \ - journal_size > CONF.journal.journal_max_size: - return "Error", \ - _('error: Storage profile %s' - ' is invalid') % profile_name, \ - _('device path %(dev)s journal size of %(size)s' - ' is invalid.') % {'dev': dev_path, - 'size': journal_size // 1024}, \ - _('size should be between %(min)s and ' - ' %(max)s.') % {'min': CONF.journal.journal_min_size // 1024, - 'max': CONF.journal.journal_max_size // 1024} - - if dev_func == constants.STOR_FUNCTION_JOURNAL: - journal_disks.append(dev_path) - - if dev_func == constants.STOR_FUNCTION_OSD: - if tier not in tier_map: - return "Error", _('error: Storage profile %s is invalid') % profile_name, \ - _('Storage tier %s is not present in this cluster') % tier - - # Validate journal locations - for disk in disks: - dev_path = disk.get('path') - dev_func = disk.get('volumeFunc') - if len(journal_disks) > 1 and dev_func == constants.STOR_FUNCTION_OSD: - journal_location = disk.get('journalLocation') - if not journal_location: - return "Error", \ - _('error: Storage profile %s' - ' is invalid') % profile_name, \ - _('journal location not defined for %s and multiple ' - 'journal drives are available.') % dev_path - elif journal_location not in journal_disks: - return "Error", \ - _('error: Storage profile %s' - ' is invalid') % profile_name, \ - _('journal location for %s not on a ' - 'journal function device.') % dev_path - try: - ihost = pecan.request.dbapi.ihost_create(values) - except dbException.DBDuplicateEntry as e: - LOG.exception(e) - return "Warning", _('warning: Storage profile %s already exists and is not imported.') % profile_name, None - except Exception as e: - LOG.exception(e) - return "Error", _('error: importing storage profile %s failed.') % profile_name, six.text_type(e) - - profile_id = ihost['id'] - - try: - # First create the journals and keep (dev_name, uuid) associations - journals = {} - for disk in disks: - dev_func = disk.get('volumeFunc') - if dev_func == constants.STOR_FUNCTION_JOURNAL: - dev_path = disk.get('path') - # Convert disk size from GiB to MiB - dev_size = int(disk.get('size')) * 1024 - ddict = {'device_path': dev_path, - 'size_mib': dev_size, - 'forihostid': profile_id, - 
'device_type': constants.DEVICE_TYPE_SSD} - newdisk = pecan.request.dbapi.idisk_create(profile_id, ddict) - - # create stor - sdict = {'function': dev_func, 'idisk_uuid': newdisk.uuid, 'forihostid': profile_id} - # this goes through istor semantic checks versus - # just adding to db (by calling dbapi.istor_create) - newstor = storage_api._create(sdict, iprofile=True) - journals[dev_path] = newstor.uuid - - # Create the other functions - for disk in disks: - dev_path = disk.get('path') - dev_func = disk.get('volumeFunc') - # convert disk size from GiB to MiB - dev_size = int(disk.get('size')) * 1024 - tier = disk.get('tier', constants.SB_TIER_DEFAULT_NAMES[ - constants.SB_TIER_TYPE_CEPH]) - - if dev_func != constants.STOR_FUNCTION_JOURNAL: - ddict = {'device_path': dev_path, - 'size_mib': dev_size, - 'forihostid': profile_id} - newdisk = pecan.request.dbapi.idisk_create(profile_id, ddict) - - # create stor - sdict = {'function': dev_func, 'idisk_uuid': newdisk.uuid, 'forihostid': profile_id} - if dev_func == constants.STOR_FUNCTION_OSD: - default_size = CONF.journal.journal_default_size - if len(journals) > 0: - # we don't expect collocated journals - if disk.get('journalSize'): - journal_size = int(disk.get('journalSize')) * 1024 - else: - journal_size = default_size - sdict['journal_size_mib'] = journal_size - if len(journals) > 1: - # multiple journal disks are available, use - # location, otherwise just do the default - # (journal will be placed on first disk) - location_dev = disk.get('journalLocation') - location_uuid = journals[location_dev] - sdict['journal_location'] = location_uuid - else: - # get the first journal - journal = journals[list(journals.keys())[0]] - sdict['journal_location'] = journal - else: - # journal is collocated - sdict['journal_size_mib'] = default_size - - sdict['fortierid'] = tier_map[tier].id - - # this goes through istor semantic checks versus - # just adding to db (by calling dbapi.istor_create) - newstor = storage_api._create(sdict, iprofile=True) - except Exception as exc: - storprofile_delete_data(ihost) - pecan.request.dbapi.ihost_destroy(profile_id) - LOG.exception(exc) - raise exc - - return "Success", _('Storage profile %s is successfully imported.') % profile_name, None - - -def _create_localstorage_profile(profile_name, profile_node): - """ Validate and create the localstorage profile from xml. - - The xml is validated against xsd schema. - """ - values = dict(recordtype="profile", - hostname=profile_name, - subfunctions=constants.WORKER) - - disks = profile_node.findall('disk') - all_ilvg_nodes = profile_node.findall('lvg') # should only be ONE ? - # ipv_nodes = profile_node.findall('pv') # can be multiple, base this on disks - dev_paths = [] - - prohibitedFuncs = ['osd'] # prohibited volumeFunc must be appended here - ilvgs_local = [ilvg for ilvg in all_ilvg_nodes if - ilvg.get('lvm_vg_name') == constants.LVG_NOVA_LOCAL] - - if not disks: - return ("Error", _('error: Local Storage profile %s is invalid') % - profile_name, _('No disk provided in profile.')) - - if not ilvgs_local: - return ("Error", _('error: Local Storage profile %s is invalid') % - profile_name, _('No lvg nova-local (logical volume group) ' - 'in profile.')) - else: - nova_local_nodes_len = len(ilvgs_local) - if nova_local_nodes_len > 1: - return ("Error", _('error: Local Storage profile %s is invalid') % - profile_name, _('Currently only one nova-local lvg ' - 'is allowed per host. Defined %s in %s.' 
% - (nova_local_nodes_len, profile_name))) - - for disk in disks: - dev_path = disk.get('path') - # Convert disk size from GiB to MiB - dev_size = int(disk.get('size')) * 1024 - dev_func = disk.get('volumeFunc') - - if dev_func and dev_func in prohibitedFuncs: - return ("Error", _('error: Local Storage profile %s is invalid') % - profile_name, _('dev_func %s is not required.') % dev_func) - - if not dev_path: - return ("Error", _('error: Local Storage profile %s is invalid') % - profile_name, _('path is empty.')) - - if dev_path not in dev_paths: - dev_paths.append(dev_path) - else: - return ("Error", _('error: Local Storage profile %s is invalid') % - profile_name, _('Device %s is duplicated') % dev_path) - - try: - ihost = pecan.request.dbapi.ihost_create(values) - except dbException.DBDuplicateEntry as e: - LOG.exception(e) - return ("Warning", _('warning: Local Storage profile %s already ' - 'exists and is not imported.') % profile_name, None) - except Exception as e: - LOG.exception(e) - return ("Error", _('error: importing Local Storage profile %s ' - 'failed.') % profile_name, six.text_type(e)) - - profile_id = ihost.id - try: - capabilities_dict = {} - - # create profile ilvg - lvgdict = {'capabilities': capabilities_dict, - 'lvm_vg_name': constants.LVG_NOVA_LOCAL, - 'forihostid': profile_id} - # this goes through ilvg semantic checks versus - # just adding to db (by calling dbapi.ilvg_create) - ilvg_pf = lvg_api._create(lvgdict, iprofile=True) - - for disk in disks: - dev_path = disk.get('path') - dev_size = int(disk.get('size')) * 1024 - - ddict = {'device_path': dev_path, - 'size_mib': dev_size, - 'forihostid': profile_id} - disk_pf = pecan.request.dbapi.idisk_create(profile_id, ddict) - - # create profile physical volume. nova-local:pv can be 1:n. - pvdict = {'disk_or_part_device_path': dev_path, - 'lvm_vg_name': ilvg_pf.lvm_vg_name, - 'disk_or_part_uuid': disk_pf.uuid, - 'forihostid': profile_id, - 'forilvgid': ilvg_pf.id} - - pv_api._create(pvdict, iprofile=True) - - except wsme.exc.ClientSideError as cse: - pecan.request.dbapi.ihost_destroy(ihost.uuid) - LOG.exception(cse) - return "Fail", _('Local Storage profile %s not imported.') % profile_name, str(cse) - - except Exception as exc: - pecan.request.dbapi.ihost_destroy(profile_id) - LOG.exception(exc) - return "Fail", _('Local Storage profile %s not imported.') % profile_name, str(exc) - - return "Success", _('Local Storage profile %s successfully imported.') % profile_name, None - - -################### -# CHECK -################### -def _check_profilename(profilename): - # Check if profile name already exists - iprofiles = pecan.request.dbapi.ihost_get_list(recordtype="profile") - for profile in iprofiles: - if profile.hostname == profilename: - raise wsme.exc.ClientSideError(_("Profile name already exists: %s." - % profilename)) - - # Check if profile name = hostname - ihosts = pecan.request.dbapi.ihost_get_list(recordtype="standard") - for host in ihosts: - if host.hostname == profilename: - raise wsme.exc.ClientSideError(_("Profile name must be different " - "than host name. 
%s" % profilename)) - - return True - - -def _get_profiletype(profile): - profile_id = profile['id'] - - profile.cpus = pecan.request.dbapi.icpu_get_by_ihost(profile_id) - if profile.cpus: - profile.nodes = pecan.request.dbapi.inode_get_by_ihost(profile_id) - return constants.PROFILE_TYPE_CPU - - profile.ethernet_ports = pecan.request.dbapi.ethernet_port_get_by_host( - profile_id) - if profile.ethernet_ports: - return constants.PROFILE_TYPE_INTERFACE - - profile.memory = pecan.request.dbapi.imemory_get_by_ihost(profile_id) - if profile.memory: - profile.nodes = pecan.request.dbapi.inode_get_by_ihost(profile_id) - return constants.PROFILE_TYPE_MEMORY - - profile.istor = pecan.request.dbapi.istor_get_by_ihost(profile_id) - if profile.istor: - return constants.PROFILE_TYPE_STORAGE - - profile.ilvgs = pecan.request.dbapi.ilvg_get_by_ihost(profile_id) - if profile.ilvgs: - return constants.PROFILE_TYPE_LOCAL_STORAGE - - return constants.PROFILE_TYPE_STORAGE - raise wsme.exc.ClientSideError( - _("Profile not found: %s" % profile['hostname'])) - - return None - - -################### -# CREATE -################### -def profile_copy_data(host, profile, profiletype): - profile.profiletype = profiletype - if constants.PROFILE_TYPE_CPU in profiletype.lower(): - return cpuprofile_copy_data(host, profile) - elif constants.PROFILE_TYPE_INTERFACE in profiletype.lower(): - return ifprofile_copy_data(host, profile) - elif constants.PROFILE_TYPE_MEMORY in profiletype.lower(): - return memoryprofile_copy_data(host, profile) - elif constants.PROFILE_TYPE_STORAGE in profiletype.lower(): - return storprofile_copy_data(host, profile) - elif constants.PROFILE_TYPE_LOCAL_STORAGE in profiletype.lower(): - return localstorageprofile_copy_data(host, profile) - else: - raise wsme.exc.ClientSideError(_("Must provide a value for 'profiletype'. " - "Choose from: cpu, if, stor, memory")) - - -def cpuprofile_copy_data(host, profile): - # Copy nodes and cpus from host - inodes = pecan.request.dbapi.inode_get_by_ihost(host['id']) - icpus = pecan.request.dbapi.icpu_get_by_ihost(host['id']) - - iprofile_id = profile['id'] - for n in inodes: - n.forihostid = iprofile_id - nodefields = ['numa_node', 'capabilities', 'forihostid'] - ndict = {k: v for (k, v) in n.as_dict().items() if k in nodefields} - new_node = pecan.request.dbapi.inode_create(iprofile_id, ndict) - - for c in icpus: - if c.forinodeid == n.id: - c.forihostid = iprofile_id - c.forinodeid = new_node.id - cpufields = ['cpu', 'numa_node', 'core', 'thread', 'allocated_function', - 'cpu_model', 'cpu_family', 'capabilities', - 'forihostid', 'forinodeid'] - cdict = {k: v for (k, v) in c.as_dict().items() if k in cpufields} - pecan.request.dbapi.icpu_create(iprofile_id, cdict) - - -ROUTE_FIELDS = ['family', 'network', 'prefix', 'gateway', 'metric'] - - -def _get_routes(host_id): - """ - Get routes associated to any interface on this host and then index by - interface uuid value. 
- """ - result = {} - routes = pecan.request.dbapi.routes_get_by_host(host_id) - for r in routes: - interface_uuid = r['interface_uuid'] - if interface_uuid not in result: - result[interface_uuid] = [] - route = {k: v for (k, v) in r.as_dict().items() if k in ROUTE_FIELDS} - result[interface_uuid].append(route) - return result - - -def ifprofile_copy_data(host, profile): - # Copy interfaces and ports from host - ethernet_ports = pecan.request.dbapi.ethernet_port_get_by_host(host['id']) - iinterfaces = pecan.request.dbapi.iinterface_get_by_ihost(host['id']) - routes = _get_routes(host['id']) - - iprofile_id = profile['id'] - newIfList = [] - for i in iinterfaces: - i.forihostid = iprofile_id - iffields = INTERFACE_PROFILE_FIELDS - idict = {k: v for (k, v) in i.as_dict().items() if k in iffields} - idict['interface_profile'] = True - newIf = interface_api._create(idict, from_profile=True) - newIfList.append(newIf) - - for r in routes.get(i.uuid, []): - pecan.request.dbapi.route_create(newIf.id, r) - - for p in ethernet_ports: - if p.interface_id == i.id: - p.host_id = iprofile_id - p.interface_id = newIf.id - - # forinodeid attribute for 001 only. - if hasattr(p, 'forinodeid'): - p.forinodeid = None - - ethernet_port_fields = ['name', 'pclass', 'pvendor', 'pdevice', - 'psvendor', 'psdevice', 'mtu', 'speed', - 'link_mode', 'bootp', 'pciaddr', 'dev_id', - 'host_id', 'interface_id', 'node_id'] - pdict = {k: v for (k, v) in p.as_dict().items() if k in ethernet_port_fields} - pecan.request.dbapi.ethernet_port_create(iprofile_id, pdict) - - if i.ifclass == constants.INTERFACE_CLASS_PLATFORM: - interface_networks = pecan.request.dbapi.interface_network_get_by_interface(i.id) - for ifnet in interface_networks: - ifnetdict = {} - ifnetdict['interface_id'] = newIf.id - ifnetdict['network_id'] = ifnet.network_id - pecan.request.dbapi.interface_network_create(ifnetdict) - else: - interface_datanetworks = pecan.request.dbapi.interface_datanetwork_get_by_interface(i.id) - for ifdn in interface_datanetworks: - ifdndict = {} - ifdndict['interface_id'] = newIf.id - ifdndict['datanetwork_id'] = ifdn.datanetwork_id - pecan.request.dbapi.interface_datanetwork_create(ifdndict) - - # Generate the uses/used_by relationships - for i in newIfList: - uses_list = [] - uses_uuid_list = [] - for u in iinterfaces: - if u.ifname == i.ifname: - uses_list = u.uses[:] - break - - for u in uses_list: - for interface in newIfList: - if u == interface.ifname: - uses_uuid_list.append(interface.uuid) - continue - - idict = {} - idict['uses'] = uses_uuid_list - try: - pecan.request.dbapi.iinterface_update(i.uuid, idict) - except Exception as e: - LOG.exception(e) - raise wsme.exc.ClientSideError(_("Failed to link interface uses.")) - - -def _storprofile_copy_stor(profile, disk, stor): - # Create disk. - diskfields = ['device_node', 'device_path', 'device_num', - 'device_type', 'size_mib', - 'serial_id', 'capabilities', - 'forihostid'] - ddict = {k: v for (k, v) in disk.as_dict().items() if k in diskfields} - newdisk = pecan.request.dbapi.idisk_create(profile.id, ddict) - - # Create stor. - stor.forihostid = profile.id - stor.idisk_uuid = newdisk.uuid - storfields = ['function', 'idisk_uuid', 'forihostid', 'fortierid', - 'journal_location', 'journal_size_mib'] - sdict = {k: v for (k, v) in stor.as_dict().items() if k in storfields} - # This goes through istor semantic checks versus just adding to db (by - # calling dbapi.istor_create). 
- newstor = storage_api._create(sdict, iprofile=True) - - # If disk or stor weren't actually created, then delete profile and exit. - if not newdisk or not newstor: - raise wsme.exc.ClientSideError( - _("Could not create storage volumes or disks " - "for profile %s" % profile.hostname)) - return newstor - - -def storprofile_copy_data(host, profile): - # get host data - istors = pecan.request.dbapi.istor_get_by_ihost(host['id']) - idisks = pecan.request.dbapi.idisk_get_by_ihost(host['id']) - - if not idisks or not istors: - raise wsme.exc.ClientSideError(_("Storage profile cannot be created if there " - "are no disks associated to storage volumes. " - "Add storage volumes then try again.")) - - # first copy the journal stors from host and store the association - # between old journal_locations and the new ones - journals = {} - for d in idisks: - for s in istors: - if (d.foristorid == s.id and - s.function == constants.STOR_FUNCTION_JOURNAL): - s_ret = _storprofile_copy_stor(profile, d, s) - association = {s.uuid: s_ret.uuid} - journals.update(association) - - # copy the rest of the stors from host - for d in idisks: - for s in istors: - if (d.foristorid == s.id and - s.function != constants.STOR_FUNCTION_JOURNAL): - # replace the old journal location with the new one - if s.journal_location in journals: - s.journal_location = journals[s.journal_location] - else: - # collocated, clean journal location - s.journal_location = None - _storprofile_copy_stor(profile, d, s) - - -def _create_disk_profile(disk, iprofile_id): - fields = ['device_node', 'device_path', 'device_num', 'device_type', - 'size_mib', 'serial_id', 'capabilities'] - disk_profile_dict = {k: v for (k, v) in disk.as_dict().items() - if k in fields} - - disk_profile_dict['forihostid'] = iprofile_id - - try: - disk_profile = pecan.request.dbapi.idisk_create( - iprofile_id, disk_profile_dict) - except Exception as e: - err_msg = '{} {}: {}'.format( - "Could not create disk profile from disk", disk.uuid, str(e)) - raise wsme.exc.ClientSideError(_(err_msg)) - - return disk_profile - - -def _create_partition_profile(partition, iprofile_id): - fields = ['device_node', 'device_path', 'size_mib', 'capabilities', - 'type_guid', 'status'] - part_profile_dict = {k: v for (k, v) in partition.as_dict().items() - if k in fields} - # Obtain all the disks of the current profile. - profile_disks = pecan.request.dbapi.idisk_get_by_ihost(iprofile_id) - - # Obtain the disk this partition is residing on. - disk = pecan.request.dbapi.idisk_get(partition.idisk_uuid) - - # Check if the current profile already has the disk needed for the - # required partition. 
- disk_profile = None - if profile_disks: - disk_profile = next((d for d in profile_disks - if (d.device_path == disk.device_path or - d.device_node == disk.device_node)), - None) - - if disk_profile is None: - disk_profile = _create_disk_profile(disk, iprofile_id) - - part_profile_dict['forihostid'] = iprofile_id - part_profile_dict['status'] = constants.PARTITION_CREATE_ON_UNLOCK_STATUS - part_profile_dict['idisk_id'] = disk_profile.id - part_profile_dict['idisk_uuid'] = disk_profile.uuid - - try: - part_profile = pecan.request.dbapi.partition_create(iprofile_id, - part_profile_dict) - except Exception as e: - err_msg = '{} {}: {}'.format( - "Could not create partition profile from partition", - partition.uuid, str(e)) - raise wsme.exc.ClientSideError(_(err_msg)) - - return part_profile - - -def _create_device_profile(device, pv_type, iprofile_id): - """Create a profile disk or partition, depending on the physical volume - type.""" - device_profile = None - - if pv_type == constants.PV_TYPE_DISK: - device_profile = _create_disk_profile(device, iprofile_id) - elif pv_type == constants.PV_TYPE_PARTITION: - device_profile = _create_partition_profile(device, iprofile_id) - - return device_profile - - -def localstorageprofile_copy_data(host, profile): - """Create nova-local storage profile from host data - - All workers will have nova local storage and is independent of - the Cinder backend. - - Controller nodes in the small footprint scenario will always be - the Cinder/LVM configuration and nova local storage. - Ceph is not supported for the backend in the small footprint. - - A storage node should be the only host with a stor profile - (idisks + istors). - - A worker will only have a local stor profile - (idisks + ipvs + ilvgs). - - A combo controller should have a local stor profile - (idisks + ipvs + ilvgs) BUT we need to filter out the ipvs and ilvgs - not associated with the nova-local volume group since there are the - cinder-volumes and cgts-vg volume groups. - - A normal controller should have no storage profiles. - """ - - hostid = host['id'] - idisks = pecan.request.dbapi.idisk_get_by_ihost(hostid) - partitions = pecan.request.dbapi.partition_get_by_ihost(hostid) - - ilvgs_all = pecan.request.dbapi.ilvg_get_by_ihost(hostid) - ilvgs = [ilvg for ilvg in ilvgs_all if constants.LVG_NOVA_LOCAL - in ilvg.lvm_vg_name] - - ipvs = pecan.request.dbapi.ipv_get_by_ihost(hostid) - - if not idisks or not ilvgs or not ipvs: - raise wsme.exc.ClientSideError(_("Storage profile cannot be " - "created if there are no disks associated to logical volume " - "groups or physical volumes. Check %s storage configuration " - "then try again." % host['hostname'])) - - # Keep track of partitions used by PVs. - used_partitions = [] - - if len(ilvgs) > 1: - LOG.warn("ilvgs %s contain more than one nova local lvg" % ilvgs) - - ilvg = ilvgs[0] - - # Copy local storage configuration from host to new profile. - iprofile_id = profile.id - - # Create new profile logical volume. - lvgfields = ['capabilities', 'lvm_vg_name'] - lvgdict = {k: v for (k, v) in ilvg.as_dict().items() if k in lvgfields} - lvgdict['forihostid'] = iprofile_id - LOG.debug("lvgdict=%s" % lvgdict) - lvg_pf = lvg_api._create(lvgdict, iprofile=True) - LOG.info("lvg_pf=%s" % lvg_pf.as_dict()) - - for ipv in ipvs: - if ipv.forilvgid != ilvg.id: - continue - - device = None - # Gather the info about the disk/partition used by the current PV. 
- if ipv.get('pv_type') == constants.PV_TYPE_DISK: - try: - pv_disk = pecan.request.dbapi.idisk_get_by_ipv(ipv.get('uuid')) - except Exception: - err_msg = '{} {}'.format("Could not obtain the disk used by " - "physical volume", ipv.get('uuid')) - raise wsme.exc.ClientSideError(_(err_msg)) - - device = pv_disk[0] - - elif ipv.get('pv_type') == constants.PV_TYPE_PARTITION: - try: - pv_part = pecan.request.dbapi.partition_get_by_ipv( - ipv.get('uuid')) - except Exception: - err_msg = '{} {}'.format("Could not obtain the partition " - "used by physical volume", - ipv.get('uuid')) - raise wsme.exc.ClientSideError(_(err_msg)) - - device = pv_part[0] - used_partitions.append(device) - - # Create the profile object for the device used by the current PV. - device_profile = _create_device_profile( - device, ipv.get('pv_type'), iprofile_id) - - # Create new profile physical volume. - pvfields = ['disk_or_part_device_node', 'disk_or_part_device_path', - 'lvm_vg_name', 'pv_type'] - # 'lvm_pv_name', from Agent, not in profile. - - pvdict = {k: v for (k, v) in ipv.as_dict().items() if k in pvfields} - pvdict['disk_or_part_uuid'] = device_profile.uuid - pvdict['forihostid'] = iprofile_id - pvdict['forilvgid'] = lvg_pf.id - pv_profile = pv_api._create(pvdict, iprofile=True) - LOG.info("pv_pf=%s" % pv_profile.as_dict()) - - if not device_profile or not lvg_pf or not pv_profile: - hostname = profile.hostname - pecan.request.dbapi.ihost_destroy(iprofile_id) - emsg = ("Could not create local storage profile from host %s" - % hostname) - LOG.error("%s ddict=%s, lvg_pf=%s, pv_pf=%s" % - (emsg, device.as_dict(), lvg_pf.as_dict(), - pv_profile.as_dict())) - raise wsme.exc.ClientSideError(_(emsg)) - - # Create profiles for other remaining partitions. - unused_partitions = [ - p for p in partitions if p.device_path not in - [used_part.device_path for used_part in used_partitions]] - - for p in unused_partitions: - if p.type_guid == constants.USER_PARTITION_PHYSICAL_VOLUME: - _create_partition_profile(p, iprofile_id) - - -def memoryprofile_copy_data(host, profile): - # check if the node is provisioned - if host.invprovision != constants.PROVISIONED: - raise wsme.exc.ClientSideError(_("Could not create memory " - "profile until host %s is unlocked for the first time." 
% - host.hostname)) - - # Copy hugepage information from host - inodes = pecan.request.dbapi.inode_get_by_ihost(host['id']) - memory = pecan.request.dbapi.imemory_get_by_ihost(host['id']) - - iprofile_id = profile['id'] - for n in inodes: - n.forihostid = iprofile_id - nodefields = ['numa_node', 'capabilities', 'forihostid'] - ndict = {k: v for (k, v) in n.as_dict().items() if k in nodefields} - new_node = pecan.request.dbapi.inode_create(iprofile_id, ndict) - for m in memory: - if m.forinodeid == n.id: - m.forihostid = iprofile_id - m.forinodeid = new_node.id - memfields = ['numa_node', 'forihostid', 'forinodeid'] - mdict = {k: v for (k, v) in m.as_dict().items() if k in memfields} - mdict['platform_reserved_mib'] = m.platform_reserved_mib - mdict['vm_hugepages_nr_2M_pending'] = m.vm_hugepages_nr_2M - mdict['vm_hugepages_nr_1G_pending'] = m.vm_hugepages_nr_1G - mdict['vswitch_hugepages_reqd'] = m.vswitch_hugepages_nr - mdict['vswitch_hugepages_size_mib'] = m.vswitch_hugepages_size_mib - newmemory = pecan.request.dbapi.imemory_create(iprofile_id, mdict) - - # if memory wasn't actualy created, - # then delete profile and exit - if not newmemory: - raise wsme.exc.ClientSideError(_("Could not create memory " - "profile %s" % profile.hostname)) - - -################### -# DELETE -################### -def profile_delete_data(profile): - profiletype = _get_profiletype(profile) - if constants.PROFILE_TYPE_CPU in profiletype.lower(): - return cpuprofile_delete_data(profile) - elif constants.PROFILE_TYPE_INTERFACE in profiletype.lower(): - return ifprofile_delete_data(profile) - elif constants.PROFILE_TYPE_STORAGE in profiletype.lower(): - return storprofile_delete_data(profile) - elif constants.PROFILE_TYPE_MEMORY in profiletype.lower(): - return memoryprofile_delete_data(profile) - else: - return False - - -def cpuprofile_delete_data(profile): - for cpu in profile.cpus: - pecan.request.dbapi.icpu_destroy(cpu.uuid) - for node in profile.nodes: - pecan.request.dbapi.inode_destroy(node.uuid) - - -def ifprofile_delete_data(profile): - profile.interfaces = pecan.request.dbapi.iinterface_get_by_ihost(profile['id']) - for p in profile.ethernet_ports: - pecan.request.dbapi.ethernet_port_destroy(p.uuid) - for i in profile.interfaces: - pecan.request.dbapi.iinterface_destroy(i.uuid) - - -def storprofile_delete_data(profile): - profile.stors = pecan.request.dbapi.istor_get_by_ihost(profile['id']) - profile.disks = pecan.request.dbapi.idisk_get_by_ihost(profile['id']) - for stor in profile.stors: - pecan.request.dbapi.idisk_update(stor.idisk_uuid, {'foristorid': None}) - pecan.request.dbapi.istor_destroy(stor.uuid) - for disk in profile.disks: - pecan.request.dbapi.idisk_destroy(disk.uuid) - - -def memoryprofile_delete_data(profile): - profile.memory = pecan.request.dbapi.imemory_get_by_ihost(profile['id']) - for m in profile.memory: - pecan.request.dbapi.imemory_destroy(m.uuid) - for node in profile.nodes: - pecan.request.dbapi.inode_destroy(node.uuid) - - -################### -# APPLY -################### -def apply_profile(host_id, profile_id): - host = pecan.request.dbapi.ihost_get(host_id) - profile = pecan.request.dbapi.ihost_get(profile_id) - - """ - NOTE (neid): - if adding a functionality for some or 'all' profiles (eg applying cpu, if AND stor) - replace 'elif' with 'if' and do not 'return' after each callable - That way, can cycle through some or all of cpus, if, stors based on what's - included in the profile and apply the relevant items - - TODO: might need an action to continue on next profile 
type even if exception raised? - eg: if failed to apply cpuprofile, report error and continue to apply ifprofile - """ - profiletype = _get_profiletype(profile) - if constants.PROFILE_TYPE_CPU in profiletype.lower(): - return cpuprofile_apply_to_host(host, profile) - elif constants.PROFILE_TYPE_INTERFACE in profiletype.lower(): - return ifprofile_apply_to_host(host, profile) - elif constants.PROFILE_TYPE_MEMORY in profiletype.lower(): - return memoryprofile_apply_to_host(host, profile) - elif constants.PROFILE_TYPE_STORAGE in profiletype.lower(): - return storprofile_apply_to_host(host, profile) - elif constants.PROFILE_TYPE_LOCAL_STORAGE in profiletype.lower(): - return localstorageprofile_apply_to_host(host, profile) - else: - raise wsme.exc.ClientSideError("Profile %s is not applicable to host" % - profiletype) - - -@cutils.synchronized(cpu_api.LOCK_NAME) -def cpuprofile_apply_to_host(host, profile): - - cpu_api._check_host(host) - - # Populate the host and profile CPU data, order by logical core - host.cpus = pecan.request.dbapi.icpu_get_by_ihost( - host.uuid, sort_key='cpu') - host.nodes = pecan.request.dbapi.inode_get_by_ihost(host.uuid) - if not host.cpus or not host.nodes: - raise wsme.exc.ClientSideError("Host (%s) has no processors " - "or cores." % host.hostname) - - profile.cpus = pecan.request.dbapi.icpu_get_by_ihost( - profile.uuid, sort_key='cpu') - profile.nodes = pecan.request.dbapi.inode_get_by_ihost(profile.uuid) - if not profile.cpus or not profile.nodes: - raise wsme.exc.ClientSideError("Profile (%s) has no processors " - "or cores." % profile.hostname) - - if len(profile.nodes) != len(host.nodes) or len(profile.cpus) != \ - len(host.cpus): - raise wsme.exc.ClientSideError( - "Profile (%s) does not match CPU structure of host (%s)" % - (profile.hostname, host.hostname)) - - # Reorganize the profile cpu data for convenience - cpu_utils.restructure_host_cpu_data(profile) - - # Get the CPU counts for each socket and function for this host - cpu_counts = cpu_utils.get_cpu_counts(profile) - - # Semantic check to ensure the minimum/maximum values are enforced - cpu_utils.check_core_allocations(profile, cpu_counts) - - # Update the host cpu allocations as required - for index in range(len(profile.cpus)): - host_cpu = host.cpus[index] - profile_cpu = profile.cpus[index] - if (not host_cpu.allocated_function or - host_cpu.allocated_function.lower() != - profile_cpu.allocated_function.lower()): - values = {'allocated_function': profile_cpu.allocated_function} - pecan.request.dbapi.icpu_update(host_cpu.uuid, values) - - -def ifprofile_applicable(host, profile): - # If profile does not have the same number of ethernet ports than in host - if len(host.ethernet_ports) != len(profile.ethernet_ports): - raise wsme.exc.ClientSideError(_( - "Cannot apply the profile to host: " - "Number of ethernet ports not the same on host %s (%s) and " - "profile %s (%s)" % - (host.hostname, len(host.ethernet_ports), profile.hostname, - len(profile.ethernet_ports)))) - - # Check if the ethernet ports and their pci addresses have exact match - hset = set((h.name, h.pciaddr) for h in host.ethernet_ports) - pset = set((p.name, p.pciaddr) for p in profile.ethernet_ports) - if hset != pset: - raise wsme.exc.ClientSideError(_( - "Cannot apply the profile to host: " - "The port PCI devices are not the same in host %s and profile " - "%s." 
% (host.hostname, profile.hostname))) - - -def interface_type_sort_key(interface): - """Sort interfaces by interface type placing ethernet interfaces ahead of - aggregated ethernet interfaces, and vlan interfaces last.""" - if interface["iftype"] == constants.INTERFACE_TYPE_ETHERNET: - return 0, interface["ifname"] - elif interface["iftype"] == constants.INTERFACE_TYPE_AE: - return 1, interface["ifname"] - elif interface["iftype"] == constants.INTERFACE_TYPE_VLAN: - return 2, interface["ifname"] - else: - return 99, interface["ifname"] - - -@cutils.synchronized(interface_api.LOCK_NAME) -def ifprofile_apply_to_host(host, profile): - host.ethernet_ports = pecan.request.dbapi.ethernet_port_get_by_host(host.uuid) - host.interfaces = pecan.request.dbapi.iinterface_get_by_ihost(host.uuid) - if not host.ethernet_ports: - raise wsme.exc.ClientSideError(_("Host (%s) has no ports." % host.hostname)) - - profile.ethernet_ports = pecan.request.dbapi.ethernet_port_get_by_host(profile.uuid) - profile.interfaces = pecan.request.dbapi.iinterface_get_by_ihost(profile.uuid) - profile.routes = _get_routes(profile.id) - - ifprofile_applicable(host, profile) - - # Create Port Mapping between Interface Profile and Host - pci_addr_available = True - eth_name_available = True - for port in profile.ethernet_ports: - if not port.pciaddr: - pci_addr_available = False - if not port.name: - eth_name_available = False - - if pci_addr_available: - - def match_express(hport, port): - return hport.pciaddr == port.pciaddr - elif eth_name_available: - - def match_express(hport, port): - return hport.name == port.name - portPairings = [] - hostPortsUsed = [] - - for port in profile.ethernet_ports: - bestmatch = False - for hport in host.ethernet_ports: - if (hport.id not in hostPortsUsed and - port.pclass == hport.pclass and - port.pdevice == hport.pdevice): - - if match_express(hport, port): - hostPortsUsed.append(hport.id) - portPairings.append((hport, port)) - bestmatch = True - break - if not bestmatch: - raise wsme.exc.ClientSideError(_("Cannot apply this profile to host.")) - - prts = [] - for host_interface in host.interfaces: - # Save a list of the interfaces and ports per interface - ports = pecan.request.dbapi.ethernet_port_get_by_interface(host_interface.uuid) - for p in ports: - prts.append((host_interface, p)) - - # Unlink all ports from their interfaces. - for p in host.ethernet_ports: - data = {'interface_id': None} - try: - pecan.request.dbapi.ethernet_port_update(p.uuid, data) - except dbException.DBError: - raise wsme.exc.ClientSideError(_("Failed to unlink port from interface.")) - - # Delete all Host's interfaces in reverse order (VLANs, AEs, ethernet, etc) - for i in sorted(host.interfaces, key=interface_type_sort_key, reverse=True): - try: - # Re-read the interface from the DB because the uses/used_by list - # would have been updated by any preceeding delete operations. - interface = pecan.request.dbapi.iinterface_get( - i['ifname'], host.uuid) - interface_api._delete(interface, from_profile=True) - except Exception as e: - LOG.exception("Failed to delete existing" - " interface {}; {}".format(i['ifname'], e)) - - # Create New Host's interfaces and link them to Host's ports - interfacePairings = {} - for portPair in portPairings: - hport = portPair[0] - port = portPair[1] - - if port.interface_id not in interfacePairings.keys(): - for interface in profile.interfaces: - if interface.id == port.interface_id: - break - else: - raise wsme.exc.ClientSideError(_("Corrupt interface profile: %s." 
% profile.hostname)) - try: - fields = INTERFACE_PROFILE_FIELDS - data = dict((k, v) for k, v in interface.as_dict().items() if k in fields) - data['forihostid'] = host.id - data['imac'] = hport.mac - interface_found = False - iinterfaces = pecan.request.dbapi.iinterface_get_by_ihost(host.id) - for u in iinterfaces: - if str(u.ifname) == str(data['ifname']): - interface_found = True - break - - if interface_found is False: - hinterface = interface_api._create(data, from_profile=True) - if interface.ifclass == constants.INTERFACE_CLASS_PLATFORM: - interface_networks = \ - pecan.request.dbapi.interface_network_get_by_interface(interface.id) - for ifnet in interface_networks: - ifnetdict = {} - ifnetdict['interface_id'] = hinterface.id - ifnetdict['network_id'] = ifnet.network_id - pecan.request.dbapi.interface_network_create(ifnetdict) - network = pecan.request.dbapi.network_get_by_id(ifnet.network_id) - ifnet_api._update_host_address(host, hinterface, network.type) - else: - interface_datanetworks = \ - pecan.request.dbapi.interface_datanetwork_get_by_interface(interface.id) - for ifdn in interface_datanetworks: - ifdndict = {} - ifdndict['interface_id'] = hinterface.id - ifdndict['datanetwork_id'] = ifdn.datanetwork_id - pecan.request.dbapi.interface_datanetwork_create(ifdndict) - - except Exception as e: - # Delete all Host's interfaces - for p in host.ethernet_ports: - data = {'interface_id': None} - try: - pecan.request.dbapi.ethernet_port_update(p.uuid, data) - except dbException.DBError: - LOG.debug(_("Failed to unlink port from interface.")) - - for i in host.interfaces: - try: - interface_api._delete(i.as_dict(), from_profile=True) - except exception.SysinvException: - LOG.debug(_("Can not delete host interface: %s" % i.uuid)) - - # Restore the previous interfaces - for host_interface in host.interfaces: - try: - fields = INTERFACE_PROFILE_FIELDS - data = dict((k, v) for k, v in host_interface.as_dict().items() if k in fields) - data['forihostid'] = host.id - hinterface = interface_api._create(data, from_profile=True) - except Exception as e: - LOG.exception(e) - raise wsme.exc.ClientSideError(_("Failed to create interface.")) - - # Restore the ports per interface - data = {'interface_id': hinterface.id} - for p in prts: - h_interface = p[0] - h_port = p[1] - - if h_interface.ifname == hinterface.ifname: - try: - pecan.request.dbapi.ethernet_port_update(h_port.uuid, data) - except Exception as e: - LOG.exception(e) - - LOG.exception(e) - raise wsme.exc.ClientSideError(_("Failed to update interface.")) - interfacePairings[port.interface_id] = hinterface.id - data = {'interface_id': interfacePairings[port.interface_id]} - try: - pecan.request.dbapi.ethernet_port_update(hport.uuid, data) - except Exception as e: - LOG.exception(e) - raise wsme.exc.ClientSideError(_("Failed to link port to interface.")) - - # update interface pairings - iinterfaces = pecan.request.dbapi.iinterface_get_by_ihost(host.id) - for i in profile.interfaces: - found_interface = False - for u in iinterfaces: - if i.ifname == u.ifname: - found_interface = True - hinterface = u - break - if found_interface is False: - fields = INTERFACE_PROFILE_FIELDS - data = dict((k, v) for k, v in i.as_dict().items() if k in fields) - data['forihostid'] = host.id - hinterface = interface_api._create(data, from_profile=True) - - if i.ifclass == constants.INTERFACE_CLASS_PLATFORM: - interface_networks = pecan.request.dbapi.interface_network_get_by_interface(i.id) - for ifnet in interface_networks: - ifnetdict = {} - 
ifnetdict['interface_id'] = hinterface.id - ifnetdict['network_id'] = ifnet.network_id - pecan.request.dbapi.interface_network_create(ifnetdict) - network = pecan.request.dbapi.network_get_by_id(ifnet.network_id) - ifnet_api._update_host_address(host, hinterface, network.type) - else: - interface_datanetworks = pecan.request.dbapi.interface_datanetwork_get_by_interface(i.id) - for ifdn in interface_datanetworks: - ifdndict = {} - ifdndict['interface_id'] = hinterface.id - ifdndict['datanetwork_id'] = ifdn.datanetwork_id - pecan.request.dbapi.interface_datanetwork_create(ifdndict) - - for r in profile.routes.get(i.uuid, []): - pecan.request.dbapi.route_create(hinterface.id, r) - - iinterfaces = pecan.request.dbapi.iinterface_get_by_ihost(host.id) - - # interfaces need to be associated to each other based on their hierarchy - # to ensure that inspecting the uses list to have complete data before - # copying fields. - iinterfaces = sorted(iinterfaces, key=interface_type_sort_key) - - for i in iinterfaces: - idict = {} - for p in profile.interfaces: - if str(p.ifname) == str(i.ifname): - i.uses = p.uses - i.used_by = p.used_by - - if i.uses: - # convert uses from ifname to uuid - uses_list = [] - usedby_list = [] - for u in iinterfaces: - if six.text_type(u.ifname) in i.uses or u.uuid in i.uses: - uses_list.append(u.uuid) - if six.text_type(u.ifname) in i.used_by or u.uuid in i.used_by: - usedby_list.append(u.uuid) - - idict['uses'] = uses_list - idict['used_by'] = usedby_list - - # Set the MAC address on the interface based on the uses list - tmp_interface = i.as_dict() - tmp_interface.update(idict) - tmp_interface = interface_api.set_interface_mac(host, tmp_interface) - idict['imac'] = tmp_interface['imac'] - - try: - pecan.request.dbapi.iinterface_update(i.uuid, idict) - except Exception as e: - LOG.exception(e) - raise wsme.exc.ClientSideError(_( - "Failed to link interfaces to interface.")) - - -def storprofile_applicable(host, profile): - # If profile has more disks than in host. - if not len(host.disks) >= len(profile.disks): - return (False, _('profile has more disks than host does')) - - return (True, None) - - -@cutils.synchronized(storage_api.LOCK_NAME) -def storprofile_apply_to_host(host, profile): - # Prequisite checks - profile.disks = pecan.request.dbapi.idisk_get_by_ihost(profile.uuid) - profile.stors = pecan.request.dbapi.istor_get_by_ihost(profile.uuid) - if not profile.disks: - raise wsme.exc.ClientSideError(_("Profile (%s) has no disks" % profile.hostname)) - - host.disks = pecan.request.dbapi.idisk_get_by_ihost(host.uuid) - host.stors = pecan.request.dbapi.istor_get_by_ihost(host.uuid) - if not host.disks: - raise wsme.exc.ClientSideError(_("Host (%s) has no disks" % host.hostname)) - - # Check for applicability - (applicable, reason) = storprofile_applicable(host, profile) - if not applicable: - raise wsme.exc.ClientSideError(_("Can not apply this profile to host. 
Reason: {}").format(reason)) - - # Gather the storage tiers and build a map for the create call - tier_map = {} - tiers = pecan.request.dbapi.storage_tier_get_all(type=constants.SB_TIER_TYPE_CEPH) - for t in tiers: - tier_map[t.name] = t.uuid - - # Create mapping between Disk Profile and Host - # if for each disk in the profile, there exists a disk in the host - # with same path value and more than or equal profile disk's size - diskPairs = [] - disksUsed = [] - for pdisk in profile.disks: - match = False - for hdisk in host.disks: - if ((hdisk.device_path == pdisk.device_path or - hdisk.device_node == pdisk.device_node) and - hdisk.size_mib >= pdisk.size_mib): - match = True - diskPairs.append((hdisk, pdisk)) - disksUsed.append(hdisk.id) - break - if match: - # matched, continue to next pdisk - continue - else: - msg = _("Can not apply this profile to host. Please " - "check if host's disks match profile criteria.") - raise wsme.exc.ClientSideError(msg) - - # Delete host's stors that will be replaced - for disk in host.disks: - # There could be some disks that are on host but not in profile - if disk.id in disksUsed: - for stor in host.stors: - # If this stor was attached to a disk identified in the profile - # reject applying profile - if stor.id == disk.foristorid: - # deleting stor is not supported - # try: - # cc.istor.delete(stor.uuid) - # except Exception: - msg = _("A storage volume %s is already associated. " - "Please delete storage volume before applying profile" % stor.uuid) - raise wsme.exc.ClientSideError(msg) - - data = {'foristorid': None} - try: - pecan.request.dbapi.idisk_update(disk.uuid, data) - except Exception as e: - LOG.exception(e) - raise wsme.exc.ClientSideError(_("Failed to unlink storage from disk")) - - # OSDs have journals that may be on different drives than the OSD data - # itself, therefore we first need to create the journals so that we can - # later grab their real uuid's. To do that, we store an association between - # the old uuid of the journals in the profile and the uuid of the newly - # created journals. 
- journalPairs = {} - storPairs = {} - # Create the journal devices first, keep the association - _create_stor(host, profile, diskPairs, constants.STOR_FUNCTION_JOURNAL, tier_map, - journalPairs, storPairs) - - # Create the OSDs - _create_stor(host, profile, diskPairs, constants.STOR_FUNCTION_OSD, tier_map, - journalPairs, storPairs) - - # Update foristorid for all the disks - for diskPair in diskPairs: - hdisk = diskPair[0] - pdisk = diskPair[1] - - pdata = {'foristorid': storPairs[pdisk.foristorid]} - try: - pecan.request.dbapi.idisk_update(hdisk.uuid, pdata) - except dbException.DBError: - raise wsme.exc.ClientSideError(_("Failed to link storage to disk")) - - -def _create_stor(host, profile, diskPairs, function, tier_map, # input - journalPairs, storPairs): # input & output - - for diskPair in diskPairs: - hdisk = diskPair[0] - pdisk = diskPair[1] - - if pdisk.foristorid not in storPairs.keys(): - for pstor in profile.stors: - if pstor.id == pdisk.foristorid: - break - else: - msg = _("Corrupt storage profile: %s" % profile.hostname) - raise wsme.exc.ClientSideError(msg) - - if pstor.function == function: - try: - fields = ['function', 'capabilities', - 'idisk_uuid', 'forihostid'] - if pstor.function == constants.STOR_FUNCTION_OSD: - # OSDs have more attributes - fields += ['journal_location', 'journal_size'] - data = dict((k, v) for k, v in pstor.as_dict().items() - if k in fields and v) - data['forihostid'] = host.id - data['idisk_uuid'] = hdisk.uuid - if pstor.function == constants.STOR_FUNCTION_OSD: - if pstor.journal_location == pstor.uuid: - # Journals are collocated, let _create handle this - data['journal_location'] = None - else: - # Journals are on a different drive than the OSD - # grab the uuid for the newly created journal stor - data['journal_location'] = \ - journalPairs[pstor.journal_location] - data['journal_size_mib'] = pstor['journal_size_mib'] - - # Need a storage tier uuid - tier = pstor.get('tier_name') - if tier: - data['tier_uuid'] = tier_map[tier] - else: - data['tier_uuid'] = tier_map[ - constants.SB_TIER_DEFAULT_NAMES[ - constants.SB_TIER_TYPE_CEPH]] - - hstor = storage_api._create(data) - except Exception as e: - LOG.exception(e) - raise wsme.exc.ClientSideError(_( - "Failed to create storage function. %s") % str(e)) - - # Save pairs for later use - if pstor.function == constants.STOR_FUNCTION_JOURNAL: - journalPairs[pstor.uuid] = hstor.uuid - storPairs[pdisk.foristorid] = hstor.id - - -def _partition_profile_apply_to_host(host, profile): - for disk in host.disks: - profile_partitions = [ - p for p in profile.partitions - if (disk.device_path in p.device_path or - disk.device_node in p.device_node)] - - if not profile_partitions: - LOG.info("No partitions for disk %s" % disk.device_path) - continue - - profile_partitions_paths = [] - profile_partitions_names = [] - for p in profile.partitions: - if disk.device_path in p.device_path: - profile_partitions_paths.append(p.device_path) - elif disk.device_node in p.device_node: - profile_partitions_names.append(p.device_node) - - total_part_size = sum(p.size_mib for p in profile_partitions) - - # Check there is enough space on the host's disk to accommodate the - # profile partitions. - LOG.info("Disk av space: %s needed: %s" % (disk.available_mib, - total_part_size)) - if disk.available_mib < total_part_size: - return (False, - _('Not enough free space on disk {0} for profile ' - 'partitions. 
At least {1} MiB are required.').format( - disk.device_path, total_part_size)) - - # Check the partition requested by the profile is not already present - # on the host's disk. - disk_partitions = pecan.request.dbapi.partition_get_by_idisk(disk.uuid) - for disk_part in disk_partitions: - if (disk_part.device_path in profile_partitions_paths or - disk_part.device_node in profile_partitions_names): - return (False, - _('Partition {0} already present on disk {1}').format( - disk_part.device_path, disk.device_path)) - - # Check the partitions requested by the profile and the ones already - # existing on the host are in order. - if not cutils.partitions_are_in_order(disk_partitions, - profile_partitions): - return (False, - _('The partitions present in the local storage profile ' - 'cannot be created on disk %s on the requested order. ') - .format(disk.device_path)) - - # Create the partitions. - for p in profile_partitions: - fields = ['size_mib', 'capabilities', 'type_guid', 'status'] - part_dict = {k: v for (k, v) in p.as_dict().items() - if k in fields} - part_dict['forihostid'] = host.id - part_dict['idisk_id'] = disk.id - part_dict['idisk_uuid'] = disk.uuid - partition_api._create(part_dict, iprofile=True) - - return True, None - - -def check_localstorageprofile_applicable(host, profile): - """Semantic checks for whether local storage profile is applicable to host. - - Host level administrative checks are already performed earlier in ihost. - """ - - subfunctions = host.subfunctions - if constants.WORKER not in subfunctions: - raise wsme.exc.ClientSideError(_("%s with subfunctions: %s " - "profile %s: Local storage profiles are applicable only to " - "hosts with 'worker' subfunction." % - (host.hostname, host.subfunctions, profile.hostname))) - - if not profile.disks: - raise wsme.exc.ClientSideError(_("Profile (%s) has no disks" % - profile.hostname)) - if not host.disks: - raise wsme.exc.ClientSideError(_("Host (%s) has no disks" % - host.hostname)) - num_host_disks = len(host.disks) - num_profile_disks = len(profile.disks) - if num_host_disks < num_profile_disks: - raise wsme.exc.ClientSideError( - "%s profile %s: Number of host disks %s is less than profile " - "disks %s" % - (host.hostname, profile.hostname, num_host_disks, - num_profile_disks)) - - -@cutils.synchronized(lvg_api.LOCK_NAME) -def localstorageprofile_apply_to_host(host, profile): - """Apply local storage profile to a host - """ - profile.disks = pecan.request.dbapi.idisk_get_by_ihost(profile.uuid) - profile.partitions = pecan.request.dbapi.partition_get_by_ihost( - profile.uuid) - profile.ilvgs = pecan.request.dbapi.ilvg_get_by_ihost(profile.uuid) - profile.ipvs = pecan.request.dbapi.ipv_get_by_ihost(profile.uuid) - - host.disks = pecan.request.dbapi.idisk_get_by_ihost(host.uuid) - host.partitions = pecan.request.dbapi.partition_get_by_ihost(host.uuid) - host.ipvs = pecan.request.dbapi.ipv_get_by_ihost(host.uuid) - - check_localstorageprofile_applicable(host, profile) - - # Create mapping between Disk Profile and Host - # if for each disk in the profile, there exists a disk in the host - # with same path value and more than or equal profile disk's size - diskPairs = [] - disksUsed = [] - for pdisk in profile.disks: - match = False - for hdisk in host.disks: - if ((hdisk.device_path == pdisk.device_path or - hdisk.device_node == pdisk.device_node) and - ((hdisk.size_mib is None and pdisk.size_mib is None) or - (hdisk.size_mib and pdisk.size_mib and - hdisk.size_mib >= pdisk.size_mib))): - match = True - 
diskPairs.append((hdisk, pdisk)) - disksUsed.append(hdisk.id) - break - if match: - # matched, continue to next pdisk - continue - else: - msg = _("Can not apply this profile to host. Please " - "check if host's disks match profile criteria.") - raise wsme.exc.ClientSideError(msg) - - # Delete host's stors that will be replaced - for disk in host.disks: - # There could be some disks that are on host but not in profile - if disk.id in disksUsed: - for ipv in host.ipvs: - # If this pv was attached to a disk identified in the profile - # reject applying profile - if ipv.id == disk.foripvid: - # combo case: there may be already cgts-vg - if ipv.lvm_vg_name == constants.LVG_NOVA_LOCAL: - msg = _( - "A physical volume %s is already associated. " - "Please delete physical volume before applying " - "profile" % ipv.uuid) - raise wsme.exc.ClientSideError(msg) - - # data = {'foripvid': None} - # try: - # pecan.request.dbapi.idisk_update(disk.uuid, data) - mydisk = pecan.request.dbapi.idisk_get(disk.uuid) - if mydisk.foripvid: - LOG.warn("mydisk %s foripvid %s" % - (mydisk.uuid, mydisk.foripvid)) - # except Exception as e: - # LOG.exception(e) - # raise wsme.exc.ClientSideError(_("Failed to unlink physical " - # "volume from disk %s" % disk.uuid)) - - # Apply partition profile - result, msg = _partition_profile_apply_to_host(host, profile) - if not result: - raise wsme.exc.ClientSideError(msg) - - # Create new host's physical volumes and link them to ihost's disks - host_id = host.id - ipvPairs = {} - - # Add the hilvg entry from pilvg - pilvg = None - for ilvg in profile.ilvgs: - if ilvg.lvm_vg_name == constants.LVG_NOVA_LOCAL: - pilvg = ilvg - LOG.info("pilvg found: %s" % ilvg.uuid) - break - - if not pilvg: - raise wsme.exc.ClientSideError( - _("No nova-local in profile logical volume")) - - LOG.info("pilvg=%s" % pilvg.as_dict()) - try: - lvgfields = ['capabilities', 'lvm_vg_name'] - lvgdict = {k: v for (k, v) in pilvg.as_dict().items() - if k in lvgfields} - lvgdict['forihostid'] = host_id - - newlvg = lvg_api._create(lvgdict, applyprofile=True) - except Exception as e: - LOG.exception(e) - raise wsme.exc.ClientSideError(_("Failed to create storage " - "logical volume")) - LOG.info("newlvg=%s" % newlvg.as_dict()) # TODO: LOG.debug - - hpartitions = pecan.request.dbapi.partition_get_by_ihost(host.uuid) - - for pipv in profile.ipvs: - found_pv = False - pv_type = pipv.pv_type - if pv_type == constants.PV_TYPE_DISK: - for diskPair in diskPairs: - hdisk = diskPair[0] - pdisk = diskPair[1] - if pdisk.foripvid == pipv.id: - disk_or_part_uuid = hdisk.uuid - device_update_function = pecan.request.dbapi.idisk_update - found_pv = True - break - else: - for profile_part in profile.partitions: - if pipv.id == profile_part.foripvid: - disk_or_part_uuid = next( - hp.uuid for hp in hpartitions - if (hp.device_path == profile_part.device_path or - hp.device_node == profile_part.device_node)) - device_update_function = \ - pecan.request.dbapi.partition_update - found_pv = True - break - - if not found_pv: - msg = _("Corrupt storage profile: %s" % profile.hostname) - raise wsme.exc.ClientSideError(msg) - - try: - pvfields = ['disk_or_part_device_path', - 'lvm_vg_name'] - # 'lvm_pv_name', from Agent: not in profile - - pvdict = (dict((k, v) for k, v in pipv.as_dict().items() - if k in pvfields and v)) - pvdict['forihostid'] = host_id - pvdict['disk_or_part_uuid'] = disk_or_part_uuid - pvdict['forilvgid'] = newlvg.id - pvdict['pv_state'] = constants.LVG_ADD - pvdict['pv_type'] = pv_type - hipv = 
pv_api._create(pvdict, iprofile=True) - except Exception as e: - LOG.exception(e) - raise wsme.exc.ClientSideError(_("Failed to create storage " - "physical volume")) - - LOG.info("new hipv=%s" % hipv.as_dict()) # TODO: LOG.debug - - ipvPairs[pdisk.foripvid] = hipv.id - - pdata = {'foripvid': ipvPairs[pdisk.foripvid]} - try: - device_update_function(disk_or_part_uuid, pdata) - except dbException.DBError: - raise wsme.exc.ClientSideError(_( - "Failed to link storage to device %s" % disk_or_part_uuid)) - - -def memoryprofile_applicable(host, profile): - # If profile has more nodes than in host - if not len(host.memory) >= len(profile.memory): - LOG.warn("Host memory %s not same as profile memory=%s" % - (len(host.memory), len(profile.memory))) - return False - if len(host.nodes) != len(profile.nodes): - LOG.warn("Host nodes %s not same as profile nodes=%s" % - (len(host.nodes), len(profile.nodes))) - return False - if constants.WORKER not in host.subfunctions: - LOG.warn("Profile cannot be applied to non-worker host") - return False - return True - - -@cutils.synchronized(memory_api.LOCK_NAME) -def memoryprofile_apply_to_host(host, profile): - # Prequisite checks - profile.memory = pecan.request.dbapi.imemory_get_by_ihost(profile.uuid) - profile.nodes = pecan.request.dbapi.inode_get_by_ihost(profile.uuid) - if not profile.memory or not profile.nodes: - raise wsme.exc.ClientSideError(_("Profile (%s) has no memory or processors" - % profile.hostname)) - - host.memory = pecan.request.dbapi.imemory_get_by_ihost(host.uuid) - host.nodes = pecan.request.dbapi.inode_get_by_ihost(host.uuid) - if not host.memory or not host.nodes: - raise wsme.exc.ClientSideError(_("Host (%s) has no memory or processors" - % host.hostname)) - - # Check for applicability - if not memoryprofile_applicable(host, profile): - raise wsme.exc.ClientSideError(_("Can not apply this profile to host")) - - # Create mapping between memory profile and host - # for each node in the profile, there exists a node in the host - for hmem in host.memory: - for pmem in profile.memory: - host_inode = pecan.request.dbapi.inode_get(hmem.forinodeid) - profile_inode = pecan.request.dbapi.inode_get(pmem.forinodeid) - if int(host_inode.numa_node) == int(profile_inode.numa_node): - data = {'vm_hugepages_nr_2M_pending': pmem.vm_hugepages_nr_2M_pending, - 'vm_hugepages_nr_1G_pending': pmem.vm_hugepages_nr_1G_pending, - 'platform_reserved_mib': pmem.platform_reserved_mib, - 'vswitch_hugepages_reqd': pmem.vswitch_hugepages_reqd, - 'vswitch_hugepages_size_mib': pmem.vswitch_hugepages_size_mib} - try: - memory_api._update(hmem.uuid, data) - except wsme.exc.ClientSideError as cse: - LOG.exception(cse) - raise wsme.exc.ClientSideError(_("Failed to update memory. %s" % (cse.message))) - except Exception as e: - LOG.exception(e) - raise wsme.exc.ClientSideError(_("Failed to update memory")) - continue diff --git a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/profile_utils.py b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/profile_utils.py deleted file mode 100644 index 780633480a..0000000000 --- a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/profile_utils.py +++ /dev/null @@ -1,409 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# Copyright (c) 2013-2021 Wind River Systems, Inc. -# - -import netaddr -import six - -from oslo_log import log -from sysinv._i18n import _ -from sysinv.common import constants - -LOG = log.getLogger(__name__) - - -class InvalidProfileData(Exception): - pass - - -class Network(object): - def __init__(self, node, networkType): - self.networkType = networkType - self.providerNetworks = [] - - providerNetworksNode = node.find('providerNetworks') - if providerNetworksNode: - for pnetNode in providerNetworksNode.findall('providerNetwork'): - pnetName = pnetNode.get('name') - self.addProviderNetwork(pnetName) - - def addProviderNetwork(self, pnet): - if pnet not in self.providerNetworks: - self.providerNetworks.append(pnet) - # ignore if provider network is duplicated within one interface - - def validate(self): - if len(self.providerNetworks) == 0: - # caller will do the translation - raise InvalidProfileData("At least one provider network must be selected.") - - -class DataclassNetwork(Network): - def __init__(self, node): - - super(DataclassNetwork, self).__init__(node, constants.NETWORK_TYPE_DATA) - self.ipv4Mode = DataclassNetwork.getIpMode(node, "ipv4") - self.ipv6Mode = DataclassNetwork.getIpMode(node, "ipv6") - self.routes = DataclassNetwork.getRoutes(node) - - @staticmethod - def getRoutes(node): - routesNode = node.find('routes') - if routesNode is None: - return [] - - routes = [] - for routeNode in routesNode.findall('route'): - route = {} - route['metric'] = int(routeNode.get('metric')) - network = routeNode.get('network') - gateway = routeNode.get('gateway') - - try: - addr = netaddr.IPAddress(gateway) - except netaddr.core.AddrFormatError: - raise InvalidProfileData(_('%s is not a valid IP address') % gateway) - - try: - net = netaddr.IPNetwork(network) - except netaddr.core.AddrFormatError: - raise InvalidProfileData(_('%s is not a valid network') % network) - - if addr.format() != gateway: - raise InvalidProfileData(_('%s is not a valid IP address') % gateway) - - if net.version != addr.version: - raise InvalidProfileData(_('network "%s" and gateway "%s" must be the same version.') % - (network, gateway)) - - route['network'] = net.network.format() - route['prefix'] = net.prefixlen - route['gateway'] = gateway - route['family'] = net.version - - routes.append(route) - return routes - - @staticmethod - def getIpMode(node, name): - modeNode = node.find(name) - if modeNode is None: - raise InvalidProfileData(_('%s is required for a datanetwork') % name) - - mode = modeNode.get('mode') - pool = None - if mode == 'pool': - poolNode = modeNode.find('pool') - if poolNode is None: - raise InvalidProfileData(_('A pool is required for a %s defined as "pool"') % name) - - pool = poolNode.get('name') - - return {'mode': mode, 'pool': pool} - - -class ExternalNetwork(object): - def __init__(self, node, networktype): - self.networkType = networktype - - def validate(self): - pass - - -class PciPassthrough(Network): - def __init__(self, node): - super(PciPassthrough, self).__init__(node, constants.NETWORK_TYPE_PCI_PASSTHROUGH) - - -class PciSriov(Network): - def __init__(self, 
node): - super(PciSriov, self).__init__(node, constants.NETWORK_TYPE_PCI_SRIOV) - self.virtualFunctions = int(node.get('virtualFunctions')) - self.virtualFunctionDriver = node.get('virtualFunctionDriver') - self.maxTxRate = node.get('maxTxRate') - - -class Interface(object): - def __init__(self, ifNode): - - self.providerNetworks = [] - self.networks = [] - self.name = ifNode.get('ifName') - self.mtu = ifNode.get('mtu') - self.ipv4Mode = {'mode': None, 'pool': None} - self.ipv6Mode = {'mode': None, 'pool': None} - self.routes = [] - self.virtualFunctions = 0 - self.virtualFunctionDriver = None - self.maxTxRate = None - networksNode = ifNode.find('networks') - if networksNode is not None: - for netNode in networksNode: - self.addNetwork(netNode) - - def getNetworkMap(self): - return {} - - def addNetwork(self, node): - tag = node.tag - networkMap = self.getNetworkMap() - if tag in networkMap: - network = networkMap[tag](node) - self.networks.append(network) - if network.networkType == constants.NETWORK_TYPE_DATA: - self.ipv4Mode = network.ipv4Mode - self.ipv6Mode = network.ipv6Mode - self.routes = network.routes - elif network.networkType == constants.NETWORK_TYPE_PCI_SRIOV: - self.virtualFunctions = network.virtualFunctions - self.virtualFunctionDriver = network.virtualFunctionDriver - self.maxTxRate = network.maxTxRate - - if isinstance(network, Network): - self.providerNetworks = network.providerNetworks - - else: - raise InvalidProfileData(_('network type (%s) not recognizable') % tag) - - def validate(self): - # raise InvalidProfileData exception with detail msg - numberOfNetworks = len(self.networks) - - if numberOfNetworks > 2: - raise InvalidProfileData(_('Too many network types selected for the interface.')) - - # when change, make sure modify the displayText as well - combineTypes = [constants.NETWORK_TYPE_MGMT, constants.NETWORK_TYPE_CLUSTER_HOST] - displayText = _('Only mgmt and cluster-host network types can be combined on a single interface') - if numberOfNetworks == 2: - if self.networks[0].networkType not in combineTypes or \ - self.networks[1].networkType not in combineTypes: - raise InvalidProfileData(displayText) - - if self.networks[0].networkType == self.networks[1].networkType: - raise InvalidProfileData(_('Interface can not combine with 2 networks with the same type.')) - - try: - for network in self.networks: - network.validate() - except InvalidProfileData as e: - raise InvalidProfileData(_(six.text_type(e) + ' Interface: %s') % self.name) - - def getNetworks(self): - pnets = '' - networkTypes = '' - hasNT = False - for network in self.networks: - if network.networkType is None: - continue - - hasNT = True - if networkTypes: - networkTypes += ',' - networkTypes = networkTypes + network.networkType - if hasattr(network, 'providerNetworks'): - # there should be only one network has providerNetwork - for pnet in network.providerNetworks: - if pnets: - pnets += ',' - pnets = pnets + pnet - - if not hasNT: - networkTypes = None - pnets = None - - return networkTypes, pnets - - -class EthInterface(Interface): - def __init__(self, ifNode): - super(EthInterface, self).__init__(ifNode) - self.port, self.pciAddress, self.pclass, self.pdevice = self.getPort(ifNode) - - def getPort(self, ifNode): - portNode = ifNode.find('port') - if portNode is None: - raise InvalidProfileData(_('Ethernet interface %s requires an Ethernet port ') % - ifNode.get('ifName')) - - pciAddress = '' - tmp = portNode.get('pciAddress') - try: - pciAddress = EthInterface.formatPciAddress(tmp) - except 
InvalidProfileData as exc: - raise InvalidProfileData(six.text_type(exc) + _('Interface %s, pciAddress %s') % - (ifNode.get('ifName'), tmp)) - - pclass = portNode.get('class') - if pclass: - pclass = pclass.strip() - - pdevice = portNode.get('device') - if pdevice: - pdevice = pdevice.strip() - - return portNode.get('name'), pciAddress, pclass, pdevice - - @staticmethod - def formatPciAddress(value): - # To parse a [X]:[X]:[X].[X] formatted pci address into [04x]:[02x]:[02x].[01x] pci address format - if value: - section_list1 = value.split(':') - else: - return '' - - if len(section_list1) != 3: - raise InvalidProfileData(_('pciAddress is not well formatted.')) - - section_list2 = section_list1[2].split('.') - if len(section_list2) != 2: - raise InvalidProfileData(_('pciAddress is not well formatted.')) - - try: - sec1 = int(section_list1[0], 16) - sec2 = int(section_list1[1], 16) - sec3 = int(section_list2[0], 16) - sec4 = int(section_list2[1], 16) - except (TypeError, ValueError): - raise InvalidProfileData(_('pciAddress is not well formatted.')) - - result = '{0:04x}:{1:02x}:{2:02x}.{3:01x}'.format(sec1, sec2, sec3, sec4) - - return result - - def getNetworkMap(self): - return { - 'dataclassNetwork': lambda node: DataclassNetwork(node), - 'clusterhostNetwork': lambda node: ExternalNetwork(node, constants.NETWORK_TYPE_CLUSTER_HOST), - 'oamNetwork': lambda node: ExternalNetwork(node, constants.NETWORK_TYPE_OAM), - 'mgmtNetwork': lambda node: ExternalNetwork(node, constants.NETWORK_TYPE_MGMT), - 'pciPassthrough': lambda node: PciPassthrough(node), - 'pciSriov': lambda node: PciSriov(node) - } - - -class AeInterface(Interface): - def __init__(self, ifNode): - super(AeInterface, self).__init__(ifNode) - self.usesIf = [] - aeModeNode = ifNode.find('aeMode') # aeMode is mandatory required by schema - node = aeModeNode[0] # it is mandatory required by schema - - if node.tag == 'activeStandby': - self.aeMode = 'activeStandby' - self.txPolicy = None - self.primary_reselect = node.get('primary_reselect') - elif node.tag == 'balanced': - self.aeMode = 'balanced' - self.txPolicy = node.get('txPolicy') - self.primary_reselect = None - elif node.tag == 'ieee802.3ad': - self.aeMode = '802.3ad' - self.txPolicy = node.get('txPolicy') - self.primary_reselect = None - - node = ifNode.find('interfaces') - if node: - for usesIfNode in node.findall('interface'): - self.addUsesIf(usesIfNode.get('name')) - - def addUsesIf(self, ifName): - if not ifName: - raise InvalidProfileData(_('Interface name value cannot be empty.')) - if ifName == self.name: - raise InvalidProfileData(_('Aggregrated ethernet interface (%s) cannot use itself.') % self.name) - - if ifName not in self.usesIf: - self.usesIf.append(ifName) - - def getNetworkMap(self): - return { - 'dataclassNetwork': lambda node: DataclassNetwork(node), - 'clusterhostNetwork': lambda node: ExternalNetwork(node, constants.NETWORK_TYPE_CLUSTER_HOST), - 'oamNetwork': lambda node: ExternalNetwork(node, constants.NETWORK_TYPE_OAM), - 'mgmtNetwork': lambda node: ExternalNetwork(node, constants.NETWORK_TYPE_MGMT) - } - - def validateWithIfNames(self, allInterfaceNames): - # raise InvalidProfileData exception if invalid - if len(self.usesIf) == 0: - msg = _('Aggregrated ethernet interface (%s) should have at least one interface.') % self.name - raise InvalidProfileData(msg) - - for usesIfName in self.usesIf: - if usesIfName not in allInterfaceNames: - msg = _('Aggregrated ethernet interface (%s) uses a undeclared interface (%s)') % \ - (self.name, usesIfName) - 
raise InvalidProfileData(msg) - super(AeInterface, self).validate() - - -class VlanInterface(Interface): - def __init__(self, ifNode): - super(VlanInterface, self).__init__(ifNode) - self.vlanId = int(ifNode.get('vlanId')) - usesIf = ifNode.get('interface') - - if not usesIf: - raise InvalidProfileData(_(' value cannot be empty.')) - if usesIf == self.name: - raise InvalidProfileData(_('vlan interface (%s) cannot use itself.') % self.name) - self.usesIfName = usesIf - self.usesIf = [usesIf] - - def getNetworkMap(self): - return { - 'dataclassNetwork': lambda node: DataclassNetwork(node), - 'clusterhostNetwork': lambda node: ExternalNetwork(node, constants.NETWORK_TYPE_CLUSTER_HOST), - 'oamNetwork': lambda node: ExternalNetwork(node, constants.NETWORK_TYPE_OAM), - 'mgmtNetwork': lambda node: ExternalNetwork(node, constants.NETWORK_TYPE_MGMT) - } - - @staticmethod - def isEthInterface(ifName, ethIfMap): - return ifName in ethIfMap - - def validateWithIfNames(self, allInterfaceNames, aeIfMap, vlanIfMap, ethIfMap): - # raise InvalidProfileData exception if invalid - if self.usesIfName not in allInterfaceNames: - msg = _('vlan interface (%s) uses a undeclared interface (%s)') % \ - (self.name, self.usesIfName) - raise InvalidProfileData(msg) - - isEthIf = self.isEthInterface(self.usesIfName, ethIfMap) - - good = True - if not isEthIf: - ifNameToCheck = [self.usesIfName] - - while len(ifNameToCheck) > 0: - ifName = ifNameToCheck.pop(0) - if ifName in aeIfMap: - aeIf = aeIfMap[ifName] - for n in aeIf.usesIf: - ifNameToCheck.append(n) - elif ifName in vlanIfMap: - good = False - break # not good,a vlan in uses tree - - if not good: - raise InvalidProfileData(_('A vlan interface cannot use a vlan interface.')) - - super(VlanInterface, self).validate() diff --git a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/pv.py b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/pv.py index 4a99058704..f6eac747e2 100644 --- a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/pv.py +++ b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/pv.py @@ -16,7 +16,7 @@ # License for the specific language governing permissions and limitations # under the License. # -# Copyright (c) 2013-2017 Wind River Systems, Inc. +# Copyright (c) 2013-2021 Wind River Systems, Inc. # import jsonpatch @@ -357,14 +357,14 @@ class PVController(rest.RestController): # This method allows creating a physical volume through a non-HTTP -# request e.g. through profile.py while still passing -# through physical volume semantic checks and osd configuration +# request while still passing through physical volume semantic checks and osd +# configuration # Hence, not declared inside a class # # Param: # pv - dictionary of physical volume values # iprofile - True when created by a storage profile -def _create(pv, iprofile=None): +def _create(pv): LOG.debug("pv._create with initial params: %s" % pv) # Get host ihostId = pv.get('forihostid') or pv.get('ihost_uuid') @@ -427,7 +427,7 @@ def _create(pv, iprofile=None): values) # semantic check for root disk - if iprofile is not True and constants.WARNING_MESSAGE_INDEX in pv: + if constants.WARNING_MESSAGE_INDEX in pv: warning_message_index = pv.get(constants.WARNING_MESSAGE_INDEX) raise wsme.exc.ClientSideError( constants.PV_WARNINGS[warning_message_index]) @@ -714,11 +714,10 @@ def _check_device(new_pv, ihost): new_pv['disk_or_part_device_path'] = new_pv_device.device_path # Since physical volumes are reported as device nodes and not device - # paths, we need to translate this, but not for local storage profiles. 
- if ihost['recordtype'] != 'profile': - if new_pv_device.device_node: - new_pv['disk_or_part_device_node'] = new_pv_device.device_node - new_pv['lvm_pv_name'] = new_pv['disk_or_part_device_node'] + # paths, we need to translate this + if new_pv_device.device_node: + new_pv['disk_or_part_device_node'] = new_pv_device.device_node + new_pv['lvm_pv_name'] = new_pv['disk_or_part_device_node'] # relationship checks # - Only one pv for cinder-volumes @@ -899,3 +898,4 @@ def delete_pv(pv_uuid, force=False): # TODO (rchurch): Fix system host-pv-add 1 cinder-volumes => no error message # TODO (rchurch): Fix system host-pv-add -t disk 1 cinder-volumes => confusing message # TODO (rchurch): remove the -t options and use path/node/uuid to derive the type of PV +# TODO (pbovina): Move utils methods within PVController class diff --git a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/storage.py b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/storage.py index 07a45630b8..dc5d9e08bd 100644 --- a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/storage.py +++ b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/storage.py @@ -16,7 +16,7 @@ # License for the specific language governing permissions and limitations # under the License. # -# Copyright (c) 2013-2019 Wind River Systems, Inc. +# Copyright (c) 2013-2021 Wind River Systems, Inc. # from eventlet.green import subprocess @@ -518,16 +518,6 @@ class StorageController(rest.RestController): raise -def _check_profile(stor): - # semantic check: whether system has a ceph backend - if not StorageBackendConfig.has_backend_configured( - pecan.request.dbapi, - constants.SB_TYPE_CEPH - ): - raise wsme.exc.ClientSideError(_( - "System must have a %s backend" % constants.SB_TYPE_CEPH)) - - def _check_host(stor): ihost_id = stor['forihostid'] ihost = pecan.request.dbapi.ihost_get(ihost_id) @@ -794,14 +784,11 @@ def _check_journal(old_foristor, new_foristor): # This method allows creating a stor through a non-HTTP -# request e.g. through profile.py while still passing -# through istor semantic checks and osd configuration -# Hence, not declared inside a class +# request # # Param: # stor - dictionary of stor values -# iprofile - True when created by a storage profile -def _create(stor, iprofile=None): +def _create(stor): LOG.debug("storage._create stor with params: %s" % stor) # Init @@ -820,10 +807,7 @@ def _create(stor, iprofile=None): stor.update({'forihostid': forihostid}) # SEMANTIC CHECKS - if iprofile: - _check_profile(stor) - else: - _check_host(stor) + _check_host(stor) try: idisk_uuid = _check_disk(stor) @@ -834,12 +818,11 @@ def _create(stor, iprofile=None): # Assign the function if necessary. 
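The ``_check_device()`` hunk in pv.py above now always records the reported device node as ``disk_or_part_device_node`` and ``lvm_pv_name``, since the local-storage-profile exception is gone. A small sketch of that translation follows; ``DeviceInfo`` is a stand-in for the idisk/partition object used by sysinv.

::

    # Sketch of the translation done at the end of _check_device() above:
    # physical volumes are reported by device node (e.g. /dev/sdb), while the
    # inventory also keeps the persistent device path, so both are recorded.

    from collections import namedtuple

    DeviceInfo = namedtuple('DeviceInfo', ['device_node', 'device_path'])


    def translate_device(new_pv, device):
        new_pv['disk_or_part_device_path'] = device.device_path
        # With host profiles removed there is no 'recordtype' check any more:
        # the device node, when present, always becomes the LVM PV name.
        if device.device_node:
            new_pv['disk_or_part_device_node'] = device.device_node
            new_pv['lvm_pv_name'] = new_pv['disk_or_part_device_node']
        return new_pv


    if __name__ == '__main__':
        dev = DeviceInfo(device_node='/dev/sdb',
                         device_path='/dev/disk/by-path/pci-0000:00:0d.0-ata-2.0')
        print(translate_device({}, dev))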
function = stor['function'] if function: - if function == constants.STOR_FUNCTION_OSD and not iprofile: + if function == constants.STOR_FUNCTION_OSD: osd_create = True else: function = stor['function'] = constants.STOR_FUNCTION_OSD - if not iprofile: - osd_create = True + osd_create = True create_attrs = {} create_attrs.update(stor) @@ -880,32 +863,31 @@ def _create(stor, iprofile=None): create_attrs['fortierid'] = tier.id - if not iprofile: - try: - journal_location = \ - _check_journal_location(stor['journal_location'], - stor, - constants.ACTION_CREATE_JOURNAL) - except exception.InvalidUUID as e: - raise wsme.exc.ClientSideError(_(str(e))) + try: + journal_location = \ + _check_journal_location(stor['journal_location'], + stor, + constants.ACTION_CREATE_JOURNAL) + except exception.InvalidUUID as e: + raise wsme.exc.ClientSideError(_(str(e))) - # If the journal is collocated, make sure its size is set to the - # default one. - if 'uuid' in stor and journal_location == stor['uuid']: - stor['journal_size_mib'] = CONF.journal.journal_default_size - elif journal_location: - if not stor['journal_size_mib']: - stor['journal_size_mib'] = \ - CONF.journal.journal_default_size + # If the journal is collocated, make sure its size is set to the + # default one. + if 'uuid' in stor and journal_location == stor['uuid']: + stor['journal_size_mib'] = CONF.journal.journal_default_size + elif journal_location: + if not stor['journal_size_mib']: + stor['journal_size_mib'] = \ + CONF.journal.journal_default_size - journal_istor = pecan.request.dbapi.istor_get(journal_location) - journal_idisk_uuid = journal_istor.idisk_uuid + journal_istor = pecan.request.dbapi.istor_get(journal_location) + journal_idisk_uuid = journal_istor.idisk_uuid - # Find out if there is enough space to keep the journal on the - # journal stor. - _check_journal_space(journal_idisk_uuid, - journal_location, - stor['journal_size_mib']) + # Find out if there is enough space to keep the journal on the + # journal stor. + _check_journal_space(journal_idisk_uuid, + journal_location, + stor['journal_size_mib']) elif function == constants.STOR_FUNCTION_JOURNAL: # Check that the journal stor resides on a device of SSD type. @@ -938,7 +920,7 @@ def _create(stor, iprofile=None): # Journals are created only for OSDs if new_stor.get("function") == constants.STOR_FUNCTION_OSD: - if iprofile or not journal_location: + if not journal_location: # iprofile either provides a valid location or assumes # collocation. For collocation: stor['journal_location'] = # stor['uuid'], since sometimes we get the UUID of the newly @@ -954,16 +936,15 @@ def _create(stor, iprofile=None): setattr(new_stor, "journal_location", new_journal.get("onistor_uuid")) setattr(new_stor, "journal_size", new_journal.get("size_mib")) - if not iprofile: - # Update the state of the storage tier - try: - pecan.request.dbapi.storage_tier_update( - tier.id, - {'status': constants.SB_TIER_STATUS_IN_USE}) - except exception.StorageTierNotFound as e: - # Shouldn't happen. Log exception. Stor is created but tier status - # is not updated. - LOG.exception(e) + # Update the state of the storage tier + try: + pecan.request.dbapi.storage_tier_update( + tier.id, + {'status': constants.SB_TIER_STATUS_IN_USE}) + except exception.StorageTierNotFound as e: + # Shouldn't happen. Log exception. Stor is created but tier status + # is not updated. + LOG.exception(e) # Apply runtime manifests for OSDs on "available" nodes. 
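The storage.py hunk above keeps the journal handling that used to be skipped for storage profiles: a collocated journal is forced to the default size, and an external journal without an explicit size falls back to the default before the space check. A condensed sketch of that decision follows; ``JOURNAL_DEFAULT_SIZE_MIB`` is a placeholder for ``CONF.journal.journal_default_size``.

::

    # Sketch of the journal sizing rule shown above. The stor dictionary keys
    # ('uuid', 'journal_location', 'journal_size_mib') match the hunk; the
    # default size below is an illustrative value only.

    JOURNAL_DEFAULT_SIZE_MIB = 1024


    def resolve_journal_size(stor, journal_location):
        if 'uuid' in stor and journal_location == stor['uuid']:
            # Collocated journal: always use the default size.
            stor['journal_size_mib'] = JOURNAL_DEFAULT_SIZE_MIB
        elif journal_location:
            # Journal on another stor: fill in the default only if no size was
            # requested; the real code then verifies free space on the journal
            # disk via _check_journal_space().
            if not stor.get('journal_size_mib'):
                stor['journal_size_mib'] = JOURNAL_DEFAULT_SIZE_MIB
        return stor


    if __name__ == '__main__':
        collocated = {'uuid': 'osd-1', 'journal_size_mib': None}
        print(resolve_journal_size(collocated, 'osd-1'))
        external = {'uuid': 'osd-2', 'journal_size_mib': None}
        print(resolve_journal_size(external, 'journal-stor-7'))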
runtime_manifests = False diff --git a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/storage_tier.py b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/storage_tier.py index 3691276ed3..f4cf8bd1c1 100644 --- a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/storage_tier.py +++ b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/storage_tier.py @@ -492,14 +492,10 @@ def _set_defaults(tier): # This method allows creating a storage tier through a non-HTTP -# request e.g. through profile.py while still passing -# through physical volume semantic checks and osd configuration -# Hence, not declared inside a class -# +# request # Param: # tier - dictionary of storage tier values -# iprofile - True when created by a storage profile -def _create(self, tier, iprofile=None): +def _create(self, tier): LOG.info("storage_tier._create with initial params: %s" % tier) # Set defaults - before checks to allow for optional attributes diff --git a/sysinv/sysinv/sysinv/sysinv/common/constants.py b/sysinv/sysinv/sysinv/sysinv/common/constants.py index 33f3fbb615..a24489f730 100644 --- a/sysinv/sysinv/sysinv/sysinv/common/constants.py +++ b/sysinv/sysinv/sysinv/sysinv/common/constants.py @@ -79,7 +79,6 @@ POWERON_ACTION = 'power-on' POWEROFF_ACTION = 'power-off' SWACT_ACTION = 'swact' FORCE_SWACT_ACTION = 'force-swact' -APPLY_PROFILE_ACTION = 'apply-profile' SUBFUNCTION_CONFIG_ACTION = 'subfunction_config' VIM_SERVICES_ENABLED = 'services-enabled' VIM_SERVICES_DISABLED = 'services-disabled' @@ -109,8 +108,7 @@ MTCE_ACTIONS = [REBOOT_ACTION, VIM_ACTIONS = [LOCK_ACTION, FORCE_LOCK_ACTION] -CONFIG_ACTIONS = [SUBFUNCTION_CONFIG_ACTION, - APPLY_PROFILE_ACTION] +CONFIG_ACTIONS = [SUBFUNCTION_CONFIG_ACTION] # Personalities CONTROLLER = 'controller' @@ -976,12 +974,6 @@ CEPH_CRUSH_MAP_APPLIED = '.crushmap_applied' CEPH_CRUSH_MAP_DEPTH = 3 CEPH_CRUSH_TIER_SUFFIX = "-tier" -# Profiles -PROFILE_TYPE_CPU = 'cpu' -PROFILE_TYPE_INTERFACE = 'if' -PROFILE_TYPE_STORAGE = 'stor' -PROFILE_TYPE_MEMORY = 'memory' -PROFILE_TYPE_LOCAL_STORAGE = 'localstg' # PCI Alias types and names NOVA_PCI_ALIAS_GPU_NAME = "gpu" diff --git a/sysinv/sysinv/sysinv/sysinv/db/api.py b/sysinv/sysinv/sysinv/sysinv/db/api.py index 5d6239c875..cc1e601e21 100644 --- a/sysinv/sysinv/sysinv/sysinv/db/api.py +++ b/sysinv/sysinv/sysinv/sysinv/db/api.py @@ -236,62 +236,6 @@ class Connection(object): :param server: The id or uuid of a server. """ - @abc.abstractmethod - def interface_profile_get_list(self, limit=None, marker=None, - sort_key=None, sort_dir=None, session=None): - """Return a list of interface profiles. - - :param limit: Maximum number of profiles to return. - :param marker: the last item of the previous page; we return the next - result set. - :param sort_key: Attribute by which results should be sorted. - :param sort_dir: direction in which results should be sorted. - (asc, desc) - :param session: The DB session instance to use during the model query - """ - - @abc.abstractmethod - def cpu_profile_get_list(self, limit=None, marker=None, - sort_key=None, sort_dir=None, session=None): - """Return a list of cpu profiles. - - :param limit: Maximum number of profiles to return. - :param marker: the last item of the previous page; we return the next - result set. - :param sort_key: Attribute by which results should be sorted. - :param sort_dir: direction in which results should be sorted. 
- (asc, desc) - :param session: The DB session instance to use during the model query - """ - - @abc.abstractmethod - def memory_profile_get_list(self, limit=None, marker=None, - sort_key=None, sort_dir=None, session=None): - """Return a list of memory profiles. - - :param limit: Maximum number of profiles to return. - :param marker: the last item of the previous page; we return the next - result set. - :param sort_key: Attribute by which results should be sorted. - :param sort_dir: direction in which results should be sorted. - (asc, desc) - :param session: The DB session instance to use during the model query - """ - - @abc.abstractmethod - def storage_profile_get_list(self, limit=None, marker=None, - sort_key=None, sort_dir=None, session=None): - """Return a list of storage profiles. - - :param limit: Maximum number of profiles to return. - :param marker: the last item of the previous page; we return the next - result set. - :param sort_key: Attribute by which results should be sorted. - :param sort_dir: direction in which results should be sorted. - (asc, desc) - :param session: The DB session instance to use during the model query - """ - @abc.abstractmethod def inode_create(self, forihostid, values): """Create a new inode for a host. diff --git a/sysinv/sysinv/sysinv/sysinv/db/sqlalchemy/api.py b/sysinv/sysinv/sysinv/sysinv/db/sqlalchemy/api.py index 40675b8962..0e4008b974 100644 --- a/sysinv/sysinv/sysinv/sysinv/db/sqlalchemy/api.py +++ b/sysinv/sysinv/sysinv/sysinv/db/sqlalchemy/api.py @@ -34,7 +34,6 @@ from sqlalchemy import or_ from sqlalchemy.orm import contains_eager from sqlalchemy.orm import joinedload -from sqlalchemy.orm import subqueryload from sqlalchemy.orm import with_polymorphic from sqlalchemy.orm.exc import DetachedInstanceError from sqlalchemy.orm.exc import MultipleResultsFound @@ -1460,59 +1459,6 @@ class Connection(api.Connection): query.delete() - def interface_profile_get_list(self, limit=None, marker=None, - sort_key=None, sort_dir=None, session=None): - - ports = with_polymorphic(models.Ports, '*', flat=True) - interfaces = with_polymorphic(models.Interfaces, '*', flat=True) - - query = model_query(models.ihost, session=session).\ - filter_by(recordtype="profile"). \ - join(models.ihost.ports). \ - options(subqueryload(models.ihost.ports.of_type(ports)), - subqueryload(models.ihost.interfaces.of_type(interfaces))) - - return _paginate_query(models.ihost, limit, marker, - sort_key, sort_dir, query) - - def cpu_profile_get_list(self, limit=None, marker=None, - sort_key=None, sort_dir=None, session=None): - - query = model_query(models.ihost, session=session).\ - filter_by(recordtype="profile"). \ - join(models.ihost.cpus). \ - options(subqueryload(models.ihost.cpus), - subqueryload(models.ihost.nodes)) - - return _paginate_query(models.ihost, limit, marker, - sort_key, sort_dir, query) - - def memory_profile_get_list(self, limit=None, marker=None, - sort_key=None, sort_dir=None, session=None): - - query = model_query(models.ihost, session=session).\ - filter_by(recordtype="profile"). \ - join(models.ihost.memory). 
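The docstrings removed above describe the same limit/marker/sort_key/sort_dir paging contract that ``_paginate_query`` still implements for the remaining list methods in db/api.py. Below is a toy, in-memory illustration of that contract; the real code builds a SQLAlchemy query, and the ``hosts`` rows here are made up.

::

    # Toy illustration of the limit/marker/sort_key/sort_dir contract
    # described in the docstrings above: sort the rows, resume after the
    # marker row, and return at most 'limit' entries.

    def paginate(rows, limit=None, marker=None, sort_key='id', sort_dir='asc'):
        ordered = sorted(rows, key=lambda r: r[sort_key],
                         reverse=(sort_dir == 'desc'))
        if marker is not None:
            ids = [r['id'] for r in ordered]
            ordered = ordered[ids.index(marker) + 1:]
        return ordered[:limit] if limit else ordered


    if __name__ == '__main__':
        hosts = [{'id': i, 'hostname': 'node-%d' % i} for i in range(5)]
        print(paginate(hosts, limit=2, marker=1, sort_key='id', sort_dir='asc'))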
\ - options(subqueryload(models.ihost.memory), - subqueryload(models.ihost.nodes)) - - return _paginate_query(models.ihost, limit, marker, - sort_key, sort_dir, query) - - def storage_profile_get_list(self, limit=None, marker=None, - sort_key=None, sort_dir=None, session=None): - - query = model_query(models.ihost, session=session).\ - filter_by(recordtype="profile").\ - join(models.ihost.disks).\ - outerjoin(models.ihost.partitions).\ - outerjoin(models.ihost.stors).\ - outerjoin(models.ihost.pvs).\ - outerjoin(models.ihost.lvgs) - - return _paginate_query(models.ihost, limit, marker, - sort_key, sort_dir, query) - def _node_get(self, inode_id): query = model_query(models.inode) query = add_identity_filter(query, inode_id) @@ -2379,17 +2325,13 @@ class Connection(api.Connection): self._interface_ratelimit_encode(values) - is_profile = values.get('interface_profile', False) with _session_for_write() as session: # interface = models.Interfaces() if hasattr(obj, 'uses') and values.get('uses'): for i in list(values['uses']): try: - if is_profile: - uses_if = self._interface_get(models.Interfaces, i, obj=obj) - else: - uses_if = self._interface_get(models.Interfaces, i, values['forihostid'], obj=obj) + uses_if = self._interface_get(models.Interfaces, i, values['forihostid'], obj=obj) obj.uses.append(uses_if) except NoResultFound: raise exception.InvalidParameterValue( @@ -2402,10 +2344,7 @@ class Connection(api.Connection): if hasattr(obj, 'used_by') and values.get('used_by'): for i in list(values['used_by']): try: - if is_profile: - uses_if = self._interface_get(models.Interfaces, i, obj=obj) - else: - uses_if = self._interface_get(models.Interfaces, i, values['forihostid'], obj=obj) + uses_if = self._interface_get(models.Interfaces, i, values['forihostid'], obj=obj) obj.used_by.append(uses_if) except NoResultFound: raise exception.InvalidParameterValue( @@ -5980,10 +5919,8 @@ class Connection(api.Connection): if not values.get('uuid'): values['uuid'] = uuidutils.generate_uuid() values['host_id'] = int(host_id) - if 'sensor_profile' in values: values.pop('sensor_profile') - # The id is null for ae sensors with more than one member # sensor temp_id = obj.id @@ -6258,10 +6195,8 @@ class Connection(api.Connection): if not values.get('uuid'): values['uuid'] = uuidutils.generate_uuid() values['host_id'] = int(host_id) - if 'sensorgroup_profile' in values: values.pop('sensorgroup_profile') - temp_id = obj.id obj.update(values) if obj.id is None: diff --git a/sysinv/sysinv/sysinv/sysinv/db/sqlalchemy/migrate_repo/versions/122_remove_profiles.py b/sysinv/sysinv/sysinv/sysinv/db/sqlalchemy/migrate_repo/versions/122_remove_profiles.py new file mode 100644 index 0000000000..ca3d6dfe8e --- /dev/null +++ b/sysinv/sysinv/sysinv/sysinv/db/sqlalchemy/migrate_repo/versions/122_remove_profiles.py @@ -0,0 +1,87 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright (c) 2021 Wind River Systems, Inc. 
+# +# SPDX-License-Identifier: Apache-2.0 +# + +from sqlalchemy import MetaData, Table, Column, Integer, Enum, String +from sqlalchemy.dialects import postgresql + + +ENGINE = 'InnoDB' +CHARSET = 'utf8' + + +def upgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + i_host = Table('i_host', + meta, + Column('id', Integer, + primary_key=True, nullable=False), + mysql_engine=ENGINE, mysql_charset=CHARSET, + autoload=True) + + if migrate_engine.url.get_dialect() is postgresql.dialect: + old_recordTypeEnum = Enum('standard', + 'profile', + 'sprofile', + 'reserve1', + 'reserve2', + name='recordtypeEnum') + + recordTypeEnum = Enum('standard', + 'sprofile', + 'reserve1', + 'reserve2', + name='recordtypeEnum') + + old_personalityEnum = Enum('controller', + 'worker', + 'network', + 'storage', + 'profile', + 'reserve1', + 'reserve2', + name='invPersonalityEnum') + + personalityEnum = Enum('controller', + 'worker', + 'network', + 'storage', + 'reserve1', + 'reserve2', + name='invPersonalityEnum') + + migrate_engine.execute("delete from partition using i_host" + " where i_host.recordtype='profile'" + " and partition.forihostid=i_host.id") + + migrate_engine.execute("delete from i_host where recordtype='profile'") + + personality_col = i_host.c.personality + personality_col.alter(Column('personality', String(60))) + old_personalityEnum.drop(bind=migrate_engine, checkfirst=False) + personalityEnum.create(bind=migrate_engine, checkfirst=False) + migrate_engine.execute('ALTER TABLE i_host ALTER COLUMN personality ' + 'TYPE "invPersonalityEnum" USING ' + 'personality::text::"invPersonalityEnum"') + + recordtype_col = i_host.c.recordtype + recordtype_col.alter(Column('recordtype', String(60))) + old_recordTypeEnum.drop(bind=migrate_engine, checkfirst=False) + recordTypeEnum.create(bind=migrate_engine, checkfirst=False) + migrate_engine.execute('ALTER TABLE i_host ALTER COLUMN recordtype ' + 'TYPE "recordtypeEnum" USING ' + 'recordtype::text::"recordtypeEnum"') + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + # As per other openstack components, downgrade is + # unsupported in this release. 
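The new migration above narrows the two Postgres enums by deleting the rows that still use the ``profile`` value, temporarily widening each column to a plain string, recreating the enum type without ``profile``, and casting the column back with ``ALTER TABLE ... USING``. The sketch below condenses that sequence for the ``recordtype`` column as raw SQL against a SQLAlchemy engine, rather than the sqlalchemy-migrate helpers used in the migration itself.

::

    # Condensed sketch of the enum-narrowing pattern used by the migration
    # above. The statement order matters: rows using the removed value must
    # be deleted before the column is cast back to the new enum type.

    import sqlalchemy as sa


    def narrow_recordtype_enum(engine):
        with engine.begin() as conn:
            # 1. Drop the rows that use the value being removed.
            conn.execute(sa.text("DELETE FROM i_host WHERE recordtype = 'profile'"))
            # 2. Temporarily store the column as plain text.
            conn.execute(sa.text(
                "ALTER TABLE i_host ALTER COLUMN recordtype TYPE VARCHAR(60)"))
            # 3. Replace the enum type without the 'profile' label.
            conn.execute(sa.text('DROP TYPE "recordtypeEnum"'))
            conn.execute(sa.text(
                'CREATE TYPE "recordtypeEnum" AS ENUM '
                "('standard', 'sprofile', 'reserve1', 'reserve2')"))
            # 4. Cast the column back to the recreated enum.
            conn.execute(sa.text(
                'ALTER TABLE i_host ALTER COLUMN recordtype '
                'TYPE "recordtypeEnum" USING recordtype::text::"recordtypeEnum"'))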
+ raise NotImplementedError('SysInv database downgrade is unsupported.') diff --git a/sysinv/sysinv/sysinv/sysinv/db/sqlalchemy/models.py b/sysinv/sysinv/sysinv/sysinv/db/sqlalchemy/models.py index b119380a0e..a6668931aa 100644 --- a/sysinv/sysinv/sysinv/sysinv/db/sqlalchemy/models.py +++ b/sysinv/sysinv/sysinv/sysinv/db/sqlalchemy/models.py @@ -116,7 +116,6 @@ class isystem(Base): class ihost(Base): recordTypeEnum = Enum('standard', - 'profile', 'sprofile', 'reserve1', 'reserve2', @@ -135,7 +134,6 @@ class ihost(Base): 'worker', 'network', 'storage', - 'profile', 'reserve1', 'reserve2', 'edgeworker', diff --git a/sysinv/sysinv/sysinv/sysinv/objects/__init__.py b/sysinv/sysinv/sysinv/sysinv/objects/__init__.py index 6c2d509d99..a121c3716a 100644 --- a/sysinv/sysinv/sysinv/sysinv/objects/__init__.py +++ b/sysinv/sysinv/sysinv/sysinv/objects/__init__.py @@ -69,7 +69,6 @@ from sysinv.objects import ntp from sysinv.objects import pci_device from sysinv.objects import peer from sysinv.objects import port -from sysinv.objects import profile from sysinv.objects import ptp from sysinv.objects import ptp_instance from sysinv.objects import ptp_interface @@ -135,7 +134,6 @@ system = system.System cluster = cluster.Cluster peer = peer.Peer host = host.Host -profile = profile.Profile node = node.Node cpu = cpu.CPU memory = memory.Memory @@ -222,7 +220,6 @@ __all__ = ("system", "cluster", "peer", "host", - "profile", "node", "cpu", "memory", diff --git a/sysinv/sysinv/sysinv/sysinv/objects/profile.py b/sysinv/sysinv/sysinv/sysinv/objects/profile.py deleted file mode 100644 index e566f8939f..0000000000 --- a/sysinv/sysinv/sysinv/sysinv/objects/profile.py +++ /dev/null @@ -1,67 +0,0 @@ -# -# Copyright (c) 2013-2016 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# coding=utf-8 -# - -from sysinv.db import api as db_api -from sysinv.objects import base -from sysinv.objects import utils - - -class Profile(base.SysinvObject): - - dbapi = db_api.get_instance() - - fields = { - 'id': int, - 'recordtype': utils.str_or_none, - - # 'created_at': utils.datetime_str_or_none, - # 'updated_at': utils.datetime_str_or_none, - 'hostname': utils.str_or_none, - 'personality': utils.str_or_none, - # Host is working on a blocking process - 'reserved': utils.str_or_none, - # NOTE: instance_uuid must be read-only when server is provisioned - 'uuid': utils.str_or_none, - - # NOTE: driver should be read-only after server is created - 'invprovision': utils.str_or_none, - 'mgmt_mac': utils.str_or_none, - 'mgmt_ip': utils.str_or_none, - - # Board management members - 'bm_ip': utils.str_or_none, - 'bm_mac': utils.str_or_none, - 'bm_type': utils.str_or_none, - 'bm_username': utils.str_or_none, - - 'location': utils.dict_or_none, - # 'reservation': utils.str_or_none, - 'serialid': utils.str_or_none, - 'administrative': utils.str_or_none, - 'operational': utils.str_or_none, - 'availability': utils.str_or_none, - 'action': utils.str_or_none, - 'task': utils.str_or_none, - 'uptime': utils.int_or_none, - - 'boot_device': utils.str_or_none, - 'rootfs_device': utils.str_or_none, - 'install_output': utils.str_or_none, - 'console': utils.str_or_none, - 'tboot': utils.str_or_none, - } - - @base.remotable_classmethod - def get_by_uuid(cls, context, uuid): - return cls.dbapi.ihost_get(uuid) - - def save_changes(self, context, updates): - self.dbapi.ihost_update(self.uuid, # pylint: disable=no-member - updates) diff --git a/sysinv/sysinv/sysinv/sysinv/tests/api/test_host.py 
b/sysinv/sysinv/sysinv/sysinv/tests/api/test_host.py index 6eafe87d7a..0e965ca7c4 100644 --- a/sysinv/sysinv/sysinv/sysinv/tests/api/test_host.py +++ b/sysinv/sysinv/sysinv/sysinv/tests/api/test_host.py @@ -3084,37 +3084,6 @@ class TestPatchStdDuplexControllerVIM(TestHost): self.assertEqual(constants.VIM_SERVICES_DELETE_FAILED, result['vim_progress_status']) - def test_apply_profile_action_bad_profile_id(self): - # Note: Including this testcase for completeness (wanted to cover each - # action. The testcases in test_interface.py cover the success case. - - # Create controller-0 - self._create_controller_0( - invprovision=constants.PROVISIONED, - administrative=constants.ADMIN_UNLOCKED, - operational=constants.OPERATIONAL_ENABLED, - availability=constants.AVAILABILITY_ONLINE) - - # Create controller-1 - c1_host = self._create_controller_1( - invprovision=constants.PROVISIONED, - administrative=constants.ADMIN_LOCKED, - operational=constants.OPERATIONAL_ENABLED, - availability=constants.AVAILABILITY_ONLINE) - - # Apply profile to controller-1 and verify it was rejected - self.assertRaises(webtest.app.AppError, - self.patch_json, - '/ihosts/%s' % c1_host['hostname'], - [{'path': '/action', - 'value': constants.APPLY_PROFILE_ACTION, - 'op': 'replace'}, - {'path': '/iprofile_uuid', - 'value': 'notarealuuid', - 'op': 'replace'} - ], - headers={'User-Agent': 'sysinv-test'}) - def test_subfunction_config_action(self): # Create controller-0 (AIO) c0_host = self._create_controller_0( diff --git a/sysinv/sysinv/sysinv/sysinv/tests/api/test_interface.py b/sysinv/sysinv/sysinv/sysinv/tests/api/test_interface.py index 8b75bde8b7..a74314f4af 100644 --- a/sysinv/sysinv/sysinv/sysinv/tests/api/test_interface.py +++ b/sysinv/sysinv/sysinv/sysinv/tests/api/test_interface.py @@ -513,27 +513,6 @@ class InterfaceTestCase(base.FunctionalTest, dbbase.BaseHostTestCase): self.assertEqual(http_client.OK, response.status_int) return response - def _create_and_apply_profile(self, host): - ifprofile = { - 'ihost_uuid': host.uuid, - 'profilename': 'ifprofile-node1', - 'profiletype': constants.PROFILE_TYPE_INTERFACE - } - response = self.post_json('/iprofile', ifprofile) - self.assertEqual(http_client.OK, response.status_int) - - list_data = self.get_json('/iprofile') - profile_uuid = list_data['iprofiles'][0]['uuid'] - - self.get_json('/iprofile/%s/iinterfaces' % profile_uuid) - self.get_json('/iprofile/%s/ethernet_ports' % profile_uuid) - - result = self.patch_dict_json('/ihosts/%s' % host.id, - headers={'User-Agent': 'sysinv'}, - action=constants.APPLY_PROFILE_ACTION, - iprofile_uuid=profile_uuid) - self.assertEqual(http_client.OK, result.status_int) - def is_interface_equal(self, first, second): for key in first: if key in second: @@ -563,518 +542,6 @@ class InterfaceTestCase(base.FunctionalTest, dbbase.BaseHostTestCase): self._setup_configuration() -class InterfaceControllerEthernet(InterfaceTestCase): - - def _setup_configuration(self): - # Setup a sample configuration where all platform interfaces are - # ethernet interfaces. 
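The removed test above uses the callable form of unittest's ``assertRaises``: the request helper and its arguments are passed directly, so the assertion fails if no error is raised. A standalone illustration of that form follows, with a fake ``patch_json`` standing in for the webtest helper used by the sysinv test base.

::

    # Standalone illustration of the callable assertRaises form used by the
    # removed test above. The fake patch_json below is a stand-in that always
    # rejects the request, roughly like the API rejecting a bad action.

    import unittest


    class RejectedPatchExample(unittest.TestCase):

        def patch_json(self, path, body, headers=None):
            raise ValueError('invalid action for %s: %r' % (path, body))

        def test_rejected_patch(self):
            self.assertRaises(ValueError,
                              self.patch_json,
                              '/ihosts/controller-1',
                              [{'path': '/action', 'value': 'bogus',
                                'op': 'replace'}],
                              headers={'User-Agent': 'sysinv-test'})


    if __name__ == '__main__':
        unittest.main()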
- self._create_host(constants.CONTROLLER, admin=constants.ADMIN_LOCKED) - self._create_ethernet('oam', constants.NETWORK_TYPE_OAM) - self._create_ethernet('mgmt', constants.NETWORK_TYPE_MGMT) - self._create_ethernet('cluster', constants.NETWORK_TYPE_CLUSTER_HOST) - self.get_json('/ihosts/%s/iinterfaces' % self.controller.uuid) - - def setUp(self): - super(InterfaceControllerEthernet, self).setUp() - - def test_controller_ethernet_profile(self): - self._create_and_apply_profile(self.controller) - - -class InterfaceControllerBond(InterfaceTestCase): - - def _setup_configuration(self): - # Setup a sample configuration where all platform interfaces are - # aggregated ethernet interfaces. - self._create_host(constants.CONTROLLER, admin=constants.ADMIN_LOCKED) - self._create_bond('oam', constants.NETWORK_TYPE_OAM) - self._create_bond('mgmt', constants.NETWORK_TYPE_MGMT) - self._create_bond('cluster', constants.NETWORK_TYPE_CLUSTER_HOST) - - def setUp(self): - super(InterfaceControllerBond, self).setUp() - - def test_controller_bond_profile(self): - self._create_and_apply_profile(self.controller) - - -class InterfaceControllerVlanOverBond(InterfaceTestCase): - - def _setup_configuration(self): - # Setup a sample configuration where all platform interfaces are - # vlan interfaces over aggregated ethernet interfaces - self._create_host(constants.CONTROLLER, admin=constants.ADMIN_LOCKED) - bond = self._create_bond('pxeboot', constants.NETWORK_TYPE_PXEBOOT) - self._create_vlan('oam', constants.NETWORK_TYPE_OAM, - constants.INTERFACE_CLASS_PLATFORM, 1, bond) - self._create_vlan('mgmt', constants.NETWORK_TYPE_MGMT, - constants.INTERFACE_CLASS_PLATFORM, 2, bond) - self._create_vlan('cluster', constants.NETWORK_TYPE_CLUSTER_HOST, - constants.INTERFACE_CLASS_PLATFORM, 3, bond) - - def setUp(self): - super(InterfaceControllerVlanOverBond, self).setUp() - - def test_controller_vlan_over_bond_profile(self): - self._create_and_apply_profile(self.controller) - - -class InterfaceControllerVlanOverEthernet(InterfaceTestCase): - - def _setup_configuration(self): - # Setup a sample configuration where all platform interfaces are - # vlan interfaces over ethernet interfaces - self._create_host(constants.CONTROLLER, admin=constants.ADMIN_LOCKED) - port, iface = self._create_ethernet( - 'pxeboot', constants.NETWORK_TYPE_PXEBOOT) - self._create_vlan('oam', constants.NETWORK_TYPE_OAM, - constants.INTERFACE_CLASS_PLATFORM, 1, iface) - self._create_vlan('mgmt', constants.NETWORK_TYPE_MGMT, - constants.INTERFACE_CLASS_PLATFORM, 2, iface) - self._create_vlan('cluster', constants.NETWORK_TYPE_CLUSTER_HOST, - constants.INTERFACE_CLASS_PLATFORM, 3, iface) - - def setUp(self): - super(InterfaceControllerVlanOverEthernet, self).setUp() - - def test_controller_vlan_over_ethernet_profile(self): - self._create_and_apply_profile(self.controller) - - -class InterfaceEthernetOverSriov(InterfaceTestCase): - - def _setup_configuration(self): - # Setup a sample configuration where the personality is set to a - # controller with a worker subfunction and all interfaces are - # ethernet aside from a VF over SR-IOV interface. 
- self._create_host(constants.CONTROLLER, constants.WORKER, - admin=constants.ADMIN_LOCKED) - self._create_datanetworks() - - lower_port, lower_iface = self._create_sriov( - 'sriov1', sriov_numvfs=2) - self._create_vf('vf1', lower_iface=lower_iface, sriov_numvfs=1, - sriov_vf_driver='vfio', datanetworks='group0-data1') - port, iface = self._create_ethernet( - 'pxeboot', constants.NETWORK_TYPE_PXEBOOT, lower_iface=lower_iface) - self._create_vlan('oam', constants.NETWORK_TYPE_OAM, - constants.INTERFACE_CLASS_PLATFORM, 1, lower_iface) - self._create_vlan('mgmt', constants.NETWORK_TYPE_MGMT, - constants.INTERFACE_CLASS_PLATFORM, 2, lower_iface) - self._create_vlan('cluster', constants.NETWORK_TYPE_CLUSTER_HOST, - constants.INTERFACE_CLASS_PLATFORM, 3, lower_iface) - - def setUp(self): - super(InterfaceEthernetOverSriov, self).setUp() - - def test_ethernet_over_sriov_profile(self): - self._create_and_apply_profile(self.controller) - - -class InterfaceWorkerEthernet(InterfaceTestCase): - - def _setup_configuration(self): - self._create_host(constants.CONTROLLER, admin=constants.ADMIN_UNLOCKED) - self._create_datanetworks() - - # Setup a sample configuration where the personality is set to a - # worker and all interfaces are ethernet interfaces. - self._create_host(constants.WORKER, constants.WORKER, - admin=constants.ADMIN_LOCKED) - self._create_ethernet('mgmt', constants.NETWORK_TYPE_MGMT, - host=self.worker) - self._create_ethernet('cluster', constants.NETWORK_TYPE_CLUSTER_HOST, - host=self.worker) - self._create_ethernet('data', - constants.NETWORK_TYPE_DATA, - constants.INTERFACE_CLASS_DATA, - 'group0-data0', host=self.worker) - self._create_ethernet('sriov', - constants.NETWORK_TYPE_PCI_SRIOV, - constants.INTERFACE_CLASS_PCI_SRIOV, - 'group0-data1', host=self.worker) - self._create_ethernet('pthru', - constants.NETWORK_TYPE_PCI_PASSTHROUGH, - constants.INTERFACE_CLASS_PCI_PASSTHROUGH, - 'group0-ext0', host=self.worker) - port, iface = ( - self._create_ethernet('slow', - constants.NETWORK_TYPE_DATA, - constants.INTERFACE_CLASS_DATA, - 'group0-ext1', host=self.worker)) - port['dpdksupport'] = False - port, iface = ( - self._create_ethernet('mlx4', - constants.NETWORK_TYPE_DATA, - constants.INTERFACE_CLASS_DATA, - 'group0-ext2', host=self.worker)) - port['driver'] = 'mlx4_core' - port, iface = ( - self._create_ethernet('mlx5', - constants.NETWORK_TYPE_DATA, - constants.INTERFACE_CLASS_DATA, - 'group0-ext3', host=self.worker)) - port['driver'] = 'mlx5_core' - - def setUp(self): - super(InterfaceWorkerEthernet, self).setUp() - - def test_worker_ethernet_profile(self): - self._create_and_apply_profile(self.worker) - - -class InterfaceWorkerVlanOverEthernet(InterfaceTestCase): - - def _setup_configuration(self): - self._create_host(constants.CONTROLLER) - self._create_datanetworks() - - # Setup a sample configuration where the personality is set to a - # worker and all interfaces are vlan interfaces over ethernet - # interfaces. 
- self._create_host(constants.WORKER, admin=constants.ADMIN_LOCKED) - port, iface = self._create_ethernet( - 'pxeboot', constants.NETWORK_TYPE_PXEBOOT, host=self.worker) - self._create_worker_vlan('mgmt', constants.NETWORK_TYPE_MGMT, - constants.INTERFACE_CLASS_PLATFORM, 2, iface) - self._create_worker_vlan('cluster', constants.NETWORK_TYPE_CLUSTER_HOST, - constants.INTERFACE_CLASS_PLATFORM, 3) - self._create_worker_vlan('data', constants.INTERFACE_CLASS_DATA, - constants.NETWORK_TYPE_DATA, 5, - datanetworks='group0-ext0') - self._create_ethernet('sriov', - constants.NETWORK_TYPE_PCI_SRIOV, - constants.INTERFACE_CLASS_PCI_SRIOV, - 'group0-data0', host=self.worker) - self._create_ethernet('pthru', - constants.NETWORK_TYPE_PCI_PASSTHROUGH, - constants.INTERFACE_CLASS_PCI_PASSTHROUGH, - 'group0-data1', host=self.worker) - - def setUp(self): - super(InterfaceWorkerVlanOverEthernet, self).setUp() - - def test_worker_vlan_over_ethernet_profile(self): - self._create_and_apply_profile(self.worker) - - -class InterfaceWorkerBond(InterfaceTestCase): - - def _setup_configuration(self): - self._create_host(constants.CONTROLLER, admin=constants.ADMIN_UNLOCKED) - self._create_datanetworks() - - # Setup a sample configuration where the personality is set to a - # worker and all interfaces are aggregated ethernet interfaces. - self._create_host(constants.WORKER, admin=constants.ADMIN_LOCKED) - self._create_worker_bond('mgmt', constants.NETWORK_TYPE_MGMT) - self._create_worker_bond('cluster', constants.NETWORK_TYPE_CLUSTER_HOST) - self._create_worker_bond('data', - constants.NETWORK_TYPE_DATA, - constants.INTERFACE_CLASS_DATA, - datanetworks='group0-data0') - self._create_ethernet('sriov', - constants.NETWORK_TYPE_PCI_SRIOV, - constants.INTERFACE_CLASS_PCI_SRIOV, - 'group0-ext0', host=self.worker) - self._create_ethernet('pthru', - constants.NETWORK_TYPE_PCI_PASSTHROUGH, - constants.INTERFACE_CLASS_PCI_PASSTHROUGH, - 'group0-ext1', host=self.worker) - - def setUp(self): - super(InterfaceWorkerBond, self).setUp() - - def test_worker_bond_profile(self): - self._create_and_apply_profile(self.worker) - - -class InterfaceWorkerVlanOverBond(InterfaceTestCase): - - def _setup_configuration(self): - self._create_host(constants.CONTROLLER) - self._create_datanetworks() - - # Setup a sample configuration where the personality is set to a - # worker and all interfaces are vlan interfaces over aggregated - # ethernet interfaces. 
- self._create_host(constants.WORKER, admin=constants.ADMIN_LOCKED) - bond = self._create_worker_bond('pxeboot', - constants.NETWORK_TYPE_PXEBOOT, - constants.INTERFACE_CLASS_PLATFORM) - self._create_worker_vlan('mgmt', constants.NETWORK_TYPE_MGMT, - constants.INTERFACE_CLASS_PLATFORM, 2, bond) - self._create_worker_vlan('cluster', constants.NETWORK_TYPE_CLUSTER_HOST, - constants.INTERFACE_CLASS_PLATFORM, 3, - bond) - bond2 = self._create_worker_bond('bond2', constants.NETWORK_TYPE_NONE) - self._create_worker_vlan('data', - constants.NETWORK_TYPE_DATA, - constants.INTERFACE_CLASS_DATA, - 5, bond2, - datanetworks='group0-ext0') - - self._create_worker_bond('bond3', constants.NETWORK_TYPE_NONE) - - self._create_ethernet('sriov', - constants.NETWORK_TYPE_PCI_SRIOV, - constants.INTERFACE_CLASS_PCI_SRIOV, - 'group0-data0', host=self.worker) - self._create_ethernet('pthru', - constants.NETWORK_TYPE_PCI_PASSTHROUGH, - constants.INTERFACE_CLASS_PCI_PASSTHROUGH, - 'group0-data1', host=self.worker) - - def setUp(self): - super(InterfaceWorkerVlanOverBond, self).setUp() - - def test_worker_vlan_over_bond_profile(self): - self._create_and_apply_profile(self.worker) - - -class InterfaceWorkerVlanOverDataEthernet(InterfaceTestCase): - - def _setup_configuration(self): - self._create_host(constants.CONTROLLER) - self._create_datanetworks() - - # Setup a sample configuration where the personality is set to a - # worker and all interfaces are vlan interfaces over data ethernet - # interfaces. - self._create_host(constants.WORKER, admin=constants.ADMIN_LOCKED) - port, iface = ( - self._create_ethernet('data', - constants.NETWORK_TYPE_DATA, - constants.INTERFACE_CLASS_DATA, - 'group0-data0', host=self.worker)) - self._create_ethernet('mgmt', constants.NETWORK_TYPE_MGMT, - host=self.worker) - self._create_ethernet('cluster', constants.NETWORK_TYPE_CLUSTER_HOST, - host=self.worker) - self._create_worker_vlan('data2', constants.NETWORK_TYPE_DATA, - constants.INTERFACE_CLASS_DATA, 5, - iface, datanetworks='group0-ext0') - self._create_ethernet('sriov', - constants.NETWORK_TYPE_PCI_SRIOV, - constants.INTERFACE_CLASS_PCI_SRIOV, - 'group0-ext1', host=self.worker) - self._create_ethernet('pthru', - constants.NETWORK_TYPE_PCI_PASSTHROUGH, - constants.INTERFACE_CLASS_PCI_PASSTHROUGH, - 'group0-ext2', host=self.worker) - - def setUp(self): - super(InterfaceWorkerVlanOverDataEthernet, self).setUp() - - def test_worker_vlan_over_data_ethernet_profile(self): - self._create_and_apply_profile(self.worker) - - -class InterfaceAIOEthernet(InterfaceTestCase): - - def _setup_configuration(self): - # Setup a sample configuration where the personality is set to a - # controller with a worker subfunction and all interfaces are - # ethernet interfaces. 
- self._create_host(constants.CONTROLLER, constants.WORKER, - admin=constants.ADMIN_LOCKED) - self._create_datanetworks() - self._create_ethernet('oam', constants.NETWORK_TYPE_OAM) - self._create_ethernet('mgmt', constants.NETWORK_TYPE_MGMT) - self._create_ethernet('cluster', constants.NETWORK_TYPE_CLUSTER_HOST) - self._create_ethernet('data', constants.NETWORK_TYPE_DATA, - constants.INTERFACE_CLASS_DATA, - 'group0-data0') - self._create_ethernet('sriov', constants.NETWORK_TYPE_PCI_SRIOV, - constants.INTERFACE_CLASS_PCI_SRIOV, - 'group0-data1') - self._create_ethernet('pthru', constants.NETWORK_TYPE_PCI_PASSTHROUGH, - constants.INTERFACE_CLASS_PCI_PASSTHROUGH, - 'group0-ext0') - port, iface = ( - self._create_ethernet('slow', constants.NETWORK_TYPE_DATA, - constants.INTERFACE_CLASS_DATA, - 'group0-ext1')) - port['dpdksupport'] = False - port, iface = ( - self._create_ethernet('mlx4', constants.NETWORK_TYPE_DATA, - constants.INTERFACE_CLASS_DATA, - 'group0-ext2')) - port['driver'] = 'mlx4_core' - port, iface = ( - self._create_ethernet('mlx5', constants.NETWORK_TYPE_DATA, - constants.INTERFACE_CLASS_DATA, - 'group0-ext3')) - - def setUp(self): - super(InterfaceAIOEthernet, self).setUp() - - def test_AIO_ethernet_profile(self): - self._create_and_apply_profile(self.controller) - - -class InterfaceAIOVlanOverEthernet(InterfaceTestCase): - - def _setup_configuration(self): - # Setup a sample configuration where the personality is set to a - # controller with a worker subfunction and all interfaces are - # vlan interfaces over ethernet interfaces. - self._create_host(constants.CONTROLLER, constants.WORKER, - admin=constants.ADMIN_LOCKED) - self._create_datanetworks() - port, iface = self._create_ethernet( - 'pxeboot', constants.NETWORK_TYPE_PXEBOOT) - self._create_vlan('oam', constants.NETWORK_TYPE_OAM, - constants.INTERFACE_CLASS_PLATFORM, 1, iface) - self._create_vlan('mgmt', constants.NETWORK_TYPE_MGMT, - constants.INTERFACE_CLASS_PLATFORM, 2, iface) - self._create_vlan('cluster', constants.NETWORK_TYPE_CLUSTER_HOST, - constants.INTERFACE_CLASS_PLATFORM, 3) - self._create_ethernet('data', constants.NETWORK_TYPE_DATA, - constants.INTERFACE_CLASS_DATA, - datanetworks='group0-ext0') - self._create_ethernet('sriov', constants.NETWORK_TYPE_PCI_SRIOV, - constants.INTERFACE_CLASS_PCI_SRIOV, - 'group0-ext1') - self._create_ethernet('pthru', constants.NETWORK_TYPE_PCI_PASSTHROUGH, - constants.INTERFACE_CLASS_PCI_PASSTHROUGH, - 'group0-ext2') - - def setUp(self): - super(InterfaceAIOVlanOverEthernet, self).setUp() - - def test_AIO_vlan_over_ethernet_profile(self): - self._create_and_apply_profile(self.controller) - - -class InterfaceAIOBond(InterfaceTestCase): - - def _setup_configuration(self): - # Setup a sample configuration where the personality is set to a - # controller with a worker subfunction and all interfaces are - # aggregated ethernet interfaces. 
- self._create_host(constants.CONTROLLER, - subfunction=constants.WORKER, - admin=constants.ADMIN_LOCKED) - self._create_datanetworks() - self._create_bond('oam', constants.NETWORK_TYPE_OAM) - self._create_bond('mgmt', constants.NETWORK_TYPE_MGMT) - self._create_bond('cluster', constants.NETWORK_TYPE_CLUSTER_HOST) - self._create_bond('data', constants.NETWORK_TYPE_DATA, - constants.INTERFACE_CLASS_DATA, - datanetworks='group0-data0') - self._create_ethernet('sriov', constants.NETWORK_TYPE_PCI_SRIOV, - constants.INTERFACE_CLASS_PCI_SRIOV, - datanetworks='group0-ext0') - self._create_ethernet('pthru', constants.NETWORK_TYPE_PCI_PASSTHROUGH, - constants.INTERFACE_CLASS_PCI_PASSTHROUGH, - datanetworks='group0-ext1') - - def setUp(self): - super(InterfaceAIOBond, self).setUp() - - def test_AIO_bond_profile(self): - self._create_and_apply_profile(self.controller) - - -class InterfaceAIOVlanOverBond(InterfaceTestCase): - - def _setup_configuration(self): - # Setup a sample configuration where the personality is set to a - # controller with a worker subfunction and all interfaces are - # vlan interfaces over aggregated ethernet interfaces. - self._create_host(constants.CONTROLLER, constants.WORKER, - admin=constants.ADMIN_LOCKED) - self._create_datanetworks() - bond = self._create_bond('pxeboot', constants.NETWORK_TYPE_PXEBOOT) - self._create_vlan('oam', constants.NETWORK_TYPE_OAM, - constants.INTERFACE_CLASS_PLATFORM, 1, bond) - self._create_vlan('mgmt', constants.NETWORK_TYPE_MGMT, - constants.INTERFACE_CLASS_PLATFORM, 2, bond) - self._create_vlan('cluster', constants.NETWORK_TYPE_CLUSTER_HOST, - constants.INTERFACE_CLASS_PLATFORM, 3, bond) - bond2 = self._create_bond('bond4', constants.NETWORK_TYPE_NONE) - self._create_vlan('data', constants.NETWORK_TYPE_DATA, - constants.INTERFACE_CLASS_DATA, - 5, bond2, - datanetworks='group0-ext0') - self._create_ethernet('sriov', constants.NETWORK_TYPE_PCI_SRIOV, - constants.INTERFACE_CLASS_PCI_SRIOV, - 'group0-ext1') - self._create_ethernet('pthru', constants.NETWORK_TYPE_PCI_PASSTHROUGH, - constants.INTERFACE_CLASS_PCI_PASSTHROUGH, - 'group0-ext2') - - def setUp(self): - super(InterfaceAIOVlanOverBond, self).setUp() - - def test_AIO_vlan_over_bond_profile(self): - self._create_and_apply_profile(self.controller) - - -class InterfaceAIOVfOverSriov(InterfaceTestCase): - - def _setup_configuration(self): - # Setup a sample configuration where the personality is set to a - # controller with a worker subfunction and all interfaces are - # ethernet aside from a VF over SR-IOV interface. 
- self._create_host(constants.CONTROLLER, constants.WORKER, - admin=constants.ADMIN_LOCKED) - self._create_datanetworks() - self._create_ethernet('oam', constants.NETWORK_TYPE_OAM) - self._create_ethernet('mgmt', constants.NETWORK_TYPE_MGMT) - self._create_ethernet('cluster', constants.NETWORK_TYPE_CLUSTER_HOST) - self._create_ethernet('data', constants.NETWORK_TYPE_DATA, - constants.INTERFACE_CLASS_DATA, - 'group0-data0') - self._create_ethernet('pthru', constants.NETWORK_TYPE_PCI_PASSTHROUGH, - constants.INTERFACE_CLASS_PCI_PASSTHROUGH, - 'group0-ext0') - lower_port, lower_iface = self._create_sriov( - 'sriov1', sriov_numvfs=2, datanetworks='group0-data0') - self._create_vf('vf1', lower_iface=lower_iface, sriov_numvfs=1, - sriov_vf_driver='vfio', datanetworks='group0-data1') - - def setUp(self): - super(InterfaceAIOVfOverSriov, self).setUp() - - def test_AIO_vf_over_sriov_profile(self): - self._create_and_apply_profile(self.controller) - - -class InterfaceAIOVfWithRatelimitOverSriov(InterfaceTestCase): - - def _setup_configuration(self): - # Setup a sample configuration where the personality is set to a - # controller with a worker subfunction and all interfaces are - # ethernet aside from a VF over SR-IOV interface. - self._create_host(constants.CONTROLLER, constants.WORKER, - admin=constants.ADMIN_LOCKED) - self._create_datanetworks() - self._create_ethernet('oam', constants.NETWORK_TYPE_OAM) - self._create_ethernet('mgmt', constants.NETWORK_TYPE_MGMT) - self._create_ethernet('cluster', constants.NETWORK_TYPE_CLUSTER_HOST) - self._create_ethernet('data', constants.NETWORK_TYPE_DATA, - constants.INTERFACE_CLASS_DATA, - 'group0-data0') - self._create_ethernet('pthru', constants.NETWORK_TYPE_PCI_PASSTHROUGH, - constants.INTERFACE_CLASS_PCI_PASSTHROUGH, - 'group0-ext0') - lower_port, lower_iface = self._create_sriov( - 'sriov1', sriov_numvfs=3, datanetworks='group0-data0') - self._create_vf('vf1', lower_iface=lower_iface, sriov_numvfs=1, - sriov_vf_driver='vfio', datanetworks='group0-data1') - self._create_vf('vf2', lower_iface=lower_iface, sriov_numvfs=1, - sriov_vf_driver='vfio', datanetworks='group0-data1', - max_tx_rate=100) - - def setUp(self): - super(InterfaceAIOVfWithRatelimitOverSriov, self).setUp() - - def test_AIO_vf_with_ratelimit_over_sriov_profile(self): - self._create_and_apply_profile(self.controller) - - # Test that the unsupported config is rejected class InterfaceAIOVlanOverDataEthernet(InterfaceTestCase): diff --git a/sysinv/sysinv/sysinv/sysinv/tests/api/test_kube_host_upgrade.py b/sysinv/sysinv/sysinv/sysinv/tests/api/test_kube_host_upgrade.py index b3d662cf37..e75a2f871d 100644 --- a/sysinv/sysinv/sysinv/sysinv/tests/api/test_kube_host_upgrade.py +++ b/sysinv/sysinv/sysinv/sysinv/tests/api/test_kube_host_upgrade.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2019 Wind River Systems, Inc. +# Copyright (c) 2021 Wind River Systems, Inc. 
# # SPDX-License-Identifier: Apache-2.0 # @@ -160,30 +160,6 @@ class TestListKubeHostUpgrade(TestKubeHostUpgrade): self.assertEqual(upgrade['host_id'], host_id) host_id += 1 - def test_all_ignore_profile(self): - # Create hosts - self._create_controller_0() - self._create_controller_1() - worker = self._create_worker(mgmt_ip='192.168.24.12') - self._create_worker(mgmt_ip='192.168.24.13', - unit=1, - recordtype='profile') - data = self.get_json('/kube_host_upgrades') - self.assertEqual(3, len(data['kube_host_upgrades'])) - host_id = 1 - for upgrade in data['kube_host_upgrades']: - self.assertIn('id', upgrade) - assert (uuidutils.is_uuid_like(upgrade['uuid'])) - self.assertEqual(upgrade['target_version'], None) - self.assertEqual(upgrade['status'], None) - if upgrade['host_id'] == worker.id: - self.assertEqual(upgrade['control_plane_version'], 'N/A') - else: - self.assertEqual(upgrade['control_plane_version'], 'v1.42.1') - self.assertEqual(upgrade['kubelet_version'], 'v1.42.2') - self.assertEqual(upgrade['host_id'], host_id) - host_id += 1 - def test_all_no_dynamic_info(self): # Create hosts self._create_controller_0() diff --git a/sysinv/sysinv/sysinv/sysinv/tests/api/test_profile.py b/sysinv/sysinv/sysinv/sysinv/tests/api/test_profile.py deleted file mode 100644 index cc6b3247d7..0000000000 --- a/sysinv/sysinv/sysinv/sysinv/tests/api/test_profile.py +++ /dev/null @@ -1,384 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# Copyright (c) 2017 Wind River Systems, Inc. 
-# - -import mock -from six.moves import http_client - -from sysinv.common import constants -from sysinv.common import utils as cutils -from sysinv.db import api as dbapi -from sysinv.tests.api import base -from sysinv.tests.db import utils as dbutils - -HEADER = {'User-Agent': 'sysinv'} - - -class ProfileTestCase(base.FunctionalTest): - - def setUp(self): - super(ProfileTestCase, self).setUp() - self.dbapi = dbapi.get_instance() - self.system = dbutils.create_test_isystem() - self.load = dbutils.create_test_load() - self.controller = dbutils.create_test_ihost( - id='1', - uuid=None, - forisystemid=self.system.id, - hostname='controller-0', - personality=constants.CONTROLLER, - subfunctions=constants.CONTROLLER, - invprovision=constants.PROVISIONED, - ) - self.worker = dbutils.create_test_ihost( - id='2', - uuid=None, - forisystemid=self.system.id, - hostname='worker-0', - personality=constants.WORKER, - subfunctions=constants.WORKER, - mgmt_mac='01:02.03.04.05.C0', - mgmt_ip='192.168.24.12', - invprovision=constants.PROVISIONED, - ) - self.profile = { - 'profilename': 'profile-node1', - 'ihost_uuid': self.controller.uuid, - } - self.ctrlnode = self.dbapi.inode_create(self.controller.id, - dbutils.get_test_node(id=1)) - self.ctrlcpu = self.dbapi.icpu_create( - self.controller.id, - dbutils.get_test_icpu(id=1, cpu=0, - forihostid=self.controller.id, - forinodeid=self.ctrlnode.id,)) - - self.ctrlif = dbutils.create_test_interface( - forihostid=self.controller.id) - self.port1 = dbutils.create_test_ethernet_port( - id='1', name=self.ctrlif.ifname, host_id=self.controller.id, - interface_id=self.ctrlif.id, mac='08:00:27:43:60:11') - - self.ctrlmemory = self.dbapi.imemory_create( - self.controller.id, - dbutils.get_test_imemory(id=1, - hugepages_configured=True, - forinodeid=self.ctrlcpu.forinodeid)) - - self.compnode = self.dbapi.inode_create(self.worker.id, - dbutils.get_test_node(id=2)) - self.compcpu = self.dbapi.icpu_create( - self.worker.id, - dbutils.get_test_icpu(id=5, cpu=3, - forinodeid=self.compnode.id, - forihostid=self.worker.id)) - self.compcpuapp = self.dbapi.icpu_create( - self.worker.id, - dbutils.get_test_icpu(id=6, cpu=4, forinodeid=self.compnode.id, forihostid=self.worker.id, - allocated_function=constants.APPLICATION_FUNCTION)) - self.compmemory = self.dbapi.imemory_create( - self.worker.id, - dbutils.get_test_imemory(id=2, Hugepagesize=constants.MIB_1G, - forinodeid=self.compcpu.forinodeid)) - - self.disk = self.dbapi.idisk_create( - self.worker.id, - dbutils.get_test_idisk(device_node='/dev/sdb', - device_type=constants.DEVICE_TYPE_HDD)) - self.lvg = self.dbapi.ilvg_create( - self.worker.id, - dbutils.get_test_lvg(lvm_vg_name=constants.LVG_NOVA_LOCAL)) - self.pv = self.dbapi.ipv_create( - self.worker.id, - dbutils.get_test_pv(lvm_vg_name=constants.LVG_NOVA_LOCAL, - disk_or_part_uuid=self.disk.uuid)) - - def _get_path(self, path=None): - if path: - return '/iprofile/' + path - else: - return '/iprofile' - - -class ProfileCreateTestCase(ProfileTestCase): - - def setUp(self): - super(ProfileCreateTestCase, self).setUp() - - def create_profile(self, profiletype): - self.profile["profiletype"] = profiletype - response = self.post_json('%s' % self._get_path(), self.profile) - self.assertEqual(http_client.OK, response.status_int) - - def test_create_cpu_success(self): - self.profile["profiletype"] = constants.PROFILE_TYPE_CPU - response = self.post_json('%s' % self._get_path(), self.profile) - self.assertEqual(http_client.OK, response.status_int) - - def 
test_create_interface_success(self): - self.profile["profiletype"] = constants.PROFILE_TYPE_INTERFACE - response = self.post_json('%s' % self._get_path(), self.profile) - self.assertEqual(http_client.OK, response.status_int) - - def test_create_memory_success(self): - self.profile["profiletype"] = constants.PROFILE_TYPE_MEMORY - self.profile["ihost_uuid"] = self.worker.uuid - response = self.post_json('%s' % self._get_path(), self.profile) - self.assertEqual(http_client.OK, response.status_int) - - def test_create_storage_success(self): - self.profile["profiletype"] = constants.PROFILE_TYPE_STORAGE - self.profile["ihost_uuid"] = self.worker.uuid - response = self.post_json('%s' % self._get_path(), self.profile) - self.assertEqual(http_client.OK, response.status_int) - - -class ProfileDeleteTestCase(ProfileTestCase): - def setUp(self): - super(ProfileDeleteTestCase, self).setUp() - - def test_delete_cpu_success(self): - self.profile["profiletype"] = constants.PROFILE_TYPE_CPU - post_response = self.post_json('%s' % self._get_path(), self.profile) - profile_data = self.get_json('%s' % self._get_path()) - cpuprofile_data = self.get_json( - '%s' % self._get_path(profile_data['iprofiles'][0]['uuid'])) - self.assertEqual(post_response.json['uuid'], cpuprofile_data['uuid']) - self.delete( - '%s/%s' % (self._get_path(), post_response.json['uuid'])) - - def test_delete_interface_success(self): - self.profile["profiletype"] = constants.PROFILE_TYPE_INTERFACE - post_response = self.post_json('%s' % self._get_path(), self.profile) - profile_data = self.get_json('%s' % self._get_path()) - ifprofile_data = self.get_json( - '%s' % self._get_path(profile_data['iprofiles'][0]['uuid'])) - self.assertEqual(post_response.json['uuid'], ifprofile_data['uuid']) - self.delete( - '%s/%s' % (self._get_path(), post_response.json['uuid'])) - - def test_delete_memory_success(self): - self.profile["profiletype"] = constants.PROFILE_TYPE_MEMORY - post_response = self.post_json('%s' % self._get_path(), self.profile) - profile_data = self.get_json('%s' % self._get_path()) - memprofile_data = self.get_json( - '%s' % self._get_path(profile_data['iprofiles'][0]['uuid'])) - self.assertEqual(post_response.json['uuid'], memprofile_data['uuid']) - self.delete( - '%s/%s' % (self._get_path(), post_response.json['uuid'])) - - def test_delete_storage_success(self): - self.profile["profiletype"] = constants.PROFILE_TYPE_STORAGE - self.profile["ihost_uuid"] = self.worker.uuid - post_response = self.post_json('%s' % self._get_path(), self.profile) - profile_data = self.get_json('%s' % self._get_path()) - storprofile_data = self.get_json( - '%s' % self._get_path(profile_data['iprofiles'][0]['uuid'])) - self.assertEqual(post_response.json['uuid'], storprofile_data['uuid']) - self.delete( - '%s/%s' % (self._get_path(), post_response.json['uuid'])) - - -class ProfileShowTestCase(ProfileTestCase): - def setUp(self): - super(ProfileShowTestCase, self).setUp() - - def test_show_cpu_success(self): - self.profile["profiletype"] = constants.PROFILE_TYPE_CPU - self.post_json('%s' % self._get_path(), self.profile) - list_data = self.get_json('%s' % self._get_path()) - show_data = self.get_json( - '%s/icpus' % self._get_path(list_data['iprofiles'][0]['uuid'])) - self.assertEqual(self.ctrlcpu.allocated_function, - show_data['icpus'][0]['allocated_function']) - - def test_show_interface_success(self): - self.profile["profiletype"] = constants.PROFILE_TYPE_INTERFACE - self.post_json('%s' % self._get_path(), self.profile) - list_data = 
self.get_json('%s' % self._get_path()) - show_data = self.get_json('%s/iinterfaces' % self._get_path( - list_data['iprofiles'][0]['uuid'])) - self.assertEqual(self.ctrlif.ifname, - show_data['iinterfaces'][0]['ifname']) - self.assertEqual(self.ctrlif.iftype, - show_data['iinterfaces'][0]['iftype']) - - @mock.patch.object(cutils, 'is_virtual') - def test_show_memory_success(self, mock_is_virtual): - mock_is_virtual.return_value = True - self.profile["profiletype"] = constants.PROFILE_TYPE_MEMORY - self.post_json('%s' % self._get_path(), self.profile) - list_data = self.get_json('%s' % self._get_path()) - show_data = self.get_json( - '%s/imemorys' % self._get_path(list_data['iprofiles'][0]['uuid'])) - self.assertEqual(self.ctrlmemory.platform_reserved_mib, - show_data['imemorys'][0]['platform_reserved_mib']) - self.assertEqual(self.ctrlmemory.vm_hugepages_nr_2M, - show_data['imemorys'][0]['vm_hugepages_nr_2M_pending']) - self.assertEqual(self.ctrlmemory.vm_hugepages_nr_1G, - show_data['imemorys'][0]['vm_hugepages_nr_1G_pending']) - - def test_show_storage_success(self): - self.profile["profiletype"] = constants.PROFILE_TYPE_STORAGE - self.profile["ihost_uuid"] = self.worker.uuid - self.post_json('%s' % self._get_path(), self.profile) - list_data = self.get_json('%s' % self._get_path()) - profile_uuid = list_data['iprofiles'][0]['uuid'] - show_data = self.get_json( - '%s/idisks' % self._get_path(profile_uuid)) - self.assertEqual(self.disk.device_path, - show_data['idisks'][0]['device_path']) - show_data = self.get_json( - '%s/ipvs' % self._get_path(profile_uuid)) - self.assertEqual(self.pv.pv_type, - show_data['ipvs'][0]['pv_type']) - show_data = self.get_json( - '%s/ilvgs' % self._get_path(profile_uuid)) - self.assertEqual(self.lvg.lvm_vg_name, - show_data['ilvgs'][0]['lvm_vg_name']) - - -class ProfileListTestCase(ProfileTestCase): - def setUp(self): - super(ProfileListTestCase, self).setUp() - - def test_list_cpu_success(self): - self.profile["profiletype"] = constants.PROFILE_TYPE_CPU - post_response = self.post_json('%s' % self._get_path(), self.profile) - list_data = self.get_json('%s' % self._get_path()) - self.assertEqual(post_response.json['uuid'], - list_data['iprofiles'][0]['uuid']) - - def test_list_interface_success(self): - self.profile["profiletype"] = constants.PROFILE_TYPE_INTERFACE - post_response = self.post_json('%s' % self._get_path(), self.profile) - list_data = self.get_json('%s' % self._get_path()) - self.assertEqual(post_response.json['uuid'], - list_data['iprofiles'][0]['uuid']) - - def test_list_memory_success(self): - self.profile["profiletype"] = constants.PROFILE_TYPE_MEMORY - post_response = self.post_json('%s' % self._get_path(), self.profile) - list_data = self.get_json('%s' % self._get_path()) - self.assertEqual(post_response.json['uuid'], - list_data['iprofiles'][0]['uuid']) - - def test_list_storage_success(self): - self.profile["profiletype"] = constants.PROFILE_TYPE_STORAGE - self.profile["ihost_uuid"] = self.worker.uuid - post_response = self.post_json('%s' % self._get_path(), self.profile) - list_data = self.get_json('%s' % self._get_path()) - self.assertEqual(post_response.json['uuid'], - list_data['iprofiles'][0]['uuid']) - - -class ProfileApplyTestCase(ProfileTestCase): - def setUp(self): - super(ProfileApplyTestCase, self).setUp() - - def test_apply_cpu_success(self): - self.profile["profiletype"] = constants.PROFILE_TYPE_CPU - self.profile["ihost_uuid"] = self.worker.uuid - response = self.post_json('%s' % self._get_path(), self.profile) - 
self.assertEqual(http_client.OK, response.status_int) - list_data = self.get_json('%s' % self._get_path()) - profile_uuid = list_data['iprofiles'][0]['uuid'] - result = self.patch_dict_json('/ihosts/%s' % self.worker.id, - headers=HEADER, - action=constants.APPLY_PROFILE_ACTION, - iprofile_uuid=profile_uuid) - self.assertEqual(http_client.OK, result.status_int) - - hostcpu_r = self.get_json( - '/ihosts/%s/icpus' % self.worker.uuid) - profile_r = self.get_json( - '%s/icpus' % self._get_path(profile_uuid)) - self.assertEqual(hostcpu_r['icpus'][0]['allocated_function'], - profile_r['icpus'][0]['allocated_function']) - - @mock.patch.object(cutils, 'is_virtual') - def test_apply_memory_success(self, mock_is_virtual): - mock_is_virtual.return_value = True - self.profile["profiletype"] = constants.PROFILE_TYPE_MEMORY - self.profile["ihost_uuid"] = self.worker.uuid - response = self.post_json('%s' % self._get_path(), self.profile) - self.assertEqual(http_client.OK, response.status_int) - - list_data = self.get_json('%s' % self._get_path()) - profile_uuid = list_data['iprofiles'][0]['uuid'] - result = self.patch_dict_json('/ihosts/%s' % self.worker.id, - headers=HEADER, - action=constants.APPLY_PROFILE_ACTION, - iprofile_uuid=profile_uuid) - self.assertEqual(http_client.OK, result.status_int) - - hostmem_r = self.get_json( - '/ihosts/%s/imemorys' % self.worker.uuid) - profile_r = self.get_json( - '%s/imemorys' % self._get_path(profile_uuid)) - self.assertEqual(hostmem_r['imemorys'][0]['platform_reserved_mib'], - profile_r['imemorys'][0]['platform_reserved_mib']) - self.assertEqual(hostmem_r['imemorys'][0]['vm_hugepages_nr_2M_pending'], - profile_r['imemorys'][0]['vm_hugepages_nr_2M_pending']) - self.assertEqual(hostmem_r['imemorys'][0]['vm_hugepages_nr_1G_pending'], - profile_r['imemorys'][0]['vm_hugepages_nr_1G_pending']) - self.assertEqual(hostmem_r['imemorys'][0]['vswitch_hugepages_reqd'], - profile_r['imemorys'][0]['vswitch_hugepages_reqd']) - - def test_apply_storage_success(self): - self.profile["profiletype"] = constants.PROFILE_TYPE_LOCAL_STORAGE - self.profile["ihost_uuid"] = self.worker.uuid - response = self.post_json('%s' % self._get_path(), self.profile) - self.assertEqual(http_client.OK, response.status_int) - - list_data = self.get_json('%s' % self._get_path()) - profile_uuid = list_data['iprofiles'][0]['uuid'] - - # Delete Physical volume and disassociate it from disk - self.delete('/ipvs/%s' % self.pv.uuid) - self.dbapi.idisk_update(self.disk.uuid, - {'foripvid': None, 'foristorid': None}) - # Delete Local Volume - self.delete('/ilvgs/%s' % self.lvg.uuid) - - # Apply storage profile - result = self.patch_dict_json('/ihosts/%s' % self.worker.id, - headers=HEADER, - action=constants.APPLY_PROFILE_ACTION, - iprofile_uuid=profile_uuid) - self.assertEqual(http_client.OK, result.status_int) - - hostdisk_r = self.get_json( - '/ihosts/%s/idisks' % self.worker.uuid) - profile_r = self.get_json( - '%s/idisks' % self._get_path(profile_uuid)) - self.assertEqual(hostdisk_r['idisks'][0]['device_path'], - profile_r['idisks'][0]['device_path']) - - hostpv_r = self.get_json( - '/ihosts/%s/ipvs' % self.worker.uuid) - profile_r = self.get_json( - '%s/ipvs' % self._get_path(profile_uuid)) - self.assertEqual(hostpv_r['ipvs'][1]['pv_type'], - profile_r['ipvs'][0]['pv_type']) - if not profile_r['ipvs'][0].get('disk_or_part_device_path'): - self.assertEqual(hostpv_r['ipvs'][1]['lvm_pv_name'], - profile_r['ipvs'][0]['lvm_pv_name']) - - hostlvg_r = self.get_json( - '/ihosts/%s/ilvgs' % self.worker.uuid) - 
profile_r = self.get_json( - '%s/ilvgs' % self._get_path(profile_uuid)) - self.assertEqual(hostlvg_r['ilvgs'][0]['lvm_vg_name'], - profile_r['ilvgs'][0]['lvm_vg_name'])