Decouple Fault Management from stx-config

List of changes (a wiring sketch follows the list):

1.  Remove all fault management (FM) database tables from the sysinv DB
2.  Remove all FM commands from the sysinv REST API service
3.  Remove all FM CLI commands from the cgts client
4.  Add an FM user to config_controller to support region config
5.  Update backup/restore to reference the new alarm database table
6.  Update the controller config test files and add the new FM user
7.  Add an FM puppet module to manage configuration data and the
    database, and to configure the FM user, service and endpoint in
    Keystone
8.  Add an FM puppet operator to populate FM and SNMP configuration data
9.  Update the NFV puppet module to support FM endpoint configuration
10. Update the haproxy manifest to support an active-active FM API
    service
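For context, a minimal sketch of how the decoupled pieces fit together on a
controller node. The include lines come from the manifests in this change;
the hiera keys and values are illustrative only:

include ::platform::fm        # fm.conf, database and authtoken configuration
include ::platform::fm::api   # fm-api service, firewall and haproxy rules

# Illustrative hiera data consumed by platform::fm::params:
#   platform::fm::params::region_name: 'RegionOne'
#   platform::fm::params::trap_destinations: ['10.10.10.1:162']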

Story: 2002828
Task: 22747

Change-Id: I96d22a18d5872c2e5398f2e9e26a7056fe9b4e82
Signed-off-by: Tao Liu <tao.liu@windriver.com>
Commit 5421df7098 (parent 4340414bb5) by Tao Liu, 2018-08-15 14:07:24 -04:00
84 changed files with 1749 additions and 2595 deletions

@@ -17,3 +17,4 @@ puppet-modules-wrs/puppet-sysinv
 puppet-modules-wrs/puppet-dcorch
 puppet-modules-wrs/puppet-dcmanager
 puppet-modules-wrs/puppet-smapi
+puppet-modules-wrs/puppet-fm

@@ -42,7 +42,10 @@ EXPECTED_SERVICE_NAME_AND_TYPE = (
     "PANKO_SERVICE_NAME": "panko",
     "PANKO_SERVICE_TYPE": "event",
     "GNOCCHI_SERVICE_NAME": "gnocchi",
-    "GNOCCHI_SERVICE_TYPE": "metric"})
+    "GNOCCHI_SERVICE_TYPE": "metric",
+    "FM_SERVICE_NAME": "fm",
+    "FM_SERVICE_TYPE": "faultmanagement",
+    })

 def is_valid_vlan(vlan):

@@ -992,6 +992,13 @@ class ConfigValidator(object):
         gnocchi_password = get_optional(self.conf, 'REGION_2_SERVICES',
                                         'GNOCCHI_PASSWORD')
+        # validate fm service name and type
+        get_service(self.conf, 'REGION_2_SERVICES', 'FM_SERVICE_NAME')
+        get_service(self.conf, 'REGION_2_SERVICES', 'FM_SERVICE_TYPE')
+        fm_user_name = self.conf.get('REGION_2_SERVICES', 'FM_USER_NAME')
+        fm_password = get_optional(self.conf, 'REGION_2_SERVICES',
+                                   'FM_PASSWORD')
         if self.conf.has_option('REGION_2_SERVICES', 'USER_DOMAIN_NAME'):
             user_domain = self.conf.get('REGION_2_SERVICES',
                                         'USER_DOMAIN_NAME')
@@ -1100,6 +1107,8 @@ class ConfigValidator(object):
         self.cgcs_conf.set('cREGION', 'GNOCCHI_USER_NAME',
                            gnocchi_user_name)
         self.cgcs_conf.set('cREGION', 'GNOCCHI_PASSWORD', gnocchi_password)
+        self.cgcs_conf.set('cREGION', 'FM_USER_NAME', fm_user_name)
+        self.cgcs_conf.set('cREGION', 'FM_PASSWORD', fm_password)
         self.cgcs_conf.set('cREGION', 'USER_DOMAIN_NAME',
                            user_domain)

@@ -727,6 +727,12 @@ class REG2SERVICESPage2(ConfigPage):
         self.fields['GNOCCHI_PASSWORD'] = Field(
             text="GNOCCHI user password",
             type=TYPES.string, initial="")
+        self.fields['FM_USER_NAME'] = Field(
+            text="FM username",
+            type=TYPES.string, initial="fm")
+        self.fields['FM_PASSWORD'] = Field(
+            text="FM user password",
+            type=TYPES.string, initial="")

     def validate_page(self):
         self.prev.validate_page()

@@ -71,7 +71,7 @@ def get_backup_databases(cinder_config=False):
     REGION_LOCAL_DATABASES = ('postgres', 'template1', 'nova', 'sysinv',
                               'neutron', 'heat', 'nova_api',
                               'aodh', 'murano', 'magnum', 'panko', 'ironic',
-                              'nova_cell0', 'gnocchi')
+                              'nova_cell0', 'gnocchi', 'fm')
     REGION_SHARED_DATABASES = ('glance', 'keystone')

     if cinder_config:
@@ -79,7 +79,7 @@ def get_backup_databases(cinder_config=False):
     # Indicates which tables have to be dropped for a certain database.
     DB_TABLE_SKIP_MAPPING = {
-        'sysinv': ('i_alarm',),
+        'fm': ('alarm',),
         'gnocchi': ('metric', 'resource'),
         'dcorch': ('orch_job',
                    'orch_request',

@@ -506,6 +506,9 @@ class ConfigAssistant():
         self.mtce_ks_password = ""
         self.nfv_ks_user_name = ""
         self.nfv_ks_password = ""
+        self.fm_ks_user_name = ""
+        self.fm_ks_password = ""
         self.ldap_region_name = ""
         self.ldap_service_name = ""
         self.ldap_service_uri = ""
@@ -2831,6 +2834,12 @@ class ConfigAssistant():
                 'cREGION', 'NFV_PASSWORD')
             self.add_password_for_validation('NFV_PASSWORD',
                                              self.nfv_ks_password)
+            self.fm_ks_user_name = config.get(
+                'cREGION', 'FM_USER_NAME')
+            self.fm_ks_password = config.get(
+                'cREGION', 'FM_PASSWORD')
+            self.add_password_for_validation('FM_PASSWORD',
+                                             self.fm_ks_password)

             self.shared_services.append(self.keystone_service_type)
             if self.glance_region_name == self.region_1_name:
@@ -3403,6 +3412,10 @@ class ConfigAssistant():
                         self.mtce_ks_user_name)
                 f.write("MTCE_PASSWORD=%s\n" %
                         self.mtce_ks_password)
+                f.write("FM_USER_NAME=%s\n" %
+                        self.fm_ks_user_name)
+                f.write("FM_PASSWORD=%s\n" %
+                        self.fm_ks_password)

                 # Subcloud configuration
                 if self.subcloud_config():
@@ -3749,6 +3762,14 @@ class ConfigAssistant():
                   'capabilities': capabilities}
         client.sysinv.sm_service.service_create(**values)

+        # fm service config
+        capabilities = {'user_name': self.fm_ks_user_name}
+        values = {'name': "fm",
+                  'enabled': True,
+                  'region_name': self.region_2_name,
+                  'capabilities': capabilities}
+        client.sysinv.sm_service.service_create(**values)

         # possible shared services (glance)
         capabilities = {'service_name': self.glance_service_name,
                         'service_type': self.glance_service_type,
@@ -3955,6 +3976,9 @@ class ConfigAssistant():
         keyring.set_password('vim', constants.DEFAULT_SERVICE_PROJECT_NAME,
                              self.nfv_ks_password)
+        keyring.set_password('fm', constants.DEFAULT_SERVICE_PROJECT_NAME,
+                             self.fm_ks_password)

         del os.environ["XDG_DATA_HOME"]

     def _populate_network_config(self, client):

@@ -55,7 +55,8 @@ EXPECTED_USERS = [
     ('REGION_2_SERVICES', 'AODH', 'aodh'),
     ('REGION_2_SERVICES', 'MTCE', 'mtce'),
     ('REGION_2_SERVICES', 'PANKO', 'panko'),
-    ('REGION_2_SERVICES', 'GNOCCHI', 'gnocchi')]
+    ('REGION_2_SERVICES', 'GNOCCHI', 'gnocchi'),
+    ('REGION_2_SERVICES', 'FM', 'fm')]

 EXPECTED_SHARED_SERVICES_NEUTRON_USER = ('SHARED_SERVICES', 'NEUTRON',
                                          'neutron')
@@ -129,6 +130,11 @@ EXPECTED_REGION2_ENDPOINTS = [
      'http://{}:8041',
      'http://{}:8041',
      'OpenStack Metric Service'),
+    ('FM_SERVICE_NAME', 'FM_SERVICE_TYPE',
+     'http://{}:18002',
+     'http://{}:18002',
+     'http://{}:18002',
+     'Fault Management Service'),
 ]

 EXPECTED_NEUTRON_ENDPOINT = (

@@ -123,6 +123,8 @@ PANKO_USER_NAME=pankoTWO
 PANKO_PASSWORD=password2WO*
 GNOCCHI_USER_NAME=gnocchiTWO
 GNOCCHI_PASSWORD=password2WO*
+FM_USER_NAME=fmTWO
+FM_PASSWORD=password2WO*

 [VERSION]
 RELEASE = 18.04

@@ -110,6 +110,8 @@ PANKO_USER_NAME = pankoTWO
 PANKO_PASSWORD = password2WO*
 GNOCCHI_USER_NAME = gnocchiTWO
 GNOCCHI_PASSWORD = password2WO*
+FM_USER_NAME = fmTWO
+FM_PASSWORD = password2WO*
 USER_DOMAIN_NAME = service_domain
 PROJECT_DOMAIN_NAME = service_domain
 KEYSTONE_AUTH_URI = http://192.168.204.12:8081/keystone/main/v2.0

@@ -117,6 +117,8 @@ PANKO_USER_NAME=pankoTWO
 PANKO_PASSWORD=password2WO*
 GNOCCHI_USER_NAME=gnocchiTWO
 GNOCCHI_PASSWORD=password2WO*
+FM_USER_NAME=fmTWO
+FM_PASSWORD=password2WO*

 [VERSION]
 RELEASE = 18.04

@@ -108,6 +108,8 @@ PANKO_USER_NAME = pankoTWO
 PANKO_PASSWORD = password2WO*
 GNOCCHI_USER_NAME = gnocchiTWO
 GNOCCHI_PASSWORD = password2WO*
+FM_USER_NAME = fmTWO
+FM_PASSWORD = password2WO*
 USER_DOMAIN_NAME = Default
 PROJECT_DOMAIN_NAME = Default
 KEYSTONE_AUTH_URI = http://192.168.204.12:8081/keystone/main/v2.0

@@ -129,6 +129,8 @@ PANKO_USER_NAME=panko
 PANKO_PASSWORD=password2WO*
 GNOCCHI_USER_NAME=gnocchi
 GNOCCHI_PASSWORD=password2WO*
+FM_USER_NAME=fm
+FM_PASSWORD=password2WO*

 MTCE_USER_NAME=mtce
 MTCE_PASSWORD=password2WO*

@@ -141,6 +141,8 @@ PANKO_USER_NAME=panko
 PANKO_PASSWORD=password2WO*
 GNOCCHI_USER_NAME=gnocchi
 GNOCCHI_PASSWORD=password2WO*
+FM_USER_NAME=fm
+FM_PASSWORD=password2WO*

 MTCE_USER_NAME=mtce
 MTCE_PASSWORD=password2WO*

@@ -113,6 +113,8 @@ PANKO_USER_NAME=panko
 PANKO_PASSWORD=password2WO*
 GNOCCHI_USER_NAME=gnocchi
 GNOCCHI_PASSWORD=password2WO*
+FM_USER_NAME=fm
+FM_PASSWORD=password2WO*

 [VERSION]
 RELEASE = 18.04

@@ -113,6 +113,8 @@ PANKO_USER_NAME = panko
 PANKO_PASSWORD = password2WO*
 GNOCCHI_USER_NAME = gnocchi
 GNOCCHI_PASSWORD = password2WO*
+FM_USER_NAME = fm
+FM_PASSWORD = password2WO*
 USER_DOMAIN_NAME = Default
 PROJECT_DOMAIN_NAME = Default
 KEYSTONE_AUTH_URI = http://192.168.204.12:8081/keystone/main/v2.0

@@ -123,6 +123,8 @@ PANKO_USER_NAME=panko
 PANKO_PASSWORD=password2WO*
 GNOCCHI_USER_NAME=gnocchi
 GNOCCHI_PASSWORD=password2WO*
+FM_USER_NAME=fm
+FM_PASSWORD=password2WO*

 [VERSION]
 RELEASE = 18.04

@@ -103,6 +103,8 @@ PANKO_USER_NAME = panko
 PANKO_PASSWORD = password2WO*
 GNOCCHI_USER_NAME = gnocchi
 GNOCCHI_PASSWORD = password2WO*
+FM_USER_NAME = fm
+FM_PASSWORD = password2WO*
 USER_DOMAIN_NAME = Default
 PROJECT_DOMAIN_NAME = Default
 KEYSTONE_AUTH_URI = http://192.168.204.12:8081/keystone/main/v2.0

@@ -119,6 +119,8 @@ PANKO_USER_NAME=panko
 PANKO_PASSWORD=password2WO*
 GNOCCHI_USER_NAME=gnocchi
 GNOCCHI_PASSWORD=password2WO*
+FM_USER_NAME=fm
+FM_PASSWORD=password2WO*

 [VERSION]
 RELEASE = 18.04

@@ -91,6 +91,8 @@ PANKO_USER_NAME = panko
 PANKO_PASSWORD = password2WO*
 GNOCCHI_USER_NAME = gnocchi
 GNOCCHI_PASSWORD = password2WO*
+FM_USER_NAME = fm
+FM_PASSWORD = password2WO*
 USER_DOMAIN_NAME = Default
 PROJECT_DOMAIN_NAME = Default
 KEYSTONE_AUTH_URI = http://192.168.204.12:8081/keystone/main/v2.0

@@ -119,6 +119,8 @@ PANKO_USER_NAME=panko
 PANKO_PASSWORD=password2WO*
 GNOCCHI_USER_NAME=gnocchi
 GNOCCHI_PASSWORD=password2WO*
+FM_USER_NAME=fm
+FM_PASSWORD=password2WO*

 [VERSION]
 RELEASE = 18.04

@@ -120,6 +120,8 @@ PANKO_USER_NAME=panko
 PANKO_PASSWORD=password2WO*
 GNOCCHI_USER_NAME=gnocchi
 GNOCCHI_PASSWORD=password2WO*
+FM_USER_NAME=fm
+FM_PASSWORD=password2WO*

 [VERSION]
 RELEASE = 18.04

@@ -91,6 +91,8 @@ PANKO_USER_NAME = panko
 PANKO_PASSWORD = password2WO*
 GNOCCHI_USER_NAME = gnocchi
 GNOCCHI_PASSWORD = password2WO*
+FM_USER_NAME = fm
+FM_PASSWORD = password2WO*
 USER_DOMAIN_NAME = Default
 PROJECT_DOMAIN_NAME = Default
 KEYSTONE_AUTH_URI = http://192.168.204.12:8081/keystone/main/v2.0

@@ -21,6 +21,7 @@ Requires: puppet-patching
 Requires: puppet-sysinv
 Requires: puppet-sshd
 Requires: puppet-smapi
+Requires: puppet-fm

 # Openstack puppet modules
 Requires: puppet-aodh

@@ -524,3 +524,13 @@ dcorch::debug: false
 dcmanager::use_syslog: true
 dcmanager::log_facility: 'local2'
 dcmanager::debug: false
+
+# FM
+fm::use_syslog: true
+fm::log_facility: 'local2'
+fm::api::enable_proxy_headers_parsing: true
+fm::db::sync::user: 'root'
+fm::database_idle_timeout: 60
+fm::database_max_overflow: 20
+fm::database_max_pool_size: 1

@@ -62,6 +62,9 @@ include ::platform::influxdb
 include ::platform::influxdb::logrotate
 include ::platform::collectd

+include ::platform::fm
+include ::platform::fm::api

 include ::openstack::client
 include ::openstack::keystone
 include ::openstack::keystone::api

@@ -325,6 +325,7 @@ class openstack::keystone::endpoint::runtime {
   include ::sysinv::keystone::auth
   include ::patching::keystone::auth
   include ::nfv::keystone::auth
+  include ::fm::keystone::auth

   include ::openstack::aodh::params
   if $::openstack::aodh::params::service_enabled {

@@ -0,0 +1,101 @@
class platform::fm::params (
  $api_port            = 18002,
  $api_host            = undef,
  $region_name         = undef,
  $system_name         = undef,
  $service_create      = false,
  $service_enabled     = true,
  $trap_destinations   = [],
  $sysinv_catalog_info = 'platform:sysinv:internalURL',
) { }

class platform::fm::config
  inherits ::platform::fm::params {

  $trap_dest_str = join($trap_destinations, ',')

  class { '::fm':
    region_name         => $region_name,
    system_name         => $system_name,
    trap_destinations   => $trap_dest_str,
    sysinv_catalog_info => $sysinv_catalog_info,
  }
}

class platform::fm
  inherits ::platform::fm::params {

  include ::fm::client
  include ::fm::keystone::authtoken
  include ::platform::fm::config
  include ::platform::params

  if $::platform::params::init_database {
    include ::fm::db::postgresql
  }
}

class platform::fm::firewall
  inherits ::platform::fm::params {

  platform::firewall::rule { 'fm-api':
    service_name => 'fm',
    ports        => $api_port,
  }
}

class platform::fm::haproxy
  inherits ::platform::fm::params {

  include ::platform::haproxy::params

  platform::haproxy::proxy { 'fm-api-internal':
    server_name        => 's-fm-api-internal',
    public_ip_address  => $::platform::haproxy::params::private_ip_address,
    public_port        => $api_port,
    private_ip_address => $api_host,
    private_port       => $api_port,
    public_api         => false,
  }

  platform::haproxy::proxy { 'fm-api-public':
    server_name  => 's-fm-api-public',
    public_port  => $api_port,
    private_port => $api_port,
  }
}

class platform::fm::api
  inherits ::platform::fm::params {

  include ::platform::params

  if $service_enabled {
    if ($::platform::fm::service_create and
        $::platform::params::init_keystone) {
      include ::fm::keystone::auth
    }

    class { '::fm::api':
      host    => $api_host,
      workers => $::platform::params::eng_workers,
      sync_db => $::platform::params::init_database,
    }

    include ::platform::fm::firewall
    include ::platform::fm::haproxy
  }
}

class platform::fm::runtime {
  require ::platform::fm::config

  exec { 'notify-fm-mgr':
    command => '/usr/bin/pkill -HUP fmManager',
    onlyif  => 'pgrep fmManager',
  }
}
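A hedged example of how this file might be driven from site data; the class
and parameter names are taken from the manifest above, the values are
placeholders:

class { '::platform::fm::params':
  api_host          => '192.168.204.2',                       # placeholder bind address
  region_name       => 'RegionOne',
  trap_destinations => ['10.10.10.1:162', '10.10.10.2:162'],  # joined into fm.conf trap_destinations
}

include ::platform::fm
include ::platform::fm::api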

@@ -19,6 +19,7 @@ define platform::haproxy::proxy (
   $client_timeout = undef,
   $x_forwarded_proto = true,
   $enable_https = undef,
+  $public_api = true,
 ) {

   include ::platform::haproxy::params
@@ -29,7 +30,7 @@ define platform::haproxy::proxy (
   }

   if $x_forwarded_proto {
-    if $https_enabled {
+    if $https_enabled and $public_api {
       $ssl_option = 'ssl crt /etc/ssl/private/server-cert.pem'
       $proto = 'X-Forwarded-Proto:\ https'
       # The value of max-age matches lighttpd.conf, and should be
@@ -135,6 +136,7 @@ class platform::haproxy::runtime {
   include ::platform::sysinv::haproxy
   include ::platform::nfv::haproxy
   include ::platform::ceph::haproxy
+  include ::platform::fm::haproxy

   if $::platform::params::distributed_cloud_role =='systemcontroller' {
     include ::platform::dcmanager::haproxy
     include ::platform::dcorch::haproxy
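The new $public_api flag lets an internal-only proxy opt out of TLS
termination and the X-Forwarded-Proto header when HTTPS is enabled for public
endpoints, which is what fm-api-internal in the previous file relies on. An
illustrative use of the define (addresses are placeholders):

platform::haproxy::proxy { 'example-api-internal':
  server_name        => 's-example-internal',
  public_ip_address  => '192.168.204.2',   # placeholder management floating IP
  public_port        => 18002,
  private_ip_address => '192.168.204.3',   # placeholder unit IP
  private_port       => 18002,
  public_api         => false,             # stay plain HTTP even when https is enabled
}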

@@ -211,6 +211,7 @@ class platform::postgresql::upgrade
   include ::sysinv::db::postgresql
   include ::keystone::db::postgresql
   include ::ironic::db::postgresql
+  include ::fm::db::postgresql
 }

@@ -60,12 +60,6 @@ class platform::sysinv
       'sysinv %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s';
   }

-  $sysinv_db_connection = $::sysinv::database_connection
-
-  file { "/etc/fm.conf":
-    ensure  => 'present',
-    content => template('platform/fm.conf.erb'),
-  }

   if str2bool($::is_initial_config_primary) {
     $software_version = $::platform::params::software_version

@@ -1,9 +0,0 @@
###################################################
#
# fm.conf
#
# The configuration file for the fmManager process.
#
###################################################
event_log_max_size=4000
sql_connection=<%= @sysinv_db_connection %>

@@ -0,0 +1,6 @@
[snmp]
<%- @trap_destinations.each do |destination| -%>
trap2sink=<%= destination %>
<%- end -%>

@@ -0,0 +1,2 @@
Name: puppet-fm
Version: 1.0.0

@@ -0,0 +1,3 @@
SRC_DIR="src"
COPY_LIST="$SRC_DIR/LICENSE"
TIS_PATCH_VER=1

@@ -0,0 +1,34 @@
%global module_dir fm

Name:          puppet-%{module_dir}
Version:       1.0.0
Release:       %{tis_patch_ver}%{?_tis_dist}
Summary:       Puppet FM module
License:       Apache-2.0
Packager:      Wind River <info@windriver.com>
URL:           unknown

Source0:       %{name}-%{version}.tar.gz
Source1:       LICENSE

BuildArch:     noarch

BuildRequires: python2-devel

%description
A puppet module for Fault Management

%prep
%autosetup -c %{module_dir}

#
# The src for this puppet module needs to be staged to puppet/modules
#
%install
install -d -m 0755 %{buildroot}%{_datadir}/puppet/modules/%{module_dir}
cp -R %{name}-%{version}/%{module_dir} %{buildroot}%{_datadir}/puppet/modules

%files
%license %{name}-%{version}/LICENSE
%{_datadir}/puppet/modules/%{module_dir}

@@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

@@ -0,0 +1,27 @@
Puppet::Type.type(:fm_api_paste_ini).provide(
  :ini_setting,
  :parent => Puppet::Type.type(:ini_setting).provider(:ruby)
) do

  def section
    resource[:name].split('/', 2).first
  end

  def setting
    resource[:name].split('/', 2).last
  end

  def separator
    '='
  end

  def self.file_path
    '/etc/fm/api-paste.ini'
  end

  # this needs to be removed. This has been replaced with the class method
  def file_path
    self.class.file_path
  end

end
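Because the provider splits the namevar on the first '/', resource titles
encode both section and setting. For example, the api-paste entry declared
elsewhere in this change:

fm_api_paste_ini { 'filter:authtoken/acl_public_routes':
  value => '/, /v1',
}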

@@ -0,0 +1,10 @@
Puppet::Type.type(:fm_config).provide(
  :ini_setting,
  :parent => Puppet::Type.type(:openstack_config).provider(:ini_setting)
) do

  def self.file_path
    '/etc/fm/fm.conf'
  end

end

@@ -0,0 +1,43 @@
Puppet::Type.newtype(:fm_api_paste_ini) do

  ensurable

  newparam(:name, :namevar => true) do
    desc 'Section/setting name to manage from /etc/fm/api-paste.ini'
    newvalues(/\S+\/\S+/)
  end

  newproperty(:value) do
    desc 'The value of the setting to be defined.'
    munge do |value|
      value = value.to_s.strip
      value.capitalize! if value =~ /^(true|false)$/i
      value
    end

    def is_to_s( currentvalue )
      if resource.secret?
        return '[old secret redacted]'
      else
        return currentvalue
      end
    end

    def should_to_s( newvalue )
      if resource.secret?
        return '[new secret redacted]'
      else
        return newvalue
      end
    end
  end

  newparam(:secret, :boolean => true) do
    desc 'Whether to hide the value from Puppet logs. Defaults to `false`.'
    newvalues(:true, :false)
    defaultto false
  end

end

@@ -0,0 +1,51 @@
Puppet::Type.newtype(:fm_config) do

  ensurable

  newparam(:name, :namevar => true) do
    desc 'Section/setting name to manage from fm.conf'
    newvalues(/\S+\/\S+/)
  end

  newproperty(:value) do
    desc 'The value of the setting to be defined.'
    munge do |value|
      value = value.to_s.strip
      value.capitalize! if value =~ /^(true|false)$/i
      value
    end
    newvalues(/^[\S ]*$/)

    def is_to_s( currentvalue )
      if resource.secret?
        return '[old secret redacted]'
      else
        return currentvalue
      end
    end

    def should_to_s( newvalue )
      if resource.secret?
        return '[new secret redacted]'
      else
        return newvalue
      end
    end
  end

  newparam(:secret, :boolean => true) do
    desc 'Whether to hide the value from Puppet logs. Defaults to `false`.'
    newvalues(:true, :false)
    defaultto false
  end

  newparam(:ensure_absent_val) do
    desc 'A value that is specified as the value property will behave as if ensure => absent was specified'
    defaultto('<SERVICE DEFAULT>')
  end

  autorequire(:package) do
    'fm-rest-api'
  end

end
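Two behaviours worth noting, shown with a small sketch (the connection string
is a placeholder): secret suppresses the value in Puppet logs, and writing
the ensure_absent_val sentinel removes the setting:

fm_config {
  'database/connection':       value => 'postgresql+psycopg2://fm:pw@127.0.0.1/fm', secret => true;
  'DEFAULT/trap_destinations': value => '<SERVICE DEFAULT>';  # treated as ensure => absent
}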

@@ -0,0 +1,109 @@
# Installs & configures the fm api service
#
# == Parameters
#
# [*enabled*]
# (optional) Should the service be enabled.
# Defaults to true
#
# [*manage_service*]
# (optional) Whether the service should be managed by Puppet.
# Defaults to true.
#
# [*host*]
# (optional) The fm api bind address.
# Defaults to 0.0.0.0
#
# [*port*]
# (optional) The fm api port.
# Defaults to 18002
#
# [*workers*]
# (optional) Number of fm-api worker processes.
# Defaults to 1
#
# [*package_ensure*]
# (optional) ensure state for package.
# Defaults to 'present'
#
# [*service_name*]
# (optional) Name of the service that will be providing the
# server functionality of fm-api.
#
# [*sync_db*]
# (optional) Run fm-dbsync on api nodes after installing the package.
# Defaults to false
#
# [*auth_strategy*]
# (optional) Type of authentication to be used.
# Defaults to 'keystone'
#
# [*enable_proxy_headers_parsing*]
# (Optional) Enable paste middleware to handle SSL requests through
# HTTPProxyToWSGI middleware.
# Defaults to $::os_service_default.
#
# [*paste_config*]
# (Optional) Configuration file for WSGI definition of API.
# Defaults to '/etc/fm/api-paste.ini'.
#
class fm::api (
  $manage_service               = true,
  $enabled                      = true,
  $package_ensure               = 'present',
  $host                         = '0.0.0.0',
  $port                         = '18002',
  $workers                      = 1,
  $service_name                 = $::fm::params::api_service,
  $sync_db                      = false,
  $auth_strategy                = 'keystone',
  $enable_proxy_headers_parsing = $::os_service_default,
  $paste_config                 = '/etc/fm/api-paste.ini',
) inherits fm::params {

  include ::fm::deps
  include ::fm::params

  if $auth_strategy == 'keystone' {
    include ::fm::keystone::authtoken
  }

  package { 'fm-api':
    ensure => $package_ensure,
    name   => $::fm::params::api_package,
    tag    => 'fm-package',
  }

  if $manage_service {
    if $enabled {
      $service_ensure = 'running'
    } else {
      $service_ensure = 'stopped'
    }
  }

  if $sync_db {
    include ::fm::db::sync
  }

  if $service_name == $::fm::params::api_service {
    service { 'fm-api':
      ensure     => $service_ensure,
      name       => $::fm::params::api_service,
      enable     => $enabled,
      hasstatus  => true,
      hasrestart => true,
      tag        => 'fm-service',
    }
  } else {
    fail('Invalid service_name. Only fm-api is supported as a standalone service.')
  }

  fm_config {
    'api/bind_host':        value => $host;
    'api/bind_port':        value => $port;
    'api/api_workers':      value => $workers;
    'api/api_paste_config': value => $paste_config;
  }

  oslo::middleware { 'fm_config':
    enable_proxy_headers_parsing => $enable_proxy_headers_parsing,
  }
}
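On a controller this class is declared by platform::fm::api shown earlier; a
standalone sketch with placeholder values would be:

class { '::fm::api':
  host    => '192.168.204.2',  # placeholder bind address
  workers => 2,
  sync_db => true,             # triggers fm::db::sync (fm-dbsync) after install/config
}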

@@ -0,0 +1,22 @@
#
# Installs the fm python client.
#
# == parameters
#
# [*ensure*]
# (optional) Ensure state of the package.
# Defaults to 'present'.
#
class fm::client (
  $ensure = 'present'
) {

  include ::fm::deps
  include ::fm::params

  package { 'fmclient':
    ensure => $ensure,
    name   => $::fm::params::client_package,
    tag    => 'fmclient',
  }
}

@@ -0,0 +1,78 @@
# == Class: fm::db
#
# Configure the fm database
#
# === Parameters
#
# [*database_db_max_retries*]
# (optional) Maximum retries in case of connection error or deadlock error
# before error is raised. Set to -1 to specify an infinite retry count.
# Defaults to $::os_service_default
#
# [*database_connection*]
# Url used to connect to database.
# (Optional) Defaults to "sqlite:////var/lib/fm/fm.sqlite".
#
# [*database_idle_timeout*]
# Timeout when db connections should be reaped.
# (Optional) Defaults to $::os_service_default.
#
# [*database_min_pool_size*]
# Minimum number of SQL connections to keep open in a pool.
# (Optional) Defaults to $::os_service_default.
#
# [*database_max_pool_size*]
# Maximum number of SQL connections to keep open in a pool.
# (Optional) Defaults to $::os_service_default.
#
# [*database_max_retries*]
# Maximum number of database connection retries during startup.
# Setting -1 implies an infinite retry count.
# (Optional) Defaults to $::os_service_default.
#
# [*database_retry_interval*]
# Interval between retries of opening a database connection.
# (Optional) Defaults to $::os_service_default.
#
# [*database_max_overflow*]
# If set, use this value for max_overflow with sqlalchemy.
# (Optional) Defaults to $::os_service_default.
#
class fm::db (
  $database_db_max_retries = $::os_service_default,
  $database_connection     = 'sqlite:////var/lib/fm/fm.sqlite',
  $database_idle_timeout   = $::os_service_default,
  $database_min_pool_size  = $::os_service_default,
  $database_max_pool_size  = $::os_service_default,
  $database_max_retries    = $::os_service_default,
  $database_retry_interval = $::os_service_default,
  $database_max_overflow   = $::os_service_default,
) {

  include ::fm::deps

  $database_connection_real     = pick($::fm::database_connection, $database_connection)
  $database_idle_timeout_real   = pick($::fm::database_idle_timeout, $database_idle_timeout)
  $database_min_pool_size_real  = pick($::fm::database_min_pool_size, $database_min_pool_size)
  $database_max_pool_size_real  = pick($::fm::database_max_pool_size, $database_max_pool_size)
  $database_max_retries_real    = pick($::fm::database_max_retries, $database_max_retries)
  $database_retry_interval_real = pick($::fm::database_retry_interval, $database_retry_interval)
  $database_max_overflow_real   = pick($::fm::database_max_overflow, $database_max_overflow)

  oslo::db { 'fm_config':
    db_max_retries => $database_db_max_retries,
    connection     => $database_connection_real,
    idle_timeout   => $database_idle_timeout_real,
    min_pool_size  => $database_min_pool_size_real,
    max_pool_size  => $database_max_pool_size_real,
    max_retries    => $database_max_retries_real,
    retry_interval => $database_retry_interval_real,
    max_overflow   => $database_max_overflow_real,
  }

  # set up the connection string for FM Manager; strip the SQLAlchemy
  # driver suffix (+psycopg2), since fmManager expects a plain postgresql: URI
  $sql_connection = regsubst($database_connection_real, '^postgresql\+psycopg2:', 'postgresql:')

  fm_config {
    'DEFAULT/sql_connection': value => $sql_connection;
  }
}
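A sketch with an illustrative PostgreSQL DSN, showing the two connection
strings that result:

class { '::fm::db':
  database_connection => 'postgresql+psycopg2://fm:secret@127.0.0.1/fm',
}
# oslo::db writes database/connection with the +psycopg2 driver suffix intact,
# while DEFAULT/sql_connection gets the plain postgresql: URI for fmManager.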

@@ -0,0 +1,75 @@
# The fm::db::mysql class implements mysql backend for fm
#
# This class can be used to create tables, users and grant
# privileges for a mysql fm database.
#
# == parameters
#
# [*password*]
# (Required) Password to connect to the database.
#
# [*dbname*]
# (Optional) Name of the database.
# Defaults to 'fm'.
#
# [*user*]
# (Optional) User to connect to the database.
# Defaults to 'fm'.
#
# [*host*]
# (Optional) The default source host user is allowed to connect from.
# Defaults to '127.0.0.1'
#
# [*allowed_hosts*]
# (Optional) Other hosts the user is allowed to connect from.
# Defaults to 'undef'.
#
# [*charset*]
# (Optional) The database charset.
# Defaults to 'utf8'
#
# [*collate*]
# (Optional) The database collate.
# Only used with mysql modules >= 2.2.
# Defaults to 'utf8_general_ci'
#
# == Dependencies
# Class['mysql::server']
#
class fm::db::mysql(
  $password,
  $dbname        = 'fm',
  $user          = 'fm',
  $host          = '127.0.0.1',
  $charset       = 'utf8',
  $collate       = 'utf8_general_ci',
  $allowed_hosts = undef
) {

  #include ::fm::deps

  validate_string($password)

  ::openstacklib::db::mysql { 'fm':
    user          => $user,
    password_hash => mysql_password($password),
    dbname        => $dbname,
    host          => $host,
    charset       => $charset,
    collate       => $collate,
    allowed_hosts => $allowed_hosts,
  }

  Anchor['fm::db::begin']
  ~> Class['fm::db::mysql']
  ~> Anchor['fm::db::end']
}

@@ -0,0 +1,57 @@
# == Class: fm::db::postgresql
#
# Class that configures postgresql for fm
# Requires the Puppetlabs postgresql module.
#
# === Parameters
#
# [*password*]
# (Required) Password to connect to the database.
#
# [*dbname*]
# (Optional) Name of the database.
# Defaults to 'fm'.
#
# [*user*]
# (Optional) User to connect to the database.
# Defaults to 'fm'.
#
# [*encoding*]
# (Optional) The charset to use for the database.
# Default to undef.
#
# [*privileges*]
# (Optional) Privileges given to the database user.
# Default to 'ALL'
#
class fm::db::postgresql(
  $password,
  $dbname     = 'fm',
  $user       = 'fm',
  $encoding   = undef,
  $privileges = 'ALL',
) {

  include ::fm::deps

  ::openstacklib::db::postgresql { 'fm':
    password_hash => postgresql_password($user, $password),
    dbname        => $dbname,
    user          => $user,
    encoding      => $encoding,
    privileges    => $privileges,
  }

  Anchor['fm::db::begin']
  ~> Class['fm::db::postgresql']
  ~> Anchor['fm::db::end']
}
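Creating the database on the PostgreSQL node is then a one-line declaration
(the password is a placeholder):

class { '::fm::db::postgresql':
  password => 'fm-db-password',
}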

@@ -0,0 +1,30 @@
#
# Class to execute "fm-dbsync"
#
# [*user*]
# (optional) User to run dbsync command.
# Defaults to 'fm'
#
class fm::db::sync (
  $user = 'fm',
){

  include ::fm::deps

  exec { 'fm-db-sync':
    command     => 'fm-dbsync --config-file /etc/fm/fm.conf',
    path        => '/usr/bin',
    refreshonly => true,
    user        => $user,
    try_sleep   => 5,
    tries       => 10,
    logoutput   => on_failure,
    subscribe   => [
      Anchor['fm::install::end'],
      Anchor['fm::config::end'],
      Anchor['fm::dbsync::begin'],
    ],
    notify      => Anchor['fm::dbsync::end'],
  }
}

@@ -0,0 +1,40 @@
# == Class: fm::deps
#
# FM anchors and dependency management
#
class fm::deps {
  # Setup anchors for install, config and service phases of the module. These
  # anchors allow external modules to hook the begin and end of any of these
  # phases. Package or service management can also be replaced by ensuring the
  # package is absent or turning off service management and having the
  # replacement depend on the appropriate anchors. When applicable, end tags
  # should be notified so that subscribers can determine if installation,
  # config or service state changed and act on that if needed.
  anchor { 'fm::install::begin': }
  -> Package<| tag == 'fm-package'|>
  ~> anchor { 'fm::install::end': }
  -> anchor { 'fm::config::begin': }
  -> Fm_config<||>
  ~> anchor { 'fm::config::end': }
  -> anchor { 'fm::db::begin': }
  -> anchor { 'fm::db::end': }
  ~> anchor { 'fm::dbsync::begin': }
  -> anchor { 'fm::dbsync::end': }
  ~> anchor { 'fm::service::begin': }
  ~> Service<| tag == 'fm-service' |>
  ~> anchor { 'fm::service::end': }

  # api paste ini config should occur in the config block also.
  Anchor['fm::config::begin']
  -> Fm_api_paste_ini<||>
  ~> Anchor['fm::config::end']

  # all db settings should be applied and all packages should be installed
  # before dbsync starts
  Oslo::Db<||> -> Anchor['fm::dbsync::begin']

  # Installation or config changes will always restart services.
  Anchor['fm::install::end'] ~> Anchor['fm::service::begin']
  Anchor['fm::config::end'] ~> Anchor['fm::service::begin']
}
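An external module can hook these phases without patching this class; for
example, a hypothetical post-config hook:

exec { 'example-after-fm-config':
  command     => '/usr/bin/true',               # placeholder action
  refreshonly => true,
  subscribe   => Anchor['fm::config::end'],     # fires whenever FM config changes
  before      => Anchor['fm::service::begin'],  # completes before fm services restart
}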

@@ -0,0 +1,116 @@
# == Class: fm
#
# Configures the FM (fault management) service.
#
# === Parameters
#
# [*package_ensure*]
# (optional) The state of fm packages
# Defaults to 'present'
#
# [*log_dir*]
# (optional) Directory where logs should be stored.
# If set to boolean false or the $::os_service_default, it will not log to
# any directory.
# Defaults to undef.
#
# [*debug*]
# (optional) Set log output to debug output.
# Defaults to undef
#
# [*use_syslog*]
# (optional) Use syslog for logging
# Defaults to undef
#
# [*use_stderr*]
# (optional) Use stderr for logging
# Defaults to undef
#
# [*log_facility*]
# (optional) Syslog facility to receive log lines.
# Defaults to undef
#
# [*database_connection*]
# (optional) Connection url for the fm database.
# Defaults to undef.
#
# [*database_max_retries*]
# (optional) Maximum database connection retries during startup.
# Defaults to undef.
#
# [*database_idle_timeout*]
# (optional) Timeout before idle database connections are reaped.
# Defaults to undef.
#
# [*database_retry_interval*]
# (optional) Interval between retries of opening a database connection.
# Defaults to undef.
#
# [*database_min_pool_size*]
# (optional) Minimum number of SQL connections to keep open in a pool.
# Defaults to undef.
#
# [*database_max_pool_size*]
# (optional) Maximum number of SQL connections to keep open in a pool.
# Defaults to undef.
#
# [*database_max_overflow*]
# (optional) If set, use this value for max_overflow with sqlalchemy.
# Defaults to undef.
#
# [*event_log_max_size*]
# (optional) Maximum size of the event log.
# Defaults to 4000.
#
# [*system_name*]
# (optional) System name written to the DEFAULT section of fm.conf.
# Defaults to undef.
#
# [*region_name*]
# (optional) Region name written to the DEFAULT and sysinv sections.
# Defaults to undef.
#
# [*trap_destinations*]
# (optional) Comma-separated list of SNMP trap destinations.
# Defaults to undef.
#
# [*sysinv_catalog_info*]
# (optional) Catalog info used to look up the sysinv endpoint.
# Defaults to undef.
#
class fm (
  $package_ensure          = 'present',
  $debug                   = undef,
  $use_syslog              = undef,
  $use_stderr              = undef,
  $log_facility            = undef,
  $log_dir                 = undef,
  $database_connection     = undef,
  $database_idle_timeout   = undef,
  $database_min_pool_size  = undef,
  $database_max_pool_size  = undef,
  $database_max_retries    = undef,
  $database_retry_interval = undef,
  $database_max_overflow   = undef,
  $event_log_max_size      = 4000,
  $system_name             = undef,
  $region_name             = undef,
  $trap_destinations       = undef,
  $sysinv_catalog_info     = undef,
) inherits fm::params {

  include ::fm::deps
  include ::fm::logging

  # set up the connection string for FM Manager; strip the SQLAlchemy
  # driver suffix (+psycopg2) if it is present
  $sql_connection = regsubst($database_connection, '^postgresql\+psycopg2:', 'postgresql:')

  fm_config {
    'DEFAULT/sql_connection':     value => $sql_connection, secret => true;
    'DEFAULT/event_log_max_size': value => $event_log_max_size;
    'DEFAULT/system_name':        value => $system_name;
    'DEFAULT/region_name':        value => $region_name;
    'DEFAULT/trap_destinations':  value => $trap_destinations;
  }

  # Automatically add the psycopg2 driver to postgresql (only does this if
  # it is missing)
  $real_connection = regsubst($database_connection, '^postgresql:', 'postgresql+psycopg2:')

  fm_config {
    'database/connection':    value => $real_connection, secret => true;
    'database/idle_timeout':  value => $database_idle_timeout;
    'database/max_pool_size': value => $database_max_pool_size;
    'database/max_overflow':  value => $database_max_overflow;
  }

  fm_config {
    'sysinv/catalog_info':   value => $sysinv_catalog_info;
    'sysinv/os_region_name': value => $region_name;
  }

  fm_api_paste_ini {
    'pipeline:fm-api/pipeline':               value => 'request_id authtoken api_v1';
    'filter:request_id/paste.filter_factory': value => 'oslo_middleware:RequestId.factory';
    'filter:authtoken/acl_public_routes':     value => '/, /v1';
    'filter:authtoken/paste.filter_factory':  value => 'fm.api.middleware.auth_token:AuthTokenMiddleware.factory';
    'app:api_v1/paste.app_factory':           value => 'fm.api.app:app_factory';
  }
}

@@ -0,0 +1,87 @@
# == Class: fm::keystone::auth
#
# Configures fault management user, service and endpoint in Keystone.
#
# === Parameters
#
# [*password*]
# (required) Password for fm user.
#
# [*auth_name*]
# Username for fm service. Defaults to 'fm'.
#
# [*email*]
# Email for fm user. Defaults to 'fm@localhost'.
#
# [*tenant*]
# Tenant for fm user. Defaults to 'services'.
#
# [*configure_endpoint*]
# Should fm endpoint be configured? Defaults to true.
#
# [*configure_user*]
# (Optional) Should the service user be configured?
# Defaults to true.
#
# [*configure_user_role*]
# (Optional) Should the admin role be configured for the service user?
# Defaults to true.
#
# [*service_type*]
# Type of service. Defaults to 'faultmanagement'.
#
# [*region*]
# Region for endpoint. Defaults to 'RegionOne'.
#
# [*service_name*]
# (optional) Name of the service.
# Defaults to 'fm'.
#
# [*public_url*]
# (optional) The endpoint's public url. (Defaults to 'http://127.0.0.1:18002')
# This url should *not* contain any trailing '/'.
#
# [*admin_url*]
# (optional) The endpoint's admin url. (Defaults to 'http://127.0.0.1:18002')
# This url should *not* contain any trailing '/'.
#
# [*internal_url*]
# (optional) The endpoint's internal url. (Defaults to 'http://127.0.0.1:18002')
# This url should *not* contain any trailing '/'.
#
class fm::keystone::auth (
  $password,
  $auth_name           = 'fm',
  $email               = 'fm@localhost',
  $tenant              = 'services',
  $configure_endpoint  = true,
  $configure_user      = true,
  $configure_user_role = true,
  $service_name        = 'fm',
  $service_type        = 'faultmanagement',
  $region              = 'RegionOne',
  $public_url          = 'http://127.0.0.1:18002',
  $internal_url        = 'http://127.0.0.1:18002',
  $admin_url           = 'http://127.0.0.1:18002',
) {

  include ::fm::deps

  keystone::resource::service_identity { 'fm':
    configure_user      => $configure_user,
    configure_user_role => $configure_user_role,
    configure_endpoint  => $configure_endpoint,
    service_name        => $service_name,
    service_type        => $service_type,
    service_description => 'Fault Management Service',
    region              => $region,
    auth_name           => $auth_name,
    password            => $password,
    email               => $email,
    tenant              => $tenant,
    public_url          => $public_url,
    internal_url        => $internal_url,
    admin_url           => $admin_url,
  }
}
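In region config the URL defaults are overridden with the real endpoint
addresses; a sketch with placeholder values:

class { '::fm::keystone::auth':
  password     => 'fm-service-password',         # placeholder
  region       => 'RegionOne',
  public_url   => 'http://10.10.10.2:18002',     # placeholder OAM address
  internal_url => 'http://192.168.204.2:18002',  # placeholder management address
  admin_url    => 'http://192.168.204.2:18002',
}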

@@ -0,0 +1,243 @@
# == Class: fm::keystone::authtoken
#
# Configure the keystone_authtoken section in the configuration file
#
# === Parameters
#
# [*username*]
# (Optional) The name of the service user
# Defaults to 'fm'
#
# [*password*]
# (Optional) Password to create for the service user
# Defaults to $::os_service_default
#
# [*auth_url*]
# (Optional) The URL to use for authentication.
# Defaults to 'http://localhost:35357'
#
# [*project_name*]
# (Optional) Service project name
# Defaults to 'services'
#
# [*user_domain_name*]
# (Optional) Name of domain for $username
# Defaults to 'Default'
#
# [*project_domain_name*]
# (Optional) Name of domain for $project_name
# Defaults to 'Default'
#
# [*insecure*]
# (Optional) If true, explicitly allow TLS without checking server cert
# against any certificate authorities. WARNING: not recommended. Use with
# caution.
# Defaults to $::os_service_default
#
# [*auth_section*]
# (Optional) Config Section from which to load plugin specific options
# Defaults to $::os_service_default.
#
# [*auth_type*]
# (Optional) Authentication type to load
# Defaults to 'password'
#
# [*auth_uri*]
# (Optional) Complete public Identity API endpoint.
# Defaults to 'http://localhost:5000'
#
# [*auth_version*]
# (Optional) API version of the admin Identity API endpoint.
# Defaults to $::os_service_default.
#
# [*cache*]
# (Optional) Env key for the swift cache.
# Defaults to $::os_service_default.
#
# [*cafile*]
# (Optional) A PEM encoded Certificate Authority to use when verifying HTTPs
# connections.
# Defaults to $::os_service_default.
#
# [*certfile*]
# (Optional) Required if identity server requires client certificate
# Defaults to $::os_service_default.
#
# [*check_revocations_for_cached*]
# (Optional) If true, the revocation list will be checked for cached tokens.
# This requires that PKI tokens are configured on the identity server.
# boolean value.
# Defaults to $::os_service_default.
#
# [*delay_auth_decision*]
# (Optional) Do not handle authorization requests within the middleware, but
# delegate the authorization decision to downstream WSGI components. Boolean
# value
# Defaults to $::os_service_default.
#
# [*enforce_token_bind*]
# (Optional) Used to control the use and type of token binding. Can be set
# to: "disabled" to not check token binding. "permissive" (default) to
# validate binding information if the bind type is of a form known to the
# server and ignore it if not. "strict" like "permissive" but if the bind
# type is unknown the token will be rejected. "required" any form of token
# binding is needed to be allowed. Finally the name of a binding method that
# must be present in tokens. String value.
# Defaults to $::os_service_default.
#
# [*hash_algorithms*]
# (Optional) Hash algorithms to use for hashing PKI tokens. This may be a
# single algorithm or multiple. The algorithms are those supported by Python
# standard hashlib.new(). The hashes will be tried in the order given, so put
# the preferred one first for performance. The result of the first hash will
# be stored in the cache. This will typically be set to multiple values only
# while migrating from a less secure algorithm to a more secure one. Once all
# the old tokens are expired this option should be set to a single value for
# better performance. List value.
# Defaults to $::os_service_default.
#
# [*http_connect_timeout*]
# (Optional) Request timeout value for communicating with Identity API
# server.
# Defaults to $::os_service_default.
#
# [*http_request_max_retries*]
# (Optional) How many times are we trying to reconnect when communicating
# with Identity API Server. Integer value
# Defaults to $::os_service_default.
#
# [*include_service_catalog*]
# (Optional) Indicate whether to set the X-Service-Catalog header. If False,
# middleware will not ask for service catalog on token validation and will
# not set the X-Service-Catalog header. Boolean value.
# Defaults to $::os_service_default.
#
# [*keyfile*]
# (Optional) Required if identity server requires client certificate
# Defaults to $::os_service_default.
#
# [*memcache_pool_conn_get_timeout*]
# (Optional) Number of seconds that an operation will wait to get a memcached
# client connection from the pool. Integer value
# Defaults to $::os_service_default.
#
# [*memcache_pool_dead_retry*]
# (Optional) Number of seconds memcached server is considered dead before it
# is tried again. Integer value
# Defaults to $::os_service_default.
#
# [*memcache_pool_maxsize*]
# (Optional) Maximum total number of open connections to every memcached
# server. Integer value
# Defaults to $::os_service_default.
#
# [*memcache_pool_socket_timeout*]
# (Optional) Socket timeout in seconds for communicating with a memcached
# server. Integer value
# Defaults to $::os_service_default.
#
# [*memcache_pool_unused_timeout*]
# (Optional) Number of seconds a connection to memcached is held unused in
# the pool before it is closed. Integer value
# Defaults to $::os_service_default.
#
# [*memcache_secret_key*]
# (Optional, mandatory if memcache_security_strategy is defined) This string
# is used for key derivation.
# Defaults to $::os_service_default.
#
# [*memcache_security_strategy*]
# (Optional) If defined, indicate whether token data should be authenticated
# or authenticated and encrypted. If MAC, token data is authenticated (with
# HMAC) in the cache. If ENCRYPT, token data is encrypted and authenticated in the
# cache. If the value is not one of these options or empty, auth_token will
# raise an exception on initialization.
# Defaults to $::os_service_default.
#
# [*memcache_use_advanced_pool*]
# (Optional) Use the advanced (eventlet safe) memcached client pool. The
# advanced pool will only work under python 2.x Boolean value
# Defaults to $::os_service_default.
#
# [*memcached_servers*]
# (Optional) Optionally specify a list of memcached server(s) to use for
# caching. If left undefined, tokens will instead be cached in-process.
# Defaults to $::os_service_default.
#
# [*manage_memcache_package*]
# (Optional) Whether to install the python-memcache package.
# Defaults to false.
#
# [*region_name*]
# (Optional) The region in which the identity server can be found.
# Defaults to $::os_service_default.
#
# [*revocation_cache_time*]
# (Optional) Determines the frequency at which the list of revoked tokens is
# retrieved from the Identity service (in seconds). A high number of
# revocation events combined with a low cache duration may significantly
# reduce performance. Only valid for PKI tokens. Integer value
# Defaults to $::os_service_default.
#
# [*token_cache_time*]
# (Optional) In order to prevent excessive effort spent validating tokens,
# the middleware caches previously-seen tokens for a configurable duration
# (in seconds). Set to -1 to disable caching completely. Integer value
# Defaults to $::os_service_default.
#
class fm::keystone::authtoken(
$username = 'fm',
$password = $::os_service_default,
$auth_url = 'http://localhost:35357',
$project_name = 'services',
$user_domain_name = 'Default',
$project_domain_name = 'Default',
$insecure = $::os_service_default,
$auth_section = $::os_service_default,
$auth_type = 'password',
$auth_uri = 'http://localhost:5000',
$auth_version = $::os_service_default,
$cache = $::os_service_default,
$cafile = $::os_service_default,
$certfile = $::os_service_default,
$check_revocations_for_cached = $::os_service_default,
$delay_auth_decision = $::os_service_default,
$enforce_token_bind = $::os_service_default,
$hash_algorithms = $::os_service_default,
$http_connect_timeout = $::os_service_default,
$http_request_max_retries = $::os_service_default,
$include_service_catalog = $::os_service_default,
$keyfile = $::os_service_default,
$memcache_pool_conn_get_timeout = $::os_service_default,
$memcache_pool_dead_retry = $::os_service_default,
$memcache_pool_maxsize = $::os_service_default,
$memcache_pool_socket_timeout = $::os_service_default,
$memcache_pool_unused_timeout = $::os_service_default,
$memcache_secret_key = $::os_service_default,
$memcache_security_strategy = $::os_service_default,
$memcache_use_advanced_pool = $::os_service_default,
$memcached_servers = $::os_service_default,
$manage_memcache_package = false,
$region_name = $::os_service_default,
$revocation_cache_time = $::os_service_default,
$token_cache_time = $::os_service_default,
) {
include ::fm::deps
if is_service_default($password) {
fail('Please set password for FM service user')
}
keystone::resource::authtoken { 'fm_config':
username => $username,
password => $password,
project_name => $project_name,
auth_url => $auth_url,
auth_uri => $auth_uri,
auth_type => $auth_type,
user_domain_name => $user_domain_name,
project_domain_name => $project_domain_name,
region_name => $region_name,
}
}

View File

@ -0,0 +1,134 @@
# Class fm::logging
#
# fm logging configuration
#
# == parameters
#
# [*debug*]
# (Optional) Should the daemons log debug messages
# Defaults to $::os_service_default
#
# [*use_syslog*]
# (Optional) Use syslog for logging.
# Defaults to $::os_service_default
#
# [*use_stderr*]
# (optional) Use stderr for logging
# Defaults to $::os_service_default
#
# [*log_facility*]
# (Optional) Syslog facility to receive log lines.
# Defaults to $::os_service_default
#
# [*log_dir*]
# (optional) Directory where logs should be stored.
# If set to boolean false or the $::os_service_default, it will not log to
# any directory.
# Defaults to '/var/log/fm'.
#
# [*logging_context_format_string*]
# (optional) Format string to use for log messages with context.
# Defaults to $::os_service_default
# Example: '%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s\
# [%(request_id)s %(user_identity)s] %(instance)s%(message)s'
#
# [*logging_default_format_string*]
# (optional) Format string to use for log messages without context.
# Defaults to $::os_service_default
# Example: '%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s\
# [-] %(instance)s%(message)s'
#
# [*logging_debug_format_suffix*]
# (optional) Formatted data to append to log format when level is DEBUG.
# Defaults to $::os_service_default
# Example: '%(funcName)s %(pathname)s:%(lineno)d'
#
# [*logging_exception_prefix*]
# (optional) Prefix each line of exception output with this format.
# Defaults to $::os_service_default
# Example: '%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s'
#
# [*log_config_append*]
# The name of an additional logging configuration file.
# Defaults to $::os_service_default
# See https://docs.python.org/2/howto/logging.html
#
# [*default_log_levels*]
# (optional) Hash of logger (keys) and level (values) pairs.
# Defaults to $::os_service_default
# Example:
# { 'amqp' => 'WARN', 'amqplib' => 'WARN', 'boto' => 'WARN',
# 'sqlalchemy' => 'WARN', 'suds' => 'INFO', 'iso8601' => 'WARN',
# 'requests.packages.urllib3.connectionpool' => 'WARN' }
#
# [*publish_errors*]
# (optional) Publish error events (boolean value).
# Defaults to $::os_service_default
#
# [*fatal_deprecations*]
# (optional) Make deprecations fatal (boolean value)
# Defaults to $::os_service_default
#
# [*instance_format*]
# (optional) If an instance is passed with the log message, format it
# like this (string value).
# Defaults to $::os_service_default
# Example: '[instance: %(uuid)s] '
#
# [*instance_uuid_format*]
# (optional) If an instance UUID is passed with the log message, format
# it like this (string value).
# Defaults to $::os_service_default
# Example: instance_uuid_format='[instance: %(uuid)s] '
#
# [*log_date_format*]
# (optional) Format string for %%(asctime)s in log records.
# Defaults to $::os_service_default
#   Example: '%Y-%m-%d %H:%M:%S'
#
class fm::logging(
$use_syslog = $::os_service_default,
$use_stderr = $::os_service_default,
$log_facility = $::os_service_default,
$log_dir = '/var/log/fm',
$debug = $::os_service_default,
$logging_context_format_string = $::os_service_default,
$logging_default_format_string = $::os_service_default,
$logging_debug_format_suffix = $::os_service_default,
$logging_exception_prefix = $::os_service_default,
$log_config_append = $::os_service_default,
$default_log_levels = $::os_service_default,
$publish_errors = $::os_service_default,
$fatal_deprecations = $::os_service_default,
$instance_format = $::os_service_default,
$instance_uuid_format = $::os_service_default,
$log_date_format = $::os_service_default,
) {
include ::fm::deps
$use_syslog_real = pick($::fm::use_syslog,$use_syslog)
$use_stderr_real = pick($::fm::use_stderr,$use_stderr)
$log_facility_real = pick($::fm::log_facility,$log_facility)
$log_dir_real = pick($::fm::log_dir,$log_dir)
$debug_real = pick($::fm::debug,$debug)
oslo::log { 'fm_config':
debug => $debug_real,
use_syslog => $use_syslog_real,
use_stderr => $use_stderr_real,
log_dir => $log_dir_real,
syslog_log_facility => $log_facility_real,
logging_context_format_string => $logging_context_format_string,
logging_default_format_string => $logging_default_format_string,
logging_debug_format_suffix => $logging_debug_format_suffix,
logging_exception_prefix => $logging_exception_prefix,
log_config_append => $log_config_append,
default_log_levels => $default_log_levels,
publish_errors => $publish_errors,
fatal_deprecations => $fatal_deprecations,
log_date_format => $log_date_format,
instance_format => $instance_format,
instance_uuid_format => $instance_uuid_format,
}
}

View File

@ -0,0 +1,20 @@
class fm::params {
case $::osfamily {
'RedHat': {
$client_package = 'python-fmclient'
$api_package = 'fm-rest-api'
$api_service = 'fm-api'
}
'Debian': {
$client_package = 'python-fmclient'
$api_package = 'fm-rest-api'
$api_service = 'fm-api'
}
default: {
fail("Unsupported osfamily: ${::osfamily}")
}
} # Case $::osfamily
}

View File

@ -52,6 +52,10 @@ class nfv::nfvi (
$patching_service_name = 'patching',
$patching_service_type = 'patching',
$patching_endpoint_type = 'admin',
$fm_region_name = 'RegionOne',
$fm_service_name = 'fm',
$fm_service_type = 'faultmanagement',
$fm_endpoint_type = 'admin',
$rabbit_host = '127.0.0.1',
$rabbit_port = 5672,
$rabbit_userid = 'guest',
@ -133,6 +137,11 @@ class nfv::nfvi (
'patching/service_type': value => $patching_service_type;
'patching/endpoint_type': value => $patching_endpoint_type;
'fm/region_name': value => $fm_region_name;
'fm/service_name': value => $fm_service_name;
'fm/service_type': value => $fm_service_type;
'fm/endpoint_type': value => $fm_endpoint_type;
/* AMQP */
'amqp/host': value => $rabbit_host;
'amqp/port': value => $rabbit_port;

View File

@ -26,12 +26,9 @@ from cgtsclient.v1 import cluster
from cgtsclient.v1 import controller_fs
from cgtsclient.v1 import drbdconfig
from cgtsclient.v1 import ethernetport
from cgtsclient.v1 import event_log
from cgtsclient.v1 import event_suppression
from cgtsclient.v1 import firewallrules
from cgtsclient.v1 import health
from cgtsclient.v1 import helm
from cgtsclient.v1 import ialarm
from cgtsclient.v1 import icommunity
from cgtsclient.v1 import icpu
from cgtsclient.v1 import idisk
@ -117,9 +114,6 @@ class Client(http.HTTPClient):
self.iprofile = iprofile.iprofileManager(self)
self.icommunity = icommunity.iCommunityManager(self)
self.itrapdest = itrapdest.iTrapdestManager(self)
self.ialarm = ialarm.ialarmManager(self)
self.event_log = event_log.EventLogManager(self)
self.event_suppression = event_suppression.EventSuppressionManager(self)
self.iinfra = iinfra.iinfraManager(self)
self.port = port.PortManager(self)
self.ethernet_port = ethernetport.EthernetPortManager(self)

View File

@ -1,45 +0,0 @@
#!/usr/bin/env python
# Copyright (c) 2016 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from cgtsclient.common import base
from cgtsclient.v1 import options
class EventLog(base.Resource):
def __repr__(self):
return "<EventLog %s>" % self._info
class EventLogManager(base.Manager):
resource_class = EventLog
@staticmethod
def _path(id=None):
return '/v1/event_log/%s' % id if id else '/v1/event_log'
def list(self, q=None, limit=None, marker=None, alarms=False, logs=False, include_suppress=False):
params = []
if limit:
params.append('limit=%s' % str(limit))
if marker:
params.append('marker=%s' % str(marker))
if include_suppress:
params.append('include_suppress=True')
if alarms is True and logs is False:
params.append('alarms=True')
elif alarms is False and logs is True:
params.append('logs=True')
restAPIURL = options.build_url(self._path(), q, params)
l = self._list(restAPIURL, 'event_log')
return l
def get(self, iid):
try:
return self._list(self._path(iid))[0]
except IndexError:
return None
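
For illustration, a minimal standalone sketch of the URL assembly that list() delegates to options.build_url; the _build_url helper here is a hypothetical stand-in, not the cgtsclient implementation:

    def _build_url(path, params):
        # append any filter parameters as a query string
        return '%s?%s' % (path, '&'.join(params)) if params else path

    params = []
    limit, alarms, logs = 10, True, False
    if limit:
        params.append('limit=%s' % limit)
    if alarms and not logs:
        params.append('alarms=True')
    print(_build_url('/v1/event_log', params))  # /v1/event_log?limit=10&alarms=True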

View File

@ -1,127 +0,0 @@
#!/usr/bin/env python
#
# Copyright (c) 2016 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# All Rights Reserved.
#
from cgtsclient.common import utils
from cgtsclient.common import wrapping_formatters
from cgtsclient import exc
from cgtsclient.v1 import options
def _display_event(log):
fields = ['uuid', 'event_log_id', 'state', 'entity_type_id',
'entity_instance_id',
'timestamp', 'severity', 'reason_text', 'event_log_type',
'probable_cause', 'proposed_repair_action',
'service_affecting', 'suppression', 'suppression_status']
data = dict([(f, getattr(log, f, '')) for f in fields])
utils.print_dict(data, wrap=72)
@utils.arg('event_log', metavar='<uuid>',
help="ID of the event log to show")
def do_event_show(cc, args={}):
'''Show an event log.'''
try:
log = cc.event_log.get(args.event_log)
except exc.HTTPNotFound:
raise exc.CommandError('Event log not found: %s' % args.event_log)
else:
_display_event(log)
@utils.arg('-q', '--query', metavar='<QUERY>',
help='key[op]data_type::value; list. data_type is optional, '
'but if supplied must be string, integer, float, or boolean. '
'Valid query fields (event_log_id, entity_type_id, '
'entity_instance_id, severity, start, end)'
' Example: system event-list -q \'start=20160131 10:23:45;end=20171225\'')
@utils.arg('-l', '--limit', metavar='<NUMBER>',
help='Maximum number of event logs to return.')
@utils.arg('--alarms',
action='store_true',
help='Show alarms only')
@utils.arg('--logs', action='store_true',
help='Show logs only')
@utils.arg('--uuid', action='store_true',
help='Include UUID in output')
@utils.arg('--include_suppress',
action='store_true',
help='Include suppressed alarms in output')
@utils.arg('--nopaging', action='store_true',
help='Output is not paged')
def do_event_list(cc, args={}):
'''List event logs.'''
queryAsArray = options.cli_to_array(args.query)
no_paging = args.nopaging
alarms = False
logs = False
include_suppress = False
includeUUID = args.uuid
if args.alarms and not args.logs:
alarms = True
elif args.logs and not args.alarms:
logs = True
if args.include_suppress:
include_suppress = True
logs = cc.event_log.list(q=queryAsArray, limit=args.limit,
alarms=alarms, logs=logs, include_suppress=include_suppress)
for l in logs:
utils.normalize_field_data(l, ['entity_instance_id', 'reason_text'])
# omit action initially to keep output width sane
# (can switch over to vertical formatting when available from CLIFF)
def highlightEventId(event):
suppressed = hasattr(event, "suppression_status") and event.suppression_status == "suppressed"
if suppressed:
value = "S({})".format(event.event_log_id)
else:
value = event.event_log_id
return value
if includeUUID:
field_labels = ['UUID', 'Time Stamp', 'State', 'Event Log ID', 'Reason Text',
'Entity Instance ID', 'Severity']
fields = ['uuid', 'timestamp', 'state', 'event_log_id', 'reason_text',
'entity_instance_id', 'severity']
formatterSpec = {"uuid": wrapping_formatters.UUID_MIN_LENGTH,
"timestamp": .08,
"state": .08,
"event_log_id": {"formatter": hightlightEventId, "wrapperFormatter": .07},
"reason_text": .42,
"entity_instance_id": .13,
"severity": .12}
else:
field_labels = ['Time Stamp', 'State', 'Event Log ID', 'Reason Text',
'Entity Instance ID', 'Severity']
fields = ['timestamp', 'state', 'event_log_id', 'reason_text',
'entity_instance_id', 'severity']
# for best results, ensure width ratios add up to 1 (=100%)
formatterSpec = {"timestamp": .08,
"state": .08,
"event_log_id": {"formatter": hightlightEventId, "wrapperFormatter": .07},
"reason_text": .52,
"entity_instance_id": .13,
"severity": .12}
formatters = wrapping_formatters.build_wrapping_formatters(logs, fields,
field_labels, formatterSpec)
utils.print_long_list(logs, fields, field_labels,
formatters=formatters, sortby=fields.index('timestamp'),
reversesort=True, no_paging=no_paging)
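
As the width-ratio comments above note, formatterSpec values are fractions of the display width that should sum to 1; a small sketch of how such ratios become absolute column widths, with the 120-character terminal width an assumed value for illustration:

    TERM_WIDTH = 120  # assumed terminal width for this example
    formatter_spec = {"timestamp": .08, "state": .08, "event_log_id": .07,
                      "reason_text": .52, "entity_instance_id": .13,
                      "severity": .12}
    for field, ratio in sorted(formatter_spec.items()):
        print("%-20s %3d chars" % (field, int(TERM_WIDTH * ratio)))
    print("total ratio: %.2f" % sum(formatter_spec.values()))  # 1.00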

View File

@ -1,37 +0,0 @@
#!/usr/bin/env python
# Copyright (c) 2016 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from cgtsclient.common import base
from cgtsclient.v1 import options
class EventSuppression(base.Resource):
def __repr__(self):
return "<EventSuppression %s>" % self._info
class EventSuppressionManager(base.Manager):
resource_class = EventSuppression
@staticmethod
def _path(iid=None):
return '/v1/event_suppression/%s' % iid if iid else '/v1/event_suppression'
def list(self, q=None):
params = []
restAPIURL = options.build_url(self._path(), q, params)
return self._list(restAPIURL, 'event_suppression')
def get(self, iid):
try:
return self._list(self._path(iid))[0]
except IndexError:
return None
def update(self, event_suppression_uuid, patch):
return self._update(self._path(event_suppression_uuid), patch)
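
update() takes a JSON-patch style list, one 'replace' operation per attribute to change; a minimal sketch of the document a caller builds, mirroring the shell code elsewhere in this change (the cc client named in the trailing comment is assumed, not constructed here):

    # one 'replace' operation per attribute to change
    patch = [{'path': '/suppression_status',
              'op': 'replace',
              'value': 'suppressed'}]
    print(patch)
    # with an instantiated client, this would be sent as:
    #   cc.event_suppression.update(event_suppression_uuid, patch)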

View File

@ -1,208 +0,0 @@
#!/usr/bin/env python
#
# Copyright (c) 2016 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# All Rights Reserved.
#
from cgtsclient.common import utils
from cgtsclient.common import wrapping_formatters
from cgtsclient.v1 import options
def _get_display_config(includeUUID):
if includeUUID:
field_labels = ['UUID', 'Event ID', 'Status']
fields = ['uuid', 'alarm_id', 'suppression_status']
formatterSpec = {"uuid": 40,
"alarm_id": 25,
"suppression_status": 15}
else:
field_labels = ['Event ID', 'Status']
fields = ['alarm_id', 'suppression_status']
formatterSpec = {"alarm_id": 25,
"suppression_status": 15}
return {
'field_labels': field_labels,
'fields': fields,
'formatterSpec': formatterSpec
}
def _display_event_suppression(log):
fields = ['uuid', 'alarm_id', 'description', 'suppression_status']
data = dict([(f, getattr(log, f, '')) for f in fields])
utils.print_dict(data, wrap=72)
def _get_suppressed_alarms_tuples(data):
"""Split the suppressed_alarms field from a comma separated list alarm id's to a
real list of (start, end) tuples. ??????
"""
suppressed_alarms = []
for a in data['suppressed_alarms'].split(',') or []:
suppressed_alarms.append((a))
return suppressed_alarms
def _event_suppression_list(cc, include_unsuppressed=False):
query = 'suppression_status=string::suppressed'
queryAsArray = []
if include_unsuppressed:
query = None
if query is not None:
queryAsArray = options.cli_to_array(query)
event_suppression_list = cc.event_suppression.list(q=queryAsArray)
return event_suppression_list
def print_event_suppression_list(cc, no_paging, includeUUID):
event_suppression_list = _event_suppression_list(cc, include_unsuppressed=False)
displayCFG = _get_display_config(includeUUID)
field_labels = displayCFG['field_labels']
fields = displayCFG['fields']
formatterSpec = displayCFG['formatterSpec']
formatters = wrapping_formatters.build_wrapping_formatters(event_suppression_list, fields,
field_labels, formatterSpec)
utils.print_long_list(event_suppression_list, fields, field_labels, formatters=formatters, sortby=1,
reversesort=False, no_paging=no_paging)
def event_suppression_update(cc, data, suppress=False):
event_suppression_list = _event_suppression_list(cc, include_unsuppressed=True)
alarm_id_list = []
for alarm_id in data['alarm_id'].split(',') or []:
alarm_id_list.append(alarm_id)
if suppress:
patch_value = 'suppressed'
else:
patch_value = 'unsuppressed'
patch = []
for event_id in event_suppression_list:
if event_id.alarm_id in alarm_id_list:
print "Alarm ID: {} {}.".format(event_id.alarm_id, patch_value)
uuid = event_id.uuid
patch.append(dict(path='/' + 'suppression_status', value=patch_value, op='replace'))
cc.event_suppression.update(uuid, patch)
@utils.arg('--include-unsuppressed', action='store_true',
help='Include unsuppressed Event ID\'s')
@utils.arg('--uuid', action='store_true',
help='Include UUID in output')
@utils.arg('--nopaging', action='store_true',
help='Output is not paged')
def do_event_suppress_list(cc, args={}):
'''List Suppressed Event ID's '''
include_unsuppressed = args.include_unsuppressed
includeUUID = args.uuid
event_suppression_list = _event_suppression_list(cc, include_unsuppressed=include_unsuppressed)
no_paging = args.nopaging
displayCFG = _get_display_config(includeUUID)
field_labels = displayCFG['field_labels']
fields = displayCFG['fields']
formatterSpec = displayCFG['formatterSpec']
formatters = wrapping_formatters.build_wrapping_formatters(event_suppression_list, fields,
field_labels, formatterSpec)
utils.print_long_list(event_suppression_list, fields, field_labels, formatters=formatters, sortby=1,
reversesort=False, no_paging=no_paging)
@utils.arg('--alarm_id',
metavar='<alarm_id>,...',
help="The alarm_id list (comma separated) of alarm ID's to suppress.")
@utils.arg('--nopaging', action='store_true',
help='Output is not paged')
@utils.arg('--uuid', action='store_true',
help='Include UUID in output')
def do_event_suppress(cc, args={}):
'''Suppress specified Event ID's.'''
field_list = ['alarm_id']
# Prune input fields down to required/expected values
data = dict((k, v) for (k, v) in vars(args).items()
if k in field_list and not (v is None))
if 'alarm_id' in data:
event_suppression_update(cc, data, suppress=True)
no_paging = args.nopaging
includeUUID = args.uuid
print_event_suppression_list(cc, no_paging, includeUUID)
@utils.arg('--alarm_id',
metavar='<alarm_id>,...',
help="The alarm_id list (comma separated) of alarm ID's to unsuppress.")
@utils.arg('--nopaging', action='store_true',
help='Output is not paged')
@utils.arg('--uuid', action='store_true',
help='Include UUID in output')
def do_event_unsuppress(cc, args):
'''Unsuppress specified Event ID's.'''
field_list = ['alarm_id']
# Prune input fields down to required/expected values
data = dict((k, v) for (k, v) in vars(args).items()
if k in field_list and not (v is None))
if 'alarm_id' in data:
event_suppression_update(cc, data, suppress=False)
no_paging = args.nopaging
includeUUID = args.uuid
print_event_suppression_list(cc, no_paging, includeUUID)
@utils.arg('--nopaging', action='store_true',
help='Output is not paged')
@utils.arg('--uuid', action='store_true',
help='Include UUID in output')
def do_event_unsuppress_all(cc, args):
'''Unsuppress all Event ID's.'''
patch = []
alarms_suppression_list = _event_suppression_list(cc, include_unsuppressed=True)
for alarm_type in alarms_suppression_list:
suppression_status = alarm_type.suppression_status
if suppression_status == 'suppressed':
uuid = alarm_type.uuid
patch.append(dict(path='/' + 'suppression_status', value='unsuppressed', op='replace'))
print "Alarm ID: {} unsuppressed.".format(alarm_type.alarm_id)
cc.event_suppression.update(uuid, patch)
no_paging = args.nopaging
includeUUID = args.uuid
print_event_suppression_list(cc, no_paging, includeUUID)

View File

@ -1,53 +0,0 @@
#!/usr/bin/env python
# Copyright (c) 2013-2014 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from cgtsclient.common import base
from cgtsclient.v1 import options
class ialarm(base.Resource):
def __repr__(self):
return "<ialarm %s>" % self._info
class ialarmManager(base.Manager):
resource_class = ialarm
@staticmethod
def _path(id=None):
return '/v1/ialarms/%s' % id if id else '/v1/ialarms'
def list(self, q=None, limit=None, marker=None, sort_key=None,
sort_dir=None, include_suppress=False):
params = []
if include_suppress:
params.append('include_suppress=True')
if limit:
params.append('limit=%s' % str(limit))
if marker:
params.append('marker=%s' % str(marker))
if sort_key:
params.append('sort_key=%s' % str(sort_key))
if sort_dir:
params.append('sort_dir=%s' % str(sort_dir))
return self._list(options.build_url(self._path(), q, params), 'ialarms')
def get(self, iid):
try:
return self._list(self._path(iid))[0]
except IndexError:
return None
def delete(self, iid):
return self._delete(self._path(iid))
def summary(self, include_suppress=False):
params = []
if include_suppress:
params.append('include_suppress=True')
return self._list(options.build_url(self._path('summary'), None, params))

View File

@ -1,148 +0,0 @@
#!/usr/bin/env python
#
# Copyright (c) 2013-2014 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# All Rights Reserved.
#
from cgtsclient.common import utils
from cgtsclient.common import utils as cgts_utils
from cgtsclient.common import wrapping_formatters
from cgtsclient import exc
from cgtsclient.v1 import options
def _display_fault(fault):
fields = ['uuid', 'alarm_id', 'alarm_state', 'entity_type_id', 'entity_instance_id',
'timestamp', 'severity', 'reason_text', 'alarm_type',
'probable_cause', 'proposed_repair_action', 'service_affecting',
'suppression', 'suppression_status', 'mgmt_affecting', 'degrade_affecting']
data = dict([(f, getattr(fault, f, '')) for f in fields])
cgts_utils.print_dict(data, wrap=72)
@utils.arg('ialarm', metavar='<uuid>', help="ID of the alarm to show")
def do_alarm_show(cc, args={}):
'''Show an active alarm.'''
try:
fault = cc.ialarm.get(args.ialarm)
except exc.HTTPNotFound:
raise exc.CommandError('Alarm not found: %s' % args.ialarm)
else:
_display_fault(fault)
@utils.arg('ialarm', metavar='<uuid>', help="ID of the alarm to show")
def do_alarm_delete(cc, args={}):
'''Delete an active alarm.'''
try:
cc.ialarm.delete(args.ialarm)
except exc.HTTPNotFound:
raise exc.CommandError('Alarm not found: %s' % args.ialarm)
@utils.arg('-q', '--query', metavar='<QUERY>',
help='key[op]data_type::value; list. data_type is optional, '
'but if supplied must be string, integer, float, or boolean.')
@utils.arg('--uuid', action='store_true',
help='Include UUID in output')
@utils.arg('--include_suppress',
action='store_true',
help='Include suppressed alarms in output')
@utils.arg('--mgmt_affecting',
action='store_true',
help='Include management affecting status in output')
@utils.arg('--degrade_affecting',
action='store_true',
help='Include degrade affecting status in output')
def do_alarm_list(cc, args={}):
'''List all active alarms.'''
includeUUID = args.uuid
include_suppress = False
if args.include_suppress:
include_suppress = True
include_mgmt_affecting = False
if args.mgmt_affecting:
include_mgmt_affecting = True
include_degrade_affecting = False
if args.degrade_affecting:
include_degrade_affecting = True
faults = cc.ialarm.list(q=options.cli_to_array(args.query), include_suppress=include_suppress)
for f in faults:
cgts_utils.normalize_field_data(f, ['entity_type_id', 'entity_instance_id',
'reason_text', 'proposed_repair_action'])
# omit action initially to keep output width sane
# (can switch over to vertical formatting when available from CLIFF)
def highlightAlarmId(alarm):
suppressed = hasattr(alarm, "suppression_status") and alarm.suppression_status == "suppressed"
if suppressed:
value = "S({})".format(alarm.alarm_id)
else:
value = alarm.alarm_id
return value
field_labels = ['Alarm ID', 'Reason Text', 'Entity ID', 'Severity', 'Time Stamp']
fields = ['alarm_id', 'reason_text', 'entity_instance_id', 'severity', 'timestamp']
# for best results, ensure width ratios add up to 1 (=100%)
formatterSpec = {"alarm_id": {"formatter": highlightAlarmId, "wrapperFormatter": .08},
"reason_text": .54,
"entity_instance_id": .15,
"severity": .10,
"timestamp": .10,
}
if includeUUID:
field_labels.insert(0, 'UUID')
fields.insert(0, 'uuid')
# for best results, ensure width ratios add up to 1 (=100%)
formatterSpec['uuid'] = wrapping_formatters.UUID_MIN_LENGTH
formatterSpec['reason_text'] -= .05
formatterSpec['entity_instance_id'] -= .02
if include_mgmt_affecting:
field_labels.insert(4, 'Management Affecting')
fields.insert(4, 'mgmt_affecting')
# for best results, ensure width ratios add up to 1 (=100%)
formatterSpec['mgmt_affecting'] = .08
formatterSpec['reason_text'] -= .05
formatterSpec['severity'] -= .03
if include_degrade_affecting:
field_labels.insert(5, 'Degrade Affecting')
fields.insert(5, 'degrade_affecting')
# for best results, ensure width ratios add up to 1 (=100%)
formatterSpec['degrade_affecting'] = .08
formatterSpec['reason_text'] -= .05
formatterSpec['severity'] -= .03
formatters = wrapping_formatters.build_wrapping_formatters(faults, fields, field_labels, formatterSpec)
cgts_utils.print_list(faults, fields, field_labels, formatters=formatters,
sortby=fields.index('timestamp'), reversesort=True)
@utils.arg('--include_suppress',
action='store_true',
help='Include suppressed alarms in output')
def do_alarm_summary(cc, args={}):
'''Show a summary of active alarms.'''
include_suppress = False
if args.include_suppress:
include_suppress = True
faults = cc.ialarm.summary(include_suppress)
field_labels = ['Critical Alarms', 'Major Alarms', 'Minor Alarms', 'Warnings']
fields = ['critical', 'major', 'minor', 'warnings']
cgts_utils.print_list(faults, fields, field_labels)

View File

@ -14,13 +14,10 @@ from cgtsclient.v1 import cluster_shell
from cgtsclient.v1 import controller_fs_shell
from cgtsclient.v1 import drbdconfig_shell
from cgtsclient.v1 import ethernetport_shell
from cgtsclient.v1 import event_log_shell
from cgtsclient.v1 import event_suppression_shell
from cgtsclient.v1 import firewallrules_shell
from cgtsclient.v1 import health_shell
from cgtsclient.v1 import helm_shell
from cgtsclient.v1 import ialarm_shell
from cgtsclient.v1 import icommunity_shell
from cgtsclient.v1 import icpu_shell
from cgtsclient.v1 import idisk_shell
@ -83,11 +80,8 @@ COMMAND_MODULES = [
sm_service_nodes_shell,
sm_servicegroup_shell,
sm_service_shell,
ialarm_shell,
icommunity_shell,
itrapdest_shell,
event_log_shell,
event_suppression_shell,
iinfra_shell,
ethernetport_shell,
port_shell,

View File

@ -22,7 +22,6 @@ from wsme import types as wtypes
from sysinv.api.controllers.v1 import address
from sysinv.api.controllers.v1 import address_pool
from sysinv.api.controllers.v1 import alarm
from sysinv.api.controllers.v1 import base
from sysinv.api.controllers.v1 import ceph_mon
from sysinv.api.controllers.v1 import cluster
@ -33,8 +32,6 @@ from sysinv.api.controllers.v1 import disk
from sysinv.api.controllers.v1 import dns
from sysinv.api.controllers.v1 import drbdconfig
from sysinv.api.controllers.v1 import ethernet_port
from sysinv.api.controllers.v1 import event_log
from sysinv.api.controllers.v1 import event_suppression
from sysinv.api.controllers.v1 import firewallrules
from sysinv.api.controllers.v1 import health
from sysinv.api.controllers.v1 import helm_charts
@ -131,15 +128,6 @@ class V1(base.APIBase):
icommunity = [link.Link]
"Links to the icommunity node cluster resource"
ialarms = [link.Link]
"Links to the ialarm resource"
event_log = [link.Link]
"Links to the event_log resource"
event_suppression = [link.Link]
"Links to the event_suppression resource"
iuser = [link.Link]
"Links to the iuser resource"
@ -511,29 +499,6 @@ class V1(base.APIBase):
bookmark=True)
]
v1.ialarms = [link.Link.make_link('self', pecan.request.host_url,
'ialarms', ''),
link.Link.make_link('bookmark',
pecan.request.host_url,
'ialarms', '',
bookmark=True)
]
v1.event_log = [link.Link.make_link('self', pecan.request.host_url,
'event_log', ''),
link.Link.make_link('bookmark',
pecan.request.host_url,
'event_log', '',
bookmark=True)
]
v1.event_suppression = [link.Link.make_link('self', pecan.request.host_url,
'event_suppression', ''),
link.Link.make_link('bookmark',
pecan.request.host_url,
'event_suppression', '',
bookmark=True)
]
v1.iinfra = [link.Link.make_link('self', pecan.request.host_url,
'iinfra', ''),
@ -764,9 +729,6 @@ class Controller(rest.RestController):
storage_ceph_external.StorageCephExternalController()
ceph_mon = ceph_mon.CephMonController()
drbdconfig = drbdconfig.drbdconfigsController()
ialarms = alarm.AlarmController()
event_log = event_log.EventLogController()
event_suppression = event_suppression.EventSuppressionController()
iinfra = network_infra.InfraNetworkController()
addresses = address.AddressController()
addrpools = address_pool.AddressPoolController()

View File

@ -1,338 +0,0 @@
#!/usr/bin/env python
# Copyright (c) 2013-2016 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
import datetime
import pecan
from pecan import rest
import wsme
from wsme import types as wtypes
import wsmeext.pecan as wsme_pecan
from fm_api import fm_api
from sysinv.api.controllers.v1 import base
from sysinv.api.controllers.v1 import collection
from sysinv.api.controllers.v1 import link
from sysinv.api.controllers.v1 import types
from sysinv.api.controllers.v1 import utils as api_utils
from sysinv.common import exception
from sysinv.common import utils as cutils
from sysinv.common import constants
from sysinv import objects
from sysinv.openstack.common import log
from sysinv.api.controllers.v1 import alarm_utils
from sysinv.api.controllers.v1.query import Query
from fm_api import constants as fm_constants
LOG = log.getLogger(__name__)
class AlarmPatchType(types.JsonPatchType):
pass
class Alarm(base.APIBase):
"""API representation of an alarm.
This class enforces type checking and value constraints, and converts
between the internal object model and the API representation of
an ialarm.
"""
uuid = types.uuid
"The UUID of the ialarm"
alarm_id = wsme.wsattr(wtypes.text, mandatory=True)
"structured id for the alarm; AREA_ID ID; 300-001"
alarm_state = wsme.wsattr(wtypes.text, mandatory=True)
"The state of the alarm"
entity_type_id = wtypes.text
"The type of the object raising alarm"
entity_instance_id = wsme.wsattr(wtypes.text, mandatory=True)
"The original instance information of the object raising alarm"
timestamp = datetime.datetime
"The time in UTC at which the alarm state is last updated"
severity = wsme.wsattr(wtypes.text, mandatory=True)
"The severity of the alarm"
reason_text = wtypes.text
"The reason why the alarm is raised"
alarm_type = wsme.wsattr(wtypes.text, mandatory=True)
"The type of the alarm"
probable_cause = wsme.wsattr(wtypes.text, mandatory=True)
"The probable cause of the alarm"
proposed_repair_action = wtypes.text
"The action to clear the alarm"
service_affecting = wtypes.text
"Whether the alarm affects the service"
suppression = wtypes.text
"'allowed' or 'not-allowed'"
suppression_status = wtypes.text
"'suppressed' or 'unsuppressed'"
mgmt_affecting = wtypes.text
"Whether the alarm prevents software management actions"
degrade_affecting = wtypes.text
"Wheter the alarm prevents filesystem resize actions"
links = [link.Link]
"A list containing a self link and associated community string links"
def __init__(self, **kwargs):
self.fields = objects.alarm.fields.keys()
for k in self.fields:
setattr(self, k, kwargs.get(k))
@classmethod
def convert_with_links(cls, rpc_ialarm, expand=True):
if isinstance(rpc_ialarm, tuple):
ialarms = rpc_ialarm[0]
suppress_status = rpc_ialarm[constants.DB_SUPPRESS_STATUS]
mgmt_affecting = rpc_ialarm[constants.DB_MGMT_AFFECTING]
degrade_affecting = rpc_ialarm[constants.DB_DEGRADE_AFFECTING]
else:
ialarms = rpc_ialarm
suppress_status = rpc_ialarm.suppression_status
mgmt_affecting = rpc_ialarm.mgmt_affecting
degrade_affecting = rpc_ialarm.degrade_affecting
if not expand:
ialarms['service_affecting'] = str(ialarms['service_affecting'])
ialarms['suppression'] = str(ialarms['suppression'])
ialm = Alarm(**ialarms.as_dict())
if not expand:
ialm.unset_fields_except(['uuid', 'alarm_id', 'entity_instance_id',
'severity', 'timestamp', 'reason_text',
'mgmt_affecting', 'degrade_affecting'])
ialm.entity_instance_id = \
alarm_utils.make_display_id(ialm.entity_instance_id, replace=False)
ialm.suppression_status = str(suppress_status)
ialm.mgmt_affecting = str(
not fm_api.FaultAPIs.alarm_allowed(ialm.severity, mgmt_affecting))
ialm.degrade_affecting = str(
not fm_api.FaultAPIs.alarm_allowed(ialm.severity, degrade_affecting))
return ialm
class AlarmCollection(collection.Collection):
"""API representation of a collection of ialarm."""
ialarms = [Alarm]
"A list containing ialarm objects"
def __init__(self, **kwargs):
self._type = 'ialarms'
@classmethod
def convert_with_links(cls, ialm, limit, url=None,
expand=False, **kwargs):
# filter masked alarms
ialms = []
for a in ialm:
if isinstance(a, tuple):
ialm_instance = a[0]
else:
ialm_instance = a
if str(ialm_instance['masked']) != 'True':
ialms.append(a)
collection = AlarmCollection()
collection.ialarms = [Alarm.convert_with_links(ch, expand)
for ch in ialms]
# url = url or None
collection.next = collection.get_next(limit, url=url, **kwargs)
return collection
LOCK_NAME = 'AlarmController'
class AlarmSummary(base.APIBase):
"""API representation of an alarm summary object."""
critical = wsme.wsattr(int, mandatory=True)
"The count of critical alarms"
major = wsme.wsattr(int, mandatory=True)
"The count of major alarms"
minor = wsme.wsattr(int, mandatory=True)
"The count of minor alarms"
warnings = wsme.wsattr(int, mandatory=True)
"The count of warnings"
status = wsme.wsattr(wtypes.text, mandatory=True)
"The status of the system"
system_uuid = wsme.wsattr(types.uuid, mandatory=True)
"The UUID of the system (for distributed cloud use)"
@classmethod
def convert_with_links(cls, ialm_sum, uuid):
summary = AlarmSummary()
summary.critical = ialm_sum[fm_constants.FM_ALARM_SEVERITY_CRITICAL]
summary.major = ialm_sum[fm_constants.FM_ALARM_SEVERITY_MAJOR]
summary.minor = ialm_sum[fm_constants.FM_ALARM_SEVERITY_MINOR]
summary.warnings = ialm_sum[fm_constants.FM_ALARM_SEVERITY_WARNING]
summary.status = ialm_sum['status']
summary.system_uuid = uuid
return summary
class AlarmController(rest.RestController):
"""REST controller for ialarm."""
_custom_actions = {
'detail': ['GET'],
'summary': ['GET'],
}
def _get_ialarm_summary(self, include_suppress):
kwargs = {}
kwargs["include_suppress"] = include_suppress
ialm = pecan.request.dbapi.ialarm_get_all(**kwargs)
ialm_counts = {fm_constants.FM_ALARM_SEVERITY_CRITICAL: 0,
fm_constants.FM_ALARM_SEVERITY_MAJOR: 0,
fm_constants.FM_ALARM_SEVERITY_MINOR: 0,
fm_constants.FM_ALARM_SEVERITY_WARNING: 0}
# filter masked alarms and sum by severity
for a in ialm:
ialm_instance = a[0]
if str(ialm_instance['masked']) != 'True':
if ialm_instance['severity'] in ialm_counts:
ialm_counts[ialm_instance['severity']] += 1
# Generate the status
status = fm_constants.FM_ALARM_OK_STATUS
if (ialm_counts[fm_constants.FM_ALARM_SEVERITY_MAJOR] > 0) or \
(ialm_counts[fm_constants.FM_ALARM_SEVERITY_MINOR] > 0):
status = fm_constants.FM_ALARM_DEGRADED_STATUS
if ialm_counts[fm_constants.FM_ALARM_SEVERITY_CRITICAL] > 0:
status = fm_constants.FM_ALARM_CRITICAL_STATUS
ialm_counts['status'] = status
uuid = pecan.request.dbapi.isystem_get_one()['uuid']
return AlarmSummary.convert_with_links(ialm_counts, uuid)
def _get_ialarm_collection(self, marker, limit, sort_key, sort_dir,
expand=False, resource_url=None,
q=None, include_suppress=False):
limit = api_utils.validate_limit(limit)
sort_dir = api_utils.validate_sort_dir(sort_dir)
if isinstance(sort_key, basestring) and ',' in sort_key:
sort_key = sort_key.split(',')
kwargs = {}
if q is not None:
for i in q:
if i.op == 'eq':
kwargs[i.field] = i.value
kwargs["include_suppress"] = include_suppress
if marker:
marker_obj = objects.alarm.get_by_uuid(pecan.request.context,
marker)
ialm = pecan.request.dbapi.ialarm_get_list(limit, marker_obj,
sort_key=sort_key,
sort_dir=sort_dir,
include_suppress=include_suppress)
else:
kwargs['limit'] = limit
ialm = pecan.request.dbapi.ialarm_get_all(**kwargs)
return AlarmCollection.convert_with_links(ialm, limit,
url=resource_url,
expand=expand,
sort_key=sort_key,
sort_dir=sort_dir)
@wsme_pecan.wsexpose(AlarmCollection, [Query],
types.uuid, int, wtypes.text, wtypes.text, bool)
def get_all(self, q=[], marker=None, limit=None, sort_key='id',
sort_dir='asc', include_suppress=False):
"""Retrieve a list of ialarm.
:param marker: pagination marker for large data sets.
:param limit: maximum number of resources to return in a single result.
:param sort_key: column to sort results by. Default: id.
:param sort_dir: direction to sort. "asc" or "desc". Default: asc.
:param include_suppress: filter on suppressed alarms. Default: False
"""
return self._get_ialarm_collection(marker, limit, sort_key,
sort_dir, q=q,
include_suppress=include_suppress)
@wsme_pecan.wsexpose(AlarmCollection, types.uuid, int,
wtypes.text, wtypes.text)
def detail(self, marker=None, limit=None, sort_key='id', sort_dir='asc'):
"""Retrieve a list of ialarm with detail.
:param marker: pagination marker for large data sets.
:param limit: maximum number of resources to return in a single result.
:param sort_key: column to sort results by. Default: id.
:param sort_dir: direction to sort. "asc" or "desc". Default: asc.
"""
# /detail should only work against collections
parent = pecan.request.path.split('/')[:-1][-1]
if parent != "ialarm":
raise exception.HTTPNotFound
expand = True
resource_url = '/'.join(['ialarm', 'detail'])
return self._get_ialarm_collection(marker, limit, sort_key, sort_dir,
expand, resource_url)
@wsme_pecan.wsexpose(Alarm, wtypes.text)
def get_one(self, id):
"""Retrieve information about the given ialarm.
:param id: UUID of an ialarm.
"""
rpc_ialarm = objects.alarm.get_by_uuid(
pecan.request.context, id)
if str(rpc_ialarm['masked']) == 'True':
raise exception.HTTPNotFound
return Alarm.convert_with_links(rpc_ialarm)
@cutils.synchronized(LOCK_NAME)
@wsme_pecan.wsexpose(None, wtypes.text, status_code=204)
def delete(self, id):
"""Delete a ialarm.
:param id: uuid of a ialarm.
"""
pecan.request.dbapi.ialarm_destroy(id)
@wsme_pecan.wsexpose(AlarmSummary, bool)
def summary(self, include_suppress=False):
"""Retrieve a summery of ialarms.
:param include_suppress: filter on suppressed alarms. Default: False
"""
return self._get_ialarm_summary(include_suppress)
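
The summary status derivation above, restated in isolation; the plain strings stand in for the fm_constants values, which is an assumption made for readability:

    def alarm_status(counts):
        # critical overrides degraded; major or minor alarms alone mean degraded
        status = 'OK'
        if counts.get('major', 0) > 0 or counts.get('minor', 0) > 0:
            status = 'degraded'
        if counts.get('critical', 0) > 0:
            status = 'critical'
        return status

    print(alarm_status({'critical': 0, 'major': 2, 'minor': 0}))  # degraded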

View File

@ -1,92 +0,0 @@
#!/usr/bin/env python
# Copyright (c) 2013-2016 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from sysinv.openstack.common import log, uuidutils
from sysinv.common import exception
import pecan
LOG = log.getLogger(__name__)
ALARM_ENTITY_TYPES_USE_UUID = ['port']
ENTITY_SEP = '.'
KEY_VALUE_SEP = '='
def make_display_id(iid, replace=False):
if replace:
instance_id = replace_uuids(iid)
else:
instance_id = replace_name_with_uuid(iid)
return instance_id
def replace_name_with_uuid(instance_id):
hName = None
port = None
for keyvalue in instance_id.split(ENTITY_SEP):
try:
(key, value) = keyvalue.split(KEY_VALUE_SEP, 1)
except ValueError:
return instance_id
if key == 'host':
hName = value
elif key == 'port':
if hName and not uuidutils.is_uuid_like(value.strip()):
try:
ihost = pecan.request.dbapi.ihost_get_by_hostname(hName)
port = pecan.request.dbapi.port_get(value,
hostid=ihost['id'])
except exception.NodeNotFound:
LOG.error("Can't find the host by name %s", hName)
pass
except exception.ServerNotFound:
LOG.error("Can't find the port for uuid %s", value)
pass
if port:
new_id = key + KEY_VALUE_SEP + port.uuid
instance_id = instance_id.replace(keyvalue, new_id, 1)
return instance_id
def replace_uuid_with_name(key, value):
new_id = None
if key == 'port':
port = None
try:
port = pecan.request.dbapi.port_get(value)
except exception.ServerNotFound:
LOG.error("Can't find the port for uuid %s", value)
pass
if port is not None:
new_id = key + KEY_VALUE_SEP + port.name
return new_id
def replace_uuids(instance_id):
for keyvalue in instance_id.split(ENTITY_SEP):
try:
(key, value) = keyvalue.split(KEY_VALUE_SEP, 1)
except ValueError:
return instance_id
if key in ALARM_ENTITY_TYPES_USE_UUID:
if uuidutils.is_uuid_like(value.strip()):
new_id = replace_uuid_with_name(key, value)
else:
new_id = key + KEY_VALUE_SEP + value
if new_id is not None:
instance_id = instance_id.replace(keyvalue, new_id, 1)
return instance_id
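
A standalone sketch of the entity-instance-id convention these helpers walk, dot-separated key=value pairs; the sample id below is illustrative:

    ENTITY_SEP = '.'
    KEY_VALUE_SEP = '='

    def parse_entity_id(instance_id):
        # 'host=controller-0.port=eth0' -> [('host', 'controller-0'), ('port', 'eth0')]
        pairs = []
        for keyvalue in instance_id.split(ENTITY_SEP):
            key, _, value = keyvalue.partition(KEY_VALUE_SEP)
            pairs.append((key, value))
        return pairs

    print(parse_entity_id('host=controller-0.port=eth0'))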

View File

@ -1,290 +0,0 @@
#!/usr/bin/env python
# Copyright (c) 2013-2016 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
import datetime
from oslo_utils import timeutils
import pecan
from pecan import rest
import wsme
from wsme import types as wtypes
import wsmeext.pecan as wsme_pecan
from sysinv.api.controllers.v1 import alarm_utils
from sysinv.api.controllers.v1 import base
from sysinv.api.controllers.v1 import collection
from sysinv.api.controllers.v1 import link
from sysinv.api.controllers.v1.query import Query
from sysinv.api.controllers.v1 import types
from sysinv.api.controllers.v1 import utils as api_utils
from sysinv.common import exception
from sysinv import objects
from sysinv.openstack.common.gettextutils import _
from sysinv.openstack.common import log
LOG = log.getLogger(__name__)
import json
def prettyDict(dict):
output = json.dumps(dict, sort_keys=True, indent=4)
return output
class EventLogPatchType(types.JsonPatchType):
pass
class EventLog(base.APIBase):
"""API representation of an event log.
This class enforces type checking and value constraints, and converts
between the internal object model and the API representation of
an event_log.
"""
uuid = types.uuid
"The UUID of the event_log"
event_log_id = wsme.wsattr(wtypes.text, mandatory=True)
"structured id for the event log; AREA_ID ID; 300-001"
state = wsme.wsattr(wtypes.text, mandatory=True)
"The state of the event"
entity_type_id = wtypes.text
"The type of the object event log"
entity_instance_id = wsme.wsattr(wtypes.text, mandatory=True)
"The original instance information of the object creating event log"
timestamp = datetime.datetime
"The time in UTC at which the event log is generated"
severity = wsme.wsattr(wtypes.text, mandatory=True)
"The severity of the log"
reason_text = wtypes.text
"The reason why the log is generated"
event_log_type = wsme.wsattr(wtypes.text, mandatory=True)
"The type of the event log"
probable_cause = wsme.wsattr(wtypes.text, mandatory=True)
"The probable cause of the event log"
proposed_repair_action = wtypes.text
"The action to clear the alarm"
service_affecting = wtypes.text
"Whether the log affects the service"
suppression = wtypes.text
"'allowed' or 'not-allowed'"
suppression_status = wtypes.text
"'suppressed' or 'unsuppressed'"
links = [link.Link]
"A list containing a self link and associated community string links"
def __init__(self, **kwargs):
self.fields = objects.event_log.fields.keys()
for k in self.fields:
setattr(self, k, kwargs.get(k))
@classmethod
def convert_with_links(cls, rpc_event_log, expand=True):
if isinstance(rpc_event_log, tuple):
ievent_log = rpc_event_log[0]
suppress_status = rpc_event_log[1]
else:
ievent_log = rpc_event_log
suppress_status = rpc_event_log.suppression_status
if not expand:
ievent_log['service_affecting'] = str(ievent_log['service_affecting'])
ievent_log['suppression'] = str(ievent_log['suppression'])
ilog = EventLog(**ievent_log.as_dict())
if not expand:
ilog.unset_fields_except(['uuid', 'event_log_id', 'entity_instance_id',
'severity', 'timestamp', 'reason_text', 'state'])
ilog.entity_instance_id = \
alarm_utils.make_display_id(ilog.entity_instance_id, replace=False)
ilog.suppression_status = str(suppress_status)
return ilog
def _getEventType(alarms=False, logs=False):
if alarms is False and logs is False:
return "ALL"
if alarms is True and logs is True:
return "ALL"
if logs is True:
return "LOG"
if alarms is True:
return "ALARM"
return "ALL"
class EventLogCollection(collection.Collection):
"""API representation of a collection of event_log."""
event_log = [EventLog]
"A list containing event_log objects"
def __init__(self, **kwargs):
self._type = 'event_log'
@classmethod
def convert_with_links(cls, ilog, limit=None, url=None,
expand=False, **kwargs):
ilogs = []
for a in ilog:
ilogs.append(a)
collection = EventLogCollection()
collection.event_log = [EventLog.convert_with_links(ch, expand)
for ch in ilogs]
collection.next = collection.get_next(limit, url=url, **kwargs)
return collection
def _handle_bad_input_date(f):
"""
A decorator that executes function f and raises a more
human-readable error on a SQL date exception
"""
def date_handler_wrapper(*args, **kwargs):
try:
return f(*args, **kwargs)
except Exception as e:
import re
e_str = "{}".format(e)
for r in [".*date/time field value out of range: \"(.*)\".*LINE",
".*invalid input syntax for type timestamp: \"(.*)\".*",
".*timestamp out of range: \"(.*)\".*"]:
p = re.compile(r, re.DOTALL)
m = p.match(e_str)
if m and len(m.groups()) > 0:
bad_date = m.group(1)
raise wsme.exc.ClientSideError(_("Invalid date '{}' specified".format(bad_date)))
raise
return date_handler_wrapper
class EventLogController(rest.RestController):
"""REST controller for eventlog."""
_custom_actions = {
'detail': ['GET'],
}
@_handle_bad_input_date
def _get_eventlog_collection(self, marker, limit, sort_key, sort_dir,
expand=False, resource_url=None,
q=None, alarms=False, logs=False,
include_suppress=False):
if limit and limit < 0:
raise wsme.exc.ClientSideError(_("Limit must be positive"))
sort_dir = api_utils.validate_sort_dir(sort_dir)
kwargs = {}
if q is not None:
for i in q:
if i.op == 'eq':
if i.field == 'start' or i.field == 'end':
val = timeutils.normalize_time(
timeutils.parse_isotime(i.value)
.replace(tzinfo=None))
i.value = val.isoformat()
kwargs[i.field] = i.value
evtType = _getEventType(alarms, logs)
kwargs["evtType"] = evtType
kwargs["include_suppress"] = include_suppress
if marker:
marker_obj = objects.event_log.get_by_uuid(pecan.request.context,
marker)
ilog = pecan.request.dbapi.event_log_get_list(limit, marker_obj,
sort_key=sort_key,
sort_dir=sort_dir,
evtType=evtType,
include_suppress=include_suppress)
else:
kwargs['limit'] = limit
ilog = pecan.request.dbapi.event_log_get_all(**kwargs)
return EventLogCollection.convert_with_links(ilog, limit,
url=resource_url,
expand=expand,
sort_key=sort_key,
sort_dir=sort_dir)
@wsme_pecan.wsexpose(EventLogCollection, [Query],
types.uuid, int, wtypes.text, wtypes.text, bool, bool, bool)
def get_all(self, q=[], marker=None, limit=None, sort_key='timestamp',
sort_dir='desc', alarms=False, logs=False, include_suppress=False):
"""Retrieve a list of event_log.
:param marker: pagination marker for large data sets.
:param limit: maximum number of resources to return in a single result.
:param sort_key: column to sort results by. Default: timestamp.
:param sort_dir: direction to sort. "asc" or "desc". Default: desc.
:param alarms: filter on alarms. Default: False
:param logs: filter on logs. Default: False
:param include_suppress: filter on suppressed alarms. Default: False
"""
return self._get_eventlog_collection(marker, limit, sort_key,
sort_dir, q=q, alarms=alarms, logs=logs,
include_suppress=include_suppress)
@wsme_pecan.wsexpose(EventLogCollection, types.uuid, int,
wtypes.text, wtypes.text, bool, bool)
def detail(self, marker=None, limit=None, sort_key='id', sort_dir='asc', alarms=False, logs=False):
"""Retrieve a list of event_log with detail.
:param marker: pagination marker for large data sets.
:param limit: maximum number of resources to return in a single result.
:param sort_key: column to sort results by. Default: id.
:param sort_dir: direction to sort. "asc" or "desc". Default: asc.
:param alarms: filter on alarms. Default: False
:param logs: filter on logs. Default: False
"""
# /detail should only work against collections
parent = pecan.request.path.split('/')[:-1][-1]
if parent != "event_log":
raise exception.HTTPNotFound
expand = True
resource_url = '/'.join(['event_log', 'detail'])
return self._get_eventlog_collection(marker, limit, sort_key, sort_dir,
expand, resource_url, None, alarms, logs)
@wsme_pecan.wsexpose(EventLog, wtypes.text)
def get_one(self, id):
"""Retrieve information about the given event_log.
:param id: UUID of an event_log.
"""
rpc_ilog = objects.event_log.get_by_uuid(
pecan.request.context, id)
return EventLog.convert_with_links(rpc_ilog)
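
For clarity, the extraction step inside _handle_bad_input_date applied to a representative error string; the error text below is a fabricated example of the PostgreSQL message shape the patterns target:

    import re

    e_str = 'date/time field value out of range: "20162131" LINE 1: ...'
    pattern = re.compile(r'.*date/time field value out of range: "(.*)".*LINE',
                         re.DOTALL)
    m = pattern.match(e_str)
    if m and m.groups():
        print("Invalid date '%s' specified" % m.group(1))  # 20162131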

View File

@ -1,210 +0,0 @@
#!/usr/bin/env python
# Copyright (c) 2013-2016 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
import pecan
from pecan import rest
import wsme
from wsme import types as wtypes
import wsmeext.pecan as wsme_pecan
from sysinv.api.controllers.v1 import base
from sysinv.api.controllers.v1 import collection
from sysinv.api.controllers.v1 import link
from sysinv.api.controllers.v1.query import Query
from sysinv.api.controllers.v1 import types
from sysinv.api.controllers.v1 import utils as api_utils
from sysinv.common import constants
from sysinv.common import utils as cutils
from sysinv import objects
from sysinv.openstack.common.gettextutils import _
from sysinv.openstack.common import log
LOG = log.getLogger(__name__)
class EventSuppressionPatchType(types.JsonPatchType):
@staticmethod
def mandatory_attrs():
return ['/uuid']
class EventSuppression(base.APIBase):
"""API representation of an event suppression.
This class enforces type checking and value constraints, and converts
between the internal object model and the API representation of
an event_suppression.
"""
id = int
"Unique ID for this entry"
uuid = types.uuid
"Unique UUID for this entry"
alarm_id = wsme.wsattr(wtypes.text, mandatory=True)
"Unique id for the Alarm Type"
description = wsme.wsattr(wtypes.text, mandatory=True)
"Text description of the Alarm Type"
suppression_status = wsme.wsattr(wtypes.text, mandatory=True)
"'suppressed' or 'unsuppressed'"
links = [link.Link]
"A list containing a self link and associated links"
def __init__(self, **kwargs):
self.fields = objects.event_suppression.fields.keys()
for k in self.fields:
if not hasattr(self, k):
continue
setattr(self, k, kwargs.get(k, wtypes.Unset))
@classmethod
def convert_with_links(cls, rpc_event_suppression, expand=True):
parm = EventSuppression(**rpc_event_suppression.as_dict())
if not expand:
parm.unset_fields_except(['uuid', 'alarm_id', 'description',
'suppression_status'])
parm.links = [link.Link.make_link('self', pecan.request.host_url,
'event_suppression', parm.uuid),
link.Link.make_link('bookmark',
pecan.request.host_url,
'event_suppression', parm.uuid,
bookmark=True)
]
return parm
class EventSuppressionCollection(collection.Collection):
"""API representation of a collection of event_suppression."""
event_suppression = [EventSuppression]
"A list containing EventSuppression objects"
def __init__(self, **kwargs):
self._type = 'event_suppression'
@classmethod
def convert_with_links(cls, rpc_event_suppression, limit, url=None,
expand=False,
**kwargs):
collection = EventSuppressionCollection()
collection.event_suppression = [EventSuppression.convert_with_links(p, expand)
for p in rpc_event_suppression]
collection.next = collection.get_next(limit, url=url, **kwargs)
return collection
LOCK_NAME = 'EventSuppressionController'
class EventSuppressionController(rest.RestController):
"""REST controller for event_suppression."""
def __init__(self, parent=None, **kwargs):
self._parent = parent
def _get_event_suppression_collection(self, marker=None, limit=None,
sort_key=None, sort_dir=None,
expand=False, resource_url=None,
q=None):
limit = api_utils.validate_limit(limit)
sort_dir = api_utils.validate_sort_dir(sort_dir)
kwargs = {}
if q is not None:
for i in q:
if i.op == 'eq':
kwargs[i.field] = i.value
marker_obj = None
if marker:
marker_obj = objects.event_suppression.get_by_uuid(
pecan.request.context, marker)
if q is None:
parms = pecan.request.dbapi.event_suppression_get_list(
limit=limit, marker=marker_obj,
sort_key=sort_key, sort_dir=sort_dir)
else:
kwargs['limit'] = limit
kwargs['sort_key'] = sort_key
kwargs['sort_dir'] = sort_dir
parms = pecan.request.dbapi.event_suppression_get_all(**kwargs)
return EventSuppressionCollection.convert_with_links(
parms, limit, url=resource_url, expand=expand,
sort_key=sort_key, sort_dir=sort_dir)
def _get_updates(self, patch):
"""Retrieve the updated attributes from the patch request."""
updates = {}
for p in patch:
attribute = p['path'] if p['path'][0] != '/' else p['path'][1:]
updates[attribute] = p['value']
return updates
@staticmethod
def _check_event_suppression_updates(updates):
"""Check attributes to be updated"""
for parameter in updates:
if parameter == 'suppression_status':
if not((updates.get(parameter) == constants.FM_SUPPRESSED) or
(updates.get(parameter) == constants.FM_UNSUPPRESSED)):
msg = _("Invalid event_suppression parameter suppression_status values. \
Valid values are: suppressed, unsuppressed")
raise wsme.exc.ClientSideError(msg)
elif parameter == 'alarm_id':
msg = _("event_suppression parameter alarm_id is not allowed to be updated.")
raise wsme.exc.ClientSideError(msg)
elif parameter == 'description':
msg = _("event_suppression parameter description is not allowed to be updated.")
raise wsme.exc.ClientSideError(msg)
else:
msg = _("event_suppression invalid parameter.")
raise wsme.exc.ClientSideError(msg)
@wsme_pecan.wsexpose(EventSuppressionCollection, [Query],
types.uuid, wtypes.text,
wtypes.text, wtypes.text, wtypes.text)
def get_all(self, q=[], marker=None, limit=None,
sort_key='id', sort_dir='asc'):
"""Retrieve a list of event_suppression."""
sort_key = ['alarm_id']
return self._get_event_suppression_collection(marker, limit,
sort_key,
sort_dir, q=q)
@wsme_pecan.wsexpose(EventSuppression, types.uuid)
def get_one(self, uuid):
"""Retrieve information about the given event_suppression."""
rpc_event_suppression = objects.event_suppression.get_by_uuid(
pecan.request.context, uuid)
return EventSuppression.convert_with_links(rpc_event_suppression)
@cutils.synchronized(LOCK_NAME)
@wsme.validate(types.uuid, [EventSuppressionPatchType])
@wsme_pecan.wsexpose(EventSuppression, types.uuid,
body=[EventSuppressionPatchType])
def patch(self, uuid, patch):
"""Updates attributes of event_suppression."""
event_suppression = objects.event_suppression.get_by_uuid(pecan.request.context, uuid)
event_suppression = event_suppression.as_dict()
updates = self._get_updates(patch)
self._check_event_suppression_updates(updates)
event_suppression.update(updates)
updated_event_suppression = pecan.request.dbapi.event_suppression_update(uuid, updates)
return EventSuppression.convert_with_links(updated_event_suppression)
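For reference, suppression is toggled by sending a JSON patch to this controller. A minimal sketch, assuming the usual sysinv endpoint on port 6385 and a pre-obtained Keystone token; the host, uuid and token below are placeholders, not values from this change:

import json
import urllib2  # Python 2, matching this codebase

SYSINV_URL = "http://controller:6385/v1"          # assumed endpoint
ES_UUID = "0d9cbb03-0000-0000-0000-000000000000"  # placeholder uuid

patch = [{"op": "replace",
          "path": "/suppression_status",
          "value": "suppressed"}]

req = urllib2.Request(SYSINV_URL + "/event_suppression/" + ES_UUID,
                      data=json.dumps(patch),
                      headers={"Content-Type": "application/json",
                               "X-Auth-Token": "<token>"})
req.get_method = lambda: "PATCH"  # urllib2 has no native PATCH support
urllib2.urlopen(req)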


@ -9042,7 +9042,8 @@ class ConductorManager(service.PeriodicService):
        config_uuid = self._config_update_hosts(context, personalities)
        config_dict = {
            "personalities": personalities,
            "classes": ['platform::snmp::runtime'],
            "classes": ['platform::snmp::runtime',
                        'platform::fm::runtime'],
        }
        self._config_apply_runtime_manifest(context, config_uuid, config_dict)


@ -1695,85 +1695,6 @@ class Connection(object):
        :param name: The name of an icommunity.
        """
@abc.abstractmethod
def ialarm_create(self, values):
"""Create a new alarm.
:param values: A dict containing several items used to identify
and track the alarm.
:returns: An ialarm.
"""
@abc.abstractmethod
def ialarm_get(self, uuid):
"""Return an ialarm.
:param uuid: The uuid of an alarm.
:returns: An ialarm.
"""
@abc.abstractmethod
def ialarm_get_by_ids(self, alarm_id, entity_instance_id):
"""Return an ialarm.
:param alarm_id: The alarm_id of an alarm.
:param entity_instance_id: The entity_instance_id of an alarm.
:returns: An ialarm.
"""
@abc.abstractmethod
def ialarm_get_all(self, uuid=None, alarm_id=None, entity_type_id=None,
entity_instance_id=None, severity=None, alarm_type=None):
"""Return a list of alarms for the given filters.
:param uuid: The uuid of an alarm.
:param alarm_id: The alarm_id of an alarm.
:param entity_type_id: The entity_type_id of an alarm.
:param entity_instance_id: The entity_instance_id of an alarm.
:param severity: The severity of an alarm.
:param alarm_type: The alarm_type of an alarm.
:returns: ialarms.
"""
@abc.abstractmethod
def ialarm_get_list(self, limit=None, marker=None,
sort_key=None, sort_dir=None):
"""Return a list of ialarms.
:param limit: Maximum number of ialarm to return.
:param marker: the last item of the previous page; we return the next
result set.
:param sort_key: Attribute by which results should be sorted.
:param sort_dir: direction in which results should be sorted.
(asc, desc)
"""
@abc.abstractmethod
def ialarm_update(self, id, values):
"""Update properties of an ialarm.
:param id: The id or uuid of an ialarm.
:param values: Dict of values to update.
:returns: An ialarm.
"""
@abc.abstractmethod
def ialarm_destroy(self, id):
"""Destroy an ialarm.
:param id: The id or uuid of an ialarm.
"""
@abc.abstractmethod
def ialarm_destroy_by_ids(self, alarm_id, entity_instance_id):
"""Destroy an ialarm.
:param alarm_id: The alarm_id of an ialarm.
:param entity_instance_id: The entity_instance_id of an ialarm.
"""
    @abc.abstractmethod
    def iuser_create(self, values):
        """Create a new iuser for an isystem
@ -2807,45 +2728,6 @@ class Connection(object):
        :param name: The name of a service
        """
@abc.abstractmethod
def event_log_get(self, uuid):
"""Return an event_log.
:param uuid: The uuid of an event_log.
:returns: An event_log.
"""
@abc.abstractmethod
def event_log_get_all(self, uuid=None, event_log_id=None, entity_type_id=None,
entity_instance_id=None, severity=None,
event_log_type=None, start=None, end=None,
limit=None):
"""Return a list of event_log for the given filters.
:param uuid: The uuid of an event_log.
        :param event_log_id: The event_log_id of an event_log.
:param entity_type_id: The entity_type_id of an event_log.
:param entity_instance_id: The entity_instance_id of an event_log.
:param severity: The severity of an event_log.
        :param event_log_type: The event_log_type of an event_log.
:param start: The event_logs that occurred after start
:param end: The event_logs that occurred before end
:returns: event_log.
"""
@abc.abstractmethod
def event_log_get_list(self, limit=None, marker=None,
sort_key=None, sort_dir=None, evtType="ALL"):
"""Return a list of event_log.
:param limit: Maximum number of event_log to return.
:param marker: the last item of the previous page; we return the next
result set.
:param sort_key: Attribute by which results should be sorted.
:param sort_dir: direction in which results should be sorted.
(asc, desc)
"""
    @abc.abstractmethod
    def iinfra_get_one(self):
        """Return exactly one iinfra.


@ -31,7 +31,7 @@ from oslo_db.sqlalchemy import enginefacade
from oslo_db.sqlalchemy import utils as db_utils
from sqlalchemy import asc, desc, or_
from sqlalchemy import or_
from sqlalchemy import inspect
from sqlalchemy.orm.exc import DetachedInstanceError
@ -1082,68 +1082,6 @@ def add_lldp_tlv_filter_by_agent(query, agentid):
                             models.LldpTlvs.agent_id == models.LldpAgents.id)
    return query.filter(models.LldpAgents.uuid == agentid)
def add_event_log_filter_by_event_suppression(query, include_suppress):
"""Adds an event_suppression filter to a query.
Filters results by suppression status
:param query: Initial query to add filter to.
:param include_suppress: Value for filtering results by.
:return: Modified query.
"""
query = query.outerjoin(models.EventSuppression,
models.event_log.event_log_id == models.EventSuppression.alarm_id)
query = query.add_columns(models.EventSuppression.suppression_status)
if include_suppress:
return query
return query.filter(or_(models.event_log.state == 'log',
models.EventSuppression.suppression_status == constants.FM_UNSUPPRESSED))
def add_alarm_filter_by_event_suppression(query, include_suppress):
"""Adds an event_suppression filter to a query.
Filters results by suppression status
:param query: Initial query to add filter to.
:param include_suppress: Value for filtering results by.
:return: Modified query.
"""
query = query.join(models.EventSuppression,
models.ialarm.alarm_id == models.EventSuppression.alarm_id)
query = query.add_columns(models.EventSuppression.suppression_status)
if include_suppress:
return query
return query.filter(models.EventSuppression.suppression_status == constants.FM_UNSUPPRESSED)
def add_alarm_mgmt_affecting_by_event_suppression(query):
"""Adds a mgmt_affecting attribute from event_suppression to query.
:param query: Initial query.
:return: Modified query.
"""
query = query.add_columns(models.EventSuppression.mgmt_affecting)
return query
def add_alarm_degrade_affecting_by_event_suppression(query):
"""Adds a degrade_affecting attribute from event_suppression to query.
:param query: Initial query.
:return: Modified query.
"""
query = query.add_columns(models.EventSuppression.degrade_affecting)
return query
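The two removed helpers differ only in join type, and that difference carries the visibility rule: every alarm has a matching event_suppression row, so an inner join is safe, while customer logs carry no alarm_id and would vanish under an inner join. A standalone sketch of the same pattern, using an in-memory SQLite database and hypothetical models (not the sysinv ones):

from sqlalchemy import Column, Integer, String, create_engine, or_
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker

Base = declarative_base()

class Suppression(Base):
    __tablename__ = 'suppression'
    id = Column(Integer, primary_key=True)
    alarm_id = Column(String, unique=True)
    status = Column(String)

class EventLog(Base):
    __tablename__ = 'event_log'
    id = Column(Integer, primary_key=True)
    event_log_id = Column(String)
    state = Column(String)  # 'set', 'clear' or 'log'

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()
session.add_all([
    Suppression(alarm_id='100.101', status='unsuppressed'),
    EventLog(event_log_id='100.101', state='set'),
    EventLog(event_log_id=None, state='log'),  # customer log, no alarm_id
])
session.commit()

# The outer join keeps the customer log even though it has no suppression
# row; an inner join here would silently drop it.
q = session.query(EventLog).outerjoin(
    Suppression, EventLog.event_log_id == Suppression.alarm_id)
q = q.filter(or_(EventLog.state == 'log',
                 Suppression.status == 'unsuppressed'))
assert q.count() == 2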
class Connection(api.Connection):
    """SqlAlchemy connection."""
@ -4522,259 +4460,6 @@ class Connection(api.Connection):
            raise exception.ServiceNotFound(service=service)
        query.delete()
def ialarm_create(self, values):
if not values.get('uuid'):
values['uuid'] = uuidutils.generate_uuid()
ialarm = models.ialarm()
ialarm.update(values)
with _session_for_write() as session:
try:
session.add(ialarm)
session.flush()
except db_exc.DBDuplicateEntry:
raise exception.AlarmAlreadyExists(uuid=values['uuid'])
return ialarm
@objects.objectify(objects.alarm)
def ialarm_get(self, uuid):
query = model_query(models.ialarm)
if uuid:
query = query.filter_by(uuid=uuid)
query = add_alarm_filter_by_event_suppression(query, include_suppress=True)
query = add_alarm_mgmt_affecting_by_event_suppression(query)
query = add_alarm_degrade_affecting_by_event_suppression(query)
try:
result = query.one()
except NoResultFound:
raise exception.AlarmNotFound(alarm=uuid)
return result
def ialarm_get_by_ids(self, alarm_id, entity_instance_id):
query = model_query(models.ialarm)
if alarm_id and entity_instance_id:
query = query.filter_by(alarm_id=alarm_id)
query = query.filter_by(entity_instance_id=entity_instance_id)
query = query.join(models.EventSuppression,
models.ialarm.alarm_id == models.EventSuppression.alarm_id)
query = add_alarm_mgmt_affecting_by_event_suppression(query)
query = add_alarm_degrade_affecting_by_event_suppression(query)
try:
result = query.one()
except NoResultFound:
return None
return result
def ialarm_get_all(self, uuid=None, alarm_id=None, entity_type_id=None,
entity_instance_id=None, severity=None, alarm_type=None,
limit=None, include_suppress=False):
query = model_query(models.ialarm, read_deleted="no")
        query = query.order_by(asc(models.ialarm.severity),
                               asc(models.ialarm.entity_instance_id),
                               asc(models.ialarm.id))
if uuid is not None:
query = query.filter(models.ialarm.uuid.contains(uuid))
if alarm_id is not None:
query = query.filter(models.ialarm.alarm_id.contains(alarm_id))
if entity_type_id is not None:
query = query.filter(models.ialarm.entity_type_id.contains(entity_type_id))
if entity_instance_id is not None:
query = query.filter(models.ialarm.entity_instance_id.contains(entity_instance_id))
if severity is not None:
query = query.filter(models.ialarm.severity.contains(severity))
if alarm_type is not None:
query = query.filter(models.ialarm.alarm_type.contains(alarm_type))
query = add_alarm_filter_by_event_suppression(query, include_suppress)
query = add_alarm_mgmt_affecting_by_event_suppression(query)
query = add_alarm_degrade_affecting_by_event_suppression(query)
if limit is not None:
query = query.limit(limit)
alarm_list = []
try:
alarm_list = query.all()
except UnicodeDecodeError:
LOG.error("UnicodeDecodeError occurred, "
"return an empty alarm list.")
return alarm_list
@objects.objectify(objects.alarm)
def ialarm_get_list(self, limit=None, marker=None,
sort_key=None, sort_dir=None,
include_suppress=False):
query = model_query(models.ialarm)
query = add_alarm_filter_by_event_suppression(query, include_suppress)
query = add_alarm_mgmt_affecting_by_event_suppression(query)
query = add_alarm_degrade_affecting_by_event_suppression(query)
return _paginate_query(models.ialarm, limit, marker,
sort_key, sort_dir, query)
def ialarm_update(self, id, values):
with _session_for_write() as session:
query = model_query(models.ialarm, session=session)
query = query.filter_by(id=id)
count = query.update(values, synchronize_session='fetch')
if count != 1:
raise exception.AlarmNotFound(alarm=id)
return query.one()
def ialarm_destroy(self, id):
with _session_for_write() as session:
query = model_query(models.ialarm, session=session)
query = query.filter_by(uuid=id)
try:
query.one()
except NoResultFound:
raise exception.AlarmNotFound(alarm=id)
query.delete()
def ialarm_destroy_by_ids(self, alarm_id, entity_instance_id):
with _session_for_write() as session:
query = model_query(models.ialarm, session=session)
if alarm_id and entity_instance_id:
query = query.filter_by(alarm_id=alarm_id)
query = query.filter_by(entity_instance_id=entity_instance_id)
try:
query.one()
except NoResultFound:
raise exception.AlarmNotFound(alarm=alarm_id)
query.delete()
@objects.objectify(objects.event_log)
def event_log_get(self, uuid):
query = model_query(models.event_log)
if uuid:
query = query.filter_by(uuid=uuid)
query = add_event_log_filter_by_event_suppression(query, include_suppress=True)
try:
result = query.one()
except NoResultFound:
raise exception.EventLogNotFound(eventLog=uuid)
return result
def _addEventTypeToQuery(self, query, evtType="ALL"):
        if evtType not in ("ALL", "ALARM", "LOG"):
evtType = "ALL"
if evtType == "ALARM":
query = query.filter(or_(models.event_log.state == "set",
models.event_log.state == "clear"))
if evtType == "LOG":
query = query.filter(models.event_log.state == "log")
return query
@objects.objectify(objects.event_log)
def event_log_get_all(self, uuid=None, event_log_id=None,
entity_type_id=None, entity_instance_id=None,
severity=None, event_log_type=None, start=None,
end=None, limit=None, evtType="ALL", include_suppress=False):
query = model_query(models.event_log, read_deleted="no")
query = query.order_by(desc(models.event_log.timestamp))
if uuid is not None:
query = query.filter_by(uuid=uuid)
query = self._addEventTypeToQuery(query, evtType)
if event_log_id is not None:
query = query.filter(models.event_log.event_log_id.contains(event_log_id))
if entity_type_id is not None:
query = query.filter(models.event_log.entity_type_id.contains(entity_type_id))
if entity_instance_id is not None:
query = query.filter(models.event_log.entity_instance_id.contains(entity_instance_id))
if severity is not None:
query = query.filter(models.event_log.severity.contains(severity))
if event_log_type is not None:
query = query.filter_by(event_log_type=event_log_type)
if start is not None:
query = query.filter(models.event_log.timestamp >= start)
if end is not None:
query = query.filter(models.event_log.timestamp <= end)
if include_suppress is not None:
query = add_event_log_filter_by_event_suppression(query, include_suppress)
if limit is not None:
query = query.limit(limit)
hist_list = []
try:
hist_list = query.all()
except UnicodeDecodeError:
LOG.error("UnicodeDecodeError occurred, "
"return an empty event log list.")
return hist_list
@objects.objectify(objects.event_log)
def event_log_get_list(self, limit=None, marker=None,
sort_key=None, sort_dir=None, evtType="ALL", include_suppress=False):
query = model_query(models.event_log)
query = self._addEventTypeToQuery(query, evtType)
query = add_event_log_filter_by_event_suppression(query, include_suppress)
return _paginate_query(models.event_log, limit, marker,
sort_key, sort_dir, query)
@objects.objectify(objects.event_suppression)
def event_suppression_get(self, id):
query = model_query(models.EventSuppression)
if utils.is_uuid_like(id):
query = query.filter_by(uuid=id)
else:
query = query.filter_by(id=id)
try:
result = query.one()
except NoResultFound:
raise exception.InvalidParameterValue(
err="No event suppression entry found for %s" % id)
return result
@objects.objectify(objects.event_suppression)
def event_suppression_get_all(self, uuid=None, alarm_id=None,
description=None, suppression_status=None, limit=None,
sort_key=None, sort_dir=None):
query = model_query(models.EventSuppression, read_deleted="no")
if uuid is not None:
query = query.filter_by(uuid=uuid)
if alarm_id is not None:
query = query.filter_by(alarm_id=alarm_id)
if description is not None:
query = query.filter_by(description=description)
if suppression_status is not None:
query = query.filter_by(suppression_status=suppression_status)
query = query.filter_by(set_for_deletion=False)
return _paginate_query(models.EventSuppression, limit, None,
sort_key, sort_dir, query)
@objects.objectify(objects.event_suppression)
def event_suppression_update(self, uuid, values):
with _session_for_write() as session:
query = model_query(models.EventSuppression, session=session)
query = query.filter_by(uuid=uuid)
count = query.update(values, synchronize_session='fetch')
if count != 1:
                raise exception.NotFound(uuid)
return query.one()
    # NOTE: method is deprecated and provided for API compatibility.
    # object class will convert Network entity to an iextoam object


@ -497,35 +497,6 @@ def upgrade(migrate_engine):
    )
    i_community.create()
i_alarm = Table(
'i_alarm',
meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('uuid', String(255), unique=True, index=True),
Column('alarm_id', String(255), index=True),
Column('alarm_state', String(255)),
Column('entity_type_id', String(255), index=True),
Column('entity_instance_id', String(255), index=True),
Column('timestamp', DateTime(timezone=False)),
Column('severity', String(255), index=True),
Column('reason_text', String(255)),
Column('alarm_type', String(255), index=True),
Column('probable_cause', String(255)),
Column('proposed_repair_action', String(255)),
Column('service_affecting', Boolean),
Column('suppression', Boolean),
Column('inhibit_alarms', Boolean),
Column('masked', Boolean),
mysql_engine=ENGINE,
mysql_charset=CHARSET,
)
i_alarm.create()
    i_user = Table(
        'i_user',
        meta,


@ -1,244 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (c) 2013-2016 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
import os
import time
from sqlalchemy import Boolean, Integer, DateTime
from sqlalchemy import Column, MetaData, String, Table
from sqlalchemy.schema import ForeignKeyConstraint
from sysinv.openstack.common import log
ENGINE = 'InnoDB'
CHARSET = 'utf8'
LOG = log.getLogger(__name__)
def logInfo(msg):
msg = "UPGRADE EVENTLOG: {}".format(msg)
LOG.info(msg)
def _tableFromName(migrate_engine, tableName):
meta = MetaData()
meta.bind = migrate_engine
t = Table(tableName, meta, autoload=True)
return t
def _tableExists(migrate_engine, tableName):
return _tableFromName(migrate_engine, tableName).exists()
def _tableDrop(migrate_engine, tableName):
if _tableExists(migrate_engine, tableName):
logInfo("Dropping table {}.".format(tableName))
return _tableFromName(migrate_engine, tableName).drop()
def countTable(migrate_engine, tableName):
r = migrate_engine.execute('select count(*) from {}'.format(tableName))
    row = r.fetchone()  # grab the first (only) row holding the count
    return row[0]
def populateEventLogFromAlarmHistoryAndCustomerLogs(migrate_engine):
#
# Raw postgres SQL to populate the i_event_log from
# existing data in the i_alarm_history and i_customer_log tables
#
if not _tableExists(migrate_engine, 'i_alarm_history') or \
not _tableExists(migrate_engine, 'i_customer_log'):
logInfo("Not performing event log data migration since source tables do not exist")
return
populateEventLogSQL = """
insert into i_event_log
( created_at,
updated_at,
deleted_at,
uuid,
event_log_id,
state,
entity_type_id,
entity_instance_id,
timestamp,
severity,
reason_text,
event_log_type,
probable_cause,
proposed_repair_action,
service_affecting,
suppression )
select
created_at,
updated_at,
deleted_at,
uuid,
alarm_id as event_log_id,
alarm_state as state,
entity_type_id,
entity_instance_id,
timestamp,
severity,
reason_text,
alarm_type as event_log_type,
probable_cause,
proposed_repair_action,
service_affecting,
suppression
from i_alarm_history
union
select
created_at,
updated_at,
deleted_at,
uuid,
log_id as event_log_id,
'log' as state,
entity_type_id,
entity_instance_id,
timestamp,
severity,
reason_text,
log_type as event_log_type,
probable_cause,
null as proposed_repair_action,
service_affecting,
null as suppression
from i_customer_log
order by created_at
"""
start = time.time()
iAlarmHistoryCount = countTable(migrate_engine, 'i_alarm_history')
iCustomerLogCount = countTable(migrate_engine, 'i_customer_log')
logInfo("Data migration started.")
    result = None
    if iAlarmHistoryCount > 0 or iCustomerLogCount > 0:
        logInfo("Migrating {} i_alarm_history records and "
                "{} i_customer_log records.".format(iAlarmHistoryCount,
                                                    iCustomerLogCount))
        result = migrate_engine.execute(populateEventLogSQL)
elapsedTime = time.time() - start
logInfo("Data migration end. Elapsed time is {} seconds.".format(elapsedTime))
return result
def get_events_yaml_filename():
events_yaml_name = os.environ.get("EVENTS_YAML")
if events_yaml_name is not None and os.path.isfile(events_yaml_name):
return events_yaml_name
return "/etc/fm/events.yaml"
def is_execute_alter_table():
alter_table = True
if os.environ.get("SYSINV_TEST_ENV") == 'True':
alter_table = False
return alter_table
def add_alarm_table_foreign_key(migrate_engine):
add_event_suppression_foreign_key = """
alter table i_alarm
add constraint fk_ialarm_esuppression_alarm_id
foreign key (alarm_id)
references event_suppression (alarm_id)
match full
"""
migrate_engine.execute(add_event_suppression_foreign_key)
def upgrade(migrate_engine):
start = time.time()
meta = MetaData()
meta.bind = migrate_engine
event_suppression = Table(
'event_suppression',
meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('uuid', String(36), unique=True, index=True),
Column('alarm_id', String(15), unique=True, index=True),
Column('description', String(255)),
Column('suppression_status', String(15)),
Column('set_for_deletion', Boolean),
mysql_engine=ENGINE,
mysql_charset=CHARSET,
)
event_suppression.create()
if is_execute_alter_table():
add_alarm_table_foreign_key(migrate_engine)
i_event_log = Table(
'i_event_log',
meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('uuid', String(255), unique=True, index=True),
Column('event_log_id', String(255), index=True),
Column('state', String(255)),
Column('entity_type_id', String(255), index=True),
Column('entity_instance_id', String(255), index=True),
Column('timestamp', DateTime(timezone=False)),
Column('severity', String(255), index=True),
Column('reason_text', String(255)),
Column('event_log_type', String(255), index=True),
Column('probable_cause', String(255)),
Column('proposed_repair_action', String(255)),
Column('service_affecting', Boolean),
Column('suppression', Boolean),
Column('alarm_id', String(255), nullable=True),
ForeignKeyConstraint(
['alarm_id'],
['event_suppression.alarm_id'],
use_alter=True,
name='fk_elog_alarm_id_esuppression_alarm_id'
),
mysql_engine=ENGINE,
mysql_charset=CHARSET,
)
i_event_log.create()
populateEventLogFromAlarmHistoryAndCustomerLogs(migrate_engine)
_tableDrop(migrate_engine, 'i_alarm_history')
_tableDrop(migrate_engine, 'i_customer_log')
elapsedTime = time.time() - start
logInfo("Elapsed time for eventlog table create and migrate is {} seconds.".format(elapsedTime))
def downgrade(migrate_engine):
# As per other openstack components, downgrade is
# unsupported in this release.
raise NotImplementedError('SysInv database downgrade is unsupported.')


@ -0,0 +1,14 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
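# This file is a no-op placeholder: the FM migration content moved out of
# sysinv with this change, and an empty upgrade keeps the database
# migration version sequence intact.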
def upgrade(migrate_engine):
pass
def downgrade(migrate_engine):
pass


@ -279,10 +279,6 @@ def upgrade(migrate_engine):
    # Update the storage_lvm table.
    _update_storage_lvm_device_path(storage_lvm)
# 061_fm_add_mgmt_affecting.py
event_suppression = Table('event_suppression', meta, autoload=True)
event_suppression.create_column(Column('mgmt_affecting', String(255)))
    # 062_iscsi_initiator_name.py
    i_host = Table('i_host', meta, autoload=True)
    i_host.create_column(Column('iscsi_initiator_name', String(64)))


@ -1,27 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from sqlalchemy import String
from sqlalchemy import Column, MetaData, Table
ENGINE = 'InnoDB'
CHARSET = 'utf8'
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
event_suppression = Table('event_suppression', meta, autoload=True)
event_suppression.create_column(Column('degrade_affecting', String(255)))
return True
def downgrade(migrate_engine):
# As per other openstack components, downgrade is
# unsupported in this release.
raise NotImplementedError('SysInv database downgrade is unsupported.')


@ -0,0 +1,14 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
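# This file is a no-op placeholder: the FM migration content moved out of
# sysinv with this change, and an empty upgrade keeps the database
# migration version sequence intact.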
def upgrade(migrate_engine):
pass
def downgrade(migrate_engine):
pass


@ -761,29 +761,6 @@ class icommunity(Base):
    access = Column(accessEnum, default='ro')
class ialarm(Base):
__tablename__ = 'i_alarm'
id = Column(Integer, primary_key=True, nullable=False)
uuid = Column(String(255), unique=True, index=True)
alarm_id = Column('alarm_id', String(255),
ForeignKey('event_suppression.alarm_id'),
nullable=True, index=True)
alarm_state = Column(String(255))
entity_type_id = Column(String(255), index=True)
entity_instance_id = Column(String(255), index=True)
timestamp = Column(DateTime(timezone=False))
severity = Column(String(255), index=True)
reason_text = Column(String(255))
alarm_type = Column(String(255), index=True)
probable_cause = Column(String(255))
proposed_repair_action = Column(String(255))
service_affecting = Column(Boolean, default=False)
suppression = Column(Boolean, default=False)
inhibit_alarms = Column(Boolean, default=False)
masked = Column(Boolean, default=False)
class iuser(Base):
    __tablename__ = 'i_user'
@ -1027,40 +1004,6 @@ class Services(Base):
    capabilities = Column(JSONEncodedDict)
class event_log(Base):
__tablename__ = 'i_event_log'
id = Column(Integer, primary_key=True, nullable=False)
uuid = Column(String(255), unique=True, index=True)
event_log_id = Column('event_log_id', String(255),
ForeignKey('event_suppression.alarm_id'),
nullable=True, index=True)
state = Column(String(255))
entity_type_id = Column(String(255), index=True)
entity_instance_id = Column(String(255), index=True)
timestamp = Column(DateTime(timezone=False))
severity = Column(String(255), index=True)
reason_text = Column(String(255))
event_log_type = Column(String(255), index=True)
probable_cause = Column(String(255))
proposed_repair_action = Column(String(255))
service_affecting = Column(Boolean, default=False)
suppression = Column(Boolean, default=False)
class EventSuppression(Base):
__tablename__ = 'event_suppression'
id = Column('id', Integer, primary_key=True, nullable=False)
uuid = Column('uuid', String(36), unique=True)
alarm_id = Column('alarm_id', String(255), unique=True)
description = Column('description', String(255))
suppression_status = Column('suppression_status', String(255))
set_for_deletion = Column('set_for_deletion', Boolean)
mgmt_affecting = Column('mgmt_affecting', String(255))
degrade_affecting = Column('degrade_affecting', String(255))
class Routes(Base):
    __tablename__ = 'routes'


@ -21,7 +21,6 @@ import functools
from sysinv.objects import address
from sysinv.objects import address_mode
from sysinv.objects import address_pool
from sysinv.objects import alarm
from sysinv.objects import ceph_mon
from sysinv.objects import certificate
from sysinv.objects import cluster
@ -34,8 +33,6 @@ from sysinv.objects import partition
from sysinv.objects import dns
from sysinv.objects import drbdconfig
from sysinv.objects import port_ethernet
from sysinv.objects import event_log
from sysinv.objects import event_suppression
from sysinv.objects import helm_overrides
from sysinv.objects import host
from sysinv.objects import host_upgrade
@ -137,7 +134,6 @@ lvg = lvg.LVG
pv = pv.PV
trapdest = trapdest.TrapDest
community = community.Community
alarm = alarm.Alarm
user = user.User
dns = dns.DNS
ntp = ntp.NTP
@ -148,8 +144,6 @@ storage_lvm = storage_lvm.StorageLVM
ceph_mon = ceph_mon.CephMon
controller_fs = controller_fs.ControllerFS
drbdconfig = drbdconfig.DRBDConfig
event_log = event_log.EventLog
event_suppression = event_suppression.EventSuppression
infra_network = network_infra.InfraNetwork
address = address.Address
address_pool = address_pool.AddressPool
@ -204,7 +198,6 @@ __all__ = (system,
           pv,
           trapdest,
           community,
           alarm,
           user,
           dns,
           ntp,
@ -214,8 +207,6 @@ __all__ = (system,
           storage_lvm,
           ceph_mon,
           drbdconfig,
           event_log,
           event_suppression,
           infra_network,
           address,
           address_mode,


@ -1,64 +0,0 @@
# Copyright (c) 2013-2016 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from sysinv.db import api as db_api
from sysinv.objects import base
from sysinv.objects import utils
from sysinv.common import constants
class Alarm(base.SysinvObject):
dbapi = db_api.get_instance()
fields = {
'id': int,
'uuid': utils.str_or_none,
'alarm_id': utils.str_or_none,
'alarm_state': utils.str_or_none,
'entity_type_id': utils.str_or_none,
'entity_instance_id': utils.str_or_none,
'timestamp': utils.datetime_or_str_or_none,
'severity': utils.str_or_none,
'reason_text': utils.str_or_none,
'alarm_type': utils.str_or_none,
'probable_cause': utils.str_or_none,
'proposed_repair_action': utils.str_or_none,
'service_affecting': utils.str_or_none,
'suppression': utils.str_or_none,
'inhibit_alarms': utils.str_or_none,
'masked': utils.str_or_none,
'suppression_status': utils.str_or_none,
'mgmt_affecting': utils.str_or_none,
'degrade_affecting': utils.str_or_none,
}
@staticmethod
def _from_db_object(server, db_server):
"""Converts a database entity to a formal object."""
if isinstance(db_server, tuple):
db_server_fields = db_server[0]
db_suppress_status = db_server[constants.DB_SUPPRESS_STATUS]
db_mgmt_affecting = db_server[constants.DB_MGMT_AFFECTING]
db_degrade_affecting = db_server[constants.DB_DEGRADE_AFFECTING]
db_server_fields['suppression_status'] = db_suppress_status
db_server_fields['mgmt_affecting'] = db_mgmt_affecting
db_server_fields['degrade_affecting'] = db_degrade_affecting
else:
db_server_fields = db_server
for field in server.fields:
server[field] = db_server_fields[field]
server.obj_reset_changes()
return server
@base.remotable_classmethod
def get_by_uuid(cls, context, uuid):
return cls.dbapi.ialarm_get(uuid)
def save_changes(self, context, updates):
self.dbapi.ialarm_update(self.uuid, updates)


@ -1,56 +0,0 @@
# Copyright (c) 2013-2016 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from sysinv.db import api as db_api
from sysinv.objects import base
from sysinv.objects import utils
from sysinv.openstack.common import log as logging
LOG = logging.getLogger('event_log')
class EventLog(base.SysinvObject):
dbapi = db_api.get_instance()
fields = {
'id': int,
'uuid': utils.str_or_none,
'event_log_id': utils.str_or_none,
'state': utils.str_or_none,
'entity_type_id': utils.str_or_none,
'entity_instance_id': utils.str_or_none,
'timestamp': utils.datetime_or_str_or_none,
'severity': utils.str_or_none,
'reason_text': utils.str_or_none,
'event_log_type': utils.str_or_none,
'probable_cause': utils.str_or_none,
'proposed_repair_action': utils.str_or_none,
'service_affecting': utils.str_or_none,
'suppression': utils.str_or_none,
'suppression_status': utils.str_or_none,
}
@staticmethod
def _from_db_object(server, db_server):
"""Converts a database entity to a formal object."""
if isinstance(db_server, tuple):
db_server_fields = db_server[0]
db_suppress_status = db_server[1]
db_server_fields['suppression_status'] = db_suppress_status
else:
db_server_fields = db_server
for field in server.fields:
server[field] = db_server_fields[field]
server.obj_reset_changes()
return server
@base.remotable_classmethod
def get_by_uuid(cls, context, uuid):
return cls.dbapi.event_log_get(uuid)


@ -1,29 +0,0 @@
# Copyright (c) 2013-2014 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from sysinv.db import api as db_api
from sysinv.objects import base
from sysinv.objects import utils
# from sysinv.openstack.common import log as logging
class EventSuppression(base.SysinvObject):
# VERSION 1.0: Initial version
VERSION = '1.0'
dbapi = db_api.get_instance()
fields = {
'id': int,
'uuid': utils.uuid_or_none,
'alarm_id': utils.str_or_none,
'description': utils.str_or_none,
'suppression_status': utils.str_or_none,
}
@base.remotable_classmethod
def get_by_uuid(cls, context, uuid):
return cls.dbapi.event_suppression_get(uuid)


@ -0,0 +1,108 @@
#
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from . import openstack
class FmPuppet(openstack.OpenstackBasePuppet):
"""Class to encapsulate puppet operations for fm configuration"""
SERVICE_NAME = 'fm'
SERVICE_PORT = 18002
def get_static_config(self):
dbuser = self._get_database_username(self.SERVICE_NAME)
return {
'fm::db::postgresql::user': dbuser,
}
def get_secure_static_config(self):
dbpass = self._get_database_password(self.SERVICE_NAME)
kspass = self._get_service_password(self.SERVICE_NAME)
return {
'fm::db::postgresql::password': dbpass,
'fm::keystone::auth::password': kspass,
'fm::keystone::authtoken::password': kspass,
'fm::auth::auth_password': kspass,
}
def get_system_config(self):
ksuser = self._get_service_user_name(self.SERVICE_NAME)
system = self.dbapi.isystem_get_one()
trapdests = self.dbapi.itrapdest_get_list()
config = {
'fm::keystone::auth::public_url': self.get_public_url(),
'fm::keystone::auth::internal_url': self.get_internal_url(),
'fm::keystone::auth::admin_url': self.get_admin_url(),
'fm::keystone::auth::auth_name': ksuser,
'fm::keystone::auth::region': self.get_region_name(),
'fm::keystone::auth::tenant': self._get_service_tenant_name(),
'fm::keystone::authtoken::auth_url':
self._keystone_identity_uri(),
'fm::keystone::authtoken::auth_uri':
self._keystone_auth_uri(),
'fm::keystone::authtoken::user_domain_name':
self._get_service_user_domain_name(),
'fm::keystone::authtoken::project_domain_name':
self._get_service_project_domain_name(),
'fm::keystone::authtoken::project_name':
self._get_service_tenant_name(),
'fm::keystone::authtoken::region_name':
self._keystone_region_name(),
'fm::keystone::authtoken::username': ksuser,
'fm::auth::auth_url':
self._keystone_auth_uri(),
'fm::auth::auth_tenant_name':
self._get_service_tenant_name(),
'platform::fm::params::region_name': self._region_name(),
'platform::fm::params::system_name': system.name,
'platform::fm::params::service_create':
self._to_create_services(),
}
if trapdests is not None:
trap_list = []
for e in trapdests:
trap_list.append(e.ip_address + ' ' + e.community)
config.update(
{'platform::fm::params::trap_destinations': trap_list})
return config
def get_secure_system_config(self):
config = {
'fm::database_connection':
self._format_database_connection(self.SERVICE_NAME),
}
return config
def get_host_config(self, host):
config = {
'platform::fm::params::api_host': host.mgmt_ip
}
return config
def get_public_url(self):
return self._format_public_endpoint(self.SERVICE_PORT)
def get_internal_url(self):
return self._format_private_endpoint(self.SERVICE_PORT)
def get_admin_url(self):
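        # like the internal URL, the admin endpoint is formatted from the
        # private (management network) address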
return self._format_private_endpoint(self.SERVICE_PORT)
def get_region_name(self):
return self._get_service_region_name(self.SERVICE_NAME)
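For orientation, the kind of hiera data this operator contributes once the manifests are generated; every value below is illustrative (addresses, region and system name are assumptions, not taken from this change):

# Illustrative shape of FmPuppet.get_system_config() output:
example_system_config = {
    'fm::keystone::auth::public_url': 'http://10.10.10.2:18002',
    'fm::keystone::auth::internal_url': 'http://192.168.204.2:18002',
    'fm::keystone::auth::admin_url': 'http://192.168.204.2:18002',
    'fm::keystone::auth::auth_name': 'fm',
    'fm::keystone::auth::region': 'RegionOne',
    'platform::fm::params::system_name': 'my-system',
    'platform::fm::params::trap_destinations': ['192.168.204.50 public'],
}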


@ -90,6 +90,8 @@ class NfvPuppet(openstack.OpenstackBasePuppet):
                self._operator.patching.get_region_name(),
            'nfv::nfvi::ceilometer_region_name':
                self._operator.ceilometer.get_region_name(),
            'nfv::nfvi::fm_region_name':
                self._operator.fm.get_region_name(),
            'nfv::vim::vim_api_ip': self._get_management_address(),
            'nfv::vim::vim_webserver_ip': self._get_oam_address(),


@ -25,6 +25,7 @@ from . import cinder
from . import common
from . import dcmanager
from . import dcorch
from . import fm
from . import glance
from . import gnocchi
from . import heat
@ -105,6 +106,7 @@ class PuppetOperator(object):
        self.kubernetes = kubernetes.KubernetesPuppet(self)
        self.service_parameter = service_parameter.ServiceParamPuppet(self)
        self.smapi = smapi.SmPuppet(self)
self.fm = fm.FmPuppet(self)
    @property
    def context(self):
@ -146,6 +148,7 @@ class PuppetOperator(object):
            config.update(self.dcmanager.get_static_config())
            config.update(self.dcorch.get_static_config())
            config.update(self.smapi.get_static_config())
            config.update(self.fm.get_static_config())
            filename = 'static.yaml'
            self._write_config(filename, config)
@ -190,6 +193,7 @@ class PuppetOperator(object):
            config.update(self.dcmanager.get_secure_static_config())
            config.update(self.dcorch.get_secure_static_config())
            config.update(self.smapi.get_secure_static_config())
            config.update(self.fm.get_secure_static_config())
            filename = 'secure_static.yaml'
            self._write_config(filename, config)
@ -229,6 +233,7 @@ class PuppetOperator(object):
            config.update(self.dcorch.get_system_config())
            config.update(self.kubernetes.get_system_config())
            config.update(self.smapi.get_system_config())
            config.update(self.fm.get_system_config())
            # service_parameter must be last to permit overrides
            config.update(self.service_parameter.get_system_config())
@ -261,6 +266,7 @@ class PuppetOperator(object):
            config.update(self.dcmanager.get_secure_system_config())
            config.update(self.dcorch.get_secure_system_config())
            config.update(self.kubernetes.get_secure_system_config())
            config.update(self.fm.get_secure_system_config())
            filename = 'secure_system.yaml'
            self._write_config(filename, config)
@ -301,6 +307,7 @@ class PuppetOperator(object):
            config.update(self.nova.get_host_config(host))
            config.update(self.neutron.get_host_config(host))
            config.update(self.smapi.get_host_config(host))
            config.update(self.fm.get_host_config(host))
            # service_parameter must be last to permit overrides
            config.update(self.service_parameter.get_host_config(host))


@ -863,20 +863,6 @@ class TestMigrations(BaseMigrationTestCase, WalkVersionsMixin):
        for col in communities_enums_col:
            self.assertColumnExists(engine, 'i_community', col)
alarms = db_utils.get_table(engine, 'i_alarm')
alarms_col = {
'id': 'Integer', 'uuid': 'String', 'deleted_at': 'DateTime',
'created_at': 'DateTime', 'updated_at': 'DateTime',
'alarm_id': 'String', 'alarm_state': 'String', 'entity_type_id': 'String',
'entity_instance_id': 'String', 'timestamp': 'DateTime', 'severity': 'String',
'reason_text': 'String', 'alarm_type': 'String', 'probable_cause': 'String',
'proposed_repair_action': 'String', 'service_affecting': 'Boolean',
'suppression': 'Boolean', 'inhibit_alarms': 'Boolean', 'masked': 'Boolean',
}
for col, coltype in alarms_col.items():
self.assertTrue(isinstance(alarms.c[col].type,
getattr(sqlalchemy.types, coltype)))
        users = db_utils.get_table(engine, 'i_user')
        users_col = {
            'id': 'Integer', 'uuid': 'String', 'deleted_at': 'DateTime',
@ -1801,15 +1787,7 @@ class TestMigrations(BaseMigrationTestCase, WalkVersionsMixin):
        self.assertColumnNotExists(engine, 'journal', 'device_node')
        self.assertColumnExists(engine, 'journal', 'device_path')
        # 61 --> Add a column to table "event_suppression"
        # 61 --> Add a column to table "i_host"
event_suppression = db_utils.get_table(engine, 'event_suppression')
event_suppression_col = {
'mgmt_affecting': 'String',
}
for col, coltype in event_suppression_col.items():
self.assertTrue(isinstance(event_suppression.c[col].type,
getattr(sqlalchemy.types, coltype)))
# 62 --> Add a column to table "i_host"
        host = db_utils.get_table(engine, 'i_host')
        host_col = {
            'iscsi_initiator_name': 'String',