Merge remote-tracking branch starlingx/master into HEAD

Change-Id: I251438ffa46abb3840e0709240a074cfd06a0848
Signed-off-by: Scott Little <scott.little@windriver.com>
commit d94e998e45
Scott Little <scott.little@windriver.com>, 2019-02-11 13:05:50 -05:00
66 changed files with 592 additions and 349 deletions

View File

@ -34,3 +34,9 @@ config-gate-worker
# puppet-manifests
puppet-manifests
# ansible
sshpass
python2-ptyprocess
python2-pexpect
ansible
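
The new packages above back the ansible path: sshpass and python2-pexpect exist to drive ssh password prompts non-interactively. As a rough sketch of what pexpect provides (the host and password below are placeholders, not values from this commit):

import pexpect

# Spawn an ssh session and answer its password prompt -- the same job
# sshpass performs for ansible's ssh connections.
child = pexpect.spawn('ssh wrsroot@controller-0 hostname')
child.expect('assword:')
child.sendline('not-a-real-password')
child.expect(pexpect.EOF)
print(child.before)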

View File

@ -1,2 +1,2 @@
SRC_DIR="controllerconfig"
TIS_PATCH_VER=149
TIS_PATCH_VER=150

View File

@ -2475,7 +2475,7 @@ class ConfigAssistant():
while True:
user_input = input(
"Configure an cluster host VLAN [y/N]: ")
"Configure a cluster host VLAN [y/N]: ")
if user_input.lower() == 'q':
raise UserQuit
elif user_input.lower() == 'y':
@ -3223,6 +3223,7 @@ class ConfigAssistant():
self.cluster_host_interface_name = \
self.management_interface_name
self.cluster_host_interface = self.management_interface
self.cluster_host_vlan = self.management_vlan
self.cluster_host_interface_configured = True
# External OAM network configuration

View File

@ -1,6 +1,6 @@
#!/bin/bash
#
# Copyright (c) 2013-2017 Wind River Systems, Inc.
# Copyright (c) 2013-2019 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
@ -229,7 +229,7 @@ start()
fi
# Check whether our installed load matches the active controller
CONTROLLER_UUID=`curl -sf http://controller/feed/rel-${SW_VERSION}/install_uuid`
CONTROLLER_UUID=`curl -sf http://controller:${http_port}/feed/rel-${SW_VERSION}/install_uuid`
if [ $? -ne 0 ]
then
fatal_error "Unable to retrieve installation uuid from active controller"
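
The load check above now honours the configurable HTTP port when fetching install_uuid from the active controller's feed. A hedged Python equivalent of the new curl line, reading the same variables from the environment purely for illustration (the defaults are placeholders):

import os
import sys
import urllib2  # these platform scripts are Python 2 vintage

http_port = os.environ.get('http_port', '8080')
sw_version = os.environ.get('SW_VERSION', '19.01')
url = 'http://controller:%s/feed/rel-%s/install_uuid' % (http_port, sw_version)
try:
    controller_uuid = urllib2.urlopen(url, timeout=15).read().strip()
except Exception:
    sys.exit("Unable to retrieve installation uuid from active controller")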

View File

@ -480,6 +480,7 @@ data:
cinder:
DEFAULT:
backup_driver: cinder.backup.drivers.swift
storage: rbd
source:
type: tar
location: http://172.17.0.1/helm_charts/cinder-0.1.0.tgz
@ -567,6 +568,8 @@ data:
job_ks_endpoints: false
ingress_osapi: false
service_ingress_osapi: false
cron_job_cell_setup: false
cron_job_service_cleaner: false
labels:
agent:
compute:
@ -618,13 +621,95 @@ data:
anti:
type:
default: requiredDuringSchedulingIgnoredDuringExecution
user:
nova:
uid: 0
conf:
ceph:
enabled: true
nova:
DEFAULT:
default_mempages_size: 2048
reserved_host_memory_mb: 0
compute_monitors: cpu.virt_driver
running_deleted_instance_poll_interval: 60
mkisofs_cmd: /usr/bin/genisoimage
network_allocate_retries: 2
force_raw_images: false
concurrent_disk_operations: 2
# Set number of block device allocate retries and interval
# for volume create when VM boots and creates a new volume.
# The total block allocate retries time is set to 2 hours
# to satisfy the volume allocation time on slow RPM disks
# which may take 1 hour and a half per volume when several
# volumes are created in parallel.
block_device_allocate_retries_interval: 3
block_device_allocate_retries: 2400
disk_allocation_ratio: 1.0
cpu_allocation_ratio: 16.0
ram_allocation_ratio: 1.0
remove_unused_original_minimum_age_seconds: 3600
enable_new_services: false
map_new_hosts: false
libvirt:
virt_type: qemu
cpu_mode: none
live_migration_completion_timeout: 180
live_migration_permit_auto_converge: true
mem_stats_period_seconds: 0
rbd_secret_uuid: null
rbd_user: null
# Allow up to 1 day for resize confirm
remove_unused_resized_minimum_age_seconds: 86400
database:
idle_timeout: 60
max_overflow: 64
max_pool_size: 1
api_database:
idle_timeout: 60
max_overflow: 64
max_pool_size: 1
cell0_database:
idle_timeout: 60
max_overflow: 64
max_pool_size: 1
placement:
os_interface: internal
neutron:
default_floating_pool: public
notifications:
notification_format: unversioned
filter_scheduler:
disk_weight_multiplier: 0.0
enabled_filters:
- RetryFilter
- ComputeFilter
- AvailabilityZoneFilter
- AggregateInstanceExtraSpecsFilter
- ComputeCapabilitiesFilter
- ImagePropertiesFilter
- NUMATopologyFilter
- ServerGroupAffinityFilter
- ServerGroupAntiAffinityFilter
- PciPassthroughFilter
- DiskFilter
io_ops_weight_multiplier: -5.0
pci_weight_multiplier: 0.0
ram_weight_multiplier: 0.0
soft_affinity_weight_multiplier: 0.0
soft_anti_affinity_weight_multiplier: 0.0
scheduler:
discover_hosts_in_cells_interval: 30
periodic_task_interval: -1
upgrade_levels: None
metrics:
required: false
workarounds:
enable_numa_live_migration: True
network:
sshd:
enabled: true
source:
type: tar
location: http://172.17.0.1/helm_charts/nova-0.1.0.tgz
@ -2223,12 +2308,6 @@ data:
STATIC_ROOT = '/var/www/html/horizon'
# StarlingX additions
# Change session and CSRF cookie names to prevent conflict with
# platform horizon
CSRF_COOKIE_NAME = 'appcsrftoken'
SESSION_COOKIE_NAME = 'appsessionid'
#OPENSTACK_KEYSTONE_URL = "http://%s:5000/v3" % OPENSTACK_HOST
#present OPENSTACK_API_VERSIONS={"identity":3}
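
The block_device_allocate_retries comment in the nova overrides above is easy to sanity-check; the budget works out exactly as stated:

# 2400 retries at a 3 second interval gives a 2 hour allocation window.
retries = 2400
interval_s = 3
total_hours = retries * interval_s / 3600.0
assert total_hours == 2.0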

View File

@ -480,6 +480,7 @@ data:
cinder:
DEFAULT:
backup_driver: cinder.backup.drivers.swift
storage: rbd
source:
type: tar
location: http://172.17.0.1/helm_charts/cinder-0.1.0.tgz
@ -567,6 +568,8 @@ data:
job_ks_endpoints: false
ingress_osapi: false
service_ingress_osapi: false
cron_job_cell_setup: false
cron_job_service_cleaner: false
labels:
agent:
compute:
@ -618,13 +621,95 @@ data:
anti:
type:
default: requiredDuringSchedulingIgnoredDuringExecution
user:
nova:
uid: 0
conf:
ceph:
enabled: true
nova:
DEFAULT:
default_mempages_size: 2048
reserved_host_memory_mb: 0
compute_monitors: cpu.virt_driver
running_deleted_instance_poll_interval: 60
mkisofs_cmd: /usr/bin/genisoimage
network_allocate_retries: 2
force_raw_images: false
concurrent_disk_operations: 2
# Set number of block device allocate retries and interval
# for volume create when VM boots and creates a new volume.
# The total block allocate retries time is set to 2 hours
# to satisfy the volume allocation time on slow RPM disks
# which may take 1 hour and a half per volume when several
# volumes are created in parallel.
block_device_allocate_retries_interval: 3
block_device_allocate_retries: 2400
disk_allocation_ratio: 1.0
cpu_allocation_ratio: 16.0
ram_allocation_ratio: 1.0
remove_unused_original_minimum_age_seconds: 3600
enable_new_services: false
map_new_hosts: false
libvirt:
virt_type: qemu
cpu_mode: none
live_migration_completion_timeout: 180
live_migration_permit_auto_converge: true
mem_stats_period_seconds: 0
rbd_secret_uuid: null
rbd_user: null
# Allow up to 1 day for resize confirm
remove_unused_resized_minimum_age_seconds: 86400
database:
idle_timeout: 60
max_overflow: 64
max_pool_size: 1
api_database:
idle_timeout: 60
max_overflow: 64
max_pool_size: 1
cell0_database:
idle_timeout: 60
max_overflow: 64
max_pool_size: 1
placement:
os_interface: internal
neutron:
default_floating_pool: public
notifications:
notification_format: unversioned
filter_scheduler:
disk_weight_multiplier: 0.0
enabled_filters:
- RetryFilter
- ComputeFilter
- AvailabilityZoneFilter
- AggregateInstanceExtraSpecsFilter
- ComputeCapabilitiesFilter
- ImagePropertiesFilter
- NUMATopologyFilter
- ServerGroupAffinityFilter
- ServerGroupAntiAffinityFilter
- PciPassthroughFilter
- DiskFilter
io_ops_weight_multiplier: -5.0
pci_weight_multiplier: 0.0
ram_weight_multiplier: 0.0
soft_affinity_weight_multiplier: 0.0
soft_anti_affinity_weight_multiplier: 0.0
scheduler:
discover_hosts_in_cells_interval: 30
periodic_task_interval: -1
upgrade_levels: None
metrics:
required: false
workarounds:
enable_numa_live_migration: True
network:
sshd:
enabled: true
source:
type: tar
location: http://172.17.0.1/helm_charts/nova-0.1.0.tgz
@ -2223,12 +2308,6 @@ data:
STATIC_ROOT = '/var/www/html/horizon'
# StarlingX additions
# Change session and CSRF cookie names to prevent conflict with
# platform horizon
CSRF_COOKIE_NAME = 'appcsrftoken'
SESSION_COOKIE_NAME = 'appsessionid'
#OPENSTACK_KEYSTONE_URL = "http://%s:5000/v3" % OPENSTACK_HOST
#present OPENSTACK_API_VERSIONS={"identity":3}

View File

@ -1,2 +1,2 @@
SRC_DIR="src"
TIS_PATCH_VER=79
TIS_PATCH_VER=80

View File

@ -55,9 +55,7 @@ include ::platform::memcached
include ::platform::nfv
include ::platform::nfv::api
include ::platform::ceph
include ::platform::ceph::monitor
include ::platform::ceph::storage
include ::platform::ceph::controller
include ::platform::ceph::rgw
include ::platform::influxdb

View File

@ -28,11 +28,7 @@ include ::platform::grub
include ::platform::collectd
include ::platform::filesystem::storage
include ::platform::docker
include ::platform::ceph
include ::platform::ceph::monitor
include ::platform::ceph::storage
include ::openstack::ceilometer
include ::openstack::ceilometer::polling

View File

@ -35,10 +35,7 @@ include ::platform::dockerdistribution::compute
include ::platform::kubernetes::worker
include ::platform::multipath
include ::platform::client
include ::platform::ceph
include ::platform::ceph::monitor
include ::platform::ceph::worker
include ::openstack::client
include ::openstack::neutron
include ::openstack::neutron::agents

View File

@ -17,6 +17,9 @@ class openstack::horizon::params (
$tpm_object = undef,
$tpm_engine = '/usr/lib64/openssl/engines/libtpm2.so',
$http_port = 8080,
$https_port = 8443,
) { }
@ -198,9 +201,9 @@ class openstack::horizon::firewall
# of HTTPS for external protocols. The horizon
# server runs on port 8080 behind the proxy server.
if $enable_https {
$firewall_port = 443
$firewall_port = $https_port
} else {
$firewall_port = 80
$firewall_port = $http_port
}
platform::firewall::rule { 'dashboard':
@ -233,3 +236,15 @@ class openstack::horizon::runtime {
stage => post
}
}
class openstack::lighttpd::runtime
inherits ::openstack::horizon::params {
Class[$name] -> Class['::platform::helm::runtime']
file {'/etc/lighttpd/lighttpd.conf':
ensure => present,
content => template('openstack/lighttpd.conf.erb')
}
-> platform::sm::restart {'lighttpd': }
}

View File

@ -11,9 +11,11 @@ class openstack::swift::params (
class openstack::swift::firewall
inherits ::openstack::swift::params {
platform::firewall::rule { 'swift-api':
service_name => 'swift',
ports => $api_port,
if $service_enabled {
platform::firewall::rule { 'swift-api':
service_name => 'swift',
ports => $api_port,
}
}
}
@ -21,10 +23,12 @@ class openstack::swift::firewall
class openstack::swift::haproxy
inherits ::openstack::swift::params {
platform::haproxy::proxy { 'swift-restapi':
server_name => 's-swift',
public_port => $api_port,
private_port => $api_port,
if $service_enabled {
platform::haproxy::proxy { 'swift-restapi':
server_name => 's-swift',
public_port => $api_port,
private_port => $api_port,
}
}
}

View File

@ -144,7 +144,7 @@ static-file.exclude-extensions = ( ".php", ".pl", ".fcgi" )
######### Options that are good to be but not necessary to be changed #######
## bind to port (default: 80)
#server.port = 81
server.port = <%= @http_port %>
## bind to localhost (default: all interfaces)
#server.bind = "grisu.home.kneschke.de"
@ -223,7 +223,7 @@ $HTTP["url"] !~ "^/(rel-[^/]*|feed|updates|static|helm_charts)/" {
( "localhost" =>
(
"host" => "127.0.0.1",
"port" => 8080
"port" => 8008
)
)
)
@ -247,11 +247,11 @@ $HTTP["url"] !~ "^/(rel-[^/]*|feed|updates|static|helm_charts)/" {
#
#### Listen to IPv6
$SERVER["socket"] == "[::]:80" { }
$SERVER["socket"] == "[::]:<%= @http_port %>" { }
<% if @enable_https %>
#### SSL engine
$SERVER["socket"] == ":443" {
$SERVER["socket"] == ":<%= @https_port %>" {
ssl.engine = "enable"
ssl.pemfile = "/etc/ssl/private/server-cert.pem"
ssl.use-sslv2 = "disable"
@ -259,7 +259,7 @@ $SERVER["socket"] == ":443" {
ssl.cipher-list = "ALL:!aNULL:!eNULL:!EXPORT:!TLSv1:!DES:!MD5:!PSK:!RC4:!EDH-RSA-DES-CBC3-SHA:!EDH-DSS-DES-CBC3-SHA:!DHE-RSA-AES128-SHA:!DHE-RSA-AES256-SHA:!ECDHE-RSA-DES-CBC3-SHA:!ECDHE-RSA-AES128-SHA:!ECDHE-RSA-AES256-SHA:!DES-CBC3-SHA:!AES128-SHA:!AES256-SHA:!DHE-DSS-AES128-SHA:!DHE-DSS-AES256-SHA:!CAMELLIA128-SHA:!CAMELLIA256-SHA:!DHE-DSS-CAMELLIA128-SHA:!DHE-DSS-CAMELLIA256-SHA:!DHE-RSA-CAMELLIA128-SHA:!DHE-RSA-CAMELLIA256-SHA:!ECDHE-ECDSA-DES-CBC3-SHA:!ECDHE-ECDSA-AES128-SHA:!ECDHE-ECDSA-AES256-SHA"
}
$SERVER["socket"] == "[::]:443" {
$SERVER["socket"] == "[::]:<%= @https_port %>" {
ssl.engine = "enable"
ssl.pemfile = "/etc/ssl/private/server-cert.pem"
ssl.use-sslv2 = "disable"
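
With the template parameterized, the listener set follows directly from the two ports. A small sketch of what lighttpd ends up binding for the commit's default values (server.port covers IPv4; the explicit socket directives cover IPv6 and TLS):

http_port, https_port, enable_https = 8080, 8443, True
listeners = ['0.0.0.0:%d' % http_port, '[::]:%d' % http_port]
if enable_https:
    listeners += ['0.0.0.0:%d' % https_port, '[::]:%d' % https_port]
print(listeners)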

View File

@ -340,11 +340,10 @@ define platform_ceph_journal(
}
class platform::ceph::storage(
class platform::ceph::osds(
$osd_config = {},
$journal_config = {},
) inherits ::platform::ceph::params {
# Ensure partitions update prior to ceph storage configuration
Class['::platform::partitions'] -> Class[$name]
@ -483,6 +482,24 @@ class platform::ceph::rgw::keystone::auth(
}
}
class platform::ceph::worker {
if $::personality == 'worker' {
include ::platform::ceph
include ::platform::ceph::monitor
}
}
class platform::ceph::storage {
include ::platform::ceph
include ::platform::ceph::monitor
include ::platform::ceph::osds
}
class platform::ceph::controller {
include ::platform::ceph
include ::platform::ceph::monitor
include ::platform::ceph::osds
}
class platform::ceph::runtime {
include ::platform::ceph::monitor

View File

@ -36,6 +36,7 @@ class platform::config::file {
include ::platform::network::oam::params
include ::platform::network::cluster_host::params
include ::platform::kubernetes::params
include ::openstack::horizon::params
$kubernetes_enabled = $::platform::kubernetes::params::enabled
# dependent template variables
@ -166,6 +167,12 @@ class platform::config::file {
}
}
file_line { "${platform_conf} http_port":
path => $platform_conf,
line => "http_port=${::openstack::horizon::params::http_port}",
match => '^http_port=',
}
}
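
The file_line resource above keeps an http_port=<value> entry in platform.conf so that shell consumers (such as the init scripts changed elsewhere in this commit) can source it. A minimal sketch of reading it back, assuming the standard /etc/platform/platform.conf location:

def read_http_port(conf='/etc/platform/platform.conf', default=8080):
    try:
        with open(conf) as f:
            for line in f:
                if line.startswith('http_port='):
                    return int(line.split('=', 1)[1])
    except IOError:
        pass
    return default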

View File

@ -143,7 +143,9 @@ class platform::haproxy::runtime {
}
include ::openstack::keystone::haproxy
include ::openstack::neutron::haproxy
include ::openstack::nova::haproxy
if $::platform::kubernetes::params::enabled != true {
include ::openstack::nova::haproxy
}
include ::openstack::glance::haproxy
include ::openstack::cinder::haproxy
include ::openstack::heat::haproxy

View File

@ -54,7 +54,7 @@ class platform::helm
# TODO(jrichard): Upversion tiller image to v2.11.1 once released.
-> exec { 'initialize helm':
environment => [ 'KUBECONFIG=/etc/kubernetes/admin.conf', 'HOME=/home/wrsroot' ],
command => 'helm init --skip-refresh --service-account tiller --node-selectors "node-role.kubernetes.io/master"="" --tiller-image=gcr.io/kubernetes-helm/tiller@sha256:022ce9d4a99603be1d30a4ca96a7fa57a45e6f2ef11172f4333c18aaae407f5b', # lint:ignore:140chars
command => 'helm init --skip-refresh --service-account tiller --node-selectors "node-role.kubernetes.io/master"="" --tiller-image=gcr.io/kubernetes-helm/tiller:v2.12.1', # lint:ignore:140chars
logoutput => true,
user => 'wrsroot',
group => 'wrs',
@ -65,6 +65,16 @@ class platform::helm
command => "mount -o bind -t ext4 ${source_helm_repo_dir} ${target_helm_repo_dir}",
require => Exec['add local starlingx helm repo']
}
# the index file needs to be created after the bind mount; otherwise
# the helm repo cannot be updated until application-upload adds the index
-> exec { 'generate helm repo index on source':
command => "helm repo index ${source_helm_repo_dir}",
logoutput => true,
user => 'www',
group => 'www',
require => User['www']
}
} else {
exec { 'initialize helm':
environment => [ 'KUBECONFIG=/etc/kubernetes/admin.conf', 'HOME=/home/wrsroot' ],
@ -76,6 +86,8 @@ class platform::helm
}
}
include ::openstack::horizon::params
$port = $::openstack::horizon::params::http_port
exec { 'restart lighttpd for helm':
require => [File['/etc/lighttpd/lighttpd.conf', $target_helm_repo_dir], Exec['initialize helm']],
command => 'systemctl restart lighttpd.service',
@ -93,7 +105,7 @@ class platform::helm
-> exec { 'add local starlingx helm repo':
before => Exec['Stop lighttpd'],
environment => [ 'KUBECONFIG=/etc/kubernetes/admin.conf' , 'HOME=/home/wrsroot'],
command => 'helm repo add starlingx http://127.0.0.1/helm_charts',
command => "helm repo add starlingx http://127.0.0.1:${port}/helm_charts",
logoutput => true,
user => 'wrsroot',
group => 'wrs',
@ -102,3 +114,25 @@ class platform::helm
}
}
}
class platform::helm::runtime
{
include ::platform::kubernetes::params
if $::platform::kubernetes::params::enabled {
include ::platform::users
include ::openstack::horizon::params
$port = $::openstack::horizon::params::http_port
exec { 'update local starlingx helm repo':
environment => [ 'KUBECONFIG=/etc/kubernetes/admin.conf' , 'HOME=/home/wrsroot'],
command => "helm repo add starlingx http://127.0.0.1:${port}/helm_charts",
logoutput => true,
user => 'wrsroot',
group => 'wrs',
require => User['wrsroot']
}
}
}
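
A hypothetical standalone version of the runtime exec above: re-running helm repo add against an existing repo name points it at the new URL, which appears to be how a changed http_port is picked up without removing the repo first.

import os
import subprocess

port = 8080  # openstack::horizon::params::http_port
env = dict(os.environ, KUBECONFIG='/etc/kubernetes/admin.conf',
           HOME='/home/wrsroot')
subprocess.check_call(
    ['helm', 'repo', 'add', 'starlingx',
     'http://127.0.0.1:%d/helm_charts' % port], env=env)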

View File

@ -70,3 +70,18 @@ class platform::patching::api (
include ::platform::patching::firewall
include ::platform::patching::haproxy
}
class platform::patching::agent::reload {
exec { 'restart sw-patch-agent':
command => '/usr/sbin/sw-patch-agent-restart',
logoutput => true,
}
}
class platform::patching::runtime {
class {'::platform::patching::agent::reload':
stage => post
}
}

View File

@ -1,2 +1,2 @@
SRC_DIR="storageconfig"
TIS_PATCH_VER=5
TIS_PATCH_VER=6

View File

@ -1,6 +1,6 @@
#!/bin/bash
#
# Copyright (c) 2013-2015 Wind River Systems, Inc.
# Copyright (c) 2013-2019 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
@ -141,7 +141,7 @@ start()
fi
# Check whether our installed load matches the active controller
CONTROLLER_UUID=`curl -sf http://controller/feed/rel-${SW_VERSION}/install_uuid`
CONTROLLER_UUID=`curl -sf http://controller:${http_port}/feed/rel-${SW_VERSION}/install_uuid`
if [ $? -ne 0 ]
then
fatal_error "Unable to retrieve installation uuid from active controller"

View File

@ -42,9 +42,9 @@ def _tier_formatter(values):
def _print_cluster_show(obj):
fields = ['uuid', 'cluster_uuid', 'type', 'name', 'peers', 'tiers']
fields = ['uuid', 'cluster_uuid', 'type', 'name', 'peers', 'tiers', 'deployment_model']
labels = ['uuid', 'cluster_uuid', 'type', 'name', 'replication_groups',
'storage_tiers']
'storage_tiers', 'deployment_model']
data = [(f, getattr(obj, f, '')) for f in fields]
utils.print_tuple_list(
data, labels, formatters={'peers': _peer_formatter,
@ -65,7 +65,7 @@ def do_cluster_list(cc, args):
"""List Clusters."""
clusters = cc.cluster.list()
fields = ['uuid', 'cluster_uuid', 'type', 'name']
fields = ['uuid', 'cluster_uuid', 'type', 'name', 'deployment_model']
utils.print_list(clusters, fields, fields, sortby=1)

View File

@ -1,2 +1,2 @@
SRC_DIR="sysinv"
TIS_PATCH_VER=297
TIS_PATCH_VER=299

View File

@ -98,6 +98,7 @@ systemconfig.helm_plugins =
rabbitmq = sysinv.helm.rabbitmq:RabbitmqHelm
rbd-provisioner = sysinv.helm.rbd_provisioner:RbdProvisionerHelm
ceph-pools-audit = sysinv.helm.ceph_pools_audit:CephPoolsAuditHelm
helm-toolkit = sysinv.helm.helm_toolkit:HelmToolkitHelm
sysinv.agent.lldp.drivers =
lldpd = sysinv.agent.lldp.drivers.lldpd.driver:SysinvLldpdAgentDriver

View File

@ -131,8 +131,6 @@ class CephMon(base.APIBase):
'ceph_mon_gib',
'state',
'task',
'ceph_mon_dev_ctrl0',
'ceph_mon_dev_ctrl1',
'hostname'])
if ceph_mon.device_path:

View File

@ -35,6 +35,7 @@ from sysinv.api.controllers.v1 import types
from sysinv.api.controllers.v1 import utils
from sysinv.api.controllers.v1 import storage_tier as storage_tier_api
from sysinv.api.controllers.v1.query import Query
from sysinv.common import ceph
from sysinv.common import constants
from sysinv.common import exception
from sysinv.common import utils as cutils
@ -98,6 +99,9 @@ class Cluster(base.APIBase):
name = wtypes.text
"User defined name of the cluster"
deployment_model = wtypes.text
"Deployment model used by cluster"
peers = types.MultiType([list])
"List of peers info in the cluster"
@ -132,7 +136,18 @@ class Cluster(base.APIBase):
if not expand:
cluster.unset_fields_except(['uuid', 'cluster_uuid',
'type', 'name', 'peers',
'tiers'])
'tiers', 'deployment_model'])
# All Ceph type clusters have the same storage model
if cluster.type == constants.CLUSTER_TYPE_CEPH:
try:
# Storage model is defined dynamically, displayed by CLI
# and used by Horizon.
cluster.deployment_model = ceph.get_ceph_storage_model()
except Exception:
cluster.deployment_model = constants.CEPH_UNDEFINED_MODEL
else:
cluster.deployment_model = None
cluster.links = [link.Link.make_link('self', pecan.request.host_url,
'clusters', cluster.uuid),
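
The fallback added above can be read as a small standalone rule; this sketch mirrors it with the constants inlined (names follow sysinv.common.constants):

CLUSTER_TYPE_CEPH = 'ceph'
CEPH_UNDEFINED_MODEL = 'undefined'

def resolve_deployment_model(cluster_type, get_model):
    # Only Ceph clusters carry a storage deployment model.
    if cluster_type != CLUSTER_TYPE_CEPH:
        return None
    try:
        return get_model()
    except Exception:
        # Detection can fail early in configuration; report
        # 'undefined' rather than failing the API response.
        return CEPH_UNDEFINED_MODEL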

View File

@ -46,6 +46,7 @@ from sysinv.api.controllers.v1 import cpu_utils
from sysinv.api.controllers.v1 import types
from sysinv.api.controllers.v1 import port as port_api
from sysinv.api.controllers.v1 import ethernet_port as ethernet_port_api
from sysinv.common import ceph
from sysinv.common import constants
from sysinv.common import exception
from sysinv.common import utils as cutils
@ -898,29 +899,25 @@ class ProfileController(rest.RestController):
if 'profiletype' in profile_dict and profile_dict['profiletype']:
profiletype = profile_dict['profiletype']
if profiletype == constants.PROFILE_TYPE_STORAGE:
stor_model = ceph.get_ceph_storage_model()
if constants.WORKER in from_ihost.subfunctions:
# combo has no ceph
profiletype = constants.PROFILE_TYPE_LOCAL_STORAGE
LOG.info("No ceph backend for stor profile, assuming "
"%s" % profiletype)
elif constants.CONTROLLER in from_ihost.subfunctions:
elif not StorageBackendConfig.has_backend_configured(
pecan.request.dbapi,
constants.CINDER_BACKEND_CEPH
):
raise wsme.exc.ClientSideError(_("Storage profiles "
"not applicable for %s with subfunctions %s." %
(from_ihost.hostname, from_ihost.subfunctions)))
elif constants.STORAGE in from_ihost.subfunctions:
if not StorageBackendConfig.has_backend_configured(
pecan.request.dbapi,
constants.CINDER_BACKEND_CEPH
):
raise wsme.exc.ClientSideError(_("Storage profiles "
"not applicable for %s with subfunctions %s "
"and non Ceph backend." %
(from_ihost.hostname, from_ihost.subfunctions)))
else:
"not applicable for %s with non Ceph backend." %
from_ihost.hostname))
elif (from_ihost.personality == constants.CONTROLLER and
stor_model != constants.CEPH_CONTROLLER_MODEL):
raise wsme.exc.ClientSideError(_("Storage profiles "
"not applicable for %s with unsupported "
"subfunctions %s." %
(from_ihost.hostname, from_ihost.subfunctions)))
"not applicable for %s as storage deployment "
"model is: %s" %
(from_ihost.hostname, stor_model)))
# Create profile
LOG.debug("iprofileihost is: %s " % profile_dict)

View File

@ -10,6 +10,8 @@
import copy
import netaddr
import pecan
from fm_api import constants as fm_constants
from fm_api import fm_api
from pecan import rest
import six
import wsme
@ -938,6 +940,36 @@ class ServiceParameterController(rest.RestController):
))
raise wsme.exc.ClientSideError(msg)
@staticmethod
def _service_parameter_apply_semantic_check_http():
"""Semantic checks for the HTTP Service Type """
# check if a patching operation in progress
fm = fm_api.FaultAPIs()
alarms = fm.get_faults_by_id(fm_constants.
FM_ALARM_ID_PATCH_IN_PROGRESS)
if alarms is not None:
msg = _("Unable to apply %s service parameters. "
"A patching operation is in progress."
% constants.SERVICE_TYPE_HTTP)
raise wsme.exc.ClientSideError(msg)
# check if all hosts are unlocked/enabled
hosts = pecan.request.dbapi.ihost_get_list()
for host in hosts:
if (host['administrative'] == constants.ADMIN_UNLOCKED and
host['operational'] == constants.OPERATIONAL_ENABLED):
continue
else:
# the host name might be None for a newly discovered host
if not host['hostname']:
host_id = host['uuid']
else:
host_id = host['hostname']
raise wsme.exc.ClientSideError(
_("Host %s must be unlocked and enabled." % host_id))
def _service_parameter_apply_semantic_check(self, service):
"""Semantic checks for the service-parameter-apply command """
@ -987,6 +1019,9 @@ class ServiceParameterController(rest.RestController):
if service == constants.SERVICE_TYPE_PLATFORM:
self._service_parameter_apply_semantic_check_mtce()
if service == constants.SERVICE_TYPE_HTTP:
self._service_parameter_apply_semantic_check_http()
def _get_service(self, body):
service = body.get('service') or ""
if not service:

View File

@ -370,7 +370,8 @@ class CephApiOperator(object):
def crushmap_tiers_add(self):
"""Add all custom storage tiers to the crushmap. """
cluster = pecan.request.dbapi.clusters_get_all(name='ceph_cluster')
ceph_cluster_name = constants.CLUSTER_CEPH_DEFAULT_NAME
cluster = pecan.request.dbapi.clusters_get_all(name=ceph_cluster_name)
# get the list of tiers
tiers = pecan.request.dbapi.storage_tier_get_by_cluster(
@ -404,7 +405,8 @@ class CephApiOperator(object):
def _crushmap_tiers_bucket_add(self, bucket_name, bucket_type):
"""Add a new bucket to all the tiers in the crushmap. """
cluster = pecan.request.dbapi.clusters_get_all(name='ceph_cluster')
ceph_cluster_name = constants.CLUSTER_CEPH_DEFAULT_NAME
cluster = pecan.request.dbapi.clusters_get_all(name=ceph_cluster_name)
tiers = pecan.request.dbapi.storage_tier_get_by_cluster(
cluster[0].uuid)
for t in tiers:
@ -418,7 +420,8 @@ class CephApiOperator(object):
def _crushmap_tiers_bucket_remove(self, bucket_name):
"""Remove an existing bucket from all the tiers in the crushmap. """
cluster = pecan.request.dbapi.clusters_get_all(name='ceph_cluster')
ceph_cluster_name = constants.CLUSTER_CEPH_DEFAULT_NAME
cluster = pecan.request.dbapi.clusters_get_all(name=ceph_cluster_name)
tiers = pecan.request.dbapi.storage_tier_get_by_cluster(
cluster[0].uuid)
for t in tiers:
@ -433,7 +436,8 @@ class CephApiOperator(object):
ancestor_name):
"""Move common bucket in all the tiers in the crushmap. """
cluster = pecan.request.dbapi.clusters_get_all(name='ceph_cluster')
ceph_cluster_name = constants.CLUSTER_CEPH_DEFAULT_NAME
cluster = pecan.request.dbapi.clusters_get_all(name=ceph_cluster_name)
tiers = pecan.request.dbapi.storage_tier_get_by_cluster(
cluster[0].uuid)
for t in tiers:
@ -769,8 +773,6 @@ def get_ceph_storage_model(dbapi=None):
for chost in controller_hosts:
istors = dbapi.istor_get_by_ihost(chost['uuid'])
if len(istors):
LOG.info("Controller host %s has OSDs configured. System has ceph "
"controller storage." % chost['hostname'])
is_controller_model = True
break

View File

@ -1,5 +1,5 @@
#
# Copyright (c) 2013-2018 Wind River Systems, Inc.
# Copyright (c) 2013-2019 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
@ -416,6 +416,10 @@ GLANCE_BACKEND_RBD = 'rbd'
GLANCE_BACKEND_HTTP = 'http'
GLANCE_BACKEND_GLANCE = 'glance'
# Clusters
CLUSTER_TYPE_CEPH = "ceph"
CLUSTER_CEPH_DEFAULT_NAME = "ceph_cluster"
# Storage Tiers: types (aligns with polymorphic backends)
SB_TIER_TYPE_CEPH = SB_TYPE_CEPH
SB_TIER_SUPPORTED = [SB_TIER_TYPE_CEPH]
@ -471,8 +475,8 @@ SB_CONFIGURATION_TIMEOUT = 1200
# Controller model: OSDs are on controllers, no storage nodes can
# be defined.
# Storage model: OSDs are on dedicated storage nodes.
CEPH_STORAGE_MODEL = 'storage'
CEPH_CONTROLLER_MODEL = 'controller'
CEPH_STORAGE_MODEL = 'storage-nodes'
CEPH_CONTROLLER_MODEL = 'controller-nodes'
CEPH_AIO_SX_MODEL = 'aio-sx'
CEPH_UNDEFINED_MODEL = 'undefined'
@ -902,6 +906,7 @@ SERVICE_TYPE_AODH = 'aodh'
SERVICE_TYPE_GLANCE = 'glance'
SERVICE_TYPE_BARBICAN = 'barbican'
SERVICE_TYPE_DOCKER = 'docker'
SERVICE_TYPE_HTTP = 'http'
SERVICE_PARAM_SECTION_MURANO_RABBITMQ = 'rabbitmq'
SERVICE_PARAM_SECTION_MURANO_ENGINE = 'engine'
@ -1102,6 +1107,13 @@ SERVICE_PARAM_NAME_DOCKER_NO_PROXY = 'no_proxy'
# default filesystem size to 25 MB
SERVICE_PARAM_SWIFT_FS_SIZE_MB_DEFAULT = 25
# HTTP Service Parameters
SERVICE_PARAM_SECTION_HTTP_CONFIG = 'config'
SERVICE_PARAM_HTTP_PORT_HTTP = 'http_port'
SERVICE_PARAM_HTTP_PORT_HTTPS = 'https_port'
SERVICE_PARAM_HTTP_PORT_HTTP_DEFAULT = 8080
SERVICE_PARAM_HTTP_PORT_HTTPS_DEFAULT = 8443
# TIS part number, CPE = combined load, STD = standard load
TIS_STD_BUILD = 'Standard'
TIS_AIO_BUILD = 'All-in-one'
@ -1415,6 +1427,7 @@ HELM_CHART_PANKO = 'panko'
HELM_CHART_RABBITMQ = 'rabbitmq'
HELM_CHART_RBD_PROVISIONER = 'rbd-provisioner'
HELM_CHART_CEPH_POOLS_AUDIT = 'ceph-pools-audit'
HELM_CHART_HELM_TOOLKIT = 'helm-toolkit'
SUPPORTED_HELM_CHARTS = [
HELM_CHART_AODH,
@ -1441,6 +1454,7 @@ SUPPORTED_HELM_CHARTS = [
HELM_CHART_RABBITMQ,
HELM_CHART_RBD_PROVISIONER,
HELM_CHART_CEPH_POOLS_AUDIT,
HELM_CHART_HELM_TOOLKIT,
]
# Helm: Supported application (aka chart bundles)
@ -1472,7 +1486,8 @@ SUPPORTED_HELM_APP_CHARTS = {
HELM_CHART_GNOCCHI,
HELM_CHART_CEILOMETER,
HELM_CHART_PANKO,
HELM_CHART_AODH
HELM_CHART_AODH,
HELM_CHART_HELM_TOOLKIT,
]
}

View File

@ -1,4 +1,4 @@
# Copyright (c) 2017-2018 Wind River Systems, Inc.
# Copyright (c) 2017-2019 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
@ -1516,6 +1516,23 @@ DOCKER_PROXY_PARAMETER_RESOURCE = {
'platform::docker::params::no_proxy',
}
HTTPD_PORT_PARAMETER_OPTIONAL = [
constants.SERVICE_PARAM_HTTP_PORT_HTTP,
constants.SERVICE_PARAM_HTTP_PORT_HTTPS,
]
HTTPD_PORT_PARAMETER_VALIDATOR = {
constants.SERVICE_PARAM_HTTP_PORT_HTTP: _validate_integer,
constants.SERVICE_PARAM_HTTP_PORT_HTTPS: _validate_integer,
}
HTTPD_PORT_PARAMETER_RESOURCE = {
constants.SERVICE_PARAM_HTTP_PORT_HTTP:
'openstack::horizon::params::http_port',
constants.SERVICE_PARAM_HTTP_PORT_HTTPS:
'openstack::horizon::params::https_port',
}
# Service Parameter Schema
SERVICE_PARAM_MANDATORY = 'mandatory'
SERVICE_PARAM_OPTIONAL = 'optional'
@ -1699,6 +1716,13 @@ SERVICE_PARAMETER_SCHEMA = {
SERVICE_PARAM_RESOURCE: DOCKER_PROXY_PARAMETER_RESOURCE,
},
},
constants.SERVICE_TYPE_HTTP: {
constants.SERVICE_PARAM_SECTION_HTTP_CONFIG: {
SERVICE_PARAM_OPTIONAL: HTTPD_PORT_PARAMETER_OPTIONAL,
SERVICE_PARAM_VALIDATOR: HTTPD_PORT_PARAMETER_VALIDATOR,
SERVICE_PARAM_RESOURCE: HTTPD_PORT_PARAMETER_RESOURCE,
},
},
}
SERVICE_PARAMETER_MAX_LENGTH = 255

View File

@ -18,7 +18,7 @@
# License for the specific language governing permissions and limitations
# under the License.
#
# Copyright (c) 2013-2018 Wind River Systems, Inc.
# Copyright (c) 2013-2019 Wind River Systems, Inc.
#
@ -1883,3 +1883,18 @@ def find_manifest_file(path):
return None
return mfiles
def get_http_port(dbapi):
http_port = constants.SERVICE_PARAM_HTTP_PORT_HTTP_DEFAULT
try:
http_port = int(dbapi.service_parameter_get_one(
constants.SERVICE_TYPE_HTTP,
constants.SERVICE_PARAM_SECTION_HTTP_CONFIG,
constants.SERVICE_PARAM_HTTP_PORT_HTTP).value)
except exception.NotFound:
LOG.error("Failed to find service parameter for %s,%s,%s" % (
constants.SERVICE_TYPE_HTTP,
constants.SERVICE_PARAM_SECTION_HTTP_CONFIG,
constants.SERVICE_PARAM_HTTP_PORT_HTTP))
return http_port
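
For context, this helper is consumed later in the commit for both the pxeboot URL and the helm chart locations; a sketch of those call sites, where dbapi is whatever database handle the caller already holds:

def example_urls(dbapi):
    port = get_http_port(dbapi)
    base_url = 'http://pxecontroller:%d' % port
    chart_url = 'http://controller:%d/helm_charts/%s-0.1.0.tgz' % (port, 'nova')
    return base_url, chart_url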

View File

@ -673,13 +673,9 @@ class AppOperator(object):
missing_overrides = []
available_overrides = []
excluded = ['helm-toolkit']
for chart in charts:
overrides = chart.namespace + '-' + chart.name + '.yaml'
if chart.name in excluded:
LOG.debug("Skipping overrides %s " % overrides)
continue
overrides_file = os.path.join(common.HELM_OVERRIDES_PATH,
overrides)
if not os.path.exists(overrides_file):

View File

@ -16,7 +16,7 @@
# License for the specific language governing permissions and limitations
# under the License.
#
# Copyright (c) 2013-2018 Wind River Systems, Inc.
# Copyright (c) 2013-2019 Wind River Systems, Inc.
#
"""Conduct all activity related system inventory.
@ -517,6 +517,16 @@ class ConductorManager(service.PeriodicService):
'section': constants.SERVICE_PARAM_SECTION_SWIFT_CONFIG,
'name': constants.SERVICE_PARAM_NAME_SWIFT_FS_SIZE_MB,
'value': constants.SERVICE_PARAM_SWIFT_FS_SIZE_MB_DEFAULT},
{'service': constants.SERVICE_TYPE_HTTP,
'section': constants.SERVICE_PARAM_SECTION_HTTP_CONFIG,
'name': constants.SERVICE_PARAM_HTTP_PORT_HTTP,
'value': constants.SERVICE_PARAM_HTTP_PORT_HTTP_DEFAULT
},
{'service': constants.SERVICE_TYPE_HTTP,
'section': constants.SERVICE_PARAM_SECTION_HTTP_CONFIG,
'name': constants.SERVICE_PARAM_HTTP_PORT_HTTPS,
'value': constants.SERVICE_PARAM_HTTP_PORT_HTTPS_DEFAULT
},
]
for i in range(2, constants.SERVICE_PARAM_MAX_HPE3PAR + 1):
@ -1054,6 +1064,9 @@ class ConductorManager(service.PeriodicService):
sw_version != tsc.SW_VERSION_1803:
install_opts += ['-k', system.security_feature]
base_url = "http://pxecontroller:%d" % cutils.get_http_port(self.dbapi)
install_opts += ['-l', base_url]
if host['mgmt_mac']:
dashed_mac = host["mgmt_mac"].replace(":", "-")
pxeboot_update = "/usr/sbin/pxeboot-update-%s.sh" % sw_version
@ -1068,7 +1081,6 @@ class ConductorManager(service.PeriodicService):
os.remove("/pxeboot/pxelinux.cfg/efi-01-" + dashed_mac)
except OSError:
pass
with open(os.devnull, "w") as fnull:
try:
subprocess.check_call(
@ -7086,6 +7098,12 @@ class ConductorManager(service.PeriodicService):
config_uuid = self._config_update_hosts(context,
[constants.CONTROLLER,
constants.WORKER])
elif service == constants.SERVICE_TYPE_HTTP:
config_uuid = self._config_update_hosts(context,
[constants.CONTROLLER,
constants.WORKER,
constants.STORAGE])
else:
# All other services
personalities = [constants.CONTROLLER]
@ -7199,6 +7217,28 @@ class ConductorManager(service.PeriodicService):
}
self._config_apply_runtime_manifest(context, config_uuid, config_dict)
elif service == constants.SERVICE_TYPE_HTTP:
# the platform::config class will be applied to configure the
# http port
personalities = [constants.WORKER, constants.STORAGE]
config_dict = {
"personalities": personalities,
"classes": ['platform::patching::runtime']}
self._config_apply_runtime_manifest(context, config_uuid,
config_dict)
# the runtime classes on controllers will be applied
personalities = [constants.CONTROLLER]
config_dict = {
"personalities": personalities,
"classes": ['openstack::lighttpd::runtime',
'platform::helm::runtime',
'platform::firewall::runtime',
'platform::patching::runtime']
}
self._config_apply_runtime_manifest(context, config_uuid,
config_dict)
def update_security_feature_config(self, context):
"""Update the kernel options configuration"""
personalities = constants.PERSONALITIES

View File

@ -17,16 +17,10 @@ class AodhHelm(openstack.OpenstackBaseHelm):
"""Class to encapsulate helm operations for the aodh chart"""
CHART = constants.HELM_CHART_AODH
SUPPORTED_NAMESPACES = [
common.HELM_NS_OPENSTACK
]
SERVICE_NAME = 'aodh'
AUTH_USERS = ['aodh']
def get_namespaces(self):
return self.SUPPORTED_NAMESPACES
def get_overrides(self, namespace=None):
overrides = {
common.HELM_NS_OPENSTACK: {

View File

@ -14,15 +14,9 @@ class BarbicanHelm(openstack.OpenstackBaseHelm):
"""Class to encapsulate helm operations for the barbican chart"""
CHART = constants.HELM_CHART_BARBICAN
SUPPORTED_NAMESPACES = [
common.HELM_NS_OPENSTACK
]
SERVICE_NAME = constants.HELM_CHART_BARBICAN
def get_namespaces(self):
return self.SUPPORTED_NAMESPACES
def get_overrides(self, namespace=None):
overrides = {
common.HELM_NS_OPENSTACK: {

View File

@ -27,6 +27,7 @@ class BaseHelm(object):
DEFAULT_REGION_NAME = 'RegionOne'
CEPH_MON_SERVICE_PORT = 6789
SUPPORTED_NAMESPACES = []
def __init__(self, operator):
self._operator = operator
@ -64,6 +65,10 @@ class BaseHelm(object):
def quoted_str(value):
return quoted_str(value)
def get_chart_location(self, chart_name):
return 'http://controller:%s/helm_charts/%s-0.1.0.tgz' % (
utils.get_http_port(self.dbapi), chart_name)
@staticmethod
def _generate_random_password(length=16):
suffix = "Ti0*"
@ -194,6 +199,26 @@ class BaseHelm(object):
constants.CONTROLLER_0_HOSTNAME, constants.NETWORK_TYPE_MGMT)
return address.address
def get_namespaces(self):
"""
Return list of namespaces supported by this chart
If a chart supports namespaces other than common.HELM_NS_OPENSTACK
then it can override self.SUPPORTED_NAMESPACES as desired.
"""
return self.SUPPORTED_NAMESPACES
def get_overrides(self, namespace=None):
"""
Return chart-specific values overrides
This allows a helm chart class to specify overrides (in Helm format)
for the "values" section of a helm chart.
May be left blank to indicate that there are no additional overrides.
"""
return {}
def get_meta_overrides(self, namespace):
"""
Return Armada-formatted chart-specific meta-overrides
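
Taken together with the OpenstackBaseHelm change later in this diff, this refactor lets chart classes drop their boilerplate get_namespaces() overrides. A hypothetical chart class written against the new base (ExampleHelm and its values are illustrative, not part of this commit):

from sysinv.helm import common
from sysinv.helm import openstack

class ExampleHelm(openstack.OpenstackBaseHelm):
    """Hypothetical chart relying on the inherited namespace handling"""

    CHART = 'example'
    SERVICE_NAME = 'example'

    def get_overrides(self, namespace=None):
        overrides = {
            common.HELM_NS_OPENSTACK: {
                'pod': {'replicas': {'server': self._num_controllers()}},
            }
        }
        if namespace in self.SUPPORTED_NAMESPACES:
            return overrides[namespace]
        return overrides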

View File

@ -18,16 +18,10 @@ class CeilometerHelm(openstack.OpenstackBaseHelm):
"""Class to encapsulate helm operations for the ceilometer chart"""
CHART = constants.HELM_CHART_CEILOMETER
SUPPORTED_NAMESPACES = [
common.HELM_NS_OPENSTACK
]
SERVICE_NAME = 'ceilometer'
AUTH_USERS = ['ceilometer']
def get_namespaces(self):
return self.SUPPORTED_NAMESPACES
def get_overrides(self, namespace=None):
overrides = {
common.HELM_NS_OPENSTACK: {

View File

@ -1,5 +1,5 @@
#
# Copyright (c) 2018 Wind River Systems, Inc.
# Copyright (c) 2018-2019 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
@ -19,17 +19,11 @@ class CinderHelm(openstack.OpenstackBaseHelm):
"""Class to encapsulate helm operations for the cinder chart"""
CHART = constants.HELM_CHART_CINDER
SUPPORTED_NAMESPACES = [
common.HELM_NS_OPENSTACK
]
SERVICE_NAME = 'cinder'
SERVICE_TYPE = 'volume'
AUTH_USERS = ['cinder']
def get_namespaces(self):
return self.SUPPORTED_NAMESPACES
def get_overrides(self, namespace=None):
overrides = {
common.HELM_NS_OPENSTACK: {
@ -47,7 +41,6 @@ class CinderHelm(openstack.OpenstackBaseHelm):
'ceph': self._get_conf_ceph_overrides(),
'backends': self._get_conf_backends_overrides(),
},
'storage': 'rbd',
'endpoints': self._get_endpoints_overrides(),
}
}

View File

@ -23,6 +23,7 @@ HELM_NS_DEFAULT = 'default'
HELM_NS_KUBE_SYSTEM = 'kube-system'
HELM_NS_NFS = 'nfs'
HELM_NS_OPENSTACK = 'openstack'
HELM_NS_HELM_TOOLKIT = 'helm-toolkit'
# Services
# Matches configassistant.py value => Should change to STARLINGX

View File

@ -23,12 +23,8 @@ class GarbdHelm(base.BaseHelm):
SERVICE_NAME = 'mariadb'
CHART = constants.HELM_CHART_GARBD
SUPPORTED_NAMESPACES = [
common.HELM_NS_OPENSTACK
]
def get_namespaces(self):
return self.SUPPORTED_NAMESPACES
SUPPORTED_NAMESPACES = \
base.BaseHelm.SUPPORTED_NAMESPACES + [common.HELM_NS_OPENSTACK]
def get_meta_overrides(self, namespace):

View File

@ -23,17 +23,11 @@ class GlanceHelm(openstack.OpenstackBaseHelm):
"""Class to encapsulate helm operations for the glance chart"""
CHART = constants.HELM_CHART_GLANCE
SUPPORTED_NAMESPACES = [
common.HELM_NS_OPENSTACK
]
SERVICE_NAME = 'glance'
SERVICE_TYPE = 'image'
AUTH_USERS = ['glance']
def get_namespaces(self):
return self.SUPPORTED_NAMESPACES
def get_overrides(self, namespace=None):
overrides = {
common.HELM_NS_OPENSTACK: {

View File

@ -17,16 +17,10 @@ class GnocchiHelm(openstack.OpenstackBaseHelm):
"""Class to encapsulate helm operations for the gnocchi chart"""
CHART = constants.HELM_CHART_GNOCCHI
SUPPORTED_NAMESPACES = [
common.HELM_NS_OPENSTACK
]
SERVICE_NAME = 'gnocchi'
AUTH_USERS = ['gnocchi']
def get_namespaces(self):
return self.SUPPORTED_NAMESPACES
def get_overrides(self, namespace=None):
overrides = {
common.HELM_NS_OPENSTACK: {

View File

@ -17,16 +17,10 @@ class HeatHelm(openstack.OpenstackBaseHelm):
"""Class to encapsulate helm operations for the heat chart"""
CHART = constants.HELM_CHART_HEAT
SUPPORTED_NAMESPACES = [
common.HELM_NS_OPENSTACK
]
SERVICE_NAME = constants.HELM_CHART_HEAT
AUTH_USERS = ['heat', 'heat_trustee', 'heat_stack_user']
def get_namespaces(self):
return self.SUPPORTED_NAMESPACES
def get_overrides(self, namespace=None):
overrides = {
common.HELM_NS_OPENSTACK: {

View File

@ -231,18 +231,42 @@ class HelmOperator(object):
LOG.info(e)
return overrides
@staticmethod
def _add_armada_override_header(chart_name, namespace, overrides):
def _get_helm_chart_location(self, chart_name):
"""Get supported chart location.
This method returns the download location for a given chart.
:param chart_name: name of the chart
:returns: a URL as location or None if the chart is not supported
"""
if chart_name in self.implemented_charts:
return self.chart_operators[chart_name].get_chart_location(
chart_name)
return None
def _add_armada_override_header(self, chart_name, namespace, overrides):
use_chart_name_only = [common.HELM_NS_HELM_TOOLKIT]
if namespace in use_chart_name_only:
name = chart_name
else:
name = namespace + '-' + chart_name
new_overrides = {
'schema': 'armada/Chart/v1',
'metadata': {
'schema': 'metadata/Document/v1',
'name': namespace + '-' + chart_name
'name': name
},
'data': {
'values': overrides
}
}
location = self._get_helm_chart_location(chart_name)
if location:
new_overrides['data'].update({
'source': {
'location': location
}
})
return new_overrides
def merge_overrides(self, file_overrides=[], set_overrides=[]):
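
A worked, standalone rendering of the header construction above, with the chart location inlined (the URL uses the commit's default port of 8080; the values dict is a placeholder):

def add_armada_override_header(chart_name, namespace, overrides,
                               location=None):
    use_chart_name_only = ['helm-toolkit']
    if namespace in use_chart_name_only:
        name = chart_name
    else:
        name = namespace + '-' + chart_name
    doc = {
        'schema': 'armada/Chart/v1',
        'metadata': {'schema': 'metadata/Document/v1', 'name': name},
        'data': {'values': overrides},
    }
    if location:
        doc['data'].update({'source': {'location': location}})
    return doc

print(add_armada_override_header(
    'nova', 'openstack', {'conf': {}},
    'http://controller:8080/helm_charts/nova-0.1.0.tgz'))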

View File

@ -0,0 +1,38 @@
#
# Copyright (c) 2019 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from sysinv.common import constants
from sysinv.common import exception
from sysinv.openstack.common import log as logging
from sysinv.helm import common
from sysinv.helm import base
LOG = logging.getLogger(__name__)
class HelmToolkitHelm(base.BaseHelm):
"""Class to encapsulate helm operations for the helm toolkit"""
CHART = constants.HELM_CHART_HELM_TOOLKIT
SUPPORTED_NAMESPACES = [
common.HELM_NS_HELM_TOOLKIT,
]
def get_namespaces(self):
return self.SUPPORTED_NAMESPACES
def get_overrides(self, namespace=None):
overrides = {
common.HELM_NS_HELM_TOOLKIT: {}
}
if namespace in self.SUPPORTED_NAMESPACES:
return overrides[namespace]
elif namespace:
raise exception.InvalidHelmNamespace(chart=self.CHART,
namespace=namespace)
else:
return overrides

View File

@ -17,15 +17,9 @@ class HorizonHelm(openstack.OpenstackBaseHelm):
"""Class to encapsulate helm operations for the horizon chart"""
CHART = constants.HELM_CHART_HORIZON
SUPPORTED_NAMESPACES = [
common.HELM_NS_OPENSTACK
]
SERVICE_NAME = 'horizon'
def get_namespaces(self):
return self.SUPPORTED_NAMESPACES
def get_overrides(self, namespace=None):
overrides = {
common.HELM_NS_OPENSTACK: {

View File

@ -17,14 +17,12 @@ class IngressHelm(base.BaseHelm):
"""Class to encapsulate helm operations for the ingress chart"""
CHART = constants.HELM_CHART_INGRESS
SUPPORTED_NAMESPACES = [
SUPPORTED_NAMESPACES = base.BaseHelm.SUPPORTED_NAMESPACES + [
common.HELM_NS_KUBE_SYSTEM,
common.HELM_NS_OPENSTACK
]
def get_namespaces(self):
return self.SUPPORTED_NAMESPACES
def get_overrides(self, namespace=None):
# Currently have conflicts with ports 80 and 8080, use 8081 for now
overrides = {

View File

@ -15,15 +15,9 @@ class IronicHelm(openstack.OpenstackBaseHelm):
"""Class to encapsulate helm operations for the ironic chart"""
CHART = constants.HELM_CHART_IRONIC
SUPPORTED_NAMESPACES = [
common.HELM_NS_OPENSTACK
]
SERVICE_NAME = constants.HELM_CHART_IRONIC
def get_namespaces(self):
return self.SUPPORTED_NAMESPACES
def get_overrides(self, namespace=None):
overrides = {
common.HELM_NS_OPENSTACK: {

View File

@ -23,18 +23,12 @@ class KeystoneHelm(openstack.OpenstackBaseHelm):
"""Class to encapsulate helm operations for the keystone chart"""
CHART = constants.HELM_CHART_KEYSTONE
SUPPORTED_NAMESPACES = [
common.HELM_NS_OPENSTACK
]
SERVICE_NAME = constants.HELM_CHART_KEYSTONE
SERVICE_PATH = '/v3'
DEFAULT_DOMAIN_NAME = 'default'
def get_namespaces(self):
return self.SUPPORTED_NAMESPACES
def get_overrides(self, namespace=None):
overrides = {
common.HELM_NS_OPENSTACK: {

View File

@ -17,15 +17,9 @@ class LibvirtHelm(openstack.OpenstackBaseHelm):
"""Class to encapsulate helm operations for the libvirt chart"""
CHART = constants.HELM_CHART_LIBVIRT
SUPPORTED_NAMESPACES = [
common.HELM_NS_OPENSTACK
]
SERVICE_NAME = 'libvirt'
def get_namespaces(self):
return self.SUPPORTED_NAMESPACES
def get_overrides(self, namespace=None):
overrides = {
common.HELM_NS_OPENSTACK: {

View File

@ -15,15 +15,9 @@ class MagnumHelm(openstack.OpenstackBaseHelm):
"""Class to encapsulate helm operations for the magnum chart"""
CHART = constants.HELM_CHART_MAGNUM
SUPPORTED_NAMESPACES = [
common.HELM_NS_OPENSTACK
]
SERVICE_NAME = constants.HELM_CHART_MAGNUM
def get_namespaces(self):
return self.SUPPORTED_NAMESPACES
def get_overrides(self, namespace=None):
overrides = {
common.HELM_NS_OPENSTACK: {

View File

@ -18,9 +18,6 @@ class MariadbHelm(openstack.OpenstackBaseHelm):
"""Class to encapsulate helm operations for the mariadb chart"""
CHART = constants.HELM_CHART_MARIADB
SUPPORTED_NAMESPACES = [
common.HELM_NS_OPENSTACK
]
def _num_server_replicas(self):
# For now we want to run with a single mariadb server pod for the
@ -30,9 +27,6 @@ class MariadbHelm(openstack.OpenstackBaseHelm):
else:
return self._num_controllers()
def get_namespaces(self):
return self.SUPPORTED_NAMESPACES
def get_overrides(self, namespace=None):
overrides = {
common.HELM_NS_OPENSTACK: {

View File

@ -17,12 +17,8 @@ class MemcachedHelm(base.BaseHelm):
"""Class to encapsulate helm operations for the memcached chart"""
CHART = constants.HELM_CHART_MEMCACHED
SUPPORTED_NAMESPACES = [
common.HELM_NS_OPENSTACK
]
def get_namespaces(self):
return self.SUPPORTED_NAMESPACES
SUPPORTED_NAMESPACES = \
base.BaseHelm.SUPPORTED_NAMESPACES + [common.HELM_NS_OPENSTACK]
def get_overrides(self, namespace=None):
overrides = {

View File

@ -23,17 +23,11 @@ class NeutronHelm(openstack.OpenstackBaseHelm):
"""Class to encapsulate helm operations for the neutron chart"""
CHART = constants.HELM_CHART_NEUTRON
SUPPORTED_NAMESPACES = [
common.HELM_NS_OPENSTACK
]
SERVICE_NAME = 'neutron'
AUTH_USERS = ['neutron']
SERVICE_USERS = ['nova']
def get_namespaces(self):
return self.SUPPORTED_NAMESPACES
def get_overrides(self, namespace=None):
overrides = {
common.HELM_NS_OPENSTACK: {

View File

@ -1,5 +1,5 @@
#
# Copyright (c) 2018 Wind River Systems, Inc.
# Copyright (c) 2018-2019 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
@ -17,38 +17,16 @@ from sysinv.helm import openstack
LOG = logging.getLogger(__name__)
SCHEDULER_FILTERS_COMMON = [
'RetryFilter',
'ComputeFilter',
'AvailabilityZoneFilter',
'AggregateInstanceExtraSpecsFilter',
'ComputeCapabilitiesFilter',
'ImagePropertiesFilter',
'NUMATopologyFilter',
'ServerGroupAffinityFilter',
'ServerGroupAntiAffinityFilter',
'PciPassthroughFilter',
'DiskFilter',
]
class NovaHelm(openstack.OpenstackBaseHelm):
"""Class to encapsulate helm operations for the nova chart"""
CHART = constants.HELM_CHART_NOVA
SUPPORTED_NAMESPACES = [
common.HELM_NS_OPENSTACK
]
SERVICE_NAME = 'nova'
AUTH_USERS = ['nova', 'placement']
SERVICE_USERS = ['neutron', 'ironic']
def get_namespaces(self):
return self.SUPPORTED_NAMESPACES
def get_overrides(self, namespace=None):
scheduler_filters = SCHEDULER_FILTERS_COMMON
ssh_privatekey, ssh_publickey = \
self._get_or_generate_ssh_keys(self.SERVICE_NAME, common.HELM_NS_OPENSTACK)
@ -63,103 +41,16 @@ class NovaHelm(openstack.OpenstackBaseHelm):
'consoleauth': self._num_controllers(),
'scheduler': self._num_controllers(),
# set replicas for novncproxy once it's validated.
},
'user': {
'nova': {
'uid': 0
}
}
},
'manifests': {
'cron_job_cell_setup': False,
'cron_job_service_cleaner': False
},
'conf': {
'ceph': {
'enabled': True
},
'nova': {
'DEFAULT': {
'default_mempages_size': 2048,
'reserved_host_memory_mb': 0,
'compute_monitors': 'cpu.virt_driver',
'running_deleted_instance_poll_interval': 60,
'mkisofs_cmd': '/usr/bin/genisoimage',
'network_allocate_retries': 2,
'force_raw_images': False,
'concurrent_disk_operations': 2,
# Set number of block device allocate retries and interval
# for volume create when VM boots and creates a new volume.
# The total block allocate retries time is set to 2 hours
# to satisfy the volume allocation time on slow RPM disks
# which may take 1 hour and a half per volume when several
# volumes are created in parallel.
'block_device_allocate_retries_interval': 3,
'block_device_allocate_retries': 2400,
'disk_allocation_ratio': 1.0,
'cpu_allocation_ratio': 16.0,
'ram_allocation_ratio': 1.0,
'remove_unused_original_minimum_age_seconds': 3600,
'enable_new_services': False,
'map_new_hosts': False
},
'libvirt': {
'virt_type': self._get_virt_type(),
'cpu_mode': 'none',
'live_migration_completion_timeout': 180,
'live_migration_permit_auto_converge': True,
'mem_stats_period_seconds': 0,
'rbd_secret_uuid': None,
'rbd_user': None,
# Allow up to 1 day for resize confirm
'remove_unused_resized_minimum_age_seconds': 86400
},
'database': {
'max_overflow': 64,
'idle_timeout': 60,
'max_pool_size': 1
},
'api_database': {
'max_overflow': 64,
'idle_timeout': 60,
'max_pool_size': 1
},
'cell0_database': {
'max_overflow': 64,
'idle_timeout': 60,
'max_pool_size': 1
},
'placement': {
'os_interface': 'internal'
},
'neutron': {
'default_floating_pool': 'public'
},
'notifications': {
'notification_format': 'unversioned'
},
'filter_scheduler': {
'enabled_filters': scheduler_filters,
'ram_weight_multiplier': 0.0,
'disk_weight_multiplier': 0.0,
'io_ops_weight_multiplier': -5.0,
'pci_weight_multiplier': 0.0,
'soft_affinity_weight_multiplier': 0.0,
'soft_anti_affinity_weight_multiplier': 0.0
},
'scheduler': {
'periodic_task_interval': -1,
'discover_hosts_in_cells_interval': 30
},
'metrics': {
'required': False,
'weight_setting_multi': 'vswitch.multi_avail=100.0',
'weight_setting': 'vswitch.max_avail=100.0'
},
'vnc': {
'novncproxy_base_url': self._get_novncproxy_base_url(),
},
'upgrade_levels': 'None'
}
},
'overrides': {
'nova_compute': {
@ -173,7 +64,6 @@ class NovaHelm(openstack.OpenstackBaseHelm):
'images': self._get_images_overrides(),
'network': {
'sshd': {
'enabled': True,
'from_subnet': self._get_ssh_subnet(),
}
}

View File

@ -17,16 +17,10 @@ class NovaApiProxyHelm(openstack.OpenstackBaseHelm):
"""Class to encapsulate helm operations for the nova chart"""
CHART = constants.HELM_CHART_NOVA_API_PROXY
SUPPORTED_NAMESPACES = [
common.HELM_NS_OPENSTACK
]
SERVICE_NAME = 'nova-api-proxy'
AUTH_USERS = ['nova']
def get_namespaces(self):
return self.SUPPORTED_NAMESPACES
def get_overrides(self, namespace=None):
overrides = {

View File

@ -21,6 +21,9 @@ LOG = log.getLogger(__name__)
class OpenstackBaseHelm(base.BaseHelm):
"""Class to encapsulate Openstack service operations for helm"""
SUPPORTED_NAMESPACES = \
base.BaseHelm.SUPPORTED_NAMESPACES + [common.HELM_NS_OPENSTACK]
def _get_service_config(self, service):
configs = self.context.setdefault('_service_configs', {})
if service not in configs:

View File

@ -17,12 +17,6 @@ class OpenvswitchHelm(openstack.OpenstackBaseHelm):
"""Class to encapsulate helm operations for the openvswitch chart"""
CHART = constants.HELM_CHART_OPENVSWITCH
SUPPORTED_NAMESPACES = [
common.HELM_NS_OPENSTACK
]
def get_namespaces(self):
return self.SUPPORTED_NAMESPACES
def get_overrides(self, namespace=None):
overrides = {

View File

@ -17,16 +17,10 @@ class PankoHelm(openstack.OpenstackBaseHelm):
"""Class to encapsulate helm operations for the panko chart"""
CHART = constants.HELM_CHART_PANKO
SUPPORTED_NAMESPACES = [
common.HELM_NS_OPENSTACK
]
SERVICE_NAME = 'panko'
AUTH_USERS = ['panko']
def get_namespaces(self):
return self.SUPPORTED_NAMESPACES
def get_overrides(self, namespace=None):
overrides = {
common.HELM_NS_OPENSTACK: {

View File

@ -17,12 +17,6 @@ class RabbitmqHelm(openstack.OpenstackBaseHelm):
"""Class to encapsulate helm operations for the rabbitmq chart"""
CHART = constants.HELM_CHART_RABBITMQ
SUPPORTED_NAMESPACES = [
common.HELM_NS_OPENSTACK
]
def get_namespaces(self):
return self.SUPPORTED_NAMESPACES
def get_overrides(self, namespace=None):
overrides = {

View File

@ -19,16 +19,12 @@ class RbdProvisionerHelm(base.BaseHelm):
"""Class to encapsulate helm operations for the rbd-provisioner chart"""
CHART = constants.HELM_CHART_RBD_PROVISIONER
SUPPORTED_NAMESPACES = [
common.HELM_NS_OPENSTACK
]
SUPPORTED_NAMESPACES = \
base.BaseHelm.SUPPORTED_NAMESPACES + [common.HELM_NS_OPENSTACK]
SERVICE_NAME = 'rbd-provisioner'
SERVICE_PORT_MON = 6789
def get_namespaces(self):
return self.SUPPORTED_NAMESPACES
def get_overrides(self, namespace=None):
backends = self.dbapi.storage_backend_get_list()

View File

@ -50,8 +50,7 @@ def make_class_properties(cls):
# if name in _optional_fields, we just return None
# as class not implement obj_load_attr function
if hasattr(self, '_optional_fields') and name in self._optional_fields:
LOG.exception(_('This is Optional field in %(field)s') %
{'field': name})
# This is an optional field, so just return None if there is no such attr
return None
else:
self.obj_load_attr(name)

View File

@ -269,8 +269,8 @@ class CephPuppet(openstack.OpenstackBasePuppet):
osd_config.update({name: osd})
return {
'platform::ceph::storage::osd_config': osd_config,
'platform::ceph::storage::journal_config': journal_config,
'platform::ceph::osds::osd_config': osd_config,
'platform::ceph::osds::journal_config': journal_config,
}
def _format_ceph_mon_address(self, ip_address):

View File

@ -1466,8 +1466,6 @@ class TestMigrations(BaseMigrationTestCase, WalkVersionsMixin):
'state': 'String',
'task': 'String',
'ceph_mon_gib': 'Integer',
'ceph_mon_dev_ctrl0': 'String',
'ceph_mon_dev_ctrl1': 'String',
}
for col, coltype in storconfigs_cols.items():
self.assertTrue(isinstance(storconfigs.c[col].type,

View File

@ -1,2 +1,2 @@
SRC_DIR="workerconfig"
TIS_PATCH_VER=11
TIS_PATCH_VER=12

View File

@ -1,6 +1,6 @@
#!/bin/bash
#
# Copyright (c) 2013-2016 Wind River Systems, Inc.
# Copyright (c) 2013-2019 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
@ -239,7 +239,7 @@ start()
if [ "$nodetype" = "worker" ]
then
# Check whether our installed load matches the active controller
CONTROLLER_UUID=`curl -sf http://controller/feed/rel-${SW_VERSION}/install_uuid`
CONTROLLER_UUID=`curl -sf http://controller:${http_port}/feed/rel-${SW_VERSION}/install_uuid`
if [ $? -ne 0 ]
then
fatal_error "Unable to retrieve installation uuid from active controller"