Relocated some packages to repo 'stx-puppet'

List of relocated subdirectories:

puppet-manifests
puppet-modules-wrs/puppet-dcdbsync
puppet-modules-wrs/puppet-dcmanager
puppet-modules-wrs/puppet-dcorch
puppet-modules-wrs/puppet-fm
puppet-modules-wrs/puppet-mtce
puppet-modules-wrs/puppet-nfv
puppet-modules-wrs/puppet-patching
puppet-modules-wrs/puppet-smapi
puppet-modules-wrs/puppet-sshd
puppet-modules-wrs/puppet-sysinv

Story: 2006166
Task: 35687
Depends-On: I665dc7fabbfffc798ad57843eb74dca16e7647a3
Change-Id: Ibc468b9d97d6dbc7ac09652dcd979c0e68a85672
Signed-off-by: Scott Little <scott.little@windriver.com>
Depends-On: I00f54876e7872cf0d3e4f5e8f986cb7e3b23c86f
Signed-off-by: Scott Little <scott.little@windriver.com>
Scott Little 2019-09-04 10:14:28 -04:00
parent 23a41191c1
commit 3077d0c656
359 changed files with 0 additions and 26665 deletions
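
The relocated packages continue development in the new stx-puppet repository. A sketch of fetching the new location (the hosting URL is an assumption, not taken from this commit):

git clone https://opendev.org/starlingx/stx-puppet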


@@ -17,8 +17,6 @@
- cgtsclient-tox-py27
- cgtsclient-tox-pep8
- cgtsclient-tox-pylint
- puppet-manifests-lint
- puppet-modules-wrs-lint
gate:
jobs:
- openstack-tox-linters
@@ -32,8 +30,6 @@
- cgtsclient-tox-py27
- cgtsclient-tox-pep8
- cgtsclient-tox-pylint
- puppet-manifests-lint
- puppet-modules-wrs-lint
- job:
name: sysinv-tox-py27
@@ -213,27 +209,3 @@
STX_PREFIX: ''
tox_envlist: pylint
tox_extra_args: -c sysinv/cgts-client/cgts-client/tox.ini
- job:
name: puppet-manifests-lint
parent: tox
description: |
Run puppetlint test for puppet-manifests
files:
- puppet-manifests/*
pre-run: playbooks/tox-puppet-lint/pre.yaml
vars:
tox_envlist: puppetlint
tox_extra_args: -c puppet-manifests/tox.ini
- job:
name: puppet-modules-wrs-lint
parent: tox
description: |
Run puppetlint test for puppet-modules-wrs
files:
- puppet-modules-wrs/*
pre-run: playbooks/tox-puppet-lint/pre.yaml
vars:
tox_envlist: puppetlint
tox_extra_args: -c puppet-modules-wrs/tox.ini
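
The two removed jobs were thin wrappers around tox, so the same checks can still be run by hand from a tree containing these directories (a sketch, assuming the puppetlint tox environments carry over unchanged to stx-puppet):

tox -e puppetlint -c puppet-manifests/tox.ini
tox -e puppetlint -c puppet-modules-wrs/tox.ini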


@@ -1,2 +0,0 @@
SRC_DIR="src"
TIS_PATCH_VER=93


@@ -1,84 +0,0 @@
Name: puppet-manifests
Version: 1.0.0
Release: %{tis_patch_ver}%{?_tis_dist}
Summary: Puppet Configuration and Manifests
License: Apache-2.0
Packager: Wind River <info@windriver.com>
URL: unknown
Source0: %{name}-%{version}.tar.gz
BuildArch: noarch
# List all the required puppet modules
# WRS puppet modules
Requires: puppet-dcorch
Requires: puppet-dcmanager
Requires: puppet-mtce
Requires: puppet-nfv
Requires: puppet-patching
Requires: puppet-sysinv
Requires: puppet-sshd
Requires: puppet-smapi
Requires: puppet-fm
Requires: puppet-dcdbsync
# Openstack puppet modules
Requires: puppet-barbican
Requires: puppet-ceph
Requires: puppet-horizon
Requires: puppet-keystone
Requires: puppet-openstacklib
Requires: puppet-vswitch
Requires: puppet-memcached
# Puppetlabs puppet modules
Requires: puppet-concat
Requires: puppet-create_resources
Requires: puppet-drbd
Requires: puppet-firewall
Requires: puppet-haproxy
Requires: puppet-inifile
Requires: puppet-lvm
Requires: puppet-postgresql
Requires: puppet-rabbitmq
Requires: puppet-stdlib
Requires: puppet-sysctl
Requires: puppet-etcd
# 3rdparty puppet modules
Requires: puppet-boolean
Requires: puppet-certmonger
Requires: puppet-dnsmasq
Requires: puppet-filemapper
Requires: puppet-kmod
Requires: puppet-ldap
Requires: puppet-network
Requires: puppet-nslcd
Requires: puppet-nssdb
Requires: puppet-puppi
Requires: puppet-vlan
Requires: puppet-collectd
%description
Platform puppet configuration files and manifests
%define config_dir %{_sysconfdir}/puppet
%define module_dir %{_datadir}/puppet/modules
%define local_bindir /usr/local/bin
%prep
%setup
%install
make install \
BINDIR=%{buildroot}%{local_bindir} \
CONFIGDIR=%{buildroot}%{config_dir} \
MODULEDIR=%{buildroot}%{module_dir}
%files
%defattr(-,root,root,-)
%license LICENSE
%{local_bindir}
%{config_dir}
%{module_dir}
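
Together with build_srpm.data above (TIS_PATCH_VER=93), this spec yields a noarch package versioned 1.0.0-93. A minimal local-build sketch, assuming the %{name}-%{version}.tar.gz source tarball has already been staged where rpmbuild expects it:

rpmbuild -ba puppet-manifests.spec --define "tis_patch_ver 93"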


@@ -1,202 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.


@@ -1,20 +0,0 @@
#
# SPDX-License-Identifier: Apache-2.0
#
# Copyright (C) 2019 Intel Corporation
#
BINDIR ?= /usr/local/bin
CONFIGDIR ?= /etc/puppet
MODULEDIR ?= /usr/share/puppet/modules
install:
install -m 755 -D bin/puppet-manifest-apply.sh $(BINDIR)/puppet-manifest-apply.sh
install -m 755 -D bin/apply_network_config.sh $(BINDIR)/apply_network_config.sh
install -d -m 0755 $(CONFIGDIR)
install -m 640 etc/hiera.yaml $(CONFIGDIR)/
cp -R hieradata $(CONFIGDIR)/
cp -R manifests $(CONFIGDIR)/
install -d -m 0755 $(MODULEDIR)
cp -R modules/platform $(MODULEDIR)/
cp -R modules/openstack $(MODULEDIR)/
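
The spec's %install section drives this Makefile with %{buildroot}-prefixed paths; the same staging can be reproduced outside rpmbuild by overriding the three directory variables (a sketch, with /tmp/stage as an arbitrary destination):

make install \
    BINDIR=/tmp/stage/usr/local/bin \
    CONFIGDIR=/tmp/stage/etc/puppet \
    MODULEDIR=/tmp/stage/usr/share/puppet/modules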


@@ -1,440 +0,0 @@
#!/bin/bash
################################################################################
# Copyright (c) 2016 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
################################################################################
#
# Purpose of this script is to copy the puppet-built
# ifcfg-* network config files from the puppet dir
# to /etc/sysconfig/network-scripts/. Only files that
# are detected as different are copied.
#
# Then, for each puppet network config file that differs from the
# /etc/sysconfig/network-scripts/ version of the same file, a
# network restart is performed on the related iface.
#
# Please note: function is_eq_ifcfg() is used to determine if
# cfg files are different
#
export IFNAME_INCLUDE="ifcfg-*"
export RTNAME_INCLUDE="route-*"
ACQUIRE_LOCK=1
RELEASE_LOCK=0
if [ ! -d /var/run/network-scripts.puppet/ ] ; then
# No puppet files? Nothing to do!
exit 1
fi
function log_it {
logger "${0} ${1}"
}
function do_if_up {
local iface=$1
log_it "Bringing $iface up"
/sbin/ifup $iface
}
function do_if_down {
local iface=$1
log_it "Bringing $iface down"
/sbin/ifdown $iface
}
function do_rm {
local theFile=$1
log_it "Removing $theFile"
/bin/rm $theFile
}
function do_cp {
local srcFile=$1
local dstFile=$2
log_it "copying network cfg $srcFile to $dstFile"
cp $srcFile $dstFile
}
# Return items in list1 that are not in list2
array_diff () {
list1=${!1}
list2=${!2}
result=()
l2=" ${list2[*]} "
for item in ${list1[@]}; do
if [[ ! $l2 =~ " $item " ]] ; then
result+=($item)
fi
done
echo ${result[@]}
}
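# Illustrative example (not part of the original script): with
# current=(ifcfg-eth0 ifcfg-eth1) and active=(ifcfg-eth1),
# "array_diff current[@] active[@]" echoes "ifcfg-eth0"; this is how
# stale ifcfg files are identified for removal near the end of this script.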
function normalized_cfg_attr_value {
local cfg=$1
local attr_name=$2
local attr_value
attr_value=$(cat $cfg | grep $attr_name= | awk -F "=" {'print $2'})
#
# Special case BONDING_OPTS attribute.
#
# The BONDING_OPTS attribute contains '=' characters, so is not correctly
# parsed by splitting on '=' as done above. This results in changes to
# BONDING_OPTS not causing the interface to be restarted, so the old
# BONDING_OPTS would still be used. Because this is only checking for changes,
# rather than actually using the returned value, we can return the whole
# line.
#
if [[ "${attr_name}" == "BONDING_OPTS" ]]; then
echo "$(cat $cfg | grep $attr_name=)"
return $(true)
fi
if [[ "${attr_name}" != "BOOTPROTO" ]]; then
echo "${attr_value}"
return $(true)
fi
#
# Special case BOOTPROTO attribute.
#
# The BOOTPROTO attribute is not populated consistently by various aspects
# of the system. Different values are used to indicate a manually
# configured interface (i.e., one that does not expect to have an IP
# address) and so to avoid reconfiguring an interface that has different
# values with the same meaning we normalize them here before making any
# decisions.
#
# From a user perspective the values "manual", "none", and "" all have the
# same meaning - an interface without an IP address while "dhcp" and
# "static" are distinct values with a separate meaning. In practice
# however, the only value that matters from an ifup/ifdown script point of
# view is "dhcp". All other values are ignored.
#
# In our system we set BOOTPROTO to "static" to indicate that IP address
# attributes exist and to "manual"/"none" to indicate that no IP address
# attributes exist. These are not needed by ifup/ifdown as it looks for
# the "IPADDR" attribute whenever BOOTPROTO is set to anything other than
# "dhcp".
#
if [[ "${attr_value}" == "none" ]]; then
attr_value="none"
fi
if [[ "${attr_value}" == "manual" ]]; then
attr_value="none"
fi
if [[ "${attr_value}" == "" ]]; then
attr_value="none"
fi
echo "${attr_value}"
return $(true)
}
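# Illustrative example (not part of the original script): an old ifcfg with
# BOOTPROTO=manual and a puppet-generated one with BOOTPROTO=none both
# normalize to "none", e.g.
#   normalized_cfg_attr_value /etc/sysconfig/network-scripts/ifcfg-eth0 BOOTPROTO
# so is_eq_ifcfg below will not flag the interface for a needless restart.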
#
# returns $(true) if cfg file ( $1 ) has property propName ( $2 ) with a value of propValue ( $3 )
#
function cfg_has_property_with_value {
local cfg=$1
local propname=$2
local propvalue=$3
if [ -f $cfg ]; then
if [[ "$(normalized_cfg_attr_value $cfg $propname)" == "${propvalue}" ]]; then
return $(true)
fi
fi
return $(false)
}
#
# returns $(true) if cfg file is configured as a slave
#
function is_slave {
cfg_has_property_with_value $1 "SLAVE" "yes"
return $?
}
#
# returns $(true) if cfg file is configured for DHCP
#
function is_dhcp {
cfg_has_property_with_value $1 "BOOTPROTO" "dhcp"
}
#
# returns $(true) if cfg file is configured as a VLAN interface
#
function is_vlan {
cfg_has_property_with_value $1 "VLAN" "yes"
return $?
}
#
# returns $(true) if cfg file is configured as an ethernet interface. For the
# purposes of this script "ethernet" is considered as any interface that is not
# a vlan or a slave. This includes both regular ethernet interfaces and bonded
# interfaces.
#
function is_ethernet {
if ! is_vlan $1; then
if ! is_slave $1; then
return $(true)
fi
fi
return $(false)
}
#
# returns $(true) if cfg file represents an interface of the specified type.
#
function iftype_filter {
local iftype=$1
return $(is_$iftype $2)
}
#
# returns $(true) if ifcfg files have the same number of VFs
#
function is_eq_sriov_numvfs {
local cfg_1=$1
local cfg_2=$2
local sriov_numvfs_1
sriov_numvfs_1=$(grep -o 'echo *[1-9].*sriov_numvfs' $cfg_1 | awk {'print $2'})
local sriov_numvfs_2
sriov_numvfs_2=$(grep -o 'echo *[1-9].*sriov_numvfs' $cfg_2 | awk {'print $2'})
sriov_numvfs_1=${sriov_numvfs_1:-0}
sriov_numvfs_2=${sriov_numvfs_2:-0}
if [[ "${sriov_numvfs_1}" != "${sriov_numvfs_2}" ]]; then
log_it "$cfg_1 and $cfg_2 differ on attribute sriov_numvfs [${sriov_numvfs_1}:${sriov_numvfs_2}]"
return $(false)
fi
return $(true)
}
#
# returns $(true) if ifcfg files are equal
#
# Warning: Only compares against cfg file attributes:
# BOOTPROTO DEVICE IPADDR NETMASK GATEWAY MTU BONDING_OPTS SRIOV_NUMVFS
#
function is_eq_ifcfg {
local cfg_1=$1
local cfg_2=$2
for attr in BOOTPROTO DEVICE IPADDR NETMASK GATEWAY MTU BONDING_OPTS; do
local attr_value1
attr_value1=$(normalized_cfg_attr_value $cfg_1 $attr)
local attr_value2
attr_value2=$(normalized_cfg_attr_value $cfg_2 $attr)
if [[ "${attr_value1}" != "${attr_value2}" ]]; then
log_it "$cfg_1 and $cfg_2 differ on attribute $attr"
return $(false)
fi
done
is_eq_sriov_numvfs $1 $2
return $?
}
# Synchronize with sysinv-agent audit (ifup/down to query link speed).
function sysinv_agent_lock {
case $1 in
$ACQUIRE_LOCK)
local lock_file="/var/run/apply_network_config.lock"
# Lock file should be the same as defined in sysinv agent code
local lock_timeout=5
local max=15
local n=1
LOCK_FD=0
exec {LOCK_FD}>$lock_file
while [[ $n -le $max ]]; do
flock -w $lock_timeout $LOCK_FD && break
log_it "Failed to get lock($LOCK_FD) after $lock_timeout seconds ($n/$max), will retry"
sleep 1
n=$(($n+1))
done
if [[ $n -gt $max ]]; then
log_it "Failed to acquire lock($LOCK_FD) even after $max retries"
exit 1
fi
;;
$RELEASE_LOCK)
[[ $LOCK_FD -gt 0 ]] && flock -u $LOCK_FD
;;
esac
}
# First thing to do is deal with the case of there being no routes left on an interface.
# In this case, there will be no route-<if> in the puppet directory.
# We'll just create an empty one so that the loop below will work in all cases.
for rt_path in $(find /etc/sysconfig/network-scripts/ -name "${RTNAME_INCLUDE}"); do
rt=$(basename $rt_path)
if [ ! -e /var/run/network-scripts.puppet/$rt ]; then
touch /var/run/network-scripts.puppet/$rt
fi
done
for rt_path in $(find /var/run/network-scripts.puppet/ -name "${RTNAME_INCLUDE}"); do
rt=$(basename $rt_path)
iface_rt=${rt#route-}
if [ -e /etc/sysconfig/network-scripts/$rt ]; then
# There is an existing route file. Check if there are changes.
diff -I ".*Last generated.*" -q /var/run/network-scripts.puppet/$rt \
/etc/sysconfig/network-scripts/$rt >/dev/null 2>&1
if [ $? -ne 0 ] ; then
# We may need to perform some manual route deletes
# Look for route lines that are present in the current netscripts route file,
# but not in the new puppet version. Need to manually delete these routes.
grep -v HEADER /etc/sysconfig/network-scripts/$rt | while read oldRouteLine
do
grepCmd="grep -q '$oldRouteLine' $rt_path > /dev/null"
eval $grepCmd
if [ $? -ne 0 ] ; then
log_it "Removing route: $oldRouteLine"
$(/usr/sbin/ip route del $oldRouteLine)
fi
done
fi
fi
if [ -s /var/run/network-scripts.puppet/$rt ] ; then
# Whether this is a new routes file or there are changes, ultimately we will need
# to ifup the file to add any potentially new routes.
do_cp /var/run/network-scripts.puppet/$rt /etc/sysconfig/network-scripts/$rt
/etc/sysconfig/network-scripts/ifup-routes $iface_rt
else
# Puppet routes file is empty, because we created an empty one due to absence of any routes
# so that our check with the existing netscripts routes would work.
# Just delete the netscripts file as there are no static routes left on this interface.
do_rm /etc/sysconfig/network-scripts/$rt
fi
# Puppet redhat.rb file does not support removing routes from the same resource file.
# Need to remove the temp one so it will be properly recreated next time.
do_cp /var/run/network-scripts.puppet/$rt /var/run/network-scripts.puppet/$iface_rt.back
do_rm /var/run/network-scripts.puppet/$rt
done
upDown=()
changed=()
for cfg_path in $(find /var/run/network-scripts.puppet/ -name "${IFNAME_INCLUDE}"); do
cfg=$(basename $cfg_path)
diff -I ".*Last generated.*" -q /var/run/network-scripts.puppet/$cfg \
/etc/sysconfig/network-scripts/$cfg >/dev/null 2>&1
if [ $? -ne 0 ] ; then
# puppet file needs to be copied to the network dir because diff detected a change
changed+=($cfg)
# but do we need to actually start the iface?
if is_dhcp /var/run/network-scripts.puppet/$cfg || \
is_dhcp /etc/sysconfig/network-scripts/$cfg ; then
# if dhcp type iface, then there are too many possible attrs to compare against, so
# just add cfg to the upDown list because we know (from above) the cfg file changed
log_it "dhcp detected for $cfg - adding to upDown list"
upDown+=($cfg)
else
# not in dhcp situation so check if any significant
# cfg attributes have changed to warrant an iface restart
is_eq_ifcfg /var/run/network-scripts.puppet/$cfg \
/etc/sysconfig/network-scripts/$cfg
if [ $? -ne 0 ] ; then
log_it "$cfg changed"
# Remove alias portion in the interface name if any.
# Check if the base interface is already on the list for
# restart. If not, add it to the list.
# The alias interface does not need to be restarted.
base_cfg=${cfg/:*/}
found=0
for chk in ${upDown[@]}; do
if [ "$base_cfg" = "$chk" ]; then
found=1
break
fi
done
if [ $found -eq 0 ]; then
log_it "Adding $base_cfg to upDown list"
upDown+=($base_cfg)
fi
fi
fi
fi
done
current=()
for f in $(find /etc/sysconfig/network-scripts/ -name "${IFNAME_INCLUDE}"); do
current+=($(basename $f))
done
active=()
for f in $(find /var/run/network-scripts.puppet/ -name "${IFNAME_INCLUDE}"); do
active+=($(basename $f))
done
# synchronize with sysinv-agent audit
sysinv_agent_lock $ACQUIRE_LOCK
remove=$(array_diff current[@] active[@])
for r in ${remove[@]}; do
# Bring down the interface before we execute a network restart; interfaces
# that do not have an ifcfg file are not managed by the init script
iface=${r#ifcfg-}
do_if_down $iface
do_rm /etc/sysconfig/network-scripts/$r
done
# now down the changed ifaces by dealing with vlan interfaces first so that
# they are brought down gracefully (i.e., without taking their dependencies
# away unexpectedly).
for iftype in vlan ethernet; do
for cfg in ${upDown[@]}; do
ifcfg=/etc/sysconfig/network-scripts/$cfg
if iftype_filter $iftype $ifcfg; then
do_if_down ${ifcfg#ifcfg-}
fi
done
done
# now copy the puppet changed interfaces to /etc/sysconfig/network-scripts
for cfg in ${changed[@]}; do
do_cp /var/run/network-scripts.puppet/$cfg /etc/sysconfig/network-scripts/$cfg
done
# now ifup changed ifaces by dealing with vlan interfaces last so that their
# dependencies are met before they are configured.
for iftype in ethernet vlan; do
for cfg in ${upDown[@]}; do
ifcfg=/var/run/network-scripts.puppet/$cfg
if iftype_filter $iftype $ifcfg; then
do_if_up ${ifcfg#ifcfg-}
fi
done
done
# unlock: synchronize with sysinv-agent audit
sysinv_agent_lock $RELEASE_LOCK


@@ -1,117 +0,0 @@
#!/usr/bin/env bash
# Grab a lock before doing anything else
LOCKFILE=/var/lock/.puppet.applyscript.lock
LOCK_FD=200
LOCK_TIMEOUT=60
eval "exec ${LOCK_FD}>$LOCKFILE"
while :; do
flock -w $LOCK_TIMEOUT $LOCK_FD && break
logger -t $0 "Failed to get lock for puppet applyscript after $LOCK_TIMEOUT seconds. Trying again"
sleep 1
done
HIERADATA=$1
HOST=$2
PERSONALITY=$3
MANIFEST=${4:-$PERSONALITY}
RUNTIMEDATA=$5
PUPPET_MODULES_PATH=/usr/share/puppet/modules:/usr/share/openstack-puppet/modules
PUPPET_MANIFEST=/etc/puppet/manifests/${MANIFEST}.pp
PUPPET_TMP=/tmp/puppet
# Setup log directory and file
DATETIME=$(date -u +"%Y-%m-%d-%H-%M-%S")
LOGDIR="/var/log/puppet/${DATETIME}_${PERSONALITY}"
LOGFILE=${LOGDIR}/puppet.log
mkdir -p ${LOGDIR}
rm -f /var/log/puppet/latest
ln -s ${LOGDIR} /var/log/puppet/latest
touch ${LOGFILE}
chmod 600 ${LOGFILE}
# Remove old log directories
declare -i NUM_DIRS=`ls -d1 /var/log/puppet/[0-9]* 2>/dev/null | wc -l`
declare -i MAX_DIRS=20
if [ ${NUM_DIRS} -gt ${MAX_DIRS} ]; then
let RMDIRS=${NUM_DIRS}-${MAX_DIRS}
ls -d1 /var/log/puppet/[0-9]* | head -${RMDIRS} | xargs --no-run-if-empty rm -rf
fi
# Setup staging area and hiera data configuration
# (must match hierarchy defined in hiera.yaml)
rm -rf ${PUPPET_TMP}
mkdir -p ${PUPPET_TMP}/hieradata
cp /etc/puppet/hieradata/global.yaml ${PUPPET_TMP}/hieradata/global.yaml
cp /etc/puppet/hieradata/${PERSONALITY}.yaml ${PUPPET_TMP}/hieradata/personality.yaml
# When the worker node is first booted and goes online, sysinv-agent reports
# host CPU inventory which triggers the first runtime manifest apply that updates
# the grub. At this time, copying the host file failed due to a timing issue that
# has not yet been fully understood. Subsequent retries worked.
if [ "${PERSONALITY}" = "worker" ]; then
n=0
until [ $n -ge 3 ]; do
cp -f ${HIERADATA}/${HOST}.yaml ${PUPPET_TMP}/hieradata/host.yaml && break
n=$(($n+1))
logger -t $0 "Failed to copy /etc/puppet/hieradata/${HOST}.yaml"
sleep 15
done
else
cp -f ${HIERADATA}/${HOST}.yaml ${PUPPET_TMP}/hieradata/host.yaml
fi
cp -f ${HIERADATA}/system.yaml \
${HIERADATA}/secure_system.yaml \
${HIERADATA}/static.yaml \
${HIERADATA}/secure_static.yaml \
${PUPPET_TMP}/hieradata/
if [ -n "${RUNTIMEDATA}" ]; then
cp -f ${RUNTIMEDATA} ${PUPPET_TMP}/hieradata/runtime.yaml
fi
# Exit function to save logs from initial apply
function finish {
local SAVEDLOGS=/var/log/puppet/first_apply.tgz
if [ ! -f ${SAVEDLOGS} ]; then
# Save the logs
tar czf ${SAVEDLOGS} ${LOGDIR} 2>/dev/null
fi
}
trap finish EXIT
# Set Keystone endpoint type to internal to prevent SSL cert failures during config
export OS_ENDPOINT_TYPE=internalURL
export CINDER_ENDPOINT_TYPE=internalURL
# Suppress stdlib deprecation warnings until all puppet modules can be updated
export STDLIB_LOG_DEPRECATIONS=false
echo "Applying puppet ${MANIFEST} manifest..."
flock /var/run/puppet.lock \
puppet apply --debug --trace --modulepath ${PUPPET_MODULES_PATH} ${PUPPET_MANIFEST} \
< /dev/null 2>&1 | awk ' { system("date -u +%FT%T.%3N | tr \"\n\" \" \""); print $0; fflush(); } ' > ${LOGFILE}
if [ $? -ne 0 ]; then
echo "[FAILED]"
echo "See ${LOGFILE} for details"
exit 1
else
grep -qE '^(.......)?Warning|^....-..-..T..:..:..([.]...)?(.......)?.Warning|^(.......)?Error|^....-..-..T..:..:..([.]...)?(.......)?.Error' ${LOGFILE}
if [ $? -eq 0 ]; then
echo "[WARNING]"
echo "Warnings found. See ${LOGFILE} for details"
exit 1
fi
echo "[DONE]"
fi
exit 0
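
From the positional parameters at the top of the script, the calling convention is: hieradata directory, host name, personality, an optional manifest name (defaulting to the personality), and an optional runtime hiera file. A hand-run sketch (the hieradata path and host name here are illustrative, not taken from the script):

/usr/local/bin/puppet-manifest-apply.sh /opt/platform/puppet/hieradata controller-0 controller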


@@ -1,17 +0,0 @@
---
:backends:
- yaml
:hierarchy:
- runtime
- host
- secure_system
- system
- secure_static
- static
- personality
- global
:yaml:
# data is staged to a local directory by the puppet-manifest-apply.sh script
:datadir: /tmp/puppet/hieradata
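
The hierarchy is ordered most-specific first: a value in runtime.yaml overrides host.yaml, which in turn overrides the system, personality, and global layers staged by puppet-manifest-apply.sh above. Once the data is staged, a lookup can be spot-checked by hand (a sketch, assuming the standalone hiera 3 CLI is installed):

hiera -c /etc/puppet/hiera.yaml keystone::debug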


@@ -1,237 +0,0 @@
# controller specific configuration data
---
# platform
# Default hostname required for initial bootstrap of controller-0.
# Configured hostname will override this value.
platform::params::hostname: 'controller-0'
# Default controller hostname maps to the loopback address
# NOTE: Puppet doesn't support setting multiple IPs for the host resource,
# therefore setup an alias for the controller against localhost and
# then specify the IPv6 localhost as a separate entry.
# The IPv6 entry is required for LDAP clients to connect to the LDAP
# server when there are no IPv4 addresses configured, which occurs
# during the bootstrap phase.
platform::config::params::hosts:
localhost:
ip: '127.0.0.1'
host_aliases:
- localhost.localdomain
- controller
controller:
ip: '::1'
# default parameters, runtime management network configured will override
platform::network::mgmt::params::subnet_version: 4
platform::network::mgmt::params::controller0_address: 127.0.0.1
platform::network::mgmt::params::controller1_address: 127.0.0.2
# default parameters, runtime values will be based on selected link
platform::drbd::params::link_speed: 10000
platform::drbd::params::link_util: 40
platform::drbd::params::num_parallel: 1
platform::drbd::params::rtt_ms: 0.2
# Default LDAP configuration required for bootstrap of controller-0
platform::ldap::params::server_id: '001'
platform::ldap::params::provider_uri: 'ldap://controller-1'
# FIXME(mpeters): remove packstack specific variable
# workaround until openstack credentials module is updated to not reference
# hiera data
CONFIG_ADMIN_USER_DOMAIN_NAME: Default
CONFIG_ADMIN_PROJECT_DOMAIN_NAME: Default
# mtce
platform::mtce::params::auth_host: '127.0.0.1'
platform::mtce::params::auth_port: 5000
platform::mtce::params::auth_uri: 'http://127.0.0.1:5000'
platform::mtce::params::auth_user_domain: 'Default'
platform::mtce::params::auth_project_domain: 'Default'
platform::mtce::params::auth_project: 'services'
platform::mtce::params::auth_region: 'RegionOne'
platform::mtce::params::mtce_multicast: '239.1.1.2'
platform::mtce::agent::params::worker_boot_timeout: 720
platform::mtce::agent::params::controller_boot_timeout: 1200
platform::mtce::agent::params::heartbeat_period: 100
platform::mtce::agent::params::heartbeat_failure_action: 'fail'
platform::mtce::agent::params::heartbeat_failure_threshold: 10
platform::mtce::agent::params::heartbeat_degrade_threshold: 6
platform::mtce::agent::params::mnfa_threshold: 2
platform::mtce::agent::params::mnfa_timeout: 0
# influxdb configuration for collectd
platform::influxdb::params::bind_address: ':25826'
platform::influxdb::params::database: 'collectd'
platform::influxdb::params::typesdb: '/usr/share/collectd/types.db'
platform::influxdb::params::batch_size: 1000
platform::influxdb::params::batch_pending: 5
platform::influxdb::params::batch_timeout: '2s'
platform::influxdb::params::read_buffer: 0
# influxdb log rotation file
platform::influxdb::logrotate::params::log_file_name: '/var/log/influxdb/influxd.log'
platform::influxdb::logrotate::params::log_file_size: '20M'
platform::influxdb::logrotate::params::log_file_rotate: 10
# postgresql
postgresql::globals::needs_initdb: false
postgresql::server::service_enable: false
postgresql::server::ip_mask_deny_postgres_user: '0.0.0.0/32'
postgresql::server::ip_mask_allow_all_users: '0.0.0.0/0'
postgresql::server::pg_hba_conf_path: "/etc/postgresql/pg_hba.conf"
postgresql::server::pg_ident_conf_path: "/etc/postgresql/pg_ident.conf"
postgresql::server::postgresql_conf_path: "/etc/postgresql/postgresql.conf"
postgresql::server::listen_addresses: "*"
postgresql::server::ipv4acls: ['host all all samenet md5']
postgresql::server::log_line_prefix: 'db=%d,user=%u '
# rabbitmq
rabbitmq::repos_ensure: false
rabbitmq::admin_enable: false
rabbitmq::package_provider: 'yum'
rabbitmq::default_host: 'controller'
# drbd
drbd::service_enable: false
drbd::service_ensure: 'stopped'
# haproxy
haproxy::merge_options: true
platform::haproxy::params::global_options:
log:
- '127.0.0.1:514 local1 info'
user: 'haproxy'
group: 'sys_protected'
chroot: '/var/lib/haproxy'
pidfile: '/var/run/haproxy.pid'
maxconn: '4000'
daemon: ''
stats: 'socket /var/lib/haproxy/stats'
ca-base: '/etc/ssl/certs'
crt-base: '/etc/ssl/private'
ssl-default-bind-ciphers: 'kEECDH+aRSA+AES:kRSA+AES:+AES256:!RC4-SHA:!kEDH:!ECDHE-RSA-AES128-SHA:!ECDHE-RSA-AES256-SHA:!LOW:!EXP:!MD5:!aNULL:!eNULL'
ssl-default-bind-options: 'no-sslv3 no-tlsv10'
haproxy::defaults_options:
log: 'global'
mode: 'http'
stats: 'enable'
option:
- 'httplog'
- 'dontlognull'
- 'forwardfor'
retries: '3'
timeout:
- 'http-request 10s'
- 'queue 10m'
- 'connect 10s'
- 'client 90s'
- 'server 90s'
- 'check 10s'
maxconn: '8000'
# memcached
# disable UDP listener to prevent DOS attack
platform::memcached::params::udp_port: 0
platform::memcached::params::max_connections: 8192
platform::memcached::params::max_memory: 782
# sysinv
sysinv::journal_max_size: 51200
sysinv::journal_min_size: 1024
sysinv::journal_default_size: 1024
sysinv::api::enabled: false
sysinv::api::keystone_tenant: 'services'
sysinv::api::keystone_user: 'sysinv'
sysinv::api::keystone_user_domain: 'Default'
sysinv::api::keystone_project_domain: 'Default'
sysinv::conductor::enabled: false
# nfvi
nfv::nfvi::infrastructure_rest_api_data_port_fault_handling_enabled: false
# keystone
keystone::service::enabled: false
keystone::token_provider: 'fernet'
keystone::max_token_size: 255
keystone::debug: false
keystone::service_name: 'openstack-keystone'
keystone::enable_ssl: false
keystone::use_syslog: true
keystone::log_facility: 'local2'
keystone::database_idle_timeout: 60
keystone::database_max_pool_size: 1
keystone::database_max_overflow: 50
keystone::enable_bootstrap: false
keystone::sync_db: false
keystone::enable_proxy_headers_parsing: true
keystone::log_file: /dev/null
keystone::endpoint::default_domain: 'Default'
keystone::endpoint::version: 'v3'
keystone::endpoint::region: 'RegionOne'
keystone::endpoint::system_controller_region: 'SystemController'
keystone::endpoint::admin_url: 'http://127.0.0.1:5000'
keystone::ldap::identity_driver: 'sql'
keystone::ldap::assignment_driver: 'sql'
keystone::security_compliance::unique_last_password_count: 2
keystone::security_compliance::password_regex: '^(?=.*\d)(?=.*[a-z])(?=.*[A-Z])(?=.*[!@#$%^&*()<>{}+=_\\\[\]\-?|~`,.;:]).{7,}$'
keystone::security_compliance::password_regex_description: 'Password must have a minimum length of 7 characters, and must contain at least 1 upper case, 1 lower case, 1 digit, and 1 special character'
keystone::roles::admin::email: 'admin@localhost'
keystone::roles::admin::admin_tenant: 'admin'
keystone::roles::admin::admin_tenant_desc: 'admin project'
keystone::roles::admin::service_tenant_desc: 'project for the platform services'
platform::client::params::identity_auth_url: 'http://localhost:5000/v3'
# Dcorch
dcorch::use_syslog: true
dcorch::log_facility: 'local2'
dcorch::debug: false
# Dcmanager
dcmanager::use_syslog: true
dcmanager::log_facility: 'local2'
dcmanager::debug: false
# Dcdbsync
dbsync::use_syslog: true
dbsync::log_facility: 'local2'
dbsync::debug: false
# FM
fm::use_syslog: true
fm::log_facility: 'local2'
fm::api::enable_proxy_headers_parsing: true
fm::db::sync::user: 'root'
fm::database_idle_timeout: 60
fm::database_max_overflow: 20
fm::database_max_pool_size: 1
# Barbican
barbican::api::enabled: false
barbican::api::service_name: 'barbican-api'
barbican::api::enable_proxy_headers_parsing: true
barbican::api::logging::use_syslog: true
barbican::api::logging::log_facility: 'local2'
barbican::db::sync::user: 'root'
barbican::db::database_idle_timeout: 60
barbican::db::database_max_pool_size: 1
barbican::keystone-listener::enabled: false
barbican::worker::enabled: false


@@ -1,47 +0,0 @@
# global default configuration data (applicable to all personalities)
---
classes: []
# platform
platform::params::controller_hostname: controller
platform::params::controller_0_hostname: controller-0
platform::params::controller_1_hostname: controller-1
platform::params::pxeboot_hostname: pxecontroller
platform::params::security_feature: nopti nospectre_v2
platform::amqp::auth_user: guest
platform::users::params::sysadmin_password_max_age: 45
# mtce
platform::mtce::params::sm_server_port: 2124
platform::mtce::params::sm_client_port: 2224
# sysinv
sysinv::database_idle_timeout: 60
sysinv::database_max_overflow: 64
sysinv::database_max_pool_size: 1
sysinv::use_syslog: true
sysinv::verbose: true
sysinv::log_facility: 'local6'
# collectd: configuration
platform::collectd::params::interval: 30
platform::collectd::params::timeout: 2
platform::collectd::params::read_threads: 5
platform::collectd::params::write_threads: 5
platform::collectd::params::max_read_interval: 86400
platform::collectd::params::write_queue_limit_high: 1000000
platform::collectd::params::write_queue_limit_low: 800000
platform::collectd::params::server_addrs: ['controller']
platform::collectd::params::server_port: 25826
platform::collectd::params::collectd_d_dir: '/etc/collectd.d'
# collectd: module named plugins
platform::collectd::params::module_path: '/opt/collectd/extensions/python'
platform::collectd::params::plugins: ['fm_notifier', 'mtce_notifier']
platform::collectd::params::mtce_notifier_port: 2101
platform::collectd::params::log_traces: true
platform::collectd::params::encoding: "utf-8"
# ceph
platform::ceph::params::mon_lv_size_reserved: 20


@@ -1,2 +0,0 @@
# storage specific configuration data
---


@@ -1,5 +0,0 @@
# worker specific configuration data
---
# vswitch
vswitch::dpdk::memory_channels: 4


@@ -1,32 +0,0 @@
#
# puppet manifest for controller initial bootstrap
#
Exec {
timeout => 600,
path => '/usr/bin:/usr/sbin:/bin:/sbin:/usr/local/bin:/usr/local/sbin'
}
include ::platform::config::bootstrap
include ::platform::users::bootstrap
include ::platform::ldap::bootstrap
include ::platform::drbd::bootstrap
include ::platform::postgresql::bootstrap
include ::platform::amqp::bootstrap
include ::openstack::keystone::bootstrap
include ::openstack::barbican::bootstrap
include ::platform::client::bootstrap
include ::platform::sysinv::bootstrap
# Puppet classes to enable the bring up of kubernetes master
include ::platform::docker::bootstrap
include ::platform::etcd::bootstrap
# Puppet classes to enable initial controller unlock
include ::platform::drbd::dockerdistribution::bootstrap
include ::platform::filesystem::backup
include ::platform::filesystem::kubelet
include ::platform::mtce::bootstrap
include ::platform::fm::bootstrap


@@ -1,21 +0,0 @@
#
# puppet manifest for controller initial bootstrap
#
Exec {
timeout => 600,
path => '/usr/bin:/usr/sbin:/bin:/sbin:/usr/local/bin:/usr/local/sbin'
}
include ::platform::config::bootstrap
include ::platform::users::bootstrap
include ::platform::ldap::bootstrap
include ::platform::drbd::bootstrap
include ::platform::postgresql::bootstrap
include ::platform::amqp::bootstrap
include ::openstack::keystone::bootstrap
include ::openstack::barbican::bootstrap
include ::platform::client::bootstrap
include ::platform::sysinv::bootstrap


@@ -1,107 +0,0 @@
#
# puppet manifest for controller hosts
#
Exec {
timeout => 600,
path => '/usr/bin:/usr/sbin:/bin:/sbin:/usr/local/bin:/usr/local/sbin'
}
#
# Disable the firewall to protect against attempted
# restoration of kubernetes-related iptables rules
# during puppet apply, as kubernetes may not yet
# be running and the restore will fail.
#
class { '::firewall':
ensure => stopped
}
include ::platform::config
include ::platform::users
include ::platform::sysctl::controller
include ::platform::filesystem::controller
include ::platform::firewall::calico::oam
include ::platform::dhclient
include ::platform::partitions
include ::platform::lvm::controller
include ::platform::network
include ::platform::drbd
include ::platform::exports
include ::platform::dns
include ::platform::ldap::server
include ::platform::ldap::client
include ::platform::password
include ::platform::ntp::server
include ::platform::ptp
include ::platform::lldp
include ::platform::amqp::rabbitmq
include ::platform::postgresql::server
include ::platform::haproxy::server
include ::platform::grub
include ::platform::etcd
include ::platform::docker
include ::platform::dockerdistribution
include ::platform::kubernetes::master
include ::platform::helm
include ::platform::patching
include ::platform::patching::api
include ::platform::remotelogging
include ::platform::remotelogging::proxy
include ::platform::sysinv
include ::platform::sysinv::api
include ::platform::sysinv::conductor
include ::platform::mtce
include ::platform::mtce::agent
include ::platform::memcached
include ::platform::nfv
include ::platform::nfv::api
include ::platform::ceph::controller
include ::platform::ceph::rgw
include ::platform::influxdb
include ::platform::influxdb::logrotate
include ::platform::collectd
include ::platform::fm
include ::platform::fm::api
include ::platform::multipath
include ::platform::client
include ::openstack::keystone
include ::openstack::keystone::api
include ::openstack::horizon
include ::platform::dcmanager
include ::platform::dcmanager::manager
include ::platform::dcorch
include ::platform::dcorch::engine
include ::platform::dcorch::api_proxy
include ::platform::dcmanager::api
include ::platform::dcorch::snmp
include ::platform::dcdbsync
include ::platform::dcdbsync::api
include ::platform::smapi
include ::openstack::barbican
include ::openstack::barbican::api
include ::platform::sm
class { '::platform::config::controller::post':
stage => post,
}
hiera_include('classes')


@@ -1,14 +0,0 @@
#
# puppet manifest for runtime apply of configuration that executes a set of
# tasks that have been identified to execute based on the specific configuration
# change performed.
#
Exec {
timeout => 300,
path => '/usr/bin:/usr/sbin:/bin:/sbin:/usr/local/bin:/usr/local/sbin'
}
include ::platform::config
hiera_include('classes')


@@ -1,37 +0,0 @@
#
# puppet manifest for storage hosts
#
Exec {
timeout => 300,
path => '/usr/bin:/usr/sbin:/bin:/sbin:/usr/local/bin:/usr/local/sbin'
}
include ::platform::config
include ::platform::users
include ::platform::sysctl::storage
include ::platform::dhclient
include ::platform::partitions
include ::platform::lvm::storage
include ::platform::network
include ::platform::fstab
include ::platform::password
include ::platform::ldap::client
include ::platform::ntp::client
include ::platform::ptp
include ::platform::lldp
include ::platform::patching
include ::platform::remotelogging
include ::platform::mtce
include ::platform::sysinv
include ::platform::grub
include ::platform::collectd
include ::platform::filesystem::storage
include ::platform::docker
include ::platform::ceph::storage
class { '::platform::config::storage::post':
stage => post,
}
hiera_include('classes')


@@ -1,19 +0,0 @@
#
# puppet manifest for upgrade
#
Exec {
timeout => 600,
path => '/usr/bin:/usr/sbin:/bin:/sbin:/usr/local/bin:/usr/local/sbin'
}
class { '::platform::params':
controller_upgrade => true,
}
include ::platform::users::upgrade
include ::platform::postgresql::upgrade
include ::platform::amqp::upgrade
include ::openstack::keystone::upgrade
include ::platform::client::upgrade


@@ -1,46 +0,0 @@
#
# puppet manifest for worker nodes
#
Exec {
timeout => 300,
path => '/usr/bin:/usr/sbin:/bin:/sbin:/usr/local/bin:/usr/local/sbin'
}
include ::platform::config
include ::platform::users
include ::platform::sysctl::compute
include ::platform::dhclient
include ::platform::partitions
include ::platform::lvm::compute
include ::platform::compute
include ::platform::vswitch
include ::platform::network
include ::platform::fstab
include ::platform::password
include ::platform::ldap::client
include ::platform::ntp::client
include ::platform::ptp
include ::platform::lldp
include ::platform::patching
include ::platform::remotelogging
include ::platform::mtce
include ::platform::sysinv
include ::platform::devices
include ::platform::grub
include ::platform::collectd
include ::platform::filesystem::compute
include ::platform::docker
include ::platform::dockerdistribution::compute
include ::platform::kubernetes::worker
include ::platform::multipath
include ::platform::client
include ::platform::ceph::worker
include ::platform::worker::storage
include ::platform::pciirqaffinity
class { '::platform::config::worker::post':
stage => post,
}
hiera_include('classes')


@@ -1,173 +0,0 @@
class openstack::barbican::params (
$api_port = 9311,
$region_name = undef,
$service_name = 'barbican-api',
$service_create = false,
$service_enabled = true,
) { }
class openstack::barbican
inherits ::openstack::barbican::params {
if $service_enabled {
include ::platform::params
if $::platform::params::init_keystone {
include ::barbican::keystone::auth
include ::barbican::keystone::authtoken
}
if $::platform::params::init_database {
include ::barbican::db::postgresql
}
barbican_config {
'service_credentials/interface': value => 'internalURL'
}
file { '/var/run/barbican':
ensure => 'directory',
owner => 'barbican',
group => 'barbican',
}
$api_workers = $::platform::params::eng_workers_by_4
file_line { 'Modify workers in gunicorn-config.py':
path => '/etc/barbican/gunicorn-config.py',
line => "workers = ${api_workers}",
match => '.*workers = .*',
tag => 'modify-workers',
}
file { '/etc/logrotate.d/barbican-api':
ensure => present,
content => template('openstack/barbican-api-logrotate.erb')
}
}
}
class openstack::barbican::service
inherits ::openstack::barbican::params {
if $service_enabled {
include ::platform::network::mgmt::params
$api_host = $::platform::network::mgmt::params::subnet_version ? {
6 => "[${::platform::network::mgmt::params::controller_address}]",
default => $::platform::network::mgmt::params::controller_address,
}
$api_fqdn = $::platform::params::controller_hostname
$url_host = "http://${api_fqdn}:${api_port}"
if str2bool($::is_initial_config_primary) {
$enabled = true
} else {
$enabled = false
}
include ::platform::amqp::params
class { '::barbican::api':
enabled => $enabled,
bind_host => $api_host,
bind_port => $api_port,
host_href => $url_host,
sync_db => !$::openstack::barbican::params::service_create,
enable_proxy_headers_parsing => true,
rabbit_use_ssl => $::platform::amqp::params::ssl_enabled,
default_transport_url => $::platform::amqp::params::transport_url,
}
class { '::barbican::keystone::notification':
enable_keystone_notification => true,
}
cron { 'barbican-cleaner':
ensure => 'present',
command => '/usr/bin/barbican-manage db clean -p -e -L /var/log/barbican/barbican-clean.log',
environment => 'PATH=/bin:/usr/bin:/usr/sbin',
minute => '50',
hour => '*/24',
user => 'root',
}
}
}
class openstack::barbican::haproxy
inherits ::openstack::barbican::params {
platform::haproxy::proxy { 'barbican-restapi':
server_name => 's-barbican-restapi',
public_port => $api_port,
private_port => $api_port,
}
}
class openstack::barbican::api
inherits ::openstack::barbican::params {
include ::platform::params
# The barbican user and service are always required; they
# are used by subclouds even when the service itself is disabled
# on the System Controller.
# Whether the endpoint is created is determined by
# barbican::keystone::auth::configure_endpoint, which is
# set via sysinv puppet
if ($::openstack::barbican::params::service_create and
$::platform::params::init_keystone) {
if ($::platform::params::distributed_cloud_role == 'subcloud' and
$::platform::params::region_2_name != 'RegionOne') {
Keystone_endpoint["${platform::params::region_2_name}/barbican::key-manager"] -> Keystone_endpoint['RegionOne/barbican::key-manager']
keystone_endpoint { 'RegionOne/barbican::key-manager':
ensure => 'absent',
name => 'barbican',
type => 'key-manager',
region => 'RegionOne',
public_url => "http://127.0.0.1:${api_port}",
admin_url => "http://127.0.0.1:${api_port}",
internal_url => "http://127.0.0.1:${api_port}"
}
}
}
if $service_enabled {
include ::openstack::barbican::service
include ::openstack::barbican::haproxy
}
}
class openstack::barbican::bootstrap
inherits ::openstack::barbican::params {
class { '::barbican::keystone::auth':
configure_user_role => false,
}
class { '::barbican::keystone::authtoken':
auth_url => 'http://localhost:5000',
project_name => 'services',
user_domain_name => 'Default',
project_domain_name => 'Default',
}
$bu_name = $::barbican::keystone::auth::auth_name
$bu_tenant = $::barbican::keystone::auth::tenant
keystone_role { 'creator':
ensure => present,
}
keystone_user_role { "${bu_name}@${bu_tenant}":
ensure => present,
roles => ['admin', 'creator'],
}
include ::barbican::db::postgresql
include ::openstack::barbican
include ::openstack::barbican::service
}
class openstack::barbican::runtime
inherits ::openstack::barbican::params {
include ::openstack::barbican::service
}


@@ -1,221 +0,0 @@
class openstack::horizon::params (
$secret_key,
$openstack_host,
$enable_https = false,
$lockout_period = 300,
$lockout_retries = 3,
$horizon_ssl = false,
$horizon_cert = undef,
$horizon_key = undef,
$horizon_ca = undef,
$neutron_enable_lb = false,
$neutron_enable_firewall = false,
$neutron_enable_vpn = false,
$tpm_object = undef,
$tpm_engine = '/usr/lib64/openssl/engines/libtpm2.so',
$http_port = 8080,
$https_port = 8443,
) { }
class openstack::horizon
inherits ::openstack::horizon::params {
include ::platform::params
include ::platform::network::mgmt::params
include ::platform::network::pxeboot::params
include ::openstack::keystone::params
$controller_address = $::platform::network::mgmt::params::controller_address
$mgmt_subnet_network = $::platform::network::mgmt::params::subnet_network
$mgmt_subnet_prefixlen = $::platform::network::mgmt::params::subnet_prefixlen
$pxeboot_subnet_network = $::platform::network::pxeboot::params::subnet_network
$pxeboot_subnet_prefixlen = $::platform::network::pxeboot::params::subnet_prefixlen
$keystone_api_version = $::openstack::keystone::params::api_version
$keystone_auth_uri = $::openstack::keystone::params::auth_uri
$keystone_host_url = $::openstack::keystone::params::host_url
# The intention here is to set up /www as a chroot'ed
# environment for lighttpd so that it will remain in a jail under /www.
# The uid and gid for www match the uid and gid in the setup package.
group { 'www':
ensure => 'present',
gid => '1877',
}
-> user { 'www':
ensure => 'present',
gid => '1877',
shell => '/sbin/nologin',
groups => ['www', 'sys_protected'],
uid => '1877',
}
file { '/www/tmp':
ensure => directory,
path => '/www/tmp',
mode => '1700',
}
file {'/www/var':
ensure => directory,
path => '/www/var',
owner => 'www',
require => User['www']
}
file {'/www/var/log':
ensure => directory,
path => '/www/var/log',
owner => 'www',
require => User['www']
}
file {'/etc/lighttpd/lighttpd.conf':
ensure => present,
content => template('openstack/lighttpd.conf.erb')
}
file {'/etc/lighttpd/lighttpd-inc.conf':
ensure => present,
content => template('openstack/lighttpd-inc.conf.erb')
}
$workers = $::platform::params::eng_workers_by_2
if str2bool($::is_initial_config) {
exec { 'Stop lighttpd':
command => 'systemctl stop lighttpd; systemctl disable lighttpd',
require => User['www']
}
}
if str2bool($::selinux) {
selboolean{ 'httpd_can_network_connect':
value => on,
persistent => true,
}
}
# Horizon is not used in distributed cloud subclouds
if $::platform::params::distributed_cloud_role != 'subcloud' {
include ::horizon::params
file { '/etc/openstack-dashboard/horizon-config.ini':
ensure => present,
content => template('openstack/horizon-params.erb'),
mode => '0644',
owner => 'root',
group => $::horizon::params::apache_group,
}
$is_django_debug = 'False'
$bind_host = $::platform::network::mgmt::params::subnet_version ? {
6 => '::0',
default => '0.0.0.0',
# TO-DO(mmagr): Add IPv6 support when hostnames are used
}
if $::platform::params::region_config {
$horizon_keystone_url = "${keystone_auth_uri}/${keystone_api_version}"
$region_2_name = $::platform::params::region_2_name
$region_openstack_host = $openstack_host
file { '/etc/openstack-dashboard/region-config.ini':
ensure => present,
content => template('openstack/horizon-region-config.erb'),
mode => '0644',
}
} else {
$horizon_keystone_url = "http://${$keystone_host_url}:5000/${keystone_api_version}"
file { '/etc/openstack-dashboard/region-config.ini':
ensure => absent,
}
}
class {'::horizon':
secret_key => $secret_key,
keystone_url => $horizon_keystone_url,
keystone_default_role => '_member_',
server_aliases => [$controller_address, $::fqdn, 'localhost'],
allowed_hosts => '*',
hypervisor_options => {'can_set_mount_point' => false, },
django_debug => $is_django_debug,
file_upload_temp_dir => '/var/tmp',
listen_ssl => $horizon_ssl,
horizon_cert => $horizon_cert,
horizon_key => $horizon_key,
horizon_ca => $horizon_ca,
neutron_options => {
'enable_lb' => $neutron_enable_lb,
'enable_firewall' => $neutron_enable_firewall,
'enable_vpn' => $neutron_enable_vpn
},
configure_apache => false,
compress_offline => false,
}
# hack for memcached, for now we bind to localhost on ipv6
# https://bugzilla.redhat.com/show_bug.cgi?id=1210658
$memcached_bind_host = $::platform::network::mgmt::params::subnet_version ? {
6 => 'localhost6',
default => '0.0.0.0',
# TO-DO(mmagr): Add IPv6 support when hostnames are used
}
# Run clearsessions daily at the 40 minute mark
cron { 'clearsessions':
ensure => 'present',
command => '/usr/bin/horizon-clearsessions',
environment => 'PATH=/bin:/usr/bin:/usr/sbin',
minute => '40',
hour => '*/24',
user => 'root',
}
}
}
class openstack::horizon::reload {
# Remove all active Horizon user sessions
# so that we don't use any stale cached data
# such as endpoints
exec { 'remove-Horizon-user-sessions':
path => ['/usr/bin'],
command => '/usr/bin/rm -f /var/tmp/sessionid*',
}
platform::sm::restart {'horizon': }
platform::sm::restart {'lighttpd': }
}
class openstack::horizon::runtime {
include ::openstack::horizon
class {'::openstack::horizon::reload':
stage => post
}
}
class openstack::lighttpd::runtime
inherits ::openstack::horizon::params {
Class[$name] -> Class['::platform::helm::runtime']
file {'/etc/lighttpd/lighttpd.conf':
ensure => present,
content => template('openstack/lighttpd.conf.erb')
}
-> platform::sm::restart {'lighttpd': }
}


@@ -1,464 +0,0 @@
class openstack::keystone::params(
$api_version,
$identity_uri,
$auth_uri,
$host_url,
$openstack_auth_uri = undef,
$api_port = 5000,
$admin_port = 5000,
$region_name = undef,
$system_controller_region = undef,
$service_name = 'openstack-keystone',
$token_expiration = 3600,
$service_create = false,
$fernet_keys_rotation_minute = '25',
$fernet_keys_rotation_hour = '0',
$fernet_keys_rotation_month = '*/1',
$fernet_keys_rotation_monthday = '1',
$fernet_keys_rotation_weekday = '*',
) {}
class openstack::keystone (
) inherits ::openstack::keystone::params {
include ::platform::params
# In the case of a classical Multi-Region deployment, apply the Keystone
# controller configuration for Primary Region ONLY
# (i.e. on which region_config is False), since Keystone is a Shared service
#
# In the case of a Distributed Cloud deployment, apply the Keystone
# controller configuration for each SubCloud, since Keystone is also
# a localized service.
if (!$::platform::params::region_config or
$::platform::params::distributed_cloud_role == 'subcloud') {
include ::platform::amqp::params
include ::platform::network::mgmt::params
include ::platform::drbd::platform::params
$keystone_key_repo_path = "${::platform::drbd::platform::params::mountpoint}/keystone"
$eng_workers = $::platform::params::eng_workers
# FIXME(mpeters): binding to wildcard address to allow bootstrap transition
# Not sure if there is a better way to transition from the localhost address
# to the management address while still being able to authenticate the client
if str2bool($::is_initial_config_primary) {
$enabled = true
$bind_host = '[::]'
} else {
$enabled = false
$bind_host = $::platform::network::mgmt::params::controller_address_url
}
Class[$name] -> Class['::platform::client']
include ::keystone::client
# Configure keystone graceful shutdown timeout
# TODO(mpeters): move to puppet-keystone for module configuration
keystone_config {
'DEFAULT/graceful_shutdown_timeout': value => 15;
}
# (Pike Rebase) Disable token post expiration window since this
# allows authentication for up to 2 days' worth of stale tokens.
# TODO(knasim): move this to puppet-keystone along with graceful
# shutdown timeout param
keystone_config {
'token/allow_expired_window': value => 0;
}
file { '/etc/keystone/keystone-extra.conf':
ensure => present,
owner => 'root',
group => 'keystone',
mode => '0640',
content => template('openstack/keystone-extra.conf.erb'),
}
-> class { '::keystone':
enabled => $enabled,
enable_fernet_setup => false,
fernet_key_repository => "${keystone_key_repo_path}/fernet-keys",
default_transport_url => $::platform::amqp::params::transport_url,
service_name => $service_name,
token_expiration => $token_expiration,
}
# create keystone policy configuration
file { '/etc/keystone/policy.json':
ensure => present,
owner => 'keystone',
group => 'keystone',
mode => '0640',
content => template('openstack/keystone-policy.json.erb'),
}
# Keystone users can only be added to the SQL backend (write support for
# the LDAP backend has been removed). We can therefore set password rules
# irrespective of the backend
if ! str2bool($::is_restore_in_progress) {
# If a restore is in progress we need to apply the Keystone
# password rules as a runtime manifest instead, as the passwords in the
# hiera records may not be rule-compliant if this system was upgraded
# from R4 (wherein password rules were not in effect)
include ::keystone::security_compliance
}
include ::keystone::ldap
if $::platform::params::distributed_cloud_role == undef {
# Set up cron job that will rotate fernet keys. This is done every month on
# the first day of the month at 00:25 by default. The cron job runs on both
# controllers, but the script will only take action on the active controller.
cron { 'keystone-fernet-keys-rotater':
ensure => 'present',
command => '/usr/bin/keystone-fernet-keys-rotate-active',
environment => 'PATH=/bin:/usr/bin:/usr/sbin',
minute => $fernet_keys_rotation_minute,
hour => $fernet_keys_rotation_hour,
month => $fernet_keys_rotation_month,
monthday => $fernet_keys_rotation_monthday,
weekday => $fernet_keys_rotation_weekday,
user => 'root',
}
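# With the class defaults above (minute 25, hour 0, monthday 1,
# month */1, weekday *), this is roughly equivalent to the crontab
# entry: 25 0 1 */1 * /usr/bin/keystone-fernet-keys-rotate-active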
}
} else {
class { '::keystone':
enabled => false,
}
}
}
class openstack::keystone::haproxy
inherits ::openstack::keystone::params {
include ::platform::params
if !$::platform::params::region_config {
platform::haproxy::proxy { 'keystone-restapi':
server_name => 's-keystone',
public_port => $api_port,
private_port => $api_port,
}
}
}
define delete_endpoints (
$region,
$service,
$interfaces,
) {
$rc_file = '/etc/platform/openrc'
$delete_endpoint = 'openstack endpoint delete'
$interfaces.each | String $val | {
$get_endpoint_id = "openstack endpoint list --region ${region} --service ${service} --interface ${val} -f value -c ID"
exec { "Delete ${region} ${service} ${val} endpoint":
command => "source ${rc_file} && ${get_endpoint_id} | xargs ${delete_endpoint}",
logoutput => true,
provider => shell,
}
}
}
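# Illustrative expansion (hypothetical arguments): for region 'RegionOne',
# service 'keystone' and interface 'admin', each iteration runs roughly:
# source /etc/platform/openrc && openstack endpoint list --region RegionOne \
#   --service keystone --interface admin -f value -c ID \
#   | xargs openstack endpoint delete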
class openstack::keystone::api
inherits ::openstack::keystone::params {
include ::platform::params
if ($::openstack::keystone::params::service_create and
$::platform::params::init_keystone) {
include ::keystone::endpoint
include ::openstack::keystone::endpointgroup
# Clean up the endpoints created at bootstrap if they are not in
# the subcloud region.
if ($::platform::params::distributed_cloud_role == 'subcloud' and
$::platform::params::region_2_name != 'RegionOne') {
$interfaces = [ 'public', 'internal', 'admin' ]
Keystone_endpoint<||> -> Class['::platform::client']
# clean up the bootstrap endpoints
-> delete_endpoints { 'Start delete endpoints':
region => 'RegionOne',
service => 'keystone',
interfaces => $interfaces,
}
}
}
include ::openstack::keystone::haproxy
}
class openstack::keystone::bootstrap(
$default_domain = 'Default',
) {
include ::platform::params
include ::platform::amqp::params
include ::platform::drbd::platform::params
$keystone_key_repo_path = "${::platform::drbd::platform::params::mountpoint}/keystone"
$eng_workers = $::platform::params::eng_workers
$bind_host = '[::]'
# In the case of a classical Multi-Region deployment, apply the Keystone
# controller configuration for Primary Region ONLY
# (i.e. on which region_config is False), since Keystone is a Shared service
#
# In the case of a Distributed Cloud deployment, apply the Keystone
# controller configuration for each SubCloud, since Keystone is also
# a localized service.
if ($::platform::params::init_keystone and
(!$::platform::params::region_config or
$::platform::params::distributed_cloud_role == 'subcloud')) {
include ::keystone::db::postgresql
Class[$name] -> Class['::platform::client']
# Create the parent directory for fernet keys repository
file { $keystone_key_repo_path:
ensure => 'directory',
owner => 'root',
group => 'root',
mode => '0755',
require => Class['::platform::drbd::platform'],
}
-> file { '/etc/keystone/keystone-extra.conf':
ensure => present,
owner => 'root',
group => 'keystone',
mode => '0640',
content => template('openstack/keystone-extra.conf.erb'),
}
-> class { '::keystone':
enabled => true,
enable_bootstrap => true,
fernet_key_repository => "${keystone_key_repo_path}/fernet-keys",
sync_db => true,
default_domain => $default_domain,
default_transport_url => $::platform::amqp::params::transport_url,
}
include ::keystone::client
include ::keystone::endpoint
include ::keystone::roles::admin
# Ensure the default _member_ role is present
keystone_role { '_member_':
ensure => present,
}
# disabling the admin token per openstack recommendation
include ::keystone::disable_admin_token_auth
}
}
class openstack::keystone::reload {
platform::sm::restart {'keystone': }
}
class openstack::keystone::endpointgroup
inherits ::openstack::keystone::params {
include ::platform::params
include ::platform::client
# $::platform::params::init_keystone should be checked by the caller,
# as this class should only be invoked when initializing keystone,
# i.e. is_initial_config_primary is expected to be true.
if ($::platform::params::distributed_cloud_role == 'systemcontroller') {
$reference_region = $::openstack::keystone::params::region_name
$system_controller_region = $::openstack::keystone::params::system_controller_region
$os_username = $::platform::client::params::admin_username
$identity_region = $::platform::client::params::identity_region
$keystone_region = $::platform::client::params::keystone_identity_region
$keyring_file = $::platform::client::credentials::params::keyring_file
$auth_url = $::platform::client::params::identity_auth_url
$os_project_name = $::platform::client::params::admin_project_name
$api_version = 3
file { "/etc/keystone/keystone-${reference_region}-filter.conf":
ensure => present,
owner => 'root',
group => 'keystone',
mode => '0640',
content => template('openstack/keystone-defaultregion-filter.erb'),
}
-> file { "/etc/keystone/keystone-${system_controller_region}-filter.conf":
ensure => present,
owner => 'root',
group => 'keystone',
mode => '0640',
content => template('openstack/keystone-systemcontroller-filter.erb'),
}
-> exec { "endpointgroup-${reference_region}-command":
cwd => '/etc/keystone',
logoutput => true,
provider => shell,
require => [ Class['openstack::keystone::api'], Class['::keystone::endpoint'] ],
command => template('openstack/keystone-defaultregion.erb'),
path => ['/usr/bin/', '/bin/', '/sbin/', '/usr/sbin/'],
}
-> exec { "endpointgroup-${system_controller_region}-command":
cwd => '/etc/keystone',
logoutput => true,
provider => shell,
require => [ Class['openstack::keystone::api'], Class['::keystone::endpoint'] ],
command => template('openstack/keystone-systemcontroller.erb'),
path => ['/usr/bin/', '/bin/', '/sbin/', '/usr/sbin/'],
}
}
}
class openstack::keystone::server::runtime {
include ::platform::client
include ::openstack::keystone
class {'::openstack::keystone::reload':
stage => post
}
}
class openstack::keystone::endpoint::runtime {
if str2bool($::is_controller_active) {
include ::keystone::endpoint
include ::sysinv::keystone::auth
include ::patching::keystone::auth
include ::nfv::keystone::auth
include ::fm::keystone::auth
include ::barbican::keystone::auth
if $::platform::params::distributed_cloud_role == 'systemcontroller' {
include ::dcorch::keystone::auth
include ::dcmanager::keystone::auth
include ::dcdbsync::keystone::auth
}
if $::platform::params::distributed_cloud_role == 'subcloud' {
include ::dcdbsync::keystone::auth
}
include ::smapi::keystone::auth
if ($::platform::params::distributed_cloud_role == 'subcloud' and
$::platform::params::region_2_name != 'RegionOne') {
$interfaces = [ 'public', 'internal', 'admin' ]
include ::platform::client
# Clean up the endpoints created at bootstrap if they are not in
# the subcloud region.
Keystone::Resource::Service_identity <||>
-> Class['::platform::client']
-> delete_endpoints { 'Delete keystone endpoints':
region => 'RegionOne',
service => 'keystone',
interfaces => $interfaces,
}
-> delete_endpoints { 'Delete sysinv endpoints':
region => 'RegionOne',
service => 'sysinv',
interfaces => $interfaces,
}
-> delete_endpoints { 'Delete barbican endpoints':
region => 'RegionOne',
service => 'barbican',
interfaces => $interfaces,
}
-> delete_endpoints { 'Delete fm endpoints':
region => 'RegionOne',
service => 'fm',
interfaces => $interfaces,
}
-> file { '/etc/platform/.service_endpoint_reconfigured':
ensure => present,
owner => 'root',
group => 'root',
mode => '0644',
}
} else {
Keystone::Resource::Service_identity <||>
-> file { '/etc/platform/.service_endpoint_reconfigured':
ensure => present,
owner => 'root',
group => 'root',
mode => '0644',
}
}
}
}
class openstack::keystone::upgrade (
$upgrade_token_cmd,
$upgrade_url = undef,
$upgrade_token_file = undef,
) {
if $::platform::params::init_keystone {
include ::keystone::db::postgresql
include ::platform::params
include ::platform::amqp::params
include ::platform::network::mgmt::params
include ::platform::drbd::platform::params
# the unit address is actually the configured default of the loopback address.
$bind_host = $::platform::network::mgmt::params::controller0_address
$eng_workers = $::platform::params::eng_workers
$keystone_key_repo = "${::platform::drbd::platform::params::mountpoint}/keystone"
# TODO(aning): For R5->R6 upgrade, a local keystone fernet keys repository may
# need to be setup for the local keystone instance on standby controller to
# service specific upgrade operations, since we need to keep the keys repository
# in /opt/platform/keystone/fernet-keys intact so that service won't fail on active
# controller during upgrade. Once the upgade finishes, the temparary local
# fernet keys repository will be deleted.
# Need to create the parent directory for fernet keys repository
# This is a workaround to a puppet bug.
file { $keystone_key_repo:
ensure => 'directory',
owner => 'root',
group => 'root',
mode => '0755'
}
-> file { '/etc/keystone/keystone-extra.conf':
ensure => present,
owner => 'root',
group => 'keystone',
mode => '0640',
content => template('openstack/keystone-extra.conf.erb'),
}
-> class { '::keystone':
upgrade_token_cmd => $upgrade_token_cmd,
upgrade_token_file => $upgrade_token_file,
enable_fernet_setup => true,
enable_bootstrap => false,
fernet_key_repository => "${keystone_key_repo}/fernet-keys",
sync_db => false,
default_domain => undef,
default_transport_url => $::platform::amqp::params::transport_url,
}
# Add service account and endpoints for any new R6 services...
# include ::<new service>::keystone::auth
# No new services yet...
# Always remove the upgrade token file after all new
# services have been added
file { $upgrade_token_file :
ensure => absent,
}
include ::keystone::client
}
}


@ -1,14 +0,0 @@
# This file is managed by Puppet.
#
# logrotate.d configuration
# Used in rpm build. Keep in sync with debian/barbican-api.logrotate
/var/log/barbican/barbican-api.log {
nodateext
size 10M
start 1
rotate 20
missingok
compress
notifempty
copytruncate
}


@ -1,21 +0,0 @@
lvremove <%= @cinder_vg_name %> -f || true
pvremove <%= @cinder_device %> --force --force -y || true
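# The two dd invocations below zero the GPT data structures: the first 34
# sectors hold the protective MBR plus the primary GPT header and partition
# entries, and the mirrored backup copy occupies the last 34 sectors.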
dd if=/dev/zero of=<%= @cinder_disk %> bs=512 count=34
size=$(blockdev --getsz <%= @cinder_disk %>)
dd if=/dev/zero of=<%= @cinder_disk %> bs=512 seek=$(($size - 34)) count=34
echo 'Wait for udev on disk before continuing'
udevadm settle
echo 'Create partition table'
parted -a optimal --script <%= @cinder_disk %> -- mktable gpt
echo 'Create primary partition'
parted -a optimal --script <%= @cinder_disk %> -- mkpart primary 2 100%
echo 'Wait for udev before continuing'
udevadm settle
echo 'Wipe'
wipefs -a <%= @cinder_device %>


@ -1,8 +0,0 @@
[horizon_params]
https_enabled = <%= @enable_https %>
[auth]
lockout_period = <%= @lockout_period %>
lockout_retries = <%= @lockout_retries %>
[deployment]
workers = <%= @workers %>


@ -1,4 +0,0 @@
[shared_services]
region_name = <%= @region_2_name %>
openstack_host = <%= @region_openstack_host %>


@ -1,3 +0,0 @@
{
"region_id": "<%=@reference_region %>"
}


@ -1,17 +0,0 @@
PASSWORD=$(TERM=linux <%= @keyring_file %> 2>/dev/null)
ENDPOINTGROUP_ID=$(openstack endpoint group create \
distributed_cloud_<%=@reference_region %> \
keystone-<%=@reference_region %>-filter.conf \
--os-username <%=@os_username %> \
--os-password $PASSWORD \
--os-region-name <%=@identity_region %> \
--os-auth-url <%=@auth_url %> \
--os-identity-api-version <%=@api_version %> \
--os-project-name <%=@os_project_name %> | awk '/id\ \ / { print $4 }' )
openstack endpoint group add project $ENDPOINTGROUP_ID services \
--os-username <%=@os_username %> \
--os-password $PASSWORD \
--os-region-name <%=@identity_region %> \
--os-auth-url <%=@auth_url %> \
--os-identity-api-version <%=@api_version %> \
--os-project-name <%=@os_project_name %>


@ -1,2 +0,0 @@
PUBLIC_BIND_ADDR=<%= @bind_host %>
TIS_PUBLIC_WORKERS=<%=@eng_workers %>


@ -1,32 +0,0 @@
{
"admin_required": "role:admin or is_admin:1",
"service_role": "role:service",
"service_or_admin": "rule:admin_required or rule:service_role",
"owner" : "user_id:%(user_id)s",
"admin_or_owner": "rule:admin_required or rule:owner",
"token_subject": "user_id:%(target.token.user_id)s",
"admin_or_token_subject": "rule:admin_required or rule:token_subject",
"service_admin_or_token_subject": "rule:service_or_admin or rule:token_subject",
"protected_domains": "",
"protected_projects": "'admin':%(target.project.name)s or 'services':%(target.project.name)s",
"protected_admins": "'admin':%(target.user.name)s or 'dcmanager':%(target.user.name)s",
"protected_roles": "'admin':%(target.role.name)s",
"protected_services": [["'barbican':%(target.user.name)s"],
["'patching':%(target.user.name)s"],
["'sysinv':%(target.user.name)s"],
["'mtce':%(target.user.name)s"],
["'fm':%(target.user.name)s"],
["'dcdbsync':%(target.user.name)s"]],
"identity:delete_service": "rule:admin_required and not rule:protected_services",
"identity:delete_domain": "rule:admin_required and not rule:protected_domains",
"identity:delete_project": "rule:admin_required and not rule:protected_projects",
"identity:delete_user": "rule:admin_required and not (rule:protected_admins or rule:protected_services)",
"identity:change_password": "rule:admin_or_owner and not rule:protected_services",
"identity:delete_role": "rule:admin_required and not rule:protected_roles",
}


@ -1,3 +0,0 @@
{
"region_id": "<%=@system_controller_region %>"
}


@ -1,17 +0,0 @@
PASSWORD=$(TERM=linux <%= @keyring_file %> 2>/dev/null)
ENDPOINTGROUP_ID=$(openstack endpoint group create \
distributed_cloud_<%=@system_controller_region %> \
keystone-<%=@system_controller_region %>-filter.conf \
--os-username <%=@os_username %> \
--os-password $PASSWORD \
--os-region-name <%=@identity_region %> \
--os-auth-url <%=@auth_url %> \
--os-identity-api-version <%=@api_version %> \
--os-project-name <%=@os_project_name %> | awk '/id\ \ / { print $4 }' )
openstack endpoint group add project $ENDPOINTGROUP_ID services \
--os-username <%=@os_username %> \
--os-password $PASSWORD \
--os-region-name <%=@identity_region %> \
--os-auth-url <%=@auth_url %> \
--os-identity-api-version <%=@api_version %> \
--os-project-name <%=@os_project_name %>


@ -1,2 +0,0 @@
var.management_ip_network = "<%= @mgmt_subnet_network %>/<%= @mgmt_subnet_prefixlen %>"
var.pxeboot_ip_network = "<%= @pxeboot_subnet_network %>/<%= @pxeboot_subnet_prefixlen %>"


@ -1,397 +0,0 @@
# This file is managed by Puppet. DO NOT EDIT.
# lighttpd configuration file
#
# use it as a base for lighttpd 1.0.0 and above
#
# $Id: lighttpd.conf,v 1.7 2004/11/03 22:26:05 weigon Exp $
############ Options you really have to take care of ####################
## modules to load
# at least mod_access and mod_accesslog should be loaded
# all other modules should only be loaded if really necessary
# - saves some time
# - saves memory
server.modules = (
# "mod_rewrite",
# "mod_redirect",
# "mod_alias",
"mod_access",
# "mod_cml",
# "mod_trigger_b4_dl",
# "mod_auth",
# "mod_status",
# "mod_setenv",
# "mod_fastcgi",
"mod_proxy",
# "mod_simple_vhost",
# "mod_evhost",
# "mod_userdir",
# "mod_cgi",
# "mod_compress",
# "mod_ssi",
# "mod_usertrack",
# "mod_expire",
# "mod_secdownload",
# "mod_rrdtool",
# "mod_webdav",
"mod_setenv",
"mod_accesslog" )
## a static document-root; for virtual-hosting take a look at the
## server.virtual-* options
server.document-root = "/pages/"
## where to send error-messages to
server.errorlog = "/var/log/lighttpd-error.log"
# files to check for if .../ is requested
index-file.names = ( "index.php", "index.html",
"index.htm", "default.htm" )
## set the event-handler (read the performance section in the manual)
# server.event-handler = "freebsd-kqueue" # needed on OS X
# mimetype mapping
mimetype.assign = (
".pdf" => "application/pdf",
".sig" => "application/pgp-signature",
".spl" => "application/futuresplash",
".class" => "application/octet-stream",
".ps" => "application/postscript",
".torrent" => "application/x-bittorrent",
".dvi" => "application/x-dvi",
".gz" => "application/x-gzip",
".pac" => "application/x-ns-proxy-autoconfig",
".swf" => "application/x-shockwave-flash",
".tar.gz" => "application/x-tgz",
".tgz" => "application/x-tgz",
".tar" => "application/x-tar",
".zip" => "application/zip",
".mp3" => "audio/mpeg",
".m3u" => "audio/x-mpegurl",
".wma" => "audio/x-ms-wma",
".wax" => "audio/x-ms-wax",
".ogg" => "application/ogg",
".wav" => "audio/x-wav",
".gif" => "image/gif",
".jpg" => "image/jpeg",
".jpeg" => "image/jpeg",
".png" => "image/png",
".svg" => "image/svg+xml",
".xbm" => "image/x-xbitmap",
".xpm" => "image/x-xpixmap",
".xwd" => "image/x-xwindowdump",
".css" => "text/css",
".html" => "text/html",
".htm" => "text/html",
".js" => "text/javascript",
".asc" => "text/plain",
".c" => "text/plain",
".cpp" => "text/plain",
".log" => "text/plain",
".conf" => "text/plain",
".text" => "text/plain",
".txt" => "text/plain",
".dtd" => "text/xml",
".xml" => "text/xml",
".mpeg" => "video/mpeg",
".mpg" => "video/mpeg",
".mov" => "video/quicktime",
".qt" => "video/quicktime",
".avi" => "video/x-msvideo",
".asf" => "video/x-ms-asf",
".asx" => "video/x-ms-asf",
".wmv" => "video/x-ms-wmv",
".bz2" => "application/x-bzip",
".tbz" => "application/x-bzip-compressed-tar",
".tar.bz2" => "application/x-bzip-compressed-tar",
".rpm" => "application/x-rpm",
".yaml" => "text/yaml",
".cfg" => "text/plain"
)
# Use the "Content-Type" extended attribute to obtain mime type if possible
#mimetype.use-xattr = "enable"
## send a different Server: header
## be nice and keep it at lighttpd
# server.tag = "lighttpd"
#### accesslog module
accesslog.filename = "/var/log/lighttpd-access.log"
## deny access to these file-extensions
#
# ~ is for backup files from vi, emacs, joe, ...
# .inc is often used for code includes which should in general not be part
# of the document-root
url.access-deny = ( "~", ".inc" )
$HTTP["url"] =~ "\.pdf$" {
server.range-requests = "disable"
}
##
# which extensions should not be handled via static-file transfer
#
# .php, .pl, .fcgi are most often handled by mod_fastcgi or mod_cgi
static-file.exclude-extensions = ( ".php", ".pl", ".fcgi" )
######### Options that are good to change but not necessary to change #######
## bind to port (default: 80)
server.port = <%= @http_port %>
## bind to localhost (default: all interfaces)
#server.bind = "grisu.home.kneschke.de"
## error-handler for status 404
#server.error-handler-404 = "/error-handler.html"
#server.error-handler-404 = "/error-handler.php"
## to help the rc.scripts
server.pid-file = "/var/run/lighttpd.pid"
###### virtual hosts
##
## If you want name-based virtual hosting add the next three settings and load
## mod_simple_vhost
##
## document-root =
## virtual-server-root + virtual-server-default-host + virtual-server-docroot
## or
## virtual-server-root + http-host + virtual-server-docroot
##
#simple-vhost.server-root = "/home/weigon/wwwroot/servers/"
#simple-vhost.default-host = "grisu.home.kneschke.de"
#simple-vhost.document-root = "/pages/"
##
## Format: <errorfile-prefix><status-code>.html
## -> ..../status-404.html for 'File not found'
#server.errorfile-prefix = "/home/weigon/projects/lighttpd/doc/status-"
## virtual directory listings
##
## disabled as per Nessus scan CVE: 5.0 40984
## Please do NOT enable as this is a security
## vulnerability. If you want dir listing for
## your dir path then either a) add a dir index (index.html)
## file within your dir path, or b) add your path as an exception
## rule (see the one for the feeds/ dir below)
dir-listing.activate = "disable"
## enable debugging
#debug.log-request-header = "enable"
#debug.log-response-header = "enable"
#debug.log-request-handling = "enable"
#debug.log-file-not-found = "enable"
### only root can use these options
#
# chroot() to directory (default: no chroot() )
server.chroot = "/www"
## change uid to <uid> (default: don't care)
server.username = "www"
## change gid to <gid> (default: don't care)
server.groupname = "sys_protected"
## defaults to /var/tmp
server.upload-dirs = ( "/tmp" )
## change max-keep-alive-idle (default: 5 secs)
server.max-keep-alive-idle = 0
#### compress module
#compress.cache-dir = "/tmp/lighttpd/cache/compress/"
#compress.filetype = ("text/plain", "text/html")
#### proxy module
## read proxy.txt for more info
# Proxy all non-static content to the local horizon dashboard
$HTTP["url"] !~ "^/(rel-[^/]*|feed|updates|static|helm_charts)/" {
proxy.server = ( "" =>
( "localhost" =>
(
"host" => "127.0.0.1",
"port" => 8008
)
)
)
}
#### fastcgi module
## read fastcgi.txt for more info
## for PHP don't forget to set cgi.fix_pathinfo = 1 in the php.ini
#fastcgi.server = ( ".php" =>
# ( "localhost" =>
# (
# "socket" => "/tmp/php-fastcgi.socket",
# "bin-path" => "/usr/local/bin/php"
# )
# )
# )
#### CGI module
#cgi.assign = ( ".pl" => "/usr/bin/perl",
# ".cgi" => "/usr/bin/perl" )
#
#### Listen to IPv6
$SERVER["socket"] == "[::]:<%= @http_port %>" { }
<% if @enable_https %>
#### SSL engine
$SERVER["socket"] == ":<%= @https_port %>" {
ssl.engine = "enable"
ssl.pemfile = "/etc/ssl/private/server-cert.pem"
ssl.use-sslv2 = "disable"
ssl.use-sslv3 = "disable"
ssl.cipher-list = "ALL:!aNULL:!eNULL:!EXPORT:!TLSv1:!DES:!MD5:!PSK:!RC4:!EDH-RSA-DES-CBC3-SHA:!EDH-DSS-DES-CBC3-SHA:!DHE-RSA-AES128-SHA:!DHE-RSA-AES256-SHA:!ECDHE-RSA-DES-CBC3-SHA:!ECDHE-RSA-AES128-SHA:!ECDHE-RSA-AES256-SHA:!DES-CBC3-SHA:!AES128-SHA:!AES256-SHA:!DHE-DSS-AES128-SHA:!DHE-DSS-AES256-SHA:!CAMELLIA128-SHA:!CAMELLIA256-SHA:!DHE-DSS-CAMELLIA128-SHA:!DHE-DSS-CAMELLIA256-SHA:!DHE-RSA-CAMELLIA128-SHA:!DHE-RSA-CAMELLIA256-SHA:!ECDHE-ECDSA-DES-CBC3-SHA:!ECDHE-ECDSA-AES128-SHA:!ECDHE-ECDSA-AES256-SHA"
}
$SERVER["socket"] == "[::]:<%= @https_port %>" {
ssl.engine = "enable"
ssl.pemfile = "/etc/ssl/private/server-cert.pem"
ssl.use-sslv2 = "disable"
ssl.use-sslv3 = "disable"
ssl.cipher-list = "ALL:!aNULL:!eNULL:!EXPORT:!TLSv1:!DES:!MD5:!PSK:!RC4:!EDH-RSA-DES-CBC3-SHA:!EDH-DSS-DES-CBC3-SHA:!DHE-RSA-AES128-SHA:!DHE-RSA-AES256-SHA:!ECDHE-RSA-DES-CBC3-SHA:!ECDHE-RSA-AES128-SHA:!ECDHE-RSA-AES256-SHA:!DES-CBC3-SHA:!AES128-SHA:!AES256-SHA:!DHE-DSS-AES128-SHA:!DHE-DSS-AES256-SHA:!CAMELLIA128-SHA:!CAMELLIA256-SHA:!DHE-DSS-CAMELLIA128-SHA:!DHE-DSS-CAMELLIA256-SHA:!DHE-RSA-CAMELLIA128-SHA:!DHE-RSA-CAMELLIA256-SHA:!ECDHE-ECDSA-DES-CBC3-SHA:!ECDHE-ECDSA-AES128-SHA:!ECDHE-ECDSA-AES256-SHA"
}
<% else %>
###
# HTTPS not enabled
###
<% end %>
#### status module
#status.status-url = "/server-status"
#status.config-url = "/server-config"
#### auth module
## read authentication.txt for more info
#auth.backend = "plain"
#auth.backend.plain.userfile = "lighttpd.user"
#auth.backend.plain.groupfile = "lighttpd.group"
#auth.backend.ldap.hostname = "localhost"
#auth.backend.ldap.base-dn = "dc=my-domain,dc=com"
#auth.backend.ldap.filter = "(uid=$)"
#auth.require = ( "/server-status" =>
# (
# "method" => "digest",
# "realm" => "download archiv",
# "require" => "user=jan"
# ),
# "/server-config" =>
# (
# "method" => "digest",
# "realm" => "download archiv",
# "require" => "valid-user"
# )
# )
#### url handling modules (rewrite, redirect, access)
#url.rewrite = ( "^/$" => "/server-status" )
#url.redirect = ( "^/wishlist/(.+)" => "http://www.123.org/$1" )
#### both rewrite/redirect support back reference to regex conditional using %n
#$HTTP["host"] =~ "^www\.(.*)" {
# url.redirect = ( "^/(.*)" => "http://%1/$1" )
#}
#
# define a pattern for the host url finding
# %% => % sign
# %0 => domain name + tld
# %1 => tld
# %2 => domain name without tld
# %3 => subdomain 1 name
# %4 => subdomain 2 name
#
#evhost.path-pattern = "/home/storage/dev/www/%3/htdocs/"
#### expire module
#expire.url = ( "/buggy/" => "access 2 hours", "/asdhas/" => "access plus 1 seconds 2 minutes")
#### ssi
#ssi.extension = ( ".shtml" )
#### rrdtool
#rrdtool.binary = "/usr/bin/rrdtool"
#rrdtool.db-name = "/var/www/lighttpd.rrd"
#### setenv
#setenv.add-request-header = ( "TRAV_ENV" => "mysql://user@host/db" )
#setenv.add-response-header = ( "X-Secret-Message" => "42" )
## for mod_trigger_b4_dl
# trigger-before-download.gdbm-filename = "/home/weigon/testbase/trigger.db"
# trigger-before-download.memcache-hosts = ( "127.0.0.1:11211" )
# trigger-before-download.trigger-url = "^/trigger/"
# trigger-before-download.download-url = "^/download/"
# trigger-before-download.deny-url = "http://127.0.0.1/index.html"
# trigger-before-download.trigger-timeout = 10
## for mod_cml
## don't forget to add index.cml to server.indexfiles
# cml.extension = ".cml"
# cml.memcache-hosts = ( "127.0.0.1:11211" )
#### variable usage:
## variable name without "." is auto prefixed by "var." and becomes "var.bar"
#bar = 1
#var.mystring = "foo"
## integer add
#bar += 1
## string concat, with integer cast as string, result: "www.foo1.com"
#server.name = "www." + mystring + var.bar + ".com"
## array merge
#index-file.names = (foo + ".php") + index-file.names
#index-file.names += (foo + ".php")
#### include
#include /etc/lighttpd/lighttpd-inc.conf
## same as above if you run: "lighttpd -f /etc/lighttpd/lighttpd.conf"
#include "lighttpd-inc.conf"
#### include_shell
#include_shell "echo var.a=1"
## the above is same as:
#var.a=1
# deny access to feed directories for external connections.
# Only enable access to dir listing for feed directory if on internal network
# (i.e. mgmt or pxeboot networks)
include "/etc/lighttpd/lighttpd-inc.conf"
$HTTP["remoteip"] != "127.0.0.1" {
$HTTP["url"] =~ "^/(rel-[^/]*|feed|updates)/" {
dir-listing.activate = "enable"
}
$HTTP["remoteip"] != var.management_ip_network {
$HTTP["remoteip"] != var.pxeboot_ip_network {
$HTTP["url"] =~ "^/(rel-[^/]*|feed|updates)/" {
url.access-deny = ( "" )
}
}
}
}
$HTTP["scheme"] == "https" {
setenv.add-response-header = ( "Strict-Transport-Security" => "max-age=63072000; includeSubdomains; ")
}
<%- unless @tpm_object.nil? -%>
server.tpm-object = "<%= @tpm_object %>"
server.tpm-engine = "<%= @tpm_engine %>"
<%- end -%>


@ -1,92 +0,0 @@
#!/bin/bash
#
# Startup script for docker-distribution
#
DESC="Docker Distribution aka Docker Registry"
SERVICE="docker-distribution.service"
PIDFILE="/var/run/docker-distribution.pid"
status()
{
if [ "`systemctl is-active docker-distribution.service`" = "active" ]; then
RETVAL=0
echo "$DESC is running"
return
else
echo "$DESC is Not running"
RETVAL=1
fi
}
start()
{
if [ -e $PIDFILE ]; then
PIDDIR=/proc/$(cat $PIDFILE)
if [ -d $PIDDIR ]; then
echo "$DESC already running."
return
else
echo "Removing stale PID file $PIDFILE"
rm -f $PIDFILE
fi
fi
echo "Starting $SERVICE..."
systemctl start $SERVICE
if [ $? -eq 0 ]; then
echo "Started $SERVICE successfully"
RETVAL=0
else
echo "$SERVICE failed!"
RETVAL=1
fi
}
stop()
{
echo -n "Stopping $SERVICE..."
systemctl stop $SERVICE
if [ $? -eq 0 ]; then
echo "$SERVICE stopped."
else
echo "failed to stop $SERVICE!"
fi
if [ -e $PIDFILE ]; then
echo "Removing stale PID file $PIDFILE"
rm -f $PIDFILE
fi
}
case "$1" in
start)
start
;;
stop)
stop
;;
status)
status
;;
restart)
stop
start
;;
*)
echo "Usage: $0 {start|stop|status|restart}"
exit 1
;;
esac
exit $RETVAL


@ -1,103 +0,0 @@
#!/bin/bash
#
# Startup script for etcd
#
# chkconfig: 2345 20 80
# description: Starts and stops etcd systemd service
### BEGIN INIT INFO
# Provides: etcd
# Required-Start: $local_fs $network
# Required-Stop: $local_fs
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: Start up the etcd service
# Description: A highly-available key value store for shared configuration
### END INIT INFO
DESC="ETCD highly-available key value database"
SERVICE="etcd.service"
PIDFILE="/var/run/etcd.pid"
status()
{
if [ "`systemctl is-active etcd.service`" = "active" ]; then
RETVAL=0
echo "$DESC is running"
return
else
echo "$DESC is Not running"
RETVAL=1
fi
}
start()
{
if [ -e $PIDFILE ]; then
PIDDIR=/proc/$(cat $PIDFILE)
if [ -d $PIDDIR ]; then
echo "$DESC already running."
return
else
echo "Removing stale PID file $PIDFILE"
rm -f $PIDFILE
fi
fi
echo "Starting $SERVICE..."
systemctl start $SERVICE
if [ $? -eq 0 ]; then
echo "Started $SERVICE successfully"
RETVAL=0
else
echo "$SERVICE failed!"
RETVAL=1
fi
}
stop()
{
echo -n "Stopping $SERVICE..."
systemctl stop $SERVICE
if [ $? -eq 0 ]; then
echo "$SERVICE stopped."
else
echo "failed to stop $SERVICE!"
fi
if [ -e $PIDFILE ]; then
echo "Removing stale PID file $PIDFILE"
rm -f $PIDFILE
fi
}
case "$1" in
start)
start
;;
stop)
stop
;;
status)
status
;;
restart)
stop
start
;;
*)
echo "Usage: $0 {start|stop|status|restart}"
exit 1
;;
esac
exit $RETVAL


@ -1,9 +0,0 @@
[Service]
EnvironmentFile=-/etc/etcd/etcd.conf
User=root
NotifyAccess=all
Type=notify
ExecStart=
ExecStart=-/bin/bash -c "GOMAXPROCS=$(nproc) /usr/bin/etcd --name=\"${ETCD_NAME}\" --data-dir=\"${ETCD_DATA_DIR}\" --listen-client-urls=\"${ETCD_LISTEN_CLIENT_URLS}\" 2>&1 | /usr/bin/forward-journald -tag etcd"
ExecStartPost=/bin/bash -c 'echo $MAINPID >/var/run/etcd.pid'
ExecStopPost=/bin/rm -f /var/run/etcd.pid


@ -1,7 +0,0 @@
# Check for interactive bash and that we haven't already been sourced.
[ -z "$PS1" -o -n "$KUBECONFIG" ] && return
# Set up the location of the k8s config file for anyone who can read it.
if [ -r /etc/kubernetes/admin.conf ]; then
export KUBECONFIG=/etc/kubernetes/admin.conf
fi


@ -1,4 +0,0 @@
dn: uid=operator,ou=People,dc=cgcs,dc=local
changetype: modify
replace: loginShell
loginShell: /usr/local/bin/cgcs_cli


@ -1,90 +0,0 @@
#!/bin/bash
#
# SPDX-License-Identifier: Apache-2.0
#
# Startup script for registry-token-server
#
DESC="Docker Registry Token Server"
SERVICE="registry-token-server.service"
PIDFILE="/var/run/registry-token-server.pid"
status()
{
if [ "`systemctl is-active registry-token-server.service`" = "active" ]; then
RETVAL=0
echo "$DESC is running"
return
else
echo "$DESC is Not running"
RETVAL=1
fi
}
start()
{
if [ -e $PIDFILE ]; then
PIDDIR=/proc/$(cat $PIDFILE)
if [ -d $PIDDIR ]; then
echo "$DESC already running."
return
else
echo "Removing stale PID file $PIDFILE"
rm -f $PIDFILE
fi
fi
echo "Starting $SERVICE..."
systemctl start $SERVICE
if [ $? -eq 0 ]; then
echo "Started $SERVICE successfully"
RETVAL=0
else
echo "$SERVICE failed!"
RETVAL=1
fi
}
stop()
{
echo -n "Stopping $SERVICE..."
systemctl stop $SERVICE
if [ $? -eq 0 ]; then
echo "$SERVICE stopped."
else
echo "failed to stop $SERVICE!"
fi
if [ -e $PIDFILE ]; then
echo "Removing stale PID file $PIDFILE"
rm -f $PIDFILE
fi
}
case "$1" in
start)
start
;;
stop)
stop
;;
status)
status
;;
restart)
stop
start
;;
*)
echo "Usage: $0 {start|stop|status|restart}"
exit 1
;;
esac
exit $RETVAL


@ -1,5 +0,0 @@
Facter.add("boot_disk_device_path") do
setcode do
Facter::Util::Resolution.exec('find -L /dev/disk/by-path/ -samefile $(df --output=source /boot | tail -1) | tail -1')
end
end


@ -1,8 +0,0 @@
Facter.add("configured_ceph_monitors") do
setcode do
lines = IO.readlines("/etc/ceph/ceph.conf").keep_if { |v| v =~ /\[mon\..*\]/ }
lines.collect do |line|
line.scan(/\[mon\.(.*)\]/).last.first
end
end
end
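# Illustrative example: a ceph.conf containing the sections
# [mon.controller-0] and [mon.controller-1] would yield
# ["controller-0", "controller-1"] (hostnames are hypothetical).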


@ -1,11 +0,0 @@
# Returns true if controllers are running the same software version (or if only
# one controller is configured). Will always return true if:
# 1. Manifests are being applied on any node other than a controller.
# 2. Manifests are being applied as part of a reconfig. Reconfigs cannot be
# done while a system is being upgraded.
Facter.add("controller_sw_versions_match") do
setcode do
! (ENV['CONTROLLER_SW_VERSIONS_MISMATCH'] == "true")
end
end


@ -1,7 +0,0 @@
# Returns true if worker services should be disabled
Facter.add("disable_worker_services") do
setcode do
File.exist?('/var/run/.disable_worker_services')
end
end


@ -1,5 +0,0 @@
# Returns the current boot parameters
Facter.add(:get_cmdline) do
setcode "cat /proc/cmdline 2>/dev/null"
end


@ -1,6 +0,0 @@
Facter.add("install_uuid") do
setcode do
Facter::Util::Resolution.exec("awk -F= '{if ($1 == \"INSTALL_UUID\") { print $2; }}' /etc/platform/platform.conf")
end
end


@ -1,8 +0,0 @@
# Returns true if this is a Broadwell processor
# Broadwell specific flags (model: 79)
Facter.add("is_broadwell_processor") do
setcode do
Facter::Core::Execution.exec('grep -q -E "^model\s+:\s+79$" /proc/cpuinfo')
$?.exitstatus == 0
end
end


@ -1,10 +0,0 @@
# Check if current node is the active controller
require 'facter'
Facter.add("is_controller_active") do
setcode do
Facter::Core::Execution.exec("pgrep -f sysinv-api")
$?.exitstatus == 0
end
end


@ -1,7 +0,0 @@
# Returns true if 1GB pages are supported
Facter.add("is_gb_page_supported") do
setcode do
Facter::Core::Execution.exec('grep -q pdpe1gb /proc/cpuinfo')
$?.exitstatus == 0
end
end


@ -1,7 +0,0 @@
# Returns true if hugetlbfs is enabled
Facter.add("is_hugetlbfs_enabled") do
setcode do
Facter::Core::Execution.exec('grep -q hugetlbfs /proc/filesystems')
$?.exitstatus == 0
end
end


@ -1,7 +0,0 @@
# Returns true if this is the initial config for this node
Facter.add("is_initial_config") do
setcode do
! File.exist?('/etc/platform/.initial_config_complete')
end
end


@ -1,8 +0,0 @@
# Returns true if this is the primary initial config (i.e. the first controller)
Facter.add("is_initial_config_primary") do
setcode do
ENV['INITIAL_CONFIG_PRIMARY'] == "true"
end
end


@ -1,7 +0,0 @@
# Returns true if this is the initial kubernetes config for this node
Facter.add("is_initial_k8s_config") do
setcode do
! File.exist?('/etc/platform/.initial_k8s_config_complete')
end
end


@ -1,6 +0,0 @@
# Returns whether keystone is running on the local host
Facter.add(:is_keystone_running) do
setcode do
Facter::Util::Resolution.exec('pgrep -c -f "\[keystone\-admin\]"') != '0'
end
end


@ -1,7 +0,0 @@
# Returns true if Ceph has been configured on current node
Facter.add("is_node_ceph_configured") do
setcode do
File.exist?('/etc/platform/.node_ceph_configured')
end
end


@ -1,6 +0,0 @@
# Returns true if NUMA topology is exposed on this node
Facter.add("is_per_numa_supported") do
setcode do
Dir.exist?('/sys/devices/system/node/node0')
end
end


@ -1,6 +0,0 @@
require 'facter'
Facter.add(:is_primary_disk_rotational) do
rootfs_partition = Facter::Core::Execution.exec("df --output=source / | tail -1")
rootfs_device = Facter::Core::Execution.exec("basename #{rootfs_partition} | sed 's/[0-9]*$//;s/p[0-9]*$//'")
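# e.g. a rootfs on /dev/sda3 resolves to "sda", and /dev/nvme0n1p2
# resolves to "nvme0n1" (illustrative device names)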
setcode "cat /sys/block/#{rootfs_device}/queue/rotational"
end


@ -1,6 +0,0 @@
# Returns true if Resource Control is supported on this node
Facter.add("is_resctrl_supported") do
setcode do
Dir.exist?('/sys/fs/resctrl')
end
end


@ -1,7 +0,0 @@
# Returns true if restore is in progress
Facter.add("is_restore_in_progress") do
setcode do
File.exist?('/etc/platform/.restore_in_progress')
end
end


@ -1,8 +0,0 @@
# Returns true if this is the only configured controller in the system;
# returns false if both controllers are configured.
Facter.add("is_standalone_controller") do
setcode do
File.exist?('/etc/platform/simplex')
end
end


@ -1,4 +0,0 @@
# Returns number of logical cpus
Facter.add(:number_of_logical_cpus) do
setcode "cat /proc/cpuinfo 2>/dev/null | awk '/^[pP]rocessor/ { n +=1 } END { print (n>0) ? n : 1}'"
end


@ -1,4 +0,0 @@
# Returns number of numa nodes
Facter.add(:number_of_numa_nodes) do
setcode "ls -d /sys/devices/system/node/node* 2>/dev/null | wc -l"
end


@ -1,4 +0,0 @@
# Returns number of physical cores
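# (distinct "physical id" values in /proc/cpuinfo multiplied by the
# "cpu cores" count per package; falls back to 1 if either is missing)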
Facter.add(:physical_core_count) do
setcode "awk '/^cpu cores/ {c=$4} /physical id/ {a[$4]=1} END {n=0; for (i in a) n++; print (n>0 && c>0) ? n*c : 1}' /proc/cpuinfo"
end


@ -1,12 +0,0 @@
# Platform reserved memory is the total normal memory (i.e. 4K memory), in
# MiB, that may be allocated by programs. This total excludes huge-pages and
# kernel overheads.
#
# The 'MemAvailable' field represents total unused memory. This includes:
# free, buffers, cached, and reclaimable slab memory.
#
# The Active(anon) and Inactive(anon) fields represent the total used
# anonymous memory.
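# Worked example with illustrative values: MemAvailable 8000000 kB,
# Active(anon) 1000000 kB and Inactive(anon) 500000 kB sum to 9500000 kB,
# so the fact reports int(9500000 / 1024) = 9277 MiB.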
Facter.add(:platform_res_mem) do
setcode "grep -e '^MemAvailable:' -e '^Active(anon):' -e '^Inactive(anon):' /proc/meminfo | awk '{a+=$2} END{print int(a/1024)}'"
end


@ -1,5 +0,0 @@
Facter.add("system_info") do
setcode do
Facter::Util::Resolution.exec('uname -r')
end
end


@ -1,34 +0,0 @@
module Puppet::Parser::Functions
newfunction(:check_grub_config,
:type => :rvalue,
:doc => <<-EOD
This internal function checks whether a list of arguments is configured
in the current boot args, based on the input parameters
EOD
) do |args|
func_name = "check_grub_config()"
raise(Puppet::ParseError, "#{func_name}: Requires 1 argument" +
"#{args.size} given") if args.size != 1
expected = args[0]
raise(Puppet::ParseError, "#{func_name}: first argument must be a string") \
unless expected.instance_of? String
# get the current boot args
cmd = Facter.value(:get_cmdline)
cmd_array = cmd.split()
value = true
expected.split().each do |element|
value = cmd_array.include?(element)
if value == false
Puppet.debug("#{element} is not presented in #{cmd}")
return value
end
end
value
end
end
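# Hypothetical manifest usage: check_grub_config('intel_iommu=on hugepagesz=1G')
# returns true only when every listed token appears in the boot command line.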


@ -1,156 +0,0 @@
class platform::amqp::params (
$auth_password = 'guest',
$auth_user = 'guest',
$backend = 'rabbitmq',
$node = 'rabbit@localhost',
$host = 'localhost',
$host_url = 'localhost',
$port = 5672,
$protocol = 'tcp',
$ssl_enabled = false,
) {
$transport_url = "rabbit://${auth_user}:${auth_password}@${host_url}:${port}"
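# With the class defaults above this evaluates to:
# rabbit://guest:guest@localhost:5672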
}
class platform::amqp::rabbitmq (
$service_enabled = false,
) inherits ::platform::amqp::params {
include ::platform::params
File <| path == '/etc/rabbitmq/rabbitmq.config' |> {
ensure => present,
owner => 'rabbitmq',
group => 'rabbitmq',
mode => '0640',
}
file { '/var/log/rabbitmq':
ensure => 'directory',
owner => 'root',
group => 'root',
mode => '0755',
}
if $service_enabled {
$service_ensure = 'running'
}
elsif str2bool($::is_initial_config_primary) {
$service_ensure = 'running'
# ensure service is stopped after initial configuration
class { '::platform::amqp::post':
stage => post
}
} else {
$service_ensure = 'stopped'
}
$rabbit_dbdir = "/var/lib/rabbitmq/${::platform::params::software_version}"
class { '::rabbitmq':
port => $port,
ssl => $ssl_enabled,
default_user => $auth_user,
default_pass => $auth_password,
service_ensure => $service_ensure,
rabbitmq_home => $rabbit_dbdir,
environment_variables => {
'RABBITMQ_NODENAME' => $node,
'RABBITMQ_MNESIA_BASE' => "${rabbit_dbdir}/mnesia",
'HOME' => $rabbit_dbdir,
},
config_variables => {
'disk_free_limit' => '100000000',
'heartbeat' => '30',
'tcp_listen_options' => '[binary,
{packet,raw},
{reuseaddr,true},
{backlog,128},
{nodelay,true},
{linger,{true,0}},
{exit_on_close,false},
{keepalive,true}]',
}
}
}
class platform::amqp::post {
# rabbitmq-server needs to be running in order to apply the initial manifest;
# however, it needs to be stopped/disabled to allow SM to manage the service.
# To allow for the transition it must be explicitly stopped. Once puppet
# can directly handle SM managed services, this can be removed.
exec { 'stop rabbitmq-server service':
command => 'systemctl stop rabbitmq-server; systemctl disable rabbitmq-server',
}
}
class platform::amqp::bootstrap {
include ::platform::params
Class['::platform::drbd::rabbit'] -> Class[$name]
class { '::platform::amqp::rabbitmq':
service_enabled => true,
}
# Ensure the rabbit data directory is created in the rabbit filesystem.
$rabbit_dbdir = "/var/lib/rabbitmq/${::platform::params::software_version}"
file { $rabbit_dbdir:
ensure => 'directory',
owner => 'root',
group => 'root',
mode => '0755',
} -> Class['::rabbitmq']
rabbitmq_policy {'notifications_queues_maxlen@/':
require => Class['::rabbitmq'],
pattern => '.*notifications.*',
priority => 0,
applyto => 'queues',
definition => {
'max-length' => '10000',
},
}
rabbitmq_policy {'sample_queues_maxlen@/':
require => Class['::rabbitmq'],
pattern => '.*sample$',
priority => 0,
applyto => 'queues',
definition => {
'max-length' => '100000',
},
}
rabbitmq_policy {'all_queues_ttl@/':
require => Class['::rabbitmq'],
pattern => '.*',
priority => 0,
applyto => 'queues',
definition => {
'expires' => '14400000',
}
}
}
class platform::amqp::upgrade {
include ::platform::params
class { '::platform::amqp::rabbitmq':
service_enabled => true,
}
# Ensure the rabbit data directory is created in the rabbit filesystem.
$rabbit_dbdir = "/var/lib/rabbitmq/${::platform::params::software_version}"
file { $rabbit_dbdir:
ensure => 'directory',
owner => 'root',
group => 'root',
mode => '0755',
} -> Class['::rabbitmq']
}


@ -1,4 +0,0 @@
class platform::anchors {
anchor { 'platform::networking': }
-> anchor { 'platform::services': }
}


@ -1,607 +0,0 @@
class platform::ceph::params(
$service_enabled = false,
$skip_osds_during_restore = false,
$cluster_uuid = undef,
$cluster_name = 'ceph',
$authentication_type = 'none',
$mon_lv_name = 'ceph-mon-lv',
$mon_lv_size = 0,
$mon_lv_size_reserved = 20,
$mon_fs_type = 'ext4',
$mon_fs_options = ' ',
$mon_mountpoint = '/var/lib/ceph/mon',
$floating_mon_host = undef,
$floating_mon_ip = undef,
$floating_mon_addr = undef,
$mon_0_host = undef,
$mon_0_ip = undef,
$mon_0_addr = undef,
$mon_1_host = undef,
$mon_1_ip = undef,
$mon_1_addr = undef,
$mon_2_host = undef,
$mon_2_ip = undef,
$mon_2_addr = undef,
$rgw_enabled = false,
$rgw_client_name = 'radosgw.gateway',
$rgw_user_name = 'root',
$rgw_frontend_type = 'civetweb',
$rgw_port = 7480,
$rgw_log_file = '/var/log/radosgw/radosgw.log',
$rgw_service_domain = undef,
$rgw_service_project = undef,
$rgw_service_password = undef,
$rgw_max_put_size = '53687091200',
$rgw_gc_max_objs = '977',
$rgw_gc_obj_min_wait = '600',
$rgw_gc_processor_max_time = '300',
$rgw_gc_processor_period = '300',
$configure_ceph_mon_info = false,
$ceph_config_file = '/etc/ceph/ceph.conf',
$ceph_config_ready_path = '/var/run/.ceph_started',
$node_ceph_configured_flag = '/etc/platform/.node_ceph_configured',
) { }
class platform::ceph
inherits ::platform::ceph::params {
$system_mode = $::platform::params::system_mode
$system_type = $::platform::params::system_type
if $service_enabled or $configure_ceph_mon_info {
# Set the minimum set of monitors that form a valid cluster
if $system_type == 'All-in-one' {
if $system_mode == 'simplex' {
# 1 node configuration, a single monitor is available
$mon_initial_members = $mon_0_host
$osd_pool_default_size = 1
} else {
# 2 node configuration, we have a floating monitor
$mon_initial_members = $floating_mon_host
$osd_pool_default_size = 2
}
} else {
# Multinode & standard, any 2 monitors form a cluster
$mon_initial_members = undef
$osd_pool_default_size = 2
}
class { '::ceph':
fsid => $cluster_uuid,
authentication_type => $authentication_type,
mon_initial_members => $mon_initial_members,
osd_pool_default_size => $osd_pool_default_size,
osd_pool_default_min_size => 1
}
-> ceph_config {
'mon/mon clock drift allowed': value => '.1';
}
if $system_type == 'All-in-one' {
# 1 and 2 node configurations have a single monitor
if 'duplex' in $system_mode {
# Floating monitor, running on active controller.
Class['::ceph']
-> ceph_config {
"mon.${floating_mon_host}/host": value => $floating_mon_host;
"mon.${floating_mon_host}/mon_addr": value => $floating_mon_addr;
}
} else {
# Simplex case, a single monitor bound to the controller.
Class['::ceph']
-> ceph_config {
"mon.${mon_0_host}/host": value => $mon_0_host;
"mon.${mon_0_host}/mon_addr": value => $mon_0_addr;
}
}
} else {
# Multinode & standard have 3 monitors
Class['::ceph']
-> ceph_config {
"mon.${mon_0_host}/host": value => $mon_0_host;
"mon.${mon_0_host}/mon_addr": value => $mon_0_addr;
"mon.${mon_1_host}/host": value => $mon_1_host;
"mon.${mon_1_host}/mon_addr": value => $mon_1_addr;
}
if $mon_2_host {
Class['::ceph']
-> ceph_config {
"mon.${mon_2_host}/host": value => $mon_2_host;
"mon.${mon_2_host}/mon_addr": value => $mon_2_addr;
}
}
}
# Remove old, no longer in use, monitor hosts from Ceph's config file
$valid_monitors = [ $mon_0_host, $mon_1_host, $mon_2_host ]
$::configured_ceph_monitors.each |Integer $index, String $monitor| {
if ! ($monitor in $valid_monitors) {
notice("Removing ${monitor} from ${ceph_config_file}")
# Remove all monitor settings of a section
$mon_settings = {
"mon.${monitor}" => {
'public_addr' => { 'ensure' => 'absent' },
'host' => { 'ensure' => 'absent' },
'mon_addr' => { 'ensure' => 'absent' },
}
}
$defaults = { 'path' => $ceph_config_file }
create_ini_settings($mon_settings, $defaults)
# Remove section header
Ini_setting<| |>
-> file_line { "[mon.${monitor}]":
ensure => absent,
path => $ceph_config_file,
line => "[mon.${monitor}]"
}
}
}
}
class { '::platform::ceph::post':
stage => post
}
}
class platform::ceph::post
inherits ::platform::ceph::params {
# Enable ceph process recovery after all configuration is done
file { $ceph_config_ready_path:
ensure => present,
content => '',
owner => 'root',
group => 'root',
mode => '0644',
}
if $service_enabled {
# Ceph configuration on this node is done
file { $node_ceph_configured_flag:
ensure => present
}
}
}
class platform::ceph::monitor
inherits ::platform::ceph::params {
$system_mode = $::platform::params::system_mode
$system_type = $::platform::params::system_type
if $service_enabled {
if $system_type == 'All-in-one' and 'duplex' in $system_mode {
if str2bool($::is_standalone_controller) {
# Ceph mon is configured on a DRBD partition,
# when 'ceph' storage backend is added in sysinv.
# Then SM takes care of starting ceph after manifests are applied.
$configure_ceph_mon = true
} else {
$configure_ceph_mon = false
}
} else {
# Simplex, multinode. Ceph is pmon managed.
if $::hostname == $mon_0_host or $::hostname == $mon_1_host or $::hostname == $mon_2_host {
$configure_ceph_mon = true
} else {
$configure_ceph_mon = false
}
}
} else {
$configure_ceph_mon = false
}
if $::personality == 'worker' and ! $configure_ceph_mon {
# Reserve space for ceph-mon on all worker nodes.
include ::platform::filesystem::params
logical_volume { $mon_lv_name:
ensure => present,
volume_group => $::platform::filesystem::params::vg_name,
size => "${mon_lv_size_reserved}G",
} -> Class['platform::filesystem::docker']
}
if $configure_ceph_mon {
file { '/var/lib/ceph':
ensure => 'directory',
owner => 'root',
group => 'root',
mode => '0755',
}
if $system_type == 'All-in-one' and 'duplex' in $system_mode {
# ensure DRBD config is complete before enabling the ceph monitor
Drbd::Resource <| |> -> Class['::ceph']
} else {
File['/var/lib/ceph']
-> platform::filesystem { $mon_lv_name:
lv_name => $mon_lv_name,
lv_size => $mon_lv_size,
mountpoint => $mon_mountpoint,
fs_type => $mon_fs_type,
fs_options => $mon_fs_options,
} -> Class['::ceph']
if $::personality == 'worker' {
Platform::Filesystem[$mon_lv_name] -> Class['platform::filesystem::docker']
}
file { '/etc/pmon.d/ceph.conf':
ensure => link,
target => '/etc/ceph/ceph.conf.pmon',
owner => 'root',
group => 'root',
mode => '0640',
}
}
# ensure configuration is complete before creating monitors
Class['::ceph'] -> Ceph::Mon <| |>
# ensure we load the crushmap at first unlock
if $system_type == 'All-in-one' and str2bool($::is_standalone_controller) {
if 'duplex' in $system_mode {
$crushmap_txt = '/etc/sysinv/crushmap-controller-model.txt'
} else {
$crushmap_txt = '/etc/sysinv/crushmap-aio-sx.txt'
}
$crushmap_bin = '/etc/sysinv/crushmap.bin'
$crushmap_bin_backup = '/etc/sysinv/crushmap.bin.backup'
Ceph::Mon <| |>
-> exec { 'Copy crushmap if backup exists':
command => "mv -f ${crushmap_bin_backup} ${crushmap_bin}",
onlyif => "test -f ${crushmap_bin_backup}",
}
-> exec { 'Compile crushmap':
command => "crushtool -c ${crushmap_txt} -o ${crushmap_bin}",
onlyif => "test ! -f ${crushmap_bin}",
logoutput => true,
}
-> exec { 'Set crushmap':
command => "ceph osd setcrushmap -i ${crushmap_bin}",
unless => 'ceph osd crush rule list --format plain | grep -e "storage_tier_ruleset"',
logoutput => true,
}
-> Platform_ceph_osd <| |>
}
# Ensure networking is up before Monitors are configured
Anchor['platform::networking'] -> Ceph::Mon <| |>
# default configuration for all ceph monitor resources
Ceph::Mon {
fsid => $cluster_uuid,
authentication_type => $authentication_type,
service_ensure => 'running'
}
if $system_type == 'All-in-one' and 'duplex' in $system_mode {
ceph::mon { $floating_mon_host:
public_addr => $floating_mon_ip,
}
# On AIO-DX there is a single, floating, Ceph monitor backed by DRBD.
# Therefore DRBD must be up before Ceph monitor is configured
Drbd::Resource <| |> -> Ceph::Mon <| |>
} else {
if $::hostname == $mon_0_host {
ceph::mon { $mon_0_host:
public_addr => $mon_0_ip,
}
}
elsif $::hostname == $mon_1_host {
ceph::mon { $mon_1_host:
public_addr => $mon_1_ip,
}
}
elsif $::hostname == $mon_2_host {
ceph::mon { $mon_2_host:
public_addr => $mon_2_ip,
}
}
}
}
}
define osd_crush_location(
$osd_id,
$osd_uuid,
$disk_path,
$data_path,
$journal_path,
$tier_name,
) {
ceph_config {
"osd.${osd_id}/devs": value => $data_path;
}
# Only set the crush location for additional tiers
if $tier_name != 'storage' {
ceph_config {
"osd.${osd_id}/host": value => "${::platform::params::hostname}-${tier_name}";
"osd.${osd_id}/crush_location": value => "root=${tier_name}-tier host=${::platform::params::hostname}-${tier_name}";
}
}
}
define platform_ceph_osd(
$osd_id,
$osd_uuid,
$disk_path,
$data_path,
$journal_path,
$tier_name,
) {
Anchor['platform::networking'] # Make sure networking is up before running ceph commands
-> file { "/var/lib/ceph/osd/ceph-${osd_id}":
ensure => 'directory',
owner => 'root',
group => 'root',
mode => '0755',
}
-> exec { "ceph osd create ${osd_uuid} ${osd_id}":
logoutput => true,
command => template('platform/ceph.osd.create.erb'),
}
-> ceph::osd { $disk_path:
uuid => $osd_uuid,
osdid => $osd_id,
}
-> exec { "configure journal location ${name}":
logoutput => true,
command => template('platform/ceph.journal.location.erb')
}
}
define platform_ceph_journal(
$disk_path,
$journal_sizes,
) {
exec { "configure journal partitions ${name}":
logoutput => true,
command => template('platform/ceph.journal.partitions.erb')
}
}
class platform::ceph::osds(
$osd_config = {},
$journal_config = {},
) inherits ::platform::ceph::params {
# skip_osds_during_restore is set to true when the default primary
# ceph backend "ceph-store" has "restore" as its task and it is
# not an AIO system.
if ! $skip_osds_during_restore {
file { '/var/lib/ceph/osd':
ensure => 'directory',
path => '/var/lib/ceph/osd',
owner => 'root',
group => 'root',
mode => '0755',
}
# Ensure ceph.conf is complete before configuring OSDs
Class['::ceph'] -> Platform_ceph_osd <| |>
# Journal disks need to be prepared before the OSDs are configured
Platform_ceph_journal <| |> -> Platform_ceph_osd <| |>
# Crush locations in ceph.conf need to be set before the OSDs are configured
Osd_crush_location <| |> -> Platform_ceph_osd <| |>
# default configuration for all ceph object resources
Ceph::Osd {
cluster => $cluster_name,
cluster_uuid => $cluster_uuid,
}
create_resources('osd_crush_location', $osd_config)
create_resources('platform_ceph_osd', $osd_config)
create_resources('platform_ceph_journal', $journal_config)
}
}
class platform::ceph::haproxy
inherits ::platform::ceph::params {
if $rgw_enabled {
platform::haproxy::proxy { 'ceph-radosgw-restapi':
server_name => 's-ceph-radosgw',
public_port => $rgw_port,
private_port => $rgw_port,
}
}
}
class platform::ceph::rgw::keystone (
$swift_endpts_enabled = false,
$rgw_admin_domain = undef,
$rgw_admin_project = undef,
$rgw_admin_user = 'swift',
$rgw_admin_password = undef,
) inherits ::platform::ceph::params {
include ::openstack::keystone::params
if $rgw_enabled {
if $swift_endpts_enabled {
$url = $::openstack::keystone::params::openstack_auth_uri
} else {
$url = $::openstack::keystone::params::auth_uri
}
ceph::rgw::keystone { $rgw_client_name:
# keystone admin token is disabled after initial keystone configuration
# for security reasons. Use keystone service tenant credentials instead.
rgw_keystone_admin_token => '',
rgw_keystone_url => $url,
rgw_keystone_version => $::openstack::keystone::params::api_version,
rgw_keystone_accepted_roles => 'admin,_member_',
user => $rgw_user_name,
use_pki => false,
rgw_keystone_revocation_interval => 0,
rgw_keystone_token_cache_size => 0,
rgw_keystone_admin_domain => $rgw_admin_domain,
rgw_keystone_admin_project => $rgw_admin_project,
rgw_keystone_admin_user => $rgw_admin_user,
rgw_keystone_admin_password => $rgw_admin_password,
}
}
}
class platform::ceph::rgw
inherits ::platform::ceph::params {
include ::ceph::params
include ::ceph::profile::params
if $rgw_enabled {
include ::platform::params
include ::openstack::keystone::params
$auth_host = $::openstack::keystone::params::host_url
ceph::rgw { $rgw_client_name:
user => $rgw_user_name,
frontend_type => $rgw_frontend_type,
rgw_frontends => "${rgw_frontend_type} port=${auth_host}:${rgw_port}",
# service is managed by SM
rgw_enable => false,
rgw_ensure => false,
# The location of the log file should be the same as what's specified in
# /etc/logrotate.d/radosgw in order for log rotation to work properly
log_file => $rgw_log_file,
}
include ::platform::ceph::rgw::keystone
ceph_config {
# increase limit for single operation uploading to 50G (50*1024*1024*1024)
"client.${rgw_client_name}/rgw_max_put_size": value => $rgw_max_put_size;
# increase frequency and scope of garbage collection
"client.${rgw_client_name}/rgw_gc_max_objs": value => $rgw_gc_max_objs;
"client.${rgw_client_name}/rgw_gc_obj_min_wait": value => $rgw_gc_obj_min_wait;
"client.${rgw_client_name}/rgw_gc_processor_max_time": value => $rgw_gc_processor_max_time;
"client.${rgw_client_name}/rgw_gc_processor_period": value => $rgw_gc_processor_period;
}
}
include ::platform::ceph::haproxy
}
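# For reference, assuming a hypothetical client name of 'radosgw.gateway'
# and the 50G limit noted above (50*1024*1024*1024 = 53687091200), the
# ceph_config resources render in ceph.conf roughly as:
#   [client.radosgw.gateway]
#   rgw_max_put_size = 53687091200
# with the rgw_gc_* values likewise taken from the class parameters.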
class platform::ceph::worker {
if $::personality == 'worker' {
include ::platform::ceph
include ::platform::ceph::monitor
}
}
class platform::ceph::storage {
include ::platform::ceph
include ::platform::ceph::monitor
include ::platform::ceph::osds
# Ensure partitions update prior to ceph storage configuration
Class['::platform::partitions'] -> Class['::platform::ceph::osds']
}
class platform::ceph::controller {
include ::platform::ceph
include ::platform::ceph::monitor
include ::platform::ceph::osds
# Ensure partitions update prior to ceph storage configuration
Class['::platform::partitions'] -> Class['::platform::ceph::osds']
}
class platform::ceph::runtime_base {
include ::platform::ceph::monitor
include ::platform::ceph
# Make sure mgr-restful-plugin is running as it is needed by sysinv config
# TODO(oponcea): Remove when sm supports in-service config reload
if str2bool($::is_controller_active) {
Ceph::Mon <| |>
-> exec { '/etc/init.d/mgr-restful-plugin start':
command => '/etc/init.d/mgr-restful-plugin start',
logoutput => true,
}
}
}
class platform::ceph::runtime_osds {
include ::ceph::params
include ::platform::ceph
include ::platform::ceph::osds
# Since this is applied at runtime we have to avoid checking the status of
# Ceph while we configure it. On AIO-DX the ceph-osd processes are monitored
# by SM; on other deployments they are managed by pmon.
$system_mode = $::platform::params::system_mode
$system_type = $::platform::params::system_type
if $system_type == 'All-in-one' and 'duplex' in $system_mode {
exec { 'sm-unmanage service ceph-osd':
command => 'sm-unmanage service ceph-osd'
}
-> Class['::platform::ceph::osds']
-> exec { 'start Ceph OSDs':
command => '/etc/init.d/ceph-init-wrapper start osd'
}
-> exec { 'sm-manage service ceph-osd':
command => 'sm-manage service ceph-osd'
}
} else {
exec { 'remove /etc/pmon.d/ceph.conf':
command => 'rm -f /etc/pmon.d/ceph.conf'
}
-> Class['::platform::ceph::osds']
-> exec { 'start Ceph OSDs':
command => '/etc/init.d/ceph-init-wrapper start osd'
}
-> file { 'link /etc/pmon.d/ceph.conf':
ensure => link,
path => '/etc/pmon.d/ceph.conf',
target => '/etc/ceph/ceph.conf.pmon',
owner => 'root',
group => 'root',
mode => '0640',
}
}
}
# Used to configure optional radosgw platform service
class platform::ceph::rgw::runtime
inherits ::platform::ceph::params {
include platform::ceph::rgw
# Make sure the ceph configuration is complete before sm dynamically
# provisions/deprovisions the service
Class[$name] -> Class['::platform::sm::rgw::runtime']
unless $rgw_enabled {
# SM's current behavior does not stop the service when it is de-provisioned,
# so stop it here when needed
exec { 'Stopping ceph-radosgw service':
command => '/etc/init.d/ceph-radosgw stop'
}
}
}
# Used to configure radosgw keystone info based on containerized swift endpoints
# being enabled/disabled
class platform::ceph::rgw::keystone::runtime
inherits ::platform::ceph::params {
include ::platform::ceph::rgw::keystone
exec { 'sm-restart-safe service ceph-radosgw':
command => 'sm-restart-safe service ceph-radosgw'
}
}

View File

@ -1,80 +0,0 @@
class platform::client::params (
$admin_username,
$identity_auth_url,
$identity_region = 'RegionOne',
$identity_api_version = 3,
$admin_user_domain = 'Default',
$admin_project_domain = 'Default',
$admin_project_name = 'admin',
$admin_password = undef,
$keystone_identity_region = 'RegionOne',
) { }
class platform::client
inherits ::platform::client::params {
include ::platform::client::credentials::params
$keyring_file = $::platform::client::credentials::params::keyring_file
file {'/etc/platform/openrc':
ensure => 'present',
mode => '0640',
owner => 'root',
group => 'root',
content => template('platform/openrc.admin.erb'),
}
-> file {'/etc/bash_completion.d/openstack':
ensure => 'present',
mode => '0644',
content => generate('/usr/bin/openstack', 'complete'),
}
if $::personality == 'controller' {
file {'/etc/ssl/private/openstack':
ensure => 'directory',
owner => 'root',
group => 'root',
mode => '0755',
}
}
}
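# Note: the rendered /etc/platform/openrc follows the usual OpenStack RC
# convention; with the hypothetical values below it would export lines like:
#   export OS_AUTH_URL=http://192.168.204.2:5000/v3
#   export OS_USERNAME=admin
#   export OS_PROJECT_NAME=admin
# The exact contents are defined by the openrc.admin.erb template.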
class platform::client::credentials::params (
$keyring_base,
$keyring_directory,
$keyring_file,
) { }
class platform::client::credentials
inherits ::platform::client::credentials::params {
Class['::platform::drbd::platform']
-> file { $keyring_base:
ensure => 'directory',
owner => 'root',
group => 'root',
mode => '0755',
}
-> file { $keyring_directory:
ensure => 'directory',
owner => 'root',
group => 'root',
mode => '0755',
}
-> file { $keyring_file:
ensure => 'file',
owner => 'root',
group => 'root',
mode => '0755',
content => 'keyring get CGCS admin'
}
}
class platform::client::bootstrap {
include ::platform::client
include ::platform::client::credentials
}
class platform::client::upgrade {
include ::platform::client
}

View File

@ -1,64 +0,0 @@
class platform::collectd::params (
$interval = undef,
$timeout = undef,
$read_threads = undef,
$write_threads = undef,
$write_queue_limit_high = undef,
$write_queue_limit_low = undef,
$server_addrs = [],
$server_port = undef,
$max_read_interval = undef,
# python plugin controls
$module_path = undef,
$plugins = [],
$mtce_notifier_port = undef,
$log_traces = undef,
$encoding = undef,
$collectd_d_dir = undef,
) {}
class platform::collectd
inherits ::platform::collectd::params {
file { '/etc/collectd.conf':
ensure => 'present',
replace => true,
content => template('platform/collectd.conf.erb'),
} # now start collectd
-> exec { 'collectd-enable':
command => 'systemctl enable collectd',
unless => 'systemctl is-enabled collectd'
}
# ensure that collectd is running
-> service { 'collectd':
ensure => running,
provider => 'systemd',
require => Anchor['platform::networking'],
} # now get pmond to monitor the process
# ensure pmon soft link for process monitoring
-> file { '/etc/pmon.d/collectd.conf':
ensure => 'link',
target => '/opt/collectd/extensions/config/collectd.conf.pmon',
owner => 'root',
group => 'root',
mode => '0600',
}
}
class platform::collectd::runtime {
include ::platform::collectd
}
# restart target
class platform::collectd::restart {
include ::platform::collectd
exec { 'collectd-restart':
command => '/usr/local/sbin/pmon-restart collectd'
}
}

View File

@ -1,398 +0,0 @@
class platform::compute::params (
$worker_cpu_list = '',
$platform_cpu_list = '',
$reserved_vswitch_cores = '',
$reserved_platform_cores = '',
$worker_base_reserved = '',
$compute_vswitch_reserved = '',
) { }
class platform::compute::config
inherits ::platform::compute::params {
include ::platform::collectd::restart
file { '/etc/platform/worker_reserved.conf':
ensure => 'present',
replace => true,
content => template('platform/worker_reserved.conf.erb')
}
-> Exec['collectd-restart']
if $::platform::params::system_type != 'All-in-one' {
file { '/etc/systemd/system.conf.d/platform-cpuaffinity.conf':
ensure => 'present',
replace => true,
content => template('platform/systemd-system-cpuaffinity.conf.erb')
}
}
}
class platform::compute::config::runtime {
include ::platform::compute::config
}
class platform::compute::grub::params (
$n_cpus = '',
$cpu_options = '',
$m_hugepages = 'hugepagesz=2M hugepages=0',
$g_hugepages = undef,
$default_pgsz = 'default_hugepagesz=2M',
$keys = [
'kvm-intel.eptad',
'default_hugepagesz',
'hugepagesz',
'hugepages',
'isolcpus',
'nohz_full',
'rcu_nocbs',
'kthread_cpus',
'irqaffinity',
],
) {
if $::is_broadwell_processor {
$eptad = 'kvm-intel.eptad=0'
} else {
$eptad = ''
}
if $::is_gb_page_supported and $::platform::params::vswitch_type != 'none' {
if $g_hugepages != undef {
$gb_hugepages = $g_hugepages
} else {
$gb_hugepages = "hugepagesz=1G hugepages=${::number_of_numa_nodes}"
}
} else {
$gb_hugepages = ''
}
$grub_updates = strip("${eptad} ${gb_hugepages} ${m_hugepages} ${default_pgsz} ${cpu_options}")
}
class platform::compute::grub::update
inherits ::platform::compute::grub::params {
notice('Updating grub configuration')
$to_be_removed = join($keys, ' ')
exec { 'Remove the cpu arguments':
command => "grubby --update-kernel=ALL --remove-args='${to_be_removed}'",
}
-> exec { 'Add the cpu arguments':
command => "grubby --update-kernel=ALL --args='${grub_updates}'",
}
}
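# Illustrative expansion (assumed values): on a Broadwell node with two NUMA
# nodes, a vswitch configured and default parameters, $grub_updates
# evaluates to
#   kvm-intel.eptad=0 hugepagesz=1G hugepages=2 hugepagesz=2M hugepages=0 default_hugepagesz=2M
# so the execs above first strip every key listed in $keys and then run:
#   grubby --update-kernel=ALL --args='kvm-intel.eptad=0 hugepagesz=1G hugepages=2 hugepagesz=2M hugepages=0 default_hugepagesz=2M'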
class platform::compute::grub::recovery {
notice('Update Grub and Reboot')
class { 'platform::compute::grub::update': } -> Exec['reboot-recovery']
exec { 'reboot-recovery':
command => 'reboot',
}
}
class platform::compute::grub::audit
inherits ::platform::compute::grub::params {
if ! str2bool($::is_initial_config_primary) {
notice('Audit CPU and Grub Configuration')
$expected_n_cpus = Integer($::number_of_logical_cpus)
$n_cpus_ok = ($n_cpus == $expected_n_cpus)
$cmd_ok = check_grub_config($grub_updates)
if $cmd_ok and $n_cpus_ok {
$ensure = present
notice('CPU and Boot Argument audit passed.')
} else {
$ensure = absent
if !$cmd_ok {
notice('Kernel Boot Argument Mismatch')
include ::platform::compute::grub::recovery
} else {
notice("Mismatched CPUs: Found=${n_cpus}, Expected=${expected_n_cpus}")
}
}
file { '/var/run/worker_goenabled':
ensure => $ensure,
owner => 'root',
group => 'root',
mode => '0644',
}
}
}
class platform::compute::grub::runtime {
include ::platform::compute::grub::update
}
# Mounts virtual hugetlbfs filesystems for each supported page size
class platform::compute::hugetlbf {
if str2bool($::is_hugetlbfs_enabled) {
$fs_list = generate('/bin/bash', '-c', 'ls -1d /sys/kernel/mm/hugepages/hugepages-*')
$array = split($fs_list, '\n')
$array.each | String $val | {
$page_name = generate('/bin/bash', '-c', "basename ${val}")
$page_size = strip(regsubst($page_name, 'hugepages-', ''))
$hugemnt = "/mnt/huge-${page_size}"
$options = "pagesize=${page_size}"
# TODO: Once all the code is switched over to use the /dev
# mount point we can get rid of this mount point.
notice("Mounting hugetlbfs at: ${hugemnt}")
exec { "create ${hugemnt}":
command => "mkdir -p ${hugemnt}",
onlyif => "test ! -d ${hugemnt}",
}
-> mount { $hugemnt:
ensure => 'mounted',
device => 'none',
fstype => 'hugetlbfs',
name => $hugemnt,
options => $options,
atboot => 'yes',
remounts => true,
}
# The libvirt helm chart expects hugepages to be mounted
# under /dev so let's do that.
$hugemnt2 = "/dev/huge-${page_size}"
notice("Mounting hugetlbfs at: ${hugemnt2}")
file { $hugemnt2:
ensure => 'directory',
owner => 'root',
group => 'root',
mode => '0755',
}
-> mount { $hugemnt2:
ensure => 'mounted',
device => 'none',
fstype => 'hugetlbfs',
name => $hugemnt2,
options => $options,
atboot => 'yes',
remounts => true,
}
}
# The libvirt helm chart also assumes that the default hugepage size
# will be mounted at /dev/hugepages so let's make that happen too.
# Once we upstream a fix to the helm chart to automatically determine
# the mountpoint then we can remove this.
$page_size = '2M'
$hugemnt = '/dev/hugepages'
$options = "pagesize=${page_size}"
notice("Mounting hugetlbfs at: ${hugemnt}")
exec { "create ${hugemnt}":
command => "mkdir -p ${hugemnt}",
onlyif => "test ! -d ${hugemnt}",
}
-> mount { $hugemnt:
ensure => 'mounted',
device => 'none',
fstype => 'hugetlbfs',
name => $hugemnt,
options => $options,
atboot => 'yes',
remounts => true,
}
}
}
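# Illustrative result: for a 2M page size the kernel exposes
# /sys/kernel/mm/hugepages/hugepages-2048kB, so the loop above produces
#   none /mnt/huge-2048kB hugetlbfs pagesize=2048kB
#   none /dev/huge-2048kB hugetlbfs pagesize=2048kB
# in addition to the fixed default mount at /dev/hugepages (pagesize=2M).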
# lint:ignore:variable_is_lowercase
class platform::compute::hugepage::params (
$nr_hugepages_2M = undef,
$nr_hugepages_1G = undef,
$vswitch_2M_pages = '',
$vswitch_1G_pages = '',
$vm_4K_pages = '',
$vm_2M_pages = '',
$vm_1G_pages = '',
) {}
define allocate_pages (
$path,
$page_count,
) {
exec { "Allocate ${page_count} ${path}":
command => "echo ${page_count} > ${path}",
onlyif => "test -f ${path}",
}
}
# Allocates HugeTLB memory according to the attributes specified in the
# nr_hugepages_2M and nr_hugepages_1G parameters
class platform::compute::allocate
inherits ::platform::compute::hugepage::params {
# determine the node file system
if str2bool($::is_per_numa_supported) {
$nodefs = '/sys/devices/system/node'
} else {
$nodefs = '/sys/kernel/mm'
}
if $nr_hugepages_2M != undef {
$nr_hugepages_2M_array = regsubst($nr_hugepages_2M, '[\(\)\"]', '', 'G').split(' ')
$nr_hugepages_2M_array.each | String $val | {
$per_node_2M = $val.split(':')
if size($per_node_2M) == 3 {
$node = $per_node_2M[0]
$page_size = $per_node_2M[1]
allocate_pages { "Start ${node} ${page_size}":
path => "${nodefs}/${node}/hugepages/hugepages-${page_size}/nr_hugepages",
page_count => $per_node_2M[2],
}
}
}
}
if $nr_hugepages_1G != undef {
$nr_hugepages_1G_array = regsubst($nr_hugepages_1G, '[\(\)\"]', '', 'G').split(' ')
$nr_hugepages_1G_array.each | String $val | {
$per_node_1G = $val.split(':')
if size($per_node_1G) == 3 {
$node = $per_node_1G[0]
$page_size = $per_node_1G[1]
allocate_pages { "Start ${node} ${page_size}":
path => "${nodefs}/${node}/hugepages/hugepages-${page_size}/nr_hugepages",
page_count => $per_node_1G[2],
}
}
}
}
}
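# Illustrative input format (an assumption inferred from the parsing above):
#   nr_hugepages_2M => '("node0:2048kB:512" "node1:2048kB:512")'
# On a per-NUMA capable host this writes 512 to
#   /sys/devices/system/node/node0/hugepages/hugepages-2048kB/nr_hugepages
# and the equivalent path for node1.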
# lint:endignore:variable_is_lowercase
# Mount resctrl to allow Cache Allocation Technology per VM
class platform::compute::resctrl {
if str2bool($::is_resctrl_supported) {
mount { '/sys/fs/resctrl':
ensure => 'mounted',
device => 'resctrl',
fstype => 'resctrl',
name => '/sys/fs/resctrl',
atboot => 'yes',
remounts => true,
}
}
}
# Set Power Management QoS resume latency constraints for CPUs.
# The PM QoS resume latency limit is set to shallow C-state for vswitch CPUs.
# All other CPUs are allowed to go to the deepest C-state available.
class platform::compute::pmqos (
$low_wakeup_cpus = '',
$hight_wakeup_cpus = '',
) {
if str2bool($::is_worker_subfunction) and str2bool($::is_lowlatency_subfunction) {
$script = '/usr/bin/set-cpu-wakeup-latency.sh'
if $low_wakeup_cpus != '""' {
# Set low wakeup latency (shallow C-state) for vswitch CPUs using PM QoS interface
exec { 'low-wakeup-latency':
command => "${script} low ${low_wakeup_cpus}",
onlyif => "test -f ${script}",
logoutput => true,
}
}
if $hight_wakeup_cpus != '""' {
# Set high wakeup latency (deep C-state) for non-vswitch CPUs using PM QoS interface
exec { 'high-wakeup-latency':
command => "${script} high ${hight_wakeup_cpus}",
onlyif => "test -f ${script}",
logoutput => true,
}
}
}
}
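# Example invocation (hypothetical cpu list): with low_wakeup_cpus set to
# '3-5', the exec above runs
#   /usr/bin/set-cpu-wakeup-latency.sh low 3-5
# pinning the vswitch CPUs to a shallow C-state, while 'high' releases the
# remaining CPUs to the deepest C-state available.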
# Set systemd machine.slice cgroup cpuset to be used with VMs,
# and configure this cpuset to span all logical cpus and numa nodes.
# NOTES:
# - The parent directory cpuset spans all online cpus and numa nodes.
# - Setting the machine.slice cpuset prevents this from inheriting
# kubernetes libvirt pod's cpuset, since machine.slice cgroup will be
# created when a VM is launched if it does not already exist.
# - systemd automatically mounts cgroups and controllers, so don't need
# to do that here.
class platform::compute::machine {
$parent_dir = '/sys/fs/cgroup/cpuset'
$parent_mems = "${parent_dir}/cpuset.mems"
$parent_cpus = "${parent_dir}/cpuset.cpus"
$machine_dir = "${parent_dir}/machine.slice"
$machine_mems = "${machine_dir}/cpuset.mems"
$machine_cpus = "${machine_dir}/cpuset.cpus"
notice("Create ${machine_dir}")
file { $machine_dir :
ensure => directory,
owner => 'root',
group => 'root',
mode => '0700',
}
-> exec { "Create ${machine_mems}" :
command => "/bin/cat ${parent_mems} > ${machine_mems}",
}
-> exec { "Create ${machine_cpus}" :
command => "/bin/cat ${parent_cpus} > ${machine_cpus}",
}
}
class platform::compute::kvm_timer_advance(
$enabled = false,
$vcpu_pin_set = undef
) {
if $enabled {
# include the declaration of the kubelet service
include ::platform::kubernetes::worker
file { '/etc/kvm-timer-advance/kvm-timer-advance.conf':
ensure => 'present',
replace => true,
content => template('platform/kvm_timer_advance.conf.erb')
}
-> service { 'kvm_timer_advance_setup':
ensure => 'running',
enable => true,
before => Service['kubelet'],
}
# A separate enable is required since we have modified the service resource
# to never enable/disable services in puppet.
-> exec { 'Enable kvm_timer_advance_setup':
command => '/usr/bin/systemctl enable kvm_timer_advance_setup.service',
}
} else {
# A disable is required since we have modified the service resource
# to never enable/disable services in puppet, and stop has no effect.
exec { 'Disable kvm_timer_advance_setup':
command => '/usr/bin/systemctl disable kvm_timer_advance_setup.service',
}
}
}
class platform::compute {
Class[$name] -> Class['::platform::vswitch']
require ::platform::compute::grub::audit
require ::platform::compute::hugetlbf
require ::platform::compute::allocate
require ::platform::compute::pmqos
require ::platform::compute::resctrl
require ::platform::compute::machine
require ::platform::compute::config
require ::platform::compute::kvm_timer_advance
}

View File

@ -1,423 +0,0 @@
class platform::config::params (
$config_uuid = 'install',
$hosts = {},
$timezone = 'UTC',
) { }
class platform::config::certs::params (
$ssl_ca_cert = '',
) { }
class platform::config
inherits ::platform::config::params {
include ::platform::params
include ::platform::anchors
stage { 'pre':
before => Stage['main'],
}
stage { 'post':
require => Stage['main'],
}
class { '::platform::config::pre':
stage => pre
}
class { '::platform::config::post':
stage => post,
}
}
class platform::config::file {
include ::platform::params
include ::platform::network::mgmt::params
include ::platform::network::oam::params
include ::platform::network::cluster_host::params
include ::openstack::horizon::params
# dependent template variables
$management_interface = $::platform::network::mgmt::params::interface_name
$cluster_host_interface = $::platform::network::cluster_host::params::interface_name
$oam_interface = $::platform::network::oam::params::interface_name
$platform_conf = '/etc/platform/platform.conf'
file_line { "${platform_conf} sw_version":
path => $platform_conf,
line => "sw_version=${::platform::params::software_version}",
match => '^sw_version=',
}
if $management_interface {
file_line { "${platform_conf} management_interface":
path => $platform_conf,
line => "management_interface=${management_interface}",
match => '^management_interface=',
}
}
if $cluster_host_interface {
file_line { "${platform_conf} cluster_host_interface":
path => $platform_conf,
line => "cluster_host_interface=${cluster_host_interface}",
match => '^cluster_host_interface=',
}
}
else {
file_line { "${platform_conf} cluster_host_interface":
ensure => absent,
path => $platform_conf,
match => '^cluster_host_interface=',
match_for_absence => true,
}
}
if $oam_interface {
file_line { "${platform_conf} oam_interface":
path => $platform_conf,
line => "oam_interface=${oam_interface}",
match => '^oam_interface=',
}
}
if $::platform::params::vswitch_type {
file_line { "${platform_conf} vswitch_type":
path => $platform_conf,
line => "vswitch_type=${::platform::params::vswitch_type}",
match => '^vswitch_type=',
}
}
if $::platform::params::system_type {
file_line { "${platform_conf} system_type":
path => $platform_conf,
line => "system_type=${::platform::params::system_type}",
match => '^system_type=',
}
}
if $::platform::params::system_mode {
file_line { "${platform_conf} system_mode":
path => $platform_conf,
line => "system_mode=${::platform::params::system_mode}",
match => '^system_mode=',
}
}
if $::platform::params::security_profile {
file_line { "${platform_conf} security_profile":
path => $platform_conf,
line => "security_profile=${::platform::params::security_profile}",
match => '^security_profile=',
}
}
if $::platform::params::sdn_enabled {
file_line { "${platform_conf}f sdn_enabled":
path => $platform_conf,
line => 'sdn_enabled=yes',
match => '^sdn_enabled=',
}
}
else {
file_line { "${platform_conf} sdn_enabled":
path => $platform_conf,
line => 'sdn_enabled=no',
match => '^sdn_enabled=',
}
}
if $::platform::params::region_config {
file_line { "${platform_conf} region_config":
path => $platform_conf,
line => 'region_config=yes',
match => '^region_config=',
}
file_line { "${platform_conf} region_1_name":
path => $platform_conf,
line => "region_1_name=${::platform::params::region_1_name}",
match => '^region_1_name=',
}
file_line { "${platform_conf} region_2_name":
path => $platform_conf,
line => "region_2_name=${::platform::params::region_2_name}",
match => '^region_2_name=',
}
} else {
file_line { "${platform_conf} region_config":
path => $platform_conf,
line => 'region_config=no',
match => '^region_config=',
}
}
if $::platform::params::distributed_cloud_role {
file_line { "${platform_conf} distributed_cloud_role":
path => $platform_conf,
line => "distributed_cloud_role=${::platform::params::distributed_cloud_role}",
match => '^distributed_cloud_role=',
}
}
if $::platform::params::security_feature {
file_line { "${platform_conf} security_feature":
path => $platform_conf,
line => "security_feature=\"${::platform::params::security_feature}\"",
match => '^security_feature=',
}
}
file_line { "${platform_conf} http_port":
path => $platform_conf,
line => "http_port=${::openstack::horizon::params::http_port}",
match => '^http_port=',
}
}
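# Illustrative resulting /etc/platform/platform.conf fragment (hypothetical
# values; the keys are the ones managed by the file_line resources above):
#   sw_version=19.09
#   management_interface=enp0s8
#   oam_interface=enp0s3
#   system_type=All-in-one
#   system_mode=duplex
#   sdn_enabled=no
#   region_config=no
#   http_port=8080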
class platform::config::hostname {
include ::platform::params
file { '/etc/hostname':
ensure => present,
owner => root,
group => root,
mode => '0644',
content => "${::platform::params::hostname}\n",
notify => Exec['set-hostname'],
}
exec { 'set-hostname':
command => 'hostname -F /etc/hostname',
unless => 'test `hostname` = `cat /etc/hostname`',
}
}
class platform::config::hosts
inherits ::platform::config::params {
# localhost should resolve only to the IPv4 loopback address, so ensure
# the IPv6 entry is removed from the configured hosts
resources { 'host': purge => true }
$localhost = {
'localhost' => {
ip => '127.0.0.1',
host_aliases => ['localhost.localdomain', 'localhost4', 'localhost4.localdomain4']
},
}
$merged_hosts = merge($localhost, $hosts)
create_resources('host', $merged_hosts, {})
}
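# Illustrative: a hypothetical $hosts entry such as
#   { 'controller-0' => { 'ip' => '192.168.204.3' } }
# is merged with the localhost entry above and materialized as host
# resources in /etc/hosts; any unmanaged entries are purged by the
# 'resources' declaration.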
class platform::config::timezone
inherits ::platform::config::params {
exec { 'Configure Timezone':
command => "ln -sf /usr/share/zoneinfo/${timezone} /etc/localtime",
}
}
class platform::config::tpm {
$tpm_certs = hiera_hash('platform::tpm::tpm_data', undef)
if $tpm_certs != undef {
# iterate through each tpm_cert creating it if it doesn't exist
$tpm_certs.each |String $key, String $value| {
file { "create-TPM-cert-${key}":
ensure => present,
path => $key,
owner => root,
group => root,
mode => '0644',
content => $value,
}
}
}
}
class platform::config::certs::ssl_ca
inherits ::platform::config::certs::params {
$ssl_ca_file = '/etc/pki/ca-trust/source/anchors/ca-cert.pem'
if str2bool($::is_initial_config) {
$docker_restart_cmd = 'systemctl restart docker'
}
else {
$docker_restart_cmd = 'pmon-restart dockerd'
}
if ! empty($ssl_ca_cert) {
file { 'create-ssl-ca-cert':
ensure => present,
path => $ssl_ca_file,
owner => root,
group => root,
mode => '0644',
content => $ssl_ca_cert,
}
}
else {
file { 'create-ssl-ca-cert':
ensure => absent,
path => $ssl_ca_file
}
}
exec { 'update-ca-trust':
command => 'update-ca-trust',
subscribe => File[$ssl_ca_file],
refreshonly => true
}
-> exec { 'restart docker':
command => $docker_restart_cmd,
subscribe => File[$ssl_ca_file],
refreshonly => true
}
if str2bool($::is_controller_active) {
Exec['restart docker']
-> file { '/etc/platform/.ssl_ca_complete':
ensure => present,
owner => root,
group => root,
mode => '0644',
}
}
}
class platform::config::runtime {
include ::platform::config::certs::ssl_ca
}
class platform::config::pre {
group { 'nobody':
ensure => 'present',
gid => '99',
}
include ::platform::config::timezone
include ::platform::config::hostname
include ::platform::config::hosts
include ::platform::config::file
include ::platform::config::tpm
include ::platform::config::certs::ssl_ca
}
class platform::config::post
inherits ::platform::config::params {
include ::platform::params
service { 'crond':
ensure => 'running',
enable => true,
}
# When applying manifests to upgrade controller-1, we do not want SM or the
# sysinv-agent or anything else that depends on these flags to start.
if ! $::platform::params::controller_upgrade {
file { '/etc/platform/.config_applied':
ensure => present,
mode => '0640',
content => "CONFIG_UUID=${config_uuid}"
}
}
}
class platform::config::controller::post
{
include ::platform::params
# TODO(tngo): The following block will be removed when we switch to Ansible
if str2bool($::is_initial_config_primary) {
# copy configured hosts to redundant storage
file { "${::platform::params::config_path}/hosts":
source => '/etc/hosts',
replace => false,
}
file { '/etc/platform/.unlock_ready':
ensure => present,
}
}
if ! $::platform::params::controller_upgrade {
file { '/etc/platform/.initial_config_complete':
ensure => present,
}
}
file { '/etc/platform/.initial_controller_config_complete':
ensure => present,
}
file { '/var/run/.controller_config_complete':
ensure => present,
}
}
class platform::config::worker::post
{
include ::platform::params
if ! $::platform::params::controller_upgrade {
file { '/etc/platform/.initial_config_complete':
ensure => present,
}
}
file { '/etc/platform/.initial_worker_config_complete':
ensure => present,
}
file { '/var/run/.worker_config_complete':
ensure => present,
}
}
class platform::config::storage::post
{
include ::platform::params
if ! $::platform::params::controller_upgrade {
file { '/etc/platform/.initial_config_complete':
ensure => present,
}
}
file { '/etc/platform/.initial_storage_config_complete':
ensure => present,
}
file { '/var/run/.storage_config_complete':
ensure => present,
}
}
class platform::config::bootstrap {
stage { 'pre':
before => Stage['main'],
}
stage { 'post':
require => Stage['main'],
}
include ::platform::params
include ::platform::anchors
include ::platform::config::hostname
include ::platform::config::hosts
}

View File

@ -1,44 +0,0 @@
class platform::dcdbsync::params (
$api_port = 8219,
$region_name = undef,
$service_create = false,
$service_enabled = false,
$default_endpoint_type = 'internalURL',
) {
include ::platform::params
}
class platform::dcdbsync
inherits ::platform::dcdbsync::params {
if ($::platform::params::distributed_cloud_role == 'systemcontroller' or
$::platform::params::distributed_cloud_role == 'subcloud') {
if $service_create {
if $::platform::params::init_keystone {
include ::dcdbsync::keystone::auth
}
class { '::dcdbsync': }
}
}
}
class platform::dcdbsync::api
inherits ::platform::dcdbsync::params {
if ($::platform::params::distributed_cloud_role == 'systemcontroller' or
$::platform::params::distributed_cloud_role == 'subcloud') {
if $service_create {
include ::platform::network::mgmt::params
$api_host = $::platform::network::mgmt::params::controller_address
$api_fqdn = $::platform::params::controller_hostname
$url_host = "http://${api_fqdn}:${api_port}"
class { '::dcdbsync::api':
bind_host => $api_host,
bind_port => $api_port,
enabled => $service_enabled,
}
}
}
}

View File

@ -1,81 +0,0 @@
class platform::dcmanager::params (
$api_port = 8119,
$region_name = undef,
$domain_name = undef,
$domain_admin = undef,
$domain_pwd = undef,
$service_name = 'dcmanager',
$default_endpoint_type = 'internalURL',
$service_create = false,
) {
include ::platform::params
include ::platform::network::mgmt::params
$api_host = $::platform::network::mgmt::params::controller_address
}
class platform::dcmanager
inherits ::platform::dcmanager::params {
if $::platform::params::distributed_cloud_role == 'systemcontroller' {
include ::platform::params
include ::platform::amqp::params
if $::platform::params::init_database {
include ::dcmanager::db::postgresql
}
class { '::dcmanager':
rabbit_host => $::platform::amqp::params::host_url,
rabbit_port => $::platform::amqp::params::port,
rabbit_userid => $::platform::amqp::params::auth_user,
rabbit_password => $::platform::amqp::params::auth_password,
}
}
}
class platform::dcmanager::haproxy
inherits ::platform::dcmanager::params {
if $::platform::params::distributed_cloud_role == 'systemcontroller' {
platform::haproxy::proxy { 'dcmanager-restapi':
server_name => 's-dcmanager',
public_port => $api_port,
private_port => $api_port,
}
}
}
class platform::dcmanager::manager {
if $::platform::params::distributed_cloud_role == 'systemcontroller' {
include ::dcmanager::manager
}
}
class platform::dcmanager::api
inherits ::platform::dcmanager::params {
if $::platform::params::distributed_cloud_role == 'systemcontroller' {
if ($::platform::dcmanager::params::service_create and
$::platform::params::init_keystone) {
include ::dcmanager::keystone::auth
}
class { '::dcmanager::api':
bind_host => $api_host,
sync_db => $::platform::params::init_database,
}
include ::platform::dcmanager::haproxy
}
}
class platform::dcmanager::runtime {
if $::platform::params::distributed_cloud_role == 'systemcontroller' {
include ::platform::amqp::params
include ::dcmanager
include ::dcmanager::db::postgresql
class { '::dcmanager::api':
sync_db => str2bool($::is_standalone_controller),
}
}
}

View File

@ -1,151 +0,0 @@
class platform::dcorch::params (
$api_port = 8118,
$region_name = undef,
$domain_name = undef,
$domain_admin = undef,
$domain_pwd = undef,
$service_name = 'dcorch',
$default_endpoint_type = 'internalURL',
$service_create = false,
$neutron_api_proxy_port = 29696,
$nova_api_proxy_port = 28774,
$sysinv_api_proxy_port = 26385,
$cinder_api_proxy_port = 28776,
$cinder_enable_ports = false,
$patch_api_proxy_port = 25491,
$identity_api_proxy_port = 25000,
) {
include ::platform::params
include ::platform::network::mgmt::params
$api_host = $::platform::network::mgmt::params::controller_address
}
class platform::dcorch
inherits ::platform::dcorch::params {
if $::platform::params::distributed_cloud_role == 'systemcontroller' {
include ::platform::params
include ::platform::amqp::params
if $::platform::params::init_database {
include ::dcorch::db::postgresql
}
class { '::dcorch':
rabbit_host => $::platform::amqp::params::host_url,
rabbit_port => $::platform::amqp::params::port,
rabbit_userid => $::platform::amqp::params::auth_user,
rabbit_password => $::platform::amqp::params::auth_password,
proxy_bind_host => $api_host,
proxy_remote_host => $api_host,
}
}
}
class platform::dcorch::firewall
inherits ::platform::dcorch::params {
if $::platform::params::distributed_cloud_role == 'systemcontroller' {
platform::firewall::rule { 'dcorch-api':
service_name => 'dcorch',
ports => $api_port,
}
platform::firewall::rule { 'dcorch-nova-api-proxy':
service_name => 'dcorch-nova-api-proxy',
ports => $nova_api_proxy_port,
}
platform::firewall::rule { 'dcorch-neutron-api-proxy':
service_name => 'dcorch-neutron-api-proxy',
ports => $neutron_api_proxy_port,
}
platform::firewall::rule { 'dcorch-cinder-api-proxy':
service_name => 'dcorch-cinder-api-proxy',
ports => $cinder_api_proxy_port,
}
}
}
class platform::dcorch::haproxy
inherits ::platform::dcorch::params {
if $::platform::params::distributed_cloud_role == 'systemcontroller' {
platform::haproxy::proxy { 'dcorch-neutron-api-proxy':
server_name => 's-dcorch-neutron-api-proxy',
public_port => $neutron_api_proxy_port,
private_port => $neutron_api_proxy_port,
}
platform::haproxy::proxy { 'dcorch-nova-api-proxy':
server_name => 's-dcorch-nova-api-proxy',
public_port => $nova_api_proxy_port,
private_port => $nova_api_proxy_port,
}
platform::haproxy::proxy { 'dcorch-sysinv-api-proxy':
server_name => 's-dcorch-sysinv-api-proxy',
public_port => $sysinv_api_proxy_port,
private_port => $sysinv_api_proxy_port,
}
platform::haproxy::proxy { 'dcorch-cinder-api-proxy':
server_name => 's-cinder-dc-api-proxy',
public_port => $cinder_api_proxy_port,
private_port => $cinder_api_proxy_port,
}
platform::haproxy::proxy { 'dcorch-patch-api-proxy':
server_name => 's-dcorch-patch-api-proxy',
public_port => $patch_api_proxy_port,
private_port => $patch_api_proxy_port,
}
platform::haproxy::proxy { 'dcorch-identity-api-proxy':
server_name => 's-dcorch-identity-api-proxy',
public_port => $identity_api_proxy_port,
private_port => $identity_api_proxy_port,
}
}
}
class platform::dcorch::engine
inherits ::platform::dcorch::params {
if $::platform::params::distributed_cloud_role == 'systemcontroller' {
include ::dcorch::engine
}
}
class platform::dcorch::snmp
inherits ::platform::dcorch::params {
if $::platform::params::distributed_cloud_role == 'systemcontroller' {
class { '::dcorch::snmp':
bind_host => $api_host,
}
}
}
class platform::dcorch::api_proxy
inherits ::platform::dcorch::params {
if $::platform::params::distributed_cloud_role == 'systemcontroller' {
if ($::platform::dcorch::params::service_create and
$::platform::params::init_keystone) {
include ::dcorch::keystone::auth
}
class { '::dcorch::api_proxy':
bind_host => $api_host,
sync_db => $::platform::params::init_database,
}
include ::platform::dcorch::firewall
include ::platform::dcorch::haproxy
}
}
class platform::dcorch::runtime {
if $::platform::params::distributed_cloud_role == 'systemcontroller' {
include ::platform::amqp::params
include ::dcorch
include ::dcorch::db::postgresql
class { '::dcorch::api_proxy':
sync_db => str2bool($::is_standalone_controller),
}
}
}

View File

@ -1,46 +0,0 @@
define qat_device_files(
$qat_idx,
$device_id,
) {
if $device_id == 'dh895xcc' {
file { "/etc/dh895xcc_dev${qat_idx}.conf":
ensure => 'present',
owner => 'root',
group => 'root',
mode => '0640',
notify => Service['qat_service'],
}
}
if $device_id == 'c62x' {
file { "/etc/c62x_dev${qat_idx}.conf":
ensure => 'present',
owner => 'root',
group => 'root',
mode => '0640',
notify => Service['qat_service'],
}
}
}
class platform::devices::qat (
$device_config = {},
$service_enabled = false
)
{
if $service_enabled {
create_resources('qat_device_files', $device_config)
service { 'qat_service':
ensure => 'running',
enable => true,
hasrestart => true,
notify => Service['sysinv-agent'],
}
}
}
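# Illustrative $device_config (hypothetical values), one entry per QAT
# device, consumed by create_resources above:
#   { 'qat-dh895xcc-0' => { 'qat_idx' => 0, 'device_id' => 'dh895xcc' } }
# This ensures /etc/dh895xcc_dev0.conf is present and notifies qat_service.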
class platform::devices {
include ::platform::devices::qat
}

View File

@ -1,19 +0,0 @@
class platform::dhclient::params (
) {}
class platform::dhclient
inherits ::platform::dhclient::params {
file { '/etc/dhcp/dhclient.conf':
ensure => 'present',
replace => true,
content => template('platform/dhclient.conf.erb'),
before => Class['::platform::network::apply'],
}
}
class platform::dhclient::runtime {
include ::platform::dhclient
}

View File

@ -1,84 +0,0 @@
class platform::dns::dnsmasq {
# dependent template variables
$install_uuid = $::install_uuid
include ::platform::params
$config_path = $::platform::params::config_path
$pxeboot_hostname = $::platform::params::pxeboot_hostname
$mgmt_hostname = $::platform::params::controller_hostname
include ::platform::network::pxeboot::params
$pxeboot_interface = $::platform::network::pxeboot::params::interface_name
$pxeboot_subnet_version = $::platform::network::pxeboot::params::subnet_version
$pxeboot_subnet_start = $::platform::network::pxeboot::params::subnet_start
$pxeboot_subnet_end = $::platform::network::pxeboot::params::subnet_end
$pxeboot_controller_address = $::platform::network::pxeboot::params::controller_address
if $pxeboot_subnet_version == 4 {
$pxeboot_subnet_netmask = $::platform::network::pxeboot::params::subnet_netmask
} else {
$pxeboot_subnet_netmask = $::platform::network::pxeboot::params::subnet_prefixlen
}
include ::platform::network::mgmt::params
$mgmt_interface = $::platform::network::mgmt::params::interface_name
$mgmt_subnet_version = $::platform::network::mgmt::params::subnet_version
$mgmt_subnet_start = $::platform::network::mgmt::params::subnet_start
$mgmt_subnet_end = $::platform::network::mgmt::params::subnet_end
$mgmt_controller_address = $::platform::network::mgmt::params::controller_address
$mgmt_network_mtu = $::platform::network::mgmt::params::mtu
if $mgmt_subnet_version == 4 {
$mgmt_subnet_netmask = $::platform::network::mgmt::params::subnet_netmask
} else {
$mgmt_subnet_netmask = $::platform::network::mgmt::params::subnet_prefixlen
}
include ::platform::kubernetes::params
$service_domain = $::platform::kubernetes::params::service_domain
$dns_service_ip = $::platform::kubernetes::params::dns_service_ip
file { '/etc/dnsmasq.conf':
ensure => 'present',
replace => true,
content => template('platform/dnsmasq.conf.erb'),
}
}
class platform::dns::resolv (
$servers,
) {
file { '/etc/resolv.conf':
ensure => 'present',
replace => true,
content => template('platform/resolv.conf.erb')
}
}
class platform::dns {
Anchor['platform::networking'] -> Class[$name]
# The "contain" ensures that the resolv and dnsmasq classes are not applied
# until the dns class is begun, which will wait for networking to be
# complete, as per the anchor dependency above. This is necessary because
# the networking configuration can wipe the /etc/resolv.conf file.
contain ::platform::dns::resolv
contain ::platform::dns::dnsmasq
}
class platform::dns::dnsmasq::reload {
platform::sm::restart {'dnsmasq': }
}
class platform::dns::runtime {
include ::platform::dns::dnsmasq
class {'::platform::dns::dnsmasq::reload':
stage => post
}
}

View File

@ -1,130 +0,0 @@
class platform::docker::params (
$package_name = 'docker-ce',
$http_proxy = undef,
$https_proxy = undef,
$no_proxy = undef,
$k8s_registry = undef,
$gcr_registry = undef,
$quay_registry = undef,
$docker_registry = undef,
$k8s_registry_secret = undef,
$gcr_registry_secret = undef,
$quay_registry_secret = undef,
$docker_registry_secret = undef,
$insecure_registry = undef,
) { }
class platform::docker::config
inherits ::platform::docker::params {
if $http_proxy or $https_proxy {
file { '/etc/systemd/system/docker.service.d':
ensure => 'directory',
owner => 'root',
group => 'root',
mode => '0755',
}
-> file { '/etc/systemd/system/docker.service.d/http-proxy.conf':
ensure => present,
owner => 'root',
group => 'root',
mode => '0644',
content => template('platform/dockerproxy.conf.erb'),
}
~> exec { 'perform systemctl daemon reload for docker proxy':
command => 'systemctl daemon-reload',
logoutput => true,
refreshonly => true,
} ~> Service['docker']
}
Class['::platform::filesystem::docker'] ~> Class[$name]
service { 'docker':
ensure => 'running',
name => 'docker',
enable => true,
require => Package['docker']
}
-> exec { 'enable-docker':
command => '/usr/bin/systemctl enable docker.service',
}
}
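# For reference, the http-proxy.conf drop-in rendered above conventionally
# takes the standard systemd form (illustrative, hypothetical proxy values):
#   [Service]
#   Environment="HTTP_PROXY=http://proxy.example.com:8080"
#   Environment="NO_PROXY=localhost,127.0.0.1"
# followed by the chained daemon-reload and docker service restart.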
class platform::docker::install
inherits ::platform::docker::params {
package { 'docker':
ensure => 'installed',
name => $package_name,
}
}
class platform::docker
{
include ::platform::docker::install
include ::platform::docker::config
}
class platform::docker::config::bootstrap
inherits ::platform::docker::params {
require ::platform::filesystem::docker::bootstrap
Class['::platform::filesystem::docker::bootstrap'] ~> Class[$name]
service { 'docker':
ensure => 'running',
name => 'docker',
enable => true,
require => Package['docker']
}
-> exec { 'enable-docker':
command => '/usr/bin/systemctl enable docker.service',
}
}
class platform::docker::bootstrap
{
include ::platform::docker::install
include ::platform::docker::config::bootstrap
}
define platform::docker::login_registry (
$registry_url,
$registry_secret,
) {
include ::platform::client::params
$auth_url = $::platform::client::params::identity_auth_url
$username = $::platform::client::params::admin_username
$user_domain = $::platform::client::params::admin_user_domain
$project_name = $::platform::client::params::admin_project_name
$project_domain = $::platform::client::params::admin_project_domain
$region_name = $::platform::client::params::keystone_identity_region
$password = $::platform::client::params::admin_password
$interface = 'internal'
# Registry credentials have been stored in Barbican secret at Ansible
# bootstrap time, retrieve Barbican secret to get the payload
notice("Get payload of Barbican secret ${registry_secret}")
$secret_payload = generate(
'/bin/sh', '-c', template('platform/get-secret-payload.erb'))
if $secret_payload {
# Parse Barbican secret payload to get the registry username and password
$secret_payload_array = split($secret_payload, ' ')
$registry_username = split($secret_payload_array[0], 'username:')[1]
$registry_password = split($secret_payload_array[1], 'password:')[1]
# Login to authenticated registry
if $registry_username and $registry_password {
exec { 'Login registry':
command => "docker login ${registry_url} -u ${registry_username} -p ${registry_password}",
logoutput => true,
}
} else {
notice('Registry username and/or password NOT FOUND')
}
}
}
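# Per the parsing above, the Barbican secret payload is expected to be a
# single line of the form (hypothetical credentials):
#   username:admin password:secret
# from which registry_username and registry_password are extracted before
# the 'docker login' exec runs.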

View File

@ -1,296 +0,0 @@
class platform::dockerdistribution::params (
$registry_ks_endpoint = undef,
) {}
define platform::dockerdistribution::write_config (
$registry_readonly = false,
$file_path = '/etc/docker-distribution/registry/runtime_config.yml',
$docker_registry_ip = undef,
$docker_registry_host = undef,
){
file { $file_path:
ensure => present,
owner => 'root',
group => 'root',
mode => '0644',
content => template('platform/dockerdistribution.conf.erb'),
}
}
class platform::dockerdistribution::config
inherits ::platform::dockerdistribution::params {
include ::platform::params
include ::platform::kubernetes::params
include ::platform::network::mgmt::params
include ::platform::docker::params
$docker_registry_ip = $::platform::network::mgmt::params::controller_address
$docker_registry_host = $::platform::network::mgmt::params::controller_address_url
$runtime_config = '/etc/docker-distribution/registry/runtime_config.yml'
$used_config = '/etc/docker-distribution/registry/config.yml'
# check insecure registries
if $::platform::docker::params::insecure_registry {
# insecure registry is true means unified registry was set
$insecure_registries = "\"${::platform::docker::params::k8s_registry}\""
} else {
$insecure_registries = ''
}
# for external docker registry running insecure mode
file { '/etc/docker':
ensure => 'directory',
owner => 'root',
group => 'root',
mode => '0700',
}
-> file { '/etc/docker/daemon.json':
ensure => present,
owner => 'root',
group => 'root',
mode => '0644',
content => template('platform/insecuredockerregistry.conf.erb'),
}
platform::dockerdistribution::write_config { 'runtime_config':
docker_registry_ip => $docker_registry_ip,
docker_registry_host => $docker_registry_host
}
-> exec { 'use runtime config file':
command => "ln -fs ${runtime_config} ${used_config}",
}
platform::dockerdistribution::write_config { 'readonly_config':
registry_readonly => true,
file_path => '/etc/docker-distribution/registry/readonly_config.yml',
docker_registry_ip => $docker_registry_ip,
docker_registry_host => $docker_registry_host
}
file { '/etc/docker-distribution/registry/token_server.conf':
ensure => present,
owner => 'root',
group => 'root',
mode => '0644',
content => template('platform/registry-token-server.conf.erb'),
}
# copy the startup script to where it is supposed to be
file {'docker_distribution_initd_script':
ensure => 'present',
path => '/etc/init.d/docker-distribution',
mode => '0755',
source => "puppet:///modules/${module_name}/docker-distribution"
}
file {'registry_token_server_initd_script':
ensure => 'present',
path => '/etc/init.d/registry-token-server',
mode => '0755',
source => "puppet:///modules/${module_name}/registry-token-server"
}
# self-signed certificate for registry use
# this needs to be generated here because the certificate
# needs to know the registry IP address for its SANs
if str2bool($::is_initial_config_primary) {
$shared_dir = $::platform::params::config_path
$certs_dir = '/etc/ssl/private'
# create the certificate files
file { "${certs_dir}/registry-cert-extfile.cnf":
ensure => present,
owner => 'root',
group => 'root',
mode => '0400',
content => template('platform/registry-cert-extfile.erb'),
}
-> exec { 'docker-registry-generate-cert':
command => "openssl req -x509 -sha256 -nodes -days 365 -newkey rsa:2048 \
-keyout ${certs_dir}/registry-cert.key \
-out ${certs_dir}/registry-cert.crt \
-config ${certs_dir}/registry-cert-extfile.cnf",
logoutput => true
}
-> exec { 'docker-registry-generate-pkcs1-cert-from-pkcs8':
command => "openssl rsa -in ${certs_dir}/registry-cert.key \
-out ${certs_dir}/registry-cert-pkcs1.key",
logoutput => true
}
# ensure permissions are set correctly
-> file { "${certs_dir}/registry-cert-pkcs1.key":
ensure => 'file',
owner => 'root',
group => 'root',
mode => '0400',
}
-> file { "${certs_dir}/registry-cert.key":
ensure => 'file',
owner => 'root',
group => 'root',
mode => '0400',
}
-> file { "${certs_dir}/registry-cert.crt":
ensure => 'file',
owner => 'root',
group => 'root',
mode => '0400',
}
# delete the extfile used in certificate generation
-> exec { 'remove-registry-cert-extfile':
command => "rm ${certs_dir}/registry-cert-extfile.cnf"
}
# copy certificates and keys to shared directory for second controller
# we do not need to worry about second controller being up at this point,
# since we have an is_initial_config_primary check
-> file { "${shared_dir}/registry-cert-pkcs1.key":
ensure => 'file',
owner => 'root',
group => 'root',
mode => '0400',
source => "${certs_dir}/registry-cert-pkcs1.key",
}
-> file { "${shared_dir}/registry-cert.key":
ensure => 'file',
owner => 'root',
group => 'root',
mode => '0400',
source => "${certs_dir}/registry-cert.key",
}
-> file { "${shared_dir}/registry-cert.crt":
ensure => 'file',
owner => 'root',
group => 'root',
mode => '0400',
source => "${certs_dir}/registry-cert.crt",
}
# copy the certificate to the docker certificates directory,
# which makes docker trust that specific certificate.
# This is required for self-signed certificates and also if the user
# does not have a certificate signed by a "default" CA
-> file { '/etc/docker/certs.d':
ensure => 'directory',
owner => 'root',
group => 'root',
mode => '0700',
}
-> file { '/etc/docker/certs.d/registry.local:9001':
ensure => 'directory',
owner => 'root',
group => 'root',
mode => '0700',
}
-> file { '/etc/docker/certs.d/registry.local:9001/registry-cert.crt':
ensure => 'file',
owner => 'root',
group => 'root',
mode => '0400',
source => "${certs_dir}/registry-cert.crt",
}
}
}
# compute also needs the "insecure" flag in order to deploy images from
# the registry. This is needed for an insecure external registry
class platform::dockerdistribution::compute
inherits ::platform::dockerdistribution::params {
include ::platform::kubernetes::params
include ::platform::network::mgmt::params
include ::platform::docker::params
# check insecure registries
if $::platform::docker::params::insecure_registry {
# insecure registry is true means unified registry was set
$insecure_registries = "\"${::platform::docker::params::k8s_registry}\""
} else {
$insecure_registries = ''
}
# for external docker registry running insecure mode
file { '/etc/docker':
ensure => 'directory',
owner => 'root',
group => 'root',
mode => '0700',
}
-> file { '/etc/docker/daemon.json':
ensure => present,
owner => 'root',
group => 'root',
mode => '0644',
content => template('platform/insecuredockerregistry.conf.erb'),
}
}
class platform::dockerdistribution
inherits ::platform::dockerdistribution::params {
include ::platform::kubernetes::params
include platform::dockerdistribution::config
Class['::platform::docker::config'] -> Class[$name]
}
class platform::dockerdistribution::reload {
platform::sm::restart {'registry-token-server': }
platform::sm::restart {'docker-distribution': }
}
# This does not update the config right now; the runtime class is only
# used to restart the token server and registry
class platform::dockerdistribution::runtime {
class {'::platform::dockerdistribution::reload':
stage => post
}
}
class platform::dockerdistribution::garbagecollect {
$runtime_config = '/etc/docker-distribution/registry/runtime_config.yml'
$readonly_config = '/etc/docker-distribution/registry/readonly_config.yml'
$used_config = '/etc/docker-distribution/registry/config.yml'
exec { 'turn registry read only':
command => "ln -fs ${readonly_config} ${used_config}",
}
# Puppet does not allow two platform::sm::restart resources with the same
# name, so one of the restarts is issued as an exec command instead
-> exec { 'restart docker-distribution in read only':
command => 'sm-restart-safe service docker-distribution',
}
-> exec { 'run garbage collect':
command => "/usr/bin/registry garbage-collect ${used_config}",
}
-> exec { 'turn registry back to read write':
command => "ln -fs ${runtime_config} ${used_config}",
}
-> platform::sm::restart {'docker-distribution': }
}
class platform::dockerdistribution::bootstrap
inherits ::platform::dockerdistribution::params {
include platform::dockerdistribution::config
Class['::platform::docker::config'] -> Class[$name]
}

View File

@ -1,570 +0,0 @@
class platform::drbd::params (
$link_speed,
$link_util,
$num_parallel,
$rtt_ms,
$automount = false,
$ha_primary = false,
$initial_setup = false,
$fs_type = 'ext4',
$cpumask = false,
) {
include ::platform::params
$host1 = $::platform::params::controller_0_hostname
$host2 = $::platform::params::controller_1_hostname
include ::platform::network::mgmt::params
$ip1 = $::platform::network::mgmt::params::controller0_address
$ip2 = $::platform::network::mgmt::params::controller1_address
$manage = str2bool($::is_initial_config)
}
define platform::drbd::filesystem (
$lv_name,
$vg_name,
$lv_size,
$port,
$device,
$mountpoint,
$resync_after = undef,
$sm_service = $title,
$ha_primary_override = undef,
$initial_setup_override = undef,
$automount_override = undef,
$manage_override = undef,
$ip2_override = undef,
) {
if $manage_override == undef {
$drbd_manage = $::platform::drbd::params::manage
} else {
$drbd_manage = $manage_override
}
if $ha_primary_override == undef {
$drbd_primary = $::platform::drbd::params::ha_primary
} else {
$drbd_primary = $ha_primary_override
}
if $initial_setup_override == undef {
$drbd_initial = $::platform::drbd::params::initial_setup
} else {
$drbd_initial = $initial_setup_override
}
if $automount_override == undef {
$drbd_automount = $::platform::drbd::params::automount
} else {
$drbd_automount = $automount_override
}
if $ip2_override == undef {
$ip2 = $::platform::drbd::params::ip2
} else {
$ip2 = $ip2_override
}
logical_volume { $lv_name:
ensure => present,
volume_group => $vg_name,
size => "${lv_size}G",
size_is_minsize => true,
}
-> drbd::resource { $title:
disk => "/dev/${vg_name}/${lv_name}",
port => $port,
device => $device,
mountpoint => $mountpoint,
handlers => {
'before-resync-target' =>
"/usr/local/sbin/sm-notify -s ${sm_service} -e sync-start",
'after-resync-target' =>
"/usr/local/sbin/sm-notify -s ${sm_service} -e sync-end",
},
host1 => $::platform::drbd::params::host1,
host2 => $::platform::drbd::params::host2,
ip1 => $::platform::drbd::params::ip1,
ip2 => $ip2,
manage => $drbd_manage,
ha_primary => $drbd_primary,
initial_setup => $drbd_initial,
automount => $drbd_automount,
fs_type => $::platform::drbd::params::fs_type,
link_util => $::platform::drbd::params::link_util,
link_speed => $::platform::drbd::params::link_speed,
num_parallel => $::platform::drbd::params::num_parallel,
rtt_ms => $::platform::drbd::params::rtt_ms,
cpumask => $::platform::drbd::params::cpumask,
resync_after => $resync_after,
}
if str2bool($::is_initial_config_primary) {
# NOTE: The DRBD file system can only be resized immediately if not peering,
# otherwise it must wait for the peer backing storage device to be
# resized before issuing the resize locally.
Drbd::Resource[$title]
-> exec { "drbd resize ${title}":
command => "drbdadm -- --assume-peer-has-space resize ${title}",
}
-> exec { "resize2fs ${title}":
command => "resize2fs ${device}",
}
}
}
class platform::drbd::pgsql::params (
$device = '/dev/drbd0',
$lv_name = 'pgsql-lv',
$lv_size = '2',
$mountpoint = '/var/lib/postgresql',
$port = '7789',
$resource_name = 'drbd-pgsql',
$vg_name = 'cgts-vg',
) {}
class platform::drbd::pgsql (
) inherits ::platform::drbd::pgsql::params {
platform::drbd::filesystem { $resource_name:
vg_name => $vg_name,
lv_name => $lv_name,
lv_size => $lv_size,
port => $port,
device => $device,
mountpoint => $mountpoint,
sm_service => 'drbd-pg',
}
}
class platform::drbd::rabbit::params (
$device = '/dev/drbd1',
$lv_name = 'rabbit-lv',
$lv_size = '2',
$mountpoint = '/var/lib/rabbitmq',
$port = '7799',
$resource_name = 'drbd-rabbit',
$vg_name = 'cgts-vg',
) {}
class platform::drbd::rabbit ()
inherits ::platform::drbd::rabbit::params {
platform::drbd::filesystem { $resource_name:
vg_name => $vg_name,
lv_name => $lv_name,
lv_size => $lv_size,
port => $port,
device => $device,
mountpoint => $mountpoint,
resync_after => 'drbd-pgsql',
}
}
class platform::drbd::platform::params (
$device = '/dev/drbd2',
$lv_name = 'platform-lv',
$lv_size = '10',
$mountpoint = '/opt/platform',
$port = '7790',
$vg_name = 'cgts-vg',
$resource_name = 'drbd-platform',
) {}
class platform::drbd::platform ()
inherits ::platform::drbd::platform::params {
platform::drbd::filesystem { $resource_name:
vg_name => $vg_name,
lv_name => $lv_name,
lv_size => $lv_size,
port => $port,
device => $device,
mountpoint => $mountpoint,
resync_after => 'drbd-rabbit',
}
}
class platform::drbd::extension::params (
$device = '/dev/drbd5',
$lv_name = 'extension-lv',
$lv_size = '1',
$mountpoint = '/opt/extension',
$port = '7793',
$resource_name = 'drbd-extension',
$vg_name = 'cgts-vg',
) {}
class platform::drbd::extension (
) inherits ::platform::drbd::extension::params {
include ::platform::params
include ::platform::drbd::platform::params
if str2bool($::is_primary_disk_rotational) {
$resync_after = $::platform::drbd::platform::params::resource_name
} else {
$resync_after = undef
}
platform::drbd::filesystem { $resource_name:
vg_name => $vg_name,
lv_name => $lv_name,
lv_size => $lv_size,
port => $port,
device => $device,
mountpoint => $mountpoint,
resync_after => $resync_after,
}
}
class platform::drbd::patch_vault::params (
$service_enabled = false,
$device = '/dev/drbd6',
$lv_name = 'patch-vault-lv',
$lv_size = '8',
$mountpoint = '/opt/patch-vault',
$port = '7794',
$resource_name = 'drbd-patch-vault',
$vg_name = 'cgts-vg',
) {}
class platform::drbd::patch_vault (
) inherits ::platform::drbd::patch_vault::params {
if str2bool($::is_standalone_controller) {
$drbd_primary = true
$drbd_initial = true
$drbd_automount = true
$drbd_manage = true
} else {
$drbd_primary = undef
$drbd_initial = undef
$drbd_automount = undef
$drbd_manage = undef
}
if $service_enabled {
platform::drbd::filesystem { $resource_name:
vg_name => $vg_name,
lv_name => $lv_name,
lv_size => $lv_size,
port => $port,
device => $device,
mountpoint => $mountpoint,
resync_after => 'drbd-extension',
manage_override => $drbd_manage,
ha_primary_override => $drbd_primary,
initial_setup_override => $drbd_initial,
automount_override => $drbd_automount,
}
}
}
class platform::drbd::etcd::params (
#$service_enable = false,
$device = '/dev/drbd7',
$lv_name = 'etcd-lv',
$lv_size = '5',
$mountpoint = '/opt/etcd',
$port = '7797',
$resource_name = 'drbd-etcd',
$vg_name = 'cgts-vg',
) {}
class platform::drbd::etcd (
) inherits ::platform::drbd::etcd::params {
if str2bool($::is_initial_config_primary) {
$drbd_primary = true
$drbd_initial = true
$drbd_automount = true
$drbd_manage = true
} else {
$drbd_primary = undef
$drbd_initial = undef
$drbd_automount = undef
$drbd_manage = undef
}
platform::drbd::filesystem { $resource_name:
vg_name => $vg_name,
lv_name => $lv_name,
lv_size => $lv_size,
port => $port,
device => $device,
mountpoint => $mountpoint,
resync_after => undef,
manage_override => $drbd_manage,
ha_primary_override => $drbd_primary,
initial_setup_override => $drbd_initial,
automount_override => $drbd_automount,
}
}
class platform::drbd::etcd::bootstrap (
) inherits ::platform::drbd::etcd::params {
$drbd_primary = true
$drbd_initial = true
$drbd_automount = true
$drbd_manage = true
platform::drbd::filesystem { $resource_name:
vg_name => $vg_name,
lv_name => $lv_name,
lv_size => $lv_size,
port => $port,
device => $device,
mountpoint => $mountpoint,
resync_after => undef,
manage_override => $drbd_manage,
ha_primary_override => $drbd_primary,
initial_setup_override => $drbd_initial,
automount_override => $drbd_automount,
}
}
class platform::drbd::dockerdistribution::params (
$device = '/dev/drbd8',
$lv_name = 'dockerdistribution-lv',
$lv_size = '1',
$mountpoint = '/var/lib/docker-distribution',
$port = '7798',
$resource_name = 'drbd-dockerdistribution',
$vg_name = 'cgts-vg',
) {}
class platform::drbd::dockerdistribution ()
inherits ::platform::drbd::dockerdistribution::params {
if str2bool($::is_initial_config_primary) {
$drbd_primary = true
$drbd_initial = true
$drbd_automount = true
$drbd_manage = true
} else {
$drbd_primary = undef
$drbd_initial = undef
$drbd_automount = undef
$drbd_manage = undef
}
platform::drbd::filesystem { $resource_name:
vg_name => $vg_name,
lv_name => $lv_name,
lv_size => $lv_size,
port => $port,
device => $device,
mountpoint => $mountpoint,
resync_after => undef,
manage_override => $drbd_manage,
ha_primary_override => $drbd_primary,
initial_setup_override => $drbd_initial,
automount_override => $drbd_automount,
}
}
class platform::drbd::dockerdistribution::bootstrap ()
inherits ::platform::drbd::dockerdistribution::params {
$drbd_primary = true
$drbd_initial = true
$drbd_automount = true
$drbd_manage = true
platform::drbd::filesystem { $resource_name:
vg_name => $vg_name,
lv_name => $lv_name,
lv_size => $lv_size,
port => $port,
device => $device,
mountpoint => $mountpoint,
resync_after => undef,
manage_override => $drbd_manage,
ha_primary_override => $drbd_primary,
initial_setup_override => $drbd_initial,
automount_override => $drbd_automount,
}
}
class platform::drbd::cephmon::params (
$device = '/dev/drbd9',
$lv_name = 'ceph-mon-lv',
$mountpoint = '/var/lib/ceph/mon',
$port = '7788',
$resource_name = 'drbd-cephmon',
$vg_name = 'cgts-vg',
) {}
class platform::drbd::cephmon ()
inherits ::platform::drbd::cephmon::params {
include ::platform::ceph::params
$system_mode = $::platform::params::system_mode
$system_type = $::platform::params::system_type
if str2bool($::is_standalone_controller) and ! str2bool($::is_node_ceph_configured) {
# Active controller, first time configuration.
$drbd_primary = true
$drbd_initial = true
$drbd_automount = true
} elsif str2bool($::is_standalone_controller) {
# Active standalone controller, successive reboots.
$drbd_primary = true
$drbd_initial = undef
$drbd_automount = true
} else {
# Node unlock, reboot or standby configuration
# Do not mount ceph
$drbd_primary = undef
$drbd_initial = undef
$drbd_automount = undef
}
if ($::platform::ceph::params::service_enabled and
$system_type == 'All-in-one' and 'duplex' in $system_mode) {
platform::drbd::filesystem { $resource_name:
vg_name => $vg_name,
lv_name => $lv_name,
lv_size => $::platform::ceph::params::mon_lv_size,
port => $port,
device => $device,
mountpoint => $mountpoint,
resync_after => undef,
manage_override => true,
ha_primary_override => $drbd_primary,
initial_setup_override => $drbd_initial,
automount_override => $drbd_automount,
} -> Class['::ceph']
}
}
class platform::drbd(
$service_enable = false,
$service_ensure = 'stopped',
) {
if str2bool($::is_initial_config_primary) or str2bool($::is_standalone_controller) {
# Enable DRBD on standalone
class { '::drbd':
service_enable => true,
service_ensure => 'running',
}
} else {
class { '::drbd':
service_enable => $service_enable,
service_ensure => $service_ensure,
}
}
include ::platform::drbd::params
include ::platform::drbd::pgsql
include ::platform::drbd::rabbit
include ::platform::drbd::platform
include ::platform::drbd::extension
include ::platform::drbd::patch_vault
include ::platform::drbd::etcd
include ::platform::drbd::dockerdistribution
include ::platform::drbd::cephmon
# network changes need to be applied prior to DRBD resources
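# (for reference: Drbd::Resource <| |> is a resource collector that
# gathers every drbd::resource declared by the platform::drbd::*
# classes included above, ordering all of them between the two anchors)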
Anchor['platform::networking']
-> Drbd::Resource <| |>
-> Anchor['platform::services']
}
class platform::drbd::bootstrap {
class { '::drbd':
service_enable => true,
service_ensure => 'running'
}
# override the defaults to initialize and activate the file systems
class { '::platform::drbd::params':
ha_primary => true,
initial_setup => true,
automount => true,
}
include ::platform::drbd::pgsql
include ::platform::drbd::rabbit
include ::platform::drbd::platform
include ::platform::drbd::extension
}
class platform::drbd::runtime {
class { '::platform::drbd':
service_enable => true,
service_ensure => 'running',
}
}
class platform::drbd::runtime_service_enable {
class { '::drbd':
service_enable => true,
service_ensure => 'running'
}
}
class platform::drbd::pgsql::runtime {
include ::platform::drbd::params
include ::platform::drbd::runtime_service_enable
include ::platform::drbd::pgsql
}
class platform::drbd::platform::runtime {
include ::platform::drbd::params
include ::platform::drbd::runtime_service_enable
include ::platform::drbd::platform
}
class platform::drbd::extension::runtime {
include ::platform::drbd::params
include ::platform::drbd::runtime_service_enable
include ::platform::drbd::extension
}
class platform::drbd::patch_vault::runtime {
include ::platform::drbd::params
include ::platform::drbd::runtime_service_enable
include ::platform::drbd::patch_vault
}
class platform::drbd::etcd::runtime {
include ::platform::drbd::params
include ::platform::drbd::runtime_service_enable
include ::platform::drbd::etcd
}
class platform::drbd::dockerdistribution::runtime {
include ::platform::drbd::params
include ::platform::drbd::runtime_service_enable
include ::platform::drbd::dockerdistribution
}
class platform::drbd::cephmon::runtime {
include ::platform::drbd::params
include ::platform::drbd::runtime_service_enable
include ::platform::drbd::cephmon
}

@@ -1,122 +0,0 @@
class platform::etcd::params (
$bind_address = '0.0.0.0',
$port = 2379,
$node = 'controller',
)
{
include ::platform::params
$sw_version = $::platform::params::software_version
$etcd_basedir = '/opt/etcd'
$etcd_versioned_dir = "${etcd_basedir}/${sw_version}"
}
# Modify the systemd service file for etcd and
# create an init.d script for SM to manage the service
class platform::etcd::setup {
file {'etcd_override_dir':
ensure => directory,
path => '/etc/systemd/system/etcd.service.d',
mode => '0755',
}
-> file {'etcd_override':
ensure => present,
path => '/etc/systemd/system/etcd.service.d/etcd-override.conf',
mode => '0644',
source => "puppet:///modules/${module_name}/etcd-override.conf"
}
-> file {'etcd_initd_script':
ensure => 'present',
path => '/etc/init.d/etcd',
mode => '0755',
source => "puppet:///modules/${module_name}/etcd"
}
-> exec { 'systemd-reload-daemon':
command => '/usr/bin/systemctl daemon-reload',
}
-> Service['etcd']
}
class platform::etcd::init
inherits ::platform::etcd::params {
$client_url = "http://${bind_address}:${port}"
if str2bool($::is_initial_config_primary) {
$service_ensure = 'running'
}
else {
$service_ensure = 'stopped'
}
class { 'etcd':
ensure => 'present',
etcd_name => $node,
service_enable => false,
service_ensure => $service_ensure,
cluster_enabled => false,
listen_client_urls => $client_url,
advertise_client_urls => $client_url,
data_dir => "${etcd_versioned_dir}/${node}.etcd",
proxy => 'off',
}
}
class platform::etcd
inherits ::platform::etcd::params {
Class['::platform::drbd::etcd'] -> Class[$name]
include ::platform::etcd::datadir
include ::platform::etcd::setup
include ::platform::etcd::init
Class['::platform::etcd::datadir']
-> Class['::platform::etcd::setup']
-> Class['::platform::etcd::init']
}
class platform::etcd::datadir
inherits ::platform::etcd::params {
Class['::platform::drbd::etcd'] -> Class[$name]
if $::platform::params::init_database {
file { $etcd_versioned_dir:
ensure => 'directory',
owner => 'root',
group => 'root',
mode => '0755',
}
}
}
class platform::etcd::datadir::bootstrap
inherits ::platform::etcd::params {
require ::platform::drbd::etcd::bootstrap
Class['::platform::drbd::etcd::bootstrap'] -> Class[$name]
if $::platform::params::init_database {
file { $etcd_versioned_dir:
ensure => 'directory',
owner => 'root',
group => 'root',
mode => '0755',
}
}
}
class platform::etcd::bootstrap
inherits ::platform::etcd::params {
include ::platform::etcd::datadir::bootstrap
include ::platform::etcd::setup
include ::platform::etcd::init
Class['::platform::etcd::datadir::bootstrap']
-> Class['::platform::etcd::setup']
-> Class['::platform::etcd::init']
}

@@ -1,19 +0,0 @@
class platform::exports {
include ::platform::params
file { '/etc/exports':
ensure => present,
mode => '0600',
owner => 'root',
group => 'root',
}
-> file_line { '/etc/exports /etc/platform':
path => '/etc/exports',
line => "/etc/platform\t\t ${::platform::params::mate_ipaddress}(no_root_squash,no_subtree_check,rw)",
match => '^/etc/platform\s',
}
-> exec { 'Re-export filesystems':
command => 'exportfs -r',
}
}

@@ -1,306 +0,0 @@
class platform::filesystem::params (
$vg_name = 'cgts-vg',
) {}
define platform::filesystem (
$lv_name,
$lv_size,
$mountpoint,
$fs_type,
$fs_options,
$fs_use_all = false,
$mode = '0750',
) {
include ::platform::filesystem::params
$vg_name = $::platform::filesystem::params::vg_name
$device = "/dev/${vg_name}/${lv_name}"
if !$fs_use_all {
$size = "${lv_size}G"
$fs_size_is_minsize = true
}
else {
# use all available space
$size = undef
$fs_size_is_minsize = false
}
# create logical volume
logical_volume { $lv_name:
ensure => present,
volume_group => $vg_name,
size => $size,
size_is_minsize => $fs_size_is_minsize,
}
# create filesystem
-> filesystem { $device:
ensure => present,
fs_type => $fs_type,
options => $fs_options,
}
-> file { $mountpoint:
ensure => 'directory',
owner => 'root',
group => 'root',
mode => $mode,
}
-> mount { $name:
ensure => 'mounted',
atboot => 'yes',
name => $mountpoint,
device => $device,
options => 'defaults',
fstype => $fs_type,
}
# The above mount resource doesn't actually remount devices that were already present in /etc/fstab, but were
# unmounted during manifest application. To get around this, we attempt to mount them again if they are not
# already mounted.
-> exec { "mount ${device}":
unless => "mount | awk '{print \$3}' | grep -Fxq ${mountpoint}",
command => "mount ${mountpoint}",
path => '/usr/bin'
}
}
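# A minimal usage sketch of the define above (illustrative only: the
# 'demo-lv' volume, size and mountpoint are made-up values, not part of
# this module):
#
#   platform::filesystem { 'demo-lv':
#     lv_name    => 'demo-lv',
#     lv_size    => '2',
#     mountpoint => '/opt/demo',
#     fs_type    => 'ext4',
#     fs_options => ' ',
#   }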
define platform::filesystem::resize(
$lv_name,
$lv_size,
$devmapper,
) {
include ::platform::filesystem::params
$vg_name = $::platform::filesystem::params::vg_name
$device = "/dev/${vg_name}/${lv_name}"
# TODO (rchurch): Fix this... Allowing return code 5 so that lvextends using the same size doesn't blow up
exec { "lvextend ${device}":
command => "lvextend -L${lv_size}G ${device}",
returns => [0, 5]
}
# After a partition extend, make sure that there is no leftover drbd
# type metadata from a previous install. Drbd writes its meta at the
# very end of a block device causing confusion for blkid.
-> exec { "wipe end of device ${device}":
command => "dd if=/dev/zero of=${device} bs=512 seek=$(($(blockdev --getsz ${device}) - 34)) count=34",
onlyif => "blkid ${device} | grep TYPE=\\\"drbd\\\"",
}
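# (the dd above zeroes the final 34 512-byte sectors, i.e. 17408 bytes,
# at the tail of the device, which is where blkid would otherwise keep
# finding the stale DRBD signature)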
-> exec { "resize2fs ${devmapper}":
command => "resize2fs ${devmapper}",
onlyif => "blkid -s TYPE -o value ${devmapper} | grep -v xfs",
}
-> exec { "xfs_growfs ${devmapper}":
command => "xfs_growfs ${devmapper}",
onlyif => "blkid -s TYPE -o value ${devmapper} | grep xfs",
}
}
class platform::filesystem::backup::params (
$lv_name = 'backup-lv',
$lv_size = '5',
$mountpoint = '/opt/backups',
$devmapper = '/dev/mapper/cgts--vg-backup--lv',
$fs_type = 'ext4',
$fs_options = ' '
) {}
class platform::filesystem::backup
inherits ::platform::filesystem::backup::params {
platform::filesystem { $lv_name:
lv_name => $lv_name,
lv_size => $lv_size,
mountpoint => $mountpoint,
fs_type => $fs_type,
fs_options => $fs_options
}
}
class platform::filesystem::scratch::params (
$lv_size = '8',
$lv_name = 'scratch-lv',
$mountpoint = '/scratch',
$devmapper = '/dev/mapper/cgts--vg-scratch--lv',
$fs_type = 'ext4',
$fs_options = ' '
) { }
class platform::filesystem::scratch
inherits ::platform::filesystem::scratch::params {
platform::filesystem { $lv_name:
lv_name => $lv_name,
lv_size => $lv_size,
mountpoint => $mountpoint,
fs_type => $fs_type,
fs_options => $fs_options
}
}
class platform::filesystem::kubelet::params (
$lv_size = '10',
$lv_name = 'kubelet-lv',
$mountpoint = '/var/lib/kubelet',
$devmapper = '/dev/mapper/cgts--vg-kubelet--lv',
$fs_type = 'ext4',
$fs_options = ' '
) { }
class platform::filesystem::kubelet
inherits ::platform::filesystem::kubelet::params {
platform::filesystem { $lv_name:
lv_name => $lv_name,
lv_size => $lv_size,
mountpoint => $mountpoint,
fs_type => $fs_type,
fs_options => $fs_options
}
}
class platform::filesystem::docker::params (
$lv_size = '1',
$lv_name = 'docker-lv',
$mountpoint = '/var/lib/docker',
$devmapper = '/dev/mapper/cgts--vg-docker--lv',
$fs_type = 'xfs',
$fs_options = '-n ftype=1',
$fs_use_all = false
) { }
class platform::filesystem::docker
inherits ::platform::filesystem::docker::params {
platform::filesystem { $lv_name:
lv_name => $lv_name,
lv_size => $lv_size,
mountpoint => $mountpoint,
fs_type => $fs_type,
fs_options => $fs_options,
fs_use_all => $fs_use_all,
mode => '0711',
}
}
class platform::filesystem::storage {
include ::platform::filesystem::kubelet
class { 'platform::filesystem::docker::params':
lv_size => 30,
}
-> class { 'platform::filesystem::docker': }
Class['::platform::lvm::vg::cgts_vg'] -> Class[$name]
}
class platform::filesystem::compute {
include ::platform::filesystem::kubelet
class { 'platform::filesystem::docker::params':
lv_size => 30,
}
-> class { 'platform::filesystem::docker': }
Class['::platform::lvm::vg::cgts_vg'] -> Class[$name]
}
class platform::filesystem::controller {
include ::platform::filesystem::backup
include ::platform::filesystem::scratch
include ::platform::filesystem::docker
include ::platform::filesystem::kubelet
}
class platform::filesystem::backup::runtime {
include ::platform::filesystem::backup::params
$lv_name = $::platform::filesystem::backup::params::lv_name
$lv_size = $::platform::filesystem::backup::params::lv_size
$devmapper = $::platform::filesystem::backup::params::devmapper
platform::filesystem::resize { $lv_name:
lv_name => $lv_name,
lv_size => $lv_size,
devmapper => $devmapper,
}
}
class platform::filesystem::scratch::runtime {
include ::platform::filesystem::scratch::params
$lv_name = $::platform::filesystem::scratch::params::lv_name
$lv_size = $::platform::filesystem::scratch::params::lv_size
$devmapper = $::platform::filesystem::scratch::params::devmapper
platform::filesystem::resize { $lv_name:
lv_name => $lv_name,
lv_size => $lv_size,
devmapper => $devmapper,
}
}
class platform::filesystem::kubelet::runtime {
include ::platform::filesystem::kubelet::params
$lv_name = $::platform::filesystem::kubelet::params::lv_name
$lv_size = $::platform::filesystem::kubelet::params::lv_size
$devmapper = $::platform::filesystem::kubelet::params::devmapper
platform::filesystem::resize { $lv_name:
lv_name => $lv_name,
lv_size => $lv_size,
devmapper => $devmapper,
}
}
class platform::filesystem::docker::runtime {
include ::platform::filesystem::docker::params
$lv_name = $::platform::filesystem::docker::params::lv_name
$lv_size = $::platform::filesystem::docker::params::lv_size
$devmapper = $::platform::filesystem::docker::params::devmapper
platform::filesystem::resize { $lv_name:
lv_name => $lv_name,
lv_size => $lv_size,
devmapper => $devmapper,
}
}
class platform::filesystem::docker::params::bootstrap (
$lv_size = '30',
$lv_name = 'docker-lv',
$mountpoint = '/var/lib/docker',
$devmapper = '/dev/mapper/cgts--vg-docker--lv',
$fs_type = 'xfs',
$fs_options = '-n ftype=1',
$fs_use_all = false
) { }
class platform::filesystem::docker::bootstrap
inherits ::platform::filesystem::docker::params::bootstrap {
platform::filesystem { $lv_name:
lv_name => $lv_name,
lv_size => $lv_size,
mountpoint => $mountpoint,
fs_type => $fs_type,
fs_options => $fs_options,
fs_use_all => $fs_use_all,
mode => '0711',
}
}

@@ -1,218 +0,0 @@
define platform::firewall::rule (
$service_name,
$chain = 'INPUT',
$destination = undef,
$ensure = present,
$host = 'ALL',
$jump = undef,
$outiface = undef,
$ports = undef,
$proto = 'tcp',
$table = undef,
$tosource = undef,
) {
include ::platform::params
include ::platform::network::oam::params
$ip_version = $::platform::network::oam::params::subnet_version
$provider = $ip_version ? {
6 => 'ip6tables',
default => 'iptables',
}
$source = $host ? {
'ALL' => $ip_version ? {
6 => '::/0',
default => '0.0.0.0/0'
},
default => $host,
}
$heading = $chain ? {
'OUTPUT' => 'outgoing',
'POSTROUTING' => 'forwarding',
default => 'incoming',
}
# NAT rule
if $jump == 'SNAT' or $jump == 'MASQUERADE' {
firewall { "500 ${service_name} ${heading} ${title}":
ensure => $ensure,
table => $table,
proto => $proto,
outiface => $outiface,
jump => $jump,
tosource => $tosource,
destination => $destination,
source => $source,
provider => $provider,
chain => $chain,
}
}
else {
if $ports == undef {
firewall { "500 ${service_name} ${heading} ${title}":
ensure => $ensure,
proto => $proto,
action => 'accept',
source => $source,
provider => $provider,
chain => $chain,
}
}
else {
firewall { "500 ${service_name} ${heading} ${title}":
ensure => $ensure,
proto => $proto,
dport => $ports,
action => 'accept',
source => $source,
provider => $provider,
chain => $chain,
}
}
}
}
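# A minimal usage sketch of the define above (illustrative only: the
# service name and port are made-up values). With no jump specified,
# this renders a simple accept rule on the INPUT chain:
#
#   platform::firewall::rule { 'demo-api':
#     service_name => 'demo',
#     ports        => [8080],
#   }
#
# A real SNAT invocation, with table/chain/jump/tosource overrides, can
# be seen in the kubernetes manifest ('kubernetes-nat').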
class platform::firewall::calico::oam::services {
include ::platform::params
include ::platform::network::oam::params
include ::platform::nfv::params
include ::platform::fm::params
include ::platform::patching::params
include ::platform::sysinv::params
include ::platform::smapi::params
include ::platform::ceph::params
include ::openstack::barbican::params
include ::openstack::keystone::params
include ::openstack::horizon::params
include ::platform::dcmanager::params
include ::platform::dcorch::params
$ip_version = $::platform::network::oam::params::subnet_version
# icmp
$t_icmp_proto = $ip_version ? {
6 => 'ICMPv6',
default => 'ICMP'
}
# udp
$sm_port = [2222, 2223]
$ntp_port = [123]
$snmp_port = [161, 162]
$ptp_port = [319, 320]
# tcp
$ssh_port = [22]
if $::platform::fm::params::service_enabled {
$fm_port = [$::platform::fm::params::api_port]
} else {
$fm_port = []
}
$nfv_vim_port = [$::platform::nfv::params::api_port]
$patching_port = [$::platform::patching::params::public_port]
$sysinv_port = [$::platform::sysinv::params::api_port]
$sm_api_port = [$::platform::smapi::params::port]
$kube_apiserver_port = [6443]
if $::platform::ceph::params::service_enabled {
$ceph_radosgw_port = [$::platform::ceph::params::rgw_port]
} else {
$ceph_radosgw_port = []
}
$barbican_api_port = [$::openstack::barbican::params::api_port]
if !$::platform::params::region_config {
$keystone_port = [$::openstack::keystone::params::api_port]
} else {
$keystone_port = []
}
if $::platform::params::distributed_cloud_role != 'subcloud' {
if $::openstack::horizon::params::enable_https {
$horizon_port = [$::openstack::horizon::params::https_port]
} else {
$horizon_port = [$::openstack::horizon::params::http_port]
}
} else {
$horizon_port = []
}
if $::platform::params::distributed_cloud_role == 'systemcontroller' {
$dc_port = [$::platform::dcmanager::params::api_port,
$::platform::dcorch::params::sysinv_api_proxy_port,
$::platform::dcorch::params::patch_api_proxy_port,
$::platform::dcorch::params::identity_api_proxy_port]
} else {
$dc_port = []
}
$t_ip_version = $ip_version
$t_udp_ports = concat($sm_port, $ntp_port, $snmp_port, $ptp_port)
$t_tcp_ports = concat($ssh_port,
$fm_port, $nfv_vim_port, $patching_port, $sysinv_port, $sm_api_port,
$kube_apiserver_port,
$ceph_radosgw_port, $barbican_api_port, $keystone_port, $horizon_port,
$dc_port)
$file_name = '/tmp/gnp_all_oam.yaml'
file { $file_name:
ensure => file,
content => template('platform/calico_oam_if_gnp.yaml.erb'),
owner => 'root',
group => 'root',
mode => '0640',
}
-> exec { "apply resource ${file_name}":
path => '/usr/bin:/usr/sbin:/bin',
command => "kubectl --kubeconfig=/etc/kubernetes/admin.conf apply -f ${file_name}",
onlyif => 'kubectl --kubeconfig=/etc/kubernetes/admin.conf get customresourcedefinitions.apiextensions.k8s.io'
}
}
class platform::firewall::calico::oam::endpoints {
include ::platform::params
include ::platform::network::oam::params
$host = $::platform::params::hostname
$oam_if = $::platform::network::oam::params::interface_name
$oam_addr = $::platform::network::oam::params::interface_address
# create/update host endpoint to represent oam interface
$file_name_oam = "/tmp/hep_${host}_oam.yaml"
file { $file_name_oam:
ensure => file,
content => template('platform/calico_oam_if_hep.yaml.erb'),
owner => 'root',
group => 'root',
mode => '0640',
}
-> exec { "apply resource ${file_name_oam}":
path => '/usr/bin:/usr/sbin:/bin',
command => "kubectl --kubeconfig=/etc/kubernetes/admin.conf apply -f ${file_name_oam}",
onlyif => 'kubectl --kubeconfig=/etc/kubernetes/admin.conf get customresourcedefinitions.apiextensions.k8s.io'
}
}
class platform::firewall::calico::oam {
contain ::platform::firewall::calico::oam::endpoints
contain ::platform::firewall::calico::oam::services
Class['::platform::kubernetes::master'] -> Class[$name]
Class['::platform::firewall::calico::oam::endpoints']
-> Class['::platform::firewall::calico::oam::services']
}
class platform::firewall::runtime {
include ::platform::firewall::calico::oam::endpoints
include ::platform::firewall::calico::oam::services
Class['::platform::firewall::calico::oam::endpoints']
-> Class['::platform::firewall::calico::oam::services']
}

@@ -1,104 +0,0 @@
class platform::fm::params (
$api_port = 18002,
$api_host = '127.0.0.1',
$region_name = undef,
$system_name = undef,
$service_create = false,
$service_enabled = true,
$trap_destinations = [],
$sysinv_catalog_info = 'platform:sysinv:internalURL',
) { }
class platform::fm::config
inherits ::platform::fm::params {
$trap_dest_str = join($trap_destinations,',')
class { '::fm':
region_name => $region_name,
system_name => $system_name,
trap_destinations => $trap_dest_str,
sysinv_catalog_info => $sysinv_catalog_info,
}
}
class platform::fm
inherits ::platform::fm::params {
include ::fm::client
include ::fm::keystone::authtoken
include ::platform::fm::config
include ::platform::params
if $::platform::params::init_database {
include ::fm::db::postgresql
}
}
class platform::fm::haproxy
inherits ::platform::fm::params {
include ::platform::haproxy::params
platform::haproxy::proxy { 'fm-api-internal':
server_name => 's-fm-api-internal',
public_ip_address => $::platform::haproxy::params::private_ip_address,
public_port => $api_port,
private_ip_address => $api_host,
private_port => $api_port,
public_api => false,
}
platform::haproxy::proxy { 'fm-api-public':
server_name => 's-fm-api-public',
public_port => $api_port,
private_port => $api_port,
}
}
class platform::fm::api
inherits ::platform::fm::params {
include ::platform::params
if $service_enabled {
if ($::platform::fm::service_create and
$::platform::params::init_keystone) {
include ::fm::keystone::auth
}
include ::platform::params
class { '::fm::api':
host => $api_host,
workers => $::platform::params::eng_workers,
sync_db => $::platform::params::init_database,
}
include ::platform::fm::haproxy
}
}
class platform::fm::runtime {
require ::platform::fm::config
exec { 'notify-fm-mgr':
command => '/usr/bin/pkill -HUP fmManager',
onlyif => 'pgrep fmManager'
}
}
class platform::fm::bootstrap {
# Set up needed config to enable launching of fmManager later
include ::platform::fm::params
include ::platform::fm
if $::platform::params::init_keystone {
include ::fm::keystone::auth
class { '::fm::api':
host => $::platform::fm::params::api_host,
workers => $::platform::params::eng_workers,
sync_db => $::platform::params::init_database,
}
}
}

@@ -1,20 +0,0 @@
class platform::fstab {
include ::platform::params
if $::personality != 'controller' {
exec { 'Unmount NFS filesystems':
command => 'umount -a -t nfs ; sleep 5 ;',
}
-> mount { '/opt/platform':
ensure => 'present',
fstype => 'nfs',
device => 'controller-platform-nfs:/opt/platform',
options => "${::platform::params::nfs_mount_options},_netdev",
atboot => 'yes',
remounts => true,
}
-> exec { 'Remount NFS filesystems':
command => 'umount -a -t nfs ; sleep 1 ; mount -a -t nfs',
}
}
}

@@ -1,29 +0,0 @@
class platform::grub
{
include ::platform::params
$managed_security_params = 'nopti nospectre_v2'
# Run grubby to update params
# First remove all the parameters we manage, then add back the ones
# we want to use.
exec { 'removing managed security kernel params from command line':
command => "grubby --update-kernel=`grubby --default-kernel` --remove-args=\"${managed_security_params}\"",
}
-> exec { 'removing managed security kernel params from command line for EFI':
command => "grubby --efi --update-kernel=`grubby --efi --default-kernel` --remove-args=\"${managed_security_params}\"",
}
-> exec { 'adding requested security kernel params to command line ':
command => "grubby --update-kernel=`grubby --default-kernel` --args=\"${::platform::params::security_feature}\"",
onlyif => "test -n \"${::platform::params::security_feature}\""
}
-> exec { 'adding requested security kernel params to command line for EFI':
command => "grubby --efi --update-kernel=`grubby --efi --default-kernel` --args=\"${::platform::params::security_feature}\"",
onlyif => "test -n \"${::platform::params::security_feature}\""
}
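# (worked example: with security_feature set to 'nopti nospectre_v2',
# the non-EFI exec above runs
#   grubby --update-kernel=`grubby --default-kernel` --args="nopti nospectre_v2"
# and the EFI exec runs the same command with --efi added)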
}
class platform::grub::runtime
{
include ::platform::grub
}

@@ -1,150 +0,0 @@
class platform::haproxy::params (
$private_ip_address,
$public_ip_address,
$enable_https = false,
$global_options = undef,
$tpm_object = undef,
$tpm_engine = '/usr/lib64/openssl/engines/libtpm2.so',
) { }
define platform::haproxy::proxy (
$server_name,
$private_port,
$public_port,
$public_ip_address = undef,
$private_ip_address = undef,
$server_timeout = undef,
$client_timeout = undef,
$x_forwarded_proto = true,
$enable_https = undef,
$public_api = true,
) {
include ::platform::haproxy::params
if $enable_https != undef {
$https_enabled = $enable_https
} else {
$https_enabled = $::platform::haproxy::params::enable_https
}
if $x_forwarded_proto {
if $https_enabled and $public_api {
$ssl_option = 'ssl crt /etc/ssl/private/server-cert.pem'
$proto = 'X-Forwarded-Proto:\ https'
# The value of max-age matches lighttpd.conf, and should be
# maintained for consistency
$hsts_option = 'Strict-Transport-Security:\ max-age=63072000;\ includeSubDomains'
} else {
$ssl_option = ' '
$proto = 'X-Forwarded-Proto:\ http'
$hsts_option = undef
}
} else {
$ssl_option = ' '
$proto = undef
$hsts_option = undef
}
if $public_ip_address {
$public_ip = $public_ip_address
} else {
$public_ip = $::platform::haproxy::params::public_ip_address
}
if $private_ip_address {
$private_ip = $private_ip_address
} else {
$private_ip = $::platform::haproxy::params::private_ip_address
}
if $client_timeout {
$real_client_timeout = "client ${client_timeout}"
} else {
$real_client_timeout = undef
}
haproxy::frontend { $name:
collect_exported => false,
name => $name,
bind => {
"${public_ip}:${public_port}" => $ssl_option,
},
options => {
'default_backend' => "${name}-internal",
'reqadd' => $proto,
'timeout' => $real_client_timeout,
'rspadd' => $hsts_option,
},
}
if $server_timeout {
$timeout_option = "server ${server_timeout}"
} else {
$timeout_option = undef
}
haproxy::backend { $name:
collect_exported => false,
name => "${name}-internal",
options => {
'server' => "${server_name} ${private_ip}:${private_port}",
'timeout' => $timeout_option,
}
}
}
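# A minimal usage sketch of the define above (illustrative only: the
# proxy name, server name and port are made-up values). Addresses not
# passed in fall back to platform::haproxy::params:
#
#   platform::haproxy::proxy { 'demo-api':
#     server_name  => 's-demo-api',
#     public_port  => 8080,
#     private_port => 8080,
#   }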
class platform::haproxy::server {
include ::platform::params
include ::platform::haproxy::params
# If TPM mode is enabled then we need to configure
# the TPM object and the TPM OpenSSL engine in HAPROXY
$tpm_object = $::platform::haproxy::params::tpm_object
$tpm_engine = $::platform::haproxy::params::tpm_engine
if $tpm_object != undef {
$tpm_options = {'tpm-object' => $tpm_object, 'tpm-engine' => $tpm_engine}
$global_options = merge($::platform::haproxy::params::global_options, $tpm_options)
} else {
$global_options = $::platform::haproxy::params::global_options
}
class { '::haproxy':
global_options => $global_options,
}
user { 'haproxy':
ensure => 'present',
shell => '/sbin/nologin',
groups => [$::platform::params::protected_group_name],
} -> Class['::haproxy']
}
class platform::haproxy::reload {
platform::sm::restart {'haproxy': }
}
class platform::haproxy::runtime {
include ::platform::haproxy::server
include ::platform::patching::haproxy
include ::platform::sysinv::haproxy
include ::platform::nfv::haproxy
include ::platform::ceph::haproxy
include ::platform::fm::haproxy
if $::platform::params::distributed_cloud_role == 'systemcontroller' {
include ::platform::dcmanager::haproxy
include ::platform::dcorch::haproxy
}
include ::openstack::keystone::haproxy
include ::openstack::barbican::haproxy
class {'::platform::haproxy::reload':
stage => post
}
}

@@ -1,179 +0,0 @@
class platform::helm::repositories::params(
$source_helm_repos_base_dir = '/opt/platform/helm_charts',
$target_helm_repos_base_dir = '/www/pages/helm_charts',
$helm_repositories = [ 'stx-platform', 'starlingx' ],
) {}
define platform::helm::repository (
$repo_base = undef,
$repo_port = undef,
$create = false,
$primary = false,
) {
$repo_path = "${repo_base}/${name}"
if str2bool($create) {
file {$repo_path:
ensure => directory,
path => $repo_path,
owner => 'www',
require => User['www'],
}
-> exec { "Generate index: ${repo_path}":
command => "helm repo index ${repo_path}",
logoutput => true,
user => 'www',
group => 'www',
require => User['www'],
}
$before_relationship = Exec['Stop lighttpd']
$require_relationship = [ User['sysadmin'], Exec["Generate index: ${repo_path}"] ]
} else {
$before_relationship = undef
$require_relationship = User['sysadmin']
}
exec { "Adding StarlingX helm repo: ${name}":
before => $before_relationship,
environment => [ 'KUBECONFIG=/etc/kubernetes/admin.conf' , 'HOME=/home/sysadmin'],
command => "helm repo add ${name} http://127.0.0.1:${repo_port}/helm_charts/${name}",
logoutput => true,
user => 'sysadmin',
group => 'sys_protected',
require => $require_relationship
}
}
class platform::helm::repositories
inherits ::platform::helm::repositories::params {
include ::openstack::horizon::params
include ::platform::users
Anchor['platform::services']
-> platform::helm::repository { $helm_repositories:
repo_base => $target_helm_repos_base_dir,
repo_port => $::openstack::horizon::params::http_port,
create => $::is_initial_config,
primary => $::is_initial_config_primary,
}
-> exec { 'Updating info of available charts locally from chart repo':
environment => [ 'KUBECONFIG=/etc/kubernetes/admin.conf', 'HOME=/home/sysadmin' ],
command => 'helm repo update',
logoutput => true,
user => 'sysadmin',
group => 'sys_protected',
require => User['sysadmin']
}
}
class platform::helm
inherits ::platform::helm::repositories::params {
include ::platform::docker::params
file {$target_helm_repos_base_dir:
ensure => directory,
path => $target_helm_repos_base_dir,
owner => 'www',
require => User['www']
}
Drbd::Resource <| |>
-> file {$source_helm_repos_base_dir:
ensure => directory,
path => $source_helm_repos_base_dir,
owner => 'www',
require => User['www']
}
if (str2bool($::is_initial_config) and $::personality == 'controller') {
if str2bool($::is_initial_config_primary) {
if $::platform::docker::params::gcr_registry {
$gcr_registry = $::platform::docker::params::gcr_registry
} else {
$gcr_registry = 'gcr.io'
}
if $::platform::docker::params::quay_registry {
$quay_registry = $::platform::docker::params::quay_registry
} else {
$quay_registry = 'quay.io'
}
Class['::platform::kubernetes::master']
-> exec { 'load tiller docker image':
command => "docker image pull ${gcr_registry}/kubernetes-helm/tiller:v2.13.1",
logoutput => true,
}
# TODO(tngo): If and when tiller image is upversioned, please ensure armada compatibility as part of the test
-> exec { 'load armada docker image':
command => "docker image pull ${quay_registry}/airshipit/armada:8a1638098f88d92bf799ef4934abe569789b885e-ubuntu_bionic",
logoutput => true,
}
-> exec { 'create service account for tiller':
command => 'kubectl --kubeconfig=/etc/kubernetes/admin.conf create serviceaccount --namespace kube-system tiller',
logoutput => true,
}
-> exec { 'create cluster role binding for tiller service account':
command => 'kubectl --kubeconfig=/etc/kubernetes/admin.conf create clusterrolebinding tiller-cluster-rule --clusterrole=cluster-admin --serviceaccount=kube-system:tiller', # lint:ignore:140chars
logoutput => true,
}
-> exec { 'initialize helm':
environment => [ 'KUBECONFIG=/etc/kubernetes/admin.conf', 'HOME=/home/sysadmin' ],
command => "helm init --skip-refresh --service-account tiller --node-selectors \"node-role.kubernetes.io/master\"=\"\" --tiller-image=${gcr_registry}/kubernetes-helm/tiller:v2.13.1 --override spec.template.spec.hostNetwork=true", # lint:ignore:140chars
logoutput => true,
user => 'sysadmin',
group => 'sys_protected',
require => User['sysadmin']
}
exec { "bind mount ${target_helm_repos_base_dir}":
command => "mount -o bind -t ext4 ${source_helm_repos_base_dir} ${target_helm_repos_base_dir}",
require => File[ $source_helm_repos_base_dir, $target_helm_repos_base_dir ]
}
} else {
Class['::platform::kubernetes::master']
-> exec { 'initialize helm':
environment => [ 'KUBECONFIG=/etc/kubernetes/admin.conf', 'HOME=/home/sysadmin' ],
command => 'helm init --skip-refresh --client-only',
logoutput => true,
user => 'sysadmin',
group => 'sys_protected',
require => User['sysadmin']
}
}
include ::platform::helm::repositories
include ::openstack::horizon::params
$port = $::openstack::horizon::params::http_port
exec { 'restart lighttpd for helm':
require => [File['/etc/lighttpd/lighttpd.conf', $target_helm_repos_base_dir, $source_helm_repos_base_dir],
Exec['initialize helm']],
command => 'systemctl restart lighttpd.service',
logoutput => true,
}
-> Class['::platform::helm::repositories']
}
}
class platform::helm::runtime {
include ::platform::helm::repositories
}

@@ -1,87 +0,0 @@
class platform::influxdb::params (
$bind_address = undef,
$database = undef,
$typesdb = undef,
$batch_size = undef,
$batch_pending = undef,
$batch_timeout = undef,
$read_buffer = undef,
) {}
class platform::influxdb
inherits ::platform::influxdb::params {
user { 'influxdb': ensure => present, }
-> group { 'influxdb': ensure => present, }
# make a pid dir owned by the influxdb user and group
-> file { '/var/run/influxdb':
ensure => 'directory',
owner => 'influxdb',
group => 'influxdb',
mode => '0755',
}
# make a log dir owned by the influxdb user and group
-> file { '/var/log/influxdb':
ensure => 'directory',
owner => 'influxdb',
group => 'influxdb',
mode => '0755',
}
# make a lib dir owned by the influxdb user and group
-> file { '/var/lib/influxdb':
ensure => 'directory',
owner => 'influxdb',
group => 'influxdb',
mode => '0755',
} # now configure influxdb
-> file { '/etc/influxdb/influxdb.conf':
ensure => 'present',
replace => true,
content => template('platform/influxdb.conf.erb'),
} # now make sure that influxdb is started
-> exec { 'influxdb-enable':
command => 'systemctl enable influxdb',
unless => 'systemctl is-enabled influxdb'
}
# ensure that influxdb is running
-> service { 'influxdb':
ensure => running,
enable => true,
provider => 'systemd'
} # now ask pmon to monitor the process
# ensure pmon soft link for process monitoring
-> file { '/etc/pmon.d/influxdb.conf':
ensure => 'link',
target => '/etc/influxdb/influxdb.conf.pmon',
owner => 'root',
group => 'root',
mode => '0600',
}
}
class platform::influxdb::runtime {
include ::platform::influxdb
}
class platform::influxdb::logrotate::params (
$log_file_name = undef,
$log_file_size = undef,
$log_file_rotate = undef,
) {}
class platform::influxdb::logrotate
inherits ::platform::influxdb::logrotate::params {
file { '/etc/logrotate.d/influxdb':
ensure => 'present',
replace => true,
content => template('platform/logrotate.erb'),
}
}

@@ -1,711 +0,0 @@
class platform::kubernetes::params (
$enabled = true,
$node_ip = undef,
$pod_network_cidr = undef,
$pod_network_ipversion = 4,
$service_network_cidr = undef,
$apiserver_advertise_address = undef,
$etcd_endpoint = undef,
$service_domain = undef,
$dns_service_ip = undef,
$host_labels = [],
$ca_crt = undef,
$ca_key = undef,
$sa_key = undef,
$sa_pub = undef,
$k8s_cpuset = undef,
$k8s_nodeset = undef,
$k8s_reserved_cpus = undef,
$k8s_reserved_mem = undef,
$apiserver_cert_san = []
) { }
class platform::kubernetes::cgroup::params (
$cgroup_root = '/sys/fs/cgroup',
$cgroup_name = 'k8s-infra',
$controllers = ['cpuset', 'cpu', 'cpuacct', 'memory', 'systemd', 'pids'],
) {}
class platform::kubernetes::cgroup
inherits ::platform::kubernetes::cgroup::params {
include ::platform::kubernetes::params
$k8s_cpuset = $::platform::kubernetes::params::k8s_cpuset
$k8s_nodeset = $::platform::kubernetes::params::k8s_nodeset
# Default to float across all cpus and numa nodes
if !defined('$k8s_cpuset') {
$k8s_cpuset = generate('/bin/cat', '/sys/devices/system/cpu/online')
notice("System default cpuset ${k8s_cpuset}.")
}
if !defined('$k8s_nodeset') {
$k8s_nodeset = generate('/bin/cat', '/sys/devices/system/node/online')
notice("System default nodeset ${k8s_nodeset}.")
}
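# (for reference: on a host where /sys/devices/system/cpu/online reads
# '0-5' and /sys/devices/system/node/online reads '0', these defaults
# evaluate to k8s_cpuset = '0-5' and k8s_nodeset = '0')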
# Create kubelet cgroup for the minimal set of required controllers.
# NOTE: The kubernetes cgroup_manager_linux func Exists() checks that
# specific subsystem cgroup paths actually exist on the system. The
# particular cgroup cgroupRoot must exist for the following controllers:
# "cpu", "cpuacct", "cpuset", "memory", "systemd", "pids".
# Reference:
# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubelet/cm/cgroup_manager_linux.go
# systemd automatically mounts cgroups and controllers, so don't need
# to do that here.
notice("Create ${cgroup_root}/${controllers}/${cgroup_name}")
$controllers.each |String $controller| {
$cgroup_dir = "${cgroup_root}/${controller}/${cgroup_name}"
file { $cgroup_dir :
ensure => directory,
owner => 'root',
group => 'root',
mode => '0700',
}
# Modify k8s cpuset resources to reflect platform configured cores.
# NOTE: Using 'exec' here instead of 'file' resource type with 'content'
# tag to update contents under /sys, since puppet tries to create files
# with temp names in the same directory, and the kernel only allows
# specific filenames to be created in these particular directories.
# This causes puppet to fail if we use the 'content' tag.
# NOTE: Child cgroups cpuset must be subset of parent. In the case where
# child directories already exist and we change the parent's cpuset to
# be a subset of what the children have, will cause the command to fail
# with "-bash: echo: write error: device or resource busy".
if $controller == 'cpuset' {
$cgroup_mems = "${cgroup_dir}/cpuset.mems"
$cgroup_cpus = "${cgroup_dir}/cpuset.cpus"
$cgroup_tasks = "${cgroup_dir}/tasks"
notice("Set ${cgroup_name} nodeset: ${k8s_nodeset}, cpuset: ${k8s_cpuset}")
File[ $cgroup_dir ]
-> exec { "Create ${cgroup_mems}" :
command => "/bin/echo ${k8s_nodeset} > ${cgroup_mems} || :",
}
-> exec { "Create ${cgroup_cpus}" :
command => "/bin/echo ${k8s_cpuset} > ${cgroup_cpus} || :",
}
-> file { $cgroup_tasks :
ensure => file,
owner => 'root',
group => 'root',
mode => '0644',
}
}
}
}
class platform::kubernetes::kubeadm {
include ::platform::docker::params
include ::platform::kubernetes::params
$node_ip = $::platform::kubernetes::params::node_ip
$host_labels = $::platform::kubernetes::params::host_labels
$k8s_reserved_cpus = $::platform::kubernetes::params::k8s_reserved_cpus
$k8s_reserved_mem = $::platform::kubernetes::params::k8s_reserved_mem
$iptables_file = "net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1"
# Configure kubelet cpumanager options
if str2bool($::is_worker_subfunction)
and !('openstack-compute-node'
in $host_labels) {
$k8s_cpu_manager_opts = join([
'--cpu-manager-policy=static',
'--system-reserved-cgroup=/system.slice',
join([
'--system-reserved=',
"cpu=${k8s_reserved_cpus},",
"memory=${k8s_reserved_mem}Mi"])
], ' ')
} else {
$k8s_cpu_manager_opts = '--cpu-manager-policy=none'
}
# Enable kubelet extra parameters that are node specific such as
# cpumanager
file { '/etc/sysconfig/kubelet':
ensure => file,
content => template('platform/kubelet.conf.erb'),
}
# The cpu_manager_state file is regenerated when cpumanager starts or
# changes allocations, so it is safe to remove before kubelet starts.
# Because the file persists across restarts, cpumanager's DefaultCPUSet
# would otherwise become inconsistent when CPUs are offlined/onlined or
# the number of reserved cpus changes.
-> exec { 'remove cpu_manager_state':
command => 'rm -f /var/lib/kubelet/cpu_manager_state || true',
}
# Update iptables config. This is required based on:
# https://kubernetes.io/docs/tasks/tools/install-kubeadm
# This probably belongs somewhere else - initscripts package?
file { '/etc/sysctl.d/k8s.conf':
ensure => file,
content => $iptables_file,
owner => 'root',
group => 'root',
mode => '0644',
}
-> exec { 'update kernel parameters for iptables':
command => 'sysctl --system',
}
# Create manifests directory required by kubelet
-> file { '/etc/kubernetes/manifests':
ensure => directory,
owner => 'root',
group => 'root',
mode => '0700',
}
# Start kubelet.
-> service { 'kubelet':
enable => true,
}
# A separate enable is required since we have modified the service resource
# to never enable services.
-> exec { 'enable-kubelet':
command => '/usr/bin/systemctl enable kubelet.service',
}
}
class platform::kubernetes::master::init
inherits ::platform::kubernetes::params {
include ::platform::params
include ::platform::docker::params
$apiserver_loopback_address = $pod_network_ipversion ? {
4 => '127.0.0.1',
6 => '::1',
}
$apiserver_certsans = concat($apiserver_cert_san, $apiserver_loopback_address, $apiserver_advertise_address)
# This is used for imageRepository in template kubeadm.yaml.erb
if $::platform::docker::params::k8s_registry {
$k8s_registry = $::platform::docker::params::k8s_registry
} else {
$k8s_registry = 'k8s.gcr.io'
}
# This is used for calico image in template calico.yaml.erb
if $::platform::docker::params::quay_registry {
$quay_registry = $::platform::docker::params::quay_registry
} else {
$quay_registry = 'quay.io'
}
# This is used for device plugin images in template multus.yaml.erb,
# sriov-cni.yaml.erb and sriovdp-daemonset.yaml.erb
if $::platform::docker::params::docker_registry {
$docker_registry = $::platform::docker::params::docker_registry
} else {
$docker_registry = 'docker.io'
}
if str2bool($::is_initial_config_primary) {
# For initial controller install, configure kubernetes from scratch.
$resolv_conf = '/etc/resolv.conf'
# Configure the master node.
file { '/etc/kubernetes/kubeadm.yaml':
ensure => file,
content => template('platform/kubeadm.yaml.erb'),
}
-> exec { 'configure master node':
command => 'kubeadm init --config=/etc/kubernetes/kubeadm.yaml',
logoutput => true,
}
# Update ownership/permissions for file created by "kubeadm init".
# We want it readable by sysinv and sysadmin.
-> file { '/etc/kubernetes/admin.conf':
ensure => file,
owner => 'root',
group => $::platform::params::protected_group_name,
mode => '0640',
}
# Add a bash profile script to set a k8s env variable
-> file {'bash_profile_k8s':
ensure => file,
path => '/etc/profile.d/kubeconfig.sh',
mode => '0644',
source => "puppet:///modules/${module_name}/kubeconfig.sh"
}
# Deploy Multus as a Daemonset, and Calico is used as the default network
# (a network interface that every pod will be created with), each network
# attachment is made in addition to this default network.
-> file { '/etc/kubernetes/multus.yaml':
ensure => file,
content => template('platform/multus.yaml.erb'),
}
-> exec {'deploy multus daemonset':
command =>
'kubectl --kubeconfig=/etc/kubernetes/admin.conf apply -f /etc/kubernetes/multus.yaml',
logoutput => true,
}
# Configure calico networking using the Kubernetes API datastore.
-> file { '/etc/kubernetes/calico.yaml':
ensure => file,
content => template('platform/calico.yaml.erb'),
}
-> exec { 'install calico networking':
command =>
'kubectl --kubeconfig=/etc/kubernetes/admin.conf apply -f /etc/kubernetes/calico.yaml',
logoutput => true,
}
# Deploy sriov-cni as a Daemonset
-> file { '/etc/kubernetes/sriov-cni.yaml':
ensure => file,
content => template('platform/sriov-cni.yaml.erb'),
}
-> exec {'deploy sriov-cni daemonset':
command =>
'kubectl --kubeconfig=/etc/kubernetes/admin.conf apply -f /etc/kubernetes/sriov-cni.yaml',
logoutput => true,
}
# Deploy SRIOV network device plugin as a Daemonset
-> file { '/etc/kubernetes/sriovdp-daemonset.yaml':
ensure => file,
content => template('platform/sriovdp-daemonset.yaml.erb'),
}
-> exec {'deploy sriov device plugin daemonset':
command =>
'kubectl --kubeconfig=/etc/kubernetes/admin.conf apply -f /etc/kubernetes/sriovdp-daemonset.yaml',
logoutput => true,
}
# Remove the taint from the master node
-> exec { 'remove taint from master node':
command => "kubectl --kubeconfig=/etc/kubernetes/admin.conf taint node ${::platform::params::hostname} node-role.kubernetes.io/master- || true", # lint:ignore:140chars
logoutput => true,
}
# Add kubelet service override
-> file { '/etc/systemd/system/kubelet.service.d/kube-stx-override.conf':
ensure => file,
content => template('platform/kube-stx-override.conf.erb'),
owner => 'root',
group => 'root',
mode => '0644',
}
# set kubelet monitored by pmond
-> file { '/etc/pmon.d/kubelet.conf':
ensure => file,
content => template('platform/kubelet-pmond-conf.erb'),
owner => 'root',
group => 'root',
mode => '0644',
}
# Reload systemd
-> exec { 'perform systemctl daemon reload for kubelet override':
command => 'systemctl daemon-reload',
logoutput => true,
}
# Initial kubernetes config done on node
-> file { '/etc/platform/.initial_k8s_config_complete':
ensure => present,
}
} else {
if str2bool($::is_initial_k8s_config) {
# This allows subsequent node installs
# Notes regarding ::is_initial_k8s_config check:
# - Ensures block is only run for new node installs (e.g. controller-1)
# or reinstalls. This part is needed only once;
# - Ansible configuration is independently configuring Kubernetes. A retry
# in configuration by puppet leads to failed manifest application.
# This flag is created by Ansible on controller-0;
# - Ansible replay is not impacted by flag creation.
# If alternative k8s registry requires the authentication,
# kubeadm required images need to be pre-pulled on controller
if $k8s_registry != 'k8s.gcr.io' and $::platform::docker::params::k8s_registry_secret != undef {
File['/etc/kubernetes/kubeadm.yaml']
-> platform::docker::login_registry { 'login k8s registry':
registry_url => $k8s_registry,
registry_secret => $::platform::docker::params::k8s_registry_secret
}
-> exec { 'kubeadm to pre pull images':
command => 'kubeadm config images pull --config /etc/kubernetes/kubeadm.yaml',
logoutput => true,
before => Exec['configure master node']
}
-> exec { 'logout k8s registry':
command => "docker logout ${k8s_registry}",
logoutput => true,
}
}
# Create necessary certificate files
file { '/etc/kubernetes/pki':
ensure => directory,
owner => 'root',
group => 'root',
mode => '0755',
}
-> file { '/etc/kubernetes/pki/ca.crt':
ensure => file,
content => $ca_crt,
owner => 'root',
group => 'root',
mode => '0644',
}
-> file { '/etc/kubernetes/pki/ca.key':
ensure => file,
content => $ca_key,
owner => 'root',
group => 'root',
mode => '0600',
}
-> file { '/etc/kubernetes/pki/sa.key':
ensure => file,
content => $sa_key,
owner => 'root',
group => 'root',
mode => '0600',
}
-> file { '/etc/kubernetes/pki/sa.pub':
ensure => file,
content => $sa_pub,
owner => 'root',
group => 'root',
mode => '0600',
}
# Configure the master node.
-> file { '/etc/kubernetes/kubeadm.yaml':
ensure => file,
content => template('platform/kubeadm.yaml.erb'),
}
-> exec { 'configure master node':
command => 'kubeadm init --config=/etc/kubernetes/kubeadm.yaml',
logoutput => true,
}
# Update ownership/permissions for file created by "kubeadm init".
# We want it readable by sysinv and sysadmin.
-> file { '/etc/kubernetes/admin.conf':
ensure => file,
owner => 'root',
group => $::platform::params::protected_group_name,
mode => '0640',
}
# Add a bash profile script to set a k8s env variable
-> file {'bash_profile_k8s':
ensure => present,
path => '/etc/profile.d/kubeconfig.sh',
mode => '0644',
source => "puppet:///modules/${module_name}/kubeconfig.sh"
}
# Remove the taint from the master node
-> exec { 'remove taint from master node':
command => "kubectl --kubeconfig=/etc/kubernetes/admin.conf taint node ${::platform::params::hostname} node-role.kubernetes.io/master- || true", # lint:ignore:140chars
logoutput => true,
}
# Add kubelet service override
-> file { '/etc/systemd/system/kubelet.service.d/kube-stx-override.conf':
ensure => file,
content => template('platform/kube-stx-override.conf.erb'),
owner => 'root',
group => 'root',
mode => '0644',
}
# set kubelet monitored by pmond
-> file { '/etc/pmon.d/kubelet.conf':
ensure => file,
content => template('platform/kubelet-pmond-conf.erb'),
owner => 'root',
group => 'root',
mode => '0644',
}
# Reload systemd
-> exec { 'perform systemctl daemon reload for kubelet override':
command => 'systemctl daemon-reload',
logoutput => true,
}
# Initial kubernetes config done on node
-> file { '/etc/platform/.initial_k8s_config_complete':
ensure => present,
}
}
}
}
class platform::kubernetes::master
inherits ::platform::kubernetes::params {
contain ::platform::kubernetes::kubeadm
contain ::platform::kubernetes::cgroup
contain ::platform::kubernetes::master::init
contain ::platform::kubernetes::coredns
contain ::platform::kubernetes::firewall
Class['::platform::etcd'] -> Class[$name]
Class['::platform::docker::config'] -> Class[$name]
# Ensure DNS is configured as name resolution is required when
# kubeadm init is run.
Class['::platform::dns'] -> Class[$name]
Class['::platform::kubernetes::kubeadm']
-> Class['::platform::kubernetes::cgroup']
-> Class['::platform::kubernetes::master::init']
-> Class['::platform::kubernetes::coredns']
-> Class['::platform::kubernetes::firewall']
}
class platform::kubernetes::worker::params (
$join_cmd = undef,
) { }
class platform::kubernetes::worker::init
inherits ::platform::kubernetes::worker::params {
Class['::platform::docker::config'] -> Class[$name]
if str2bool($::is_initial_config) {
include ::platform::params
if $::platform::docker::params::k8s_registry {
$k8s_registry = $::platform::docker::params::k8s_registry
} else {
$k8s_registry = 'k8s.gcr.io'
}
# If alternative k8s registry requires the authentication,
# k8s pause image needs to be pre-pulled on worker nodes
if $k8s_registry != 'k8s.gcr.io' and $::platform::docker::params::k8s_registry_secret != undef {
# Get the pause image tag from kubeadm required images
# list and replace with alternative k8s registry
$get_k8s_pause_img = "kubeadm config images list 2>/dev/null |\
awk '/^k8s.gcr.io\\/pause:/{print \$1}' | sed 's/k8s.gcr.io/${k8s_registry}/'"
$k8s_pause_img = generate('/bin/sh', '-c', $get_k8s_pause_img)
if $k8s_pause_img {
platform::docker::login_registry { 'login k8s registry':
registry_url => $k8s_registry,
registry_secret => $::platform::docker::params::k8s_registry_secret
}
-> exec { 'load k8s pause image':
command => "docker image pull ${k8s_pause_img}",
logoutput => true,
before => Exec['configure worker node']
}
-> exec { 'logout k8s registry':
command => "docker logout ${k8s_registry}",
logoutput => true,
}
}
}
}
# Configure the worker node. Only do this once, so check whether the
# kubelet.conf file has already been created (by the join).
exec { 'configure worker node':
command => $join_cmd,
logoutput => true,
unless => 'test -f /etc/kubernetes/kubelet.conf',
}
# Add kubelet service override
-> file { '/etc/systemd/system/kubelet.service.d/kube-stx-override.conf':
ensure => file,
content => template('platform/kube-stx-override.conf.erb'),
owner => 'root',
group => 'root',
mode => '0644',
}
# set kubelet monitored by pmond
-> file { '/etc/pmon.d/kubelet.conf':
ensure => file,
content => template('platform/kubelet-pmond-conf.erb'),
owner => 'root',
group => 'root',
mode => '0644',
}
# Reload systemd
-> exec { 'perform systemctl daemon reload for kubelet override':
command => 'systemctl daemon-reload',
logoutput => true,
}
}
class platform::kubernetes::worker::pci
(
$pcidp_network_resources = undef,
) {
include ::platform::kubernetes::params
file { '/etc/pcidp':
ensure => 'directory',
owner => 'root',
group => 'root',
mode => '0700',
}
-> file { '/etc/pcidp/config.json':
ensure => present,
owner => 'root',
group => 'root',
mode => '0644',
content => template('platform/pcidp.conf.erb'),
}
}
class platform::kubernetes::worker
inherits ::platform::kubernetes::params {
# Worker configuration is not required on AIO hosts, since the master
# will already be configured and includes support for running pods.
if $::personality != 'controller' {
contain ::platform::kubernetes::kubeadm
contain ::platform::kubernetes::cgroup
contain ::platform::kubernetes::worker::init
Class['::platform::kubernetes::kubeadm']
-> Class['::platform::kubernetes::cgroup']
-> Class['::platform::kubernetes::worker::init']
} else {
# Reconfigure cgroups cpusets on AIO
contain ::platform::kubernetes::cgroup
# Add refresh dependency for kubelet for hugepage allocation
Class['::platform::compute::allocate']
~> service { 'kubelet':
}
}
file { '/var/run/.disable_worker_services':
ensure => file,
replace => no,
}
# TODO: The following exec is a workaround. Once kubernetes becomes the
# default installation, /etc/pmon.d/libvirtd.conf needs to be removed from
# the load.
exec { 'Update PMON libvirtd.conf':
command => "/bin/sed -i 's#mode = passive#mode = ignore #' /etc/pmon.d/libvirtd.conf",
onlyif => '/usr/bin/test -e /etc/pmon.d/libvirtd.conf'
}
contain ::platform::kubernetes::worker::pci
}
class platform::kubernetes::coredns {
include ::platform::params
if str2bool($::is_initial_config_primary) or str2bool($::is_initial_k8s_config) {
if $::platform::params::system_mode != 'simplex' {
# For duplex and multi-node systems, restrict the dns pods to master nodes
exec { 'restrict coredns to master nodes':
command => 'kubectl --kubeconfig=/etc/kubernetes/admin.conf -n kube-system patch deployment coredns -p \'{"spec":{"template":{"spec":{"nodeSelector":{"node-role.kubernetes.io/master":""}}}}}\'', # lint:ignore:140chars
logoutput => true,
}
-> exec { 'Use anti-affinity for coredns pods':
command => 'kubectl --kubeconfig=/etc/kubernetes/admin.conf -n kube-system patch deployment coredns -p \'{"spec":{"template":{"spec":{"affinity":{"podAntiAffinity":{"requiredDuringSchedulingIgnoredDuringExecution":[{"labelSelector":{"matchExpressions":[{"key":"k8s-app","operator":"In","values":["kube-dns"]}]},"topologyKey":"kubernetes.io/hostname"}]}}}}}}\'', # lint:ignore:140chars
logoutput => true,
}
} else {
# For a simplex system, a single coredns replica is enough
exec { '1 coredns for simplex mode':
command => 'kubectl --kubeconfig=/etc/kubernetes/admin.conf -n kube-system scale --replicas=1 deployment coredns', # lint:ignore:140chars
logoutput => true,
}
}
}
}
# TODO: remove port 9001 once we have a public docker image registry using standard ports.
# add 5000 as the default port for private registry
class platform::kubernetes::firewall::params (
$transport = 'tcp',
$table = 'nat',
$dports = [80, 443, 9001, 5000],
$chain = 'POSTROUTING',
$jump = 'SNAT',
) {}
class platform::kubernetes::firewall
inherits ::platform::kubernetes::firewall::params {
include ::platform::params
include ::platform::network::oam::params
include ::platform::network::mgmt::params
include ::platform::docker::params
# add http_proxy and https_proxy port to k8s firewall
# in order to allow worker node access public network via proxy
if $::platform::docker::params::http_proxy {
$http_proxy_str_array = split($::platform::docker::params::http_proxy, ':')
$http_proxy_port = $http_proxy_str_array[length($http_proxy_str_array) - 1]
if $http_proxy_port =~ /^\d+$/ {
$http_proxy_port_val = $http_proxy_port
}
}
if $::platform::docker::params::https_proxy {
$https_proxy_str_array = split($::platform::docker::params::https_proxy, ':')
$https_proxy_port = $https_proxy_str_array[length($https_proxy_str_array) - 1]
if $https_proxy_port =~ /^\d+$/ {
$https_proxy_port_val = $https_proxy_port
}
}
if defined('$http_proxy_port_val') {
if defined('$https_proxy_port_val') and ($http_proxy_port_val != $https_proxy_port_val) {
$dports = $dports << $http_proxy_port_val << $https_proxy_port_val
} else {
$dports = $dports << $http_proxy_port_val
}
} elsif defined('$https_proxy_port_val') {
$dports = $dports << $https_proxy_port_val
}
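# (worked example, with a made-up proxy URL: for
# http_proxy = 'http://proxy.example.com:3128', split on ':' yields
# ['http', '//proxy.example.com', '3128']; the last element matches
# /^\d+$/, so port 3128 is appended to $dports above)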
$system_mode = $::platform::params::system_mode
$oam_float_ip = $::platform::network::oam::params::controller_address
$oam_interface = $::platform::network::oam::params::interface_name
$mgmt_subnet = $::platform::network::mgmt::params::subnet_network
$mgmt_prefixlen = $::platform::network::mgmt::params::subnet_prefixlen
$s_mgmt_subnet = "${mgmt_subnet}/${mgmt_prefixlen}"
$d_mgmt_subnet = "! ${s_mgmt_subnet}"
if $system_mode != 'simplex' {
platform::firewall::rule { 'kubernetes-nat':
service_name => 'kubernetes',
table => $table,
chain => $chain,
proto => $transport,
jump => $jump,
ports => $dports,
host => $s_mgmt_subnet,
destination => $d_mgmt_subnet,
outiface => $oam_interface,
tosource => $oam_float_ip,
}
}
}

@@ -1,157 +0,0 @@
class platform::ldap::params (
$admin_pw,
$admin_hashed_pw = undef,
$provider_uri = undef,
$server_id = undef,
$ldapserver_remote = false,
$ldapserver_host = undef,
$bind_anonymous = false,
) {}
class platform::ldap::server
inherits ::platform::ldap::params {
if ! $ldapserver_remote {
include ::platform::ldap::server::local
}
}
class platform::ldap::server::local
inherits ::platform::ldap::params {
exec { 'slapd-convert-config':
command => '/usr/sbin/slaptest -f /etc/openldap/slapd.conf -F /etc/openldap/schema/',
onlyif => '/usr/bin/test -e /etc/openldap/slapd.conf'
}
exec { 'slapd-conf-move-backup':
command => '/bin/mv -f /etc/openldap/slapd.conf /etc/openldap/slapd.conf.backup',
onlyif => '/usr/bin/test -e /etc/openldap/slapd.conf'
}
service { 'nscd':
ensure => 'running',
enable => true,
name => 'nscd',
hasstatus => true,
hasrestart => true,
}
service { 'openldap':
ensure => 'running',
enable => true,
name => 'slapd',
hasstatus => true,
hasrestart => true,
}
exec { 'stop-openldap':
command => '/usr/bin/systemctl stop slapd.service',
}
exec { 'update-slapd-conf':
command => "/bin/sed -i \\
-e 's#provider=ldap.*#provider=${provider_uri}#' \\
-e 's:serverID.*:serverID ${server_id}:' \\
-e 's:credentials.*:credentials=${admin_pw}:' \\
-e 's:^rootpw .*:rootpw ${admin_hashed_pw}:' \\
-e 's:modulepath .*:modulepath /usr/lib64/openldap:' \\
/etc/openldap/slapd.conf",
onlyif => '/usr/bin/test -e /etc/openldap/slapd.conf'
}
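# Illustrative result of the sed rewrite above, assuming (hypothetical
# values) provider_uri => 'ldap://controller-0', server_id => '002':
#   serverID 002
#   provider=ldap://controller-0
#   modulepath /usr/lib64/openldap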
# don't populate the admin password if binding anonymously
if ! $bind_anonymous {
file { '/usr/local/etc/ldapscripts/ldapscripts.passwd':
content => $admin_pw,
}
}
file { '/usr/share/cracklib/cracklib-small':
ensure => link,
target => '/usr/share/cracklib/cracklib-small.pwd',
}
# Start openldap with the updated config and updated nsswitch,
# then convert the slapd config to db format. Note: slapd must have
# run and created the db prior to this.
Exec['stop-openldap']
-> Exec['update-slapd-conf']
-> Service['nscd']
-> Service['nslcd']
-> Service['openldap']
-> Exec['slapd-convert-config']
-> Exec['slapd-conf-move-backup']
}
class platform::ldap::client
inherits ::platform::ldap::params {
file { '/etc/openldap/ldap.conf':
ensure => 'present',
replace => true,
content => template('platform/ldap.conf.erb'),
}
file { '/etc/nslcd.conf':
ensure => 'present',
replace => true,
content => template('platform/nslcd.conf.erb'),
}
-> service { 'nslcd':
ensure => 'running',
enable => true,
name => 'nslcd',
hasstatus => true,
hasrestart => true,
}
if $::personality == 'controller' {
file { '/usr/local/etc/ldapscripts/ldapscripts.conf':
ensure => 'present',
replace => true,
content => template('platform/ldapscripts.conf.erb'),
}
}
}
class platform::ldap::bootstrap
inherits ::platform::ldap::params {
include ::platform::params
# The local ldap server is configured during bootstrap. It is later
# replaced by the remote ldapserver configuration (if needed) during
# application of the controller / compute / storage manifests.
include ::platform::ldap::server::local
include ::platform::ldap::client
Class['platform::ldap::server::local'] -> Class[$name]
$dn = 'cn=ldapadmin,dc=cgcs,dc=local'
exec { 'populate initial ldap configuration':
command => "ldapadd -D ${dn} -w ${admin_pw} -f /etc/openldap/initial_config.ldif"
}
-> exec { 'create ldap admin user':
command => 'ldapadduser admin root'
}
-> exec { 'create ldap operator user':
command => 'ldapadduser operator users'
}
-> exec { 'create ldap protected group':
command => "ldapaddgroup ${::platform::params::protected_group_name} ${::platform::params::protected_group_id}"
}
-> exec { 'add admin to sys_protected protected group' :
command => "ldapaddusertogroup admin ${::platform::params::protected_group_name}",
}
-> exec { 'add operator to sys_protected protected group' :
command => "ldapaddusertogroup operator ${::platform::params::protected_group_name}",
}
# Change operator shell from default to /usr/local/bin/cgcs_cli
-> file { '/tmp/ldap.cgcs-shell.ldif':
ensure => present,
replace => true,
source => "puppet:///modules/${module_name}/ldap.cgcs-shell.ldif"
}
-> exec { 'ldap cgcs-cli shell update':
command =>
"ldapmodify -D ${dn} -w ${admin_pw} -f /tmp/ldap.cgcs-shell.ldif"
}
}

View File

@@ -1,35 +0,0 @@
class platform::lldp::params(
$tx_interval = 30,
$tx_hold = 4,
$options = []
) {}
class platform::lldp
inherits ::platform::lldp::params {
include ::platform::params
$hostname = $::platform::params::hostname
$system = $::platform::params::system_name
$version = $::platform::params::software_version
file { '/etc/lldpd.conf':
ensure => 'present',
replace => true,
content => template('platform/lldp.conf.erb'),
notify => Service['lldpd'],
}
file { '/etc/default/lldpd':
ensure => 'present',
replace => true,
content => template('platform/lldpd.default.erb'),
notify => Service['lldpd'],
}
service { 'lldpd':
ensure => 'running',
enable => true,
hasrestart => true,
}
}

View File

@@ -1,166 +0,0 @@
class platform::lvm::params (
$transition_filter = '[]',
$final_filter = '[]',
) {}
class platform::lvm
inherits platform::lvm::params {
# Mask socket unit as well to make sure
# systemd socket activation does not happen
service { 'lvm2-lvmetad.socket':
ensure => 'stopped',
enable => mask,
}
# Masking service unit ensures that it is not started again
-> service { 'lvm2-lvmetad':
ensure => 'stopped',
enable => mask,
}
# Since masking changes the unit symlinks to point to /dev/null,
# we need to reload the systemd configuration
-> exec { 'lvmetad-systemd-daemon-reload':
command => 'systemctl daemon-reload',
}
-> file_line { 'use_lvmetad':
path => '/etc/lvm/lvm.conf',
match => '^[^#]*use_lvmetad = 1',
line => ' use_lvmetad = 0',
}
}
define platform::lvm::global_filter($filter) {
file_line { "${name}: update lvm global_filter":
path => '/etc/lvm/lvm.conf',
line => " global_filter = ${filter}",
match => '^[ ]*global_filter =',
}
}
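# Example usage (hypothetical device path):
#   platform::lvm::global_filter { 'transition filter':
#     filter => '[ "a|/dev/disk/by-path/pci-0000:00:1f.2-ata-1.0|", "r|.*|" ]',
#   }
# rewrites the global_filter line of /etc/lvm/lvm.conf in place to
# accept that device and reject everything else.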
define platform::lvm::umount {
exec { "umount disk ${name}":
command => "umount ${name}; true",
}
}
class platform::lvm::vg::cgts_vg(
$vg_name = 'cgts-vg',
$physical_volumes = [],
) inherits platform::lvm::params {
::platform::lvm::umount { $physical_volumes:
}
-> physical_volume { $physical_volumes:
ensure => present,
}
-> volume_group { $vg_name:
ensure => present,
physical_volumes => $physical_volumes,
}
}
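# Sketch of what the chain above does for a hypothetical
# physical_volumes => ['/dev/sdb1']: the disk is first unmounted, then
# the physical_volume and volume_group types (from the puppet-lvm
# module) run the equivalent of:
#   pvcreate /dev/sdb1
#   vgcreate cgts-vg /dev/sdb1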
class platform::lvm::vg::cinder_volumes(
$vg_name = 'cinder-volumes',
$physical_volumes = [],
) inherits platform::lvm::params {
# Let the cinder manifests set up the DRBD-synced volume group
}
class platform::lvm::vg::nova_local(
$vg_name = 'nova-local',
$physical_volumes = [],
) inherits platform::lvm::params {
# TODO(rchurch): refactor portions of platform::worker::storage and move here
}
##################
# Controller Hosts
##################
class platform::lvm::controller::vgs {
include ::platform::lvm::vg::cgts_vg
include ::platform::lvm::vg::cinder_volumes
include ::platform::lvm::vg::nova_local
}
class platform::lvm::controller
inherits ::platform::lvm::params {
::platform::lvm::global_filter { 'transition filter':
filter => $transition_filter,
before => Class['::platform::lvm::controller::vgs']
}
::platform::lvm::global_filter { 'final filter':
filter => $final_filter,
require => Class['::platform::lvm::controller::vgs']
}
include ::platform::lvm
include ::platform::lvm::controller::vgs
}
class platform::lvm::controller::runtime {
include ::platform::lvm::controller
}
###############
# Compute Hosts
###############
class platform::lvm::compute::vgs {
include ::platform::lvm::vg::cgts_vg
include ::platform::lvm::vg::nova_local
}
class platform::lvm::compute
inherits ::platform::lvm::params {
::platform::lvm::global_filter { 'transition filter':
filter => $transition_filter,
before => Class['::platform::lvm::compute::vgs']
}
::platform::lvm::global_filter { 'final filter':
filter => $final_filter,
require => Class['::platform::lvm::compute::vgs']
}
include ::platform::lvm
include ::platform::lvm::compute::vgs
}
class platform::lvm::compute::runtime {
include ::platform::lvm::compute
}
###############
# Storage Hosts
###############
class platform::lvm::storage::vgs {
include ::platform::lvm::vg::cgts_vg
}
class platform::lvm::storage
inherits ::platform::lvm::params {
::platform::lvm::global_filter { 'final filter':
filter => $final_filter,
before => Class['::platform::lvm::storage::vgs']
}
include ::platform::lvm
include ::platform::lvm::storage::vgs
}
class platform::lvm::storage::runtime {
include ::platform::lvm::storage
}

View File

@@ -1,56 +0,0 @@
class platform::memcached::params(
$package_ensure = 'present',
$logfile = '/var/log/memcached.log',
# set CACHESIZE in /etc/sysconfig/memcached
$max_memory = false,
$tcp_port = 11211,
$udp_port = 11211,
# set MAXCONN in /etc/sysconfig/memcached
$max_connections = 8192,
$service_restart = true,
) {
include ::platform::params
# the mgmt network params referenced below live in another class;
# include it so those variables are evaluated before use
include ::platform::network::mgmt::params
$controller_0_hostname = $::platform::params::controller_0_hostname
$controller_1_hostname = $::platform::params::controller_1_hostname
$system_mode = $::platform::params::system_mode
if $system_mode == 'simplex' {
$listen_ip = $::platform::network::mgmt::params::controller0_address
} else {
case $::hostname {
$controller_0_hostname: {
$listen_ip = $::platform::network::mgmt::params::controller0_address
}
$controller_1_hostname: {
$listen_ip = $::platform::network::mgmt::params::controller1_address
}
default: {
fail("Hostname must be either ${controller_0_hostname} or ${controller_1_hostname}")
}
}
}
$listen_ip_version = $::platform::network::mgmt::params::subnet_version
}
class platform::memcached
inherits ::platform::memcached::params {
Anchor['platform::networking']
-> class { '::memcached':
package_ensure => $package_ensure,
logfile => $logfile,
listen_ip => $listen_ip,
tcp_port => $tcp_port,
udp_port => $udp_port,
max_connections => $max_connections,
max_memory => $max_memory,
service_restart => $service_restart,
}
-> exec { 'systemctl enable memcached.service':
command => '/usr/bin/systemctl enable memcached.service',
}
}
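# With the defaults above, a RedHat-family host would end up with
# /etc/sysconfig/memcached roughly like this (illustrative only; the
# exact rendering belongs to the memcached module):
#   PORT="11211"
#   MAXCONN="8192"
#   OPTIONS="-l <controller unit mgmt IP>"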

View File

@@ -1,93 +0,0 @@
class platform::mtce::params (
$auth_host = undef,
$auth_port = undef,
$auth_uri = undef,
$auth_username = undef,
$auth_pw = undef,
$auth_project = undef,
$auth_user_domain = undef,
$auth_project_domain = undef,
$auth_region = undef,
$worker_boot_timeout = 720,
$controller_boot_timeout = 1200,
$heartbeat_degrade_threshold = 6,
$heartbeat_failure_threshold = 10,
$heartbeat_failure_action = 'fail',
$heartbeat_period = 100,
$mtce_multicast = undef,
$mnfa_threshold = 2,
$mnfa_timeout = 0,
$sm_client_port = 2224,
$sm_server_port = 2124,
) { }
class platform::mtce
inherits ::platform::mtce::params {
include ::platform::client::credentials::params
$keyring_directory = $::platform::client::credentials::params::keyring_directory
# Assign before the file resource: template() is evaluated during
# catalog compilation, so any variable the template reads must
# already be set at this point.
$boot_device = $::boot_disk_device_path
file { '/etc/mtc.ini':
ensure => present,
mode => '0755',
content => template('mtce/mtc_ini.erb'),
}
}
class platform::mtce::agent
inherits ::platform::mtce::params {
if $::platform::params::init_keystone {
# configure a mtce keystone user
keystone_user { $auth_username:
ensure => present,
password => $auth_pw,
enabled => true,
}
# assign an admin role for this mtce user on the services tenant
keystone_user_role { "${auth_username}@${auth_project}":
ensure => present,
user_domain => $auth_user_domain,
project_domain => $auth_project_domain,
roles => ['admin'],
}
}
}
class platform::mtce::reload {
exec {'signal-mtc-agent':
command => 'pkill -HUP mtcAgent',
}
exec {'signal-hbs-agent':
command => 'pkill -HUP hbsAgent',
}
# mtcClient and hbsClient don't currently reload all configuration,
# therefore they must be restarted. Move to HUP once the daemons are
# updated to support a full reload.
exec {'pmon-restart-hbs-client':
command => 'pmon-restart hbsClient',
}
exec {'pmon-restart-mtc-client':
command => 'pmon-restart mtcClient',
}
}
class platform::mtce::runtime {
include ::platform::mtce
class {'::platform::mtce::reload':
stage => post
}
}
class platform::mtce::bootstrap {
include ::platform::params
include ::platform::mtce
include ::platform::mtce::agent
}

View File

@@ -1,39 +0,0 @@
class platform::multipath::params (
$enabled = false,
) {
}
class platform::multipath
inherits platform::multipath::params {
if $enabled {
file { '/etc/multipath.conf':
ensure => 'present',
mode => '0644',
content => template('platform/multipath.conf.erb')
}
-> service { 'start-multipathd':
ensure => 'running',
enable => true,
name => 'multipathd',
hasstatus => true,
hasrestart => true,
}
-> exec { 'systemctl-enable-multipathd':
command => '/usr/bin/systemctl enable multipathd.service',
}
} else {
service { 'stop-multipathd':
ensure => 'stopped',
enable => false,
name => 'multipathd',
hasstatus => true,
hasrestart => true,
}
-> exec { 'systemctl-disable-multipathd':
command => '/usr/bin/systemctl disable multipathd.service',
}
-> file { '/etc/multipath.conf':
ensure => 'absent',
}
}
}

View File

@@ -1,256 +0,0 @@
class platform::network::pxeboot::params(
# shared parameters with base class - required for auto hiera parameter lookup
$interface_name = undef,
$interface_address = undef,
$interface_devices = [],
$subnet_version = undef,
$subnet_network = undef,
$subnet_network_url = undef,
$subnet_prefixlen = undef,
$subnet_netmask = undef,
$subnet_start = undef,
$subnet_end = undef,
$gateway_address = undef,
$controller_address = undef, # controller floating
$controller_address_url = undef, # controller floating url address
$controller0_address = undef, # controller unit0
$controller1_address = undef, # controller unit1
$mtu = 1500,
) { }
class platform::network::mgmt::params(
# shared parameters with base class - required for auto hiera parameter lookup
$interface_name = undef,
$interface_address = undef,
$interface_devices = [],
$subnet_version = undef,
$subnet_network = undef,
$subnet_network_url = undef,
$subnet_prefixlen = undef,
$subnet_netmask = undef,
$subnet_start = undef,
$subnet_end = undef,
$gateway_address = undef,
$controller_address = undef, # controller floating
$controller_address_url = undef, # controller floating url address
$controller0_address = undef, # controller unit0
$controller1_address = undef, # controller unit1
$mtu = 1500,
# network type specific parameters
$platform_nfs_address = undef,
) { }
class platform::network::oam::params(
# shared parameters with base class - required for auto hiera parameter lookup
$interface_name = undef,
$interface_address = undef,
$interface_devices = [],
$subnet_version = undef,
$subnet_network = undef,
$subnet_network_url = undef,
$subnet_prefixlen = undef,
$subnet_netmask = undef,
$subnet_start = undef,
$subnet_end = undef,
$gateway_address = undef,
$controller_address = undef, # controller floating
$controller_address_url = undef, # controller floating url address
$controller0_address = undef, # controller unit0
$controller1_address = undef, # controller unit1
$mtu = 1500,
) { }
class platform::network::cluster_host::params(
# shared parameters with base class - required for auto hiera parameter lookup
$interface_name = undef,
$interface_address = undef,
$interface_devices = [],
$subnet_version = undef,
$subnet_network = undef,
$subnet_network_url = undef,
$subnet_prefixlen = undef,
$subnet_netmask = undef,
$subnet_start = undef,
$subnet_end = undef,
$gateway_address = undef,
$controller_address = undef, # controller floating
$controller_address_url = undef, # controller floating url address
$controller0_address = undef, # controller unit0
$controller1_address = undef, # controller unit1
$mtu = 1500,
) { }
class platform::network::ironic::params(
# shared parameters with base class - required for auto hiera parameter lookup
$interface_name = undef,
$interface_address = undef,
$interface_devices = [],
$subnet_version = undef,
$subnet_network = undef,
$subnet_network_url = undef,
$subnet_prefixlen = undef,
$subnet_netmask = undef,
$subnet_start = undef,
$subnet_end = undef,
$gateway_address = undef,
$controller_address = undef, # controller floating
$controller_address_url = undef, # controller floating url address
$controller0_address = undef, # controller unit0
$controller1_address = undef, # controller unit1
$mtu = 1500,
) { }
define network_address (
$address,
$ifname,
) {
# In AIO simplex configurations, the management addresses are assigned to the
# loopback interface. These addresses must be assigned using the host scope
# or assignment is prevented (can't have multiple global scope addresses on
# the loopback interface).
if $ifname == 'lo' {
$options = 'scope host'
} else {
$options = ''
}
# addresses should only be configured if running in simplex, otherwise SM
# will configure them on the active controller.
exec { "Configuring ${name} IP address":
command => "ip addr replace ${address} dev ${ifname} ${options}",
onlyif => 'test -f /etc/platform/simplex',
}
}
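# Example usage (hypothetical address) for an AIO simplex controller,
# where the management address lands on the loopback and therefore
# gets host scope:
#   network_address { 'mgmt-floating':
#     address => '192.168.204.2/24',
#     ifname  => 'lo',
#   }
# which executes: ip addr replace 192.168.204.2/24 dev lo scope host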
# Defines a single route resource for an interface.
# If multiple are required in the future, then this will need to
# iterate over a hash to create multiple entries per file.
define network_route6 (
$prefix,
$gateway,
$ifname,
) {
file { "/etc/sysconfig/network-scripts/route6-${ifname}":
ensure => present,
owner => root,
group => root,
mode => '0644',
content => "${prefix} via ${gateway} dev ${ifname}"
}
}
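# Example usage (hypothetical gateway and interface), matching the
# default route declared further below:
#   network_route6 { 'ipv6 default route':
#     prefix  => 'default',
#     gateway => 'fd00:204::1',
#     ifname  => 'ens801f0',
#   }
# writes "default via fd00:204::1 dev ens801f0" to
# /etc/sysconfig/network-scripts/route6-ens801f0.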
class platform::addresses (
$address_config = {},
) {
create_resources('network_address', $address_config, {})
}
define platform::interfaces::sriov_config(
$vf_addrs,
$vf_driver = undef
) {
if $vf_driver != undef {
ensure_resource(kmod::load, $vf_driver)
exec { "sriov-vf-bind-device: ${title}":
command => template('platform/sriov.bind-device.erb'),
logoutput => true,
require => Kmod::Load[$vf_driver],
}
}
}
class platform::interfaces (
$network_config = {},
$route_config = {},
$sriov_config = {}
) {
create_resources('network_config', $network_config, {})
create_resources('network_route', $route_config, {})
create_resources('platform::interfaces::sriov_config', $sriov_config, {})
include ::platform::params
include ::platform::network::mgmt::params
# Add static IPv6 default route since DHCPv6 does not support the router option
if $::personality != 'controller' {
if $::platform::network::mgmt::params::subnet_version == $::platform::params::ipv6 {
network_route6 { 'ipv6 default route':
prefix => 'default',
gateway => $::platform::network::mgmt::params::controller_address,
ifname => $::platform::network::mgmt::params::interface_name
}
}
}
}
class platform::network::apply {
include ::platform::interfaces
include ::platform::addresses
Network_config <| |>
-> Exec['apply-network-config']
-> Network_address <| |>
-> Anchor['platform::networking']
# Adding the Network_route dependency separately, in case it's empty,
# as a puppet bug drops the dependency chain altogether when
# Network_route is empty. See:
# https://projects.puppetlabs.com/issues/18399
Network_config <| |>
-> Network_route <| |>
-> Exec['apply-network-config']
Network_config <| |>
-> Network_route6 <| |>
-> Exec['apply-network-config']
exec {'apply-network-config':
command => 'apply_network_config.sh',
}
}
class platform::network (
$mlx4_core_options = undef,
) {
include ::platform::params
include ::platform::network::mgmt::params
include ::platform::network::cluster_host::params
include ::platform::network::apply
$management_interface = $::platform::network::mgmt::params::interface_name
$testcmd = '/usr/local/bin/connectivity_test'
if $::personality != 'controller' {
if $management_interface {
exec { 'connectivity-test-management':
command => "${testcmd} -t 70 -i ${management_interface} controller-platform-nfs; /bin/true",
require => Anchor['platform::networking'],
onlyif => 'test ! -f /etc/platform/simplex',
}
}
}
if $mlx4_core_options {
exec { 'mlx4-core-config':
command => '/usr/bin/mlx4_core_config.sh',
subscribe => File['/etc/modprobe.d/mlx4_sriov.conf'],
refreshonly => true
}
file {'/etc/modprobe.d/mlx4_sriov.conf':
content => "options mlx4_core ${mlx4_core_options}"
}
}
}
class platform::network::runtime {
include ::platform::network::apply
}

Some files were not shown because too many files have changed in this diff.