Merge remote-tracking branch 'origin/master' into f/centos75

Change-Id: I166b99c4ce2ed458ac79ce686064ab05ad3ca198
Signed-off-by: Dean Troyer <dtroyer@gmail.com>
Dean Troyer 2018-09-14 10:55:30 -05:00
commit fe26a0ff84
87 changed files with 1318 additions and 1479 deletions

View File

@ -4,7 +4,7 @@
jobs:
- openstack-tox-pep8:
voting: false
- openstack-tox-linters
gate:
jobs:
- noop
- openstack-tox-linters

View File

@ -10,3 +10,4 @@ ipaddr2-if-down.patch
spec-add-ipaddr2-ignore-lo-state.patch
Disable-creation-of-the-debug-package.patch
metapatch-for-arp_bg.patch
ipaddr2-avoid-failing-svc-if-down-meta.patch

View File

@ -0,0 +1,32 @@
From a8fc00f7d84327284643f70638da2201327bdd10 Mon Sep 17 00:00:00 2001
From: Bin Qian <bin.qian@windriver.com>
Date: Wed, 29 Aug 2018 11:22:32 -0400
Subject: [PATCH 1/1] ipaddr2 avoid failing when svc i/f down
---
SPECS/resource-agents.spec | 2 ++
1 file changed, 2 insertions(+)
diff --git a/SPECS/resource-agents.spec b/SPECS/resource-agents.spec
index aa06c49..0eca09f 100644
--- a/SPECS/resource-agents.spec
+++ b/SPECS/resource-agents.spec
@@ -257,6 +257,7 @@ Patch1118: ipaddr2_if_down.patch
Patch1119: ipaddr2_ignore_lo_if_state.patch
Patch1120: Modify-error-code-of-bz1454699-fix-to-prevent-inactive-controller-reboot-loop.patch
Patch1121: Re-enable-background-execution-of-arp-commands.patch
+Patch1122: ipaddr2-avoid-failing-svc-if-down.patch
Obsoletes: heartbeat-resources <= %{version}
Provides: heartbeat-resources = %{version}
@@ -568,6 +569,7 @@ exit 1
%patch1119 -p1
%patch1120 -p1
%patch1121 -p1
+%patch1122 -p1
%build
if [ ! -f configure ]; then
--
1.8.3.1
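
The hunk above wires the new patch in at both places an RPM spec needs it: a Patch1122: declaration in the preamble and the matching %patch1122 -p1 application in %prep. A quick local sanity check before rebuilding is to dry-run the patch against the unpacked sources; this is only a sketch and the rpmbuild paths are assumptions, not part of this change.

# Dry-run the new patch against the unpacked resource-agents tree (paths assumed).
cd ~/rpmbuild/BUILD/resource-agents-*/ &&
patch -p1 --dry-run < ~/rpmbuild/SOURCES/ipaddr2-avoid-failing-svc-if-down.patch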

View File

@ -0,0 +1,61 @@
From c3448b1536d50291dc5ca49dce5957c39403cc82 Mon Sep 17 00:00:00 2001
From: Bin Qian <bin.qian@windriver.com>
Date: Wed, 29 Aug 2018 11:00:22 -0400
Subject: [PATCH 1/1] avoid failing service when I/F is down
---
heartbeat/IPaddr2 | 24 ++++++------------------
1 file changed, 6 insertions(+), 18 deletions(-)
diff --git a/heartbeat/IPaddr2 b/heartbeat/IPaddr2
index 86009b9..2da5c5e 100755
--- a/heartbeat/IPaddr2
+++ b/heartbeat/IPaddr2
@@ -885,12 +885,8 @@ ip_start() {
then
exit $OCF_SUCCESS
else
- if [ "$OCF_RESKEY_dc" = "yes" ]; then
- ocf_log info "NIC $NIC is DOWN..."
- exit $OCF_SUCCESS
- else
- exit $OCF_ERR_GENERIC
- fi
+ ocf_log info "NIC $NIC is DOWN..."
+ exit $OCF_SUCCESS
fi
fi
@@ -954,12 +950,8 @@ ip_start() {
then
exit $OCF_SUCCESS
else
- if [ "$OCF_RESKEY_dc" = "yes" ]; then
- ocf_log info "NIC $NIC is DOWN"
- exit $OCF_SUCCESS
- else
- exit $OCF_ERR_GENERIC
- fi
+ ocf_log info "NIC $NIC is DOWN"
+ exit $OCF_SUCCESS
fi
}
@@ -1040,12 +1032,8 @@ ip_monitor() {
then
return $OCF_SUCCESS
else
- if [ "$OCF_RESKEY_dc" = "yes" ]; then
- ocf_log info "NIC $NIC is DOWN"
- return $OCF_SUCCESS
- else
- return $OCF_NOT_RUNNING
- fi
+ ocf_log info "NIC $NIC is DOWN"
+ return $OCF_SUCCESS
fi
;;
partial|no|partial2)
--
1.8.3.1
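
The three hunks above make the same change in ip_start (twice) and in ip_monitor: the OCF_RESKEY_dc gate is dropped, so a down NIC is always logged and reported as success rather than failing the resource. Reduced to its essentials, the post-patch behaviour looks like the sketch below (illustrative only; in the real agent ocf_log and OCF_SUCCESS come from the resource-agents shell library).

# Minimal stand-in for the NIC-down path after this patch (stubs replace the OCF library).
OCF_SUCCESS=0
ocf_log() { logger -t IPaddr2 "$2"; }

function handle_nic_down {
    ocf_log info "NIC ${NIC:-eth0} is DOWN"
    return $OCF_SUCCESS
}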

View File

@ -59,7 +59,9 @@ start() {
}
stop() {
if [ ! -e $PIDFILE ]; then return; fi
if [ ! -e $PIDFILE ]; then
return
fi
echo -n "Stopping $DESC..."
@ -73,8 +75,7 @@ stop() {
remove_TPM_transients
}
status()
{
status() {
pid=`cat $PIDFILE 2>/dev/null`
if [ -n "$pid" ]; then
if ps -p $pid &>/dev/null ; then

View File

@ -29,8 +29,7 @@ mount_nfs=no
mount_smb=no
mount_ncp=no
mount_cifs=no
while read device mountpt fstype options
do
while read device mountpt fstype options; do
case "$device" in
""|\#*)
continue
@ -70,8 +69,7 @@ done
exec 0>&1
if test "$rpcbind" = yes
then
if test "$rpcbind" = yes; then
# WRL: Centos precheck: Dont start rpcbind in this init script.
# It is started by a systemd service file.
if test "/etc/centos-release" = no
@ -88,8 +86,7 @@ then
fi
fi
if test "$mount_nfs" = yes || test "$mount_smb" = yes || test "$mount_ncp" = yes || test "$mount_cifs" = yes
then
if test "$mount_nfs" = yes || test "$mount_smb" = yes || test "$mount_ncp" = yes || test "$mount_cifs" = yes; then
echo "Mounting remote filesystems..."
test "$mount_nfs" = yes && mount -a -t nfs
test "$mount_smb" = yes && mount -a -t smbfs
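
Most of the remaining shell edits in this merge are the same mechanical conversion, apparently to keep the bashate run added to the linters job happy (see the tox.ini hunk near the end of this commit): then/do move onto the line of their if/while/for, and "function name()" with a brace on the next line becomes "function name {". A small self-contained sketch of the target style, with made-up names:

# Example of the shell style the converted scripts follow (not taken from a real script).
function check_mounts {
    while read -r device mountpt fstype options; do
        case "$fstype" in
            nfs|nfs4)
                echo "NFS mount: $device on $mountpt"
                ;;
        esac
    done < /proc/mounts
    if [ -e /etc/centos-release ]; then
        echo "running on CentOS"
    fi
}
check_mounts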

View File

@ -1 +0,0 @@
TIS_PATCH_VER=1

View File

@ -1,25 +0,0 @@
From de355606dea0404c4ae92bad5ce00b841697c698 Mon Sep 17 00:00:00 2001
From: Jack Ding <jack.ding@windriver.com>
Date: Tue, 8 May 2018 14:29:14 -0400
Subject: [PATCH] Update package versioning for TIS format
---
SPECS/memcached.spec | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/SPECS/memcached.spec b/SPECS/memcached.spec
index 6008493..c8575c8 100644
--- a/SPECS/memcached.spec
+++ b/SPECS/memcached.spec
@@ -4,7 +4,7 @@
Name: memcached
Version: 1.4.39
-Release: 1%{?dist}
+Release: 1.el7%{?_tis_dist}.%{tis_patch_ver}
Epoch: 0
Summary: High Performance, Distributed Memory Object Cache
--
1.8.3.1

View File

@ -1,32 +0,0 @@
From f321c8a8b800a7c2ca9394d3c76bec72b98c0d77 Mon Sep 17 00:00:00 2001
From: Jack Ding <jack.ding@windriver.com>
Date: Fri, 11 May 2018 15:38:56 -0400
Subject: [PATCH] always restart memcached service
---
SPECS/memcached.spec | 2 ++
1 file changed, 2 insertions(+)
diff --git a/SPECS/memcached.spec b/SPECS/memcached.spec
index c8575c8..f389035 100644
--- a/SPECS/memcached.spec
+++ b/SPECS/memcached.spec
@@ -16,6 +16,7 @@ Source1: memcached.sysconfig
# https://github.com/memcached/memcached/issues/218
Patch1: 0001-systemd-fix-upstream-provided-service.patch
+Patch2: 0002-always-restart-memcached-service.patch
BuildRequires: libevent-devel systemd-units
BuildRequires: perl-generators
@@ -44,6 +45,7 @@ access to the memcached binary include files.
%prep
%setup -q
%patch1 -p1 -b .unit
+%patch2 -p1
%build
# compile with full RELRO
--
1.8.3.1

View File

@ -1,33 +0,0 @@
From bb6fd3da3ace960eb587e7ff01d5816ea2baaa54 Mon Sep 17 00:00:00 2001
From: Jack Ding <jack.ding@windriver.com>
Date: Sun, 13 May 2018 18:22:15 -0400
Subject: [PATCH] Add dependencies and comment out incompatible service
parameters
---
SPECS/memcached.spec | 2 ++
1 file changed, 2 insertions(+)
diff --git a/SPECS/memcached.spec b/SPECS/memcached.spec
index f389035..86653a1 100644
--- a/SPECS/memcached.spec
+++ b/SPECS/memcached.spec
@@ -17,6 +17,7 @@ Source1: memcached.sysconfig
# https://github.com/memcached/memcached/issues/218
Patch1: 0001-systemd-fix-upstream-provided-service.patch
Patch2: 0002-always-restart-memcached-service.patch
+Patch3: 0003-Add-dependencies-and-comment-out-incompatible-servic.patch
BuildRequires: libevent-devel systemd-units
BuildRequires: perl-generators
@@ -46,6 +47,7 @@ access to the memcached binary include files.
%setup -q
%patch1 -p1 -b .unit
%patch2 -p1
+%patch3 -p1
%build
# compile with full RELRO
--
1.8.3.1

View File

@ -1,3 +0,0 @@
0001-Update-package-versioning-for-TIS-format.patch
0002-always-restart-memcached-service.patch
0003-Add-dependencies-and-comment-out-incompatible-servic.patch

View File

@ -1,26 +0,0 @@
From bb7b75184f7037e6d8d844874ae248fce1d06736 Mon Sep 17 00:00:00 2001
From: Jack Ding <jack.ding@windriver.com>
Date: Fri, 11 May 2018 15:24:28 -0400
Subject: [PATCH] Always restart memcached service
---
scripts/memcached.service | 4 ++++
1 file changed, 4 insertions(+)
diff --git a/scripts/memcached.service b/scripts/memcached.service
index 1bb9d33..8e58485 100644
--- a/scripts/memcached.service
+++ b/scripts/memcached.service
@@ -71,5 +71,9 @@ RestrictAddressFamilies=AF_INET AF_INET6 AF_UNIX
# Takes away the ability to create or manage any kind of namespace
RestrictNamespaces=true
+# WRS
+Restart=always
+RestartSec=0
+
[Install]
WantedBy=multi-user.target
--
1.8.3.1

View File

@ -1,67 +0,0 @@
From 1d9f43c5ecb20fe0a2a4abe9b94abd0d389edb40 Mon Sep 17 00:00:00 2001
From: Jack Ding <jack.ding@windriver.com>
Date: Mon, 14 May 2018 22:44:32 -0400
Subject: [PATCH 2/2] Add dependencies and comment out incompatible service
parameters
---
scripts/memcached.service | 14 +++++++-------
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/scripts/memcached.service b/scripts/memcached.service
index 8e58485..021b8b4 100644
--- a/scripts/memcached.service
+++ b/scripts/memcached.service
@@ -12,7 +12,7 @@
[Unit]
Description=memcached daemon
Before=httpd.service
-After=network.target
+After=network-online.target
[Service]
EnvironmentFile=/etc/sysconfig/memcached
@@ -46,34 +46,34 @@ LimitNOFILE=16384
# Explicit module loading will be denied. This allows to turn off module load and unload
# operations on modular kernels. It is recommended to turn this on for most services that
# do not need special file systems or extra kernel modules to work.
-ProtectKernelModules=true
+#ProtectKernelModules=true
# Kernel variables accessible through /proc/sys, /sys, /proc/sysrq-trigger, /proc/latency_stats,
# /proc/acpi, /proc/timer_stats, /proc/fs and /proc/irq will be made read-only to all processes
# of the unit. Usually, tunable kernel variables should only be written at boot-time, with the
# sysctl.d(5) mechanism. Almost no services need to write to these at runtime; it is hence
# recommended to turn this on for most services.
-ProtectKernelTunables=true
+#ProtectKernelTunables=true
# The Linux Control Groups (cgroups(7)) hierarchies accessible through /sys/fs/cgroup will be
# made read-only to all processes of the unit. Except for container managers no services should
# require write access to the control groups hierarchies; it is hence recommended to turn this on
# for most services
-ProtectControlGroups=true
+#ProtectControlGroups=true
# Any attempts to enable realtime scheduling in a process of the unit are refused.
-RestrictRealtime=true
+#RestrictRealtime=true
# Restricts the set of socket address families accessible to the processes of this unit.
# Protects against vulnerabilities such as CVE-2016-8655
RestrictAddressFamilies=AF_INET AF_INET6 AF_UNIX
# Takes away the ability to create or manage any kind of namespace
-RestrictNamespaces=true
+#RestrictNamespaces=true
# WRS
Restart=always
-RestartSec=0
+RestartSec=10
[Install]
WantedBy=multi-user.target
--
1.8.3.1

View File

@ -1 +0,0 @@
mirror:Source/memcached-1.4.39-1.el7.src.rpm

View File

@ -1 +0,0 @@
TIS_PATCH_VER=1

View File

@ -1,12 +0,0 @@
diff --git a/SPECS/novnc.spec b/SPECS/novnc.spec
index a43f435..640bf1f 100644
--- a/SPECS/novnc.spec
+++ b/SPECS/novnc.spec
@@ -1,6 +1,6 @@
Name: novnc
Version: 0.6.2
-Release: 1%{?dist}
+Release: 1.el7%{?_tis_dist}.%{tis_patch_ver}
Summary: VNC client using HTML5 (Web Sockets, Canvas) with encryption support
Requires: python-websockify

View File

@ -1 +0,0 @@
0001-Update-package-0.6.2-versioning-for-TIS-format.patch

View File

@ -1 +0,0 @@
mirror:Source/novnc-0.6.2-1.el7.src.rpm

View File

@ -104,6 +104,7 @@ collector
# platform-util
platform-util
platform-util-noncontroller
platform-util-controller
# monitor-tools
monitor-tools

View File

@ -32,7 +32,6 @@ base/shadow-utils
security/shim-unsigned
security/shim-signed
logging/syslog-ng
base/novnc
base/sanlock
base/sudo
virt/cloud-init
@ -88,13 +87,12 @@ python/python-requests
base/systemd
python/python-gunicorn
config/puppet-modules/openstack/puppet-oslo-11.3.0
strorage-drivers/python-3parclient
strorage-drivers/python-lefthandclient
storage-drivers/python-3parclient
storage-drivers/python-lefthandclient
security/tboot
networking/mellanox/libibverbs
kernel/kernel-modules/mlnx-ofa_kernel
networking/mellanox/rdma-core
base/memcached
config/puppet-modules/openstack/puppet-memcached-3.0.2
config/puppet-modules/openstack/puppet-horizon-9.5.0
config/puppet-modules/openstack/puppet-swift-11.3.0

View File

@ -43,8 +43,7 @@ fi
WSREP_STATUS=$($MYSQL_CMDLINE -e "SHOW STATUS LIKE 'wsrep_local_state';" \
2>${ERR_FILE} | tail -1 2>>${ERR_FILE})
if [[ "${WSREP_STATUS}" == "4" ]] || [[ "${WSREP_STATUS}" == "2" && ${AVAILABLE_WHEN_DONOR} == 1 ]]
then
if [[ "${WSREP_STATUS}" == "4" ]] || [[ "${WSREP_STATUS}" == "2" && ${AVAILABLE_WHEN_DONOR} == 1 ]]; then
# Check only when set to 0 to avoid latency in response.
if [[ $AVAILABLE_WHEN_READONLY -eq 0 ]];then
READ_ONLY=$($MYSQL_CMDLINE -e "SHOW GLOBAL VARIABLES LIKE 'read_only';" \

View File

@ -27,30 +27,26 @@ should_initialize ()
# If two args given first is user, second is group
# otherwise the arg is the systemd service file
if [ "$#" -eq 2 ]
then
if [ "$#" -eq 2 ]; then
myuser="$1"
mygroup="$2"
else
# Absorb configuration settings from the specified systemd service file,
# or the default service if not specified
SERVICE_NAME="$1"
if [ x"$SERVICE_NAME" = x ]
then
if [ x"$SERVICE_NAME" = x ]; then
SERVICE_NAME=@DAEMON_NAME@.service
fi
myuser=`systemctl show -p User "${SERVICE_NAME}" |
sed 's/^User=//'`
if [ x"$myuser" = x ]
then
if [ x"$myuser" = x ]; then
myuser=mysql
fi
mygroup=`systemctl show -p Group "${SERVICE_NAME}" |
sed 's/^Group=//'`
if [ x"$mygroup" = x ]
then
if [ x"$mygroup" = x ]; then
mygroup=mysql
fi
fi
@ -79,8 +75,7 @@ chmod 0640 "$errlogfile"
if should_initialize "$datadir" ; then
# First, make sure $datadir is there with correct permissions
# (note: if it's not, and we're not root, this'll fail ...)
if [ ! -e "$datadir" -a ! -h "$datadir" ]
then
if [ ! -e "$datadir" -a ! -h "$datadir" ]; then
mkdir -p "$datadir" || exit 1
fi
chown "$myuser:$mygroup" "$datadir"

View File

@ -12,12 +12,10 @@ MOUNT=/opt/platform
previous=1
delay=60
while :
do
while : ; do
# First, check that it's actually an NFS mount
mount | grep -q $MOUNT
if [ $? -ne 0 ]
then
if [ $? -ne 0 ]; then
logger -t NFSCHECK "$MOUNT is not mounted"
previous=1
sleep $delay
@ -31,18 +29,15 @@ do
# At this point, jobs will either report no jobs (empty) or Done,
# unless the job is still running/hung
rc=$(jobs)
if [[ -z "$rc" || $rc =~ "Done" ]]
then
if [[ -z "$rc" || $rc =~ "Done" ]]; then
# NFS is successful
if [ $previous -ne 0 ]
then
if [ $previous -ne 0 ]; then
logger -t NFSCHECK "NFS test of $MOUNT is ok"
previous=0
fi
else
# Keep waiting until the job is done
while ! [[ -z "$rc" || $rc =~ "Done" ]]
do
while ! [[ -z "$rc" || $rc =~ "Done" ]]; do
logger -t NFSCHECK "NFS test of $MOUNT is failed"
previous=1
sleep $delay

View File

@ -25,20 +25,17 @@ DEBUGFS_PATH=/sys/kernel/debug
DEBUGFS_I40_DEVICES_PATH=$DEBUGFS_PATH/i40e
LLDP_COMMAND=lldp
function log()
{
function log {
local MSG="${PROGNAME}: $1"
logger -p notice "${MSG}"
}
function err()
{
function err {
local MSG="${PROGNAME}: $1"
logger -p error "${MSG}"
}
function configure_device()
{
function configure_device {
local DEVICE=$1
local ACTION=$2
local DEVICE_PATH=${DEBUGFS_I40_DEVICES}/${DEVICE}
@ -59,23 +56,22 @@ function configure_device()
return ${RET}
}
function is_debugfs_mounted() {
function is_debugfs_mounted {
if grep -qs "${DEBUGFS_PATH}" /proc/mounts; then
return 0
fi
return 1
}
function mount_debugfs() {
function mount_debugfs {
mount -t debugfs none ${DEBUGFS_PATH}
}
function unmount_debugfs() {
function unmount_debugfs {
umount ${DEBUGFS_PATH}
}
function scan_devices()
{
function scan_devices {
local ACTION=$1
local DEBUGFS_MOUNTED="false"
local DEVICES=${DEBUGFS_I40_DEVICES_PATH}/*
@ -111,20 +107,17 @@ function scan_devices()
return 0
}
function start()
{
function start {
scan_devices start
return $?
}
function stop()
{
function stop {
scan_devices stop
return $?
}
function status()
{
function status {
return 0
}

View File

@ -15,8 +15,7 @@ DEBUG=${DEBUG:-0}
# all files beginning in "mlx4_port" and ending in one or more digits.
shopt -s extglob
function log()
{
function log {
local MSG="${PROGNAME}: $1"
if [ ${DEBUG} -ne 0 ]; then
echo "${MSG}"
@ -24,8 +23,7 @@ function log()
echo "${MSG}" >> /var/log/mlx4-configure.log
}
function configure_device()
{
function configure_device {
local DEVICE=$1
local DEVICE_PATH=${SYSFS_PCI_DEVICES}/${DEVICE}
@ -58,8 +56,7 @@ function configure_device()
}
function scan_devices()
{
function scan_devices {
local DEVICES=$(ls -1 ${SYSFS_PCI_DEVICES})
for DEVICE in ${DEVICES}; do
@ -83,19 +80,16 @@ function scan_devices()
}
function start()
{
function start {
scan_devices
return $?
}
function stop()
{
function stop {
return 0
}
function status()
{
function status {
return 0
}

View File

@ -12,13 +12,11 @@ NAME=$(basename $0)
OPTIONS_CHANGED_FLAG=/var/run/.mlx4_cx3_reboot_required
COMPUTE_CONFIG_COMPLETE=/var/run/.compute_config_complete
function LOG()
{
function LOG {
logger "$NAME: $*"
}
if [ -f $OPTIONS_CHANGED_FLAG ] && [ -f $COMPUTE_CONFIG_COMPLETE ]
then
if [ -f $OPTIONS_CHANGED_FLAG ] && [ -f $COMPUTE_CONFIG_COMPLETE ]; then
LOG "mlx4_core options has been changed. Failing goenabled check."
exit 1
fi

View File

@ -1,2 +1,2 @@
SRC_DIR="scripts"
TIS_PATCH_VER=24
TIS_PATCH_VER=25

View File

@ -14,8 +14,7 @@ SERVICE="ceph"
LOGFILE="${extradir}/ceph.info"
echo "${hostname}: Ceph Info .........: ${LOGFILE}"
function is_service_active()
{
function is_service_active {
active=`sm-query service management-ip | grep "enabled-active"`
if [ -z "$active" ] ; then
return 0
@ -24,8 +23,7 @@ function is_service_active()
fi
}
function exit_if_timeout()
{
function exit_if_timeout {
if [ "$?" = "124" ] ; then
echo "Exiting due to ceph command timeout" >> ${LOGFILE}
exit 0

View File

@ -10,8 +10,7 @@
source /usr/local/sbin/collect_parms
source /usr/local/sbin/collect_utils
function is_extended_profile()
{
function is_extended_profile {
if [ ! -n "${security_profile}" ] || [ "${security_profile}" != "extended" ]; then
return 0
else

View File

@ -12,8 +12,7 @@ source /usr/local/sbin/collect_utils
LOGFILE="${extradir}/nfv-vim.info"
echo "${hostname}: NFV-Vim Info ......: ${LOGFILE}"
function is_service_active()
{
function is_service_active {
active=`sm-query service vim | grep "enabled-active"`
if [ -z "$active" ] ; then
return 0

View File

@ -10,8 +10,7 @@
source /usr/local/sbin/collect_parms
source /usr/local/sbin/collect_utils
function is_service_active()
{
function is_service_active {
active=`sm-query service rabbit-fs | grep "enabled-active"`
if [ -z "$active" ] ; then
return 0

View File

@ -19,8 +19,7 @@ DB_DIR="${extradir}/database"
LOGFILE="${extradir}/database.info"
echo "${hostname}: Database Info .....: ${LOGFILE}"
function is_service_active()
{
function is_service_active {
active=`sm-query service postgres | grep "enabled-active"`
if [ -z "$active" ] ; then
return 0
@ -34,11 +33,9 @@ function is_service_active()
###############################################################################
mkdir -p ${DB_DIR}
function log_database()
{
function log_database {
db_list=( $(${PSQL_CMD} -t -c "SELECT datname FROM pg_database WHERE datistemplate = false;") )
for db in "${db_list[@]}"
do
for db in "${db_list[@]}"; do
echo "postgres database: ${db}"
${PSQL_CMD} -d ${db} -c "
SELECT
@ -75,12 +72,10 @@ function log_database()
DB_EXT=db.sql.txt
function database_dump()
{
function database_dump {
mkdir -p ${DB_DIR}
db_list=( $(${PSQL_CMD} -t -c "SELECT datname FROM pg_database WHERE datistemplate = false;") )
for DB in "${db_list[@]}"
do
for DB in "${db_list[@]}"; do
if [ "$DB" != "keystone" -a "$DB" != "ceilometer" ] ; then
echo "${hostname}: Dumping Database ..: ${DB_DIR}/$DB.$DB_EXT"
(cd ${DB_DIR} ; sudo -u postgres pg_dump $DB > $DB.$DB_EXT)

View File

@ -14,8 +14,7 @@ SERVICE="inventory"
LOGFILE="${extradir}/${SERVICE}.info"
RPMLOG="${extradir}/rpm.info"
function is_service_active()
{
function is_service_active {
active=`sm-query service management-ip | grep "enabled-active"`
if [ -z "$active" ] ; then
return 0

View File

@ -27,8 +27,8 @@ fi
delimiter ${LOGFILE} "ip link"
ip link >> ${LOGFILE}
for i in $(ip link | grep mtu | grep eth |awk '{print $2}' | sed 's#:##g');
do
for i in $(ip link | grep mtu | grep eth |awk '{print $2}' | sed 's#:##g'); do
delimiter ${LOGFILE} "ethtool ${i}"
ethtool ${i} >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG}
@ -55,8 +55,8 @@ fi
delimiter ${LOGFILE} "tc qdisc show"
tc qdisc show >> ${LOGFILE}
for i in $(ip link | grep htb | awk '{print $2}' | sed 's#:##g');
do
for i in $(ip link | grep htb | awk '{print $2}' | sed 's#:##g'); do
delimiter ${LOGFILE} "tc class show dev ${i}"
tc class show dev ${i} >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG}
@ -70,8 +70,8 @@ done
delimiter ${LOGFILE} "tc -s qdisc show"
tc -s qdisc show >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG}
for i in $(ip link | grep htb | awk '{print $2}' | sed 's#:##g');
do
for i in $(ip link | grep htb | awk '{print $2}' | sed 's#:##g'); do
delimiter ${LOGFILE} "tc -s class show dev ${i}"
tc -s class show dev ${i} >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG}

View File

@ -35,6 +35,8 @@
/etc/bash_completion.d
/etc/pm
/etc/systemd/system/*.mount
/etc/systemd/system/*.socket
/etc/systemd/system/lvm2-lvmetad.service
/etc/systemd/system/ctrl-alt-del.target
/etc/ssl
/etc/mtc/tmp

View File

@ -14,8 +14,7 @@ fi
OPT_USE_INTERVALS=1
# Print key ceph statistics
function print_ceph()
{
function print_ceph {
print_separator
TOOL_HIRES_TIME
@ -47,8 +46,7 @@ tools_header
# Calculate number of sample repeats based on overall interval and sampling interval
((REPEATS = PERIOD_MIN * 60 / INTERVAL_SEC))
for ((rep=1; rep <= REPEATS ; rep++))
do
for ((rep=1; rep <= REPEATS ; rep++)); do
print_ceph
sleep ${INTERVAL_SEC}
done

View File

@ -32,11 +32,9 @@ LOG "Cleanup engtools:"
# ( be sure not to clobber /etc/init.d/collect-engtools.sh )
LOG "kill processes brute force"
pids=( $(pidof -x /usr/local/bin/collect-engtools.sh) )
if [ ${#pids[@]} -ne 0 ]
then
if [ ${#pids[@]} -ne 0 ]; then
LOG "killing: ${pids[@]}"
for pid in ${pids[@]}
do
for pid in ${pids[@]}; do
LOG "kill: [ ${pid} ] "
pkill -KILL -P ${pid}
kill -9 ${pid}
@ -48,8 +46,7 @@ else
fi
LOG "remove pidfiles"
for TOOL in "${TOOLS[@]}"
do
for TOOL in "${TOOLS[@]}"; do
rm -f -v /var/run/${TOOL}.pid
done
LOG "done"

View File

@ -48,11 +48,9 @@ declare tgt_avail_bytes
declare tgt_used_bytes
# do_parallel_commands - launch parallel tools with separate output files
function do_parallel_commands()
{
function do_parallel_commands {
parallel_outfiles=()
for elem in "${tlist[@]}"
do
for elem in "${tlist[@]}"; do
tool=""; period=""; repeat=""; interval=""
my_hash="elem[*]"
local ${!my_hash}
@ -75,8 +73,7 @@ function do_parallel_commands()
# get_current_avail_usage() - get output destination file-system usage and
# availability.
# - updates: df_size_bytes, df_avail_bytes, du_used_bytes
function get_current_avail_usage()
{
function get_current_avail_usage {
local -a df_arr_bytes=( $(df -P --block-size=1 ${TOOL_DEST_DIR} | awk 'NR==2 {print $2, $4}') )
df_size_bytes=${df_arr_bytes[0]}
df_avail_bytes=${df_arr_bytes[1]}
@ -85,8 +82,7 @@ function get_current_avail_usage()
# purge_oldest_files() - remove oldest files based on file-system available space,
# and maximum collection size
function purge_oldest_files()
{
function purge_oldest_files {
# get current file-system usage
get_current_avail_usage
msg=$(printf "avail %d MB, headroom %d MB; used %d MB, max %d MB" \
@ -116,8 +112,7 @@ function purge_oldest_files()
# remove files in oldest time sorted order until we meet usage targets,
# incrementally updating usage as we remve files
for file in $( ls -rt ${TOOL_DEST_DIR}/${HOSTNAME}_* 2>/dev/null )
do
for file in $( ls -rt ${TOOL_DEST_DIR}/${HOSTNAME}_* 2>/dev/null ); do
if [[ $df_avail_bytes -ge $tgt_avail_bytes ]] && \
[[ $du_used_bytes -le $tgt_used_bytes ]]; then
break
@ -307,9 +302,7 @@ REP=0
if [ ${#tlist[@]} -ne 0 ]; then
# Static stats collection is turned on
while [[ ${TOOL_USR1_SIGNAL} -eq 0 ]] &&
[[ ${OPT_FOREVER} -eq 1 || ${REP} -lt ${REPEATS} ]]
do
while [[ ${TOOL_USR1_SIGNAL} -eq 0 ]] && [[ ${OPT_FOREVER} -eq 1 || ${REP} -lt ${REPEATS} ]]; do
# increment loop counter
((REP++))

View File

@ -14,8 +14,7 @@ fi
OPT_USE_INTERVALS=1
# Print disk summary
function print_disk()
{
function print_disk {
print_separator
TOOL_HIRES_TIME
@ -57,8 +56,7 @@ function print_disk()
}
# Print disk static summary
function print_disk_static()
{
function print_disk_static {
print_separator
cmd='cat /proc/scsi/scsi'
${ECHO} "Attached devices: ${cmd}"
@ -109,8 +107,7 @@ print_disk_static
# Calculate number of sample repeats based on overall interval and sampling interval
((REPEATS = PERIOD_MIN * 60 / INTERVAL_SEC))
for ((rep=1; rep <= REPEATS ; rep++))
do
for ((rep=1; rep <= REPEATS ; rep++)); do
print_disk
sleep ${INTERVAL_SEC}
done

View File

@ -24,8 +24,7 @@ else
. /etc/init.d/functions
fi
# Lightweight replacement for pidofproc -p <pid>
function check_pidfile ()
{
function check_pidfile {
local pidfile pid
OPTIND=1
@ -53,8 +52,7 @@ function check_pidfile ()
}
# tools_init - initialize tool resources
function tools_init ()
{
function tools_init {
local rc=0
local error=0
TOOLNAME=$(basename $0)
@ -199,7 +197,7 @@ function tools_init ()
}
# tools_cleanup() - terminate child processes
function tools_cleanup() {
function tools_cleanup {
# restore signal handling to default behaviour
trap - INT HUP TERM EXIT
trap - USR1 USR2
@ -230,43 +228,47 @@ function tools_cleanup() {
}
# tools_exit_handler() - exit handler routine
function tools_exit_handler() {
function tools_exit_handler {
TOOL_EXIT_SIGNAL=1
tools_cleanup 128
}
# tools_usr1_handler() - USR1 handler routine
function tools_usr1_handler() {
function tools_usr1_handler {
TOOL_USR1_SIGNAL=1
LOG "caught USR1"
}
# tools_usr2_handler() - USR2 handler routine
function tools_usr2_handler() {
function tools_usr2_handler {
TOOL_USR2_SIGNAL=1
LOG "caught USR1"
}
# LOG(), WARNLOG(), ERRLOG() - simple print log functions (not logger)
function LOG ()
{
function LOG {
local tstamp_H=$( date +"%Y-%0m-%0e %H:%M:%S" )
echo "${tstamp_H} ${HOSTNAME} $0($$): $@";
}
function LOG_NOCR ()
{
function LOG_NOCR {
local tstamp_H=$( date +"%Y-%0m-%0e %H:%M:%S" )
echo -n "${tstamp_H} ${HOSTNAME} $0($$): $@";
}
function WARNLOG () { LOG "WARN $@"; }
function ERRLOG () { LOG "ERROR $@"; }
function WARNLOG {
LOG "WARN $@";
}
function ERRLOG {
LOG "ERROR $@";
}
# TOOL_HIRES_TIME() - easily parsed date/timestamp and hi-resolution uptime
function TOOL_HIRES_TIME()
{
function TOOL_HIRES_TIME {
echo "time: " $( ${DATE} +"%a %F %H:%M:%S.%N %Z %z" ) "uptime: " $( cat /proc/uptime )
}
# set_affinity() - set affinity for current script if a a CPULIST is defined
function set_affinity() {
function set_affinity {
local CPULIST=$1
if [ -z "${CPULIST}" ]; then
return
@ -280,7 +282,7 @@ function set_affinity() {
}
# cmd_idle_priority() - command to set nice + ionice
function cmd_idle_priority() {
function cmd_idle_priority {
local NICE=""
local IONICE=""
@ -301,13 +303,13 @@ function cmd_idle_priority() {
# print_separator() - print a horizontal separation line '\u002d' is '-'
function print_separator () {
function print_separator {
printf '\u002d%.s' {1..80}
printf '\n'
}
# tools_header() - print out common GenWare tools header
function tools_header() {
function tools_header {
local TOOLNAME=$(basename $0)
# Get timestamp
@ -393,7 +395,7 @@ function tools_header() {
# tools_usage() - show generic tools tool usage
function tools_usage() {
function tools_usage {
if [ ${OPT_USE_INTERVALS} -eq 1 ]; then
echo "usage: ${TOOLNAME} [-f] [-p <period_mins>] [-i <interval_seconds>] [-c <cpulist>] [-h]"
else
@ -402,7 +404,7 @@ function tools_usage() {
}
# tools_print_help() - print generic tool help
function tools_print_help() {
function tools_print_help {
tools_usage
echo
echo "Options:";
@ -423,7 +425,7 @@ function tools_print_help() {
}
# tools_parse_options() -- parse common options for tools scripts
function tools_parse_options() {
function tools_parse_options {
# check for no arguments, print usage
if [ $# -eq "0" ]; then
tools_usage
@ -432,8 +434,7 @@ function tools_parse_options() {
fi
# parse the input arguments
while getopts "fp:i:c:h" Option
do
while getopts "fp:i:c:h" Option; do
case $Option in
f)
OPT_FOREVER=1
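
The collect-engtools utilities share the option string shown above ("fp:i:c:h"). A standalone sketch of that parsing loop, with illustrative defaults rather than the scripts' real ones:

#!/bin/bash
# Sketch of the common engtools option parsing (defaults here are examples only).
OPT_FOREVER=0; PERIOD_MIN=5; INTERVAL_SEC=60; CPULIST=""
while getopts "fp:i:c:h" Option; do
    case $Option in
        f) OPT_FOREVER=1 ;;
        p) PERIOD_MIN=$OPTARG ;;
        i) INTERVAL_SEC=$OPTARG ;;
        c) CPULIST=$OPTARG ;;
        h) echo "usage: $(basename $0) [-f] [-p <period_mins>] [-i <interval_seconds>] [-c <cpulist>] [-h]"; exit 0 ;;
    esac
done
echo "forever=${OPT_FOREVER} period=${PERIOD_MIN} interval=${INTERVAL_SEC} cpulist=${CPULIST}"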

View File

@ -16,8 +16,7 @@ PAGE_SIZE=$(getconf PAGE_SIZE)
OPT_USE_INTERVALS=1
function print_files()
{
function print_files {
print_separator
TOOL_HIRES_TIME
@ -85,8 +84,7 @@ tools_header
# Calculate number of sample repeats based on overall interval and sampling interval
((REPEATS = PERIOD_MIN * 60 / INTERVAL_SEC))
for ((rep=1; rep <= REPEATS ; rep++))
do
for ((rep=1; rep <= REPEATS ; rep++)); do
print_files
sleep ${INTERVAL_SEC}
done

View File

@ -69,8 +69,7 @@ case $1 in
stop)
if [ -e ${PIDFILE} ]; then
pids=$(pidof -x ${NAME})
if [[ ! -z "${pids}" ]]
then
if [[ ! -z "${pids}" ]]; then
echo_success "Stopping ${NAME} [$pid]"
start-stop-daemon --stop --quiet --oknodo --pidfile ${PIDFILE} --retry=TERM/3/KILL/5
# [ JGAULD: none of the following should be necessary ]

View File

@ -29,8 +29,7 @@ compute_ports=(8000 8001 8002)
traffic_types=(storage migration default drbd)
flow_ids=(1:20 1:30 1:40 1:50)
function exec_cmd ()
{
function exec_cmd {
node="$1"
cmd="$2"
@ -41,8 +40,7 @@ function exec_cmd ()
fi
}
function iperf3_server_start ()
{
function iperf3_server_start {
local server="$1"
local result="$2"
local port="$3"
@ -55,8 +53,7 @@ function iperf3_server_start ()
$(exec_cmd "${server}" "${cmd}")
}
function iperf3_client_tcp_start ()
{
function iperf3_client_tcp_start {
local result="${result_dir}/throughput"
local cmd=""
local client="$1"
@ -76,8 +73,7 @@ function iperf3_client_tcp_start ()
$(exec_cmd "${client}" "${cmd} > ${result} 2>&1")
}
function iperf3_client_udp_start ()
{
function iperf3_client_udp_start {
local result="${result_dir}/throughput_udp"
local cmd=""
local client="$1"
@ -102,20 +98,17 @@ function iperf3_client_udp_start ()
$(exec_cmd "${client}" "${cmd} -b ${bw} >> ${result} 2>&1" )
}
function iperf3_stop ()
{
function iperf3_stop {
local node="$1"
local cmd="pkill iperf3"
$(exec_cmd "${node}" "${cmd}")
}
function get_ip_addr ()
{
function get_ip_addr {
arp -a | grep -oP "(?<=$1 \()[^)]*" | head -n 1
}
function throughput_tcp_test()
{
function throughput_tcp_test {
for (( i = 0; i < ${#nodes[@]} ; i+=2 )); do
for interface in "${interfaces[@]}"; do
local interface_name="management"
@ -137,8 +130,7 @@ function throughput_tcp_test()
done
}
function throughput_udp_test ()
{
function throughput_udp_test {
for (( i = 0; i < ${#nodes[@]} ; i+=2 )); do
for interface in "${interfaces[@]}"; do
local interface_name="management"
@ -229,8 +221,7 @@ function throughput_udp_test ()
done
}
function throughput_parallel_test ()
{
function throughput_parallel_test {
local dev=""
local ip_addr=""
local interface_name=""
@ -304,8 +295,7 @@ function throughput_parallel_test ()
done
}
function latency_test ()
{
function latency_test {
for (( i = 0; i < ${#nodes[@]} ; i+=2 )); do
for interface in "${interfaces[@]}"; do
local interface_name="management"
@ -328,16 +318,14 @@ function latency_test ()
done
}
function setup ()
{
function setup {
for node in ${nodes[@]}; do
iperf3_stop "${node}"
$(exec_cmd "${node}" "rm -rf ${result_dir}; mkdir -p ${result_dir}")
done
}
function get_remote_results ()
{
function get_remote_results {
for node in ${nodes[@]}; do
if [ "${node}" != "${host}" ]; then
mkdir ${result_dir}/${node}
@ -346,8 +334,7 @@ function get_remote_results ()
done
}
function get_interface_info ()
{
function get_interface_info {
local dev=""
local ip_addr=""
printf "Network interfaces info\n" >> ${summary_file}
@ -365,8 +352,7 @@ function get_interface_info ()
done
}
function generate_summary ()
{
function generate_summary {
local header=""
local result=""
local result_file=""
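
The helpers above wrap plain iperf3 runs between nodes; a sketch of the underlying commands (the peer name is a placeholder, the port comes from compute_ports above):

# What the script automates, by hand (server on the remote node first, then the client side).
iperf3 -s -p 8000 -D                        # remote node: daemonized iperf3 server
iperf3 -c peer-node -p 8000 -t 30           # TCP throughput test
iperf3 -c peer-node -p 8000 -u -b 100M      # UDP test at a target bandwidth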

View File

@ -16,8 +16,7 @@ PAGE_SIZE=$(getconf PAGE_SIZE)
OPT_USE_INTERVALS=1
# Print key networking device statistics
function print_memory()
{
function print_memory {
# Configuration for netcmds
MEMINFO=/proc/meminfo
NODEINFO=/sys/devices/system/node/node?/meminfo
@ -99,8 +98,7 @@ tools_header
# Calculate number of sample repeats based on overall interval and sampling interval
((REPEATS = PERIOD_MIN * 60 / INTERVAL_SEC))
for ((rep=1; rep <= REPEATS ; rep++))
do
for ((rep=1; rep <= REPEATS ; rep++)); do
print_memory
sleep ${INTERVAL_SEC}
done

View File

@ -14,8 +14,7 @@ fi
OPT_USE_INTERVALS=1
# Print key networking device statistics
function print_netcmds()
{
function print_netcmds {
# Configuration for netcmds
DEV=/proc/net/dev
NETSTAT=/proc/net/netstat
@ -26,8 +25,7 @@ function print_netcmds()
for net in \
${DEV} ${NETSTAT}
do
if [ -e "${net}" ]
then
if [ -e "${net}" ]; then
${ECHO} "# ${net}"
${CAT} ${net}
${ECHO}
@ -53,8 +51,7 @@ tools_header
# Calculate number of sample repeats based on overall interval and sampling interval
((REPEATS = PERIOD_MIN * 60 / INTERVAL_SEC))
for ((rep=1; rep <= REPEATS ; rep++))
do
for ((rep=1; rep <= REPEATS ; rep++)); do
print_netcmds
sleep ${INTERVAL_SEC}
done

View File

@ -14,8 +14,7 @@ fi
OPT_USE_INTERVALS=1
# Print key networking device statistics
function print_postgres()
{
function print_postgres {
print_separator
TOOL_HIRES_TIME
@ -40,8 +39,7 @@ ORDER BY pg_database_size DESC;
"
# For each database, list tables and their sizes (similar to "\dt+")
for db in "${db_list[@]}"
do
for db in "${db_list[@]}"; do
${ECHO} "# postgres database: ${db}"
${PSQL} -d ${db} -c "
SELECT
@ -128,8 +126,7 @@ tools_header
# Calculate number of sample repeats based on overall interval and sampling interval
((REPEATS = PERIOD_MIN * 60 / INTERVAL_SEC))
for ((rep=1; rep <= REPEATS ; rep++))
do
for ((rep=1; rep <= REPEATS ; rep++)); do
print_postgres
sleep ${INTERVAL_SEC}
done

View File

@ -15,8 +15,7 @@ OPT_USE_INTERVALS=1
#Need this workaround
MQOPT="-n rabbit@localhost"
# Print key networking device statistics
function print_rabbitmq()
{
function print_rabbitmq {
print_separator
TOOL_HIRES_TIME
@ -72,8 +71,7 @@ tools_header
# Calculate number of sample repeats based on overall interval and sampling interval
((REPEATS = PERIOD_MIN * 60 / INTERVAL_SEC))
for ((rep=1; rep <= REPEATS ; rep++))
do
for ((rep=1; rep <= REPEATS ; rep++)); do
print_rabbitmq
sleep ${INTERVAL_SEC}
done

View File

@ -41,8 +41,7 @@ LOG "rsync engtools data from all blades:"
# controllers
SRC=/scratch/syseng_data/
DEST=/opt/backups/syseng_data/
for HOST in ${CONTROLLER[@]}
do
for HOST in ${CONTROLLER[@]}; do
ping -c1 ${HOST} 1>/dev/null 2>/dev/null
if [ $? -eq 0 ]; then
LOG "rsync ${RSYNC_OPT} ${USER}@${HOST}:${SRC} ${DEST}"
@ -55,8 +54,7 @@ done
# computes & storage
SRC=/tmp/syseng_data/
DEST=/opt/backups/syseng_data/
for HOST in ${STORAGE[@]} ${COMPUTE[@]}
do
for HOST in ${STORAGE[@]} ${COMPUTE[@]}; do
ping -c1 ${HOST} 1>/dev/null 2>/dev/null
if [ $? -eq 0 ]; then
LOG "rsync ${RSYNC_OPT} ${USER}@${HOST}:${SRC} ${DEST}"

View File

@ -29,8 +29,7 @@ set_affinity ${CPULIST}
LOG_NOCR "collecting "
t=0
for ((rep=1; rep <= REPEATS ; rep++))
do
for ((rep=1; rep <= REPEATS ; rep++)); do
((t++))
sleep ${INTERVAL_SEC}
if [ ${t} -ge ${REP_LOG} ]; then

View File

@ -14,8 +14,7 @@ fi
OPT_USE_INTERVALS=1
# Print key networking device statistics
function print_vswitch()
{
function print_vswitch {
print_separator
TOOL_HIRES_TIME
@ -55,8 +54,7 @@ tools_header
# Calculate number of sample repeats based on overall interval and sampling interval
((REPEATS = PERIOD_MIN * 60 / INTERVAL_SEC))
for ((rep=1; rep <= REPEATS ; rep++))
do
for ((rep=1; rep <= REPEATS ; rep++)); do
print_vswitch
sleep ${INTERVAL_SEC}
done

View File

@ -22,8 +22,7 @@ YEAR=`date +'%Y'`
files="${FILE_LIST// /, }"
read -p "Are you sure you want to remove all uncompressed $files files? [Y/N]: " -n 1 -r
echo
if [[ $REPLY =~ ^[Y]$ ]]
then
if [[ $REPLY =~ ^[Y]$ ]]; then
for FILE in ${FILE_LIST}; do
rm -v */*_${YEAR}-*${FILE}
done

View File

@ -41,8 +41,7 @@ NODE=$1
CURDATE=$(date)
DATESTAMP=$(date +%b-%d)
function sedit()
{
function sedit {
local FILETOSED=$1
sed -i -e "s/ */ /g" ${FILETOSED}
sed -i -e "s/ /,/g" ${FILETOSED}
@ -50,8 +49,7 @@ function sedit()
sed -i "s/,$//" ${FILETOSED}
}
function get_filename_from_mountname()
{
function get_filename_from_mountname {
local name=$1
local fname
if test "${name#*"scratch"}" != "${name}"; then
@ -82,8 +80,7 @@ function get_filename_from_mountname()
echo $fname
}
function parse_process_schedtop_data()
{
function parse_process_schedtop_data {
# Logic has been moved to a separate script so that parsing process level schedtop
# can be run either as part of parse-all.sh script or independently.
LOG "Process level schedtop parsing is turned on in lab.conf. Parsing schedtop detail..."
@ -92,8 +89,7 @@ function parse_process_schedtop_data()
cd ${NODE}
}
function parse_controller_specific()
{
function parse_controller_specific {
# Parsing Postgres data, removing data from previous run if there are any. Generate summary
# data for each database and detail data for specified tables
LOG "Parsing postgres data for ${NODE}"
@ -123,14 +119,12 @@ function parse_controller_specific()
done
}
function parse_compute_specific()
{
function parse_compute_specific {
LOG "Parsing vswitch data for ${NODE}"
../parse-vswitch.sh ${NODE}
}
function parse_occtop_data()
{
function parse_occtop_data {
LOG "Parsing occtop data for ${NODE}"
bzcat *occtop.bz2 >occtop-${NODE}-${DATESTAMP}.txt
cp occtop-${NODE}-${DATESTAMP}.txt tmp.txt
@ -181,8 +175,7 @@ function parse_occtop_data()
rm tmp.txt tmp2.txt tmpdate.txt tmpcore.txt
}
function parse_memtop_data()
{
function parse_memtop_data {
LOG "Parsing memtop data for ${NODE}"
bzcat *memtop.bz2 > memtop-${NODE}-${DATESTAMP}.txt
cp memtop-${NODE}-${DATESTAMP}.txt tmp.txt
@ -200,8 +193,7 @@ function parse_memtop_data()
rm tmp.txt tmp2.txt
}
function parse_netstats_data()
{
function parse_netstats_data {
LOG "Parsing netstats data for ${NODE}"
# First generate the summary data then detail data for specified interfaces
../parse_netstats *netstats.bz2 > netstats-summary-${NODE}-${DATESTAMP}.txt
@ -225,8 +217,7 @@ function parse_netstats_data()
fi
}
function parse_iostats_data()
{
function parse_iostats_data {
LOG "Parsing iostat data for ${NODE}"
if [ -z "${IOSTATS_DEVICE_LIST}" ]; then
ERRLOG "IOSTAT_DEVICE_LIST is not set in host.conf. Skipping iostats..."
@ -236,8 +227,7 @@ function parse_iostats_data()
echo "Date/Time,${DEVICE},rqm/s,wrqm/s,r/s,w/s,rkB/s,wkB/s,avgrq-sz,avgqu-sz,await,r_await,w_await,svctm,%util" > iostat-${NODE}-${DEVICE}.csv
# Dumping iostat content to tmp file
bzcat *iostat.bz2 | grep -E "/2015|/2016|/2017|${DEVICE}" | awk '{print $1","$2","$3","$4","$5","$6","$7","$8","$9","$10","$11","$12","$13","$14}' > tmp.txt
while IFS= read -r current
do
while IFS= read -r current; do
if test "${current#*Linux}" != "$current"
then
# Skip the line that contains the word "Linux"
@ -277,8 +267,7 @@ function parse_iostats_data()
fi
}
function parse_diskstats_data()
{
function parse_diskstats_data {
LOG "Parsing diskstats data for ${NODE}"
if [ -z "${DISKSTATS_FILESYSTEM_LIST}" ]; then
@ -362,8 +351,7 @@ if test "${NODE#*"controller"}" != "${NODE}"; then
# is to use inotify which requires another inotify-tools package.
oldsize=0
newsize=0
while true
do
while true; do
newsize=$(stat -c %s postgres-conns.csv)
if [ "$oldsize" == "$newsize" ]; then
break

View File

@ -19,16 +19,14 @@
# e.g. >./parse-daily.sh memstats sm-eru
# >./parse-daily.sh controller-0 filestats postgress
function print_usage()
{
function print_usage {
echo "Usage: ./parse-daily.sh <parser-name> <process-name> will parse daily data for all hosts."
echo "Usage: ./parse-daily.sh <host-name> <parser-name> <process-name> will parse daily data for specified host."
echo "Valid parsers for daily stats are: memstats & filestats."
exit 1
}
function parse_daily_stats()
{
function parse_daily_stats {
local PARSER_NAME=$1
local PROCESS_NAME=$2
local TMPFILE="tmp.txt"

View File

@ -25,8 +25,7 @@ else
fi
LOG "Parsing postgres files ${FILES}"
function generate_header()
{
function generate_header {
local header="Date/Time,Total"
for DB in ${DATABASE_LIST}; do
if [ ${DB} == "nova_api" ]; then
@ -59,8 +58,7 @@ function generate_header()
echo $header
}
function generate_grep_str()
{
function generate_grep_str {
local grepstr="time:"
for DB in ${DATABASE_LIST}; do
grepstr="${grepstr}|${DB}"
@ -69,8 +67,7 @@ function generate_grep_str()
echo $grepstr
}
function init_variables()
{
function init_variables {
CONN_TOTAL="0"
CONN_ACTIVE_TOTAL="0"
CONN_IDLE_TOTAL="0"
@ -85,8 +82,7 @@ function init_variables()
done
}
function output_values()
{
function output_values {
local result="${DATEVAL} ${TIMEVAL},${CONN_TOTAL}"
for DB in ${DATABASE_LIST}; do
val=$(eval echo \${CONN_${DB^^}})

View File

@ -17,16 +17,14 @@
PARSERDIR=$(dirname $0)
. ${PARSERDIR}/parse-util.sh
function print_usage()
{
function print_usage {
echo "Usage: ./parse-schedtop.sh <host-name>"
echo " ./parse-schedtop.sh <host-name> <service-name>"
echo "e.g. >./parse-schedtop.sh controller-0 nova-conductor"
exit 1
}
function sedit()
{
function sedit {
local FILETOSED=$1
sed -i -e "s/ */ /g" ${FILETOSED}
sed -i -e "s/ /,/2g" ${FILETOSED}
@ -34,8 +32,7 @@ function sedit()
sed -i "s/,$//" ${FILETOSED}
}
function parse_schedtop_data()
{
function parse_schedtop_data {
HOST=$1
SERVICE=$2
LOG "Parsing ${SERVICE} schedtop for host ${HOST}"

View File

@ -8,19 +8,16 @@
#LOGFILE="${PARSERDIR}/parserlog.txt"
LOGFILE="parserlog.txt"
function LOG ()
{
function LOG {
local tstamp_H=$( date +"%Y-%0m-%0e %H:%M:%S" )
echo -e "${tstamp_H} $0($$): $@" >> ${LOGFILE}
}
function ERRLOG ()
{
function ERRLOG {
LOG "ERROR: $@"
}
function WARNLOG ()
{
function WARNLOG {
LOG "WARN: $@"
}

View File

@ -444,7 +444,7 @@ def _mask_to_cpulist(mask=0):
# Assume max number of cpus for now...
max_cpus = 128
for cpu in xrange(max_cpus):
for cpu in range(max_cpus):
if ((1 << cpu) & mask):
cpulist.append(cpu)
return cpulist
@ -492,9 +492,9 @@ def range_to_list(csv_range=None):
"""
if not csv_range:
return []
xranges = [(lambda L: xrange(L[0], L[-1] + 1))(map(int, r.split('-')))
ranges = [(lambda L: range(L[0], L[-1] + 1))(map(int, r.split('-')))
for r in csv_range.split(',')]
return [y for x in xranges for y in x]
return [y for x in ranges for y in x]
class TimeoutError(Exception):
@ -613,7 +613,7 @@ def do_libvirt_domain_info((host)):
cpulist_d = {}
cpuset_total = 0
up_total = 0
for vcpu in xrange(d_nrVirtCpu):
for vcpu in range(d_nrVirtCpu):
cpuset_b = d_vcpus[1][vcpu]
cpuset = 0
for cpu, up in enumerate(cpuset_b):

tox.ini
View File

@ -14,6 +14,12 @@ deps = -r{toxinidir}/test-requirements.txt
[testenv:linters]
whitelist_externals = bash
#bashate ignore
#E006 Line too long
#E041 Arithmetic expansion using $[ is deprecated for $((
#E042 local declaration hides errors
#E043 Arithmetic compound has inconsistent return semantics
#E044 Use [[ for non-POSIX comparisions
commands =
bash -c "find {toxinidir} \
-not \( -type d -name .?\* -prune \) \
@ -21,10 +27,10 @@ commands =
-not -name \*~ \
-not -name \*.md \
-name \*.sh \
-print0 | xargs -0 bashate -v"
-print0 | xargs -0 bashate -v \
-i E006,E041,E042,E043,E044 -e E*"
bash -c "find {toxinidir} \
\( -name middleware/io-monitor/recipes-common/io-monitor/io-monitor/io_monitor/test-tools/yaml/* -prune \) \
-o \( -name .tox -prune \) \
\( -path {toxinidir}/.tox \) -a -prune \
-o -type f -name '*.yaml' \
-print0 | xargs -0 yamllint"
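
With the ignore list above, the tox linters check can be reproduced against a single script locally (a sketch; assumes bashate is installed, e.g. via pip, and the file path is only an example):

# Mirror of the linters bashate invocation for one file.
bashate -v -i E006,E041,E042,E043,E044 -e 'E*' scripts/cgcs_tc_setup.sh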

View File

@ -1,4 +1,4 @@
SRC_DIR="platform-util"
COPY_LIST_TO_TAR="scripts"
TIS_PATCH_VER=12
TIS_PATCH_VER=13

View File

@ -20,6 +20,12 @@ Summary: non controller platform utilities
%description -n platform-util-noncontroller
Platform utilities that don't get packaged on controller hosts
%package -n platform-util-controller
Summary: controller platform utilities
%description -n platform-util-controller
Platform utilities that packaged on controllers or one node system
%define local_dir /usr/local
%define local_bindir %{local_dir}/bin
%define local_sbindir %{local_dir}/sbin
@ -52,6 +58,7 @@ install -m 700 -p -D %{_buildsubdir}/scripts/patch-restart-haproxy %{buildroot}%
install -d %{buildroot}/etc/systemd/system
install -m 644 -p -D %{_buildsubdir}/scripts/opt-platform.mount %{buildroot}/etc/systemd/system
install -m 644 -p -D %{_buildsubdir}/scripts/opt-platform.service %{buildroot}/etc/systemd/system
install -m 644 -p -D %{_buildsubdir}/scripts/memcached.service %{buildroot}/etc/systemd/system
# Mask the systemd ctrl-alt-delete.target, to disable reboot on ctrl-alt-del
ln -sf /dev/null %{buildroot}/etc/systemd/system/ctrl-alt-del.target
@ -85,3 +92,7 @@ systemctl enable opt-platform.service
# from parsing the fstab is not used by systemd.
/etc/systemd/system/opt-platform.mount
/etc/systemd/system/opt-platform.service
%files -n platform-util-controller
%defattr(-,root,root,-)
/etc/systemd/system/memcached.service

View File

@ -22,15 +22,13 @@ fi
# network link to autonegotiate link speed. Re-run the script in
# the background so the parent can return right away and init can
# continue.
if [ $# -eq 3 ]
then
if [ $# -eq 3 ]; then
$0 $DEV $NETWORKTYPE $NETWORKSPEED dummy &
disown
exit 0
fi
function test_valid_speed
{
function test_valid_speed {
# After the link is enabled but before the autonegotiation is complete
# the link speed may be read as either -1 or as 4294967295 (which is
# uint(-1) in twos-complement) depending on the kernel. Neither one is valid.
@ -42,24 +40,20 @@ function test_valid_speed
fi
}
function log
{
function log {
# It seems that syslog isn't yet running, so append directly to the syslog file
echo `date +%FT%T.%3N` `hostname` CGCS_TC_SETUP: $@ >> /var/log/platform.log
}
function infra_exists
{
if [ -z "$infrastructure_interface" ]
then
function infra_exists {
if [ -z "$infrastructure_interface" ]; then
return 1
else
return 0
fi
}
function is_consolidated
{
function is_consolidated {
if ! infra_exists
then
return 1
@ -67,26 +61,22 @@ function is_consolidated
# determine whether the management interface is a parent of the
# infrastructure interface based on name.
# eg. this matches enp0s8 to enp0s8.10 but not enp0s88
if [[ $infrastructure_interface =~ $management_interface[\.][0-9]+$ ]]
then
if [[ $infrastructure_interface =~ $management_interface[\.][0-9]+$ ]]; then
return 0
fi
return 1
fi
}
function is_vlan
{
if [ -f /proc/net/vlan/$DEV ]
then
function is_vlan {
if [ -f /proc/net/vlan/$DEV ]; then
return 0
else
return 1
fi
}
function is_loopback
{
function is_loopback {
# (from include/uapi/linux/if.h)
IFF_LOOPBACK=$((1<<3))
@ -101,8 +91,7 @@ function is_loopback
fi
}
function setup_tc_port_filter
{
function setup_tc_port_filter {
local PORT=$1
local PORTMASK=$2
local FLOWID=$3

View File

@ -0,0 +1,55 @@
#
# This service file is a customized version in platform-util package from
# openstack/stx-integ project
[Unit]
Description=memcached daemon
Before=httpd.service
After=network-online.target
[Service]
EnvironmentFile=/etc/sysconfig/memcached
ExecStart=/usr/bin/memcached -p ${PORT} -u ${USER} -m ${CACHESIZE} -c ${MAXCONN} $OPTIONS
# Set up a new file system namespace and mounts private /tmp and /var/tmp directories
# so this service cannot access the global directories and other processes cannot
# access this service's directories.
PrivateTmp=true
# Mounts the /usr, /boot, and /etc directories read-only for processes invoked by this unit.
ProtectSystem=full
# Ensures that the service process and all its children can never gain new privileges
NoNewPrivileges=true
# Sets up a new /dev namespace for the executed processes and only adds API pseudo devices
# such as /dev/null, /dev/zero or /dev/random (as well as the pseudo TTY subsystem) to it,
# but no physical devices such as /dev/sda.
PrivateDevices=true
# Required for dropping privileges and running as a different user
CapabilityBoundingSet=CAP_SETGID CAP_SETUID CAP_SYS_RESOURCE
LimitNOFILE=16384
# Attempts to create memory mappings that are writable and executable at the same time,
# or to change existing memory mappings to become executable are prohibited.
# XXX: this property is supported with systemd 231+ which is not yet on EL7
# MemoryDenyWriteExecute=true
# Restricts the set of socket address families accessible to the processes of this unit.
# Protects against vulnerabilities such as CVE-2016-8655
RestrictAddressFamilies=AF_INET AF_INET6 AF_UNIX
# These service parameters are commented out since they are incompatible with
# Centos 7 and generate warning messages when included.
#ProtectKernelModules=true
#ProtectKernelTunables=true
#ProtectControlGroups=true
#RestrictRealtime=true
#RestrictNamespaces=true
Restart=always
RestartSec=10
[Install]
WantedBy=multi-user.target
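
Once the unit is installed (the platform-util.spec hunk earlier copies it into /etc/systemd/system), the effective settings can be checked on a node with something like:

# Confirm systemd loaded the customized unit and the restart policy took effect.
systemctl cat memcached.service
systemctl show memcached.service -p Restart -p RestartSec -p After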

View File

@ -9,8 +9,7 @@
# $1 - listening port of remote log server
PORT=$1
function is_loopback
{
function is_loopback {
# (from include/uapi/linux/if.h)
IFF_LOOPBACK=$((1<<3))
@ -25,14 +24,12 @@ function is_loopback
fi
}
function log
{
function log {
# It seems that syslog isn't yet running, so append directly to the syslog file
echo `date +%FT%T.%3N` `hostname` CGCS_TC_SETUP: $@ >> /var/log/platform.log
}
function test_valid_speed
{
function test_valid_speed {
# After the link is enabled but before the autonegotiation is complete
# the link speed may be read as either -1 or as 4294967295 (which is
# uint(-1) in twos-complement) depending on the kernel. Neither one is valid.
@ -44,8 +41,7 @@ function test_valid_speed
fi
}
function get_dev_speed
{
function get_dev_speed {
# If the link doesn't come up we won't go enabled, so here we can
# afford to wait forever for the link.
while true