Relocate parsers to stx-integ/tools/engtools/parsers

Move content from stx-utils into stx-integ or stx-update

Packages will be relocated to:

stx-update:
    enable-dev-patch
    extras

stx-integ:
    config-files/
        io-scheduler

    filesystem/
        filesystem-scripts

    grub/
        grubby

    logging/
        logmgmt

    tools/
        collector
        monitor-tools

    tools/engtools/
        hostdata-collectors
        parsers

    utilities/
        build-info
        branding   (formerly wrs-branding)
        platform-util

Change-Id: I36225e4046ff026e23d3035d01f7a3058111d0fd
Story: 2002801
Task: 22687
Scott Little 2018-08-01 12:29:56 -04:00
parent 78dbdb2e90
commit e042c49ae8
24 changed files with 0 additions and 5877 deletions


@@ -1 +0,0 @@
SE tools wiki: http://wiki.wrs.com/PBUeng/InformationAboutSEToolsAndDataAnalysis


@@ -1,33 +0,0 @@
#!/bin/bash
#Copyright (c) 2016 Wind River Systems, Inc.
#
#SPDX-License-Identifier: Apache-2.0
#
# This script removes uncompressed files. It can save a huge amount of disk space
# on the analysis server. Run this script after the very last time the data is parsed
# and BEFORE running the parse-daily.sh script.
# If it is run after each intermediate parse, the download-data.sh script will download the
# uncompressed files again.
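# As a rough illustration (the actual FILE_LIST comes from lab.conf and varies by lab):
# with FILE_LIST="memtop occtop schedtop" and YEAR=2016, answering Y removes the files
# matching */*_2016-*memtop, */*_2016-*occtop and */*_2016-*schedtop. Only the
# uncompressed copies match; the corresponding .bz2 files are left in place.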
if [ ! -f lab.conf ]; then
echo "Lab configuration file is missing."
echo "See http://wiki.wrs.com/PBUeng/TitaniumServerSysengToolsAndDataAnalysis for more info."
exit 1
fi
source ./lab.conf
YEAR=`date +'%Y'`
files="${FILE_LIST// /, }"
read -p "Are you sure you want to remove all uncompressed $files files? [Y/N]: " -n 1 -r
echo
if [[ $REPLY =~ ^[Y]$ ]]
then
for FILE in ${FILE_LIST}; do
rm -v */*_${YEAR}-*${FILE}
done
else
echo "Remove request cancelled."
fi


@@ -1,474 +0,0 @@
#!/usr/bin/env python
"""
Copyright (c) 2017 Wind River Systems, Inc.
SPDX-License-Identifier: Apache-2.0
This script performs post-parsing data analysis. It takes the csv files generated by the parser scripts and imports
the data into an InfluxDB database. All InfluxDB connection information should be specified in the lab.conf file.
Please see the wiki for more details.
"""
import os
import sys
import time
import datetime
from optparse import OptionParser
from multiprocessing import Pool
# command line arguments
def init():
parser = OptionParser()
parser.add_option("-a", "--all", dest="parse_all", action="store_true", default=False, help="use this option to parse all csv files for all nodes specified within lab.conf")
parser.add_option("-n", "--node", dest="node_list", action="append", type="string", help="the specific node(s) to be parsed, otherwise all nodes within lab.conf will be parsed")
parser.add_option("-f", "--file", dest="file_list", action="append", type="string", help="the specific csv file(s) to be parsed. Must use with the -n option. Ex: -n controller-0 -f postgres-conns.csv")
parser.add_option("-p", "--postgres_svc", dest="postgres_list", action="append", type="string", help="use this option to parse postgres CSV files given specific services. Ex: -p nova")
parser.add_option("-b", "--batch-size", dest="batch_size", action="store", type="int", default="100", help="Influx accepts data in batches. Use this option to change the batch size from the default value of 100. Note that Influx can timeout if the batch size is too large")
(options, args) = parser.parse_args()
if len(sys.argv[1:]) == 0:
parser.print_help()
sys.exit(0)
else:
return options
# converts given UTC time into epoch time
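# Timestamp formats handled, based on the branches below:
#   - diskstats csv: no timestamp per line, so the time is taken from the earliest bz2
#     file's name and advanced by 15 minutes per csv line
#   - csv with AM/PM timestamps: "%m/%d/%Y %H:%M:%S" plus an AM/PM suffix
#   - memstats/filestats: "%Y-%m-%d %H:%M:%S"
#   - everything else: "%Y-%m-%d %H:%M:%S.%f"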
def convertTime(file, node, start, lc, utcTime):
try:
# diskstats csv requires special work as no timestamp is provided
if file.startswith("diskstats"):
t = " ".join(start)
pattern = '%Y-%m-%d %H%M'
epoch = int(time.mktime(time.strptime(t, pattern)))
# add 15 minutes to current timestamp
epoch += 900 * lc
else:
if utcTime.endswith("AM"):
pattern = '%m/%d/%Y %H:%M:%S'
epoch = int(time.mktime(time.strptime(utcTime[:19], pattern)))
elif utcTime.endswith("PM"):
tmp = int(utcTime[11:13])
if tmp < 12:
tmp += 12
str1 = utcTime[:11]
str2 = utcTime[13:19]
utcTime = str1 + str(tmp) + str2
pattern = '%m/%d/%Y %H:%M:%S'
epoch = int(time.mktime(time.strptime(utcTime, pattern)))
elif file.startswith("memstats") or file.startswith("filestats"):
pattern = '%Y-%m-%d %H:%M:%S'
epoch = int(time.mktime(time.strptime(utcTime[:19], pattern)))
else:
pattern = '%Y-%m-%d %H:%M:%S.%f'
epoch = int(time.mktime(time.strptime(utcTime[:23], pattern)))
return str(epoch)
except Exception as e:
appendToFile("/tmp/csv-to-influx.log", "Error: Issue converting time for {} for {}. Please check the csv and re-parse as some data may be incorrect\n-{}".format(file, node, e.message))
return None
# go through each node folder to parse csv files
def processFiles(path, node, options, influx_info):
prefixes = ["postgres-conns", "postgres", "memtop", "occtop", "iostat", "netstats", "rabbitmq", "schedtop", "vswitch", "filestats-summary", "memstats-summary", "diskstats"]
if options.file_list is None:
for file in os.listdir(path):
if file.endswith(".csv"):
if file.startswith(tuple(prefixes)):
if options.parse_all is True or options.node_list is not None:
parse(path, file, node, options, influx_info)
elif options.postgres_list is not None:
for svc in options.postgres_list:
if svc in list(file.split("_")):
parse(path, file, node, options, influx_info)
else:
continue
# if -f option is used
elif options.file_list is not None:
for file in options.file_list:
parse(path, file, node, options, influx_info)
# let the log know when a thread has finished parsing a folder
appendToFile("/tmp/csv-to-influx.log", "-Process for {} finished parsing at {}".format(node, datetime.datetime.utcnow()))
# parse the csv files and add data to influx
# needs to be cleaned up
def parse(path, file, node, options, influx_info):
file_loc = os.path.join(path, file)
# until able to access the file
while True:
if os.access(file_loc, os.R_OK):
try:
with open(file_loc, "r") as f:
file_name = file.replace("-", "_").replace(".csv", "").replace("_{}".format(node.replace("-", "_")),
"").strip("\n")
appendToFile("/tmp/csv-to-influx.log", "Parsing {} for {}".format(file_name, node))
header = f.readline().split(",")
# vswitch CSV files have no headers...
if file_name.startswith("vswitch"):
if file_name.replace("vswitch_", "").split("_")[0] == "engine":
header = "date/time,id,cpuid,rx-packets,tx-packets,tx-disabled,tx-overflow,rx-discard,tx-discard,usage".split(
",")
elif file_name.replace("vswitch_", "").split("_")[0] == "interface":
header = "date/time,rx-packets,tx-packets,rx-bytes,tx-bytes,tx-errors,rx-errors,tx-discards,rx-discards,rx-floods,rx-no-vlan".split(
",")
elif file_name.replace("vswitch_", "").split("_")[0] == "port":
header = "date/time,rx-packets,tx-packets,rx-bytes,tx-bytes,tx-errors,rx-errors,rx-nombuf".split(
",")
elif file_name.startswith("memstats"):
if header[0] != "Date":
header = "date/time,rss,vrz".split(",")  # split into a list, matching the other header branches
influx_string = ""
measurement = ""
tag_names = ["node"]
init_tags = [node]
line_count = 0
batch = 0
start_time = "" # used for diskstats
bad_string = False
# set tag information needed for influx. Each file needs different things
if file_name.startswith("postgres_conns"):
measurement = "postgres_connections"
elif file_name.startswith("postgres"):
if file_name.endswith("_size"):
measurement = "postgres_db_size"
service = file_name.replace("postgres_", "").replace("_size", "")
if service == "size":
service = "postgres"
tag_names = ["node", "service"]
init_tags = [node, service]
else:
measurement = "postgres_svc_stats"
service = file_name.replace("postgres_", "").split("_")[0]
tag_names = ["node", "service", "schema", "table"]
init_tags = [node, service]
elif file_name.startswith("memtop"):
if file_name == "memtop_detailed":
measurement = "memtop_detailed"
else:
measurement = "memtop"
elif file_name.startswith("occtop"):
if file_name == "occtop_detailed":
measurement = "occtop_detailed"
else:
measurement = "occtop"
elif file_name.startswith("iostat"):
measurement = "iostat"
tag_names = ["node", "device"]
init_tags = [node, header[1]]
elif file_name.startswith("netstats"):
measurement = "netstats"
interface = file.replace("{}-".format(measurement), "").replace("{}-".format(node), "").replace(
".csv", "")
tag_names = ["node", "interface"]
init_tags = [node, interface]
elif file_name.startswith("rabbitmq"):
if file_name.endswith("info"):
measurement = "rabbitmq_svc"
service = file_name.replace("rabbitmq_", "")
tag_names = ["node", "service"]
init_tags = [node, service]
else:
measurement = "rabbitmq"
elif file_name.startswith("schedtop"):
measurement = "schedtop"
service = file_name.replace("schedtop_", "").replace("_", "-")
tag_names = ["node", "service"]
init_tags = [node, service]
elif file_name.startswith("vswitch"):
measurement = "vswitch"
identifier = file_name.replace("vswitch_", "").split("_")
tag_names = ["node", identifier[0]]
if identifier[0] == "engine":
init_tags = [node, "engine_id_{}".format(identifier[1])]
elif identifier[0] == "interface":
init_tags = [node, identifier[1]]
elif identifier[0] == "port":
init_tags = [node, "port_{}".format(identifier[1])]
elif file_name.startswith("filestats"):
measurement = "filestats"
service = file_name.replace("filestats_summary_", "").replace(".csv", "").replace("_", "-")
tag_names = ["node", "service"]
init_tags = [node, service]
elif file_name.startswith("memstats"):
measurement = "memstats"
service = file_name.replace("memstats_summary_", "").replace(".csv", "").replace("_", "-")
tag_names = ["node", "service"]
init_tags = [node, service]
elif file_name.startswith("diskstats"):
measurement = "diskstats"
mount = file_name.replace("diskstats_", "")
tag_names = ["node", "mount", "file_system", "type"]
init_tags = [node, mount]
# find the bz2 file with the earliest date
start = float('inf')
for t in os.listdir(path):
if t.startswith(node) and t.endswith("bz2"):
next = int(
str(t.replace("{}_".format(node), "")[2:15]).replace("-", "").replace("_", ""))
if next < start:
start = next
start_time = t.split("_")[1:3]
# go through header, determine the fields, skip the tags
field_names = []
for i in header:
j = i.lower().replace(" ", "_").replace("-", "_").replace("used(%)", "usage").replace("(%)", "").replace("(s)", "").strip(" ").strip("\n")
if j in tag_names or i in init_tags or j == 'pid' or j == 'name':
continue
else:
# for occtop core info
if j.isdigit():
j = "core_{}".format(j)
field_names.append(j)
# go through each line
bad_count = 0
for lines in f:
line = lines.strip("\n").split(",")
timestamp = convertTime(file, node, start_time, line_count, line[0].strip("\n"))
if timestamp is None:
bad_count += 1
if bad_count == 3:
bad_string = True
break
else:
continue
tag_values = init_tags[:]  # copy the base tags so per-line values do not accumulate across lines
field_values = []
line_count += 1
batch += 1
# go through data in each line and determine whether it belongs to a tag or a field
for word in line:
word = word.strip("\n")
# if the word is a non-number, an interface, or a device, add it to the tags; otherwise add it to the fields
if word.replace("_", "").replace("-", "").replace(" ", "").isalpha() or (word in init_tags) or word.endswith(".info") or word.startswith("ext"):
tag_values.append(word)
elif word.startswith("/dev"):
tag_values.append(word.split("/")[-1])
elif word.startswith("<rabbit"):
continue
else:
if word == "" or word == "\n":
word = '0'
if word.endswith("%"):
word = word.strip("%")
if file_name.startswith("diskstats"):
if word.endswith("k"):
word = word.strip("k")
word = str(float(word) * 1000)
if word.endswith("M"):
word = word.strip("M")
word = str(float(word) * 1000 * 1000)
if word.endswith("G"):
word = word.strip("G")
word = str(float(word) * 1000 * 1000 * 1000)
if word.endswith("T"):
word = word.strip("T")
word = str(float(word) * 1000 * 1000 * 1000 * 1000)
field_values.append(word.strip(" "))
# problem with the generated string? Print error and close file
generated_string = generateString(file, node, measurement, tag_names, tag_values, field_names, field_values, line_count, timestamp)
if generated_string is None:
bad_count += 1
if bad_count == 3:
bad_string = True
break
else:
continue
else:
bad_string = False
bad_count = 0
influx_string += generated_string
# send data to influx in batches
if bad_string is False:
if batch >= options.batch_size:
writing = True
influx_string = "curl -s -i -o /dev/null -XPOST 'http://'{}':'{}'/write?db='{}'&precision=s' --data-binary '{}'".format(influx_info[0], influx_info[1], influx_info[2], influx_string.strip("\n"))
while writing:
begin = time.time()
os.system(influx_string + "\n")
end = time.time()
if end - begin >= 4.5:
appendToFile("/tmp/csv-to-influx.log", "Timeout warning: {} for {}. Retrying now".format(file_name, node))
else:
batch = 0
influx_string = ""
writing = False
# leave while loop due to incorrectly formatted csv data
if bad_string:
f.close()
break
else:
# get remainder of data from csv
if batch < options.batch_size:
writing = True
influx_string = "curl -s -i -o /dev/null -XPOST 'http://'{}':'{}'/write?db='{}'&precision=s' --data-binary '{}'".format(influx_info[0], influx_info[1], influx_info[2], influx_string.strip("\n"))
while writing:
begin = time.time()
os.system(influx_string + "\n")
end = time.time()
if end - begin >= 4.5:
appendToFile("/tmp/csv-to-influx.log", "Timeout warning: {} for {}. Retrying now".format(file_name, node))
else:
writing = False
f.close()
appendToFile("/tmp/csv-to-influx.log",
"{} lines parsed in {} for {}".format(line_count, file_name, node))
break
except IOError as e:
appendToFile("/tmp/csv-to-influx.log", "Error: Issue opening {}\n-{}".format(file_loc, e.message))
except (KeyboardInterrupt, SystemExit):
sys.exit(0)
else:
appendToFile("/tmp/csv-to-influx.log", "Error: Could not access {}".format(file_loc))
# generate http api string to send data to influx
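# For a non-diskstats csv, the generated body looks roughly like (illustrative values only):
#   schedtop,'node'='controller-0','service'='nova-conductor' 'dt'='30.0','occ'='12.3' 1479772156
# i.e. "<measurement>,<tags> <fields> <epoch>", which parse() batches up and POSTs to the
# InfluxDB /write endpoint via curl.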
def generateString(file, node, meas, tag_n, tag_v, field_n, field_v, lc, date):
base = "{},".format(meas)
try:
if file.startswith("diskstats"):
for i in range(len(tag_n)):
if i == len(tag_n)-1:
base = base + "'{}'='{}' ".format(tag_n[i], str(tag_v[i]))
else:
base = base + "'{}'='{}',".format(tag_n[i], str(tag_v[i]))
for i in range(len(field_v)):
if str(field_v[i]).replace(".", "").isdigit():
if i == len(field_v)-1:
base = base + "'{}'='{}' {}".format(field_n[i], str(field_v[i]), date)
else:
base = base + "'{}'='{}',".format(field_n[i], str(field_v[i]))
else:
appendToFile("/tmp/csv-to-influx.log", "Error: Issue with line {} with {} for {}. Please check the csv and re-parse as some data may be incorrect".format(lc, file, node))
return None
else:
for i in range(len(tag_n)):
if i == len(tag_n)-1:
base = base + "'{}'='{}' ".format(tag_n[i], str(tag_v[i]))
else:
base = base + "'{}'='{}',".format(tag_n[i], str(tag_v[i]))
for i in range(1, len(field_v)):
if str(field_v[i]).replace(".", "").isdigit():
if i == len(field_v)-1:
base = base + "'{}'='{}' {}".format(field_n[i], str(field_v[i]), date)
else:
base = base + "'{}'='{}',".format(field_n[i], str(field_v[i]))
else:
appendToFile("/tmp/csv-to-influx.log", "Error: Issue with line {} with {} for {}. Please check the csv and re-parse as some data may be incorrect".format(lc, file, node))
return None
return base + '\n'
except Exception as e:
appendToFile("/tmp/csv-to-influx.log", "Error: Issue with http api string with {} for {}\n-{}".format(file, node, e.message))
return None
# append to error log
def appendToFile(file, content):
with open(file, "a") as f:
f.write(content + '\n')
# main method
if __name__ == "__main__":
# get command-line args
options = init()
controller_list = []
compute_list = []
storage_list = []
influx_host = influx_port = influx_db = ""
influx_info = []
pool_size = 0
# create the files
file = open("/tmp/csv-to-influx.log", "w")
file.close()
file = open("output.txt", "w")
file.close()
appendToFile("/tmp/csv-to-influx.log", "Starting parsing at {}".format(datetime.datetime.utcnow()))
appendToFile("/tmp/csv-to-influx.log", "----------------------------------------------")
# get node and influx info from lab.conf
with open("lab.conf", "r") as lc:
for lines in lc:
line = lines.strip("\n")
if line.startswith("CONTROLLER_LIST"):
controller_list = list(line.strip(" ").split("="))[1].strip("\"").split(" ")
elif line.startswith("COMPUTE_LIST"):
compute_list = list(line.strip(" ").split("="))[1].strip("\"").split(" ")
elif line.startswith("STORAGE_LIST"):
storage_list = list(line.strip(" ").split("="))[1].strip("\"").split(" ")
elif line.startswith("INFLUX_HOST"):
influx_host = list(line.strip(" ").split("="))[1].strip("\"").split(" ")[0]
elif line.startswith("INFLUX_PORT"):
influx_port = list(line.strip(" ").split("="))[1].strip("\"").split(" ")[0]
elif line.startswith("INFLUX_DB"):
influx_db = list(line.strip(" ").split("="))[1].strip("\"").split(" ")[0]
break
lc.close()
influx_info.append(influx_host)
influx_info.append(influx_port)
influx_info.append(influx_db)
# if -n option is used, remove unneeded nodes
if options.node_list is not None:
tmp_controller_list = []
tmp_compute_list = []
tmp_storage_list = []
for n in controller_list:
if n in options.node_list:
tmp_controller_list.append(n)
for n in compute_list:
if n in options.node_list:
tmp_compute_list.append(n)
for n in storage_list:
if n in options.node_list:
tmp_storage_list.append(n)
controller_list = tmp_controller_list
compute_list = tmp_compute_list
storage_list = tmp_storage_list
pool_size = len(controller_list) + len(compute_list) + len(storage_list)
if options.file_list is not None and options.parse_all is True:
print "You cannot use the -a option with the -f option"
sys.exit(0)
if options.postgres_list is not None and options.file_list is not None:
print "You cannot use the -p option with the -f option"
sys.exit(0)
if options.parse_all is True and options.node_list is not None:
print "You cannot use the -a option with the -n option. Ex: -n controller-0"
sys.exit(0)
if options.file_list is not None and options.node_list is None:
print "You must specify a node and a file. Ex: -n controller-0 -f postgres-conns.csv"
sys.exit(0)
working_dir = os.getcwd()
pool = Pool(processes=pool_size)
proc_list = []
print "Sending data to InfluxDB. Please tail /tmp/csv-to-influx.log"
# create a process per node
if len(controller_list) > 0:
for i in range(len(controller_list)):
path = os.path.join(working_dir, controller_list[i])
proc_list.append(pool.apply_async(processFiles, (path, controller_list[i], options, influx_info,)))
if len(compute_list) > 0:
for i in range(len(compute_list)):
path = os.path.join(working_dir, compute_list[i])
proc_list.append(pool.apply_async(processFiles, (path, compute_list[i], options, influx_info,)))
if len(storage_list) > 0:
for i in range(len(storage_list)):
path = os.path.join(working_dir, storage_list[i])
proc_list.append(pool.apply_async(processFiles, (path, storage_list[i], options, influx_info,)))
pool.close()
pool.join()


@@ -1,43 +0,0 @@
#!/bin/bash
#Copyright (c) 2016 Wind River Systems, Inc.
#
#SPDX-License-Identifier: Apache-2.0
#
# This script is used to download syseng data from all hosts to the analysis server
# for post processing.
# Syseng data are stored under /scratch/syseng_data on the controllers. Syseng data
# for storage and compute hosts, which are stored under /tmp/syseng_data, are pulled
# to the controllers via the script download-computes.sh and stored under
# /opt/backups/tmp/syseng-data.
#
# This script is to be run after running download-computes.sh on one of the controllers.
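# The lab.conf entries this script relies on look roughly like the following
# (placeholder addresses and host names, adjust for the lab):
#   CONTROLLER0_IP="10.10.10.3"
#   CONTROLLER1_IP="10.10.10.4"
#   CONTROLLER_LIST="controller-0 controller-1"
#   STORAGE_LIST="storage-0 storage-1"
#   COMPUTE_LIST="compute-0 compute-1"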
if [ ! -f lab.conf ]; then
echo "Lab configuration file is missing."
echo "See http://wiki.wrs.com/PBUeng/TitaniumServerSysengToolsAndDataAnalysis for more info."
exit 1
fi
source ./lab.conf
rsync -azvh wrsroot@${CONTROLLER0_IP}:/scratch/syseng_data/* .
rsync -azvh wrsroot@${CONTROLLER1_IP}:/scratch/syseng_data/* .
rsync -azvh wrsroot@${CONTROLLER0_IP}:/opt/backups/tmp/syseng-data/* .
rsync -azvh wrsroot@${CONTROLLER1_IP}:/opt/backups/tmp/syseng-data/* .
# Compress the newly downloaded data files if they have not been compressed
CURDIR=$(pwd)
ALL_HOSTS="${CONTROLLER_LIST} ${STORAGE_LIST} ${COMPUTE_LIST}"
for HOST in ${ALL_HOSTS}; do
if [ -e ${HOST} ]; then
echo "Compressing ${HOST}"
cd ${CURDIR}/${HOST}
bzip2 ${HOST}*
cd ${CURDIR}
else
echo "${HOST} not found"
fi
done


@@ -1,376 +0,0 @@
#!/bin/bash
#Copyright (c) 2016-2017 Wind River Systems, Inc.
#
#SPDX-License-Identifier: Apache-2.0
#
# This script is used to parse all stats data. It is designed to be called by either
# parse-controllers.sh or parse-computes.sh and not used as a standalone script.
# If the input node is a controller, it will parse the controller-specific postgres
# and rabbitmq stats first. If the input node is a compute, it will parse the
# compute-specific vswitch stats first.
#
# The following parsing steps are common to all hosts and are executed in the specified order:
# - Parse occtop
# - Parse memtop
# - Parse memstats (summary)
# - Parse netstats
# - Parse schedtop (summary)
# - Parse iostats
# - Parse diskstats
# - Parse filestats (summary)
# - Parse process level schedtop (optional step, configured in lab.conf)
# - Generate tarball
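# A host.conf sketch for the per-host settings used below (a hedged example only;
# actual core numbers, interfaces, devices and filesystems depend on the host):
#   PLATFORM_CPU_LIST="0 1"
#   NETSTATS_INTERFACE_LIST="eth0 eth1"
#   IOSTATS_DEVICE_LIST="sda dm-0"
#   DISKSTATS_FILESYSTEM_LIST="/dev/sda3|/ /dev/sda4|/var/log"
#   SERVICE_LIST="nova-conductor neutron-server"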
if [[ $# != 1 ]]; then
echo "ERROR: This script is meant to be called by either parse-controllers.sh or parse-computes.sh script."
echo "To run it separately, copy the script to the host directory that contains *.bz2 files."
echo "It takes a single argument - the name of the host directory (e.g. ./parse-all.sh controller-0)."
exit 1
fi
source ../lab.conf
source ./host.conf
PARSERDIR=$(dirname $0)
. ${PARSERDIR}/parse-util.sh
NODE=$1
CURDATE=$(date)
DATESTAMP=$(date +%b-%d)
function sedit()
{
local FILETOSED=$1
sed -i -e "s/ */ /g" ${FILETOSED}
sed -i -e "s/ /,/g" ${FILETOSED}
# Remove any trailing comma
sed -i "s/,$//" ${FILETOSED}
}
function get_filename_from_mountname()
{
local name=$1
local fname
if test "${name#*"scratch"}" != "${name}"; then
fname="scratch"
elif test "${name#*"log"}" != "${name}"; then
fname="log"
elif test "${name#*"backup"}" != "${name}"; then
fname="backup"
elif test "${name#*"ceph/mon"}" != "${name}"; then
fname="cephmon"
elif test "${name#*"conversion"}" != "${name}"; then
fname="img-conversion"
elif test "${name#*"platform"}" != "${name}"; then
fname="platform"
elif test "${name#*"postgres"}" != "${name}"; then
fname="postgres"
elif test "${name#*"cgcs"}" != "${name}"; then
fname="cgcs"
elif test "${name#*"rabbitmq"}" != "${name}"; then
fname="rabbitmq"
elif test "${name#*"instances"}" != "${name}"; then
fname="pv"
elif test "${name#*"ceph/osd"}" != "${name}"; then
# The ceph disk partition has the following mount name convention
# /var/lib/ceph/osd/ceph-0
fname=`basename ${name}`
fi
echo $fname
}
function parse_process_schedtop_data()
{
# Logic has been moved to a separate script so that process-level schedtop parsing
# can be run either as part of the parse-all.sh script or independently.
LOG "Process level schedtop parsing is turned on in lab.conf. Parsing schedtop detail..."
cd ..
./parse-schedtop.sh ${NODE}
cd ${NODE}
}
function parse_controller_specific()
{
# Parse Postgres data, removing any data left from a previous run. Generate summary
# data for each database and detailed data for the specified tables.
LOG "Parsing postgres data for ${NODE}"
if [ -z "${DATABASE_LIST}" ]; then
WARNLOG "DATABASE_LIST is not set in the lab.conf file. Use default setting"
DATABASE_LIST="cinder glance keystone nova neutron ceilometer heat sysinv aodh postgres nova_api"
fi
for DB in ${DATABASE_LIST}; do
rm /tmp/${DB}*.csv
done
../parse_postgres *postgres.bz2 >postgres-summary-${NODE}-${DATESTAMP}.txt
for DB in ${DATABASE_LIST}; do
cp /tmp/${DB}_size.csv postgres_${DB}_size.csv
done
for TABLE in ${TABLE_LIST}; do
cp /tmp/${TABLE}.csv postgres_${TABLE}.csv
done
# Parsing RabbitMQ data
LOG "Parsing rabbitmq data for ${NODE}"
../parse-rabbitmq.sh rabbitmq-${NODE}.csv
for QUEUE in ${RABBITMQ_QUEUE_LIST}; do
# If node is not a controller node then parse-rabbitmq-queue.sh should skip
../parse-rabbitmq-queue.sh rabbitmq-${QUEUE}-${NODE}.csv ${QUEUE}
done
}
function parse_compute_specific()
{
LOG "Parsing vswitch data for ${NODE}"
../parse-vswitch.sh ${NODE}
}
function parse_occtop_data()
{
LOG "Parsing occtop data for ${NODE}"
bzcat *occtop.bz2 >occtop-${NODE}-${DATESTAMP}.txt
cp occtop-${NODE}-${DATESTAMP}.txt tmp.txt
sedit tmp.txt
# Get the highest column count
column_count=$(awk -F "," '{print NF}' tmp.txt | sort -nu | tail -n 1)
grep '^[0-9]' tmp.txt |cut -d, -f1,2 | awk -F "," '{print $1" "$2}' > tmpdate.txt
grep '^[0-9]' tmp.txt |cut -d, -f3-$column_count > tmpcore.txt
paste -d, tmpdate.txt tmpcore.txt > tmp2.txt
# Generate header based on the number of columns. The Date/Time column consists of date and time fields
header="Date/Time,Total"
count=$(($column_count-3))
for i in $(seq 0 $(($count-1))); do
header="$header,$i"
done
# Generate detailed CSV with Date/Time, Total CPU occupancy and individual core occupancies e.g.
# Date/Time,Total,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35
# 2016-11-22 00:29:16.523,759.5,21.4,18.9,43.8,24.5,23.1,25.3,28.1,25.5,20.5,27.8,26.8,32.7,27.3,25.1,21.1,23.2,21.7,36.4,23.3,16.6,15.3,13.9,14.4,15.0,14.7,14.4,16.4,13.8,17.0,17.8,19.0,15.1,14.0,13.2,14.5,17.8
echo "${header}" > occtop-${NODE}-detailed.csv
cat tmp2.txt >> occtop-${NODE}-detailed.csv
# Generate simple CSV file which is used to generate host CPU occupancy chart. Platform cores are
# defined in the host.conf. The simple CSV contains only the Date/Time and Total platform CPU occupancy e.g.
# Date/Time,Total
# 2016-11-22 00:29:16.523,94.9
# 2016-11-22 00:30:16.526,71.3
if [ -z "${PLATFORM_CPU_LIST}" ]; then
# A controller node in a standard system. In this case, all cores are dedicated to platform use.
# Simply extract the Date/Time and Total CPU occupancy
cut -d, -f1,2 occtop-${NODE}-detailed.csv > occtop-${NODE}.csv
else
# A CPE, compute or storage node. The cores dedicated to platform use are specified in the config.
echo "Date/Time,Total" > occtop-${NODE}.csv
while read -r line || [[ -n "$line" ]]; do
IFS="," read -r -a arr <<< "${line}"
total=0
for CORE in ${PLATFORM_CPU_LIST}; do
# Add 2 to the index as occupancy of each individual core starts after Date/Time and Total
idx=$(($CORE+2))
total=`echo $total + ${arr[$idx]} | bc`
done
echo "${arr[0]},${total}" >> occtop-${NODE}.csv
done < tmp2.txt
fi
# Remove temporary files
rm tmp.txt tmp2.txt tmpdate.txt tmpcore.txt
}
function parse_memtop_data()
{
LOG "Parsing memtop data for ${NODE}"
bzcat *memtop.bz2 > memtop-${NODE}-${DATESTAMP}.txt
cp memtop-${NODE}-${DATESTAMP}.txt tmp.txt
sedit tmp.txt
# After dumping all memtop bz2 output into one text file and running an in-place sed, grab only the relevant
# data for CSV output. Generate both detailed and simple CSV files. The simple output will be used to generate
# the chart.
grep '^[0-9]' tmp.txt | awk -F "," '{print $1" "$2","$3","$4","$5","$6","$7","$8","$9","$10","$11","$12","$13","$14","$15","$16","$17","$18}' > tmp2.txt
echo "Date/Time,Total,Used,Free,Cached,Buf,Slab,CAS,CLim,Dirty,WBack,Anon,Avail,0:Avail,0:HFree,1:Avail,1:HFree" > memtop-${NODE}-detailed.csv
cat tmp2.txt >> memtop-${NODE}-detailed.csv
echo "Date/Time,Total,Anon" > memtop-${NODE}.csv
cut -d, -f1-2,12 tmp2.txt >> memtop-${NODE}.csv
# Remove temporary files
rm tmp.txt tmp2.txt
}
function parse_netstats_data()
{
LOG "Parsing netstats data for ${NODE}"
# First generate the summary data, then detailed data for the specified interfaces
../parse_netstats *netstats.bz2 > netstats-summary-${NODE}-${DATESTAMP}.txt
if [ -z "${NETSTATS_INTERFACE_LIST}" ]; then
ERRLOG "NETSTATS_INTERFACE_LIST is not set in host.conf. Skipping detail netstats..."
else
for INTERFACE in ${NETSTATS_INTERFACE_LIST}; do
echo "Date/Time,Interface,Rx PPS,Rx Mbps,Rx Packet Size,Tx PPS,Tx Mbps,Tx Packet Size" > netstats-${NODE}-${INTERFACE}.csv
../parse_netstats *netstats.bz2 | grep " ${INTERFACE} " > tmp.txt
sed -i -e "s/|/ /g" tmp.txt
sed -i -e "s/ */ /g;s/ */ /g" tmp.txt
sed -i -e "s/ /,/g" tmp.txt
# Remove the leading comma
sed -i 's/,//' tmp.txt
while read -r line || [[ -n "$line" ]]; do
IFS="," read -r -a arr <<< "${line}"
echo "${arr[8]} ${arr[9]},${arr[0]},${arr[2]},${arr[3]},${arr[4]},${arr[5]},${arr[6]},${arr[7]}" >> netstats-${NODE}-${INTERFACE}.csv
done < tmp.txt
done
rm tmp.txt
fi
}
function parse_iostats_data()
{
LOG "Parsing iostat data for ${NODE}"
if [ -z "${IOSTATS_DEVICE_LIST}" ]; then
ERRLOG "IOSTATS_DEVICE_LIST is not set in host.conf. Skipping iostats..."
else
for DEVICE in ${IOSTATS_DEVICE_LIST}; do
# Add header to output csv file
echo "Date/Time,${DEVICE},rqm/s,wrqm/s,r/s,w/s,rkB/s,wkB/s,avgrq-sz,avgqu-sz,await,r_await,w_await,svctm,%util" > iostat-${NODE}-${DEVICE}.csv
# Dumping iostat content to tmp file
bzcat *iostat.bz2 | grep -E "/2015|/2016|/2017|${DEVICE}" | awk '{print $1","$2","$3","$4","$5","$6","$7","$8","$9","$10","$11","$12","$13","$14}' > tmp.txt
while IFS= read -r current
do
if test "${current#*Linux}" != "$current"
then
# Skip the line that contains the word "Linux"
continue
else
if test "${current#*$DEVICE}" == "$current"
then
# It's a date entry, look ahead
read -r next
if test "${next#*$DEVICE}" != "${next}"
then
# This next line contains the device stats
# Combine date and time fields
current="${current//2016,/2016 }"
current="${current//2017,/2017 }"
# Combine time and AM/PM fields
current="${current//,AM/ AM}"
current="${current//,PM/ PM}"
# Write both lines to intermediate file
echo "${current}" >> tmp2.txt
echo "${next}" >> tmp2.txt
fi
fi
fi
done < tmp.txt
mv tmp2.txt tmp.txt
# Combine the time and stats data into one line
# 11/22/2016 06:34:00 AM,,,,,,,,,,,
# dm-0,0.00,0.00,0.00,1.07,0.00,38.00,71.25,0.00,0.19,0.00,0.19,0.03,0.00
paste -d "" - - < tmp.txt > tmp2.txt
# Remove empty fields, use "complement" option for contiguous fields/range
cut -d, -f2-11 --complement tmp2.txt > tmp.txt
# Write final content to output csv
cat tmp.txt >> iostat-${NODE}-${DEVICE}.csv
rm tmp.txt tmp2.txt
done
fi
}
function parse_diskstats_data()
{
LOG "Parsing diskstats data for ${NODE}"
if [ -z "${DISKSTATS_FILESYSTEM_LIST}" ]; then
ERRLOG "DISKSTATS_FILESYSTEM_LIST is not set in host.conf. Skipping diskstats..."
else
for FS in ${DISKSTATS_FILESYSTEM_LIST}; do
fspair=(${FS//|/ })
fsname=${fspair[0]}
mountname=${fspair[1]}
if [ ${mountname} == "/" ]; then
mountname=" /"
echo "File system,Type,Size,Used,Avail,Used(%)" > diskstats-${NODE}-root.csv
bzcat *diskstats.bz2 | grep $fsname | grep $mountname | grep G | awk '{print $1","$2","$3","$4","$5","$6}' >> diskstats-${NODE}-root.csv
else
fname=$(get_filename_from_mountname $mountname)
echo "File system,Type,Size,Used,Avail,Used(%)" > diskstats-${NODE}-$fname.csv
bzcat *diskstats.bz2 | grep $fsname | grep $mountname | grep G | awk '{print $1","$2","$3","$4","$5","$6}' >> diskstats-${NODE}-$fname.csv
fi
done
fi
}
# Parsing starts here ...
LOG "Parsing ${NODE} files - ${CURDATE}"
# Let's get the host specific parsing out of the way
if test "${NODE#*"controller"}" != "${NODE}"; then
parse_controller_specific
elif test "${NODE#*"compute"}" != "${NODE}"; then
parse_compute_specific
fi
# Parsing CPU occtop data
parse_occtop_data
# Parsing memtop data
parse_memtop_data
# Parse memstats data to generate the high-level report. The most important piece of info is the list of
# high runners at the end of the file. If there is a leak, run the parse-daily.sh script to generate the
# time series data for the offending processes only. Use the process name, not the PID, as most Titanium Cloud
# processes have workers.
LOG "Parsing memstats summary for ${NODE}"
../parse_memstats --report *memstats.bz2 > memstats-summary-${NODE}-${DATESTAMP}.txt
#tar czf pidstats.tgz pid-*.csv
rm pid-*.csv
# Parsing netstats data
parse_netstats_data
# Parse schedtop data to generate the high-level report. Leave the process-level schedtop parsing till
# the end as it is a long-running task.
LOG "Parsing schedtop summary for ${NODE}"
FILES=$(ls *schedtop.bz2)
../parse_schedtop ${FILES} > schedtop-summary-${NODE}-${DATESTAMP}.txt
# Parsing iostats data
parse_iostats_data
# Parsing diskstats data
parse_diskstats_data
# Parse filestats data to generate the high-level report. If there is a file descriptor leak, run the
# parse-daily.sh script to generate the time series data for the offending processes only. Use the process
# name, not the PID, as most Titanium Cloud processes have workers.
LOG "Parsing filestats summary for ${NODE}"
../parse_filestats --all *filestats.bz2 > filestats-summary-${NODE}-${DATESTAMP}.txt
# Parse process-level schedtop data. This is a long-running task. To skip this step or generate data for
# only specific processes, update the lab.conf and host.conf files.
[[ ${GENERATE_PROCESS_SCHEDTOP} == Y ]] && parse_process_schedtop_data || WARNLOG "Parsing process level schedtop is skipped."
# Done parsing for this host. If it's a controller host, check that the parsing of postgres connection stats,
# which runs in parallel, has finished before creating the tar file.
if test "${NODE#*"controller"}" != "${NODE}"; then
# If the postgres-conns.csv file has not been created, which is highly unlikely, wait a couple of minutes
[ ! -e postgres-conns.csv ] && sleep 120
# Now check the stats of this file every 5 seconds to see if it's still being updated. Another option
# is to use inotify, which requires the additional inotify-tools package.
oldsize=0
newsize=0
while true
do
newsize=$(stat -c %s postgres-conns.csv)
if [ "$oldsize" == "$newsize" ]; then
break
fi
oldsize=$newsize
sleep 5
done
fi
tar czf syseng-data-${NODE}-${DATESTAMP}.tgz *.csv *.txt
LOG "Parsing stats data for ${NODE} completed!"


@@ -1,52 +0,0 @@
#!/bin/bash
#Copyright (c) 2016 Wind River Systems, Inc.
#
#SPDX-License-Identifier: Apache-2.0
#
# This script is used to parse stats data for controller/CPE hosts. For a large office,
# it is called by parse-everything.sh. For CPE, it should be called on its own.
# The lab.conf file must exist, with the CONTROLLER_LIST config parameter set, for the script to run.
# Usage: ./parse-controllers.sh
PARSERDIR=$(dirname $0)
. ${PARSERDIR}/parse-util.sh
if [ ! -f lab.conf ]; then
echo "Lab configuration file is missing."
echo "See http://wiki.wrs.com/PBUeng/TitaniumServerSysengToolsAndDataAnalysis for more info."
exit 1
fi
source ./lab.conf
if [ -z "${CONTROLLER_LIST}" ]; then
echo "ERROR: Controller list is not set in lab.conf file. Exiting..."
exit 1
fi
for HOST in ${CONTROLLER_LIST}; do
LOG "Parsing stats data for controller host ${HOST}"
if [ -d ${HOST} ]; then
cd ${HOST}
bzip2 ${HOST}* > /dev/null 2>&1
../parse-all.sh ${HOST} > /dev/null 2>&1 &
# Delay the next controller because they both write to /tmp
sleep 120
cd ..
else
ERRLOG "${HOST} does not exist. Parsing skipped."
fi
done
# Parsing postgres connection stats is a time-consuming step; run it in parallel with the parse-all
# script.
for HOST in ${CONTROLLER_LIST}; do
if [ -d ${HOST} ]; then
LOG "Parsing postgres connection stats data for controller host ${HOST}"
cd ${HOST}
../parse-postgres.sh *postgres.bz2 > /dev/null 2>&1 &
cd ..
fi
done


@@ -1,115 +0,0 @@
#!/bin/bash
#Copyright (c) 2016 Wind River Systems, Inc.
#
#SPDX-License-Identifier: Apache-2.0
#
# The following script is used when either the memstats or filestats summary reports
# a possible memory or file leak respectively. It can be run for a particular host or
# for all hosts as configured in lab.conf.
# Make sure to run the cleanup-uncompressed.sh script before running this script to remove
# any uncompressed files, as the memstats/filestats parsers can produce erroneous results if
# both uncompressed and compressed versions of the same file are present.
#
# Usage:
# ./parse-daily.sh <parser-name> <process-name> to generate daily stats for all hosts
# ./parse-daily.sh <host-name> <parser-name> <process-name> to generate daily stats for
# specified host.
#
# e.g. >./parse-daily.sh memstats sm-eru
# >./parse-daily.sh controller-0 filestats postgres
function print_usage()
{
echo "Usage: ./parse-daily.sh <parser-name> <process-name> will parse daily data for all hosts."
echo "Usage: ./parse-daily.sh <host-name> <parser-name> <process-name> will parse daily data for specified host."
echo "Valid parsers for daily stats are: memstats & filestats."
exit 1
}
function parse_daily_stats()
{
local PARSER_NAME=$1
local PROCESS_NAME=$2
local TMPFILE="tmp.txt"
# Insert the header in the summary csv file. The summary file is a concatenation
# of the daily files. If there is a large number of files, the parser may not have
# enough memory to process them all. The safest way is to parse one day at a time.
if [ ${PARSER_NAME} == "memstats" ]; then
local SUMMARYFILE=memstats-summary-${PROCESS_NAME}.csv
echo "Date,RSS,VSZ" > ${SUMMARYFILE}
else
local SUMMARYFILE=filestats-summary-${PROCESS_NAME}.csv
echo "Date,Read/Write,Write,Read" > ${SUMMARYFILE}
fi
# Get the list of dates for memstats/filestats bz2 files in this directory.
# The filename convention is : <hostname>_YYYY-MM-DD_<time>_memstats.bz2
# e.g. storage-0_2016-11-23_1211_memstats.bz2
DATE_LIST=$(ls -1|awk -F "_" '{print $2}'| grep 20|uniq)
for DATE in ${DATE_LIST}; do
local YEAR=$(echo ${DATE}|awk -F "-" '{print $1}')
if [ ${PARSER_NAME} == "memstats" ]; then
local DAILYFILE=memstats-${PROCESS_NAME}-${DATE}.csv
../parse_memstats --name ${DATE} --cmd ${PROCESS_NAME} --detail > ${TMPFILE}
# Time series data for memstats would look something like this
# DATE TIME AVAIL SLAB | NLWP RSS VSZ
# 2016-11-18 00:42:50 123735.29 4831.39 | 2 602208 1292348
grep "^${YEAR}-" ${TMPFILE} |awk '{print $1" "$2","$7","$8}' > ${DAILYFILE}
# TO-DO: There is a bug somewhere in the parse_memstats script which produces
# --reboot detected ------------------ entries when the directory has more files
# than those that match the specific date. This is a workaround for this
# bug.
sed -i '/,0,0/d' ${DAILYFILE}
else
local DAILYFILE=filestats-${PROCESS_NAME}-${DATE}.csv
../parse_filestats --name ${DATE} --cmd ${PROCESS_NAME} --detail > ${TMPFILE}
grep "^${YEAR}-" ${TMPFILE} |awk '{print $1" "$2","$8","$9","$10}' > ${DAILYFILE}
fi
cat ${DAILYFILE} >> ${SUMMARYFILE}
done
rm ${TMPFILE}
}
if [[ $# -eq 0 ]]; then
echo "ERROR: No arguments provided."
print_usage
fi
CURDIR=$(pwd)
if [[ $# -eq 2 ]]; then
if [[ $1 == "memstats" ]] || [[ $1 == "filestats" ]]; then
if [ ! -f lab.conf ]; then
echo "Lab configuration file is missing."
echo "See http://wiki.wrs.com/PBUeng/TitaniumServerSysengToolsAndDataAnalysis for more info."
exit 1
fi
source ./lab.conf
ALL_HOSTS="${CONTROLLER_LIST} ${STORAGE_LIST} ${COMPUTE_LIST}"
for HOST in ${ALL_HOSTS}; do
cd ${HOST}
parse_daily_stats $1 $2
cd ${CURDIR}
done
else
echo "Specified parser $1 is not a valid parser."
print_usage
fi
elif [[ $# -eq 3 ]]; then
if [[ $2 == "memstats" ]] || [[ $2 == "filestats" ]]; then
if [ -d "$1" ]; then
cd $1
parse_daily_stats $2 $3
else
echo "ERROR: Specified host $1 does not exist."
exit 1
fi
else
echo "Specified parser $2 is not a valid parser."
print_usage
fi
else
print_usage
fi


@@ -1,209 +0,0 @@
#!/bin/bash
#Copyright (c) 2016 Wind River Systems, Inc.
#
#SPDX-License-Identifier: Apache-2.0
#
# This script is used to parse postgres bz2 files for postgres connection stats.
# It is called by parse-controllers.sh script for either large office or CPE.
source ../lab.conf
PARSERDIR=$(dirname $0)
. ${PARSERDIR}/parse-util.sh
if [ -z "${DATABASE_LIST}" ]; then
WARNLOG "DATABASE_LIST is not set in the lab.conf file. Use default setting"
DATABASE_LIST="cinder glance keystone nova neutron ceilometer heat sysinv aodh postgres nova_api"
fi
# Do all files if no files specified
if [ -z "${1}" ]; then
FILES=$(ls controller*postgres.bz2)
else
FILES=$(ls $@)
fi
LOG "Parsing postgres files ${FILES}"
function generate_header()
{
local header="Date/Time,Total"
for DB in ${DATABASE_LIST}; do
if [ ${DB} == "nova_api" ]; then
header="${header},Nova API"
else
header="${header},${DB^}"
fi
done
for DB in ${DATABASE_LIST}; do
if [ ${DB} == "nova_api" ]; then
header="${header},Nova API Active"
else
header="${header},${DB^} Active"
fi
done
for DB in ${DATABASE_LIST}; do
if [ ${DB} == "nova_api" ]; then
header="${header},Nova API Idle"
else
header="${header},${DB^} Idle"
fi
done
for DB in ${DATABASE_LIST}; do
if [ ${DB} == "nova_api" ]; then
header="${header},Nova API Other"
else
header="${header},${DB^} Other"
fi
done
echo $header
}
function generate_grep_str()
{
local grepstr="time:"
for DB in ${DATABASE_LIST}; do
grepstr="${grepstr}|${DB}"
done
grepstr="${grepstr}|breakdown|connections total|rows"
echo $grepstr
}
function init_variables()
{
CONN_TOTAL="0"
CONN_ACTIVE_TOTAL="0"
CONN_IDLE_TOTAL="0"
CONN_OTHER_TOTAL="0"
FIRST_TIME="no"
INIT_VAL="0"
for DB in ${DATABASE_LIST}; do
eval "CONN_${DB^^}=${INIT_VAL}"
eval "CONN_ACTIVE_${DB^^}=${INIT_VAL}"
eval "CONN_IDLE_${DB^^}=${INIT_VAL}"
eval "CONN_OTHER_${DB^^}=${INIT_VAL}"
done
}
function output_values()
{
local result="${DATEVAL} ${TIMEVAL},${CONN_TOTAL}"
for DB in ${DATABASE_LIST}; do
val=$(eval echo \${CONN_${DB^^}})
result=$result,$val
done
for DB in ${DATABASE_LIST}; do
val=$(eval echo \${CONN_ACTIVE_${DB^^}})
result=$result,$val
done
for DB in ${DATABASE_LIST}; do
val=$(eval echo \${CONN_IDLE_${DB^^}})
result=$result,$val
done
for DB in ${DATABASE_LIST}; do
val=$(eval echo \${CONN_OTHER_${DB^^}})
result=$result,$val
done
echo $result >> postgres-conns.csv
}
HEADER=$(generate_header)
echo ${HEADER} > postgres-conns.csv
GREPSTR=$(generate_grep_str)
[ -e postgres-tmp2.txt ] && rm postgres-tmp2.txt
for FILE in ${FILES}; do
TEST=`echo ${FILE} | grep bz2`
if [ ! -z "${TEST}" ]; then
bzcat ${FILE} | grep -E "time:|active|idle|breakdown|total|rows" >> postgres-tmp2.txt
fi
cat postgres-tmp2.txt | grep -E "${GREPSTR}" > postgres-tmp.txt
done
# Start parsing
FIRST_TIME="yes"
PARSING_TABLE="no"
while IFS='' read -r LINE || [[ -n "${LINE}" ]]; do
TEST=`echo ${LINE} | grep "time:" | awk '{print $4}'`
if [ ! -z "${TEST}" ]; then
DATEVAL=`echo ${LINE} | awk '{print $3}'`
TIMEVAL=`echo ${LINE} | awk '{print $4}'`
if [ "z${FIRST_TIME}" != "zyes" ]; then
init_variables
FIRST_TIME="no"
fi
fi
TEST=`echo ${LINE} | grep "connections total =" | awk '{print $4}'`
if [ ! -z "${TEST}" ]; then
CONN_TOTAL=${TEST}
fi
TEST=`echo ${LINE} | grep "connections breakdown (query)"`
if [ ! -z "${TEST}" ]; then
PARSING_TABLE="yes"
fi
if [ "x${PARSING_TABLE}" == "xyes" ]; then
TESTNAME=`echo ${LINE} | grep "|" | awk '{print $1}'`
TESTVAL=`echo ${LINE} | grep "|" | awk '{print $5}'`
CONNSTATE=`echo ${LINE} | grep "|" | awk '{print $3}'`
# This gets last field regardless of number of preceding spaces
FIELDS=(${LINE// / })
for I in ${!FIELDS[@]}; do
TESTVAL=${FIELDS[${I}]}
done
for DB in ${DATABASE_LIST}; do
if [ "x${TESTNAME}" == "x${DB}" ]; then
eval "CONN_${DB^^}=$((CONN_${DB^^} + ${TESTVAL}))"
break
fi
done
if [ "x${CONNSTATE}" == "xidle" ]; then
for DB in ${DATABASE_LIST}; do
if [ "x${TESTNAME}" == "x${DB}" ]; then
eval "CONN_IDLE_${DB^^}=$((CONN_IDLE_${DB^^} + ${TESTVAL}))"
break
fi
done
elif [ "x${CONNSTATE}" == "xactive" ]; then
for DB in ${DATABASE_LIST}; do
if [ "x${TESTNAME}" == "x${DB}" ]; then
eval "CONN_ACTIVE_${DB^^}=$((CONN_ACTIVE_${DB^^} + ${TESTVAL}))"
break
fi
done
else
for DB in ${DATABASE_LIST}; do
if [ "x${TESTNAME}" == "x${DB}" ]; then
eval "CONN_OTHER_${DB^^}=$((CONN_OTHER_${DB^^} + ${TESTVAL}))"
break
fi
done
fi
TEST=`echo ${LINE} | grep "rows"`
if [ ! -z "${TEST}" ]; then
PARSING_TABLE="no"
output_values
init_variables
else
TEST=`echo ${LINE} | grep "age:"`
if [ ! -z "${TEST}" ]; then
PARSING_TABLE="no"
echo "${DATEVAL} ${TIMEVAL} - no data"
init_variables
fi
fi
fi
done < postgres-tmp.txt
rm postgres-tmp.txt postgres-tmp2.txt
LOG "Parsing postgres connection stats data completed!"


@@ -1,57 +0,0 @@
#!/bin/bash
#Copyright (c) 2016 Wind River Systems, Inc.
#
#SPDX-License-Identifier: Apache-2.0
#
OUTFILE=$1
QUEUENAME=$2
FILES=$(ls *rabbitmq.bz2 | sort)
[ -e tmp.txt ] && rm tmp.txt
echo "Time/Date,Name,Pid,Messages,Messages Ready,Messages Unacknowledged,Memory,Consumers" >${OUTFILE}
for FILE in ${FILES}; do
bzcat $FILE | grep -E "time\:|${QUEUENAME}" >>tmp.txt
sed -i -e "s/\t/ /g" tmp.txt
done
while IFS='' read -r LINE || [[ -n "${LINE}" ]]; do
TEST=$(echo ${LINE} | awk '{print $1}')
TEST2=$(echo ${LINE} | awk '{print $2}')
if [[ "${TEST}" == "time:" ]]; then
TIMEDATE=$(echo ${LINE} | awk '{print $3" "$4}')
MESSAGES=""
NAME=""
PID=""
MESSAGES_READY=""
MESSAGES_UNACKNOWLEDGED=""
MEMORY=""
CONSUMERS=""
elif [[ "${TEST2}" == "${QUEUENAME}" ]]; then
MESSAGES=$(echo ${LINE} | awk '{print $1}')
NAME=$(echo ${LINE} | awk '{print $2}')
PID=$(echo ${LINE} | awk '{print $3}')
MESSAGES_READY=$(echo ${LINE} | awk '{print $4}')
MESSAGES_UNACKNOWLEDGED=$(echo ${LINE} | awk '{print $5}')
MEMORY=$(echo ${LINE} | awk '{print $6}')
CONSUMERS=$(echo ${LINE} | awk '{print $7}')
echo "${TIMEDATE},${NAME},${PID},${MESSAGES},${MESSAGES_READY},${MESSAGES_UNACKNOWLEDGED},${MEMORY},${CONSUMERS}" >> ${OUTFILE}
TIMEDATE=""
MESSAGES=""
NAME=""
PID=""
MESSAGES_READY=""
MESSAGES_UNACKNOWLEDGED=""
MEMORY=""
CONSUMERS=""
fi
done < tmp.txt
rm tmp.txt


@@ -1,161 +0,0 @@
#!/bin/bash
#Copyright (c) 2016 Wind River Systems, Inc.
#
#SPDX-License-Identifier: Apache-2.0
#
OUTFILE=$1
FILES=$(ls *rabbitmq.bz2 | sort)
[ -e tmp.txt ] && rm tmp.txt
echo "Time/Date,Memory Total,Connection Readers,Connection Writers,Connection Channels,Connection Other,Queue Procs,Queue Slave Procs,Plugins,Other Proc,Mnesia,Mgmt DB,Msg Index,Other ETS,Binary,Code,Atom,Other System,VM Memory High Watermark,VM Memory Limit,Disk Free Limit,Disk Free,Descriptor Limit,Descriptors Used,Sockets Limit,Sockets Used,Processes Limit,Processes Used,Run Queue,Uptime" >${OUTFILE}
for FILE in ${FILES}; do
bzcat $FILE | grep -E "time\:|\{memory\,\[\{total|\{connection_readers|\{connection_writers|\{connection_channels|\{connection_other|\{queue_procs|\{queue_slave_procs|\{plugins|\{other_proc|\{mnesia|\{mgmt_db|\{msg_index|\{other_ets|\{binary|\{code|\{atom|\{other_system|\{vm_memory_high_watermark|\{vm_memory_limit|\{disk_free_limit|\{disk_free|\{file_descriptors|\{total_used|\{sockets_limit|\{sockets_used|\{processes|\{run_queue|\{uptime" >>tmp.txt
sed -i -e "s/ //g" tmp.txt
sed -i -e "s/ {/{/g" tmp.txt
sed -i -e "s/time:/time: /g" tmp.txt
sed -i -e "s/}//g" tmp.txt
sed -i -e "s/\[//g" tmp.txt
sed -i -e "s/\]//g" tmp.txt
sed -i -e 's/{used/,/g' tmp.txt
sed -i -e 's/,/ /g' tmp.txt
done
while IFS='' read -r LINE || [[ -n "${LINE}" ]]; do
TEST=$(echo ${LINE} | awk '{print $1}')
if [[ "${TEST}" == "time:" ]]; then
TIMEDATE=$(echo ${LINE} | awk '{print $3" "$4}')
TOTAL=""
CONNECTION_READERS=""
CONNECTION_WRITERS=""
CONNECTION_CHANNELS=""
CONNECTION_OTHER=""
QUEUE_PROCS=""
QUEUE_SLAVE_PROCS=""
PLUGINS=""
OTHER_PROC=""
MNESIA=""
MGMT_DB=""
MSG_INDEX=""
OTHER_ETS=""
BINARY=""
CODE=""
ATOM=""
OTHER_SYSTEM=""
VM_MEMORY_HIGH_WATERMARK=""
VM_MEMORY_LIMIT=""
DISK_FREE_LIMIT=""
DISK_FREE=""
TOTAL_LIMIT=""
TOTAL_USED=""
SOCKETS_LIMIT=""
SOCKETS_USED=""
LIMIT=""
USED=""
RUN_QUEUE=""
UPTIME=""
elif [[ "${TEST}" == "{memory{total" ]]; then
TOTAL=$(echo ${LINE} | awk '{print $2}')
elif [[ "${TEST}" == "{memory" ]]; then
TOTAL=$(echo ${LINE} | awk '{print $3}')
elif [[ "${TEST}" == "{connection_readers" ]]; then
CONNECTION_READERS=$(echo ${LINE} | awk '{print $2}')
elif [[ "${TEST}" == "{connection_writers" ]]; then
CONNECTION_WRITERS=$(echo ${LINE} | awk '{print $2}')
elif [[ "${TEST}" == "{connection_channels" ]]; then
CONNECTION_CHANNELS=$(echo ${LINE} | awk '{print $2}')
elif [[ "${TEST}" == "{connection_other" ]]; then
CONNECTION_OTHER=$(echo ${LINE} | awk '{print $2}')
elif [[ "${TEST}" == "{queue_procs" ]]; then
QUEUE_PROCS=$(echo ${LINE} | awk '{print $2}')
elif [[ "${TEST}" == "{queue_slave_procs" ]]; then
QUEUE_SLAVE_PROCS=$(echo ${LINE} | awk '{print $2}')
elif [[ "${TEST}" == "{plugins" ]]; then
PLUGINS=$(echo ${LINE} | awk '{print $2}')
elif [[ "${TEST}" == "{other_proc" ]]; then
OTHER_PROC=$(echo ${LINE} | awk '{print $2}')
elif [[ "${TEST}" == "{mnesia" ]]; then
MNESIA=$(echo ${LINE} | awk '{print $2}')
elif [[ "${TEST}" == "{mgmt_db" ]]; then
MGMT_DB=$(echo ${LINE} | awk '{print $2}')
elif [[ "${TEST}" == "{msg_index" ]]; then
MSG_INDEX=$(echo ${LINE} | awk '{print $2}')
elif [[ "${TEST}" == "{other_ets" ]]; then
OTHER_ETS=$(echo ${LINE} | awk '{print $2}')
elif [[ "${TEST}" == "{binary" ]]; then
BINARY=$(echo ${LINE} | awk '{print $2}')
elif [[ "${TEST}" == "{code" ]]; then
CODE=$(echo ${LINE} | awk '{print $2}')
elif [[ "${TEST}" == "{atom" ]]; then
ATOM=$(echo ${LINE} | awk '{print $2}')
elif [[ "${TEST}" == "{other_system" ]]; then
OTHER_SYSTEM=$(echo ${LINE} | awk '{print $2}')
elif [[ "${TEST}" == "{vm_memory_high_watermark" ]]; then
VM_MEMORY_HIGH_WATERMARK=$(echo ${LINE} | awk '{print $2}')
elif [[ "${TEST}" == "{vm_memory_limit" ]]; then
VM_MEMORY_LIMIT=$(echo ${LINE} | awk '{print $2}')
elif [[ "${TEST}" == "{disk_free_limit" ]]; then
DISK_FREE_LIMIT=$(echo ${LINE} | awk '{print $2}')
elif [[ "${TEST}" == "{disk_free" ]]; then
DISK_FREE=$(echo ${LINE} | awk '{print $2}')
elif [[ "${TEST}" == "{file_descriptors{total_limit" ]]; then
TOTAL_LIMIT=$(echo ${LINE} | awk '{print $2}')
elif [[ "${TEST}" == "{file_descriptors" ]]; then
TOTAL_LIMIT=$(echo ${LINE} | awk '{print $3}')
elif [[ "${TEST}" == "{total_used" ]]; then
TOTAL_USED=$(echo ${LINE} | awk '{print $2}')
elif [[ "${TEST}" == "{sockets_limit" ]]; then
SOCKETS_LIMIT=$(echo ${LINE} | awk '{print $2}')
elif [[ "${TEST}" == "{sockets_used" ]]; then
SOCKETS_USED=$(echo ${LINE} | awk '{print $2}')
elif [[ "${TEST}" == "{processes{limit" ]]; then
LIMIT=$(echo ${LINE} | awk '{print $2}')
USED=$(echo ${LINE} | awk '{print $3}')
elif [[ "${TEST}" == "{processes" ]]; then
LIMIT=$(echo ${LINE} | awk '{print $3}')
USED=$(echo ${LINE} | awk '{print $4}')
elif [[ "${TEST}" == "{run_queue" ]]; then
RUN_QUEUE=$(echo ${LINE} | awk '{print $2}')
elif [[ "${TEST}" == "{uptime" ]]; then
UPTIME=$(echo ${LINE} | awk '{print $2}')
echo "${TIMEDATE},${TOTAL},${CONNECTION_READERS},${CONNECTION_WRITERS},${CONNECTION_CHANNELS},${CONNECTION_OTHER},${QUEUE_PROCS},${QUEUE_SLAVE_PROCS},${PLUGINS},${OTHER_PROC},${MNESIA},${MGMT_DB},${MSG_INDEX},${OTHER_ETS},${BINARY},${CODE},${ATOM},${OTHER_SYSTEM},${VM_MEMORY_HIGH_WATERMARK},${VM_MEMORY_LIMIT},${DISK_FREE_LIMIT},${DISK_FREE},${TOTAL_LIMIT},${TOTAL_USED},${SOCKETS_LIMIT},${SOCKETS_USED},${LIMIT},${USED},${RUN_QUEUE},${UPTIME}" >> ${OUTFILE}
TIMEDATE=""
TOTAL=""
CONNECTION_READERS=""
CONNECTION_WRITERS=""
CONNECTION_CHANNELS=""
CONNECTION_OTHER=""
QUEUE_PROCS=""
QUEUE_SLAVE_PROCS=""
PLUGINS=""
OTHER_PROC=""
MNESIA=""
MGMT_DB=""
MSG_INDEX=""
OTHER_ETS=""
BINARY=""
CODE=""
ATOM=""
OTHER_SYSTEM=""
VM_MEMORY_HIGH_WATERMARK=""
VM_MEMORY_LIMIT=""
DISK_FREE_LIMIT=""
DISK_FREE=""
TOTAL_LIMIT=""
TOTAL_USED=""
SOCKETS_LIMIT=""
SOCKETS_USED=""
LIMIT=""
USED=""
RUN_QUEUE=""
UPTIME=""
fi
done < tmp.txt
rm tmp.txt


@@ -1,99 +0,0 @@
#!/bin/bash
#Copyright (c) 2016 Wind River Systems, Inc.
#
#SPDX-License-Identifier: Apache-2.0
#
# When the occtop stats (occtop-xxx.csv) and schedtop summary (schedtop-summary-xxx.txt)
# show high CPU occupancy and a list of high runners for a particular host, this script can
# be used to generate detailed stats for the offending service(s). The command line only takes
# one service name. To specify more than one service, update SERVICE_LIST in the host.conf file.
#
# Usage:
# ./parse-schedtop.sh <host-name>
# ./parse-schedtop.sh <host-name> <service-name>
PARSERDIR=$(dirname $0)
. ${PARSERDIR}/parse-util.sh
function print_usage()
{
echo "Usage: ./parse-schedtop.sh <host-name>"
echo " ./parse-schedtop.sh <host-name> <service-name>"
echo "e.g. >./parse-schedtop.sh controller-0 nova-conductor"
exit 1
}
function sedit()
{
local FILETOSED=$1
sed -i -e "s/ */ /g" ${FILETOSED}
sed -i -e "s/ /,/2g" ${FILETOSED}
# Remove any trailing comma
sed -i "s/,$//" ${FILETOSED}
}
function parse_schedtop_data()
{
HOST=$1
SERVICE=$2
LOG "Parsing ${SERVICE} schedtop for host ${HOST}"
../parse_schedtop --detail --field=occ --sum=${SERVICE} *schedtop.bz2 > tmp.txt
sedit tmp.txt
grep '^[0-9]' tmp.txt > tmp2.txt
echo "Date/Time,dt(s),occ(%)" > schedtop-${SERVICE}-${HOST}.csv
cat tmp2.txt >> schedtop-${SERVICE}-${HOST}.csv
}
if [[ $# -eq 0 ]]; then
# Parsing detailed schedtop stats for all services configured in host.conf for all hosts would
# take a very long time and is often unnecessary. Until the performance issue with parse_schedtop
# is addressed, this is not supported.
print_usage
else
if [ ! -d "$1" ]; then
echo "ERROR: Specified host $1 does not exist."
exit 1
fi
if [[ $# -eq 1 ]]; then
cd $1
if [ ! -f host.conf ]; then
echo "Host configuration file is missing."
echo "See http://wiki.wrs.com/PBUeng/TitaniumServerSysengToolsAndDataAnalysis for more info."
exit 1
fi
source ./host.conf
if [ -z "${SERVICE_LIST}" ]; then
# This script could be invoked from the parse-all script or executed independently, so display the
# error on the console as well as logging it to file.
echo "ERROR: The SERVICE_LIST config parameter is not set in host.conf file."
ERRLOG "SERVICE_LIST config parameter is not set in host.conf file. Detail schedtop parsing skipped for $1."
exit 1
fi
for SERVICE in ${SERVICE_LIST}; do
# This could be the most time-consuming step. Jim G.'s suggestion:
#
# We could rewrite some of the pattern matching outputs to use 'tab' to separate columns of
# occupancy output, to make that 1-pass instead of multi-pass per variable we are after.
# We should monitor loadavg, per-cpu usage, iowait and memory usage of the parsers; if
# we are idle and can handle more load, we should do more of these in parallel, and just call
# 'wait' at the end.
#
# May also consider using "GNU parallel" package to parallel the entire function, e.g.
# function do_parallel_work() { do_command }
# do_parallel_work arg1 arg2 &
# do parallel_work arg3 arg4 &
# wait
#
# Can also export function "export -f func_name" and run that function in another bash command
parse_schedtop_data $1 ${SERVICE}
done
elif [[ $# -eq 2 ]]; then
cd $1
parse_schedtop_data $1 $2
else
print_usage
fi
[ -e tmp.txt ] && rm tmp.txt tmp2.txt
fi


@@ -1,26 +0,0 @@
#!/bin/bash
#Copyright (c) 2016 Wind River Systems, Inc.
#
#SPDX-License-Identifier: Apache-2.0
#
#PARSERDIR=$(dirname $0)
#LOGFILE="${PARSERDIR}/parserlog.txt"
LOGFILE="parserlog.txt"
function LOG ()
{
local tstamp_H=$( date +"%Y-%0m-%0e %H:%M:%S" )
echo -e "${tstamp_H} $0($$): $@" >> ${LOGFILE}
}
function ERRLOG ()
{
LOG "ERROR: $@"
}
function WARNLOG ()
{
LOG "WARN: $@"
}


@@ -1,896 +0,0 @@
#!/usr/bin/perl
#Copyright (c) 2016 Wind River Systems, Inc.
#
#SPDX-License-Identifier: Apache-2.0
#
# Usage:
# parse_filestats [--list | --all | --name <pattern>] file1 file2 file3.gz ...
# [--pid <pid1>] ...
# [--cmd <string>] ...
# [--exact <string> ...]
# [--excl <string>] ...
# [--detail]
# [--thresh <MiB/d>]
# [--transient]
# [--dir <path>]
# [--dur <days>]
# [--report]
# [--help]
#
# Purpose: Parse and summarize file descriptor usage per process by name/pid.
# Display hirunners based on daily growth rate.
#
# Modification history:
# - 2016-Oct-06 - Lachlan Plant, created based on parse_memstats.
##############################################################################
use 5.10.0;
use warnings;
use strict;
use Time::Local 'timelocal_nocheck'; # inverse time functions
use File::Basename;
use File::Spec ();
use Data::Dumper;
my $SCRIPT = basename($0);
my $DEFAULT_PATH = ".";
my $iter = 0;
# Hash storage data-structures
my (%data, %overall, %timestamps, %days, %matched, %stats) = ();
my (%files_current, %file_stats, %file_slope) = ();
# Timestamp variables
my ($wday, $month, $day, $hh, $mm, $ss, $yy, $ns) = ();
# Uptime data
my ($uptime, $idle);
my ($ftotal, $files, $sockets, $pipes);
# Process data
my ($PID, $TOTAL, $FD ,$U, $W, $R, $CWD, $RTD, $TXT, $MEM, $DEL, $TCP, $COMMAND, ) = ();
# Free memory
my ($is_strict) = (0);
my ($avail_free_mem_MB, $unlinked_files_MB) = ();
my ($fs_root_MB, $fs_root_p_use, $fs_tmp_MB, $fs_tmp_p_use) = ();
# Argument list parameters
our ($arg_all, $arg_match, $arg_name,
@arg_pids, @arg_commands, @arg_exact, @arg_commands_excl,
$arg_list, $arg_detail, $arg_thresh, $arg_transient, $arg_path, $arg_dur,
$arg_report, @arg_files) = ();
# Determine location of gunzip binary
our $GUNZIP = which('gunzip');
if (!(defined $GUNZIP)) {
die "*error* cannot find 'gunzip' binary. Cannot continue.\n";
}
our $BUNZIP2 = which('bunzip2');
if (!(defined $BUNZIP2)) {
die "*error* cannot find 'bunzip2' binary. Cannot continue.\n";
}
# Parse input arguments and print tool usage if necessary
# -- note: @arg_pids, and @arg_commands are only defined if they are set
&get_parse_filestats_args(\$arg_all, \$arg_match, \$arg_name,
\@arg_pids, \@arg_commands, \@arg_exact, \@arg_commands_excl,
\$arg_list, \$arg_detail, \$arg_thresh, \$arg_transient,
\$arg_path, \$arg_dur, \$arg_report, \@arg_files);
# Print list of file information
if (defined $arg_list) {
my @list = (); my %chrono = (); my ($host, $time) = ();
opendir(DIR, $arg_path) || die "can't opendir $arg_path: ($!)";
@list = sort {$a cmp $b}
grep { /_(\d{4}-\d{2}-\d{2}_\d{4})_filestats?.?g?z\b/ && -f "$arg_path/$_" }
readdir(DIR);
closedir DIR;
foreach my $file (@list) {
$_ = $file;
($host, $time) = /(.*)_(\d{4}-\d{2}-\d{2}_\d{4})_filestats/;
$chrono{$host}{$time} = 1;
}
# Print out summary of hosts with oldest and newest files
printf "%s: List of available 'filestat' data:\n\n", $SCRIPT;
printf "%-20s %15s %15s\n", "host", "oldest", "newest";
printf "%-20s %15s %15s\n", "-"x20, "-"x15, "-"x15;
foreach $host (sort keys %chrono) {
my @times = sort {$a cmp $b} keys %{$chrono{$host}};
printf "%-20s %15s %15s\n", $host, $times[0], $times[-1];
}
exit 1;
}
# Print selected options (except for file list)
if ((@arg_pids) || (@arg_commands)) {
printf "selected pids/patterns: @arg_pids @arg_commands\n";
}
printf "this may take a while...\n";
# Determine file list based on smart file GLOB
if (!@arg_files) {
if (defined $arg_name) {
@arg_files = <$arg_path/*$arg_name*>;
} else {
@arg_files = <$arg_path/*file*>
}
if (!@arg_files) {
printf "no files selected.\n";
}
}
# Compile regular expressions command string patterns
# -- store list of expressions to INCLUDE
my @re_commands = ();
foreach my $arg (@arg_commands) {
push @re_commands, qr/\Q$arg\E/;
}
my @re_exact_commands = ();
foreach my $arg (@arg_exact) {
push @re_exact_commands, qr/\Q$arg\E/;
}
# -- store list of expressions to EXCLUDE
my @nre_commands = ();
push @arg_commands_excl, $SCRIPT;
foreach my $arg (@arg_commands_excl) {
push @nre_commands, qr/\Q$arg\E/;
}
# Determine list of files per matching hostname
my %filenames = ();
foreach my $file (sort @arg_files) {
if ($file !~ /_\d{4}-\d{2}-\d{2}_\d{4}_file/) {
printf "ignoring: '$file', does not match '_<yyyy>-<mm>-<dd>-<hhmm>_file' format.\n";
next;
}
my $host = $file; $host =~ s/_\d{4}-\d{2}-\d{2}_\d{4}_.*$//; $host =~ s/^.*\///;
printf "host = $host, file = $file\n";
push @{$filenames{$host}}, $file;
}
# Prune the file list to retain only the most recent --dur <days> worth of files per host
my $max_files = int($arg_dur*24);
foreach my $host (keys %filenames) {
if (scalar(@{$filenames{$host}}) > $max_files) { # prune to size of max_files, keeping end of list (most recent)
@{$filenames{$host}} = splice(@{$filenames{$host}},-$max_files);
}
}
my $seconds_in_day = 24*60*60;
my $first_time = 0.0;
my $last_time = 0.0;
# PROCESS ALL MATCHING HOSTS
# -- then process all files per host in chronological order
foreach my $host (sort keys %filenames) {
my $pass = 1;
REPEAT_CALCULATION:
$iter = 0; $first_time = 0.0;
%matched = ();
%data = ();
%timestamps = ();
%overall = ();
%days = ();
%stats = ();
%file_stats = ();
# Evaluate first and last filename's time and convert time to days relative to first_time
my $first_file = ${$filenames{$host}}[0];
my $last_file = ${$filenames{$host}}[-1];
$_ = $first_file; ($yy, $month, $day, $hh) = /_(\d{4})-(\d{2})-(\d{2})_(\d{2})\d{2}_file/;
$first_time = timelocal_nocheck(00, 00, $hh, $day, $month-1, $yy-1900)/$seconds_in_day;
my $first_date = sprintf("%4d-%02d-%02d %02d:00", $yy, $month, $day, $hh);
$_ = $last_file; ($yy, $month, $day, $hh) = /_(\d{4})-(\d{2})-(\d{2})_(\d{2})\d{2}_file/;
$last_time = timelocal_nocheck(59, 59, $hh, $day, $month-1, $yy-1900)/$seconds_in_day - $first_time;
my $last_date = sprintf("%4d-%02d-%02d %02d:00", $yy, $month, $day, $hh);
FILE_LIST: foreach my $file ( @{$filenames{$host}} ) {
my $FOUND = 0; # handle files being decompressed while parser is running
if ( -e $file ) {
if ($file =~ /\.gz$/) {
open(FILE, "$::GUNZIP -c $file |") || die "Cannot open file: $file ($!)\n";
} elsif ($file =~ /\.bz2$/) {
open(FILE, "$::BUNZIP2 -c $file |") || die "Cannot open file: $file ($!)\n";
} else {
open(FILE, $file) || die "Cannot open file: $file ($!)\n";
}
$FOUND = 1;
} else {
if ($file =~ /\.gz$/) {$file =~ s/\.gz//;} else {$file .= '.gz';}
if ($file =~ /\.bz2$/) {$file =~ s/\.bz2//;} else {$file .= '.bz2';}
if ( -e $file ) {
if ($file =~ /\.gz$/) {
open(FILE, "$::GUNZIP -c $file |") || die "Cannot open file: $file ($!)\n";
} elsif ($file =~ /\.bz2$/) {
open(FILE, "$::BUNZIP2 -c $file |") || die "Cannot open file: $file ($!)\n";
} else {
open(FILE, $file) || die "Cannot open file: $file ($!)\n";
}
$FOUND = 1;
}
}
next if ($FOUND == 0);
# Parse file line at a time
READ_LOOP: while($_ = <FILE>) {
s/[\0\e\f\r\a]//g; chomp; # strip control characters if any
# START OF SAMPLE Time: Parse hires timestamp, ignore timezone
if (/time:\s+(\w+)\s+(\d{4})-(\d{2})-(\d{2})\s+(\d{2}):(\d{2}):(\d{2})\.(\d{9})\s+\w+\s+\S+\s+uptime:\s+(\S+)\s+(\S+)/) {
$wday = $1; $yy = $2; $month = $3; $day = $4; $hh = $5; $mm = $6; $ss = $7; $ns = $8; $uptime = $9; $idle = $10;
$FOUND = -1 if (($FOUND == 1) || ($FOUND == -2));
$timestamps{$iter} = [($wday,$month,$day,$hh,$mm,$ss,$yy)];
$days{$iter} = timelocal_nocheck($ss, $mm, $hh, $day, $month-1, $yy-1900)/$seconds_in_day - $first_time;
($ftotal, $files, $sockets, $pipes) = (0, 0, 0, 0);
%files_current = ();
next READ_LOOP;
}
## ls -l /proc/*/fd
#TOTAL FILES SOCKETS NULL PIPES
#4955 3385 1071 499
if (/^MEMINFO:/ || /^ls -l \/proc\/\*\/fd/) {
# handle case where we detect the sample is incomplete, and delete
if ($FOUND != -1) {
close(FILE);
delete $days{$iter} if (defined $days{$iter});
delete $timestamps{$iter} if (defined $timestamps{$iter});
delete $data{$iter} if (defined $data{$iter});
delete $overall{$iter} if (defined $overall{$iter});
next FILE_LIST;
}
my $hcnt = 0;
# find headings line
HEADER_LOOP: while($_ = <FILE>) {
s/[\0\e\f\r\a]//g; chomp; # strip control characters if any
$hcnt++;
last HEADER_LOOP if (/\bTOTAL\b/); # end titles-line
next READ_LOOP if (($hcnt == 1) && /^\s*$/); # end at blank-line (eg no 'ps' output)
}
next if ! $_; # sometimes $_ can be null (at EOF) and causes subsequent warnings
# Process all entries of MEMINFO
MEMINFO_LOOP: while($_ = <FILE>) {
s/[\0\e\f\r\a]//g; chomp; # strip control characters if any
last MEMINFO_LOOP if (/^\s*$/); # end at blank-line
($ftotal, $files, $sockets, $pipes) = /^(\d+)\s+(\d+)\s+(\d+)\s+(\d+).*/
}
}
next if ! $_; # sometimes $_ can be null (at EOF) and causes subsequent warnings
# EXPECTED RAW FORMAT
## lsof
#PID TOTAL U W R CWD RTD TXT MEM DEL TCP CMD
#20830 299 16 4 3 1 1 1 16 0 1 lcore-sla
if (/^PROCESS SUMMARY:/ || /^# lsof/) {
my $hcnt = 0;
# find headings line
HEADER_LOOP: while($_ = <FILE>) {
s/[\0\e\f\r\a]//g; chomp; # strip control characters if any
$hcnt++;
last HEADER_LOOP if (/\bPID\b/); # end titles-line
next READ_LOOP if (($hcnt == 1) && /^\s*$/); # end at blank-line (eg no 'ps' output)
}
next if ! $_; # sometimes $_ can be null (at EOF) and causes subsequent warnings
# Parse file line at a time
PROCESS_LOOP: while($_ = <FILE>) {
my $found_pid = 0;
s/[\0\e\f\r\a]//g; chomp; # strip control characters if any
last PROCESS_LOOP if (/^\s*$/); # end at blank-line
if (($PID ,$TOTAL, $FD, $U, $W, $R, $CWD, $RTD, $TXT, $MEM, $DEL, $TCP, $COMMAND) = /^\s*(\d+)\s+(\d+)\s+(\d+)\s+(\d+)\s+(\d+)\s+(\d+)\s+(\d+)\s+(\d+)\s+(\d+)\s+(\d+)\s+(\d+)\s+(\d+)\s+(.*)/) {
$found_pid = 1;
}
if ($found_pid == 1) {
# Match on multiple regular expressions or multiple pids
my $match = (defined $arg_all ? 1: 0);
foreach my $pid (@arg_pids) {if ($pid == $PID) {$match = 1; goto FOUND_CMD;} } # inclusions
foreach my $nre (@nre_commands) {if ($COMMAND =~ $nre) {$match = 0; goto FOUND_CMD;} } # exclusions
foreach my $re (@re_commands) {if ($COMMAND =~ $re) {$match = 1; goto FOUND_CMD;} } # inclusions
foreach my $re (@re_exact_commands) {if ($COMMAND =~ /^$re$/) {$match = 1; goto FOUND_CMD;} } # inclusions
FOUND_CMD: if ($match == 1) {
if ($arg_match) {
$matched{'MATCH'}{$PID} = 1;
$data{$iter}{'MATCH'}{$PID} = [($TOTAL, $FD, $U, $W, $R, $CWD, $RTD, $TXT, $MEM, $DEL, $TCP)];
} else {
$matched{$COMMAND}{$PID} = 1;
$data{$iter}{$COMMAND}{$PID} = [($TOTAL, $FD, $U, $W, $R, $CWD, $RTD, $TXT, $MEM, $DEL, $TCP)];
}
$files_current{$COMMAND} += $FD;
}
}
}
}
next if ! $_; # sometimes $_ can be null (at EOF) and causes subsequent warnings
if (/^END OF SAMPLE:/ || /^----/ || / done$/) {
next if !(defined $uptime);
$FOUND = -2; # flag that we parsed complete file descriptor sample
# Handle incomplete sample in case there was no END OF SAMPLE
if (!(defined $days{$iter}) || !(defined $timestamps{$iter})) {
delete $days{$iter} if (defined $days{$iter});
delete $timestamps{$iter} if (defined $timestamps{$iter});
delete $data{$iter} if (defined $data{$iter});
delete $overall{$iter} if (defined $overall{$iter});
next;
}
$overall{$iter} = [($ftotal, $files, $sockets, $pipes, $uptime)];
# Store data for hirunner stats for current day only
my $day = $days{$iter};
if (!(defined $day)) {
printf "iter = %s\n", $iter;
printf "day=%s, last_time=%s\n", $day, $last_time;
$Data::Dumper::Indent = 1;
print Data::Dumper->Dump([\%overall], [qw(overall)]);
#printf "overall:%s = %s\n", $iter, "@{$overall{$iter}}";
}
if ($day >= ($last_time - 1.0)) {
foreach my $cmd (keys %files_current) {
my ($count, $cur_files, $max_files, $sum_X, $sum_Y, $sum_XX, $sum_XY) = (0,0,0,0,0,0,0);
if (defined $file_stats{$cmd}) {
($count, $cur_files, $max_files, $sum_X, $sum_Y, $sum_XX, $sum_XY) = @{$file_stats{$cmd}};
}
$count++;
$cur_files = $files_current{$cmd};
$max_files = ($cur_files > $max_files) ? $cur_files : $max_files;
$sum_X += $day;
$sum_Y += $cur_files;
$sum_XX += ($day * $day);
$sum_XY += ($cur_files * $day);
$file_stats{$cmd} = [($count, $cur_files, $max_files, $sum_X, $sum_Y, $sum_XX, $sum_XY)];
}
}
$iter++;
$uptime = ();
next READ_LOOP;
}
}
close(FILE);
# Check that last sample was completed, else delete last hash key if defined
# -- no use in showing a message to user, will just confuse
if ($FOUND != -2 || !$overall{$iter}) {
delete $days{$iter} if (defined $days{$iter});
delete $timestamps{$iter} if (defined $timestamps{$iter});
delete $data{$iter} if (defined $data{$iter});
delete $overall{$iter} if (defined $overall{$iter});
}
}
# PRINT SUMMARY FOR THIS HOSTNAME
my ($idx_nlwp, $idx_rss, $idx_vsz) = (0, 1, 2);
my ($idx_TOTAL, $idx_FD, $idx_U, $idx_W, $idx_R, $idx_CWD, $idx_RTD, $idx_TXT, $idx_MEM, $idx_DEL, $idx_TCP) = (0,1,2,3,4,5,6,7,8,9,10);
my ($idx_ftotal, $idx_files, $idx_sockets, $idx_pipes, $idx_uptime) = (0,1,2,3,4);
my ($idx_wday, $idx_month, $idx_day, $idx_hh, $idx_mm, $idx_ss, $idx_yy) = (0, 1, 2, 3, 4, 5, 6);
my @iters = sort {$a <=> $b} keys %timestamps;
if (scalar(@iters) == 0) {
# do not continue processing for this host if no valid data
print "\n", "="x80, "\n", "NO VALID DATA FOR: $host\n\n";
next;
}
$last_time = $days{ $iters[-1] };
# Calculate statistics (only on first pass)
my $idx_mem = $idx_FD;
if ((defined $arg_report) && ($pass == 1)) {
$pass++;
my %num = ();
foreach my $iter (@iters) {
my $avail = ${ $overall{$iter} }[$idx_ftotal];
my $unlinked = ${ $overall{$iter} }[$idx_files];
my $fs_root = ${ $overall{$iter} }[$idx_sockets];
my $uptime = ${ $overall{$iter} }[$idx_uptime];
my $slab = ${ $overall{$iter} }[$idx_pipes];
my $day = $days{$iter};
my @keys = ();
# Cumulate stats for regression
if ($days{$iter} >= ($last_time - 1.0)) { push @keys, '1day';}
foreach my $key (@keys) {
$stats{$key}{'DAY'}{'sum_X'} += $day;
$stats{$key}{'DAY'}{'sum_XX'} += ($day * $day);
$stats{$key}{'DAY'}{'sum_XY'} += ($day * $day);
$num{$key}++;
}
foreach my $cmd (sort {$b cmp $a} keys %matched) {
# Sum up key values for commands that match
my $SUM_fd = 0.0;
foreach my $pid (keys %{ $matched{$cmd} }) {
if (defined $data{$iter}{$cmd}{$pid}) {
$SUM_fd += ${ $data{$iter}{$cmd}{$pid} }[$idx_FD];
}
}
# Cumulate stats for regression
foreach my $key (@keys) {
$stats{$key}{'fd_' .$cmd}{'sum_X'} += $SUM_fd;
$stats{$key}{'fd_' .$cmd}{'sum_XX'} += ($SUM_fd * $SUM_fd);
$stats{$key}{'fd_' .$cmd}{'sum_XY'} += ($SUM_fd * $day);
}
}
}
# Perform simple linear regression on all variables
foreach my $key (keys %stats) {
foreach my $var (keys %{ $stats{$key} }) {
($stats{$key}{$var}{'int'}, $stats{$key}{$var}{'slope'}) = &linear_fit(
$stats{$key}{'DAY'}{'sum_X'}, # 'DAY' is 'x' variable
$stats{$key}{$var}{'sum_X'},
$stats{$key}{$var}{'sum_XY'},
$stats{$key}{'DAY'}{'sum_XX'},
$num{$key}
);
}
}
%file_slope = ();
my $max_iter = $iters[-1];
# Compile regular expressions command string patterns
# -- store list of expressions to INCLUDE
@re_exact_commands = ();
foreach my $arg (@arg_exact) {
push @re_exact_commands, qr/\Q$arg\E/;
}
foreach my $cmd (keys %file_stats) {
my ($count, $cur_files, $max_files, $sum_X, $sum_Y, $sum_XX, $sum_XY) = @{$file_stats{$cmd}};
my ($intercept, $slope) = &linear_fit($sum_X, $sum_Y, $sum_XY, $sum_XX, $count);
$file_slope{$cmd} = $slope; # slope (MiB/d)
# sneaky check for ignoring transient processes
# i.e. specific process exists less than half a day,
# or is in less than half the samples
if (($file_slope{$cmd} >= $arg_thresh) &&
((defined $arg_transient) ||
((($max_iter >= 142) && ($count > 70)) ||
(($max_iter < 142) && ($max_iter/2 < $count))
))) {
push @re_exact_commands, qr/\Q$cmd\E/;
}
}
goto REPEAT_CALCULATION;
}
print "\n", "="x80, "\n", "SUMMARY: host:$host ($first_date to $last_date)\n\n";
if (keys %matched) {
# PRINT HEADING OF MATCHED COMMAND PATTERNS AND PIDS
my %labels = ();
my $ele = 0;
foreach my $cmd (sort {$b cmp $a} keys %matched) {
my @pids = keys %{$matched{$cmd}};
# Create short command name
my $name = "";
$_ = $cmd;
if (/^\[(.*)\]/) {
$name = $1;
} else {
my @array = split(/\s+/, $cmd);
$name = shift @array; $name =~ s/^.*\///;
}
$labels{$cmd} = sprintf("%d:%s", $ele, $name);
printf "label: %s (%s)\n", $labels{$cmd}, $cmd;
printf " pids:(";
foreach my $pid (sort {$a <=> $b} keys %{ $matched{$cmd} }) {
printf "%d,", $pid;
}
printf ")\n";
$ele++;
}
# PRINT COLUMN HEADINGS FOR EACH PATTERN
printf "%10s %9s", "", "";
if (!(defined $arg_report)) {
my $width = 26;
foreach my $cmd (sort {$b cmp $a} keys %matched) {
my @pids = keys %{$matched{$cmd}};
printf " | %26s", substr $labels{$cmd}, 0, $width;
}
} else {
my $width = 15;
foreach my $cmd (sort {$b cmp $a} keys %matched) {
my @pids = keys %{$matched{$cmd}};
printf " %15s", substr $labels{$cmd}, 0, $width;
}
}
print "\n";
}
printf "%10s %9s", "DATE", "TIME";
if (!(defined $arg_report)) {
foreach my $cmd (sort {$b cmp $a} keys %matched) {
printf " | %8s %8s %8s", "U", "R", "W";
}
} else {
foreach my $cmd (sort {$b cmp $a} keys %matched) {
printf " %8s", "FD";
}
}
print "\n";
my %num = ();
my $uptime_last = 0.0;
my $num_reboots = 0;
foreach my $iter (@iters) {
my $ftotal = ${ $overall{$iter} }[$idx_ftotal];
my $f_files = ${ $overall{$iter} }[$idx_files];
my $sockets = ${ $overall{$iter} }[$idx_sockets];
my $uptime = ${ $overall{$iter} }[$idx_uptime];
my $pipes = ${ $overall{$iter} }[$idx_pipes];
my $day = $days{$iter};
my @keys = ('all');
if ($uptime < $uptime_last) {
$num_reboots++;
if (defined $arg_detail) {
printf "--reboot detected----%28s", '-'x28;
if (!(defined $arg_report)) {
foreach (keys %matched) {printf "%25s", '-'x25;}
} else {
foreach (keys %matched) {printf "%12s", '-'x12;}
}
print "\n";
}
}
if ((defined $arg_detail) || ($iter == $iters[-1])) {
printf "%04d-%02d-%02d %02d:%02d:%02d",
${ $timestamps{$iter} }[$idx_yy],
${ $timestamps{$iter} }[$idx_month],
${ $timestamps{$iter} }[$idx_day],
${ $timestamps{$iter} }[$idx_hh],
${ $timestamps{$iter} }[$idx_mm],
${ $timestamps{$iter} }[$idx_ss];
}
# Cumulate stats for regression
if ($days{$iter} >= ($last_time - 1.0)) { push @keys, '1day';}
foreach my $key (@keys) {
$stats{$key}{'DAY'}{'sum_X'} += $day;
$stats{$key}{'TOTAL'}{'sum_X'} += $ftotal;
$stats{$key}{'PIPES'}{'sum_X'} += $pipes;
$stats{$key}{'FILES'}{'sum_X'} += $f_files;
$stats{$key}{'SOCKETS'}{'sum_X'} += $sockets;
$stats{$key}{'DAY'}{'sum_XX'} += ($day * $day);
$stats{$key}{'TOTAL'}{'sum_XX'} += ($ftotal * $ftotal);
$stats{$key}{'PIPES'}{'sum_XX'} += ($pipes * $pipes);
$stats{$key}{'FILES'}{'sum_XX'} += ($f_files * $f_files);
$stats{$key}{'SOCKETS'}{'sum_XX'} += ($sockets * $sockets);
$stats{$key}{'DAY'}{'sum_XY'} += ($day * $day);
$stats{$key}{'TOTAL'}{'sum_XY'} += ($ftotal * $day);
$stats{$key}{'PIPES'}{'sum_XY'} += ($pipes * $day);
$stats{$key}{'FILES'}{'sum_XY'} += ($f_files * $day);
$stats{$key}{'SOCKETS'}{'sum_XY'} += ($sockets * $day);
$num{$key}++;
}
foreach my $cmd (sort {$b cmp $a} keys %matched) {
# Sum up key values for commands that match
my ($SUM_u, $SUM_r, $SUM_w, $SUM_FD) = (0.0, 0.0, 0.0, 0.0);
foreach my $pid (keys %{ $matched{$cmd} }) {
if (defined $data{$iter}{$cmd}{$pid}) {
$SUM_u += ${ $data{$iter}{$cmd}{$pid} }[$idx_U];
$SUM_r += ${ $data{$iter}{$cmd}{$pid} }[$idx_R];
$SUM_w += ${ $data{$iter}{$cmd}{$pid} }[$idx_W];
$SUM_FD += ${ $data{$iter}{$cmd}{$pid} }[$idx_FD];
}
}
if ((defined $arg_detail) || ($iter == $iters[-1])) {
if (!(defined $arg_report)) {
printf " | %8d %8d %8d", $SUM_u, $SUM_r, $SUM_w;
} else {
printf " %8d", $SUM_FD;
}
}
# Cumulate stats for regression
foreach my $key (@keys) {
$stats{$key}{'u_'.$cmd}{'sum_X'} += $SUM_u;
$stats{$key}{'r_' .$cmd}{'sum_X'} += $SUM_r;
$stats{$key}{'w_' .$cmd}{'sum_X'} += $SUM_w;
$stats{$key}{'u_'.$cmd}{'sum_XX'} += ($SUM_u * $SUM_u);
$stats{$key}{'r_' .$cmd}{'sum_XX'} += ($SUM_r * $SUM_r);
$stats{$key}{'w_' .$cmd}{'sum_XX'} += ($SUM_w * $SUM_w);
$stats{$key}{'u_'.$cmd}{'sum_XY'} += ($SUM_u * $day);
$stats{$key}{'r_' .$cmd}{'sum_XY'} += ($SUM_r * $day);
$stats{$key}{'w_' .$cmd}{'sum_XY'} += ($SUM_w * $day);
$stats{$key}{'fd_' .$cmd}{'sum_X'} += $SUM_FD;
$stats{$key}{'fd_' .$cmd}{'sum_XX'} += ($SUM_FD * $SUM_FD);
$stats{$key}{'fd_' .$cmd}{'sum_XY'} += ($SUM_FD * $day);
}
}
if ((defined $arg_detail) || ($iter == $iters[-1])) {
printf "\n";
}
# save uptime for comparison
$uptime_last = $uptime;
}
# Perform simple linear regression on all variables
foreach my $key (keys %stats) {
foreach my $var (keys %{ $stats{$key} }) {
($stats{$key}{$var}{'int'}, $stats{$key}{$var}{'slope'}) = &linear_fit(
$stats{$key}{'DAY'}{'sum_X'}, # 'DAY' is 'x' variable
$stats{$key}{$var}{'sum_X'},
$stats{$key}{$var}{'sum_XY'},
$stats{$key}{'DAY'}{'sum_XX'},
$num{$key}
);
}
}
%file_slope = ();
foreach my $cmd (keys %file_stats) {
my ($count, $cur_files, $max_files, $sum_X, $sum_Y, $sum_XX, $sum_XY) = @{$file_stats{$cmd}};
my ($intercept, $slope) = &linear_fit($sum_X, $sum_Y, $sum_XY, $sum_XX, $count);
$file_slope{$cmd} = $slope;
}
# Print out linear trends
# [ OBSOLETE ] printf "%20s %8s %7s %6s %5s", '-'x20, '-'x8, '-'x7, '-'x6, '-'x5;
printf "%20s", '-'x20;
if (!(defined $arg_report)) {
foreach my $cmd (sort {$b cmp $a} keys %matched) {
printf " | %8s %8s %8s", '-'x8, '-'x8, '-'x8;
}
} else {
foreach my $cmd (sort {$b cmp $a} keys %matched) {
printf " %15s", '-'x15;
}
}
print "\n";
foreach my $key (sort {$b cmp $a} keys %stats) {
printf "%20s", ($key eq 'all') ? "LONG TREND: (FD/d)" : "1 DAY TREND: (FD/d)";
if (!(defined $arg_report)) {
foreach my $cmd (sort {$b cmp $a} keys %matched) {
printf " | %8.3f %8.3f %8.3f",
$stats{$key}{'u_'.$cmd}{'slope'},
$stats{$key}{'w_' .$cmd}{'slope'},
$stats{$key}{'r_' .$cmd}{'slope'};
}
} else {
foreach my $cmd (sort {$b cmp $a} keys %matched) {
printf " %8.3f", $stats{$key}{'fd_' .$cmd}{'slope'};
}
}
if (($key eq 'all') && ($num_reboots > 0)) {
printf " (%d reboots)", $num_reboots;
}
print "\n";
}
my $n = 0;
# Print out hirunner process growth
printf "\nPROCESSES WITH HIGHEST GROWTH (1 DAY TREND: > %.1f FD's/day):\n", $arg_thresh;
printf "%9s %9s %9s %s\n", 'CurFDs', 'HiFDs', 'Rate', 'COMMAND';
printf "%9s %9s %9s %s\n", '-'x8, '-'x8, '-'x8, '-'x9;
foreach my $cmd (sort {$file_slope{$b} <=> $file_slope{$a} } keys %file_slope) {
last if ($file_slope{$cmd} < $arg_thresh);
my $max_iter = $iters[-1];
my ($count, $cur_files, $max_files, $sum_X, $sum_Y, $sum_XX, $sum_XY) = @{$file_stats{$cmd}};
if ((defined $arg_transient) || ((($max_iter >= 142) && ($count > 70)) ||
(($max_iter < 142) && ($max_iter/2 < $count)))) { # print only processes seen most of the time
printf "%9.3f %9.3f %9.3f %s\n", $cur_files, $max_files, $file_slope{$cmd}, $cmd;
$n++;
}
}
print "none\n" if ($n == 0);
print "\n";
}
exit 0;
#######################################################################################################################
# Lightweight which(), derived from CPAN File::Which
sub which {
my ($exec) = @_;
return undef unless $exec;
my $all = wantarray;
my @results = ();
my @path = File::Spec->path;
foreach my $file ( map { File::Spec->catfile($_, $exec) } @path ) {
next if -d $file;
if (-x _) { return $file unless $all; push @results, $file; }
}
$all ? return @results : return undef;
}
# Process "parse_filestats" command line arguments and set defaults
sub get_parse_filestats_args {
# Returned parameters
local (*::arg_all, *::arg_match, *::arg_name,
*::arg_pids, *::arg_commands, *::arg_exact, *::arg_commands_excl,
*::arg_list, *::arg_detail, *::arg_thresh, *::arg_transient,
*::arg_path, *::arg_dur, *::arg_report, *::arg_files) = @_;
# Local variables
my ($fail, $arg_help) = ();
my @tmp = ();
# Use the Argument processing module
use Getopt::Long;
# Print usage if no arguments
if (!@ARGV) {
&Usage();
exit 0;
}
# Process input arguments
$fail = 0;
GetOptions(
"all", \$::arg_all, # CURRENTLY UNUSED
"match", \$::arg_match,
"name=s", \$::arg_name,
"pid=i", \@::arg_pids,
"cmd=s", \@::arg_commands,
"exact=s", \@::arg_exact,
"excl=s", \@::arg_commands_excl,
"list", \$::arg_list,
"detail", \$::arg_detail,
"thresh=f", \$::arg_thresh,
"transient", \$::arg_transient,
"dir=s", \$::arg_path,
"dur=f", \$::arg_dur,
"report", \$::arg_report,
"help|?", \$arg_help
) || GetOptionsMessage();
# Print help documentation if user has selected -help
&ListHelp() if (defined $arg_help);
# Listify @::arg_pids
@tmp = ();
if (@::arg_pids) {
@tmp = @::arg_pids; @::arg_pids = ();
foreach my $pid (@tmp) { push @::arg_pids, (split /,/, $pid); }
}
# Listify @::arg_commands
@tmp = ();
if (@::arg_commands) {
@tmp = @::arg_commands; @::arg_commands = ();
foreach my $cmd (@tmp) { push @::arg_commands, (split /,/, $cmd); }
}
# Listify @::arg_exact
@tmp = ();
if (@::arg_exact) {
@tmp = @::arg_exact; @::arg_exact = ();
foreach my $cmd (@tmp) { push @::arg_exact, (split /,/, $cmd); }
}
# Listify @::arg_commands_excl
@tmp = ();
if (@::arg_commands_excl) {
@tmp = @::arg_commands_excl; @::arg_commands_excl = ();
foreach my $cmd (@tmp) { push @::arg_commands_excl, (split /,/, $cmd); }
}
# Give warning messages and usage when parameters are specified incorrectly.
my $cnt = 0;
$cnt++ if (defined $::arg_name);
$cnt++ if (defined $::arg_list);
##$cnt++ if (defined $::arg_all);
# [ JGAULD - maybe add $::arg_match]
if ($cnt > 1) {
warn "$SCRIPT: Input error: cannot specify more than one of {--list} or {--name <pattern>} options.\n";
$fail = 1;
}
if ($fail == 1) {
# touch variables here to make silly warning go away
$::arg_all = ""; $::arg_match = "";
$::arg_name = ""; $::arg_list = ""; $::arg_detail = "";
$::arg_thresh = 0; $::arg_transient = "";
&Usage();
exit 1;
}
# Assume remaining options are filenames
@::arg_files = @ARGV;
# Set defaults for options requiring values
if (!(defined $::arg_thresh)) {
$::arg_thresh = 1.0; # Default to 1.0 FD/d
}
if (!(defined $::arg_dur)) {
$::arg_dur = 7.0; # Default to 7.0 days worth of data
} else {
$::arg_dur = 1.0 if ($::arg_dur < 1.0); # minimum 1 day worth of data
}
$::arg_path ||= $DEFAULT_PATH;
$::arg_detail = 1 if (defined $::arg_report); # print details if 'report' option chosen
}
sub GetOptionsMessage {
# Print out a warning message and then print program usage.
warn "$SCRIPT: Error processing input arguments.\n";
&Usage();
exit 1;
}
sub Usage {
# Print out program usage.
printf "Usage: $SCRIPT file1 file2 file3.gz ...\n";
printf "\t[--list | --all | --name <glob_pattern>]\n";
printf "\t[--pid <pid> ] ...\n";
printf "\t[--cmd <pattern>] ...\n";
printf "\t[--exact <pattern>] ...\n";
printf "\t[--excl <pattern>] ...\n";
printf "\t[--detail]\n";
printf "\t[--thresh <MiB/d>]\n";
printf "\t[--transient]\n";
printf "\t[--dir <path>]\n";
printf "\t[--dur <days>]\n";
printf "\t[--report]\n";
printf "\t[--help | -?]\n";
}
sub ListHelp {
# Print out tool help
printf "$SCRIPT -- parses 'filestats' data and prints processes with hirunner file growth\n";
&Usage();
printf "\nOptional input arguments:\n";
printf " --list : list all available 'file' data\n";
printf " --all : summarize all blades\n";
printf " --name <glob_pattern> : match files with pattern (file globbing allowed if in \"quotes\")\n";
printf " --pid <pid> : match 'pid' (can specify multiple pids)\n";
printf " --cmd <command|pattern> : match command name 'string' pattern (can specify multiple patterns)\n";
printf " --exact <command|pattern> : exact match command name 'string' pattern (can specify multiple patterns)\n";
printf " --excl <command|pattern> : match command name 'string' pattern to exclude (can specify multiple patterns)\n";
printf " --detail : time-series output\n";
printf " --thresh : set threshold for hirunner growth processes : default > 0.005 MiB/d\n";
printf " --transient : include transient processes (i.e., do not filter out short-lived processes)\n";
printf " --dir <path> : path to filestat files : default: $DEFAULT_PATH\n";
printf " --dur <days> : number of days of data to process : default: 7.0\n";
printf " --report : summarize details for hi-runner file growth\n";
printf " --help : this help information\n";
printf "\nfile data storage: %s/%s\n", $DEFAULT_PATH, "<hostname>_<yyyy>-<mm>-<dd>_<hh>00_filestats{.gz}";
printf "(most data files are gzip compressed)\n";
printf "\nExamples:\n";
printf " $SCRIPT --all (i.e., summarize all hosts\n";
printf " $SCRIPT --name 2014-02-22 (i.e., match files containing 2014-02-22)\n";
printf " $SCRIPT --name compute-0 --cmd python (i.e., compute-0, specify process(es))\n";
printf " $SCRIPT --name compute-0 --pid 1 --pid 2 --detail (i.e., slot 1, specify PIDs 1 and 2 time-series)\n";
printf " $SCRIPT --name controller-0 --cmd python --excl blah --detail (i.e., time-series for 'python', but not 'blah')\n";
printf "\nReported Memory Headings:\n";
printf " FD (#) - Total File descriptors for process\n";
printf " U (#) - Read Write files open for process\n";
printf " W (#) - Write Only files open for process\n";
printf " R (#) - Read Only files open for process\n";
printf " TREND - reported in change of Files per day";
printf "\n";
exit 0;
}
# Calculate linear regression coefficients for linear equation, y = a + b*x
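# Using the standard least-squares estimates over the n accumulated sums:
#   b = (n*sum_XY - sum_X*sum_Y) / (n*sum_XX - sum_X**2)
#   a = (sum_Y - b*sum_X) / n
# The code below factors the denominator through s1_sq, the sample variance of x,
# which is algebraically equivalent.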
sub linear_fit {
my ($sum_X, $sum_Y, $sum_XY, $sum_XX, $n) = @_;
my ($a, $b, $s1_sq) = ();
# Prevent doing regression with less than 2 points
return (0,0) if ($n < 2);
$s1_sq = ($sum_XX - $sum_X / $n * $sum_X) / ($n - 1);
# Prevent divide by zero
return (0,0) if ($s1_sq <= 0.0);
$b = ($n * $sum_XY - $sum_X * $sum_Y) / $n / ($n - 1) / $s1_sq;
$a = ($sum_Y - $b * $sum_X)/$n;
return ($a, $b);
}
1;


@ -1,228 +0,0 @@
#!/usr/bin/perl
#Copyright (c) 2016 Wind River Systems, Inc.
#
#SPDX-License-Identifier: Apache-2.0
#
# parse_iostat
#
# Purpose: Parse 'iostat' data and print the per-sample stats columns for a matching device.
#
# Modification history:
# - 2015-Dec-259 - Jim Gauld, prototype created.
use 5.10.0;
use warnings;
use strict;
use Time::Local 'timelocal_nocheck'; # inverse time functions
use File::Basename;
use File::Spec ();
use Data::Dumper;
my $SCRIPT = basename($0);
# Timestamp variables
my ($wday, $month, $day, $hh, $mm, $ss, $yy, $ns) = ();
my @T0 = localtime();
my $yy00 = 1900 + $T0[5];
my $cc00 = 100*int($yy00/100);
# Argument list parameters
our ($arg_device, @arg_files) = ();
# Determine location of gunzip binary
our $GUNZIP = which('gunzip');
if (!(defined $GUNZIP)) {
die "*error* cannot find 'gunzip' binary. Cannot continue.\n";
}
our $BUNZIP2 = which('bunzip2');
if (!(defined $BUNZIP2)) {
die "*error* cannot find 'bunzip2' binary. Cannot continue.\n";
}
# Parse input arguments and print tool usage if necessary
&get_parse_iostat_args(\$arg_device, \@arg_files);
# Compile regular expressions
my $re_dev = qr/\Q$::arg_device\E/;
foreach my $file (@ARGV) {
print "processing file: $file\n";
if ($file =~ /\.gz$/) {
open(FILE, "$::GUNZIP -c $file |") || die "Cannot open file: $file ($!)\n";
} elsif ($file =~ /\.bz2$/) {
open(FILE, "$::BUNZIP2 -c $file |") || die "Cannot open file: $file ($!)\n";
} else {
open(FILE, $file) || die "Cannot open file: $file ($!)\n";
}
#my ($timestamp, $timestamp0, $time_fmt) = ("", "", "");
my ($field, $idx, $len);
my $found = 0;
my @dev_x;
my @dev_T;
my $dev_N;
my %dev_H;
my ($time_fmt) = ("");
# Wipe out data and statistics per file.
my (%data, %stats, %series) = ();
my $first = 1;
READ_LOOP: while($_ = <FILE>) {
s/[\0\e\f\r\a]//g; chomp; # strip control characters if any
# timestamp
# 12/23/15 18:56:50
if (/^(\d{2})\/(\d{2})\/(\d{2})\s+(\d{2}):(\d{2}):(\d{2})/) { # ignore timezone
$month = $1; $day = $2; $yy = $3 + $cc00; $hh = $4; $mm = $5; $ss = $6; $ns = 0;
#print "TIME: $_";
$found = 0;
next;
}
if (/^avg-cpu:/) {
$_ = <FILE>; $_ = <FILE>;
#print "AVG: $_";
next
}
if (/^Device:/) {
#print "DEV: $_\n";
@dev_T = split(/\s+/, $_); shift @dev_T if (/^\s+/);
$dev_N = scalar(@dev_T);
# determine lower and upper indices for numerical fields
for ($idx=0; $idx < $dev_N; $idx++) {
$field = $dev_T[$idx];
$dev_H{ $field } = $idx;
}
# Read in each device
DEV_LOOP: while($_ = <FILE>) {
s/[\0\e\f\r\a]//g; chomp; # strip control characters if any
last DEV_LOOP if (/^$/);
if (/\b$re_dev\b/) {
@dev_x = split(/\s+/, $_); shift @dev_x if (/^\s+/);
$len = scalar(@dev_x);
$found = 1;
}
}
}
# Print line of data if we have it
if ($found == 1) {
# Print header (per file)
if ($first == 1) {
printf "%4s-%2s-%2s %2s:%2s:%2s ", 'yyyy', 'mm', 'dd', 'hh', 'mm', 'ss';
printf "%-8s ", $dev_T[0];
for ($idx=1; $idx < $dev_N; $idx++) {
printf "%9s ", $dev_T[$idx];
}
printf "\n";
$first = 0;
}
printf "%04d-%02d-%02d %02d:%02d:%02d ", $yy, $month, $day, $hh, $mm, $ss;
printf "%-8s ", $dev_x[0];
for ($idx=1; $idx < $dev_N; $idx++) {
printf "%9.2f ", $dev_x[$idx];
}
printf "\n";
}
}
# Print blank line between files
print "\n";
}
exit 0;
#######################################################################################################################
# Lightweight which(), derived from CPAN File::Which
sub which {
my ($exec) = @_;
return undef unless $exec;
my $all = wantarray;
my @results = ();
my @path = File::Spec->path;
foreach my $file ( map { File::Spec->catfile($_, $exec) } @path ) {
next if -d $file;
if (-x _) { return $file unless $all; push @results, $file; }
}
$all ? return @results : return undef;
}
# Process "parse_memory" command line arguments and set defaults
sub get_parse_iostat_args {
# Returned parameters
(local *::arg_device, local *::arg_files) = @_;
# Local variables
my ($fail, $arg_help) = ();
my @tmp = ();
# Use the Argument processing module
use Getopt::Long;
# Print usage if no arguments
if (!@ARGV) {
&Usage();
exit 0;
}
# Process input arguments
$fail = 0;
GetOptions(
"device=s", \$::arg_device,
"help|?", \$arg_help
) || GetOptionsMessage();
# Print help documentation if user has selected -help
&ListHelp() if (defined $arg_help);
# Give warning messages and usage when parameters are specified incorrectly.
if (!( defined $::arg_device)) {
warn "$SCRIPT: Input error: must specify --device <dev>n.\n";
$fail = 1;
}
if ($fail == 1) {
# touch variables here to make silly warning go away
&Usage();
exit 1;
}
$::arg_device ||= 'sda';
# Assume remaining options are filenames
@::arg_files = @ARGV;
}
sub GetOptionsMessage {
# Print out a warning message and then print program usage.
warn "$SCRIPT: Error processing input arguments.\n";
&Usage();
exit 1;
}
sub Usage {
# Print out program usage.
printf "Usage: $SCRIPT OPTIONS file1 file2 file3.gz ...\n";
printf "\t[--device <pattern>] ...\n";
printf "\t[--help | -?]\n";
}
sub ListHelp {
# Print out tool help
printf "$SCRIPT -- parses 'iostat' data for matching device name\n";
&Usage();
printf "\nOptional input arguments:\n";
printf " --device <pattern> : match device name\n";
printf " --help : this help information\n";
printf "\n";
exit 0;
}
1;


@ -1,229 +0,0 @@
#!/usr/bin/perl
#Copyright (c) 2016 Wind River Systems, Inc.
#
#SPDX-License-Identifier: Apache-2.0
#
# parse_netstats
#
# Purpose: Summarize networking stats for each interface by parsing
# output from /proc/net/dev. Summarize rx and tx packet rates (pkt/s),
# bandwidth (Mbit/s), and bytes per packet (B/pkt).
# Calculates sample average and sample standard deviation.
#
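# Rates are computed from successive /proc/net/dev samples, e.g. for receive:
#   rx_pps  = (rx_pkts  - rx_pkts_prev)  / dt
#   rx_Mbps = (rx_bytes - rx_bytes_prev) / dt / (1.0E6/8)
#   rx_Bpp  = (rx_bytes - rx_bytes_prev) / (rx_pkts - rx_pkts_prev)
# and likewise for transmit.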
# Modification history:
# - 2014-Feb-25 - Jim Gauld, prototype created.
use 5.10.0;
use warnings;
use strict;
use File::Spec ();
use Time::Local 'timelocal_nocheck'; # inverse time functions
# Timestamp variables
my ($wday, $month, $day, $hh, $mm, $ss, $yy, $ns) = ();
# Determine location of gunzip binary
our $GUNZIP = which('gunzip');
if (!(defined $GUNZIP)) {
die "*error* cannot find 'gunzip' binary. Cannot continue.\n";
}
our $BUNZIP2 = which('bunzip2');
if (!(defined $BUNZIP2)) {
die "*error* cannot find 'bunzip2' binary. Cannot continue.\n";
}
foreach my $file (@ARGV) {
print "processing file: $file\n";
if ($file =~ /\.gz$/) {
open(FILE, "$::GUNZIP -c $file |") || die "Cannot open file: $file ($!)\n";
} elsif ($file =~ /\.bz2$/) {
open(FILE, "$::BUNZIP2 -c $file |") || die "Cannot open file: $file ($!)\n";
} else {
open(FILE, $file) || die "Cannot open file: $file ($!)\n";
}
my ($epoc, $epoc0, $dt, $uptime) = (0,0,0,0);
my ($iface,$timestamp,$timestamp0, $time_fmt) = ("", "", "", "");
my ($rx,$tx,$rx_B,$tx_B, $rx0,$tx0,$rx_B0,$tx_B0) = (0,0,0,0, 0,0,0,0);
my ($rx_pps, $tx_pps, $rx_Mbps, $tx_Mbps, $rx_Bpp, $tx_Bpp);
my $WARNING = "";
my $Mpb = 1.0E6/8;
# Wipe out data and statistics per file.
my (%data, %stats, %series) = ();
# Print header (per file)
printf "%18s %5s | %9s %12s %9s | %9s %12s %9s | %s\n",
"interface", "dt(s)",
"rx(pkt/s)", "rx(Mbps)", "rx(B/pkt)",
"tx(pkt/s)", "tx(Mbps)", "tx(B/pkt)",
"date/time";
my @var_list = ('dt', 'rx_pps', 'tx_pps', 'rx_Mbps', 'tx_Mbps', 'rx_Bpp', 'tx_Bpp');
READ_LOOP: while($_ = <FILE>) {
s/[\0\e\f\r\a]//g; chomp; # strip control characters if any
# Hi-resolution timestamp
# time: Tue 2009-04-07 18:17:05.074387000 UTC +0000 uptime: 1153.09 897.13
if (/time:\s+(\w+)\s+(\d{4})-(\d{2})-(\d{2})\s+(\d{2}):(\d{2}):(\d{2})\.(\d{9})\s+\w+\s+\S+\s+uptime:\s+(\S+)\s+/) { # ignore timezone
$wday = $1; $yy = $2; $month = $3; $day = $4; $hh = $5; $mm = $6; $ss = $7; $ns = $8; $uptime = $9;
$timestamp0 = $timestamp; $epoc0 = $epoc; # store previous
$timestamp = [($wday,$month,$day,$hh,$mm,$ss,$yy,$ns)];
$epoc = $9;
$dt = $epoc - $epoc0;
next;
}
# Inter-| Receive | Transmit
# face |bytes packets errs drop fifo frame compressed multicast|bytes packets errs drop fifo colls carrier compressed
if (/\s*(\S+):\s*(\d+)\s+(\d+)\s+\d+\s+\d+\s+\d+\s+\d+\s+\d+\s+\d+\s+(\d+)\s+(\d+)\s+\d+\s+\d+\s+\d+\s+\d+\s+\d+\s+\d+/) {
$iface = $1; $rx_B = $2; $rx = $3; $tx_B = $4; $tx = $5;
($rx0,$tx0,$rx_B0,$tx_B0) = ($rx,$tx,$rx_B,$tx_B);
if (!(defined $data{$iface}{'rx0'})) {
$data{$iface}{'rx0'} = 0; $data{$iface}{'tx0'} = 0;
$data{$iface}{'rx_B0'} = 0; $data{$iface}{'tx_B0'} = 0;
foreach my $item (@var_list) {
$stats{$iface}{$item}{'sumX'} = 0.0;
$stats{$iface}{$item}{'sumX2'} = 0.0;
$stats{$iface}{$item}{'N'} = 0;
$stats{$iface}{$item}{'avg'} = 0;
$stats{$iface}{$item}{'stdev'} = 0;
}
} else {
$data{$iface}{'rx0'} = $data{$iface}{'rx'};
$data{$iface}{'tx0'} = $data{$iface}{'tx'};
$data{$iface}{'rx_B0'} = $data{$iface}{'rx_B'};
$data{$iface}{'tx_B0'} = $data{$iface}{'tx_B'};
}
# Save current measurement
$data{$iface}{'rx'} = $rx;
$data{$iface}{'tx'} = $tx;
$data{$iface}{'rx_B'} = $rx_B;
$data{$iface}{'tx_B'} = $tx_B;
$data{$iface}{'dt'} = $dt;
if (($dt > 0) && (($data{$iface}{'rx0'} > 0) || ($data{$iface}{'tx0'} > 0))) {
$data{$iface}{'rx_pps'} = ($data{$iface}{'rx'} - $data{$iface}{'rx0'})/$dt;
$data{$iface}{'tx_pps'} = ($data{$iface}{'tx'} - $data{$iface}{'tx0'})/$dt;
$data{$iface}{'rx_Mbps'} = ($data{$iface}{'rx_B'} - $data{$iface}{'rx_B0'})/$dt/$Mpb;
$data{$iface}{'tx_Mbps'} = ($data{$iface}{'tx_B'} - $data{$iface}{'tx_B0'})/$dt/$Mpb;
$data{$iface}{'rx_pps'} = ($data{$iface}{'rx_pps'} < 0.0) ? -1.0 : $data{$iface}{'rx_pps'};
$data{$iface}{'tx_pps'} = ($data{$iface}{'tx_pps'} < 0.0) ? -1.0 : $data{$iface}{'tx_pps'};
$data{$iface}{'rx_Mbps'} = ($data{$iface}{'rx_Mbps'} < 0.0) ? -1.0 : $data{$iface}{'rx_Mbps'};
$data{$iface}{'tx_Mbps'} = ($data{$iface}{'tx_Mbps'} < 0.0) ? -1.0 : $data{$iface}{'tx_Mbps'};
} else {
$data{$iface}{'rx_pps'} = -1;
$data{$iface}{'tx_pps'} = -1;
$data{$iface}{'rx_Mbps'} = -1;
$data{$iface}{'tx_Mbps'} = -1;
}
if (($data{$iface}{'rx0'} > 0) && ($data{$iface}{'rx_pps'} > 0) && ($data{$iface}{'rx_Mbps'} > 0)) {
$data{$iface}{'rx_Bpp'} = ($data{$iface}{'rx_B'} - $data{$iface}{'rx_B0'}) / ($data{$iface}{'rx'} - $data{$iface}{'rx0'});
} elsif (($data{$iface}{'rx_Mbps'} != -1) && (abs($data{$iface}{'rx_pps'}) < 1.0E6)) {
$data{$iface}{'rx_Bpp'} = 0.0;
} else {
$data{$iface}{'rx_Bpp'} = -1;
}
if (($data{$iface}{'tx0'} > 0) && ($data{$iface}{'tx_pps'} > 0) && ($data{$iface}{'tx_Mbps'} > 0)) {
$data{$iface}{'tx_Bpp'} = ($data{$iface}{'tx_B'} - $data{$iface}{'tx_B0'}) / ($data{$iface}{'tx'} - $data{$iface}{'tx0'});
} elsif (($data{$iface}{'tx_Mbps'} != -1) && (abs($data{$iface}{'tx_pps'}) < 1.0E6)) {
$data{$iface}{'tx_Bpp'} = 0.0;
} else {
$data{$iface}{'tx_Bpp'} = -1;
}
if (($dt > 0) && (($data{$iface}{'rx0'} > 0) || ($data{$iface}{'tx0'} > 0))) {
foreach my $item (@var_list) {
if ($data{$iface}{$item} >= 0.0) {
$stats{$iface}{$item}{'sumX'} += $data{$iface}{$item};
$stats{$iface}{$item}{'sumX2'} += ($data{$iface}{$item} * $data{$iface}{$item});
$stats{$iface}{$item}{'N'} += 1;
}
}
push @{$series{$iface}}, [ ($dt,
$data{$iface}{'rx_pps'}, $data{$iface}{'rx_Mbps'}, $data{$iface}{'rx_Bpp'},
$data{$iface}{'tx_pps'}, $data{$iface}{'tx_Mbps'}, $data{$iface}{'tx_Bpp'},
$timestamp)];
}
next;
}
}
foreach $iface (keys %series) {
while (my $elem = shift @{$series{$iface}}) {
($dt, $data{$iface}{'rx_pps'}, $data{$iface}{'rx_Mbps'}, $data{$iface}{'rx_Bpp'},
$data{$iface}{'tx_pps'}, $data{$iface}{'tx_Mbps'}, $data{$iface}{'tx_Bpp'}, $timestamp) = @{$elem};
($wday,$month,$day,$hh,$mm,$ss,$yy,$ns) = @$timestamp;
$time_fmt = sprintf("%04d-%02d-%02d %02d:%02d:%02d.%03d", $yy, $month, $day, $hh, $mm, $ss, $ns/1.0E6);
printf "%18s %5.2f | %9.1f %12.5f %9.0f | %9.1f %12.5f %9.0f | %21s\n",
$iface, $dt,
$data{$iface}{'rx_pps'}, $data{$iface}{'rx_Mbps'}, $data{$iface}{'rx_Bpp'},
$data{$iface}{'tx_pps'}, $data{$iface}{'tx_Mbps'}, $data{$iface}{'tx_Bpp'},
$time_fmt;
}
# Calculate final stats
foreach my $item (@var_list) {
# Calculate sample mean
if ($stats{$iface}{$item}{'N'} > 0) {
$stats{$iface}{$item}{'avg'} = $stats{$iface}{$item}{'sumX'} / $stats{$iface}{$item}{'N'};
} else {
$stats{$iface}{$item}{'avg'} = 0.0;
}
# Calculate sample standard deviation
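# Computed from the running sums over N samples:
#   variance = (sumX2 - sumX**2/N) / (N - 1), stdev = sqrt(variance)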
if ($stats{$iface}{$item}{'N'} > 2) {
$stats{$iface}{$item}{'stdev'} = ($stats{$iface}{$item}{'sumX2'}
- ($stats{$iface}{$item}{'sumX'}**2)/$stats{$iface}{$item}{'N'})
/ ($stats{$iface}{$item}{'N'} - 1);
if ($stats{$iface}{$item}{'stdev'} > 0.0) {
$stats{$iface}{$item}{'stdev'} = sqrt( $stats{$iface}{$item}{'stdev'} );
} else {
$stats{$iface}{$item}{'stdev'} = 0.0;
}
} else {
$stats{$iface}{$item}{'stdev'} = 0.0;
}
}
# Print summary (average and standard deviation)
printf "%18s %5s | %9s %12s %9s | %9s %12s %9s | %s\n",
'-'x18, '-'x5, '-'x9, '-'x12, '-'x9, '-'x9, '-'x12, '-'x9, '-'x31;
printf "%18s %5.2f | %9.1f %12.5f %9.0f | %9.1f %12.5f %9.0f | %21s %s\n",
$iface, $stats{$iface}{'dt'}{'avg'},
$stats{$iface}{'rx_pps'}{'avg'}, $stats{$iface}{'rx_Mbps'}{'avg'}, $stats{$iface}{'rx_Bpp'}{'avg'},
$stats{$iface}{'tx_pps'}{'avg'}, $stats{$iface}{'tx_Mbps'}{'avg'}, $stats{$iface}{'tx_Bpp'}{'avg'},
$time_fmt, 'Average';
printf "%18s %5.1f | %9.1f %12.5f %9.0f | %9.1f %12.5f %9.0f | %21s %s\n",
$iface, $stats{$iface}{'dt'}{'stdev'},
$stats{$iface}{'rx_pps'}{'stdev'}, $stats{$iface}{'rx_Mbps'}{'stdev'}, $stats{$iface}{'rx_Bpp'}{'stdev'},
$stats{$iface}{'tx_pps'}{'stdev'}, $stats{$iface}{'tx_Mbps'}{'stdev'}, $stats{$iface}{'tx_Bpp'}{'stdev'},
$time_fmt, 'StdDev';
print "\n";
}
# Print blank line between files
if ($WARNING) {print $WARNING, "\n";};
print "\n";
}
exit 0;
#######################################################################################################################
# Lightweight which(), derived from CPAN File::Which
sub which {
my ($exec) = @_;
return undef unless $exec;
my $all = wantarray;
my @results = ();
my @path = File::Spec->path;
foreach my $file ( map { File::Spec->catfile($_, $exec) } @path ) {
next if -d $file;
if (-x _) { return $file unless $all; push @results, $file; }
}
$all ? return @results : return undef;
}
1;


@ -1,202 +0,0 @@
#!/usr/bin/perl
#Copyright (c) 2016 Wind River Systems, Inc.
#
#SPDX-License-Identifier: Apache-2.0
#
# parse_postgres
#
# Purpose: Summarize various postgres stats.
#
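# Output: per-database size CSVs and per-table CSVs are appended under /tmp
# (/tmp/<db>_size.csv and /tmp/<db>_<table>.csv), one row per parsed sample.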
# Modification history:
# - 2014-Mar-10 - Jim Gauld, prototype created.
use 5.10.0;
use warnings;
use strict;
use File::Spec ();
use Time::Local 'timelocal_nocheck'; # inverse time functions
use constant SI_k => 1.0E3;
use constant SI_M => 1.0E6;
use constant SI_G => 1.0E9;
use constant Ki => 1024.0;
use constant Mi => 1024.0*1024.0;
use constant Gi => 1024.0*1024.0*1024.0;
# Timestamp variables
my ($wday, $month, $day, $hh, $mm, $ss, $yy, $ns) = ();
# Determine location of gunzip binary
our $GUNZIP = which('gunzip');
if (!(defined $GUNZIP)) {
die "*error* cannot find 'gunzip' binary. Cannot continue.\n";
}
our $BUNZIP2 = which('bunzip2');
if (!(defined $BUNZIP2)) {
die "*error* cannot find 'bunzip2' binary. Cannot continue.\n";
}
foreach my $file (@ARGV) {
print "processing file: $file\n";
if ($file =~ /\.gz$/) {
open(FILE, "$::GUNZIP -c $file |") || die "Cannot open file: $file ($!)\n";
} elsif ($file =~ /\.bz2$/) {
open(FILE, "$::BUNZIP2 -c $file |") || die "Cannot open file: $file ($!)\n";
} else {
open(FILE, $file) || die "Cannot open file: $file ($!)\n";
}
my ($epoc, $epoc0, $dt, $uptime) = (0,0,0,0);
my ($iface,$timestamp,$timestamp0, $time_fmt) = ("", "", "", "");
my ($db, $db_size);
my ($schema, $tbl, $unit) = ('-','-','-');
my ($tbl_size, $idx_size, $tot_size, $live_tuples, $dead_tuples) = (0,0,0,0,0);
# Wipe out data and statistics per file.
#my (%data, %stats, %series) = ();
#my @var_list = ('dt', 'rx_pps', 'tx_pps', 'rx_Mbps', 'tx_Mbps', 'rx_Bpp', 'tx_Bpp');
READ_LOOP: while($_ = <FILE>) {
s/[\0\e\f\r\a]//g; chomp; # strip control characters if any
# Hi-resolution timestamp
# time: Tue 2009-04-07 18:17:05.074387000 UTC +0000 uptime: 1153.09 897.13
if (/time:\s+(\w+)\s+(\d{4})-(\d{2})-(\d{2})\s+(\d{2}):(\d{2}):(\d{2})\.(\d{9})\s+\w+\s+\S+\s+uptime:\s+(\S+)\s+/) { # ignore timezone
$wday = $1; $yy = $2; $month = $3; $day = $4; $hh = $5; $mm = $6; $ss = $7; $ns = $8; $uptime = $9;
$timestamp0 = $timestamp; $epoc0 = $epoc; # store previous
$timestamp = [($wday,$month,$day,$hh,$mm,$ss,$yy,$ns)];
$epoc = $9;
$dt = $epoc - $epoc0;
next;
}
if (/# postgres database sizes/) {
READ_SIZES: while($_ = <FILE>) {
s/[\0\e\f\r\a]//g; chomp; # strip control characters if any
last READ_SIZES if (/^$/);
# CGRIFFIN update
$time_fmt = sprintf("%04d-%02d-%02d %02d:%02d:%02d.%03d", $yy, $month, $day, $hh, $mm, $ss, $ns/1.0E6);
if (/^\s+(\S+)\s+\|\s+(\d+)\s+\|\s+\d+\s+(GB|MB|kB|bytes)/) {
$db = $1; $db_size = $2;
printf "uptime: $uptime, db: $db, db_size: $db_size\n";
my $csv = '/tmp/'.$db.'_size'.'.csv';
if ( !(-e $csv)) {
open(CSV, "> $csv") || die "Could not open file: $csv ($!)";
print CSV join(',', 'date/time', 'db_size'), "\n";
} else {
open(CSV, ">> $csv") || die "Could not open file: $csv ($!)";
}
print CSV join(',', $time_fmt, $db_size), "\n";
close(CSV);
}
}
}
if (/# postgres database: (\S+)/) {
$db = $1;
print "FOUND: db = $db\n";
READ_TABLES: while($_ = <FILE>) {
s/[\0\e\f\r\a]//g; chomp; # strip control characters if any
last READ_TABLES if (/^$/);
if (/^\s+(\S+)\s+\|\s+(\S+)\s+\|\s+(\d+)\s+(GB|MB|kB|bytes)\s+\|\s+(\d+)\s+(GB|MB|kB|bytes)\s+\|\s+(\d+)\s+(GB|MB|kB|bytes)\s+\|\s+(\d+)\s+\|\s+(\d+)/) {
$schema = $1; $tbl = $2;
$tbl_size = $3; $unit = $4;
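# Normalize the reported table/index/total sizes to bytes using SI multipliers
# (GB, MB, kB) so the CSV columns are unit-consistent.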
if ($unit eq 'GB') {
$tbl_size *= SI_G;
} elsif ($unit eq 'MB') {
$tbl_size *= SI_M;
} elsif ($unit eq 'kB') {
$tbl_size *= SI_k;
}
$idx_size = $5; $unit = $6;
if ($unit eq 'GB') {
$idx_size *= SI_G;
} elsif ($unit eq 'MB') {
$idx_size *= SI_M;
} elsif ($unit eq 'kB') {
$idx_size *= SI_k;
}
$tot_size = $7; $unit = $8;
if ($unit eq 'GB') {
$tot_size *= SI_G;
} elsif ($unit eq 'MB') {
$tot_size *= SI_M;
} elsif ($unit eq 'kB') {
$tot_size *= SI_k;
}
$live_tuples = $9; $dead_tuples = $10;
# CGRIFFIN update: only include tables of notable size for Ceilometer, Heat and Nova
if (($db eq 'ceilometer') && ($tbl =~ /\bmeter|metadata_text|metadata_int|sample|resource|trait_text|trait_int|trait_datetime|event\b/)) {
$time_fmt = sprintf("%04d-%02d-%02d %02d:%02d:%02d.%03d", $yy, $month, $day, $hh, $mm, $ss, $ns/1.0E6);
#print "s=$schema, t=$tbl, sz=$tbl_size, isz=$idx_size, tsz=$tot_size, live=$live_tuples,dead=$dead_tuples\n";
my $csv = '/tmp/'.$db.'_'.$tbl.'.csv';
if ( !(-e $csv)) {
open(CSV, "> $csv") || die "Could not open file: $csv ($!)";
print CSV join(',', 'date/time', 'schema', 'table', 'table_size', 'index_size', 'total_size', 'live_tuples', 'dead_tuples'), "\n";
} else {
open(CSV, ">> $csv") || die "Could not open file: $csv ($!)";
}
print CSV join(',', $time_fmt, $schema, $tbl, $tbl_size, $idx_size, $tot_size, $live_tuples, $dead_tuples), "\n";
close(CSV);
}
if (($db eq 'heat') && ($tbl =~ /\bservice|raw_template|event|stack\b/)) {
$time_fmt = sprintf("%04d-%02d-%02d %02d:%02d:%02d.%03d", $yy, $month, $day, $hh, $mm, $ss, $ns/1.0E6);
#print "s=$schema, t=$tbl, sz=$tbl_size, isz=$idx_size, tsz=$tot_size, live=$live_tuples,dead=$dead_tuples\n";
my $csv = '/tmp/'.$db.'_'.$tbl.'.csv';
if ( !(-e $csv)) {
open(CSV, "> $csv") || die "Could not open file: $csv ($!)";
print CSV join(',', 'date/time', 'schema', 'table', 'table_size', 'index_size', 'total_size', 'live_tuples', 'dead_tuples'), "\n";
} else {
open(CSV, ">> $csv") || die "Could not open file: $csv ($!)";
}
print CSV join(',', $time_fmt, $schema, $tbl, $tbl_size, $idx_size, $tot_size, $live_tuples, $dead_tuples), "\n";
close(CSV);
}
if (($db eq 'nova') && ($tbl =~ /\binstance_actions_events|instance_faults|instance_actions|instance_extra|instances|reservations|instance_system_metadata|instance_info_caches|block_device_mapping|compute_nodes|pci_devices|instance_id_mappings|migrations|services|task_log|aggregate_hosts\b/)) {
$time_fmt = sprintf("%04d-%02d-%02d %02d:%02d:%02d.%03d", $yy, $month, $day, $hh, $mm, $ss, $ns/1.0E6);
#print "s=$schema, t=$tbl, sz=$tbl_size, isz=$idx_size, tsz=$tot_size, live=$live_tuples,dead=$dead_tuples\n";
my $csv = '/tmp/'.$db.'_'.$tbl.'.csv';
if ( !(-e $csv)) {
open(CSV, "> $csv") || die "Could not open file: $csv ($!)";
print CSV join(',', 'date/time', 'schema', 'table', 'table_size', 'index_size', 'total_size', 'live_tuples', 'dead_tuples'), "\n";
} else {
open(CSV, ">> $csv") || die "Could not open file: $csv ($!)";
}
print CSV join(',', $time_fmt, $schema, $tbl, $tbl_size, $idx_size, $tot_size, $live_tuples, $dead_tuples), "\n";
close(CSV);
}
}
}
}
}
print "\n";
}
exit 0;
#######################################################################################################################
# Lightweight which(), derived from CPAN File::Which
sub which {
my ($exec) = @_;
return undef unless $exec;
my $all = wantarray;
my @results = ();
my @path = File::Spec->path;
foreach my $file ( map { File::Spec->catfile($_, $exec) } @path ) {
next if -d $file;
if (-x _) { return $file unless $all; push @results, $file; }
}
$all ? return @results : return undef;
}
1;


@ -1,88 +0,0 @@
# The host config file is referenced in various parser scripts. It contains host specific
# parameters.
############################################################################################
# PLATFORM_CPU_LIST config parameter is used to generate memstats csv for each host
#
# For CPU occupancy, the cores that are used for platform can be determined as followed:
# Controller & storage: all cores
# CPE and compute: see file /etc/nova/compute_extended.conf. For instance
# PLATFORM_CPU_LIST="0,20,2,22" means the sum of cores 0, 20, 2 and 22 represents platform
# occupancy.
#
# The platform cores can also be determined via the command
# >system host-cpu-list <host-name>
#
# When PLATFORM_CPU_LIST value is not set, it is assumed all cores are reserved for platform
# use.
#
# Sample configuration below is for one of the lab computes
############################################################################################
PLATFORM_CPU_LIST="0 22"
############################################################################################
# SERVICE_LIST config parameter is used to generate process level schedtop csv
#
# This list can change from one release to another, so please keep it up to date
############################################################################################
# Controller services that are of interest are:
# AODH: aodh-api, aodh-listener, aodh-notifier, aodh-evaluator
# CEILOMETER: ceilometer-api, ceilometer-collector, ceilometer-agent-notification, ceilometer-polling, ceilometer-mem-db
# CEPH: ceph-mon, ceph-rest, ceph-alarm-manager
# CINDER: cinder-api, cinder-volume, cinder-scheduler
# GLANCE: glance-api, glance-registry
# HEAT: heat-api, heat-engine
# (ignore heat-api-cfn, heat-api-cloudwatch)
# KEYSTONE: keystone-all
# NEUTRON: neutron-server
# NOVA: nova-api, nova-scheduler, nova-conductor
# (ignore nova-consoleauth, nova-api-proxy, nova-novncproxy)
# HORIZON: openstack-dashboard
# SYSINV: sysinv-api, sysinv-agent, sysinv-conductor
# OTHERS: postgres, fmManager, libvirtd, rabbitmq-server, lighttpd, dnsmasq, beam.smp
#
# Compute services that are of interest are:
# VSWITCH: vswitch
# NOVA: nova-compute
# NEUTRON: neutron-dhcp-agent, neutron-metadata-agent
#
# Storage services that are of interest are:
# CEPH: ceph-mon, ceph-osd
#
# Sample configuration below is for the controller host
############################################################################################
SERVICE_LIST="aodh-api aodh-listener aodh-notifier aodh-evaluator ceilometer-api ceilometer-collector ceilometer-agent-notification ceilometer-polling ceilometer-mem-db ceph-mon ceph-rest ceph-alarm-manager cinder-api cinder-volume cinder-scheduler glance-api glance-registry heat-api heat-engine keystone-all neutron-server nova-api nova-scheduler nova-conductor openstack_dashboard sysinv-api sysinv-agent sysinv-conductor postgres beam.smp libvirtd rabbitmq-server fmManager lighttpd dnsmasq"
############################################################################################
# NETSTATS_INTERFACE_LIST config parameter is used to generate netstats csv
#
# Sample configuration below is for one of the lab controllers
# Either cat the /proc/net/dev file or inspect one of the collected netstats bz2 files
# (e.g. bzcat controller-0_2016-11-23_0546_netstats.bz2 | less -S -#10) to see the list of
# interfaces configured for the host. Those interfaces with all 0 values can be skipped.
############################################################################################
NETSTATS_INTERFACE_LIST="eno1 bond0 ens801f0 ens801f1 bond0.109"
############################################################################################
# IOSTATS_DEVICE_LIST config parameter is used to generate iostats csv
#
# Sample configuration below is for one of the lab controllers
#
# Inspect one of the collected iostat.bz2 files
# (e.g. bzcat controller-0_2016-11-23_0955_iostat.bz2 | less -S -#10) to see the list of
# devices configured for the host.
############################################################################################
IOSTATS_DEVICE_LIST="sda sdb dm-0 dm-1 dm-2 dm-3 dm-4 dm-5 dm-6 dm-7 dm-8 drbd0 drbd1 drbd2 drbd3"
############################################################################################
# DISKSTATS_FILESYSTEM_LIST config parameter is used to generate diskstats csv
# Use "df -l" command to see the list of partitions
#
# Sample configuration below is for one of the lab controllers. The format for each fs item
# is <filesystem-name>|<mount-name>.
#
# Either use "df -l" command to see the filesystem list or inspect one of the collected
# diskstats.bz2 files (e.g. bzcat controller-0_2016-11-23_1057_diskstats.bz2 | less -S -#10)
# to see the list of filesystems configured for the host.
############################################################################################
DISKSTATS_FILESYSTEM_LIST="/dev/sda3|/ /dev/mapper/cgts--vg-scratch--lv|/scratch /dev/mapper/cgts--vg-log--lv|/var/log /dev/mapper/cgts--vg-backup--lv|/opt/backups /dev/mapper/cgts--vg-ceph--mon--lv|/var/lib/ceph/mon /dev/mapper/cgts--vg-img--conversions--lv|/opt/img-conversions /dev/drbd2|/opt/platform /dev/drbd0|/var/lib/postgresql /dev/drbd3|/opt/cgcs /dev/drbd1|/var/lib/rabbitmq"


@ -1,100 +0,0 @@
# The lab config file is referenced in various data download and parser scripts.
# This file is configured for lab WCP35-60, a 2-4-20 system, and serves as a sample.
############################################################################################
# LAB CONFIGURATIONS
#
# Sample configuration below is for WCP35-60 (2 controllers, 4 storages and 20 computes)
############################################################################################
# Change the controller IPs and list of hosts to reflect the system under test.
CONTROLLER0_IP="128.224.150.232"
CONTROLLER1_IP="128.224.150.233"
CONTROLLER_LIST="controller-0 controller-1"
STORAGE_LIST="storage-0 storage-1 storage-2 storage-3"
COMPUTE_LIST="compute-0 compute-1 compute-2 compute-3 compute-4 compute-5 compute-6 compute-7 compute-8 compute-9 compute-10 compute-11 compute-12 compute-13 compute-14 compute-15 compute-16 compute-17 compute-18 compute-19"
INFLUX_HOST="localhost"
INFLUX_PORT="8086"
INFLUX_DB=""
############################################################################################
# OUTPUT FILE CONFIGURATION
############################################################################################
# Update list of output file as needed. This is used in the cleanup-uncompressed.sh script
FILE_LIST="ceph diskstats filestats iostat memstats memtop netstats occtop postgres rabbitmq schedtop top vswitch"
############################################################################################
# PARSING CONFIGURATIONS
############################################################################################
# If there is a large number of computes, they need to be parsed one batch at a time,
# otherwise, the analysis server will be brought down to a crawl.
BATCH_SIZE=25
# To see the list of databases
# >sudo su postgres
# >psql
# >\l
# Can skip template0 and template1
#
# Currently the Excel file that produces postgres connection charts expects the following
# databases listed in the specified order:
# DATABASE_LIST="cinder glance keystone nova neutron ceilometer heat sysinv aodh postgres nova_api"
#
# which is not in the same order as psql \l command. Please keep the DATABASE_LIST setting
# in the conf file empty until this is corrected.
DATABASE_LIST=""
# Below is a sample configuration of TABLE_LIST which is referenced in parsing postgres stats
# step (see parse-all.sh script). It contains the major tables of services that showed either
# continuous/abnormal database growth in the past (nova, heat and ceilometer). If set, the parser
# will generate the detailed stats for the specified tables.
#
# To see the list of tables for a particular service (e.g. ceilometer)
# >sudo su postgres
# >psql
# >\c ceilometer
# >\dt+
#
# The script parse_postgres currently supports all tables in the TABLE_LIST below. Prior to
# adding new tables to this list, parse_postgres script must be updated first.
# TODO: Update parse_postgres to enable table list configuration
TABLE_LIST="ceilometer_meter ceilometer_metadata_text ceilometer_metadata_int \
ceilometer_sample ceilometer_resource ceilometer_trait_text \
ceilometer_trait_int ceilometer_trait_datetime ceilometer_event \
heat_service heat_raw_template heat_event heat_stack \
nova_instance_actions_events nova_instance_faults nova_instance_actions \
nova_instance_extra nova_instances nova_reservations \
nova_instance_system_metadata nova_instance_info_caches \
nova_block_device_mapping nova_compute_nodes nova_pci_devices \
nova_instance_id_mappings nova_migrations nova_services \
nova_task_log nova_aggregate_hosts"
# Below is a sample configuration of RABBITMQ queues which is referenced in the rabbitmq stats parsing
# step (see parse-all.sh script). If set, the parser will generate the detailed stats for the specified
# queues.
#
# To see the list of rabbitmq queues
# >sudo rabbitmqctl list_queues
#
# To see the list of queues in more details
# >sudo rabbitmqctl list_queues messages name pid messages_ready messages_unacknowledged memory consumers
#
# The notifications.info queue is the AVS ceilometer notification queue, which has the most traffic.
# In the past, the notifications.info and versioned_notifications.info queues grew over time due to
# configuration-related issues. These issues have been resolved in R3.
RABBITMQ_QUEUE_LIST="notifications.info versioned_notifications.info"
# Parsing process schedtop currently takes a long time. Turn this on when detailed stats for
# each specified process are desired. The list of processes is specific to each host type
# (controller/compute/storage) and thus is set in host.conf.
#
# It is recommended to leave this option turned off and run parse-schedtop.sh only for the target
# host(s) and target service(s) after having analyzed the occtop and schedtop summary output files
# (occtop-xxx.csv & schedtop-summary-xxx.txt). See the invocation sketch after this file.
#
# Valid values are "Y" or "N". Default is "N"
GENERATE_PROCESS_SCHEDTOP="N"
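
A minimal invocation sketch for the targeted schedtop parsing recommended above, run from the
analysis directory that contains the per-host data directories. The host and service names are
illustrative, and the positional-argument form is an assumption; check the usage header of
parse-schedtop.sh before running it.

# Assumed form: ./parse-schedtop.sh <host> <service>   (assumption; verify against the script)
./parse-schedtop.sh controller-0 nova-conductor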

View File

@ -1,25 +0,0 @@
#!/bin/bash
#Copyright (c) 2016 Wind River Systems, Inc.
#
#SPDX-License-Identifier: Apache-2.0
#
# Create the /opt/backups/tmp/syseng-data directory on controller-0, change the mode of this
# directory to 777, and place this script and the lab.conf file there. It is recommended
# to set up password-less login from the controller to all storage and compute hosts
# before running this script (see the setup sketch after this file).
#
if [ ! -f lab.conf ]; then
echo "Lab configuration file is missing."
echo "See http://wiki.wrs.com/PBUeng/TitaniumServerSysengToolsAndDataAnalysis for more info."
exit 1
fi
source ./lab.conf
HOST_LIST="${STORAGE_LIST} ${COMPUTE_LIST}"
for HOST in ${HOST_LIST}; do
rsync -azvh ${HOST}:/tmp/syseng_data/* .
done
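
A minimal sketch of the recommended password-less login setup, using the standard OpenSSH
tools. Host names come from STORAGE_LIST and COMPUTE_LIST in lab.conf; the default key type
and path are assumptions about the local setup.

# Generate a key once (accept the defaults), then copy it to each storage and compute host.
source ./lab.conf
ssh-keygen -t rsa
for HOST in ${STORAGE_LIST} ${COMPUTE_LIST}; do
ssh-copy-id ${HOST}
done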

View File

@ -1,71 +0,0 @@
#!/bin/bash
#Copyright (c) 2016 Wind River Systems, Inc.
#
#SPDX-License-Identifier: Apache-2.0
#
# This script is used to parse stats data for storage and compute hosts. It is not
# relevant for CPE. For large office configurations, it is called by the parse-everything.sh
# script. The lab.conf file must exist for the script to run. The STORAGE_LIST and
# COMPUTE_LIST config parameters can be set to suit the parsing needs.
PARSERDIR=$(dirname $0)
. ${PARSERDIR}/parse-util.sh
if [ ! -f lab.conf ]; then
echo "Lab configuration file is missing."
echo "See http://wiki.wrs.com/PBUeng/TitaniumServerSysengToolsAndDataAnalysis for more info."
exit 1
fi
source ./lab.conf
if [ -z "${STORAGE_LIST}" ]; then
# This script could be invoked from another script or run separately, so write to both
# the console and the log file.
echo "STORAGE_LIST is not set in lab.conf file. Skipping stats parsing for all storage."
WARNLOG "STORAGE_LIST is not set in lab.conf file. Skipping stats parsing for all storage."
else
for HOST in ${STORAGE_LIST}; do
LOG "Parsing stats data for storage host ${HOST}"
if [ -d ${HOST} ]; then
cd ${HOST}
bzip2 ${HOST}* > /dev/null 2>&1
../parse-all.sh ${HOST} > /dev/null 2>&1 &
cd ..
else
ERRLOG "${HOST} does not exist. Parsing skipped."
fi
done
fi
if [ -z "${COMPUTE_LIST}" ]; then
echo "COMPUTE_LIST is not set in lab.conf file. Skipping stats parsing for all computes."
WARNLOG "COMPUTE_LIST is not set in lab.conf file. Skipping stats parsing for all computes."
exit 1
else
# If there is a large number of computes, they need to be parsed one batch at a time;
# otherwise, the analysis server will slow to a crawl. Default to processing the computes
# in parallel batches of 25 if BATCH_SIZE is not set in lab.conf.
BATCH_SIZE=${BATCH_SIZE:-25}
count=0
for HOST in ${COMPUTE_LIST}; do
LOG "Parsing stats data for compute host ${HOST}"
if [ -d ${HOST} ]; then
cd ${HOST}
bzip2 ${HOST}* > /dev/null 2>&1
../parse-all.sh ${HOST} > /dev/null 2>&1 &
cd ..
((count++))
if [ $count == ${BATCH_SIZE} ]; then
# Wait for this batch to finish before starting a new one
wait
count=0
fi
else
ERRLOG "${HOST} does not exist. Parsing skipped."
fi
done
fi

View File

@ -1,14 +0,0 @@
#!/bin/bash
#Copyright (c) 2016 Wind River Systems, Inc.
#
#SPDX-License-Identifier: Apache-2.0
#
# This script is used to parse stats data for all hosts. It is called only for large office
# configurations. For CPE, use the parse-controllers.sh script.
./parse-controllers.sh &
wait
./parse-computes.sh

View File

@ -1,68 +0,0 @@
#!/bin/bash
#Copyright (c) 2016 Wind River Systems, Inc.
#
#SPDX-License-Identifier: Apache-2.0
#
NODE=$1
if [ `ls vswitch*.csv 2>/dev/null | wc -l` -gt 0 ]; then
rm vswitch*.csv
fi
[ -e tmp.txt ] && rm tmp.txt
FILES=$(ls *vswitch.bz2 | sort)
for FILE in ${FILES}; do
bzcat ${FILE} | grep -E "time\:|vshell|\|" >> tmp.txt
done
while IFS='' read -r LINE || [[ -n "${LINE}" ]]; do
if [[ "${LINE}" == "# vshell engine-list" ]]; then
CURTABLE="engine"
elif [[ "${LINE}" == "# vshell engine-stats-list" ]]; then
CURTABLE="engine-stats"
elif [[ "${LINE}" == "# vshell port-list" ]]; then
CURTABLE="ports"
elif [[ "${LINE}" == "# vshell port-stats-list" ]]; then
CURTABLE="port-stats"
elif [[ "${LINE}" == "# vshell network-list" ]]; then
CURTABLE="networks"
elif [[ "${LINE}" == "# vshell network-stats-list" ]]; then
CURTABLE="network-stats"
elif [[ "${LINE}" == "# vshell interface-list" ]]; then
CURTABLE="interfaces"
elif [[ "${LINE}" == "# vshell interface-stats-list" ]]; then
CURTABLE="interface-stats"
else
TEST=$(echo ${LINE} | awk '{print $1}')
if [[ "${TEST}" == "time:" ]]; then
TIMESTAMP=$(echo ${LINE} | awk '{print $3" "$4}')
elif [[ "${CURTABLE}" == "engine-stats" ]]; then
ENGINE=$(echo ${LINE} | awk '{print $4}')
if [[ "${ENGINE}" == "0" ]] || [[ "${ENGINE}" == "1" ]]; then
PARAMS=$(echo ${LINE} | awk '{print $4","$6","$8","$10","$12","$14","$16","$18","$20}')
echo "${TIMESTAMP},${PARAMS}" >>vswitch-engine-${ENGINE}-${NODE}.csv
fi
elif [[ "${CURTABLE}" == "port-stats" ]]; then
PORTTYPE=$(echo ${LINE} | awk '{print $6}')
if [[ "${PORTTYPE}" == "physical" ]]; then
PORTNUM=$(echo ${LINE} | awk '{print $4}')
PARAMS=$(echo ${LINE} | awk '{print $8","$10","$12","$14","$16","$18","$20}')
echo "${TIMESTAMP},${PARAMS}" >>vswitch-port-${PORTNUM}-${NODE}.csv
fi
elif [[ "${CURTABLE}" == "interface-stats" ]]; then
IFNAME=$(echo ${LINE} | awk '{print $8}')
if [[ "${IFNAME}" == "eth0" ]] || [[ "${IFNAME}" == "eth1" ]]; then
PARAMS=$(echo ${LINE} | awk '{print $10","$12","$14","$16","$18","$20","$22","$24","$26","$28}')
echo "${TIMESTAMP},${PARAMS}" >>vswitch-interface-${IFNAME}-${NODE}.csv
fi
fi
fi
done < tmp.txt
rm tmp.txt
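
A note on the field extraction in the script above: vshell prints '|'-delimited tables, and awk
treats each '|' as its own whitespace-separated token, so the data values land in the
even-numbered fields ($2, $4, $6, ...), which is why the script picks $4, $6, $8, and so on.
The row below is a hypothetical example for illustration only, not actual vshell output.

# Hypothetical '|'-delimited row; the even-numbered awk fields hold the values.
echo "| 0 | engine-0 | 12345 | 67 |" | awk '{print $4","$6","$8}'
# Output: engine-0,12345,67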