diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..0a97fb7 --- /dev/null +++ b/.gitignore @@ -0,0 +1,7 @@ +/downloads +installer-prebuilt +.idea +*.egg-info +*.swp +*.pyc +.cache/ diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..d645695 --- /dev/null +++ b/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/centos_pkg_dirs b/centos_pkg_dirs new file mode 100644 index 0000000..6dc97ec --- /dev/null +++ b/centos_pkg_dirs @@ -0,0 +1,251 @@ +mwa-thales/nova-api-proxy +mwa-perian/ceph +mwa-perian/ceph-manager +mwa-chilon/tsconfig +mwa-pitta/compute-huge +mwa-pitta/computeconfig +mwa-pitta/configutilities +mwa-pitta/controllerconfig +mwa-pitta/storageconfig +mwa-cleo/fm-api +mwa-cleo/fm-common +mwa-cleo/fm-mgr +mwa-cleo/fm-doc +middleware/filesystem/recipes-common/logmgmt +middleware/filesystem/recipes-control/filesystem-scripts +middleware/io-monitor/recipes-common/io-monitor +mwa-beas/mtce-common +mwa-beas/mtce-compute +mwa-beas/mtce-control +mwa-beas/mtce-storage +middleware/recipes-common/build-info +mwa-gplv2/dpkg +mwa-pitta/sysinv/cgts-client +mwa-pitta/sysinv/sysinv-agent +mwa-pitta/sysinv/sysinv +mwa-thales/guest-client +mwa-thales/guest-agent +mwa-thales/guest-comm +middleware/branding/recipes-control/wrs-branding +middleware/perf/recipes-common/io-scheduler +middleware/util/recipes-common/collector +mwa-perian/openstack/openstack-aodh +mwa-perian/openstack/openstack-murano +mwa-perian/openstack/python-muranoclient +mwa-perian/openstack/openstack-murano-ui +mwa-perian/openstack/openstack-ironic +mwa-perian/openstack/python-ironicclient +mwa-perian/openstack/python-magnumclient +mwa-perian/openstack/openstack-magnum +mwa-perian/openstack/openstack-magnum-ui +mwa-perian/openstack/openstack-ras +mwa-perian/openstack/openstack-panko +mwa-perian/openstack/python-ceilometer +mwa-perian/openstack/python-ceilometerclient +mwa-perian/openstack/python-cinder +mwa-perian/openstack/python-cinderclient +mwa-perian/openstack/python-glance +mwa-perian/openstack/python-glance-store +mwa-perian/openstack/python-glanceclient +mwa-perian/openstack/python-heat/openstack-heat +mwa-perian/openstack/python-heat/wrs-heat-template +mwa-perian/openstack/python-heatclient +mwa-perian/openstack/python-horizon +mwa-perian/openstack/python-keystone +mwa-perian/openstack/python-keystoneclient +mwa-perian/openstack/python-keystonemiddleware +mwa-perian/openstack/python-networking-bgpvpn +mwa-perian/openstack/python-networking-sfc +mwa-perian/openstack/python-networking-odl +mwa-perian/openstack/python-neutron +mwa-perian/openstack/python-neutron-dynamic-routing +mwa-perian/openstack/python-neutron-lib +mwa-perian/openstack/python-neutronclient +mwa-perian/openstack/python-nova +mwa-perian/openstack/python-novaclient +mwa-perian/openstack/python-openstackdocstheme +mwa-perian/openstack/python-oslo-concurrency +mwa-perian/openstack/python-oslo-service +mwa-perian/openstack/python-oslo-messaging +mwa-perian/openstack/rabbitmq-server +mwa-sparta/vm-topology +mwa-perian/openstack/python-keystoneauth1 +mwa-perian/openstack/python-openstackclient 
+mwa-perian/openstack/python-osc-lib +mwa-perian/openstack/python-openstacksdk +mwa-perian/openstack/python-django-openstack-auth +mwa-delphi/remote-clients +mwa-delphi/install-log-server +mwa-sparta/core/initscripts +mwa-sparta/core/util-linux +mwa-sparta/base/setup +mwa-sparta/base/lshell +mwa-gplv2/cgcs-users +mwa-sparta/base/namespace-utils +mwa-sparta/base/nss-pam-ldapd +mwa-sparta/base/centos-release +mwa-sparta/base/expect-lite +mwa-gplv2/cluster-resource-agents +mwa-sparta/connectivity/nfs-utils +mwa-sparta/connectivity/dhcp +mwa-sparta/connectivity/openssh +mwa-sparta/devtools/facter +mwa-sparta/devtools/qemu +mwa-gplv2/rpm +mwa-gplv2/libfdt +mwa-sparta/devtools/nfscheck/recipes-common/nfscheck +mwa-sparta/devtools/update-motd +mwa-sparta/devtools/vim +mwa-sparta/devtools/python/python-django +mwa-gplv3/python-psycopg2 +mwa-sparta/devtools/python-setuptools +mwa-gplv2/bash +mwa-sparta/extended/e2fsprogs +mwa-gplv3/crontabs +mwa-gplv2/haproxy +mwa-gplv2/iptables +mwa-gplv2/iscsi-initiator-utils +mwa-gplv2/ldapscripts +mwa-sparta/extended/libvirt +mwa-sparta/extended/libvirt-python +mwa-sparta/extended/lighttpd +mwa-gplv2/netpbm +mwa-gplv3/parted +mwa-sparta/extended/python-cephclient +mwa-gplv3/python-keyring +mwa-sparta/extended/python-ryu +mwa-sparta/extended/python-smartpm +mwa-sparta/extended/lldpd +mwa-sparta/extended/logrotate +mwa-sparta/extended/nova-utils +mwa-sparta/extended/ntp +mwa-sparta/extended/pam +mwa-gplv3/rsync +mwa-sparta/extended/shadow +mwa-sparta/extended/shim-unsigned +mwa-sparta/extended/shim-signed +mwa-sparta/extended/syslog-ng +mwa-gplv2/net-tools +mwa-sparta/extended/novnc +mwa-sparta/extended/sanlock +mwa-sparta/extended/sudo +mwa-sparta/extended/cloud-init +mwa-sparta/extended/irqbalance +mwa-sparta/extended/watchdog +mwa-gplv3/seabios +mwa-gplv2/tpmdd +mwa-gplv2/integrity +mwa-gplv2/drbd +mwa-gplv2/drbd-tools +mwa-gplv2/intel-e1000e +mwa-gplv2/intel-i40e +mwa-gplv2/intel-i40evf +mwa-gplv2/intel-ixgbe +mwa-gplv2/intel-ixgbevf +mwa-gplv2/qat17 +mwa-sparta/networking/net-snmp +mwa-sparta/networking/openldap +mwa-sparta/networking/mlx4-config +mwa-sparta/networking/openvswitch +mwa-sparta/networking/scapy +mwa-thales/nfv +mwa-sparta/restapi-doc +mwa-gplv3/grub2 +recipes-bsp/grubby +mwa-gplv3/dnsmasq +mwa-sparta/support/libevent +mwa-sparta/support/tgt +mwa-solon/service-mgmt/sm-common-1.0.0 +mwa-solon/service-mgmt/sm-db-1.0.0 +mwa-solon/service-mgmt/sm-1.0.0 +mwa-solon/service-mgmt-api +mwa-solon/service-mgmt-client +mwa-solon/service-mgmt-tools +mwa-sparta/security/wrs-ssl +mwa-sparta/security/tss2 +mwa-sparta/security/tpm2-tools +mwa-sparta/security/tpm2-openssl-engine +mwa-sparta/security/libtpms +mwa-sparta/security/swtpm +mwa-sparta/security/audit +mwa-cleo/snmp-ext +middleware/util/recipes-common/libtrap-handler +mwa-chilon/cgcs-patch +mwa-chilon/patch-alarm +middleware/patching/recipes-common/enable-dev-patch +middleware/util/recipes-common/platform-util +middleware/util/recipes-common/monitor-tools +middleware/util/recipes-common/engtools/hostdata-collectors +mwa-sparta/kernel-std +mwa-sparta/devtools/puppet-4.8.2 +mwa-beas/installer/pxe-network-installer +mwa-pitta/config-gate +mwa-chilon/requests-toolbelt +mwa-perian/openstack/python-wsme +mwa-pitta/puppet-manifests +mwa-sparta/devtools/puppet-modules/puppet-ovs_dpdk +mwa-sparta/devtools/puppet-modules/puppet-boolean-1.0.2 +mwa-sparta/devtools/puppet-modules/puppet-create_resources +mwa-sparta/devtools/puppet-modules/puppet-dnsmasq +mwa-sparta/devtools/puppet-modules/puppet-drbd-0.3.1 
+mwa-sparta/devtools/puppet-modules/puppet-filemapper +mwa-sparta/devtools/puppet-modules/puppet-haproxy-1.5.0 +mwa-sparta/devtools/puppet-modules/puppet-ldap +mwa-sparta/devtools/puppet-modules/puppet-lvm +mwa-sparta/devtools/puppet-modules/puppet-network +mwa-sparta/devtools/puppet-modules/puppet-nslcd +mwa-sparta/devtools/puppet-modules/puppet-postgresql-4.8.0 +mwa-sparta/devtools/puppet-modules/puppet-puppi +mwa-sparta/devtools/puppet-modules/puppet-rabbitmq-5.5.0 +mwa-sparta/devtools/puppet-modules/puppet-staging +mwa-sparta/devtools/puppet-modules/puppet-stdlib-4.12.0 +mwa-sparta/devtools/puppet-modules/openstack/puppet-ceilometer-11.3.0 +mwa-sparta/devtools/puppet-modules/openstack/puppet-ceph-2.2.0 +mwa-sparta/devtools/puppet-modules/openstack/puppet-cinder-11.3.0 +mwa-sparta/devtools/puppet-modules/openstack/puppet-glance-11.3.0 +mwa-sparta/devtools/puppet-modules/openstack/puppet-heat-11.3.0 +mwa-sparta/devtools/puppet-modules/openstack/puppet-ironic-11.3.0 +mwa-sparta/devtools/puppet-modules/openstack/puppet-keystone-11.3.0 +mwa-sparta/devtools/puppet-modules/openstack/puppet-murano-11.3.0 +mwa-sparta/devtools/puppet-modules/openstack/puppet-magnum-11.3.0 +mwa-sparta/devtools/puppet-modules/openstack/puppet-neutron-11.3.0 +mwa-sparta/devtools/puppet-modules/openstack/puppet-nova-11.4.0 +mwa-sparta/devtools/puppet-modules/openstack/puppet-openstacklib-11.3.0 +mwa-sparta/devtools/puppet-modules/openstack/puppet-panko-11.3.0 +mwa-pitta/puppet-modules-wrs/puppet-mtce +mwa-pitta/puppet-modules-wrs/puppet-sshd +mwa-pitta/puppet-modules-wrs/puppet-nfv +mwa-pitta/puppet-modules-wrs/puppet-nova_api_proxy +mwa-pitta/puppet-modules-wrs/puppet-patching +mwa-pitta/puppet-modules-wrs/puppet-sysinv +mwa-pitta/puppet-modules-wrs/puppet-dcorch +mwa-pitta/puppet-modules-wrs/puppet-dcmanager +mwa-sparta/base/tis-extensions +mwa-sparta/python/python-eventlet +mwa-sparta/python/python-requests +mwa-chilon/patch-scripts/EXAMPLE_0001 +mwa-chilon/patch-scripts/EXAMPLE_0002 +mwa-chilon/patch-scripts/EXAMPLE_0003 +mwa-chilon/patch-scripts/EXAMPLE_RR +mwa-chilon/patch-scripts/EXAMPLE_MTCE +mwa-chilon/patch-scripts/EXAMPLE_NEUTRON +mwa-chilon/patch-scripts/EXAMPLE_AODH +mwa-chilon/patch-scripts/EXAMPLE_HEAT +mwa-chilon/patch-scripts/EXAMPLE_VIM +mwa-chilon/patch-scripts/EXAMPLE_NOVA +mwa-chilon/patch-scripts/EXAMPLE_SYSINV +mwa-gplv2/mariadb +mwa-sparta/extended/systemd +mwa-sparta/extended/python-gunicorn +mwa-sparta/devtools/puppet-modules/openstack/puppet-oslo-11.3.0 +mwa-cleo/snmp-audittrail +mwa-sparta/python/python-3parclient +mwa-sparta/python/python-lefthandclient +mwa-sparta/extended/iproute +mwa-perian/openstack/distributedcloud +mwa-perian/openstack/distributedcloud-client +mwa-sparta/extended/tboot +mwa-beas/kickstart +mwa-sparta/mellanox/libibverbs +mwa-sparta/mellanox/mlnx-ofa_kernel +mwa-sparta/mellanox/rdma-core diff --git a/centos_pkg_dirs_installer b/centos_pkg_dirs_installer new file mode 100644 index 0000000..6c92af2 --- /dev/null +++ b/centos_pkg_dirs_installer @@ -0,0 +1,2 @@ +mwa-gplv2/rpm +mwa-gplv3/anaconda diff --git a/centos_pkg_dirs_rt b/centos_pkg_dirs_rt new file mode 100644 index 0000000..4b42423 --- /dev/null +++ b/centos_pkg_dirs_rt @@ -0,0 +1,13 @@ +mwa-sparta/kernel-rt +mwa-gplv2/intel-e1000e +mwa-gplv2/intel-i40e +mwa-gplv2/intel-i40evf +mwa-gplv2/intel-ixgbe +mwa-gplv2/intel-ixgbevf +mwa-gplv2/qat17 +mwa-gplv2/drbd +mwa-gplv2/tpmdd +mwa-gplv2/integrity +mwa-sparta/mellanox/libibverbs +mwa-sparta/mellanox/mlnx-ofa_kernel +mwa-sparta/mellanox/rdma-core diff --git 
a/extras/scripts/make_patch.sh b/extras/scripts/make_patch.sh
new file mode 100755
index 0000000..3a64554
--- /dev/null
+++ b/extras/scripts/make_patch.sh
@@ -0,0 +1,16 @@
+#!/bin/bash
+
+pushd "$(dirname "$0")" > /dev/null
+SCRIPTPATH=$(pwd)
+popd > /dev/null
+
+# CGCSPATCH_DIR=$MY_REPO/addons/wr-cgcs/layers/cgcs/middleware/patching/recipes-common/cgcs-patch
+CGCSPATCH_DIR=$SCRIPTPATH/../../middleware/patching/recipes-common/cgcs-patch
+
+# Set environment variables for python
+export PYTHONPATH=$CGCSPATCH_DIR/cgcs-patch
+export PYTHONDONTWRITEBYTECODE=true
+
+# Run the make_patch tool
+exec $CGCSPATCH_DIR/bin/make_patch "$@"
+
diff --git a/extras/scripts/modify_patch.sh b/extras/scripts/modify_patch.sh
new file mode 100755
index 0000000..91cdff2
--- /dev/null
+++ b/extras/scripts/modify_patch.sh
@@ -0,0 +1,16 @@
+#!/bin/bash
+
+pushd "$(dirname "$0")" > /dev/null
+SCRIPTPATH=$(pwd)
+popd > /dev/null
+
+# CGCSPATCH_DIR=$MY_REPO/addons/wr-cgcs/layers/cgcs/middleware/patching/recipes-common/cgcs-patch
+CGCSPATCH_DIR=$SCRIPTPATH/../../middleware/patching/recipes-common/cgcs-patch
+
+# Set environment variables for python
+export PYTHONPATH=$CGCSPATCH_DIR/cgcs-patch
+export PYTHONDONTWRITEBYTECODE=true
+
+# Run the modify_patch tool
+exec $CGCSPATCH_DIR/bin/modify_patch "$@"
+
diff --git a/extras/scripts/patch_build.sh b/extras/scripts/patch_build.sh
new file mode 100755
index 0000000..e1f4cd0
--- /dev/null
+++ b/extras/scripts/patch_build.sh
@@ -0,0 +1,15 @@
+#!/bin/bash
+
+CGCSPATCH_DIR=$MY_REPO/addons/wr-cgcs/layers/cgcs/middleware/patching/recipes-common/cgcs-patch
+
+# Source release-info
+. $MY_REPO/addons/wr-cgcs/layers/cgcs/middleware/recipes-common/build-info/release-info.inc
+export PLATFORM_RELEASE
+
+# Set environment variables for python
+export PYTHONPATH=$CGCSPATCH_DIR/cgcs-patch
+export PYTHONDONTWRITEBYTECODE=true
+
+# Run the patch_build tool
+exec $CGCSPATCH_DIR/bin/patch_build "$@"
+
diff --git a/extras/scripts/query_patch.sh b/extras/scripts/query_patch.sh
new file mode 100755
index 0000000..be89ce3
--- /dev/null
+++ b/extras/scripts/query_patch.sh
@@ -0,0 +1,16 @@
+#!/bin/bash
+
+pushd "$(dirname "$0")" > /dev/null
+SCRIPTPATH=$(pwd)
+popd > /dev/null
+
+# CGCSPATCH_DIR=$MY_REPO/addons/wr-cgcs/layers/cgcs/middleware/patching/recipes-common/cgcs-patch
+CGCSPATCH_DIR=$SCRIPTPATH/../../middleware/patching/recipes-common/cgcs-patch
+
+# Set environment variables for python
+export PYTHONPATH=$CGCSPATCH_DIR/cgcs-patch
+export PYTHONDONTWRITEBYTECODE=true
+
+# Run the query_patch tool
+exec $CGCSPATCH_DIR/bin/query_patch "$@"
+
diff --git a/git/.gitignore b/git/.gitignore
new file mode 100644
index 0000000..90cb1c6
--- /dev/null
+++ b/git/.gitignore
@@ -0,0 +1,47 @@
+/api-site
+/ceilometer
+/cinder
+/compute-api
+/glance
+/heat
+/horizon
+/identity-api
+/image-api
+/ironic
+/python-ironicclient
+/keystone
+/magnum
+/magnum-ui
+/netconn-api
+/neutron
+/nova
+/object-api
+/python-ceilometerclient
+/python-neutronclient
+/python-novaclient
+/libvirt
+/qemu
+/networking-odl
+/packstack
+/python-cinderclient
+/python-glanceclient
+/python-openstacksdk
+/pktgen-dpdk
+/murano
+/murano-dashboard
+/python-magnumclient
+/python-muranoclient
+/openstack-ras
+/python-lefthandclient
+/python-3parclient
+/ceph
+/seabios
+/neutron-dynamic-routing
+/networking-bgpvpn
+/ryu
+/ima-evm-utils
+/rpm
+/distributedcloud
+/distributedcloud-client
+/networking-sfc
+/neutron-lib
diff --git a/git/ceilometer.txt b/git/ceilometer.txt new file mode 100644 index 0000000..6069a7d --- /dev/null +++ b/git/ceilometer.txt @@ -0,0
+1 @@ +github.com/openstack/ceilometer diff --git a/git/ceph.txt b/git/ceph.txt new file mode 100644 index 0000000..d18376c --- /dev/null +++ b/git/ceph.txt @@ -0,0 +1 @@ +git.ceph.com/ceph.git diff --git a/git/cinder.txt b/git/cinder.txt new file mode 100644 index 0000000..3364e72 --- /dev/null +++ b/git/cinder.txt @@ -0,0 +1 @@ +github.com/openstack/cinder diff --git a/git/distributedcloud-client.txt b/git/distributedcloud-client.txt new file mode 100644 index 0000000..bc8110d --- /dev/null +++ b/git/distributedcloud-client.txt @@ -0,0 +1 @@ +github.com/openstack/python-kingbirdclient diff --git a/git/distributedcloud.txt b/git/distributedcloud.txt new file mode 100644 index 0000000..e4da945 --- /dev/null +++ b/git/distributedcloud.txt @@ -0,0 +1 @@ +github.com/openstack/kingbird diff --git a/git/glance.txt b/git/glance.txt new file mode 100644 index 0000000..46925a4 --- /dev/null +++ b/git/glance.txt @@ -0,0 +1 @@ +github.com/openstack/glance diff --git a/git/heat.txt b/git/heat.txt new file mode 100644 index 0000000..be8b9d2 --- /dev/null +++ b/git/heat.txt @@ -0,0 +1 @@ +github.com/openstack/heat diff --git a/git/horizon.txt b/git/horizon.txt new file mode 100644 index 0000000..be7099b --- /dev/null +++ b/git/horizon.txt @@ -0,0 +1 @@ +github.com/openstack/horizon diff --git a/git/ima-evm-utils.txt b/git/ima-evm-utils.txt new file mode 100644 index 0000000..d03dc85 --- /dev/null +++ b/git/ima-evm-utils.txt @@ -0,0 +1 @@ +git.code.sf.net/p/linux-ima/ima-evm-utils diff --git a/git/keystone.txt b/git/keystone.txt new file mode 100644 index 0000000..c6e36d4 --- /dev/null +++ b/git/keystone.txt @@ -0,0 +1 @@ +github.com/openstack/keystone diff --git a/git/libvirt.txt b/git/libvirt.txt new file mode 100644 index 0000000..74aa111 --- /dev/null +++ b/git/libvirt.txt @@ -0,0 +1 @@ +libvirt.org/libvirt diff --git a/git/magnum-ui.txt b/git/magnum-ui.txt new file mode 100644 index 0000000..590aff7 --- /dev/null +++ b/git/magnum-ui.txt @@ -0,0 +1 @@ +github.com/openstack/magnum-ui diff --git a/git/magnum.txt b/git/magnum.txt new file mode 100644 index 0000000..5d7aecf --- /dev/null +++ b/git/magnum.txt @@ -0,0 +1 @@ +github.com/openstack/magnum diff --git a/git/murano-dashboard.txt b/git/murano-dashboard.txt new file mode 100644 index 0000000..546fde7 --- /dev/null +++ b/git/murano-dashboard.txt @@ -0,0 +1 @@ +github.com/openstack/murano-dashboard diff --git a/git/murano.txt b/git/murano.txt new file mode 100644 index 0000000..304e40e --- /dev/null +++ b/git/murano.txt @@ -0,0 +1 @@ +github.com/openstack/murano diff --git a/git/networking-bgpvpn.txt b/git/networking-bgpvpn.txt new file mode 100644 index 0000000..b5dfc63 --- /dev/null +++ b/git/networking-bgpvpn.txt @@ -0,0 +1 @@ +github.com/openstack/networking-bgpvpn diff --git a/git/networking-odl.txt b/git/networking-odl.txt new file mode 100644 index 0000000..6de6475 --- /dev/null +++ b/git/networking-odl.txt @@ -0,0 +1 @@ +git.openstack.org/openstack/networking-odl diff --git a/git/networking-sfc.txt b/git/networking-sfc.txt new file mode 100644 index 0000000..d261d6c --- /dev/null +++ b/git/networking-sfc.txt @@ -0,0 +1 @@ +github.com/openstack/networking-sfc diff --git a/git/neutron-dynamic-routing.txt b/git/neutron-dynamic-routing.txt new file mode 100644 index 0000000..a087699 --- /dev/null +++ b/git/neutron-dynamic-routing.txt @@ -0,0 +1 @@ +github.com/openstack/neutron-dynamic-routing diff --git a/git/neutron.txt b/git/neutron.txt new file mode 100644 index 0000000..7f39bd3 --- /dev/null +++ b/git/neutron.txt @@ -0,0 +1 @@ 
+github.com/openstack/neutron diff --git a/git/nova.txt b/git/nova.txt new file mode 100644 index 0000000..215a2c0 --- /dev/null +++ b/git/nova.txt @@ -0,0 +1 @@ +github.com/openstack/nova diff --git a/git/openstack-ras.txt b/git/openstack-ras.txt new file mode 100644 index 0000000..3dad546 --- /dev/null +++ b/git/openstack-ras.txt @@ -0,0 +1 @@ +github.com/openstack/openstack-resource-agents diff --git a/git/packstack.txt b/git/packstack.txt new file mode 100644 index 0000000..1c75c87 --- /dev/null +++ b/git/packstack.txt @@ -0,0 +1 @@ +github.com/openstack/packstack diff --git a/git/pktgen-dpdk.txt b/git/pktgen-dpdk.txt new file mode 100644 index 0000000..7280d27 --- /dev/null +++ b/git/pktgen-dpdk.txt @@ -0,0 +1 @@ +github.com/pktgen/pktgen-dpdk diff --git a/git/python-3parclient.txt b/git/python-3parclient.txt new file mode 100644 index 0000000..e9d23de --- /dev/null +++ b/git/python-3parclient.txt @@ -0,0 +1 @@ +github.com/hpe-storage/python-3parclient diff --git a/git/python-ceilometerclient.txt b/git/python-ceilometerclient.txt new file mode 100644 index 0000000..65a8c2f --- /dev/null +++ b/git/python-ceilometerclient.txt @@ -0,0 +1,2 @@ +github.com/openstack/python-ceilometerclient + diff --git a/git/python-cinderclient.txt b/git/python-cinderclient.txt new file mode 100644 index 0000000..ecc3d38 --- /dev/null +++ b/git/python-cinderclient.txt @@ -0,0 +1 @@ +github.com/openstack/python-cinderclient diff --git a/git/python-glanceclient.txt b/git/python-glanceclient.txt new file mode 100644 index 0000000..4291035 --- /dev/null +++ b/git/python-glanceclient.txt @@ -0,0 +1 @@ +github.com/openstack/python-glanceclient diff --git a/git/python-lefthandclient.txt b/git/python-lefthandclient.txt new file mode 100644 index 0000000..209c5ae --- /dev/null +++ b/git/python-lefthandclient.txt @@ -0,0 +1 @@ +github.com/hpe-storage/python-lefthandclient diff --git a/git/python-magnumclient.txt b/git/python-magnumclient.txt new file mode 100644 index 0000000..63e3448 --- /dev/null +++ b/git/python-magnumclient.txt @@ -0,0 +1 @@ +github.com/openstack/python-magnumclient diff --git a/git/python-muranoclient.txt b/git/python-muranoclient.txt new file mode 100644 index 0000000..a3207ed --- /dev/null +++ b/git/python-muranoclient.txt @@ -0,0 +1 @@ +github.com/openstack/python-muranoclient diff --git a/git/python-neutronclient.txt b/git/python-neutronclient.txt new file mode 100644 index 0000000..250d5fa --- /dev/null +++ b/git/python-neutronclient.txt @@ -0,0 +1 @@ +github.com/openstack/python-neutronclient diff --git a/git/python-novaclient.txt b/git/python-novaclient.txt new file mode 100644 index 0000000..c0817ae --- /dev/null +++ b/git/python-novaclient.txt @@ -0,0 +1 @@ +github.com/openstack/python-novaclient diff --git a/git/python-openstacksdk.txt b/git/python-openstacksdk.txt new file mode 100644 index 0000000..e77777a --- /dev/null +++ b/git/python-openstacksdk.txt @@ -0,0 +1 @@ +github.com/openstack/python-openstacksdk diff --git a/git/qemu.txt b/git/qemu.txt new file mode 100644 index 0000000..668c73a --- /dev/null +++ b/git/qemu.txt @@ -0,0 +1 @@ +git.qemu.org/qemu diff --git a/git/rpm.txt b/git/rpm.txt new file mode 100644 index 0000000..a6309e9 --- /dev/null +++ b/git/rpm.txt @@ -0,0 +1 @@ +github.com/rpm-software-management/rpm diff --git a/git/ryu.txt b/git/ryu.txt new file mode 100644 index 0000000..b44f0c9 --- /dev/null +++ b/git/ryu.txt @@ -0,0 +1 @@ +github.com/osrg/ryu diff --git a/git/seabios.txt b/git/seabios.txt new file mode 100644 index 0000000..117b82e --- /dev/null +++ 
b/git/seabios.txt @@ -0,0 +1 @@ +git.seabios.org/seabios diff --git a/middleware/branding/recipes-control/wrs-branding/LICENSE b/middleware/branding/recipes-control/wrs-branding/LICENSE new file mode 100644 index 0000000..d645695 --- /dev/null +++ b/middleware/branding/recipes-control/wrs-branding/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/middleware/branding/recipes-control/wrs-branding/centos/build_srpm.data b/middleware/branding/recipes-control/wrs-branding/centos/build_srpm.data new file mode 100755 index 0000000..188f58b --- /dev/null +++ b/middleware/branding/recipes-control/wrs-branding/centos/build_srpm.data @@ -0,0 +1,2 @@ +SRC_DIR="sources" +TIS_PATCH_VER=6 diff --git a/middleware/branding/recipes-control/wrs-branding/centos/wrs-branding.spec b/middleware/branding/recipes-control/wrs-branding/centos/wrs-branding.spec new file mode 100755 index 0000000..5ce0298 --- /dev/null +++ b/middleware/branding/recipes-control/wrs-branding/centos/wrs-branding.spec @@ -0,0 +1,28 @@ +Summary: Titanium Cloud Branding Information +Name: wrs-branding +Version: 4.0.0 +Release: %{tis_patch_ver}%{?_tis_dist} +License: Apache-2.0 +Group: base +Packager: Wind River +URL: unknown + +Source0: %{name}-%{version}.tar.gz + +%define cgcs_sdk_deploy_dir /opt/deploy/cgcs_sdk + +%description +Titanium Cloud Branding allows alternate branding to be used for Horizon +in place of the default included with Titanium Cloud. + +%install +rm -rf $RPM_BUILD_ROOT +mkdir -p $RPM_BUILD_ROOT/%{cgcs_sdk_deploy_dir} +install -m 644 %{SOURCE0} $RPM_BUILD_ROOT/%{cgcs_sdk_deploy_dir}/%{name}-%{version}.tgz + +%clean +rm -rf $RPM_BUILD_ROOT + +%files +%defattr(-,root,root,-) +%{cgcs_sdk_deploy_dir} diff --git a/middleware/branding/recipes-control/wrs-branding/sources/LICENSE b/middleware/branding/recipes-control/wrs-branding/sources/LICENSE new file mode 100644 index 0000000..d645695 --- /dev/null +++ b/middleware/branding/recipes-control/wrs-branding/sources/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/middleware/branding/recipes-control/wrs-branding/sources/horizon/README b/middleware/branding/recipes-control/wrs-branding/sources/horizon/README new file mode 100755 index 0000000..fda4d49 --- /dev/null +++ b/middleware/branding/recipes-control/wrs-branding/sources/horizon/README @@ -0,0 +1,74 @@ +Copyright © 2014-2017 Wind River Systems, Inc. 
+
+SPDX-License-Identifier: Apache-2.0
+-----------------------------------------------------------------------
+
+
+Titanium Cloud Custom Branding
+-------------------------------
+
+Custom branding for Titanium Cloud is achieved through the creation and
+application of a tarball containing a customized horizon theme and
+associated branding files.
+
+
+Creating a Custom Branding Tarball
+----------------------------------
+
+The tarball containing the customized branding files must have the .tgz
+extension; its file name is otherwise unconstrained. 'sample_branding.tgz'
+has been included as an example of the end result. Please note that
+branding tarballs created for previous releases of Titanium Cloud are
+not forward-compatible, and must be recreated to accommodate the latest
+branding styles present in Horizon. To compress an existing theme, run
+the following from inside the branding's root directory:
+    tar czf new_branding.tgz *
+
+Two types of items are required in the branding tarball:
+    1) The contents of a valid horizon theme directory, as documented here:
+       docs.openstack.org/developer/horizon/topics/customizing.html
+       That is, the top level of the tarball contains the theme folders
+       'static' and 'templates' (if used). Both the default and material
+       themes included with horizon can be inherited from if desired.
+       Note that when referencing assets included in the custom theme,
+       the theme name will be 'custom'.
+
+       To base the new theme on the standard Titanium Cloud theme, or to
+       view an example of how the tarball should be formatted, refer to
+       the example tarball named sample_branding.tgz included with this SDK.
+
+    2) Also in the top level of the tarball, a file named manifest.py,
+       which overrides the default horizon settings used by Titanium
+       Cloud. Its contents are described as follows:
+
+       SITE_BRANDING is the name that will be used in the site title.
+       Note that there are other occurrences of the branding name that
+       must be changed directly in the templates.
+
+       HORIZON_CONFIG["help_url"] is the link that the help button will
+       redirect users to. Note that the knowledge library URL must also
+       be changed directly in the template.
+
+       Sample entries for these variables are shown below:
+
+       SITE_BRANDING = "Sample System Name"
+       HORIZON_CONFIG["help_url"] = "http://www.windriver.com/support/"
+
+
+Applying a Custom Branding Tarball
+----------------------------------
+
+This branding tarball can be applied at various stages; the steps for
+each stage are described below:
+
+    1) Before running config_controller
+       i) Copy the branding tarball to the /opt/branding directory; once
+          config_controller is run, this branding will automatically be
+          used by horizon.
+
+    2) After running config_controller
+       i) If a previous branding tarball was already in use, delete it
+          from /opt/branding,
+       ii) Copy the new branding tarball to the /opt/branding directory
+           on the active controller,
+       iii) Execute the command
+                sudo service horizon restart
+            to process the new branding files on the active controller,
+       iv) Lock the inactive controller,
+       v) Unlock the inactive controller.
+
+       Lock and unlock can coincide with login banner customization; see
+       also README.banner.
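A consolidated sketch of the tarball-creation steps in the README above: the static/templates layout, the 'custom' theme convention, the manifest.py entries, and the tar command all come from the README itself, while the scratch-directory name "my_branding" and the staging step are illustrative assumptions only.

    # Assemble a branding tarball as described in the README above.
    # "my_branding" is an arbitrary scratch directory, not a required name.
    mkdir -p my_branding/static my_branding/templates
    cd my_branding

    # manifest.py sits at the top level of the tarball and overrides the
    # default horizon settings (sample values copied from the README).
    printf '%s\n' \
        'SITE_BRANDING = "Sample System Name"' \
        'HORIZON_CONFIG["help_url"] = "http://www.windriver.com/support/"' \
        > manifest.py

    # ... place theme assets under static/ (and templates/, if used) ...

    # Package the contents, not the directory itself, so that static/,
    # templates/, and manifest.py land at the top level of the tarball.
    tar czf ../new_branding.tgz *

Running tar on '*' from inside the directory matches the README's requirement that the theme folders and manifest.py sit at the top level of the tarball.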
diff --git a/middleware/branding/recipes-control/wrs-branding/sources/horizon/sample_branding.tgz b/middleware/branding/recipes-control/wrs-branding/sources/horizon/sample_branding.tgz
new file mode 100755
index 0000000..59aba86
Binary files /dev/null and b/middleware/branding/recipes-control/wrs-branding/sources/horizon/sample_branding.tgz differ
diff --git a/middleware/branding/recipes-control/wrs-branding/sources/login-banner/README b/middleware/branding/recipes-control/wrs-branding/sources/login-banner/README
new file mode 100755
index 0000000..52ae11f
--- /dev/null
+++ b/middleware/branding/recipes-control/wrs-branding/sources/login-banner/README
@@ -0,0 +1,132 @@
+Copyright (c) 2016 Wind River Systems, Inc.
+
+SPDX-License-Identifier: Apache-2.0
+--------------------------------------------------------------------
+
+Titanium Cloud Customization of Login Banner Files
+---------------------------------------------------
+
+This feature permits the customization of the pre-login message (issue) and
+the post-login message of the day (motd) across the entire Titanium Cloud
+cluster. It is a convenience feature that simplifies propagation of
+the customized files.
+
+Introduction
+------------
+
+See also 'man issue' and 'man motd'.
+
+The following files can be customized, and are treated by this feature:
+    /etc/issue (login banner, console)
+    /etc/issue.net (login banner, ssh)
+    /etc/motd.head (message of the day, header)
+    /etc/motd.tail (message of the day, footer)
+
+The following files are not customizable, because each is generated by
+software:
+    /etc/motd
+    /etc/sysinv/motd.system
+    /etc/platform/motd.license
+
+issue and issue.net are free-standing files, while /etc/motd is
+generated from the following sources, in the following order:
+    /etc/motd.head
+    /etc/sysinv/motd.system
+    /etc/platform/motd.license
+    /etc/motd.tail
+
+The customization procedure during Titanium Cloud Installation and
+Commissioning is summarized as follows:
+    1) provide customization files
+    2) run 'config_controller'
+
+The customization procedure after config_controller is summarized
+as follows:
+    3) provide customization files
+    4) run 'apply_banner_customization'
+    5) lock/unlock each node
+
+Example tarball
+---------------
+
+sample_banner.tgz has been provided (adjacent to this README).
+Extract the tarball on the active controller to the directory
+/opt/banner, or as indicated in the following sections.
+
+The tarball contains examples of the four customizable login banner files.
+
+Procedure prior to config_controller
+------------------------------------
+
+When setting up a new cluster, immediately after the first controller
+boots and before running config_controller:
+
+1) Provide customization files
+
+To customize any of the four customizable banner files listed above,
+provide the new files in the following locations:
+    /opt/banner/issue
+    /opt/banner/issue.net
+    /opt/banner/motd.head
+    /opt/banner/motd.tail
+
+I.e.,
+    sudo mkdir -p /opt/banner
+    sudo tar xf sample_banner.tgz -C /opt/banner
+
+Each file is customized individually; edit or remove these files in
+/opt/banner according to requirements (a short sketch follows the note
+below).
+
+2) Run config_controller
+
+When config_controller is run, these files are moved from /opt/banner
+to configuration storage and are applied to the controller node as it is
+initialized. All nodes in the cluster which are subsequently configured
+will retrieve these custom banners as well.
+
+Note:
+In the event that an error is reported for the banner customization,
+customization can be repeated after config_controller using the
+following 'Procedure after deployment'; errors in customization do not
+impact config_controller.
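+
+As a minimal sketch of step 1), assuming the sample tarball is used as a
+starting point (the banner text shown is illustrative only):
+
+    sudo mkdir -p /opt/banner
+    sudo tar xf sample_banner.tgz -C /opt/banner
+    # Edit any of the four files, e.g. replace the console login banner:
+    echo "Authorized access only" | sudo tee /opt/banner/issue
+    # Remove any file that should keep its default content:
+    sudo rm -f /opt/banner/motd.tail
+    # config_controller, run next, picks up these files automatically.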
+
+Procedure after deployment
+--------------------------
+
+Customization can be performed at any time after deployment:
+
+1) Log in to the active controller
+2) Switch to the root user, i.e., 'sudo bash'
+3) Provide any of the customized banner files in /opt/banner, or another
+   directory:
+       /opt/banner/issue
+       /opt/banner/issue.net
+       /opt/banner/motd.head
+       /opt/banner/motd.tail
+4) Apply the customization by executing:
+       'apply_banner_customization /opt/banner', or
+       'apply_banner_customization <path>'
+
+   The default path, if no parameter is specified, is the current
+   working directory, i.e.,
+       'apply_banner_customization $(pwd)'
+
+   The banners are applied to the configuration and installed on the
+   current node, the active controller.
+
+5) Lock and unlock the other nodes in the cluster, either from the CLI or
+   the GUI, to install the customization on each node. The lock and unlock
+   can coincide with the horizon branding procedure; see also the adjacent
+   ../horizon/README for horizon branding.
+
+   All subsequently added nodes will automatically get the banner
+   customization.
+
+Manual Restoration
+------------------
+
+Replaced versions of banner files can be retrieved at the user's
+discretion from each node in the cluster, from the directory:
+       /opt/banner.bk
+
+A maximum of 10 versions of each file is maintained; once the count
+reaches 10, the oldest version is replaced.
diff --git a/middleware/branding/recipes-control/wrs-branding/sources/login-banner/sample_banner.tgz b/middleware/branding/recipes-control/wrs-branding/sources/login-banner/sample_banner.tgz
new file mode 100644
index 0000000..c6bfad9
Binary files /dev/null and b/middleware/branding/recipes-control/wrs-branding/sources/login-banner/sample_banner.tgz differ
diff --git a/middleware/filesystem/recipes-common/logmgmt/.gitignore b/middleware/filesystem/recipes-common/logmgmt/.gitignore
new file mode 100644
index 0000000..d2679b4
--- /dev/null
+++ b/middleware/filesystem/recipes-common/logmgmt/.gitignore
@@ -0,0 +1,6 @@
+!.distro
+.distro/centos7/rpmbuild/RPMS
+.distro/centos7/rpmbuild/SRPMS
+.distro/centos7/rpmbuild/BUILD
+.distro/centos7/rpmbuild/BUILDROOT
+.distro/centos7/rpmbuild/SOURCES/logmgmt*tar.gz
diff --git a/middleware/filesystem/recipes-common/logmgmt/LICENSE b/middleware/filesystem/recipes-common/logmgmt/LICENSE
new file mode 100644
index 0000000..d645695
--- /dev/null
+++ b/middleware/filesystem/recipes-common/logmgmt/LICENSE
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity.
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/middleware/filesystem/recipes-common/logmgmt/PKG-INFO b/middleware/filesystem/recipes-common/logmgmt/PKG-INFO new file mode 100644 index 0000000..46c15a9 --- /dev/null +++ b/middleware/filesystem/recipes-common/logmgmt/PKG-INFO @@ -0,0 +1,13 @@ +Metadata-Version: 1.1 +Name: logmgmt +Version: 1.0 +Summary: Management of /var/log filesystem +Home-page: +Author: Windriver +Author-email: info@windriver.com +License: Apache-2.0 + +Description: Management of /var/log filesystem + + +Platform: UNKNOWN diff --git a/middleware/filesystem/recipes-common/logmgmt/centos/build_srpm.data b/middleware/filesystem/recipes-common/logmgmt/centos/build_srpm.data new file mode 100644 index 0000000..9e65d54 --- /dev/null +++ b/middleware/filesystem/recipes-common/logmgmt/centos/build_srpm.data @@ -0,0 +1,4 @@ +SRC_DIR="logmgmt" +COPY_LIST_TO_TAR="scripts" +COPY_LIST="$SRC_DIR/LICENSE" +TIS_PATCH_VER=4 diff --git a/middleware/filesystem/recipes-common/logmgmt/centos/logmgmt.spec b/middleware/filesystem/recipes-common/logmgmt/centos/logmgmt.spec new file mode 100644 index 0000000..8f544c7 --- /dev/null +++ b/middleware/filesystem/recipes-common/logmgmt/centos/logmgmt.spec @@ -0,0 +1,73 @@ +Summary: Management of /var/log filesystem +Name: logmgmt +Version: 1.0 +Release: %{tis_patch_ver}%{?_tis_dist} +License: Apache-2.0 +Group: base +Packager: Wind River +URL: unknown +Source0: %{name}-%{version}.tar.gz +Source1: LICENSE + +BuildRequires: python-setuptools +BuildRequires: systemd-devel +Requires: systemd +Requires: python-daemon + +%description +Management of /var/log filesystem + +%define local_bindir /usr/bin/ +%define local_etc_initd /etc/init.d/ +%define local_etc_pmond /etc/pmon.d/ +%define pythonroot /usr/lib64/python2.7/site-packages + +%define debug_package %{nil} + +%prep +%setup + +# Remove bundled egg-info +rm -rf *.egg-info + +%build +%{__python} setup.py build + +%install +%{__python} setup.py install --root=$RPM_BUILD_ROOT \ + --install-lib=%{pythonroot} \ + --prefix=/usr \ + --install-data=/usr/share \ + --single-version-externally-managed + +install -d -m 755 %{buildroot}%{local_bindir} +install -p -D -m 700 scripts/bin/logmgmt %{buildroot}%{local_bindir}/logmgmt +install -p -D -m 700 scripts/bin/logmgmt_postrotate %{buildroot}%{local_bindir}/logmgmt_postrotate +install -p -D -m 700 scripts/bin/logmgmt_prerotate %{buildroot}%{local_bindir}/logmgmt_prerotate + +install -d -m 755 %{buildroot}%{local_etc_initd} +install -p -D -m 700 scripts/init.d/logmgmt %{buildroot}%{local_etc_initd}/logmgmt + +install -d -m 755 %{buildroot}%{local_etc_pmond} +install -p -D -m 644 scripts/pmon.d/logmgmt %{buildroot}%{local_etc_pmond}/logmgmt + +install -p -D -m 664 scripts/etc/systemd/system/logmgmt.service %{buildroot}%{_unitdir}/logmgmt.service + +%post +/usr/bin/systemctl enable logmgmt.service >/dev/null 2>&1 + +%clean +rm -rf $RPM_BUILD_ROOT + +%files +%defattr(-,root,root,-) +%doc LICENSE +%{local_bindir}/* +%{local_etc_initd}/* +%dir %{local_etc_pmond} +%{local_etc_pmond}/* +%{_unitdir}/logmgmt.service +%dir %{pythonroot}/%{name} +%{pythonroot}/%{name}/* +%dir %{pythonroot}/%{name}-%{version}.0-py2.7.egg-info +%{pythonroot}/%{name}-%{version}.0-py2.7.egg-info/* diff --git a/middleware/filesystem/recipes-common/logmgmt/logmgmt/LICENSE b/middleware/filesystem/recipes-common/logmgmt/logmgmt/LICENSE new file mode 100644 index 0000000..d645695 --- /dev/null +++ b/middleware/filesystem/recipes-common/logmgmt/logmgmt/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + 
http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/middleware/filesystem/recipes-common/logmgmt/logmgmt/logmgmt/__init__.py b/middleware/filesystem/recipes-common/logmgmt/logmgmt/logmgmt/__init__.py new file mode 100644 index 0000000..0da84c8 --- /dev/null +++ b/middleware/filesystem/recipes-common/logmgmt/logmgmt/logmgmt/__init__.py @@ -0,0 +1,6 @@ +""" +Copyright (c) 2014 Wind River Systems, Inc. + +SPDX-License-Identifier: Apache-2.0 + +""" diff --git a/middleware/filesystem/recipes-common/logmgmt/logmgmt/logmgmt/logmgmt.py b/middleware/filesystem/recipes-common/logmgmt/logmgmt/logmgmt/logmgmt.py new file mode 100644 index 0000000..d4b08b8 --- /dev/null +++ b/middleware/filesystem/recipes-common/logmgmt/logmgmt/logmgmt/logmgmt.py @@ -0,0 +1,263 @@ +""" +Copyright (c) 2014 Wind River Systems, Inc. + +SPDX-License-Identifier: Apache-2.0 + +""" + +################### +# IMPORTS +################### +import logging +import logging.handlers +import time +import os +import subprocess +import glob +import re +import sys + +from daemon import runner + +import prepostrotate + +################### +# CONSTANTS +################### +LOG_DIR = '/var/lib/logmgmt' +LOG_FILE = LOG_DIR + '/logmgmt.log' +PID_FILE = '/var/run/logmgmt.pid' +LOG_FILE_MAX_BYTES = 1024*1024 +LOG_FILE_BACKUP_COUNT = 5 + +PERCENT_FREE_CRITICAL = 10 +PERCENT_FREE_MAJOR = 20 + +LOGROTATE_PERIOD = 600 # Every ten minutes + +################### +# METHODS +################### +def start_polling(): + logmgmt_daemon = LogMgmtDaemon() + logmgmt_runner = runner.DaemonRunner(logmgmt_daemon) + logmgmt_runner.daemon_context.umask = 0o022 + logmgmt_runner.do_action() + +def handle_exception(exc_type, exc_value, exc_traceback): + """ + Exception handler to log any uncaught exceptions + """ + logging.error("Uncaught exception", + exc_info=(exc_type, exc_value, exc_traceback)) + sys.__excepthook__(exc_type, exc_value, exc_traceback) + + +################### +# CLASSES +################### +class LogMgmtDaemon(): + """ Daemon process representation of + the /var/log monitoring program + """ + def __init__(self): + # Daemon-specific init + self.stdin_path = '/dev/null' + self.stdout_path = '/dev/null' + self.stderr_path = '/dev/null' + self.pidfile_path = PID_FILE + self.pidfile_timeout = 5 + + self.monitored_files = [] + self.unmonitored_files = [] + + self.last_logrotate = 0 + self.last_check = 0 + + def configure_logging(self, level=logging.DEBUG): + my_exec = os.path.basename(sys.argv[0]) + + if not os.path.exists(LOG_DIR): + os.mkdir(LOG_DIR, 0755) + + log_format = '%(asctime)s: ' \ + + my_exec + '[%(process)s]: ' \ + + '%(filename)s(%(lineno)s): ' \ + + '%(levelname)s: %(message)s' + + fmt = logging.Formatter(fmt=log_format) + + # Use python's log rotation, rather than logrotate + handler = logging.handlers.RotatingFileHandler( + LOG_FILE, + maxBytes=LOG_FILE_MAX_BYTES, + backupCount=LOG_FILE_BACKUP_COUNT) + + my_logger = logging.getLogger() + my_logger.setLevel(level) + + handler.setFormatter(fmt) + handler.setLevel(level) + my_logger.addHandler(handler) + + # Log uncaught exceptions to file + sys.excepthook = handle_exception + + def run(self): + self.configure_logging() + 
+        while True:
+            self.check_var_log()
+
+            # run/poll every 1 min
+            time.sleep(60)
+
+    def get_percent_free(self):
+        usage = os.statvfs('/var/log')
+        return ((usage.f_bavail * 100) / usage.f_blocks)
+
+    def get_monitored_files(self):
+        self.monitored_files = []
+
+        try:
+            output = subprocess.check_output(['/usr/sbin/logrotate', '-d', '/etc/logrotate.conf'],
+                                             stderr=subprocess.STDOUT)
+
+            # logrotate debug output lists each configured log on a
+            # "considering log <path>" line; collect the live file plus
+            # its rotated .N.gz and .NN.gz siblings
+            for line in output.split('\n'):
+                fields = line.split()
+                if len(fields) > 0 and fields[0] == "considering":
+                    self.monitored_files.extend(glob.glob(fields[2]))
+                    self.monitored_files.extend(glob.glob(fields[2] + '.[0-9].gz'))
+                    self.monitored_files.extend(glob.glob(fields[2] + '.[0-9][0-9].gz'))
+        except Exception:
+            logging.error('Failed to determine monitored files')
+
+    def get_unmonitored_files(self):
+        self.unmonitored_files = []
+
+        try:
+            output = subprocess.check_output(['find', '/var/log', '-type', 'f'])
+
+            for fname in output.split('\n'):
+                if fname in self.monitored_files:
+                    continue
+
+                # Ignore some files
+                if '/var/log/puppet' in fname \
+                        or '/var/log/dmesg' in fname \
+                        or '/var/log/rabbitmq' in fname \
+                        or '/var/log/lastlog' in fname:
+                    continue
+
+                if os.path.exists(fname):
+                    self.unmonitored_files.append(fname)
+
+        except Exception:
+            logging.error('Failed to determine unmonitored files')
+
+    def purge_files(self, index):
+        pattern = re.compile(r'.*\.([0-9]+)\.gz')
+        for fname in sorted(self.monitored_files):
+            result = pattern.match(fname)
+            if result:
+                if int(result.group(1)) >= index:
+                    logging.info("Purging file: %s" % fname)
+                    try:
+                        os.remove(fname)
+                    except OSError as e:
+                        logging.error('Failed to remove file: %s', e)
+
+    def run_logrotate(self):
+        self.last_logrotate = int(time.time())
+        try:
+            subprocess.check_call(['/usr/sbin/logrotate', '/etc/logrotate.conf'])
+        except Exception:
+            logging.error('Failed logrotate')
+
+    def run_logrotate_forced(self):
+        self.last_logrotate = int(time.time())
+        try:
+            subprocess.check_call(['/usr/sbin/logrotate', '-f', '/etc/logrotate.conf'])
+        except Exception:
+            logging.error('Failed logrotate -f')
+
+    def timecheck(self):
+        # If it's been more than a couple of minutes since the last timecheck,
+        # there could have been a large time correction, which would skew
+        # our timing. Reset the logrotate timestamp to ensure we don't miss anything
+        now = int(time.time())
+
+        if self.last_check > now or (now - self.last_check) > 120:
+            self.last_logrotate = 0
+
+        self.last_check = now
+
+    def check_var_log(self):
+        self.timecheck()
+
+        try:
+            prepostrotate.ensure_bash_log_locked_down()
+        except Exception as e:
+            logging.exception('Failed to ensure bash.log locked: %s', e)
+
+        pf = self.get_percent_free()
+
+        if pf > PERCENT_FREE_CRITICAL:
+            # We've got more than 10% free space, so just run logrotate every ten minutes
+            now = int(time.time())
+            if self.last_logrotate > now or (now - self.last_logrotate) > LOGROTATE_PERIOD:
+                logging.info("Running logrotate")
+                self.run_logrotate()
+
+            return
+
+        logging.warning("Reached critical disk usage for /var/log: %d%% free" % pf)
+
+        # We're running out of disk space, so we need to start deleting files
+        for index in range(20, 11, -1):
+            logging.info("/var/log is %d%% free. Purging rotated .%d.gz files to free space" % (pf, index))
+            self.get_monitored_files()
+            self.purge_files(index)
+            pf = self.get_percent_free()
+
+            if pf >= PERCENT_FREE_MAJOR:
+                # We've freed up enough space. Do a logrotate and leave
+                logging.info("/var/log is %d%% free. 
Running logrotate" % pf) + self.run_logrotate() + return + + # We still haven't freed up enough space, so try a logrotate + logging.info("/var/log is %d%% free. Running logrotate" % pf) + self.run_logrotate() + + pf = self.get_percent_free() + if pf >= PERCENT_FREE_MAJOR: + return + + # Try a forced rotate + logging.info("/var/log is %d%% free. Running forced logrotate" % pf) + self.run_logrotate_forced() + + pf = self.get_percent_free() + if pf >= PERCENT_FREE_MAJOR: + return + + # Start deleting unmonitored files + self.get_monitored_files() + self.get_unmonitored_files() + logging.info("/var/log is %d%% free. Deleting unmonitored files to free space" % pf) + for fname in sorted(self.unmonitored_files, key=os.path.getsize, reverse=True): + logging.info("Deleting unmonitored file: %s" % fname) + try: + os.remove(fname) + except OSError as e: + logging.error('Failed to remove file: %s', e) + pf = self.get_percent_free() + if pf >= PERCENT_FREE_MAJOR: + logging.info("/var/log is %d%% free." % pf) + return + + # Nothing else to be done + logging.info("/var/log is %d%% free." % pf) + return + diff --git a/middleware/filesystem/recipes-common/logmgmt/logmgmt/logmgmt/prepostrotate.py b/middleware/filesystem/recipes-common/logmgmt/logmgmt/logmgmt/prepostrotate.py new file mode 100644 index 0000000..242c795 --- /dev/null +++ b/middleware/filesystem/recipes-common/logmgmt/logmgmt/logmgmt/prepostrotate.py @@ -0,0 +1,60 @@ +""" +Copyright (c) 2017 Wind River Systems, Inc. + +SPDX-License-Identifier: Apache-2.0 + +""" + +################### +# IMPORTS +################### + +import array +import fcntl +import struct +import glob + +EXT2_APPEND_FL = 0x00000020 +EXT4_EXTENTS_FL = 0x00080000 + +EXT_IOC_SETFLAGS = 0x40086602 +EXT_IOC_GETFLAGS = 0x80086601 + + +def _is_file_append_only(filename): + buf = array.array('h', [0]) + with open(filename, 'r') as f: + fcntl.ioctl(f.fileno(), EXT_IOC_GETFLAGS, buf) + has_append_only = (buf.tolist()[0] & EXT2_APPEND_FL) == EXT2_APPEND_FL + return has_append_only + + +def _set_file_attrs(filename, attrs): + flags = struct.pack('i', attrs) + with open(filename, 'r') as f: + fcntl.ioctl(f.fileno(), EXT_IOC_SETFLAGS, flags) + + +def chattr_add_append_only(filename): + _set_file_attrs(filename, EXT2_APPEND_FL | EXT4_EXTENTS_FL) + + +def chattr_remove_append_only(filename): + _set_file_attrs(filename, EXT4_EXTENTS_FL) + + +def prerotate(): + for filename in glob.glob("/var/log/bash.log*"): + if _is_file_append_only(filename): + chattr_remove_append_only(filename) + + +def postrotate(): + for filename in glob.glob("/var/log/bash.log*"): + if not _is_file_append_only(filename): + chattr_add_append_only(filename) + + +def ensure_bash_log_locked_down(): + # need the same functionality as postrotate + postrotate() diff --git a/middleware/filesystem/recipes-common/logmgmt/logmgmt/setup.py b/middleware/filesystem/recipes-common/logmgmt/logmgmt/setup.py new file mode 100644 index 0000000..34db8fb --- /dev/null +++ b/middleware/filesystem/recipes-common/logmgmt/logmgmt/setup.py @@ -0,0 +1,19 @@ +#!/usr/bin/env python + +""" +Copyright (c) 2014 Wind River Systems, Inc. 
+
+SPDX-License-Identifier: Apache-2.0
+
+"""
+
+import setuptools
+
+setuptools.setup(name='logmgmt',
+                 version='1.0.0',
+                 description='logmgmt',
+                 license='Apache-2.0',
+                 packages=['logmgmt'],
+                 entry_points={
+                 }
+)
diff --git a/middleware/filesystem/recipes-common/logmgmt/scripts/bin/logmgmt b/middleware/filesystem/recipes-common/logmgmt/scripts/bin/logmgmt
new file mode 100644
index 0000000..bb02df9
--- /dev/null
+++ b/middleware/filesystem/recipes-common/logmgmt/scripts/bin/logmgmt
@@ -0,0 +1,18 @@
+#!/usr/bin/env python
+
+"""
+Copyright (c) 2014 Wind River Systems, Inc.
+
+SPDX-License-Identifier: Apache-2.0
+
+"""
+
+import sys
+
+try:
+    from logmgmt import logmgmt
+except (ImportError, EnvironmentError) as e:
+    print >> sys.stderr, "Error importing logmgmt: ", str(e)
+    sys.exit(1)
+
+logmgmt.start_polling()
diff --git a/middleware/filesystem/recipes-common/logmgmt/scripts/bin/logmgmt_postrotate b/middleware/filesystem/recipes-common/logmgmt/scripts/bin/logmgmt_postrotate
new file mode 100644
index 0000000..dfdb097
--- /dev/null
+++ b/middleware/filesystem/recipes-common/logmgmt/scripts/bin/logmgmt_postrotate
@@ -0,0 +1,19 @@
+#!/usr/bin/env python
+
+"""
+Copyright (c) 2014 Wind River Systems, Inc.
+
+SPDX-License-Identifier: Apache-2.0
+
+"""
+
+import sys
+
+try:
+    from logmgmt import prepostrotate
+except (ImportError, EnvironmentError) as e:
+    print >> sys.stderr, "Error importing prepostrotate: ", str(e)
+    sys.exit(1)
+
+prepostrotate.postrotate()
+
diff --git a/middleware/filesystem/recipes-common/logmgmt/scripts/bin/logmgmt_prerotate b/middleware/filesystem/recipes-common/logmgmt/scripts/bin/logmgmt_prerotate
new file mode 100644
index 0000000..f641d61
--- /dev/null
+++ b/middleware/filesystem/recipes-common/logmgmt/scripts/bin/logmgmt_prerotate
@@ -0,0 +1,19 @@
+#!/usr/bin/env python
+
+"""
+Copyright (c) 2014 Wind River Systems, Inc.
+
+SPDX-License-Identifier: Apache-2.0
+
+"""
+
+import sys
+
+try:
+    from logmgmt import prepostrotate
+except (ImportError, EnvironmentError) as e:
+    print >> sys.stderr, "Error importing prepostrotate: ", str(e)
+    sys.exit(1)
+
+prepostrotate.prerotate()
+
diff --git a/middleware/filesystem/recipes-common/logmgmt/scripts/etc/systemd/system/logmgmt.service b/middleware/filesystem/recipes-common/logmgmt/scripts/etc/systemd/system/logmgmt.service
new file mode 100644
index 0000000..8fdc05b
--- /dev/null
+++ b/middleware/filesystem/recipes-common/logmgmt/scripts/etc/systemd/system/logmgmt.service
@@ -0,0 +1,14 @@
+[Unit]
+Description=Titanium Cloud Log Management
+After=network.target syslog-ng.service iscsid.service sw-patch.service
+Before=config.service pmon.service
+
+[Service]
+Type=forking
+ExecStart=/etc/init.d/logmgmt start
+ExecStop=/etc/init.d/logmgmt stop
+ExecReload=/etc/init.d/logmgmt restart
+PIDFile=/var/run/logmgmt.pid
+
+[Install]
+WantedBy=multi-user.target
diff --git a/middleware/filesystem/recipes-common/logmgmt/scripts/init.d/logmgmt b/middleware/filesystem/recipes-common/logmgmt/scripts/init.d/logmgmt
new file mode 100644
index 0000000..b736215
--- /dev/null
+++ b/middleware/filesystem/recipes-common/logmgmt/scripts/init.d/logmgmt
@@ -0,0 +1,96 @@
+#!/bin/sh
+#
+# Copyright (c) 2014 Wind River Systems, Inc.
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+### BEGIN INIT INFO
+# Provides: logmgmt
+# Default-Start: 2 3 4 5
+# Default-Stop: 0 1 6
+# Short-Description: Daemon for /var/log management
+# Description: Daemon for /var/log management
+### END INIT INFO
+
+DESC="logmgmt"
+DAEMON="/usr/bin/logmgmt"
+RUNDIR="/var/run"
+PIDFILE=$RUNDIR/$DESC.pid
+
+start()
+{
+    if [ -e $PIDFILE ]; then
+        PIDDIR=/proc/$(cat $PIDFILE)
+        if [ -d ${PIDDIR} ]; then
+            echo "$DESC already running."
+            exit 0
+        else
+            echo "Removing stale PID file $PIDFILE"
+            rm -f $PIDFILE
+        fi
+    fi
+
+    echo -n "Starting $DESC..."
+    mkdir -p $RUNDIR
+    start-stop-daemon --start --quiet \
+        --pidfile ${PIDFILE} --exec ${DAEMON} start
+
+    #--make-pidfile
+
+    if [ $? -eq 0 ]; then
+        echo "done."
+    else
+        echo "failed."
+        exit 1
+    fi
+}
+
+stop()
+{
+    echo -n "Stopping $DESC..."
+    start-stop-daemon --stop --quiet --pidfile $PIDFILE
+    if [ $? -eq 0 ]; then
+        echo "done."
+    else
+        echo "failed."
+    fi
+    rm -f $PIDFILE
+}
+
+status()
+{
+    pid=`cat $PIDFILE 2>/dev/null`
+    if [ -n "$pid" ]; then
+        if ps -p $pid >/dev/null 2>&1 ; then
+            echo "$DESC is running"
+            exit 0
+        else
+            echo "$DESC is not running but has pid file"
+            exit 1
+        fi
+    fi
+    echo "$DESC is not running"
+    exit 3
+}
+
+case "$1" in
+    start)
+        start
+        ;;
+    stop)
+        stop
+        ;;
+    restart|force-reload|reload)
+        stop
+        start
+        ;;
+    status)
+        status
+        ;;
+    *)
+        echo "Usage: $0 {start|stop|force-reload|restart|reload|status}"
+        exit 1
+        ;;
+esac
+
+exit 0
diff --git a/middleware/filesystem/recipes-common/logmgmt/scripts/pmon.d/logmgmt b/middleware/filesystem/recipes-common/logmgmt/scripts/pmon.d/logmgmt
new file mode 100644
index 0000000..5e12bf2
--- /dev/null
+++ b/middleware/filesystem/recipes-common/logmgmt/scripts/pmon.d/logmgmt
@@ -0,0 +1,24 @@
+;
+; Copyright (c) 2014-2016 Wind River Systems, Inc.
+;
+; SPDX-License-Identifier: Apache-2.0
+;
+[process]
+process     = logmgmt
+pidfile     = /var/run/logmgmt.pid
+script      = /etc/init.d/logmgmt
+style       = lsb      ; ocf or lsb
+severity    = minor    ; Process failure severity
+                       ;   critical : host is failed
+                       ;   major    : host is degraded
+                       ;   minor    : log is generated
+restarts    = 5        ; Number of back-to-back unsuccessful restarts before severity assertion
+interval    = 10       ; Number of seconds to wait between back-to-back unsuccessful restarts
+debounce    = 20       ; Number of seconds the process needs to run before declaring
+                       ;   it as running O.K. after a restart.
+                       ;   Time after which back-to-back restart count is cleared.
+startuptime = 10       ; Seconds to wait after process start before starting the debounce monitor
+mode        = passive  ; Monitoring mode: passive (default) or active
+                       ;   passive: process death monitoring (default: always)
+                       ;   active: heartbeat monitoring, i.e.
request / response messaging + diff --git a/middleware/filesystem/recipes-control/filesystem-scripts/.gitignore b/middleware/filesystem/recipes-control/filesystem-scripts/.gitignore new file mode 100644 index 0000000..c03fe64 --- /dev/null +++ b/middleware/filesystem/recipes-control/filesystem-scripts/.gitignore @@ -0,0 +1,6 @@ +!.distro +.distro/centos7/rpmbuild/RPMS +.distro/centos7/rpmbuild/SRPMS +.distro/centos7/rpmbuild/BUILD +.distro/centos7/rpmbuild/BUILDROOT +.distro/centos7/rpmbuild/SOURCES/filesystem-scripts*tar.gz diff --git a/middleware/filesystem/recipes-control/filesystem-scripts/LICENSE b/middleware/filesystem/recipes-control/filesystem-scripts/LICENSE new file mode 100644 index 0000000..d645695 --- /dev/null +++ b/middleware/filesystem/recipes-control/filesystem-scripts/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/middleware/filesystem/recipes-control/filesystem-scripts/PKG-INFO b/middleware/filesystem/recipes-control/filesystem-scripts/PKG-INFO new file mode 100644 index 0000000..5643c29 --- /dev/null +++ b/middleware/filesystem/recipes-control/filesystem-scripts/PKG-INFO @@ -0,0 +1,13 @@ +Metadata-Version: 1.1 +Name: filesystem-scripts +Version: 1.0 +Summary: File System Script Package +Home-page: +Author: Windriver +Author-email: info@windriver.com +License: Apache-2.0 + +Description: File System Script Package + + +Platform: UNKNOWN diff --git a/middleware/filesystem/recipes-control/filesystem-scripts/centos/build_srpm.data b/middleware/filesystem/recipes-control/filesystem-scripts/centos/build_srpm.data new file mode 100644 index 0000000..04f8d4f --- /dev/null +++ b/middleware/filesystem/recipes-control/filesystem-scripts/centos/build_srpm.data @@ -0,0 +1,3 @@ +SRC_DIR="filesystem-scripts-1.0" +COPY_LIST="$PKG_BASE/LICENSE" +TIS_PATCH_VER=2 diff --git a/middleware/filesystem/recipes-control/filesystem-scripts/centos/filesystem-scripts.spec b/middleware/filesystem/recipes-control/filesystem-scripts/centos/filesystem-scripts.spec new file mode 100644 index 0000000..07a4e60 --- /dev/null +++ b/middleware/filesystem/recipes-control/filesystem-scripts/centos/filesystem-scripts.spec @@ -0,0 +1,56 @@ +Summary: File System Script Package +Name: filesystem-scripts +Version: 1.0 +Release: %{tis_patch_ver}%{?_tis_dist} +License: Apache-2.0 +Group: base +Packager: Wind River +URL: unknown +Source0: %{name}-%{version}.tar.gz +Source1: LICENSE + +BuildRequires: systemd-devel +Requires: /bin/systemctl + +%description +File System Script Package + +%define local_bindir /usr/bin/ +%define local_etc_initd /etc/init.d/ +%define local_ocfdir /usr/lib/ocf/resource.d/platform/ + +%define debug_package %{nil} + +%prep +%setup + +%build + +%install + +install -d -m 755 %{buildroot}%{local_etc_initd} +install -p -D -m 755 uexportfs %{buildroot}%{local_etc_initd}/uexportfs + +install -d -m 755 %{buildroot}%{local_ocfdir} +install -p -D -m 755 nfsserver-mgmt %{buildroot}%{local_ocfdir}/nfsserver-mgmt + +install -d -m 755 %{buildroot}%{local_bindir} +install -p -D -m 755 nfs-mount %{buildroot}%{local_bindir}/nfs-mount + +install -p -D -m 644 uexportfs.service %{buildroot}%{_unitdir}/uexportfs.service + +%post +/bin/systemctl enable uexportfs.service + + 
+%clean +rm -rf $RPM_BUILD_ROOT + +%files +%license LICENSE +%defattr(-,root,root,-) +%{local_bindir}/* +%{local_etc_initd}/* +%dir %{local_ocfdir} +%{local_ocfdir}/* +%{_unitdir}/uexportfs.service diff --git a/middleware/filesystem/recipes-control/filesystem-scripts/filesystem-scripts-1.0/LICENSE b/middleware/filesystem/recipes-control/filesystem-scripts/filesystem-scripts-1.0/LICENSE new file mode 100644 index 0000000..d645695 --- /dev/null +++ b/middleware/filesystem/recipes-control/filesystem-scripts/filesystem-scripts-1.0/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format.  We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/middleware/filesystem/recipes-control/filesystem-scripts/filesystem-scripts-1.0/nfs-mount b/middleware/filesystem/recipes-control/filesystem-scripts/filesystem-scripts-1.0/nfs-mount
new file mode 100644
index 0000000..dc35a20
--- /dev/null
+++ b/middleware/filesystem/recipes-control/filesystem-scripts/filesystem-scripts-1.0/nfs-mount
@@ -0,0 +1,81 @@
+#!/bin/bash
+#
+# Copyright (c) 2015 Wind River Systems, Inc.
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+
+#
+# This utility is a wrapper around mount, to provide a single script
+# with preferred options for NFS mounting. It takes exactly two arguments:
+# - network source path
+# - local destination path
+#
+
+function show_help()
+{
+    cat >&2 << EOF
+$(basename $0):
+    Wrapper around "mount" to provide a set of default options for NFS mounts.
+    This utility takes exactly two arguments:
+    - network source path
+    - local destination path
+
+EOF
+    exit 1
+}
+
+function get_proto()
+{
+    local host=$1
+
+    # Check /etc/hosts for the hostname
+    local ipaddr=$(awk -v host=$host '$2 == host {print $1}' /etc/hosts)
+    if [[ "$ipaddr" =~ ^[0-9][0-9]*\.[0-9][0-9]*\.[0-9][0-9]*\.[0-9][0-9]*$ ]]
+    then
+        echo "udp"
+        return
+    fi
+    if [[ "$ipaddr" =~ ^[0-9a-z]*\:[0-9a-z\:]*$ ]]
+    then
+        echo "udp6"
+        return
+    fi
+
+    # Try a DNS query
+    ipaddr=$(dig +short ANY $host)
+    if [[ "$ipaddr" =~ ^[0-9][0-9]*\.[0-9][0-9]*\.[0-9][0-9]*\.[0-9][0-9]*$ ]]
+    then
+        echo "udp"
+        return
+    fi
+    if [[ "$ipaddr" =~ ^[0-9a-z]*\:[0-9a-z\:]*$ ]]
+    then
+        echo "udp6"
+        return
+    fi
+
+    # Default to udp to avoid passing an invalid option
+    echo "udp"
+    return
+}
+
+
+# BASH_ARGC is only populated when the extdebug shell option is set, so it
+# cannot be relied on here; use $# for the argument count instead.
+if [[ $# -ne 2 ]]
+then
+    show_help
+fi
+
+if mountpoint -q $2
+then
+    echo "$2 is already mounted. Not mounting."
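+    # Exiting 0 here is deliberate: an already-mounted target is treated as
+    # success, so repeated invocations of this wrapper are harmless.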
+    exit
+fi
+
+HOST=$(echo $1 | awk -F ':' '{print $1}')
+declare proto=$(get_proto $HOST)
+declare -i timeo=30
+declare -i rw_size=1024
+declare DEFAULT_OPTS="timeo=$timeo,proto=$proto,vers=3,rsize=$rw_size,wsize=$rw_size"
+
+exec mount -t nfs -o $DEFAULT_OPTS $1 $2
+
diff --git a/middleware/filesystem/recipes-control/filesystem-scripts/filesystem-scripts-1.0/nfsserver-mgmt b/middleware/filesystem/recipes-control/filesystem-scripts/filesystem-scripts-1.0/nfsserver-mgmt
new file mode 100644
index 0000000..1bd26f8
--- /dev/null
+++ b/middleware/filesystem/recipes-control/filesystem-scripts/filesystem-scripts-1.0/nfsserver-mgmt
@@ -0,0 +1,307 @@
+#!/bin/bash
+#
+# Copyright (c) 2013-2014 Wind River Systems, Inc.
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+#
+# Support: www.windriver.com
+#
+# Purpose: This resource agent manages the File System Service
+#
+# RA Spec:
+#
+# http://www.opencf.org/cgi-bin/viewcvs.cgi/specs/ra/resource-agent-api.txt?rev=HEAD
+#
+#######################################################################
+# Initialization:
+
+: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat}
+. ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs
+
+#######################################################################
+
+usage() {
+    cat <<END
+usage: $0 (start|stop|status|reload|monitor|validate-all|meta-data|usage|help)
+END
+    return ${OCF_SUCCESS}
+}
+
+meta_data() {
+    cat <<END
+<?xml version="1.0"?>
+<!DOCTYPE resource-agent SYSTEM "ra-api-1.dtd">
+<resource-agent name="nfsserver-mgmt">
+<version>1.0</version>
+
+<longdesc lang="en">
+This OCF Compliant Resource Agent makes sure the nfs server is in the
+correct state.
+</longdesc>
+
+<shortdesc lang="en">Makes sure the NFS Server is in the correct state.</shortdesc>
+
+<parameters>
+<parameter name="exports">
+<longdesc lang="en">List of exports that should not be exported anymore</longdesc>
+<shortdesc lang="en">List of exports</shortdesc>
+<content type="string" default=""/>
+</parameter>
+<parameter name="mounts">
+<longdesc lang="en">List of mounts that should not be mounted anymore</longdesc>
+<shortdesc lang="en">List of mounts</shortdesc>
+<content type="string" default=""/>
+</parameter>
+</parameters>
+
+<actions>
+<action name="start" timeout="10s"/>
+<action name="stop" timeout="10s"/>
+<action name="status" timeout="10s"/>
+<action name="reload" timeout="10s"/>
+<action name="monitor" timeout="10s" interval="10s"/>
+<action name="validate-all" timeout="10s"/>
+<action name="meta-data" timeout="10s"/>
+<action name="usage" timeout="10s"/>
+</actions>
+</resource-agent>
+END
+    return ${OCF_SUCCESS}
+}
+
+nfs_server_mgmt_validate() {
+    check_binary service
+    check_binary grep
+    check_binary cat
+    check_binary exportfs
+    check_binary fuser
+    check_binary mount
+    check_binary umount
+    check_binary logger
+    return ${OCF_SUCCESS}
+}
+
+nfs_server_mgmt_status() {
+    return ${OCF_NOT_RUNNING}
+}
+
+nfs_server_mgmt_monitor () {
+    return ${OCF_NOT_RUNNING}
+}
+
+check_exportfs () {
+    local CLIENTSPEC_DIR=$1
+    local rc
+
+    OLD_IFS="${IFS}"
+    IFS=":"
+    STR_ARRAY=( $CLIENTSPEC_DIR )
+    IFS="${OLD_IFS}"
+
+    CLIENTSPEC=${STR_ARRAY[0]}
+    DIR=${STR_ARRAY[1]}
+
+    grep "${CLIENTSPEC}" /proc/fs/nfsd/exports | grep "${DIR}" > /dev/null 2>&1
+    rc=$?
+    if [ ${rc} -eq 0 ]
+    then
+        return 1
+    fi
+
+    exportfs | grep "${CLIENTSPEC}" | grep "${DIR}" > /dev/null 2>&1
+    rc=$?
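+    # A zero rc means the clientspec/dir pair still appears in the exportfs
+    # output; report it as found (return 1) so the caller can schedule an
+    # NFS server restart.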
+    if [ ${rc} -eq 0 ]
+    then
+        return 1
+    fi
+
+    return 0
+}
+
+do_nfs_restart () {
+    while true
+    do
+        ocf_log info "NFS Server restart"
+        service nfsserver restart
+        ocf_log info "NFS Server restart complete"
+
+        service nfsserver status | grep stopped > /dev/null 2>&1 || {
+            ocf_log info "NFS Server is now running"
+            return ${OCF_SUCCESS}
+        }
+
+        ocf_log error "NFS Server still not running"
+        sleep 5
+    done
+}
+
+do_umount () {
+    local DEV_DIR=$1
+
+    OLD_IFS="${IFS}"
+    IFS=":"
+    STR_ARRAY=( $DEV_DIR )
+    IFS="${OLD_IFS}"
+
+    DEV=${STR_ARRAY[0]}
+    DIR=${STR_ARRAY[1]}
+
+    mount | grep "${DEV}" | grep "${DIR}" > /dev/null 2>&1 || {
+        ocf_log info "${DEV} ${DIR} not mounted"
+        return ${OCF_SUCCESS}
+    }
+
+    ocf_log info "umount on ${DEV} ${DIR}"
+    umount ${DEV} ${DIR} > /dev/null 2>&1
+
+    while true
+    do
+        mount | grep "${DEV}" | grep "${DIR}" > /dev/null 2>&1 || {
+            ocf_log info "unmounted ${DEV} ${DIR} successfully"
+            return ${OCF_SUCCESS}
+        }
+
+        if fuser -KILL -m -k ${DIR}
+        then
+            ocf_log info "Processes killed using ${DEV} ${DIR}"
+        else
+            ocf_log info "No processes using ${DEV} ${DIR}"
+        fi
+
+        ocf_log info "umount force on ${DEV} ${DIR}"
+        umount -f ${DEV} ${DIR} > /dev/null 2>&1
+
+        sleep 2
+
+        mount | grep "${DEV}" | grep "${DIR}" > /dev/null 2>&1 || {
+            ocf_log info "unmounted ${DEV} ${DIR} successfully"
+            return ${OCF_SUCCESS}
+        }
+
+        do_nfs_restart
+    done
+
+    return ${OCF_SUCCESS}
+}
+
+nfs_server_mgmt_start () {
+    service nfsserver status | grep stopped > /dev/null 2>&1 || {
+        ocf_log info "NFS Server is running"
+        return ${OCF_SUCCESS}
+    }
+
+    ocf_log error "NFS Server not running, restarting"
+    do_nfs_restart
+    return $?
+}
+
+nfs_server_mgmt_stop () {
+    local need_nfs_restart=0
+    local rc
+
+    # Make sure exports are stopped...
+    while IFS=',' read -ra CLIENTSPEC_DIRS
+    do
+        for CLIENTSPEC_DIR in "${CLIENTSPEC_DIRS[@]}"
+        do
+            check_exportfs ${CLIENTSPEC_DIR}
+            rc=$?
+            if [ ${rc} -eq 1 ]
+            then
+                ocf_log info "export ${CLIENTSPEC_DIR} found"
+                let need_nfs_restart=1
+            fi
+        done
+    done <<< "${OCF_RESKEY_exports}"
+
+    if [ ${need_nfs_restart} -ne 0 ]
+    then
+        do_nfs_restart
+        rc=$?
+        if [ ${rc} -ne ${OCF_SUCCESS} ]
+        then
+            return ${rc}
+        fi
+    fi
+
+    # Make sure mounts are not mounted.
+    while IFS=',' read -ra MOUNTS
+    do
+        for MOUNT in "${MOUNTS[@]}"
+        do
+            do_umount ${MOUNT}
+        done
+    done <<< "${OCF_RESKEY_mounts}"
+
+    return ${OCF_SUCCESS}
+}
+
+nfs_server_mgmt_reload () {
+    local rc
+
+    nfs_server_mgmt_stop
+    rc=$?
+    if [ $rc -eq ${OCF_SUCCESS} ]
+    then
+        nfs_server_mgmt_start
+        rc=$?
+        if [ $rc -eq ${OCF_SUCCESS} ]
+        then
+            ocf_log info "NFS Server Management reloaded"
+        fi
+    fi
+
+    if [ ${rc} -ne ${OCF_SUCCESS} ]
+    then
+        ocf_log info "NFS Server Management reload failed (rc=${rc})"
+    fi
+
+    return ${rc}
+}
+
+case ${__OCF_ACTION} in
+    meta-data)  meta_data
+                exit ${OCF_SUCCESS}
+                ;;
+    usage|help) usage
+                exit ${OCF_SUCCESS}
+                ;;
+esac
+
+# Anything except meta-data and help must pass validation
+nfs_server_mgmt_validate || exit $?
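+
+# Example (hypothetical values): when this agent is driven by the OCF
+# framework, its parameters arrive as OCF_RESKEY_* environment variables:
+# "exports" is a comma-separated list of clientspec:directory pairs and
+# "mounts" is a comma-separated list of device:directory pairs, e.g.
+#   OCF_RESKEY_exports="*:/exports/home,10.10.10.0/24:/exports/data"
+#   OCF_RESKEY_mounts="/dev/drbd2:/opt/backups"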
+
+case ${__OCF_ACTION} in
+    start)        nfs_server_mgmt_start
+                  ;;
+    stop)         nfs_server_mgmt_stop
+                  ;;
+    status)       nfs_server_mgmt_status
+                  ;;
+    reload)       nfs_server_mgmt_reload
+                  ;;
+    monitor)      nfs_server_mgmt_monitor
+                  ;;
+    validate-all) nfs_server_mgmt_validate
+                  ;;
+    *)            usage
+                  exit ${OCF_ERR_UNIMPLEMENTED}
+                  ;;
+esac
diff --git a/middleware/filesystem/recipes-control/filesystem-scripts/filesystem-scripts-1.0/uexportfs b/middleware/filesystem/recipes-control/filesystem-scripts/filesystem-scripts-1.0/uexportfs
new file mode 100644
index 0000000..ba3c103
--- /dev/null
+++ b/middleware/filesystem/recipes-control/filesystem-scripts/filesystem-scripts-1.0/uexportfs
@@ -0,0 +1,19 @@
+#! /bin/sh
+#
+# Copyright (c) 2013-2014 Wind River Systems, Inc.
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+
+### BEGIN INIT INFO
+# Provides:          uexportfs
+# Default-Start:     3
+# Default-Stop:
+# Short-Description: Used to remove all exported filesystems at boot
+### END INIT INFO
+
+PATH=/sbin:/bin:/usr/sbin:/usr/bin
+
+exportfs -r > /dev/null 2>&1
+
+exit 0
diff --git a/middleware/filesystem/recipes-control/filesystem-scripts/filesystem-scripts-1.0/uexportfs.service b/middleware/filesystem/recipes-control/filesystem-scripts/filesystem-scripts-1.0/uexportfs.service
new file mode 100644
index 0000000..d687ad6
--- /dev/null
+++ b/middleware/filesystem/recipes-control/filesystem-scripts/filesystem-scripts-1.0/uexportfs.service
@@ -0,0 +1,12 @@
+[Unit]
+Description=Titanium Cloud Filesystem Initialization
+After=network.target nfscommon.service
+
+[Service]
+Type=oneshot
+RemainAfterExit=yes
+ExecStart=/etc/init.d/uexportfs start
+ExecStop=/etc/init.d/uexportfs stop
+
+[Install]
+WantedBy=multi-user.target
diff --git a/middleware/io-monitor/recipes-common/io-monitor/.gitignore b/middleware/io-monitor/recipes-common/io-monitor/.gitignore
new file mode 100644
index 0000000..246ca5c
--- /dev/null
+++ b/middleware/io-monitor/recipes-common/io-monitor/.gitignore
@@ -0,0 +1,6 @@
+!.distro
+.distro/centos7/rpmbuild/RPMS
+.distro/centos7/rpmbuild/SRPMS
+.distro/centos7/rpmbuild/BUILD
+.distro/centos7/rpmbuild/BUILDROOT
+.distro/centos7/rpmbuild/SOURCES/io-monitor*tar.gz
diff --git a/middleware/io-monitor/recipes-common/io-monitor/LICENSE b/middleware/io-monitor/recipes-common/io-monitor/LICENSE
new file mode 100644
index 0000000..d645695
--- /dev/null
+++ b/middleware/io-monitor/recipes-common/io-monitor/LICENSE
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/middleware/io-monitor/recipes-common/io-monitor/PKG-INFO b/middleware/io-monitor/recipes-common/io-monitor/PKG-INFO new file mode 100644 index 0000000..e282248 --- /dev/null +++ b/middleware/io-monitor/recipes-common/io-monitor/PKG-INFO @@ -0,0 +1,13 @@ +Metadata-Version: 1.1 +Name: io-monitor +Version: 1.0 +Summary: Poll iostat and raise alarms for excessive conditions +Home-page: +Author: Windriver +Author-email: info@windriver.com +License: Apache-2.0 + +Description: Poll iostat and raise alarms for excessive conditions + + +Platform: UNKNOWN diff --git a/middleware/io-monitor/recipes-common/io-monitor/centos/build_srpm.data b/middleware/io-monitor/recipes-common/io-monitor/centos/build_srpm.data new file mode 100644 index 0000000..1257df4 --- /dev/null +++ b/middleware/io-monitor/recipes-common/io-monitor/centos/build_srpm.data @@ -0,0 +1,3 @@ +SRC_DIR="io-monitor" +COPY_LIST_TO_TAR="files scripts" +TIS_PATCH_VER=6 diff --git a/middleware/io-monitor/recipes-common/io-monitor/centos/io-monitor.spec b/middleware/io-monitor/recipes-common/io-monitor/centos/io-monitor.spec new file mode 100644 index 0000000..0bacae0 --- /dev/null +++ b/middleware/io-monitor/recipes-common/io-monitor/centos/io-monitor.spec @@ -0,0 +1,81 @@ +Summary: Poll iostat and raise alarms for excessive conditions +Name: io-monitor +Version: 1.0 +Release: %{tis_patch_ver}%{?_tis_dist} +License: Apache-2.0 +Group: base +Packager: Wind River +URL: unknown +Source0: %{name}-%{version}.tar.gz + +BuildRequires: python-setuptools +BuildRequires: systemd-units +BuildRequires: systemd-devel +BuildRequires: fm-api +Requires: /bin/systemctl + +%description +Poll iostat and raise alarms for excessive conditions + +%define local_bindir /usr/bin/ +%define local_etc /etc/ +%define local_etc_initd /etc/init.d/ +%define local_etc_pmond /etc/pmon.d/ +%define local_etc_logrotated /etc/logrotate.d/ +%define pythonroot /usr/lib64/python2.7/site-packages + +%define debug_package %{nil} + +%prep +%setup + +%build +%{__python} setup.py build + +%install +%{__python} setup.py install --root=$RPM_BUILD_ROOT \ + --install-lib=%{pythonroot} \ + --prefix=/usr \ + --install-data=/usr/share \ + --single-version-externally-managed + +install -d -m 755 %{buildroot}%{local_etc}%{name} +install -p -D -m 700 files/io-monitor.conf %{buildroot}%{local_etc}%{name}/io-monitor.conf + +install -d -m 755 %{buildroot}%{local_etc_pmond} +install -p -D -m 644 scripts/pmon.d/io-monitor.conf %{buildroot}%{local_etc_pmond}/io-monitor.conf + +install -d -m 755 %{buildroot}%{local_etc_initd} +install -p -D -m 700 scripts/init.d/io-monitor-manager %{buildroot}%{local_etc_initd}/io-monitor-manager + +install -d -m 755 %{buildroot}%{local_bindir} +install -p -D -m 700 scripts/bin/io-monitor-manager %{buildroot}%{local_bindir}/io-monitor-manager + +install -d -m 755 %{buildroot}%{local_etc_logrotated} +install -p -D -m 644 files/io-monitor.logrotate %{buildroot}%{local_etc_logrotated}/io-monitor.logrotate + +install -d -m 755 %{buildroot}%{_unitdir} +install -m 644 -p -D files/%{name}-manager.service %{buildroot}%{_unitdir}/%{name}-manager.service + +%post +/bin/systemctl enable %{name}-manager.service + +%clean +rm -rf $RPM_BUILD_ROOT + +# Note: The package name is io-monitor but the import name is io_monitor so +# can't use '%{name}'. 
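+# The io_monitor payload paths are therefore spelled out literally in the
+# %files list below (see %{pythonroot}/io_monitor and its egg-info directory).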
+%files
+%defattr(-,root,root,-)
+%doc LICENSE
+%{local_bindir}/*
+%{local_etc}%{name}/*
+%{local_etc_initd}/*
+%{local_etc_pmond}/*
+%{_unitdir}/%{name}-manager.service
+%dir %{local_etc_logrotated}
+%{local_etc_logrotated}/*
+%dir %{pythonroot}/io_monitor
+%{pythonroot}/io_monitor/*
+%dir %{pythonroot}/io_monitor-%{version}.0-py2.7.egg-info
+%{pythonroot}/io_monitor-%{version}.0-py2.7.egg-info/*
diff --git a/middleware/io-monitor/recipes-common/io-monitor/files/io-monitor-manager.service b/middleware/io-monitor/recipes-common/io-monitor/files/io-monitor-manager.service
new file mode 100644
index 0000000..de845b4
--- /dev/null
+++ b/middleware/io-monitor/recipes-common/io-monitor/files/io-monitor-manager.service
@@ -0,0 +1,18 @@
+[Unit]
+Description=Daemon for polling iostat status
+After=local-fs.target
+Before=pmon.service
+
+[Service]
+Type=forking
+Restart=no
+KillMode=process
+RemainAfterExit=yes
+ExecStart=/etc/rc.d/init.d/io-monitor-manager start
+ExecStop=/etc/rc.d/init.d/io-monitor-manager stop
+ExecReload=/etc/rc.d/init.d/io-monitor-manager reload
+PIDFile=/var/run/io-monitor/io-monitor-manager.pid
+
+[Install]
+WantedBy=multi-user.target
+
diff --git a/middleware/io-monitor/recipes-common/io-monitor/files/io-monitor.conf b/middleware/io-monitor/recipes-common/io-monitor/files/io-monitor.conf
new file mode 100644
index 0000000..b7579b1
--- /dev/null
+++ b/middleware/io-monitor/recipes-common/io-monitor/files/io-monitor.conf
@@ -0,0 +1,60 @@
+[DEFAULT]
+# Run as a daemon
+#daemon_mode = True
+
+# Sleep interval (in seconds) between iostat executions [1..59]
+#wait_time = 1
+
+# Global debug level. Note: All monitors will be clipped at this setting.
+#global_log_level = DEBUG
+
+[cinder_congestion]
+# SSD: Large moving average window size (in samples).
+#ssd_large_window_size = 90
+
+# SSD: Medium moving average window size (in samples).
+#ssd_medium_window_size = 60
+
+# SSD: Small moving average window size (in samples).
+#ssd_small_window_size = 30
+
+# SSD: Value required in a moving average window to trigger next state.
+#ssd_thresh_sustained_await = 1000
+
+# SSD: Max await time. Anomalous data readings are clipped to this.
+#ssd_thresh_max_await = 5000
+
+# HDD: Large moving average window size (in samples).
+#hdd_large_window_size = 240
+
+# HDD: Medium moving average window size (in samples).
+#hdd_medium_window_size = 180
+
+# HDD: Small moving average window size (in samples).
+#hdd_small_window_size = 120
+
+# HDD: Value required in a moving average window to trigger next state.
+#hdd_thresh_sustained_await = 1500
+
+# HDD: Max await time. Anomalous data readings are clipped to this.
+#hdd_thresh_max_await = 5000
+
+# Monitor debug level. Note: global level must be equivalent or lower.
+#log_level = INFO
+
+# Modify how often status messages appear in the log. 0.0 is never, 1.0 is for
+# every iostat execution.
+#status_log_rate_modifier = 0.2
+
+# Enable FM Alarm generation
+#generate_fm_alarms = True
+
+# Number of identical consecutive congestion states seen before
+# raising/clearing alarms.
+#fm_alarm_debounce = 5
+
+# Write monitor data to a csv for analysis
+#output_write_csv = False
+
+# Directory where monitor output will be located.
+#output_csv_dir = /tmp + diff --git a/middleware/io-monitor/recipes-common/io-monitor/files/io-monitor.logrotate b/middleware/io-monitor/recipes-common/io-monitor/files/io-monitor.logrotate new file mode 100644 index 0000000..7e2b518 --- /dev/null +++ b/middleware/io-monitor/recipes-common/io-monitor/files/io-monitor.logrotate @@ -0,0 +1,11 @@ +/var/log/io-monitor.log { + nodateext + size 10M + start 1 + rotate 10 + missingok + notifempty + compress + delaycompress + copytruncate +} diff --git a/middleware/io-monitor/recipes-common/io-monitor/io-monitor/LICENSE b/middleware/io-monitor/recipes-common/io-monitor/io-monitor/LICENSE new file mode 100644 index 0000000..d645695 --- /dev/null +++ b/middleware/io-monitor/recipes-common/io-monitor/io-monitor/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/middleware/io-monitor/recipes-common/io-monitor/io-monitor/io_monitor/__init__.py b/middleware/io-monitor/recipes-common/io-monitor/io-monitor/io_monitor/__init__.py new file mode 100644 index 0000000..a2665fb --- /dev/null +++ b/middleware/io-monitor/recipes-common/io-monitor/io-monitor/io_monitor/__init__.py @@ -0,0 +1,10 @@ +# +# Copyright (c) 2016 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + +import pbr.version + +__version__ = pbr.version.VersionInfo('io-monitor').version_string() +__release__ = pbr.version.VersionInfo('io-monitor').release_string() diff --git a/middleware/io-monitor/recipes-common/io-monitor/io-monitor/io_monitor/constants.py b/middleware/io-monitor/recipes-common/io-monitor/io-monitor/io_monitor/constants.py new file mode 100644 index 0000000..bb2f97e --- /dev/null +++ b/middleware/io-monitor/recipes-common/io-monitor/io-monitor/io_monitor/constants.py @@ -0,0 +1,38 @@ +# +# Copyright (c) 2016 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + +import oslo_i18n as i18n + +DOMAIN = 'io_monitor' +_translators = i18n.TranslatorFactory(domain=DOMAIN) + +# The primary translation function using the well-known name "_" +_ = _translators.primary + +# HOST OS + +WRLINUX = 'wrlinux' +CENTOS = 'CentOS Linux' + +# ALARMS + +# Reasons for alarm +ALARM_REASON_BUILDING = _('Cinder I/O Congestion is above normal range and ' + 'is building') +ALARM_REASON_CONGESTED = _('Cinder I/O Congestion is high and impacting ' + 'guest performance') + +# Repair actions for alarm +REPAIR_ACTION_MAJOR_ALARM = _('Reduce the I/O load on the Cinder LVM ' + 'backend. Use Cinder QoS mechanisms on high ' + 'usage volumes.') +REPAIR_ACTION_CRITICAL_ALARM = _('Reduce the I/O load on the Cinder LVM ' + 'backend. Cinder actions may fail until ' + 'congestion is reduced. 
Use Cinder QoS '
+                                   'mechanisms on high usage volumes.')
+
+# All cinder volume group device mapper names begin with this
+CINDER_DM_PREFIX = 'cinder--volumes'
diff --git a/middleware/io-monitor/recipes-common/io-monitor/io-monitor/io_monitor/io_monitor_manager.py b/middleware/io-monitor/recipes-common/io-monitor/io-monitor/io_monitor/io_monitor_manager.py
new file mode 100644
index 0000000..64f2f24
--- /dev/null
+++ b/middleware/io-monitor/recipes-common/io-monitor/io-monitor/io_monitor/io_monitor_manager.py
@@ -0,0 +1,189 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+#
+# Copyright (c) 2016 Wind River Systems, Inc.
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+
+
+# IMPORTS
+import logging
+import time
+import math
+import os
+import sys
+
+from daemon import runner
+from io_monitor import __version__
+from io_monitor.constants import DOMAIN
+from io_monitor.options import CONF
+from io_monitor.options import add_common_opts
+from io_monitor.monitors.cinder.congestion import CinderCongestionMonitor
+import subprocess
+
+# OPTIONS
+
+# CONSTANTS
+LOG_FILE = '/var/log/io-monitor.log'
+PID_FILE = '/var/run/io-monitor/io-monitor-manager.pid'
+CONFIG_COMPLETE = '/etc/platform/.initial_config_complete'
+
+LOG = logging.getLogger(DOMAIN)
+
+LOG_FORMAT_DEBUG = '%(asctime)s.%(msecs)03d: ' \
+    + os.path.basename(sys.argv[0]) + '[%(process)s]: ' \
+    + '%(filename)s(%(lineno)s) - %(funcName)-20s: ' \
+    + '%(levelname)s: %(message)s'
+
+LOG_FORMAT_NORMAL = '%(asctime)s.%(msecs)03d: [%(process)s]: ' \
+    + '%(levelname)s: %(message)s'
+
+
+# METHODS
+def _start_polling(log_handle):
+    io_monitor_daemon = IOMonitorDaemon()
+    io_monitor_runner = runner.DaemonRunner(io_monitor_daemon)
+    io_monitor_runner.daemon_context.umask = 0o022
+    io_monitor_runner.daemon_context.files_preserve = [log_handle.stream]
+    io_monitor_runner.do_action()
+
+
+def handle_exception(exc_type, exc_value, exc_traceback):
+    """
+    Exception handler to log any uncaught exceptions
+    """
+    LOG.error("Uncaught exception",
+              exc_info=(exc_type, exc_value, exc_traceback))
+    sys.__excepthook__(exc_type, exc_value, exc_traceback)
+
+
+def configure_logging():
+
+    level_dict = {'ERROR': logging.ERROR,
+                  'WARN': logging.WARN,
+                  'INFO': logging.INFO,
+                  'DEBUG': logging.DEBUG}
+
+    if CONF.global_log_level in level_dict.keys():
+        level = level_dict[CONF.global_log_level]
+    else:
+        level = logging.INFO
+
+    # When we daemonize, the default logging stream handler is closed. We need
+    # to manually set up logging so that we can pass the file handler into the
+    # monitor classes.
+    LOG.setLevel(level)
+    h = logging.FileHandler(LOG_FILE)
+    h.setLevel(level)
+    f = logging.Formatter(LOG_FORMAT_NORMAL, datefmt='%Y-%m-%d %H:%M:%S')
+    h.setFormatter(f)
+    LOG.addHandler(h)
+
+    # Log uncaught exceptions to file
+    sys.excepthook = handle_exception
+
+    return h
+
+
+def main():
+    # Set up configuration options
+    add_common_opts()
+    CONF(project='io-monitor', version=__version__)
+
+    # Set up logging. Allow all levels. The monitor will restrict the level
+
+
+def main():
+    # Set up configuration options
+    add_common_opts()
+    CONF(project='io-monitor', version=__version__)
+
+    # Set up logging. Allow all levels. The monitor will restrict the level
+    # further as it sees fit.
+    log_handle = configure_logging()
+
+    # Dump config
+    CONF.log_opt_values(LOG, logging.INFO)
+    if CONF.daemon_mode:
+        sys.argv = [sys.argv[0], 'start']
+        _start_polling(log_handle)
+
+
+# CLASSES
+
+class IOMonitorDaemon():
+    """Daemon process representation of the iostat monitoring program."""
+    def __init__(self):
+        # Daemon-specific init
+        self.stdin_path = '/dev/null'
+        self.stdout_path = '/dev/null'
+        self.stderr_path = '/dev/null'
+        self.pidfile_path = PID_FILE
+        self.pidfile_timeout = 5
+
+        # Monitors
+        self.ccm = None
+
+    def run(self):
+
+        # We are started by systemd, so wait for the initial config to be
+        # completed
+        while not os.path.exists(CONFIG_COMPLETE):
+            LOG.info("Waiting: Initial configuration is not complete")
+            time.sleep(30)
+
+        LOG.info("Initializing monitors...")
+        # Cinder Congestion Monitor
+        self.ccm = CinderCongestionMonitor()
+
+        # Ensure system is monitorable
+        if not self.ccm.is_system_monitorable():
+            LOG.error("This system is not configured for Cinder LVM")
+
+            # Wait for something to kill us. Since we are managed by pmon
+            # we don't want to exit at this point.
+            def sleepy_time(t):
+                while True:
+                    t = t * 2
+                    yield t
+
+            LOG.info("Will stand by, performing no further actions")
+            for s in sleepy_time(1):
+                time.sleep(s)
+
+            sys.exit()
+
+        LOG.info("Starting: Running iostat %d times per minute" %
+                 math.ceil(60/(CONF.wait_time+1)))
+
+        try:
+            command = "iostat -dx -t -p ALL"
+            while True:
+                process = subprocess.Popen(command.split(),
+                                           stdout=subprocess.PIPE,
+                                           stderr=subprocess.PIPE)
+                output, error = process.communicate()
+                if output:
+                    # Send the iostat output to the monitor
+                    self._monitor_ccm_send_inputs(output)
+
+                    # Instruct the monitor to process the data
+                    self._monitor_ccm_generate_output()
+
+                time.sleep(CONF.wait_time)
+        except KeyboardInterrupt:
+            LOG.info('Exiting...')
+
+            return_code = process.poll()
+            LOG.error("return code = %s " % return_code)
+
+    def _monitor_ccm_send_inputs(self, inputs):
+        # LOG.debug(inputs)
+
+        # Process output from iteration
+        lines = inputs.split('\n')
+        for pline in lines[2:]:
+            self.ccm.parse_iostats(pline.strip())
+
+    def _monitor_ccm_generate_output(self):
+        self.ccm.generate_status()
+
+
+if __name__ == "__main__":
+
+    if not os.geteuid() == 0:
+        sys.exit("\nOnly root can run this\n")
+
+    main()
diff --git a/middleware/io-monitor/recipes-common/io-monitor/io-monitor/io_monitor/monitors/__init__.py b/middleware/io-monitor/recipes-common/io-monitor/io-monitor/io_monitor/monitors/__init__.py
new file mode 100644
index 0000000..754a8f4
--- /dev/null
+++ b/middleware/io-monitor/recipes-common/io-monitor/io-monitor/io_monitor/monitors/__init__.py
@@ -0,0 +1,5 @@
+#
+# Copyright (c) 2016 Wind River Systems, Inc.
+#
+# SPDX-License-Identifier: Apache-2.0
+#
diff --git a/middleware/io-monitor/recipes-common/io-monitor/io-monitor/io_monitor/monitors/cinder/__init__.py b/middleware/io-monitor/recipes-common/io-monitor/io-monitor/io_monitor/monitors/cinder/__init__.py
new file mode 100644
index 0000000..754a8f4
--- /dev/null
+++ b/middleware/io-monitor/recipes-common/io-monitor/io-monitor/io_monitor/monitors/cinder/__init__.py
@@ -0,0 +1,5 @@
+#
+# Copyright (c) 2016 Wind River Systems, Inc.
+#
+# SPDX-License-Identifier: Apache-2.0
+#
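
The standby path above leans on sleepy_time, a generator that doubles its argument forever, i.e. an unbounded exponential backoff: the process stays alive for pmon but wakes up less and less often. The first few intervals it yields, for illustration (plain Python, no project imports needed):

    import itertools

    def sleepy_time(t):
        while True:
            t = t * 2
            yield t

    print(list(itertools.islice(sleepy_time(1), 5)))  # -> [2, 4, 8, 16, 32]
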
diff --git a/middleware/io-monitor/recipes-common/io-monitor/io-monitor/io_monitor/monitors/cinder/congestion.py b/middleware/io-monitor/recipes-common/io-monitor/io-monitor/io_monitor/monitors/cinder/congestion.py
new file mode 100644
index 0000000..a63aae0
--- /dev/null
+++ b/middleware/io-monitor/recipes-common/io-monitor/io-monitor/io_monitor/monitors/cinder/congestion.py
@@ -0,0 +1,774 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+#
+# Copyright (c) 2016-2017 Wind River Systems, Inc.
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+
+import collections
+import logging
+import pyudev
+import math
+import operator
+import os
+import platform
+import re
+import subprocess
+
+from fm_api import fm_api
+from fm_api import constants as fm_constants
+from io_monitor import constants
+from io_monitor.constants import DOMAIN
+from io_monitor.utils.data_collector import DeviceDataCollector
+from io_monitor.constants import _
+from oslo_config import cfg
+
+ccm_opts = [
+    cfg.IntOpt('ssd_small_window_size',
+               default=30,
+               help=('SSD: Small moving average window size (in seconds).')),
+    cfg.IntOpt('ssd_medium_window_size',
+               default=60,
+               help=('SSD: Medium moving average window size (in seconds).')),
+    cfg.IntOpt('ssd_large_window_size',
+               default=90,
+               help=('SSD: Large moving average window size (in seconds).')),
+    cfg.IntOpt('ssd_thresh_sustained_await',
+               default=1000,
+               help=('SSD: Value required in a moving average window to '
+                     'trigger the next state.')),
+    cfg.IntOpt('ssd_thresh_max_await',
+               default=5000,
+               help=('SSD: Max await time. Anomalous data readings are '
+                     'clipped to this.')),
+    cfg.IntOpt('hdd_small_window_size',
+               default=120,
+               help=('HDD: Small moving average window size (in seconds).')),
+    cfg.IntOpt('hdd_medium_window_size',
+               default=180,
+               help=('HDD: Medium moving average window size (in seconds).')),
+    cfg.IntOpt('hdd_large_window_size',
+               default=240,
+               help=('HDD: Large moving average window size (in seconds).')),
+    cfg.IntOpt('hdd_thresh_sustained_await',
+               default=1500,
+               help=('HDD: Value required in a moving average window to '
+                     'trigger the next state.')),
+    cfg.IntOpt('hdd_thresh_max_await',
+               default=5000,
+               help=('HDD: Max await time. Anomalous data readings are '
+                     'clipped to this.')),
+    cfg.StrOpt('log_level',
+               default='INFO',
+               choices=('ERROR', 'WARN', 'INFO', 'DEBUG'),
+               help=('Monitor debug level. Note: the global level must be '
+                     'equivalent or lower.')),
+    cfg.FloatOpt('status_log_rate_modifier', default=0.2,
+                 help=('Modify how often status messages appear in the log. '
+                       '0.0 is never, 1.0 is once per iostat execution.')),
+    cfg.BoolOpt('generate_fm_alarms', default=True,
+                help=('Enable FM alarm generation.')),
+    cfg.IntOpt('fm_alarm_debounce', default=5,
+               help=('Number of consecutive same congestion states seen '
+                     'before raising/clearing alarms.')),
+    cfg.BoolOpt('output_write_csv', default=False,
+                help=('Write monitor data to a CSV file for analysis.')),
+    cfg.StrOpt('output_csv_dir', default='/tmp',
+               help=('Directory where monitor output will be located.')),
+]
+
+CONF = cfg.CONF
+CONF.register_opts(ccm_opts, group="cinder_congestion")
+
+LOG = logging.getLogger(DOMAIN)
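
Because these options are registered under the cinder_congestion group, oslo.config will pick them up from a [cinder_congestion] section of whatever configuration file CONF(project='io-monitor') resolves; the exact file path is packaging-dependent and not shown in this change. A self-contained sketch of the group mechanics:

    from oslo_config import cfg

    CONF = cfg.CONF
    CONF.register_opts([cfg.IntOpt('demo_window', default=30)],
                       group='cinder_congestion')
    CONF([])  # parse an empty command line just for the demo

    # Group options are read with attribute access, as the monitor does:
    print(CONF.cinder_congestion.demo_window)  # -> 30, or the file override
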
+
+
+class CinderCongestionMonitor(object):
+    # Congestion States
+    STATUS_NORMAL = "Normal"
+    STATUS_BUILDING = "Building"
+    STATUS_CONGESTED = "Limiting"
+
+    # disk type
+    CINDER_DISK_SSD = 0
+    CINDER_DISK_HDD = 1
+
+    def __init__(self):
+        # Setup logging
+        level_dict = {'ERROR': logging.ERROR,
+                      'WARN': logging.WARN,
+                      'INFO': logging.INFO,
+                      'DEBUG': logging.DEBUG}
+
+        if CONF.cinder_congestion.log_level in level_dict.keys():
+            LOG.setLevel(level_dict[CONF.cinder_congestion.log_level])
+        else:
+            LOG.setLevel(logging.INFO)
+
+        LOG.info("Initializing %s..." % self.__class__.__name__)
+
+        # DRBD file
+        self.drbd_file = '/etc/drbd.d/drbd-cinder.res'
+
+        # iostat parsing regex
+        self.ts_regex = re.compile(r"(\d{2}/\d{2}/\d{2,4}) "
+                                   r"(\d{2}:\d{2}:\d{2})")
+        self.device_regex = re.compile(
+            r"(\w+-?\w+)\s+(\d+.\d+)\s+(\d+.\d+)\s+(\d+.\d+)\s+(\d+.\d+)"
+            r"\s+(\d+.\d+)\s+(\d+.\d+)\s+(\d+.\d+)\s+(\d+.\d+)\s+(\d+.\d+)\s+"
+            r"(\d+.\d+)\s+(\d+.\d+)\s+(\d+.\d+)\s+(\d+.\d+)")
+
+        # window sizes
+        self.s_window_sec = CONF.cinder_congestion.ssd_small_window_size
+        self.m_window_sec = CONF.cinder_congestion.ssd_medium_window_size
+        self.l_window_sec = CONF.cinder_congestion.ssd_large_window_size
+
+        # state variables
+        self.latest_time = None
+        self.congestion_status = self.STATUS_NORMAL
+
+        # init data collector
+        self.device_dict = {}
+
+        # devices
+        self.phys_cinder_device = None
+        self.base_cinder_devs = []
+        self.base_cinder_tracking_devs = []
+        self.non_cinder_dynamic_devs = ['drbd0', 'drbd1', 'drbd2', 'drbd3',
+                                        'drbd5']
+        self.non_cinder_phys_devs = []
+
+        # set the default operational scenarios
+        self.await_minimal_spike = CONF.cinder_congestion.ssd_thresh_max_await
+        self.await_sustained_congestion = (
+            CONF.cinder_congestion.ssd_thresh_sustained_await)
+
+        # FM
+        self.fm_api = fm_api.FaultAPIs()
+        self.fm_state_count = collections.Counter()
+
+        # CSV handle
+        self.csv = None
+
+        # status logging
+        self.status_skip_count = 0
+
+        # to compare with current g_count
+        self.last_g_count = 0
+
+        message_rate = math.ceil(60 / (CONF.wait_time+1))
+        self.status_skip_total = math.ceil(
+            message_rate/(message_rate *
+                          CONF.cinder_congestion.status_log_rate_modifier))
+        LOG.info("Display status message at %d per minute..." %
+                 (message_rate *
+                  CONF.cinder_congestion.status_log_rate_modifier))
+
+        # Clear any existing alarms
+        self._clear_fm()
+
+    def _is_number(self, s):
+        try:
+            float(s)
+            return True
+        except ValueError:
+            return False
+
+    def command(self, arguments, **kwargs):
+        """Execute a command and capture stdout, stderr and the return code."""
+        process = subprocess.Popen(
+            arguments,
+            stdout=subprocess.PIPE,
+            stderr=subprocess.PIPE,
+            **kwargs)
+        out, err = process.communicate()
+        return out, err, process.returncode
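
The per-device windowed statistics live in DeviceDataCollector (io_monitor/utils/data_collector.py, which is not part of this diff), so the exact implementation is not visible here. The idea the window-size and max-await options configure is a capped moving average; a minimal sketch under that assumption (MovingWindow is an illustrative name, not the real class):

    from collections import deque

    class MovingWindow(object):
        """Fixed-size moving average over the most recent samples."""
        def __init__(self, window_size):
            self.samples = deque(maxlen=window_size)

        def add(self, value, cap=None):
            # Clip anomalous readings, as the *_thresh_max_await caps do.
            if cap is not None:
                value = min(value, cap)
            self.samples.append(value)

        def average(self):
            if not self.samples:
                return 0.0
            return sum(self.samples) / float(len(self.samples))

    w = MovingWindow(30)           # cf. ssd_small_window_size seconds
    for await_ms in (12, 40, 9000):
        w.add(await_ms, cap=5000)  # ssd_thresh_max_await clips the spike
    print(w.average())             # -> (12 + 40 + 5000) / 3
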
command(["readlink", "-f", device_path]) + out = out.rstrip() + except Exception as e: + return None + + return out + + def _get_disk_type(self, device_node): + if device_node: + proc_device_file = '/sys/block/' + device_node + \ + '/queue/rotational' + if os.path.exists(proc_device_file): + with open(proc_device_file) as fileobject: + for line in fileobject: + return int(line.rstrip()) + + # If the disk is unknown assume an SSD. + return self.CINDER_DISK_SSD + + + def _is_cinder_related_device(self,device_node): + name = "" + if device_node: + proc_device_file = '/sys/block/' + device_node + \ + '/dm/name' + + if os.path.exists(proc_device_file): + with open(proc_device_file) as fileobject: + for line in fileobject: + name = line.rstrip() + + if constants.CINDER_DM_PREFIX in name: + return True + + return False + + def _is_cinder_backing_device(self, device_node): + name = "" + if device_node: + proc_device_file = '/sys/block/' + device_node + \ + '/dm/name' + if os.path.exists(proc_device_file): + with open(proc_device_file) as fileobject: + for line in fileobject: + name = line.rstrip() + + if any(s in name for s in ['pool', 'anchor']): + if device_node not in self.base_cinder_devs: + self.base_cinder_devs.append(device_node) + if any(s in name for s in ['tdata', 'tmeta']): + if device_node not in self.base_cinder_tracking_devs: + self.base_cinder_tracking_devs.append(device_node) + + LOG.info("Cinder Base Devices = %s; Tracking %s" % ( + self.base_cinder_devs, self.base_cinder_tracking_devs)) + return True + + return False + + def _determine_cinder_devices(self): + # Check to see if we have DRBD device we are syncing + if os.path.exists(self.drbd_file): + + # grab the data + with open(self.drbd_file) as fileobject: + + drbd_dev_regex = re.compile(r"device\s+/dev/(\w+);") + drbd_disk_path_regex = re.compile( + r"disk\s+\"(/dev/disk/by-path/(.+))\";") + drbd_disk_node_regex = re.compile(r"/dev/(\w+)") + partition_regex = re.compile(r"(sd\w+)\d+") + + for line in fileobject: + m = drbd_dev_regex.match(line.strip()) + if m: + self.base_cinder_devs.append(m.group(1)) + + m = drbd_disk_path_regex.match(line.strip()) + if m: + drbd_disk = self.device_path_to_device_node(m.group(1)) + + drbd_disk_sd = drbd_disk_node_regex.match(drbd_disk) + if drbd_disk_sd: + self.base_cinder_devs.append(drbd_disk_sd.group(1)) + + d = partition_regex.match(drbd_disk_sd.group(1)) + if d: + self.phys_cinder_device = d.group(1) + self.base_cinder_devs.append(d.group(1)) + + # Which host OS? 
+
+    def _determine_cinder_devices(self):
+        # Check to see if we have a DRBD device we are syncing
+        if os.path.exists(self.drbd_file):
+
+            # grab the data
+            with open(self.drbd_file) as fileobject:
+
+                drbd_dev_regex = re.compile(r"device\s+/dev/(\w+);")
+                drbd_disk_path_regex = re.compile(
+                    r"disk\s+\"(/dev/disk/by-path/(.+))\";")
+                drbd_disk_node_regex = re.compile(r"/dev/(\w+)")
+                partition_regex = re.compile(r"(sd\w+)\d+")
+
+                for line in fileobject:
+                    m = drbd_dev_regex.match(line.strip())
+                    if m:
+                        self.base_cinder_devs.append(m.group(1))
+
+                    m = drbd_disk_path_regex.match(line.strip())
+                    if m:
+                        drbd_disk = self.device_path_to_device_node(m.group(1))
+
+                        drbd_disk_sd = drbd_disk_node_regex.match(drbd_disk)
+                        if drbd_disk_sd:
+                            self.base_cinder_devs.append(
+                                drbd_disk_sd.group(1))
+
+                            d = partition_regex.match(drbd_disk_sd.group(1))
+                            if d:
+                                self.phys_cinder_device = d.group(1)
+                                self.base_cinder_devs.append(d.group(1))
+
+        # Which host OS?
+        if platform.linux_distribution()[0] == constants.WRLINUX:
+            dm_major = 252
+        else:
+            dm_major = 253
+
+        # Grab the device mapper devices and pull out the base cinder
+        # devices
+        dmsetup_regex = re.compile(r'^([\w-]+)\s+\((\d+):(\d+)\)')
+
+        dmsetup_command = 'dmsetup ls'
+        dmsetup_process = subprocess.Popen(dmsetup_command,
+                                           stdout=subprocess.PIPE,
+                                           shell=True)
+        dmsetup_output = dmsetup_process.stdout.read()
+        lines = dmsetup_output.split('\n')
+        for l in lines:
+            m = dmsetup_regex.match(l.strip())
+            if m:
+                if m.group(2) == str(dm_major):
+                    # LOG.debug("%s %s %s" % (m.group(1),
+                    #                         m.group(2),
+                    #                         m.group(3)))
+                    if constants.CINDER_DM_PREFIX in m.group(1):
+                        if 'pool' in m.group(1) or 'anchor' in m.group(1):
+                            self.base_cinder_devs.append(
+                                "dm-" + m.group(3))
+                        if 'tdata' in m.group(1) or 'tmeta' in m.group(1):
+                            self.base_cinder_tracking_devs.append(
+                                "dm-" + m.group(3))
+                    else:
+                        self.non_cinder_dynamic_devs.append(
+                            "dm-" + m.group(3))
+
+        # If the tracking devs are nonexistent, then we didn't find any
+        # thin pool entries. Therefore we are thickly provisioned and need
+        # to track the physical device.
+        if len(self.base_cinder_tracking_devs) == 0:
+            self.base_cinder_tracking_devs.append(
+                self.phys_cinder_device)
+
+        # Use UDEV info to grab all physical disks
+        context = pyudev.Context()
+        for device in context.list_devices(subsystem='block',
+                                           DEVTYPE='disk'):
+            if device['MAJOR'] == '8':
+                device = str(os.path.basename(device['DEVNAME']))
+                if device != self.phys_cinder_device:
+                    self.non_cinder_phys_devs.append(device)
+
+    def _update_device_stats(self, ts, device, current_iops, current_await):
+        if device not in self.device_dict:
+            # For AIO systems nova-local will be provisioned later and
+            # differently based on the instance_backing value for the compute
+            # functionality. 
Check for cinder specific dm devices and ignore + # all others + if not self._is_cinder_related_device(device): + return + self._is_cinder_backing_device(device) + self.device_dict.update( + {device: DeviceDataCollector( + device, + [DeviceDataCollector.DATA_IOPS, + DeviceDataCollector.DATA_AWAIT], + self.s_window_sec, + self.m_window_sec, + self.l_window_sec)}) + self.device_dict[device].set_data_caps( + DeviceDataCollector.DATA_AWAIT, + self.await_minimal_spike) + self.device_dict[device].set_congestion_thresholds( + self.await_minimal_spike, + self.await_sustained_congestion) + + self.device_dict[device].update_data(ts, + DeviceDataCollector.DATA_IOPS, + current_iops) + self.device_dict[device].update_data(ts, + DeviceDataCollector.DATA_AWAIT, + current_await) + self.device_dict[device].update_congestion_status() + + def is_system_monitorable(self): + if not os.path.exists(self.drbd_file): + LOG.error("%s does not exist" % self.drbd_file) + return False + + # Discover devices on this host + self._determine_cinder_devices() + + # Get the cinder disk type and set the monitor values accordingly + disk_type = self._get_disk_type(self.phys_cinder_device) + if disk_type: + self.s_window_sec = CONF.cinder_congestion.hdd_small_window_size + self.m_window_sec = CONF.cinder_congestion.hdd_medium_window_size + self.l_window_sec = CONF.cinder_congestion.hdd_large_window_size + self.await_minimal_spike = ( + CONF.cinder_congestion.hdd_thresh_max_await) + self.await_sustained_congestion = ( + CONF.cinder_congestion.hdd_thresh_sustained_await) + else: + self.s_window_sec = CONF.cinder_congestion.ssd_small_window_size + self.m_window_sec = CONF.cinder_congestion.ssd_medium_window_size + self.l_window_sec = CONF.cinder_congestion.ssd_large_window_size + self.await_minimal_spike = ( + CONF.cinder_congestion.ssd_thresh_max_await) + self.await_sustained_congestion = ( + CONF.cinder_congestion.ssd_thresh_sustained_await) + + LOG.info("Physical Cinder Disk = %s - %s" % + (self.phys_cinder_device, + "HDD" if disk_type else "SSD")) + LOG.info("Cinder Base Devices = %s; Tracking %s" % ( + self.base_cinder_devs, self.base_cinder_tracking_devs)) + LOG.info("Non-Cinder Devices = %s" % ( + self.non_cinder_dynamic_devs + self.non_cinder_phys_devs)) + + return True + + def get_operational_thresholds(self): + return (self.await_minimal_spike, + self.await_sustained_congestion) + + def set_operational_thresholds(self, + await_minimal_spike, + await_sustained_congestion): + if await_minimal_spike: + self.await_minimal_spike = await_minimal_spike + if await_sustained_congestion: + self.await_sustained_congestion = await_sustained_congestion + + def _flush_stale_devices(self): + for d in self.device_dict.keys(): + if self.device_dict[d].is_data_stale(self.latest_time): + self.device_dict.pop(d, None) + + def _log_device_data_windows(self, device): + LOG.debug("%-6s: %s %s" % ( + device, + self.device_dict[device].get_element_windows_avg_string( + DeviceDataCollector.DATA_AWAIT), + self.device_dict[device].get_element_windows_avg_string( + DeviceDataCollector.DATA_IOPS))) + + def _log_congestion_status(self, congestion_data): + congestion_data.c_freq_dict.update( + dict.fromkeys( + set(['N', 'B', 'L']).difference( + congestion_data.c_freq_dict), 0)) + congestion_data.g_freq_dict.update( + dict.fromkeys( + set(['N', 'B', 'L']).difference( + congestion_data.g_freq_dict), 0)) + + LOG.info("Status (%-8s): Cinder Devs IOPS [ %10.2f, %10.2f, %10.2f ] " + "Guests Counts %s; Guest Await[ %10.2f, %10.2f, %10.2f ]" % ( + 
congestion_data.status,
+                congestion_data.c_iops_avg_list[0],
+                congestion_data.c_iops_avg_list[1],
+                congestion_data.c_iops_avg_list[2],
+                dict(congestion_data.g_freq_dict),
+                congestion_data.g_await_avg_list[0],
+                congestion_data.g_await_avg_list[1],
+                congestion_data.g_await_avg_list[2]))
+
+    def _determine_congestion_state(self):
+
+        # Analyze devices
+        cinder_congestion_freq = collections.Counter()
+        cinder_iops_avg = [0.0, 0.0, 0.0]
+        guest_congestion_freq = collections.Counter()
+        guest_await_avg = [0.0, 0.0, 0.0]
+
+        for d, dc in self.device_dict.iteritems():
+            if d in self.base_cinder_devs:
+                if d in self.base_cinder_tracking_devs:
+                    cinder_congestion_freq.update(dc.get_congestion_status())
+                    cinder_iops_avg = map(operator.add,
+                                          cinder_iops_avg,
+                                          dc.get_element_windows_avg_list(
+                                              DeviceDataCollector.DATA_IOPS))
+                    # LOG.debug("C: %s " % cinder_iops_avg)
+                    # self._log_device_data_windows(d)
+
+            elif d not in (self.base_cinder_devs +
+                           self.non_cinder_dynamic_devs +
+                           self.non_cinder_phys_devs):
+                guest_congestion_freq.update(
+                    dc.get_congestion_status(debug=True))
+                guest_await_avg = map(operator.add,
+                                      guest_await_avg,
+                                      dc.get_element_windows_avg_list(
+                                          DeviceDataCollector.DATA_AWAIT))
+                # LOG.debug("G: %s " % guest_await_avg)
+                # self._log_device_data_windows(d)
+
+        if list(cinder_congestion_freq.elements()):
+            cinder_iops_avg[:] = [i/len(list(
+                cinder_congestion_freq.elements())) for i in cinder_iops_avg]
+
+        if list(guest_congestion_freq.elements()):
+            guest_await_avg[:] = [i/len(list(
+                guest_congestion_freq.elements())) for i in guest_await_avg]
+
+        self.congestion_status = self.STATUS_NORMAL
+        if DeviceDataCollector.STATUS_BUILDING in guest_congestion_freq:
+            self.congestion_status = self.STATUS_BUILDING
+        if DeviceDataCollector.STATUS_CONGESTED in guest_congestion_freq:
+            self.congestion_status = self.STATUS_CONGESTED
+
+        congestion_data = collections.namedtuple("congestion_data",
+                                                 ["timestamp", "status",
+                                                  "c_freq_dict",
+                                                  "c_iops_avg_list",
+                                                  "g_count",
+                                                  "g_freq_dict",
+                                                  "g_await_avg_list"])
+
+        return congestion_data(self.latest_time,
+                               self.congestion_status,
+                               cinder_congestion_freq,
+                               cinder_iops_avg,
+                               sum(guest_congestion_freq.values()),
+                               guest_congestion_freq,
+                               guest_await_avg)
+
+    def _clear_fm(self):
+        building = fm_constants.FM_ALARM_ID_STORAGE_CINDER_IO_BUILDING
+        limiting = fm_constants.FM_ALARM_ID_STORAGE_CINDER_IO_LIMITING
+
+        entity_instance_id = "cinder_io_monitor"
+        ccm_alarm_ids = [building, limiting]
+
+        existing_alarms = []
+        for alarm_id in ccm_alarm_ids:
+            alarm_list = self.fm_api.get_faults_by_id(alarm_id)
+            if not alarm_list:
+                continue
+            for alarm in alarm_list:
+                existing_alarms.append(alarm)
+
+        if len(existing_alarms) > 1:
+            LOG.warn("WARNING: we have more than one existing alarm")
+
+        for a in existing_alarms:
+            self.fm_api.clear_fault(a.alarm_id, entity_instance_id)
+            LOG.info(
+                _("Clearing congestion alarm {} - severity: {}, "
+                  "reason: {}, service_affecting: {}").format(
+                      a.uuid, a.severity, a.reason_text, True))
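
_determine_congestion_state above escalates the overall status if any guest device reports a worse state: Building beats Normal, and Limiting beats both; the Counter membership tests implement that priority. A condensed illustration using the collectors' one-letter state codes ('N', 'B', 'L') seen in the logging and CSV code:

    from collections import Counter

    def overall_status(guest_states):
        freq = Counter(guest_states)
        status = 'Normal'
        if 'B' in freq:      # any device building congestion
            status = 'Building'
        if 'L' in freq:      # any device limiting wins outright
            status = 'Limiting'
        return status

    print(overall_status(['N', 'N', 'B']))   # -> Building
    print(overall_status(['N', 'B', 'L']))   # -> Limiting
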
+
+    def _update_fm(self, debounce_count, override=None):
+
+        building = fm_constants.FM_ALARM_ID_STORAGE_CINDER_IO_BUILDING
+        limiting = fm_constants.FM_ALARM_ID_STORAGE_CINDER_IO_LIMITING
+
+        if override:
+            self.congestion_status = override
+
+        # Update the status count
+        self.fm_state_count.update(self.congestion_status[0])
+
+        # Debounce alarms: if more than one congestion state is present in
+        # the counter, the state has changed, so restart the count with the
+        # current state.
+        if len(self.fm_state_count) > 1:
+            self.fm_state_count.clear()
+            self.fm_state_count.update(self.congestion_status[0])
+            return
+
+        # Debounce alarms: make sure we have seen this alarm state for a
+        # specific number of samples
+        count = self.fm_state_count.itervalues().next()
+        if count < debounce_count:
+            return
+
+        # We are past the debounce state. Now take action.
+        entity_instance_id = "cinder_io_monitor"
+        ccm_alarm_ids = [building, limiting]
+
+        existing_alarms = []
+        for alarm_id in ccm_alarm_ids:
+            alarm_list = self.fm_api.get_faults_by_id(alarm_id)
+            if not alarm_list:
+                continue
+            for alarm in alarm_list:
+                existing_alarms.append(alarm)
+
+        if len(existing_alarms) > 1:
+            LOG.warn("WARNING: we have more than one existing alarm")
+
+        if self.congestion_status is self.STATUS_NORMAL:
+            for a in existing_alarms:
+                self.fm_api.clear_fault(a.alarm_id, entity_instance_id)
+                LOG.info(
+                    _("Clearing congestion alarm {} - severity: {}, "
+                      "reason: {}, service_affecting: {}").format(
+                          a.uuid, a.severity, a.reason_text, True))
+
+        elif self.congestion_status is self.STATUS_BUILDING:
+            alarm_is_raised = False
+            for a in existing_alarms:
+                if a.alarm_id != building:
+                    self.fm_api.clear_fault(a.alarm_id, entity_instance_id)
+                    LOG.info(
+                        _("Clearing congestion alarm {} - severity: {}, "
+                          "reason: {}, service_affecting: {}").format(
+                              a.uuid, a.severity, a.reason_text, True))
+                else:
+                    alarm_is_raised = True
+
+            if not alarm_is_raised:
+                severity = fm_constants.FM_ALARM_SEVERITY_MAJOR
+                reason_text = constants.ALARM_REASON_BUILDING
+
+                fault = fm_api.Fault(
+                    alarm_id=building,
+                    alarm_type=fm_constants.FM_ALARM_TYPE_2,
+                    alarm_state=fm_constants.FM_ALARM_STATE_SET,
+                    entity_type_id=fm_constants.FM_ENTITY_TYPE_CLUSTER,
+                    entity_instance_id=entity_instance_id,
+                    severity=severity,
+                    reason_text=reason_text,
+                    probable_cause=fm_constants.ALARM_PROBABLE_CAUSE_8,
+                    proposed_repair_action=constants.REPAIR_ACTION_MAJOR_ALARM,
+                    service_affecting=True)
+                alarm_uuid = self.fm_api.set_fault(fault)
+                if alarm_uuid:
+                    LOG.info(
+                        _("Created congestion alarm {} - severity: {}, "
+                          "reason: {}, service_affecting: {}").format(
+                              alarm_uuid, severity, reason_text, True))
+                else:
+                    LOG.error(
+                        _("Failed to create congestion alarm - severity: {}, "
+                          "reason: {}, service_affecting: {}").format(
+                              severity, reason_text, True))
+
+        elif self.congestion_status is self.STATUS_CONGESTED:
+            alarm_is_raised = False
+            for a in existing_alarms:
+                if a.alarm_id != limiting:
+                    self.fm_api.clear_fault(a.alarm_id, entity_instance_id)
+                    LOG.info(
+                        _("Clearing congestion alarm {} - severity: {}, "
+                          "reason: {}, service_affecting: {}").format(
+                              a.uuid, a.severity, a.reason_text, True))
+                else:
+                    alarm_is_raised = True
+
+            if not alarm_is_raised:
+                severity = fm_constants.FM_ALARM_SEVERITY_CRITICAL
+                reason_text = constants.ALARM_REASON_CONGESTED
+                repair = constants.REPAIR_ACTION_CRITICAL_ALARM
+                fault = fm_api.Fault(
+                    alarm_id=limiting,
+                    alarm_type=fm_constants.FM_ALARM_TYPE_2,
+                    alarm_state=fm_constants.FM_ALARM_STATE_SET,
+                    entity_type_id=fm_constants.FM_ENTITY_TYPE_CLUSTER,
+                    entity_instance_id=entity_instance_id,
+                    severity=severity,
+                    reason_text=reason_text,
+                    probable_cause=fm_constants.ALARM_PROBABLE_CAUSE_8,
+                    proposed_repair_action=repair,
+                    service_affecting=True)
+                alarm_uuid = self.fm_api.set_fault(fault)
+                if alarm_uuid:
+                    LOG.info(
+                        _("Created congestion alarm {} - severity: {}, "
+                          "reason: {}, service_affecting: {}").format(
+                              alarm_uuid, severity, reason_text, True))
+                else:
+                    LOG.error(
+                        _("Failed to create congestion alarm - severity: {}, "
+                          "reason: {}, service_affecting: {}").format(
+                              severity, reason_text, True))
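
The debouncing in _update_fm is a Counter keyed by the first letter of the state: alarms only change once the same state has been observed fm_alarm_debounce times in a row, and any flap resets the count. The same logic distilled into a standalone class (illustrative only, with the Python 3 spelling of the dict iteration):

    from collections import Counter

    class Debouncer(object):
        def __init__(self, threshold=5):       # cf. fm_alarm_debounce
            self.threshold = threshold
            self.counts = Counter()

        def observe(self, state):
            """Return state once it is seen `threshold` times in a row."""
            self.counts.update(state[0])
            if len(self.counts) > 1:           # state changed: start over
                self.counts.clear()
                self.counts.update(state[0])
                return None
            if next(iter(self.counts.values())) < self.threshold:
                return None
            return state

    d = Debouncer(threshold=3)
    for s in ('Building', 'Building', 'Normal', 'Normal', 'Normal'):
        print(d.observe(s))  # None, None, None, None, 'Normal'
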
+
+    def _create_output(self, output_dir, congestion_data):
+        if not self.csv:
+            LOG.info("Creating output")
+            if os.path.exists(output_dir):
+                if output_dir.endswith('/'):
+                    fn = output_dir + 'ccm.csv'
+                else:
+                    fn = output_dir + '/ccm.csv'
+            else:
+                fn = '/tmp/ccm.csv'
+            try:
+                self.csv = open(fn, 'w')
+            except Exception as e:
+                raise e
+
+            # Note: terminate the header with a newline so the first data
+            # row starts on its own line.
+            self.csv.write("Timestamp, Congestion Status, "
+                           "Cinder Devs Normal, "
+                           "Cinder Devs Building, Cinder Devs Limiting,"
+                           "Cinder IOPS Small, "
+                           "Cinder IOPS Med, Cinder IOPS Large,"
+                           "Guest Vols Normal, "
+                           "Guest Vols Building, Guest Vols Limiting,"
+                           "Guest Await Small, "
+                           "Guest Await Med, Guest Await Large\n")
+            LOG.info("Done writing the CSV header")
+
+        congestion_data.c_freq_dict.update(
+            dict.fromkeys(set(['N', 'B', 'L']).difference(
+                congestion_data.c_freq_dict), 0))
+        congestion_data.g_freq_dict.update(
+            dict.fromkeys(set(['N', 'B', 'L']).difference(
+                congestion_data.g_freq_dict), 0))
+
+        self.csv.write(
+            ",".join(
+                (str(congestion_data.timestamp),
+                 str(congestion_data.status[0]),
+                 str(congestion_data.c_freq_dict[
+                     DeviceDataCollector.STATUS_NORMAL]),
+                 str(congestion_data.c_freq_dict[
+                     DeviceDataCollector.STATUS_BUILDING]),
+                 str(congestion_data.c_freq_dict[
+                     DeviceDataCollector.STATUS_CONGESTED]),
+                 str(congestion_data.c_iops_avg_list[0]),
+                 str(congestion_data.c_iops_avg_list[1]),
+                 str(congestion_data.c_iops_avg_list[2]),
+                 str(congestion_data.g_freq_dict[
+                     DeviceDataCollector.STATUS_NORMAL]),
+                 str(congestion_data.g_freq_dict[
+                     DeviceDataCollector.STATUS_BUILDING]),
+                 str(congestion_data.g_freq_dict[
+                     DeviceDataCollector.STATUS_CONGESTED]),
+                 str(congestion_data.g_await_avg_list[0]),
+                 str(congestion_data.g_await_avg_list[1]),
+                 str(congestion_data.g_await_avg_list[2]))
+            ) + '\n'
+        )
+
+        # flush the python buffer
+        self.csv.flush()
+
+        # make sure the os pushes the data to disk
+        os.fsync(self.csv.fileno())
+
+    def generate_status(self):
+        # Purge stale devices
+        self._flush_stale_devices()
+
+        # Get congestion state
+        data = self._determine_congestion_state()
+        if self.status_skip_count < self.status_skip_total:
+            self.status_skip_count += 1
+        else:
+            self._log_congestion_status(data)
+            self.status_skip_count = 0
+
+        # Send alarm updates to FM if configured and there are guest volumes
+        # present (there won't be any on the standby controller)
+        if CONF.cinder_congestion.generate_fm_alarms:
+            if data.g_count > 0:
+                self._update_fm(CONF.cinder_congestion.fm_alarm_debounce)
+            elif data.g_count == 0 and self.last_g_count > 0:
+                self._clear_fm()
+
+        # Save the current guest count view
+        self.last_g_count = data.g_count
+
+        # Save output
+        if CONF.cinder_congestion.output_write_csv:
+            self._create_output(CONF.cinder_congestion.output_csv_dir,
+                                data)
+
+    def parse_iostats(self, line):
+        # LOG.debug(line)
+        m = self.ts_regex.match(line)
+        if m:
+            self.latest_time = m.group(0)
+
+        m = self.device_regex.match(line)
+        if m:
+            # LOG.debug(line)
+            # LOG.debug("%s: %f %f" % (m.group(1),
+            #           float(m.group(4)) + float(m.group(5)),
+            #           float(m.group(10))))
+            if not (self._is_number(m.group(4)) and
+                    self._is_number(m.group(5)) and
+                    self._is_number(m.group(10))):
+                LOG.error("ValueError: invalid input: r/s = %s, w/s = %s "
+                          "await = %s" % (m.group(4), m.group(5),
+                                          m.group(10)))
+            else:
+                if not any(s in m.group(1) for s in ['loop', 'ram', 'nb',
+                                                     'md', 'scd'] +
+                           self.non_cinder_phys_devs):
+                    self._update_device_stats(self.latest_time,
+                                              m.group(1),
+                                              (float(m.group(4)) +
+                                               float(m.group(5))),
+
float(m.group(10))) diff --git a/middleware/io-monitor/recipes-common/io-monitor/io-monitor/io_monitor/options.py b/middleware/io-monitor/recipes-common/io-monitor/io-monitor/io_monitor/options.py new file mode 100644 index 0000000..7fde2f4 --- /dev/null +++ b/middleware/io-monitor/recipes-common/io-monitor/io-monitor/io_monitor/options.py @@ -0,0 +1,27 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright (c) 2016 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + +from oslo_config import cfg + +CONF = cfg.CONF + +common_opts = [ + cfg.BoolOpt('daemon_mode', default=True, + help=('Run as a daemon')), + cfg.IntOpt('wait_time', default=1, min=1, max=59, + help=('Sleep interval (in seconds) between iostat executions ' + '[1..59]')), + cfg.StrOpt('global_log_level', + default='DEBUG', + choices=['DEBUG', 'INFO', 'WARN', 'ERROR'], + help=('Global debug level. Note: All monitors will be clipped ' + 'at this setting.')) +] + + +def add_common_opts(): + CONF.register_cli_opts(common_opts) diff --git a/middleware/io-monitor/recipes-common/io-monitor/io-monitor/io_monitor/test-tools/cinder_stress_increment.sh b/middleware/io-monitor/recipes-common/io-monitor/io-monitor/io_monitor/test-tools/cinder_stress_increment.sh new file mode 100755 index 0000000..64f516b --- /dev/null +++ b/middleware/io-monitor/recipes-common/io-monitor/io-monitor/io_monitor/test-tools/cinder_stress_increment.sh @@ -0,0 +1,86 @@ +#!/bin/bash + +# +# Copyright (c) 2017 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + +if [[ $EUID -ne 0 ]]; then + echo "This script must be run as root" 1>&2 + exit 1 +fi + +TEST_ROOT=$PWD +HEAT_CHECK=${TEST_ROOT}/heat_check.sh + +STRESSOR_CREATE=${TEST_ROOT}/cinder_stress_increment_create.sh +STRESSOR_DELETE=${TEST_ROOT}/cinder_stress_increment_delete.sh + +## one volume/VM/stack +#YAML=${TEST_ROOT}/yaml/cinder_v1_bon0.yaml +#YAML=${TEST_ROOT}/yaml/cinder_v1_bon1.yaml +#YAML=${TEST_ROOT}/yaml/cinder_v1_bon1_cpuburn.yaml + +## Two volumes/VM/stack +#YAML=${TEST_ROOT}/yaml/cinder_v2_bon0.yaml +#YAML=${TEST_ROOT}/yaml/cinder_v2_bon2.yaml +#YAML=${TEST_ROOT}/yaml/cinder_v2_bon2_cpuburn.yaml + +## 4 volumes/VM/stack +#YAML=${TEST_ROOT}/yaml/cinder_v4_bon0.yaml +#YAML=${TEST_ROOT}/yaml/cinder_v4_bon4.yaml +#YAML=${TEST_ROOT}/yaml/cinder_v4_bon4_cpuburn.yaml + +## test +#YAML=${TEST_ROOT}/yaml/cinder_nokia_v5_bon0.yaml +YAML=${TEST_ROOT}/yaml/cinder_nokia_v5_bon1.yaml +#YAML=${TEST_ROOT}/yaml/cinder_nokia_v5_bon2.yaml +#YAML=${TEST_ROOT}/yaml/cinder_nokia_v5_bon3.yaml +#YAML=${TEST_ROOT}/yaml/cinder_nokia_v5_bon4.yaml +#YAML=${TEST_ROOT}/yaml/cinder_nokia_v5_bon4_cpuburn.yaml + +for stack_num in 1 2 4 8 14 +#for stack_num in $(seq 1 32) +do + + + echo "$stack_num: Creating stacks" + sudo -u wrsroot ${STRESSOR_CREATE} $YAML $stack_num + + source /etc/nova/openrc + AM_I_CREATING="sudo -u wrsroot $HEAT_CHECK | grep CREATE_IN_PROGRESS" + while [[ $(eval $AM_I_CREATING) != "" ]]; do + echo "$stack_num: Creating..." 
+        sleep 15
+    done
+
+    ANY_CREATE_ERRORS="sudo -u wrsroot $HEAT_CHECK | grep CREATE_FAILED"
+    if [[ $(eval $ANY_CREATE_ERRORS) != "" ]]; then
+        echo "$stack_num: Creating stacks failed"
+        exit 1
+    else
+        # Run at steady state briefly before tearing the stacks down
+        echo "$stack_num: Running at steady state for an additional 10 seconds"
+        sleep 10
+    fi
+
+    echo "$stack_num: Deleting stacks"
+    sudo -u wrsroot ${STRESSOR_DELETE} $stack_num
+
+    AM_I_DELETING="sudo -u wrsroot $HEAT_CHECK | grep DELETE_IN_PROGRESS"
+    while [[ $(eval $AM_I_DELETING) != "" ]]; do
+        echo "$stack_num: Deleting..."
+        sleep 15
+    done
+
+    ANY_DELETE_ERRORS="sudo -u wrsroot $HEAT_CHECK | grep DELETE_FAILED"
+    if [[ $(eval $ANY_DELETE_ERRORS) != "" ]]; then
+        echo "$stack_num: Deleting stacks failed"
+    else
+        echo "$stack_num: Create/Delete successful"
+    fi
+
+    sleep 10
+
+done
+
diff --git a/middleware/io-monitor/recipes-common/io-monitor/io-monitor/io_monitor/test-tools/cinder_stress_increment_create.sh b/middleware/io-monitor/recipes-common/io-monitor/io-monitor/io_monitor/test-tools/cinder_stress_increment_create.sh
new file mode 100755
index 0000000..aa7b253
--- /dev/null
+++ b/middleware/io-monitor/recipes-common/io-monitor/io-monitor/io_monitor/test-tools/cinder_stress_increment_create.sh
@@ -0,0 +1,24 @@
+#!/bin/bash
+
+#
+# Copyright (c) 2017 Wind River Systems, Inc.
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+
+case $# in
+    0|1)
+        echo "Usage: `basename $0` <yaml file> <# of stacks>"
+        exit 1
+        ;;
+esac
+
+YAML=$1
+NUM_STACKS=$2
+
+for i in $(seq 1 $NUM_STACKS)
+do
+    source $HOME/openrc.tenant1
+    heat stack-create -f $YAML stack-$i
+done
+
diff --git a/middleware/io-monitor/recipes-common/io-monitor/io-monitor/io_monitor/test-tools/cinder_stress_increment_delete.sh b/middleware/io-monitor/recipes-common/io-monitor/io-monitor/io_monitor/test-tools/cinder_stress_increment_delete.sh
new file mode 100755
index 0000000..2f4a7c0
--- /dev/null
+++ b/middleware/io-monitor/recipes-common/io-monitor/io-monitor/io_monitor/test-tools/cinder_stress_increment_delete.sh
@@ -0,0 +1,23 @@
+#!/bin/bash
+
+#
+# Copyright (c) 2017 Wind River Systems, Inc.
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+
+case $# in
+    0)
+        echo "Usage: `basename $0` <# of stacks>"
+        exit 1
+        ;;
+esac
+
+NUM_STACKS=$1
+
+for i in $(seq 1 $NUM_STACKS)
+do
+    source /etc/nova/openrc
+    heat stack-delete stack-$i
+done
+
diff --git a/middleware/io-monitor/recipes-common/io-monitor/io-monitor/io_monitor/test-tools/yaml/cinder_nokia_v6_bon0.yaml b/middleware/io-monitor/recipes-common/io-monitor/io-monitor/io_monitor/test-tools/yaml/cinder_nokia_v6_bon0.yaml
new file mode 100644
index 0000000..57d62e6
--- /dev/null
+++ b/middleware/io-monitor/recipes-common/io-monitor/io-monitor/io_monitor/test-tools/yaml/cinder_nokia_v6_bon0.yaml
@@ -0,0 +1,88 @@
+heat_template_version: '2013-05-23'
+
+description:
+  No Bonnies, 5 volumes. 
No root volumes, CoW images + +parameters: + Network_Name: + type: string + description: Network which is used for servers + default: tenant1-mgmt-net + +resources: + + Test_volume_1: + type: OS::Cinder::Volume + properties: + name: Test_volume_1 + size: 5 + + Test_volume_2: + type: OS::Cinder::Volume + properties: + name: Test_volume_2 + size: 5 + + Test_volume_3: + type: OS::Cinder::Volume + properties: + name: Test_volume_3 + size: 5 + + Test_volume_4: + type: OS::Cinder::Volume + properties: + name: Test_volume_4 + size: 5 + + Test_volume_5: + type: OS::Cinder::Volume + properties: + name: Test_volume_5 + size: 5 + + Stabi_volume_write: + type: OS::Nova::Server + properties: + name: { list_join : [ "-", [{get_param: 'OS::stack_name'}, 'Stabi_volume_write']]} + image: centos_nkstress + flavor: smallvol + #key_name: newkey + availability_zone: "nova" + networks: + - network: { get_param: Network_Name } + block_device_mapping: + - { volume_id: { get_resource: Test_volume_1 }, device_name: "vdb" } + - { volume_id: { get_resource: Test_volume_2 }, device_name: "vdc" } + - { volume_id: { get_resource: Test_volume_3 }, device_name: "vdd" } + - { volume_id: { get_resource: Test_volume_4 }, device_name: "vde" } + - { volume_id: { get_resource: Test_volume_5 }, device_name: "vdf" } + + user_data_format: RAW + user_data: | + #cloud-config + user: centos + password: centos + chpasswd: {expire: False} + ssh_pwauth: True + runcmd: + - echo "Creating file systems..." > /root/stabi_1.log& + - mkfs.ext4 /dev/vdb + - mkfs.ext4 /dev/vdc + - mkfs.ext4 /dev/vdd + - mkfs.ext4 /dev/vde + - mkfs.ext4 /dev/vdf + - echo "Mounting directories..." >> /root/stabi_1.log& + - mkdir /mnt/b + - mkdir /mnt/c + - mkdir /mnt/d + - mkdir /mnt/e + - mkdir /mnt/f + - mount /dev/vdb /mnt/b/ + - mount /dev/vdc /mnt/c/ + - mount /dev/vdd /mnt/d/ + - mount /dev/vde /mnt/e/ + - mount /dev/vdf /mnt/f/ + - echo "Starting bonnie++..." 
>> /root/stabi_1.log& + - date >> /root/stabi_1.log& + diff --git a/middleware/io-monitor/recipes-common/io-monitor/io-monitor/io_monitor/test-tools/yaml/cinder_nokia_v6_bon0_20GB.yaml b/middleware/io-monitor/recipes-common/io-monitor/io-monitor/io_monitor/test-tools/yaml/cinder_nokia_v6_bon0_20GB.yaml new file mode 100755 index 0000000..84689ca --- /dev/null +++ b/middleware/io-monitor/recipes-common/io-monitor/io-monitor/io_monitor/test-tools/yaml/cinder_nokia_v6_bon0_20GB.yaml @@ -0,0 +1,84 @@ +heat_template_version: '2013-05-23' + +description: + No Bonnie, One root volume 20GB and non-root 4 volumes + +parameters: + Network_Name: + type: string + description: Network which is used for servers + default: tenant1-mgmt-net + +resources: + + root_volume_1: + type: OS::Cinder::Volume + properties: + name: root_volume_1 + image: centos_nkstress + size: 20 + + Test_volume_2: + type: OS::Cinder::Volume + properties: + name: Test_volume_2 + size: 5 + + Test_volume_3: + type: OS::Cinder::Volume + properties: + name: Test_volume_3 + size: 5 + + Test_volume_4: + type: OS::Cinder::Volume + properties: + name: Test_volume_4 + size: 5 + + Test_volume_5: + type: OS::Cinder::Volume + properties: + name: Test_volume_5 + size: 5 + + Stabi_volume_write: + type: OS::Nova::Server + depends_on: root_volume_1 + properties: + name: { list_join : [ "-", [{get_param: 'OS::stack_name'}, 'Stabi_volume_write']]} + flavor: smallvol + key_name: newkey + availability_zone: "nova" + networks: + - network: { get_param: Network_Name } + block_device_mapping: + - { volume_id: { get_resource: root_volume_1}, device_name: "vda" } + - { volume_id: { get_resource: Test_volume_2}, device_name: "vdb" } + - { volume_id: { get_resource: Test_volume_3}, device_name: "vdc" } + - { volume_id: { get_resource: Test_volume_4}, device_name: "vdd" } + - { volume_id: { get_resource: Test_volume_5}, device_name: "vde" } + + user_data_format: RAW + user_data: | + #cloud-config + user: centos + password: centos + chpasswd: {expire: False} + ssh_pwauth: True + runcmd: + - echo "Creating file systems..." > /root/stabi_1.log& + - mkfs.ext4 /dev/vdb + - mkfs.ext4 /dev/vdc + - mkfs.ext4 /dev/vdd + - mkfs.ext4 /dev/vde + - echo "Mounting directories..." >> /root/stabi_1.log& + - mkdir /mnt/b + - mkdir /mnt/c + - mkdir /mnt/d + - mkdir /mnt/e + - mount /dev/vdb /mnt/b/ + - mount /dev/vdc /mnt/c/ + - mount /dev/vdd /mnt/d/ + - mount /dev/vde /mnt/e/ + diff --git a/middleware/io-monitor/recipes-common/io-monitor/io-monitor/io_monitor/test-tools/yaml/cinder_nokia_v6_bon1.yaml b/middleware/io-monitor/recipes-common/io-monitor/io-monitor/io_monitor/test-tools/yaml/cinder_nokia_v6_bon1.yaml new file mode 100644 index 0000000..2881803 --- /dev/null +++ b/middleware/io-monitor/recipes-common/io-monitor/io-monitor/io_monitor/test-tools/yaml/cinder_nokia_v6_bon1.yaml @@ -0,0 +1,89 @@ +heat_template_version: '2013-05-23' + +description: + One Bonnie, 5 volumes. 
No root volumes, CoW images + +parameters: + Network_Name: + type: string + description: Network which is used for servers + default: tenant1-mgmt-net + +resources: + + Test_volume_1: + type: OS::Cinder::Volume + properties: + name: Test_volume_1 + size: 5 + + Test_volume_2: + type: OS::Cinder::Volume + properties: + name: Test_volume_2 + size: 5 + + Test_volume_3: + type: OS::Cinder::Volume + properties: + name: Test_volume_3 + size: 5 + + Test_volume_4: + type: OS::Cinder::Volume + properties: + name: Test_volume_4 + size: 5 + + Test_volume_5: + type: OS::Cinder::Volume + properties: + name: Test_volume_5 + size: 5 + + Stabi_volume_write: + type: OS::Nova::Server + properties: + name: { list_join : [ "-", [{get_param: 'OS::stack_name'}, 'Stabi_volume_write']]} + image: centos_nkstress + flavor: smallvol + #key_name: newkey + availability_zone: "nova" + networks: + - network: { get_param: Network_Name } + block_device_mapping: + - { volume_id: { get_resource: Test_volume_1 }, device_name: "vdb" } + - { volume_id: { get_resource: Test_volume_2 }, device_name: "vdc" } + - { volume_id: { get_resource: Test_volume_3 }, device_name: "vdd" } + - { volume_id: { get_resource: Test_volume_4 }, device_name: "vde" } + - { volume_id: { get_resource: Test_volume_5 }, device_name: "vdf" } + + user_data_format: RAW + user_data: | + #cloud-config + user: centos + password: centos + chpasswd: {expire: False} + ssh_pwauth: True + runcmd: + - echo "Creating file systems..." > /root/stabi_1.log& + - mkfs.ext4 /dev/vdb + - mkfs.ext4 /dev/vdc + - mkfs.ext4 /dev/vdd + - mkfs.ext4 /dev/vde + - mkfs.ext4 /dev/vdf + - echo "Mounting directories..." >> /root/stabi_1.log& + - mkdir /mnt/b + - mkdir /mnt/c + - mkdir /mnt/d + - mkdir /mnt/e + - mkdir /mnt/f + - mount /dev/vdb /mnt/b/ + - mount /dev/vdc /mnt/c/ + - mount /dev/vdd /mnt/d/ + - mount /dev/vde /mnt/e/ + - mount /dev/vdf /mnt/f/ + - echo "Starting bonnie++..." 
>> /root/stabi_1.log& + - date >> /root/stabi_1.log& + - /usr/sbin/bonnie++ -b -n 100 -d /mnt/b -u root -x 9999999 >> /root/stabi_1.log& + diff --git a/middleware/io-monitor/recipes-common/io-monitor/io-monitor/io_monitor/test-tools/yaml/cinder_nokia_v6_bon1_10GB.yaml b/middleware/io-monitor/recipes-common/io-monitor/io-monitor/io_monitor/test-tools/yaml/cinder_nokia_v6_bon1_10GB.yaml new file mode 100755 index 0000000..150d6da --- /dev/null +++ b/middleware/io-monitor/recipes-common/io-monitor/io-monitor/io_monitor/test-tools/yaml/cinder_nokia_v6_bon1_10GB.yaml @@ -0,0 +1,87 @@ +heat_template_version: '2013-05-23' + +description: + One Bonnie, One root volume 10GB and non-root 4 5GB volumes + +parameters: + Network_Name: + type: string + description: Network which is used for servers + default: tenant1-mgmt-net + +resources: + + root_volume_1: + type: OS::Cinder::Volume + properties: + name: root_volume_1 + image: centos_nkstress + size: 10 + + Test_volume_2: + type: OS::Cinder::Volume + properties: + name: Test_volume_2 + size: 5 + + Test_volume_3: + type: OS::Cinder::Volume + properties: + name: Test_volume_3 + size: 5 + + Test_volume_4: + type: OS::Cinder::Volume + properties: + name: Test_volume_4 + size: 5 + + Test_volume_5: + type: OS::Cinder::Volume + properties: + name: Test_volume_5 + size: 5 + + Stabi_volume_write: + type: OS::Nova::Server + depends_on: root_volume_1 + properties: + name: { list_join : [ "-", [{get_param: 'OS::stack_name'}, 'Stabi_volume_write']]} + flavor: smallvol + key_name: newkey + availability_zone: "nova" + networks: + - network: { get_param: Network_Name } + block_device_mapping: + - { volume_id: { get_resource: root_volume_1}, device_name: "vda" } + - { volume_id: { get_resource: Test_volume_2}, device_name: "vdb" } + - { volume_id: { get_resource: Test_volume_3}, device_name: "vdc" } + - { volume_id: { get_resource: Test_volume_4}, device_name: "vdd" } + - { volume_id: { get_resource: Test_volume_5}, device_name: "vde" } + + user_data_format: RAW + user_data: | + #cloud-config + user: centos + password: centos + chpasswd: {expire: False} + ssh_pwauth: True + runcmd: + - echo "Creating file systems..." > /root/stabi_1.log& + - mkfs.ext4 /dev/vdb + - mkfs.ext4 /dev/vdc + - mkfs.ext4 /dev/vdd + - mkfs.ext4 /dev/vde + - echo "Mounting directories..." >> /root/stabi_1.log& + - mkdir /mnt/b + - mkdir /mnt/c + - mkdir /mnt/d + - mkdir /mnt/e + - mount /dev/vdb /mnt/b/ + - mount /dev/vdc /mnt/c/ + - mount /dev/vdd /mnt/d/ + - mount /dev/vde /mnt/e/ + - echo "Starting bonnie++..." 
>> /root/stabi_1.log& + - date >> /root/stabi_1.log& + - /usr/sbin/bonnie++ -b -n 100 -d /home/centos -u root -x 1000 >> /root/stabi_1.log& + diff --git a/middleware/io-monitor/recipes-common/io-monitor/io-monitor/io_monitor/test-tools/yaml/cinder_nokia_v6_bon1_20GB.yaml b/middleware/io-monitor/recipes-common/io-monitor/io-monitor/io_monitor/test-tools/yaml/cinder_nokia_v6_bon1_20GB.yaml new file mode 100755 index 0000000..7396b27 --- /dev/null +++ b/middleware/io-monitor/recipes-common/io-monitor/io-monitor/io_monitor/test-tools/yaml/cinder_nokia_v6_bon1_20GB.yaml @@ -0,0 +1,87 @@ +heat_template_version: '2013-05-23' + +description: + One Bonnie, One root volume 20GB and non-root 4 volumes + +parameters: + Network_Name: + type: string + description: Network which is used for servers + default: tenant1-mgmt-net + +resources: + + root_volume_1: + type: OS::Cinder::Volume + properties: + name: root_volume_1 + image: centos_nkstress + size: 20 + + Test_volume_2: + type: OS::Cinder::Volume + properties: + name: Test_volume_2 + size: 5 + + Test_volume_3: + type: OS::Cinder::Volume + properties: + name: Test_volume_3 + size: 5 + + Test_volume_4: + type: OS::Cinder::Volume + properties: + name: Test_volume_4 + size: 5 + + Test_volume_5: + type: OS::Cinder::Volume + properties: + name: Test_volume_5 + size: 5 + + Stabi_volume_write: + type: OS::Nova::Server + depends_on: root_volume_1 + properties: + name: { list_join : [ "-", [{get_param: 'OS::stack_name'}, 'Stabi_volume_write']]} + flavor: smallvol + key_name: newkey + availability_zone: "nova" + networks: + - network: { get_param: Network_Name } + block_device_mapping: + - { volume_id: { get_resource: root_volume_1}, device_name: "vda" } + - { volume_id: { get_resource: Test_volume_2}, device_name: "vdb" } + - { volume_id: { get_resource: Test_volume_3}, device_name: "vdc" } + - { volume_id: { get_resource: Test_volume_4}, device_name: "vdd" } + - { volume_id: { get_resource: Test_volume_5}, device_name: "vde" } + + user_data_format: RAW + user_data: | + #cloud-config + user: centos + password: centos + chpasswd: {expire: False} + ssh_pwauth: True + runcmd: + - echo "Creating file systems..." > /root/stabi_1.log& + - mkfs.ext4 /dev/vdb + - mkfs.ext4 /dev/vdc + - mkfs.ext4 /dev/vdd + - mkfs.ext4 /dev/vde + - echo "Mounting directories..." >> /root/stabi_1.log& + - mkdir /mnt/b + - mkdir /mnt/c + - mkdir /mnt/d + - mkdir /mnt/e + - mount /dev/vdb /mnt/b/ + - mount /dev/vdc /mnt/c/ + - mount /dev/vdd /mnt/d/ + - mount /dev/vde /mnt/e/ + - echo "Starting bonnie++..." 
>> /root/stabi_1.log& + - date >> /root/stabi_1.log& + - /usr/sbin/bonnie++ -b -n 100 -d /home/centos -u root -x 1000 >> /root/stabi_1.log& + diff --git a/middleware/io-monitor/recipes-common/io-monitor/io-monitor/io_monitor/test-tools/yaml/cinder_nokia_v6_bon1_50GB.yaml b/middleware/io-monitor/recipes-common/io-monitor/io-monitor/io_monitor/test-tools/yaml/cinder_nokia_v6_bon1_50GB.yaml new file mode 100755 index 0000000..b4d014f --- /dev/null +++ b/middleware/io-monitor/recipes-common/io-monitor/io-monitor/io_monitor/test-tools/yaml/cinder_nokia_v6_bon1_50GB.yaml @@ -0,0 +1,87 @@ +heat_template_version: '2013-05-23' + +description: + One Bonnie, One root volume 50GB and non-root 4 volumes + +parameters: + Network_Name: + type: string + description: Network which is used for servers + default: tenant1-mgmt-net + +resources: + + root_volume_1: + type: OS::Cinder::Volume + properties: + name: root_volume_1 + image: centos_nkstress + size: 50 + + Test_volume_2: + type: OS::Cinder::Volume + properties: + name: Test_volume_2 + size: 5 + + Test_volume_3: + type: OS::Cinder::Volume + properties: + name: Test_volume_3 + size: 5 + + Test_volume_4: + type: OS::Cinder::Volume + properties: + name: Test_volume_4 + size: 5 + + Test_volume_5: + type: OS::Cinder::Volume + properties: + name: Test_volume_5 + size: 5 + + Stabi_volume_write: + type: OS::Nova::Server + depends_on: root_volume_1 + properties: + name: { list_join : [ "-", [{get_param: 'OS::stack_name'}, 'Stabi_volume_write']]} + flavor: smallvol + key_name: newkey + availability_zone: "nova" + networks: + - network: { get_param: Network_Name } + block_device_mapping: + - { volume_id: { get_resource: root_volume_1}, device_name: "vda" } + - { volume_id: { get_resource: Test_volume_2}, device_name: "vdb" } + - { volume_id: { get_resource: Test_volume_3}, device_name: "vdc" } + - { volume_id: { get_resource: Test_volume_4}, device_name: "vdd" } + - { volume_id: { get_resource: Test_volume_5}, device_name: "vde" } + + user_data_format: RAW + user_data: | + #cloud-config + user: centos + password: centos + chpasswd: {expire: False} + ssh_pwauth: True + runcmd: + - echo "Creating file systems..." > /root/stabi_1.log& + - mkfs.ext4 /dev/vdb + - mkfs.ext4 /dev/vdc + - mkfs.ext4 /dev/vdd + - mkfs.ext4 /dev/vde + - echo "Mounting directories..." >> /root/stabi_1.log& + - mkdir /mnt/b + - mkdir /mnt/c + - mkdir /mnt/d + - mkdir /mnt/e + - mount /dev/vdb /mnt/b/ + - mount /dev/vdc /mnt/c/ + - mount /dev/vdd /mnt/d/ + - mount /dev/vde /mnt/e/ + - echo "Starting bonnie++..." >> /root/stabi_1.log& + - date >> /root/stabi_1.log& + - /usr/sbin/bonnie++ -b -n 100 -d /home/centos -u root -x 1000 >> /root/stabi_1.log& + diff --git a/middleware/io-monitor/recipes-common/io-monitor/io-monitor/io_monitor/test-tools/yaml/cinder_nokia_v6_bon2.yaml b/middleware/io-monitor/recipes-common/io-monitor/io-monitor/io_monitor/test-tools/yaml/cinder_nokia_v6_bon2.yaml new file mode 100644 index 0000000..a5ed221 --- /dev/null +++ b/middleware/io-monitor/recipes-common/io-monitor/io-monitor/io_monitor/test-tools/yaml/cinder_nokia_v6_bon2.yaml @@ -0,0 +1,90 @@ +heat_template_version: '2013-05-23' + +description: + Two Bonnies, 5 volumes. 
No root volumes, CoW images + +parameters: + Network_Name: + type: string + description: Network which is used for servers + default: tenant1-mgmt-net + +resources: + + Test_volume_1: + type: OS::Cinder::Volume + properties: + name: Test_volume_1 + size: 5 + + Test_volume_2: + type: OS::Cinder::Volume + properties: + name: Test_volume_2 + size: 5 + + Test_volume_3: + type: OS::Cinder::Volume + properties: + name: Test_volume_3 + size: 5 + + Test_volume_4: + type: OS::Cinder::Volume + properties: + name: Test_volume_4 + size: 5 + + Test_volume_5: + type: OS::Cinder::Volume + properties: + name: Test_volume_5 + size: 5 + + Stabi_volume_write: + type: OS::Nova::Server + properties: + name: { list_join : [ "-", [{get_param: 'OS::stack_name'}, 'Stabi_volume_write']]} + image: centos_nkstress + flavor: smallvol + #key_name: newkey + availability_zone: "nova" + networks: + - network: { get_param: Network_Name } + block_device_mapping: + - { volume_id: { get_resource: Test_volume_1 }, device_name: "vdb" } + - { volume_id: { get_resource: Test_volume_2 }, device_name: "vdc" } + - { volume_id: { get_resource: Test_volume_3 }, device_name: "vdd" } + - { volume_id: { get_resource: Test_volume_4 }, device_name: "vde" } + - { volume_id: { get_resource: Test_volume_5 }, device_name: "vdf" } + + user_data_format: RAW + user_data: | + #cloud-config + user: centos + password: centos + chpasswd: {expire: False} + ssh_pwauth: True + runcmd: + - echo "Creating file systems..." > /root/stabi_1.log& + - mkfs.ext4 /dev/vdb + - mkfs.ext4 /dev/vdc + - mkfs.ext4 /dev/vdd + - mkfs.ext4 /dev/vde + - mkfs.ext4 /dev/vdf + - echo "Mounting directories..." >> /root/stabi_1.log& + - mkdir /mnt/b + - mkdir /mnt/c + - mkdir /mnt/d + - mkdir /mnt/e + - mkdir /mnt/f + - mount /dev/vdb /mnt/b/ + - mount /dev/vdc /mnt/c/ + - mount /dev/vdd /mnt/d/ + - mount /dev/vde /mnt/e/ + - mount /dev/vdf /mnt/f/ + - echo "Starting bonnie++..." 
>> /root/stabi_1.log&
+      - date >> /root/stabi_1.log&
+      - /usr/sbin/bonnie++ -b -n 100 -d /mnt/b -u root -x 999 >> /root/stabi_1.log&
+      - /usr/sbin/bonnie++ -b -n 100 -d /mnt/c -u root -x 999 >> /root/stabi_2.log&
+
diff --git a/middleware/io-monitor/recipes-common/io-monitor/io-monitor/io_monitor/test-tools/yaml/cinder_nokia_v6_bon2_20GB.yaml b/middleware/io-monitor/recipes-common/io-monitor/io-monitor/io_monitor/test-tools/yaml/cinder_nokia_v6_bon2_20GB.yaml
new file mode 100755
index 0000000..909eabd
--- /dev/null
+++ b/middleware/io-monitor/recipes-common/io-monitor/io-monitor/io_monitor/test-tools/yaml/cinder_nokia_v6_bon2_20GB.yaml
@@ -0,0 +1,88 @@
+heat_template_version: '2013-05-23'
+
+description:
+  Two Bonnies, one 20GB root volume and four non-root volumes
+
+parameters:
+  Network_Name:
+    type: string
+    description: Network which is used for servers
+    default: tenant1-mgmt-net
+
+resources:
+
+  root_volume_1:
+    type: OS::Cinder::Volume
+    properties:
+      name: root_volume_1
+      image: centos_nkstress
+      size: 20
+
+  Test_volume_2:
+    type: OS::Cinder::Volume
+    properties:
+      name: Test_volume_2
+      size: 5
+
+  Test_volume_3:
+    type: OS::Cinder::Volume
+    properties:
+      name: Test_volume_3
+      size: 5
+
+  Test_volume_4:
+    type: OS::Cinder::Volume
+    properties:
+      name: Test_volume_4
+      size: 5
+
+  Test_volume_5:
+    type: OS::Cinder::Volume
+    properties:
+      name: Test_volume_5
+      size: 5
+
+  Stabi_volume_write:
+    type: OS::Nova::Server
+    depends_on: root_volume_1
+    properties:
+      name: { list_join : [ "-", [{get_param: 'OS::stack_name'}, 'Stabi_volume_write']]}
+      flavor: smallvol
+      key_name: newkey
+      availability_zone: "nova"
+      networks:
+        - network: { get_param: Network_Name }
+      block_device_mapping:
+        - { volume_id: { get_resource: root_volume_1}, device_name: "vda" }
+        - { volume_id: { get_resource: Test_volume_2}, device_name: "vdb" }
+        - { volume_id: { get_resource: Test_volume_3}, device_name: "vdc" }
+        - { volume_id: { get_resource: Test_volume_4}, device_name: "vdd" }
+        - { volume_id: { get_resource: Test_volume_5}, device_name: "vde" }
+
+      user_data_format: RAW
+      user_data: |
+        #cloud-config
+        user: centos
+        password: centos
+        chpasswd: {expire: False}
+        ssh_pwauth: True
+        runcmd:
+          - echo "Creating file systems..." > /root/stabi_1.log&
+          - mkfs.ext4 /dev/vdb
+          - mkfs.ext4 /dev/vdc
+          - mkfs.ext4 /dev/vdd
+          - mkfs.ext4 /dev/vde
+          - echo "Mounting directories..." >> /root/stabi_1.log&
+          - mkdir /mnt/b
+          - mkdir /mnt/c
+          - mkdir /mnt/d
+          - mkdir /mnt/e
+          - mount /dev/vdb /mnt/b/
+          - mount /dev/vdc /mnt/c/
+          - mount /dev/vdd /mnt/d/
+          - mount /dev/vde /mnt/e/
+          - echo "Starting bonnie++..." >> /root/stabi_1.log&
+          - date >> /root/stabi_1.log&
+          - /usr/sbin/bonnie++ -b -n 100 -d /home/centos -u root -x 1000 >> /root/stabi_1.log&
+          - /usr/sbin/bonnie++ -b -n 100 -d /mnt/b -u root -x 1000 >> /root/stabi_2.log&
+
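Each of these templates creates one VM backed by five cinder volumes, so the stack counts that cinder_stress_increment.sh steps through translate directly into aggregate load on the cinder LVM backend. The arithmetic, for reference:

    # Stack counts stepped through by the driver script
    for stacks in (1, 2, 4, 8, 14):
        volumes = stacks * 5        # five cinder volumes per stack
        print(stacks, volumes)      # peaks at 14 stacks / 70 volumes
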
diff --git a/middleware/io-monitor/recipes-common/io-monitor/io-monitor/io_monitor/test-tools/yaml/cinder_nokia_v6_bon2_50GB.yaml b/middleware/io-monitor/recipes-common/io-monitor/io-monitor/io_monitor/test-tools/yaml/cinder_nokia_v6_bon2_50GB.yaml
new file mode 100755
index 0000000..2479279
--- /dev/null
+++ b/middleware/io-monitor/recipes-common/io-monitor/io-monitor/io_monitor/test-tools/yaml/cinder_nokia_v6_bon2_50GB.yaml
@@ -0,0 +1,88 @@
+heat_template_version: '2013-05-23'
+
+description:
+  Two Bonnies, one 50GB root volume and four non-root volumes
+
+parameters:
+  Network_Name:
+    type: string
+    description: Network which is used for servers
+    default: tenant1-mgmt-net
+
+resources:
+
+  root_volume_1:
+    type: OS::Cinder::Volume
+    properties:
+      name: root_volume_1
+      image: centos_nkstress
+      size: 50
+
+  Test_volume_2:
+    type: OS::Cinder::Volume
+    properties:
+      name: Test_volume_2
+      size: 5
+
+  Test_volume_3:
+    type: OS::Cinder::Volume
+    properties:
+      name: Test_volume_3
+      size: 5
+
+  Test_volume_4:
+    type: OS::Cinder::Volume
+    properties:
+      name: Test_volume_4
+      size: 5
+
+  Test_volume_5:
+    type: OS::Cinder::Volume
+    properties:
+      name: Test_volume_5
+      size: 5
+
+  Stabi_volume_write:
+    type: OS::Nova::Server
+    depends_on: root_volume_1
+    properties:
+      name: { list_join : [ "-", [{get_param: 'OS::stack_name'}, 'Stabi_volume_write']]}
+      flavor: smallvol
+      key_name: newkey
+      availability_zone: "nova"
+      networks:
+        - network: { get_param: Network_Name }
+      block_device_mapping:
+        - { volume_id: { get_resource: root_volume_1}, device_name: "vda" }
+        - { volume_id: { get_resource: Test_volume_2}, device_name: "vdb" }
+        - { volume_id: { get_resource: Test_volume_3}, device_name: "vdc" }
+        - { volume_id: { get_resource: Test_volume_4}, device_name: "vdd" }
+        - { volume_id: { get_resource: Test_volume_5}, device_name: "vde" }
+
+      user_data_format: RAW
+      user_data: |
+        #cloud-config
+        user: centos
+        password: centos
+        chpasswd: {expire: False}
+        ssh_pwauth: True
+        runcmd:
+          - echo "Creating file systems..." > /root/stabi_1.log&
+          - mkfs.ext4 /dev/vdb
+          - mkfs.ext4 /dev/vdc
+          - mkfs.ext4 /dev/vdd
+          - mkfs.ext4 /dev/vde
+          - echo "Mounting directories..." >> /root/stabi_1.log&
+          - mkdir /mnt/b
+          - mkdir /mnt/c
+          - mkdir /mnt/d
+          - mkdir /mnt/e
+          - mount /dev/vdb /mnt/b/
+          - mount /dev/vdc /mnt/c/
+          - mount /dev/vdd /mnt/d/
+          - mount /dev/vde /mnt/e/
+          - echo "Starting bonnie++..." >> /root/stabi_1.log&
+          - date >> /root/stabi_1.log&
+          - /usr/sbin/bonnie++ -b -n 100 -d /home/centos -u root -x 1000 >> /root/stabi_1.log&
+          - /usr/sbin/bonnie++ -b -n 100 -d /mnt/b -u root -x 1000 >> /root/stabi_2.log&
+
diff --git a/middleware/io-monitor/recipes-common/io-monitor/io-monitor/io_monitor/test-tools/yaml/cinder_nokia_v6_bon3.yaml b/middleware/io-monitor/recipes-common/io-monitor/io-monitor/io_monitor/test-tools/yaml/cinder_nokia_v6_bon3.yaml
new file mode 100644
index 0000000..1d5de1c
--- /dev/null
+++ b/middleware/io-monitor/recipes-common/io-monitor/io-monitor/io_monitor/test-tools/yaml/cinder_nokia_v6_bon3.yaml
@@ -0,0 +1,91 @@
+heat_template_version: '2013-05-23'
+
+description:
+  Three Bonnies, 5 volumes. 
No root volumes, CoW images + +parameters: + Network_Name: + type: string + description: Network which is used for servers + default: tenant1-mgmt-net + +resources: + + Test_volume_1: + type: OS::Cinder::Volume + properties: + name: Test_volume_1 + size: 5 + + Test_volume_2: + type: OS::Cinder::Volume + properties: + name: Test_volume_2 + size: 5 + + Test_volume_3: + type: OS::Cinder::Volume + properties: + name: Test_volume_3 + size: 5 + + Test_volume_4: + type: OS::Cinder::Volume + properties: + name: Test_volume_4 + size: 5 + + Test_volume_5: + type: OS::Cinder::Volume + properties: + name: Test_volume_5 + size: 5 + + Stabi_volume_write: + type: OS::Nova::Server + properties: + name: { list_join : [ "-", [{get_param: 'OS::stack_name'}, 'Stabi_volume_write']]} + image: centos_nkstress + flavor: smallvol + #key_name: newkey + availability_zone: "nova" + networks: + - network: { get_param: Network_Name } + block_device_mapping: + - { volume_id: { get_resource: Test_volume_1 }, device_name: "vdb" } + - { volume_id: { get_resource: Test_volume_2 }, device_name: "vdc" } + - { volume_id: { get_resource: Test_volume_3 }, device_name: "vdd" } + - { volume_id: { get_resource: Test_volume_4 }, device_name: "vde" } + - { volume_id: { get_resource: Test_volume_5 }, device_name: "vdf" } + + user_data_format: RAW + user_data: | + #cloud-config + user: centos + password: centos + chpasswd: {expire: False} + ssh_pwauth: True + runcmd: + - echo "Creating file systems..." > /root/stabi_1.log& + - mkfs.ext4 /dev/vdb + - mkfs.ext4 /dev/vdc + - mkfs.ext4 /dev/vdd + - mkfs.ext4 /dev/vde + - mkfs.ext4 /dev/vdf + - echo "Mounting directories..." >> /root/stabi_1.log& + - mkdir /mnt/b + - mkdir /mnt/c + - mkdir /mnt/d + - mkdir /mnt/e + - mkdir /mnt/f + - mount /dev/vdb /mnt/b/ + - mount /dev/vdc /mnt/c/ + - mount /dev/vdd /mnt/d/ + - mount /dev/vde /mnt/e/ + - mount /dev/vdf /mnt/f/ + - echo "Starting bonnie++..." 
>> /root/stabi_1.log& + - date >> /root/stabi_1.log& + - /usr/sbin/bonnie++ -b -n 100 -d /mnt/b -u root -x 999 >> /root/stabi_1.log& + - /usr/sbin/bonnie++ -b -n 100 -d /mnt/c -u root -x 999 >> /root/stabi_2.log& + - /usr/sbin/bonnie++ -b -n 100 -d /mnt/d -u root -x 999 >> /root/stabi_3.log& + diff --git a/middleware/io-monitor/recipes-common/io-monitor/io-monitor/io_monitor/test-tools/yaml/cinder_nokia_v6_bon3_20GB.yaml b/middleware/io-monitor/recipes-common/io-monitor/io-monitor/io_monitor/test-tools/yaml/cinder_nokia_v6_bon3_20GB.yaml new file mode 100755 index 0000000..61e00c8 --- /dev/null +++ b/middleware/io-monitor/recipes-common/io-monitor/io-monitor/io_monitor/test-tools/yaml/cinder_nokia_v6_bon3_20GB.yaml @@ -0,0 +1,89 @@ +heat_template_version: '2013-05-23' + +description: + Three Bonnies, one 20GB root volume and four non-root volumes + +parameters: + Network_Name: + type: string + description: Network which is used for servers + default: tenant1-mgmt-net + +resources: + + root_volume_1: + type: OS::Cinder::Volume + properties: + name: root_volume_1 + image: centos_nkstress + size: 20 + + Test_volume_2: + type: OS::Cinder::Volume + properties: + name: Test_volume_2 + size: 5 + + Test_volume_3: + type: OS::Cinder::Volume + properties: + name: Test_volume_3 + size: 5 + + Test_volume_4: + type: OS::Cinder::Volume + properties: + name: Test_volume_4 + size: 5 + + Test_volume_5: + type: OS::Cinder::Volume + properties: + name: Test_volume_5 + size: 5 + + Stabi_volume_write: + type: OS::Nova::Server + depends_on: root_volume_1 + properties: + name: { list_join : [ "-", [{get_param: 'OS::stack_name'}, 'Stabi_volume_write']]} + flavor: smallvol + key_name: newkey + availability_zone: "nova" + networks: + - network: { get_param: Network_Name } + block_device_mapping: + - { volume_id: { get_resource: root_volume_1}, device_name: "vda" } + - { volume_id: { get_resource: Test_volume_2}, device_name: "vdb" } + - { volume_id: { get_resource: Test_volume_3}, device_name: "vdc" } + - { volume_id: { get_resource: Test_volume_4}, device_name: "vdd" } + - { volume_id: { get_resource: Test_volume_5}, device_name: "vde" } + + user_data_format: RAW + user_data: | + #cloud-config + user: centos + password: centos + chpasswd: {expire: False} + ssh_pwauth: True + runcmd: + - echo "Creating file systems..." > /root/stabi_1.log& + - mkfs.ext4 /dev/vdb + - mkfs.ext4 /dev/vdc + - mkfs.ext4 /dev/vdd + - mkfs.ext4 /dev/vde + - echo "Mounting directories..." >> /root/stabi_1.log& + - mkdir /mnt/b + - mkdir /mnt/c + - mkdir /mnt/d + - mkdir /mnt/e + - mount /dev/vdb /mnt/b/ + - mount /dev/vdc /mnt/c/ + - mount /dev/vdd /mnt/d/ + - mount /dev/vde /mnt/e/ + - echo "Starting bonnie++..."
>> /root/stabi_1.log& + - date >> /root/stabi_1.log& + - /usr/sbin/bonnie++ -b -n 100 -d /home/centos -u root -x 1000 >> /root/stabi_1.log& + - /usr/sbin/bonnie++ -b -n 100 -d /mnt/b -u root -x 1000 >> /root/stabi_2.log& + - /usr/sbin/bonnie++ -b -n 100 -d /mnt/c -u root -x 1000 >> /root/stabi_3.log& + diff --git a/middleware/io-monitor/recipes-common/io-monitor/io-monitor/io_monitor/test-tools/yaml/cinder_nokia_v6_bon3_50GB.yaml b/middleware/io-monitor/recipes-common/io-monitor/io-monitor/io_monitor/test-tools/yaml/cinder_nokia_v6_bon3_50GB.yaml new file mode 100755 index 0000000..4dde9b8 --- /dev/null +++ b/middleware/io-monitor/recipes-common/io-monitor/io-monitor/io_monitor/test-tools/yaml/cinder_nokia_v6_bon3_50GB.yaml @@ -0,0 +1,89 @@ +heat_template_version: '2013-05-23' + +description: + Three Bonnies, one 50GB root volume and four non-root volumes + +parameters: + Network_Name: + type: string + description: Network which is used for servers + default: tenant1-mgmt-net + +resources: + + root_volume_1: + type: OS::Cinder::Volume + properties: + name: root_volume_1 + image: centos_nkstress + size: 50 + + Test_volume_2: + type: OS::Cinder::Volume + properties: + name: Test_volume_2 + size: 5 + + Test_volume_3: + type: OS::Cinder::Volume + properties: + name: Test_volume_3 + size: 5 + + Test_volume_4: + type: OS::Cinder::Volume + properties: + name: Test_volume_4 + size: 5 + + Test_volume_5: + type: OS::Cinder::Volume + properties: + name: Test_volume_5 + size: 5 + + Stabi_volume_write: + type: OS::Nova::Server + depends_on: root_volume_1 + properties: + name: { list_join : [ "-", [{get_param: 'OS::stack_name'}, 'Stabi_volume_write']]} + flavor: smallvol + key_name: newkey + availability_zone: "nova" + networks: + - network: { get_param: Network_Name } + block_device_mapping: + - { volume_id: { get_resource: root_volume_1}, device_name: "vda" } + - { volume_id: { get_resource: Test_volume_2}, device_name: "vdb" } + - { volume_id: { get_resource: Test_volume_3}, device_name: "vdc" } + - { volume_id: { get_resource: Test_volume_4}, device_name: "vdd" } + - { volume_id: { get_resource: Test_volume_5}, device_name: "vde" } + + user_data_format: RAW + user_data: | + #cloud-config + user: centos + password: centos + chpasswd: {expire: False} + ssh_pwauth: True + runcmd: + - echo "Creating file systems..." > /root/stabi_1.log& + - mkfs.ext4 /dev/vdb + - mkfs.ext4 /dev/vdc + - mkfs.ext4 /dev/vdd + - mkfs.ext4 /dev/vde + - echo "Mounting directories..." >> /root/stabi_1.log& + - mkdir /mnt/b + - mkdir /mnt/c + - mkdir /mnt/d + - mkdir /mnt/e + - mount /dev/vdb /mnt/b/ + - mount /dev/vdc /mnt/c/ + - mount /dev/vdd /mnt/d/ + - mount /dev/vde /mnt/e/ + - echo "Starting bonnie++..." >> /root/stabi_1.log& + - date >> /root/stabi_1.log& + - /usr/sbin/bonnie++ -b -n 100 -d /home/centos -u root -x 1000 >> /root/stabi_1.log& + - /usr/sbin/bonnie++ -b -n 100 -d /mnt/b -u root -x 1000 >> /root/stabi_2.log& + - /usr/sbin/bonnie++ -b -n 100 -d /mnt/c -u root -x 1000 >> /root/stabi_3.log& + diff --git a/middleware/io-monitor/recipes-common/io-monitor/io-monitor/io_monitor/test-tools/yaml/cinder_nokia_v6_bon4.yaml b/middleware/io-monitor/recipes-common/io-monitor/io-monitor/io_monitor/test-tools/yaml/cinder_nokia_v6_bon4.yaml new file mode 100644 index 0000000..ff4c0f4 --- /dev/null +++ b/middleware/io-monitor/recipes-common/io-monitor/io-monitor/io_monitor/test-tools/yaml/cinder_nokia_v6_bon4.yaml @@ -0,0 +1,92 @@ +heat_template_version: '2013-05-23' + +description: + Four Bonnies, 5 volumes.
No root volumes, CoW images + +parameters: + Network_Name: + type: string + description: Network which is used for servers + default: tenant1-mgmt-net + +resources: + + Test_volume_1: + type: OS::Cinder::Volume + properties: + name: Test_volume_1 + size: 5 + + Test_volume_2: + type: OS::Cinder::Volume + properties: + name: Test_volume_2 + size: 5 + + Test_volume_3: + type: OS::Cinder::Volume + properties: + name: Test_volume_3 + size: 5 + + Test_volume_4: + type: OS::Cinder::Volume + properties: + name: Test_volume_4 + size: 5 + + Test_volume_5: + type: OS::Cinder::Volume + properties: + name: Test_volume_5 + size: 5 + + Stabi_volume_write: + type: OS::Nova::Server + properties: + name: { list_join : [ "-", [{get_param: 'OS::stack_name'}, 'Stabi_volume_write']]} + image: centos_nkstress + flavor: smallvol + #key_name: newkey + availability_zone: "nova" + networks: + - network: { get_param: Network_Name } + block_device_mapping: + - { volume_id: { get_resource: Test_volume_1 }, device_name: "vdb" } + - { volume_id: { get_resource: Test_volume_2 }, device_name: "vdc" } + - { volume_id: { get_resource: Test_volume_3 }, device_name: "vdd" } + - { volume_id: { get_resource: Test_volume_4 }, device_name: "vde" } + - { volume_id: { get_resource: Test_volume_5 }, device_name: "vdf" } + + user_data_format: RAW + user_data: | + #cloud-config + user: centos + password: centos + chpasswd: {expire: False} + ssh_pwauth: True + runcmd: + - echo "Creating file systems..." > /root/stabi_1.log& + - mkfs.ext4 /dev/vdb + - mkfs.ext4 /dev/vdc + - mkfs.ext4 /dev/vdd + - mkfs.ext4 /dev/vde + - mkfs.ext4 /dev/vdf + - echo "Mounting directories..." >> /root/stabi_1.log& + - mkdir /mnt/b + - mkdir /mnt/c + - mkdir /mnt/d + - mkdir /mnt/e + - mkdir /mnt/f + - mount /dev/vdb /mnt/b/ + - mount /dev/vdc /mnt/c/ + - mount /dev/vdd /mnt/d/ + - mount /dev/vde /mnt/e/ + - mount /dev/vdf /mnt/f/ + - echo "Starting bonnie++..." 
>> /root/stabi_1.log& + - date >> /root/stabi_1.log& + - /usr/sbin/bonnie++ -b -n 100 -d /mnt/b -u root -x 999 >> /root/stabi_1.log& + - /usr/sbin/bonnie++ -b -n 100 -d /mnt/c -u root -x 999 >> /root/stabi_2.log& + - /usr/sbin/bonnie++ -b -n 100 -d /mnt/d -u root -x 999 >> /root/stabi_3.log& + - /usr/sbin/bonnie++ -b -n 100 -d /mnt/e -u root -x 999 >> /root/stabi_4.log& + diff --git a/middleware/io-monitor/recipes-common/io-monitor/io-monitor/io_monitor/test-tools/yaml/cinder_nokia_v6_bon4_20GB.yaml b/middleware/io-monitor/recipes-common/io-monitor/io-monitor/io_monitor/test-tools/yaml/cinder_nokia_v6_bon4_20GB.yaml new file mode 100755 index 0000000..5d8770d --- /dev/null +++ b/middleware/io-monitor/recipes-common/io-monitor/io-monitor/io_monitor/test-tools/yaml/cinder_nokia_v6_bon4_20GB.yaml @@ -0,0 +1,90 @@ +heat_template_version: '2013-05-23' + +description: + Four Bonnies, one 20GB root volume and four non-root volumes + +parameters: + Network_Name: + type: string + description: Network which is used for servers + default: tenant1-mgmt-net + +resources: + + root_volume_1: + type: OS::Cinder::Volume + properties: + name: root_volume_1 + image: centos_nkstress + size: 20 + + Test_volume_2: + type: OS::Cinder::Volume + properties: + name: Test_volume_2 + size: 5 + + Test_volume_3: + type: OS::Cinder::Volume + properties: + name: Test_volume_3 + size: 5 + + Test_volume_4: + type: OS::Cinder::Volume + properties: + name: Test_volume_4 + size: 5 + + Test_volume_5: + type: OS::Cinder::Volume + properties: + name: Test_volume_5 + size: 5 + + Stabi_volume_write: + type: OS::Nova::Server + depends_on: root_volume_1 + properties: + name: { list_join : [ "-", [{get_param: 'OS::stack_name'}, 'Stabi_volume_write']]} + flavor: smallvol + key_name: newkey + availability_zone: "nova" + networks: + - network: { get_param: Network_Name } + block_device_mapping: + - { volume_id: { get_resource: root_volume_1}, device_name: "vda" } + - { volume_id: { get_resource: Test_volume_2}, device_name: "vdb" } + - { volume_id: { get_resource: Test_volume_3}, device_name: "vdc" } + - { volume_id: { get_resource: Test_volume_4}, device_name: "vdd" } + - { volume_id: { get_resource: Test_volume_5}, device_name: "vde" } + + user_data_format: RAW + user_data: | + #cloud-config + user: centos + password: centos + chpasswd: {expire: False} + ssh_pwauth: True + runcmd: + - echo "Creating file systems..." > /root/stabi_1.log& + - mkfs.ext4 /dev/vdb + - mkfs.ext4 /dev/vdc + - mkfs.ext4 /dev/vdd + - mkfs.ext4 /dev/vde + - echo "Mounting directories..." >> /root/stabi_1.log& + - mkdir /mnt/b + - mkdir /mnt/c + - mkdir /mnt/d + - mkdir /mnt/e + - mount /dev/vdb /mnt/b/ + - mount /dev/vdc /mnt/c/ + - mount /dev/vdd /mnt/d/ + - mount /dev/vde /mnt/e/ + - echo "Starting bonnie++..."
>> /root/stabi_1.log& + - date >> /root/stabi_1.log& + - /usr/sbin/bonnie++ -b -n 100 -d /home/centos -u root -x 1000 >> /root/stabi_1.log& + - /usr/sbin/bonnie++ -b -n 100 -d /mnt/b -u root -x 1000 >> /root/stabi_2.log& + - /usr/sbin/bonnie++ -b -n 100 -d /mnt/c -u root -x 1000 >> /root/stabi_3.log& + - /usr/sbin/bonnie++ -b -n 100 -d /mnt/d -u root -x 1000 >> /root/stabi_4.log& + diff --git a/middleware/io-monitor/recipes-common/io-monitor/io-monitor/io_monitor/test-tools/yaml/cinder_nokia_v6_bon4_50GB.yaml b/middleware/io-monitor/recipes-common/io-monitor/io-monitor/io_monitor/test-tools/yaml/cinder_nokia_v6_bon4_50GB.yaml new file mode 100755 index 0000000..80909d4 --- /dev/null +++ b/middleware/io-monitor/recipes-common/io-monitor/io-monitor/io_monitor/test-tools/yaml/cinder_nokia_v6_bon4_50GB.yaml @@ -0,0 +1,90 @@ +heat_template_version: '2013-05-23' + +description: + Four Bonnies, one 50GB root volume and four non-root volumes + +parameters: + Network_Name: + type: string + description: Network which is used for servers + default: tenant1-mgmt-net + +resources: + + root_volume_1: + type: OS::Cinder::Volume + properties: + name: root_volume_1 + image: centos_nkstress + size: 50 + + Test_volume_2: + type: OS::Cinder::Volume + properties: + name: Test_volume_2 + size: 5 + + Test_volume_3: + type: OS::Cinder::Volume + properties: + name: Test_volume_3 + size: 5 + + Test_volume_4: + type: OS::Cinder::Volume + properties: + name: Test_volume_4 + size: 5 + + Test_volume_5: + type: OS::Cinder::Volume + properties: + name: Test_volume_5 + size: 5 + + Stabi_volume_write: + type: OS::Nova::Server + depends_on: root_volume_1 + properties: + name: { list_join : [ "-", [{get_param: 'OS::stack_name'}, 'Stabi_volume_write']]} + flavor: smallvol + #key_name: newkey + availability_zone: "nova" + networks: + - network: { get_param: Network_Name } + block_device_mapping: + - { volume_id: { get_resource: root_volume_1}, device_name: "vda" } + - { volume_id: { get_resource: Test_volume_2}, device_name: "vdb" } + - { volume_id: { get_resource: Test_volume_3}, device_name: "vdc" } + - { volume_id: { get_resource: Test_volume_4}, device_name: "vdd" } + - { volume_id: { get_resource: Test_volume_5}, device_name: "vde" } + + user_data_format: RAW + user_data: | + #cloud-config + user: centos + password: centos + chpasswd: {expire: False} + ssh_pwauth: True + runcmd: + - echo "Creating file systems..." > /root/stabi_1.log& + - mkfs.ext4 /dev/vdb + - mkfs.ext4 /dev/vdc + - mkfs.ext4 /dev/vdd + - mkfs.ext4 /dev/vde + - echo "Mounting directories..." >> /root/stabi_1.log& + - mkdir /mnt/b + - mkdir /mnt/c + - mkdir /mnt/d + - mkdir /mnt/e + - mount /dev/vdb /mnt/b/ + - mount /dev/vdc /mnt/c/ + - mount /dev/vdd /mnt/d/ + - mount /dev/vde /mnt/e/ + - echo "Starting bonnie++..."
>> /root/stabi_1.log& + - date >> /root/stabi_1.log& + - /usr/sbin/bonnie++ -b -n 100 -d /home/centos -u root -x 1000 >> /root/stabi_1.log& + - /usr/sbin/bonnie++ -b -n 100 -d /mnt/b -u root -x 1000 >> /root/stabi_2.log& + - /usr/sbin/bonnie++ -b -n 100 -d /mnt/c -u root -x 1000 >> /root/stabi_3.log& + - /usr/sbin/bonnie++ -b -n 100 -d /mnt/d -u root -x 1000 >> /root/stabi_4.log& + diff --git a/middleware/io-monitor/recipes-common/io-monitor/io-monitor/io_monitor/utils/__init__.py b/middleware/io-monitor/recipes-common/io-monitor/io-monitor/io_monitor/utils/__init__.py new file mode 100644 index 0000000..754a8f4 --- /dev/null +++ b/middleware/io-monitor/recipes-common/io-monitor/io-monitor/io_monitor/utils/__init__.py @@ -0,0 +1,5 @@ +# +# Copyright (c) 2016 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# diff --git a/middleware/io-monitor/recipes-common/io-monitor/io-monitor/io_monitor/utils/data_collector.py b/middleware/io-monitor/recipes-common/io-monitor/io-monitor/io_monitor/utils/data_collector.py new file mode 100644 index 0000000..869e878 --- /dev/null +++ b/middleware/io-monitor/recipes-common/io-monitor/io-monitor/io_monitor/utils/data_collector.py @@ -0,0 +1,147 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright (c) 2016 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + +import logging +import os + +from io_monitor.constants import DOMAIN +from io_monitor.utils.data_window import DataCollectionWindow + +LOG = logging.getLogger(DOMAIN) + + +class DeviceDataCollector(object): + # Moving average windows + MA_WINDOW_SMA = 0 + MA_WINDOW_MED = 1 + MA_WINDOW_LAR = 2 + + # Device status + STATUS_NORMAL = "N" + STATUS_BUILDING = "B" + STATUS_CONGESTED = "L" + + # Data tracked + DATA_IOPS = "iops" + DATA_AWAIT = "await" + + def __init__(self, device_node, data_elements, + size_sma, size_med, size_lar): + + self.node = device_node + + if os.path.exists('/sys/block/' + self.node + '/dm/name'): + self.name = open('/sys/block/' + self.node + '/dm/name', + 'r').read().rstrip() + else: + self.name = self.node + + self.data_dict = {} + self.data_caps = {self.DATA_AWAIT: -1, self.DATA_IOPS: -1} + self.timestamp = None + + self.congestion_status = self.STATUS_NORMAL + self.congestion_await_minimal_spike = -1 + self.congestion_await_sustained = -1 + + for element in data_elements: + self.data_dict.update({element: [ + DataCollectionWindow(size_sma, stuck_data_override=True), + DataCollectionWindow(size_med, stuck_data_override=True), + DataCollectionWindow(size_lar, stuck_data_override=True)]}) + + def update_congestion_status(self): + # Bail if threshold is not set + if self.congestion_await_sustained == -1: + return + + ma_sma = self.get_average(self.DATA_AWAIT, self.MA_WINDOW_SMA) + ma_med = self.get_average(self.DATA_AWAIT, self.MA_WINDOW_MED) + ma_lar = self.get_average(self.DATA_AWAIT, self.MA_WINDOW_LAR) + + # Set the congestion status based on await moving average + if self.congestion_status is self.STATUS_NORMAL: + if ma_sma > self.congestion_await_sustained: + self.congestion_status = self.STATUS_BUILDING + + if self.congestion_status is self.STATUS_BUILDING: + if ma_lar > self.congestion_await_sustained: + self.congestion_status = self.STATUS_CONGESTED + LOG.warning("Node %s (%s) is experiencing high await times."
+ % (self.node, self.name)) + elif ma_sma < self.congestion_await_sustained: + self.congestion_status = self.STATUS_NORMAL + + if self.congestion_status is self.STATUS_CONGESTED: + if ma_med < self.congestion_await_sustained: + self.congestion_status = self.STATUS_BUILDING + + def update_data(self, ts, element, value): + self.timestamp = ts + + # LOG.debug("%s: e = %s, v= %f" % (self.node, element, value)) + for w in [self.MA_WINDOW_SMA, + self.MA_WINDOW_MED, + self.MA_WINDOW_LAR]: + self.data_dict[element][w].update(value, self.data_caps[element]) + + def get_latest(self, element): + if element not in self.data_dict: + LOG.error("Error: invalid element requested = %s" % element) + return 0 + + return self.data_dict[element][self.MA_WINDOW_SMA].get_latest() + + def get_average(self, element, window): + if window not in [self.MA_WINDOW_SMA, + self.MA_WINDOW_MED, + self.MA_WINDOW_LAR]: + LOG.error("Error: invalid window requested = %s" % window) + return 0 + + if element not in self.data_dict: + LOG.error("Error: invalid element requested = %s" % element) + return 0 + + return self.data_dict[element][window].get_average() + + def is_data_stale(self, ts): + return not (ts == self.timestamp) + + def get_congestion_status(self, debug=False): + + if debug: + ma_sma = self.get_average(self.DATA_AWAIT, self.MA_WINDOW_SMA) + ma_med = self.get_average(self.DATA_AWAIT, self.MA_WINDOW_MED) + ma_lar = self.get_average(self.DATA_AWAIT, self.MA_WINDOW_LAR) + + LOG.debug("%s [ %6.2f %6.2f %6.2f ] %d" % + (self.node, ma_sma, ma_med, ma_lar, + self.congestion_await_sustained)) + + return self.congestion_status + + def set_data_caps(self, element, cap): + if element in self.data_caps: + self.data_caps[element] = cap + + def set_congestion_thresholds(self, await_minimal_spike, + await_sustained_congestion): + self.congestion_await_minimal_spike = await_minimal_spike + self.congestion_await_sustained = await_sustained_congestion + + def get_element_windows_avg_list(self, element): + return [self.get_average(element, self.MA_WINDOW_SMA), + self.get_average(element, self.MA_WINDOW_MED), + self.get_average(element, self.MA_WINDOW_LAR)] + + def get_element_windows_avg_string(self, element): + return "%s [ %9.2f, %9.2f, %9.2f ]" % ( + element, + self.get_average(element, self.MA_WINDOW_SMA), + self.get_average(element, self.MA_WINDOW_MED), + self.get_average(element, self.MA_WINDOW_LAR)) diff --git a/middleware/io-monitor/recipes-common/io-monitor/io-monitor/io_monitor/utils/data_window.py b/middleware/io-monitor/recipes-common/io-monitor/io-monitor/io_monitor/utils/data_window.py new file mode 100644 index 0000000..4caaf15 --- /dev/null +++ b/middleware/io-monitor/recipes-common/io-monitor/io-monitor/io_monitor/utils/data_window.py @@ -0,0 +1,61 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright (c) 2016 Wind River Systems, Inc.
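+# +# Usage sketch (editorial addition, illustrative only, not part of the +# original patch): the window below keeps a fixed-size deque plus a running +# total, so the moving average is O(1) per sample, e.g. +#   w = DataCollectionWindow(3)      # 3-sample window, no stuck-data override +#   for v in (10.0, 20.0, 30.0): +#       w.update(v, cap=-1)          # cap <= 0 means the value is not capped +#   w.get_average()                  # -> 20.0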
+# +# SPDX-License-Identifier: Apache-2.0 +# + +import collections + + +class DataCollectionWindow(object): + # If the same data is seen repeatedly, then override with 0.0 as this + # device is no longer updating + CONSECUTIVE_SAME_DATA = 5 + + def __init__(self, size, stuck_data_override=False): + self.window = collections.deque(size*[0.0], size) + self.timestamp = None + self.last_value = 0.0 + self.total = 0.0 + self.avg = 0.0 + + # iostat will produce a "stuck data" scenario when called with less + # than two iterations and I/O has stopped on the device + self.stuck_override = stuck_data_override + self.stuck_count = 0 + + def update(self, value, cap): + # Handle stuck data and override + if self.stuck_override and value != 0: + if value == self.last_value: + self.stuck_count += 1 + else: + self.stuck_count = 0 + + # Save latest value + self.last_value = value + + if self.stuck_count > self.CONSECUTIVE_SAME_DATA: + value = 0.0 + else: + # Cap the values to smooth out squirrelly data + if cap > 0: + value = min(value, cap) + + expired_value = self.window.pop() + + # Push the new value; the oldest was popped above + self.window.appendleft(value) + + # Adjust the running sum + self.total += (value - expired_value) + + # Adjust the average + self.avg = max(0.0, self.total/len(self.window)) + + def get_latest(self): + return self.last_value + + def get_average(self): + return self.avg diff --git a/middleware/io-monitor/recipes-common/io-monitor/io-monitor/setup.py b/middleware/io-monitor/recipes-common/io-monitor/io-monitor/setup.py new file mode 100644 index 0000000..ecb143a --- /dev/null +++ b/middleware/io-monitor/recipes-common/io-monitor/io-monitor/setup.py @@ -0,0 +1,18 @@ +#!/usr/bin/env python +# +# Copyright (c) 2016 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + + +import setuptools + +setuptools.setup(name='io_monitor', + version='1.0.0', + description='IO Monitor', + license='Apache-2.0', + packages=['io_monitor', 'io_monitor.monitors', + 'io_monitor.monitors.cinder', 'io_monitor.utils'], + entry_points={ + }) diff --git a/middleware/io-monitor/recipes-common/io-monitor/scripts/bin/io-monitor-manager b/middleware/io-monitor/recipes-common/io-monitor/scripts/bin/io-monitor-manager new file mode 100644 index 0000000..a8479b3 --- /dev/null +++ b/middleware/io-monitor/recipes-common/io-monitor/scripts/bin/io-monitor-manager @@ -0,0 +1,17 @@ +#!/usr/bin/env python +# +# Copyright (c) 2016 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + + +import sys + +try: + from io_monitor import io_monitor_manager +except EnvironmentError as e: + print >> sys.stderr, "Error importing io_monitor_manager: ", str(e) + sys.exit(1) + +io_monitor_manager.main() diff --git a/middleware/io-monitor/recipes-common/io-monitor/scripts/init.d/io-monitor-manager b/middleware/io-monitor/recipes-common/io-monitor/scripts/init.d/io-monitor-manager new file mode 100644 index 0000000..5ed4ee8 --- /dev/null +++ b/middleware/io-monitor/recipes-common/io-monitor/scripts/init.d/io-monitor-manager @@ -0,0 +1,100 @@ +#!/bin/sh +# +# Copyright (c) 2016 Wind River Systems, Inc.
+# +# SPDX-License-Identifier: Apache-2.0 +# + + +### BEGIN INIT INFO +# Provides: io-monitor-manager +# Required-Start: +# Required-Stop: +# Default-Start: 2 3 4 5 +# Default-Stop: 0 1 6 +# Short-Description: Daemon for polling iostat status +# Description: Daemon for polling iostat status +### END INIT INFO + +DESC="io-monitor-manager" +DAEMON="/usr/bin/io-monitor-manager" +RUNDIR="/var/run/io-monitor" +PIDFILE=$RUNDIR/$DESC.pid + +start() +{ + if [ -e $PIDFILE ]; then + PIDDIR=/proc/$(cat $PIDFILE) + if [ -d ${PIDDIR} ]; then + echo "$DESC already running." + exit 0 + else + echo "Removing stale PID file $PIDFILE" + rm -f $PIDFILE + fi + fi + + echo -n "Starting $DESC..." + mkdir -p $RUNDIR + start-stop-daemon --start --quiet \ + --pidfile ${PIDFILE} --exec ${DAEMON} -- --daemon_mode + + #--make-pidfile + + if [ $? -eq 0 ]; then + echo "done." + else + echo "failed." + exit 1 + fi +} + +stop() +{ + echo -n "Stopping $DESC..." + start-stop-daemon --stop --quiet --pidfile $PIDFILE + if [ $? -eq 0 ]; then + echo "done." + else + echo "failed." + fi + rm -f $PIDFILE +} + +status() +{ + pid=`cat $PIDFILE 2>/dev/null` + if [ -n "$pid" ]; then + if ps -p $pid &> /dev/null ; then + echo "$DESC is running" + exit 0 + else + echo "$DESC is not running but has pid file" + exit 1 + fi + fi + echo "$DESC is not running" + exit 3 +} + +case "$1" in + start) + start + ;; + stop) + stop + ;; + restart|force-reload|reload) + stop + start + ;; + status) + status + ;; + *) + echo "Usage: $0 {start|stop|force-reload|restart|reload|status}" + exit 1 + ;; +esac + +exit 0 diff --git a/middleware/io-monitor/recipes-common/io-monitor/scripts/pmon.d/io-monitor.conf b/middleware/io-monitor/recipes-common/io-monitor/scripts/pmon.d/io-monitor.conf new file mode 100644 index 0000000..5dddb6f --- /dev/null +++ b/middleware/io-monitor/recipes-common/io-monitor/scripts/pmon.d/io-monitor.conf @@ -0,0 +1,19 @@ +[process] +process = io-monitor-manager +pidfile = /var/run/io-monitor/io-monitor-manager.pid +script = /etc/init.d/io-monitor-manager +style = lsb ; ocf or lsb +severity = minor ; Process failure severity + ; critical : host is failed + ; major : host is degraded + ; minor : log is generated +restarts = 5 ; Number of back to back unsuccessful restarts before severity assertion +interval = 10 ; Number of seconds to wait between back-to-back unsuccessful restarts +debounce = 20 ; Number of seconds the process needs to run before declaring + ; it as running O.K. after a restart. + ; Time after which back-to-back restart count is cleared. +startuptime = 10 ; Seconds to wait after process start before starting the debounce monitor +mode = passive ; Monitoring mode: passive (default) or active + ; passive: process death monitoring (default: always) + ; active: heartbeat monitoring, i.e.
request / response messaging + diff --git a/middleware/patching/recipes-common/enable-dev-patch/centos/build_srpm.data b/middleware/patching/recipes-common/enable-dev-patch/centos/build_srpm.data new file mode 100644 index 0000000..ddd5493 --- /dev/null +++ b/middleware/patching/recipes-common/enable-dev-patch/centos/build_srpm.data @@ -0,0 +1,2 @@ +TIS_PATCH_VER=1 +COPY_LIST="enable-dev-patch/*" diff --git a/middleware/patching/recipes-common/enable-dev-patch/centos/enable-dev-patch.spec b/middleware/patching/recipes-common/enable-dev-patch/centos/enable-dev-patch.spec new file mode 100644 index 0000000..23380a5 --- /dev/null +++ b/middleware/patching/recipes-common/enable-dev-patch/centos/enable-dev-patch.spec @@ -0,0 +1,28 @@ +Summary: Enable installation of developer patches +Name: enable-dev-patch +Version: 1.0 +Release: %{tis_patch_ver}%{?_tis_dist} +License: Apache-2.0 +Group: base +Packager: Wind River +URL: unknown +Source0: dev_certificate_enable.bin + +%description +Enables the installation of Titanium patches signed by developers + +%prep + +%build + +%install + install -m 755 -d %{buildroot}%{_sysconfdir}/pki/wrs + install -m 444 %{SOURCE0} %{buildroot}%{_sysconfdir}/pki/wrs/dev_certificate_enable.bin + +%clean +rm -rf $RPM_BUILD_ROOT + +%files +%defattr(-,root,root,-) +%{_sysconfdir}/pki/wrs/dev_certificate_enable.bin + diff --git a/middleware/patching/recipes-common/enable-dev-patch/enable-dev-patch/dev_certificate_enable.bin b/middleware/patching/recipes-common/enable-dev-patch/enable-dev-patch/dev_certificate_enable.bin new file mode 100644 index 0000000..a074ffb --- /dev/null +++ b/middleware/patching/recipes-common/enable-dev-patch/enable-dev-patch/dev_certificate_enable.bin @@ -0,0 +1 @@ ++DI:#0Wɸ^׳AyӘ b1+2;Y3ncE;lĊ!P.^fE.jk~Mcg)(F^Oe \oɨL">^>'SZimL^6:lƳyVB4n%91PG s99]cN*m+2^?;NRx)\磙u. ƽ)$0Xjo֧5XP) \ No newline at end of file diff --git a/middleware/perf/recipes-common/io-scheduler/centos/build_srpm.data b/middleware/perf/recipes-common/io-scheduler/centos/build_srpm.data new file mode 100644 index 0000000..abc7209 --- /dev/null +++ b/middleware/perf/recipes-common/io-scheduler/centos/build_srpm.data @@ -0,0 +1,2 @@ +COPY_LIST="$FILES_BASE/*" +TIS_PATCH_VER=0 diff --git a/middleware/perf/recipes-common/io-scheduler/centos/files/60-io-scheduler.rules b/middleware/perf/recipes-common/io-scheduler/centos/files/60-io-scheduler.rules new file mode 100644 index 0000000..6660173 --- /dev/null +++ b/middleware/perf/recipes-common/io-scheduler/centos/files/60-io-scheduler.rules @@ -0,0 +1,28 @@ +# This file contains the rules to customize io scheduler. + +# Heuristics: +# 'deadline' io-scheduler tuned settings +# - deadline generally recommended for databases, servers, and SSDs, +# and for more deterministic latency +# - note that read_expire is a key tuning parameter here +# - the following is recommended by DRBD user guide +# front_merges: 0 (from 1) +# read_expire: 150 (from 500) +# write_expire: 1500 (from 5000) +# +# 'noop' io scheduler for variants of HW-RAID. +# - RAID controller will do its own separate scheduling +# +# Overall: +# - We prefer to guarantee latency more than fairness for all platform services, +# especially under extreme read and write load, e.g, when creating/deleting +# multiple heat stacks, or running disk intensive operations. 
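+# +# Quick sanity check (editorial addition, illustrative only): once these +# rules have fired, the active scheduler and deadline tunables can be +# confirmed from sysfs, e.g. +#   cat /sys/block/sda/queue/scheduler              # -> noop [deadline] cfq +#   cat /sys/block/sda/queue/iosched/read_expire    # -> 150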
+ +ACTION=="add|change", SUBSYSTEM=="block", KERNEL=="sd[a-z]", ATTR{queue/scheduler}="deadline" +ACTION=="add|change", SUBSYSTEM=="block", KERNEL=="sd[a-z]", ATTR{queue/iosched/front_merges}="0" +ACTION=="add|change", SUBSYSTEM=="block", KERNEL=="sd[a-z]", ATTR{queue/iosched/read_expire}="150" +ACTION=="add|change", SUBSYSTEM=="block", KERNEL=="sd[a-z]", ATTR{queue/iosched/write_expire}="1500" + +# Set noop io scheduler for variants of HW-RAID. +# HP ProLiant DL360p Gen8; HP ProLiant DL380p Gen8 +ACTION=="add|change", SUBSYSTEM=="block", KERNEL=="sd[a-z]", ATTRS{raid_level}=="*RAID*", ATTR{queue/scheduler}="noop" diff --git a/middleware/perf/recipes-common/io-scheduler/centos/files/LICENSE b/middleware/perf/recipes-common/io-scheduler/centos/files/LICENSE new file mode 100644 index 0000000..d645695 --- /dev/null +++ b/middleware/perf/recipes-common/io-scheduler/centos/files/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/middleware/perf/recipes-common/io-scheduler/centos/io-scheduler.spec b/middleware/perf/recipes-common/io-scheduler/centos/io-scheduler.spec new file mode 100644 index 0000000..af5d941 --- /dev/null +++ b/middleware/perf/recipes-common/io-scheduler/centos/io-scheduler.spec @@ -0,0 +1,29 @@ +Summary: CGCS IO Scheduler Configuration +Name: io-scheduler +Version: 1.0 +Release: %{tis_patch_ver}%{?_tis_dist} +License: Apache-2.0 +Group: base +Packager: Wind River +URL: unknown + +Source0: 60-io-scheduler.rules +Source1: LICENSE + +%define udev_rules_d %{_sysconfdir}/udev/rules.d + +%description +CGCS io scheduler configuration and tuning. + +%install +mkdir -p %{buildroot}%{udev_rules_d} +install -m 644 %{SOURCE0} %{buildroot}%{udev_rules_d}/60-io-scheduler.rules + +%post +/bin/udevadm control --reload-rules +/bin/udevadm trigger --type=devices --subsystem-match=block + +%files +%license ../SOURCES/LICENSE +%defattr(-,root,root,-) +%{_sysconfdir}/udev/rules.d diff --git a/middleware/recipes-common/build-info/PKG-INFO b/middleware/recipes-common/build-info/PKG-INFO new file mode 100644 index 0000000..24a1927 --- /dev/null +++ b/middleware/recipes-common/build-info/PKG-INFO @@ -0,0 +1,12 @@ +Metadata-Version: 1.1 +Name: build-info +Version: 1.0 +Summary: build-info version 1.0-r3 +Home-page: +Author: Windriver +Author-email: info@windriver.com +License: Apache-2.0 + +Description: CGTS build information package + +Platform: UNKNOWN diff --git a/middleware/recipes-common/build-info/build-info-1.0/LICENSE b/middleware/recipes-common/build-info/build-info-1.0/LICENSE new file mode 100644 index 0000000..d645695 --- /dev/null +++ b/middleware/recipes-common/build-info/build-info-1.0/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/middleware/recipes-common/build-info/build-info-1.0/collect.sh b/middleware/recipes-common/build-info/build-info-1.0/collect.sh new file mode 100755 index 0000000..b7c3427 --- /dev/null +++ b/middleware/recipes-common/build-info/build-info-1.0/collect.sh @@ -0,0 +1,101 @@ +#!/bin/bash + +# +# Copyright (c) 2013-2016 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + +root="../../../../../.." +centOSBuildRoot=".." +jenkinsBuildFileName="BUILD" +jenkinsBuildFile="$root/$jenkinsBuildFileName" +jenkinsBuildFileCentOS="$centOSBuildRoot/$jenkinsBuildFileName" +releaseInfoFile="../release-info.inc" +destFile="build.info" +destH="build_info.h" + +# If the Jenkins build file does not exist in the expected Rel 2 directory, +# check if it was packaged in the source RPM +if [ ! -e $jenkinsBuildFile ]; then + if [ -e $jenkinsBuildFileCentOS ]; then + jenkinsBuildFile=$jenkinsBuildFileCentOS + fi +fi + +if [ -e $releaseInfoFile ]; then + source $releaseInfoFile +fi + +if [ "${REPO}x" == "x" ]; then + REPO=`grep CONFIGURE_CMD $root/config.properties | awk ' { print $1 } ' | awk -F '"' ' { print $2 } ' | sed 's|\(.*\)\(\/.*\/.*\)$|\1|g'` +fi + +if [ -e $jenkinsBuildFile ]; then + cp $jenkinsBuildFile $destFile + source $jenkinsBuildFile +else + # PLATFORM_RELEASE should be set in release-info.inc + if [ "x${PLATFORM_RELEASE}" == "x" ]; then + SW_VERSION="Unknown" + else + SW_VERSION="${PLATFORM_RELEASE}" + fi + + BUILD_TARGET="Unknown" + BUILD_TYPE="Informal" + BUILD_ID="n/a" + JOB="n/a" + if [ "${BUILD_BY}x" == "x" ]; then + BUILD_BY="$USER" + fi + BUILD_NUMBER="n/a" + BUILD_HOST="$HOSTNAME" + if [ "${BUILD_DATE}x" == "x" ]; then + BUILD_DATE=`date "+%F %T %z"` + fi + + echo "SW_VERSION=\"$SW_VERSION\"" > $destFile + echo "BUILD_TARGET=\"$BUILD_TARGET\"" >> $destFile + echo "BUILD_TYPE=\"$BUILD_TYPE\"" >> $destFile + echo "BUILD_ID=\"$BUILD_ID\"" >> $destFile + echo "" >> $destFile + echo "JOB=\"$JOB\"" >> $destFile + echo "BUILD_BY=\"$BUILD_BY\"" >> $destFile + echo "BUILD_NUMBER=\"$BUILD_NUMBER\"" >> $destFile + echo "BUILD_HOST=\"$BUILD_HOST\"" >> $destFile + echo "BUILD_DATE=\"$BUILD_DATE\"" >> $destFile + echo "" >> $destFile + echo "BUILD_DIR=\""`bash -c "cd $root; pwd"`"\"" >> $destFile + echo "WRS_SRC_DIR=\"$REPO\"" >> $destFile + if [ "${WRS_GIT_BRANCH}x" == "x" ]; then + echo "WRS_GIT_BRANCH=\""`cd $REPO; git status -s -b | grep '##' | awk ' { printf $2 } '`"\"" >> $destFile + else + echo "WRS_GIT_BRANCH=\"$WRS_GIT_BRANCH\"" >> $destFile + fi + + echo "CGCS_SRC_DIR=\"$REPO/addons/wr-cgcs/layers/cgcs\"" >> $destFile + if [ "${CGCS_GIT_BRANCH}x" == "x" ]; then + echo "CGCS_GIT_BRANCH=\""`cd $REPO/addons/wr-cgcs/layers/cgcs/; git status -s -b | grep '##' | awk ' { printf $2 } '`"\"" >> $destFile + else + echo "CGCS_GIT_BRANCH=\"$CGCS_GIT_BRANCH\"" >> $destFile + fi + +fi + +echo "#ifndef _BUILD_INFO_H_" > $destH +echo "#define _BUILD_INFO_H_" >> $destH
echo "" >> $destH + echo "#define RELEASE_NAME \"$RELEASE_NAME\"" >> $destH + echo "#define SW_VERSION \"$SW_VERSION\"" >> $destH + echo "" >> $destH + echo "#define BUILD_TARGET \"$BUILD_TARGET\"" >> $destH + echo "#define BUILD_TYPE \"$BUILD_TYPE\"" >> $destH + echo "#define BUILD_ID \"$BUILD_ID\"" >> $destH + echo "" >> $destH + echo "#define JOB \"$JOB\"" >> $destH + echo "#define BUILD_BY \"$BUILD_BY\"" >> $destH + echo "#define BUILD_NUMBER \"$BUILD_NUMBER\"" >> $destH + echo "#define BUILD_HOST \"$BUILD_HOST\"" >> $destH + echo "#define BUILD_DATE \"$BUILD_DATE\""
>> $destH +echo "#endif /* _BUILD_INFO_H_ */" >> $destH diff --git a/middleware/recipes-common/build-info/centos/build-info.spec b/middleware/recipes-common/build-info/centos/build-info.spec new file mode 100644 index 0000000..18ca8cd --- /dev/null +++ b/middleware/recipes-common/build-info/centos/build-info.spec @@ -0,0 +1,50 @@ +Summary: build-info version 1.0-r3 +Name: build-info +Version: 1.0 +Release: %{tis_patch_ver}%{?_tis_dist} +License: Apache-2.0 +Group: base +Packager: Wind River +URL: unknown +Source0: %{name}-%{version}.tar.gz +Source1: LICENSE + +%description +Build Info + +%define local_etcdir /etc +%define local_incdir /usr/include + +%define debug_package %{nil} + +%package -n build-info-dev +Summary: build-info version 1.0-r3 - Development files +Group: devel + +%description -n build-info-dev +Build Info This package contains symbolic links, header files, and related items necessary for software development. + +%files +%license ../LICENSE +%defattr(-,root,root,-) +%{local_etcdir}/* + +%prep +%setup + +%build +./collect.sh + +%install +install -d -m 755 %{buildroot}%{local_etcdir} +install -m 644 build.info %{buildroot}/%{local_etcdir} +install -d -m 755 %{buildroot}%{local_incdir} +install -m 644 build_info.h %{buildroot}/%{local_incdir} + +%clean +rm -rf $RPM_BUILD_ROOT + +%files -n build-info-dev +%defattr(-,root,root,-) +%{local_incdir}/* + diff --git a/middleware/recipes-common/build-info/centos/build_srpm b/middleware/recipes-common/build-info/centos/build_srpm new file mode 100755 index 0000000..b93c548 --- /dev/null +++ b/middleware/recipes-common/build-info/centos/build_srpm @@ -0,0 +1,130 @@ +# +# Copyright (c) 2016 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + +source "$SRC_BASE/build-tools/spec-utils" + +if [ "x$DATA" == "x" ]; then + echo "ERROR: Environment variable 'DATA' not defined." + exit 1 +fi + +if [ ! -f "$DATA" ]; then + echo "ERROR: Couldn't find '$PWD/$DATA'" + exit 1 +fi + +unset TIS_PATCH_VER # Ensure there's nothing in the env already + +source $DATA + +if [ -z "$TIS_PATCH_VER" ]; then + echo "ERROR: TIS_PATCH_VER must be defined" + exit 1 +fi + +SRC_DIR="/build-info-1.0" +VERSION=$(grep '^Version:' PKG-INFO | awk -F ': ' '{print $2}' | sed -e 's/^[[:space:]]*//') +TAR_NAME=$(grep '^Name:' PKG-INFO | awk -F ': ' '{print $2}' | sed -e 's/^[[:space:]]*//') +CUR_DIR=`pwd` +BUILD_DIR="$RPMBUILD_BASE" + +# Additional files to include in the archive (if they exist). +EXTRA_FILES="./release-info.inc" +if [ -f $MY_WORKSPACE/BUILD ]; then + EXTRA_FILES+=" $MY_WORKSPACE/BUILD" +else + if [ -f $MY_WORKSPACE/../BUILD ]; then + EXTRA_FILES+=" $MY_WORKSPACE/../BUILD" + fi +fi + +mkdir -p $BUILD_DIR/SRPMS + +TAR_UNCOMPRESSED="$TAR_NAME-$VERSION.tar" +TAR="${TAR_UNCOMPRESSED}.gz" +COMPRESS="gzip" +TAR_PATH="$BUILD_DIR/SOURCES" + +# copy the LICENSE for rpm spec %license directive +cp .$SRC_DIR/LICENSE $BUILD_DIR/SOURCES/ + +# Check to see if our tarball needs updating +TAR_NEEDED=0 +if [ -f $TAR_PATH/$TAR ]; then + n=`find . -cnewer $TAR_PATH/$TAR -and ! -path './.git*' \ + -and ! -path './build/*' \ + -and ! -path './.pc/*' \ + -and ! -path './patches/*' \ + -and ! -path "./$DISTRO/*" \ + -and ! 
+if [ $TAR_NEEDED -gt 0 ]; then
+    tar cvf $TAR_PATH/$TAR_UNCOMPRESSED .$SRC_DIR \
+        --exclude '.git*' --exclude 'build' --exclude='.pc' \
+        --exclude='patches' --exclude="$DISTRO" --exclude='pbr-*.egg' \
+        --transform "s,^\.$SRC_DIR/LICENSE,LICENSE," \
+        --transform "s,^\.$SRC_DIR,$TAR_NAME-$VERSION,"
+fi
+
+for file in $EXTRA_FILES; do
+    if [ -e $file ]; then
+        tar rf $TAR_PATH/$TAR_UNCOMPRESSED -C $(dirname "${file}") $(basename "${file}")
+    fi
+done
+
+$COMPRESS $TAR_PATH/$TAR_UNCOMPRESSED
+
+for SPEC in `ls $BUILD_DIR/SPECS`; do
+    SPEC_PATH="$BUILD_DIR/SPECS/$SPEC"
+    RELEASE=`spec_find_tag Release "$SPEC_PATH" 2>> /dev/null`
+    if [ $? -ne 0 ]; then
+        echo "ERROR: 'Release' not found in '$SPEC_PATH'"
+    fi
+    NAME=`spec_find_tag Name "$SPEC_PATH" 2>> /dev/null`
+    if [ $? -ne 0 ]; then
+        echo "ERROR: 'Name' not found in '$SPEC_PATH'"
+    fi
+    SRPM="$NAME-$VERSION-$RELEASE.src.rpm"
+    SRPM_PATH="$BUILD_DIR/SRPMS/$SRPM"
+
+    BUILD_NEEDED=0
+    if [ -f $SRPM_PATH ]; then
+        n=`find . -cnewer $SRPM_PATH | wc -l`
+        if [ $n -gt 0 ]; then
+            BUILD_NEEDED=1
+        fi
+    else
+        BUILD_NEEDED=1
+    fi
+
+    if [ $BUILD_NEEDED -gt 0 ]; then
+        echo "SPEC file: $SPEC_PATH"
+        echo "SRPM build directory: $BUILD_DIR"
+        echo "TIS_PATCH_VER: $TIS_PATCH_VER"
+
+        sed -i -e "1 i%define tis_patch_ver $TIS_PATCH_VER" $SPEC_PATH
+        rpmbuild -bs $SPEC_PATH --define="%_topdir $BUILD_DIR" --define="_tis_dist .tis"
+    fi
+done
+
+
+
+
+
diff --git a/middleware/recipes-common/build-info/centos/build_srpm.data b/middleware/recipes-common/build-info/centos/build_srpm.data
new file mode 100644
index 0000000..d3f64f3
--- /dev/null
+++ b/middleware/recipes-common/build-info/centos/build_srpm.data
@@ -0,0 +1 @@
+TIS_PATCH_VER=3
diff --git a/middleware/recipes-common/build-info/release-info.inc b/middleware/recipes-common/build-info/release-info.inc
new file mode 100644
index 0000000..b32e0c2
--- /dev/null
+++ b/middleware/recipes-common/build-info/release-info.inc
@@ -0,0 +1,12 @@
+#
+# Copyright (c) 2014-2016 Wind River Systems, Inc.
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+
+#
+# Inclusion file to set release variables
+#
+# Note: Sourced from scripts, so needs to be bash-able
+#
+PLATFORM_RELEASE="18.03"
diff --git a/middleware/util/recipes-common/collector/LICENSE b/middleware/util/recipes-common/collector/LICENSE
new file mode 100644
index 0000000..d645695
--- /dev/null
+++ b/middleware/util/recipes-common/collector/LICENSE
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity.
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/middleware/util/recipes-common/collector/centos/build_srpm.data b/middleware/util/recipes-common/collector/centos/build_srpm.data
new file mode 100644
index 0000000..69bf012
--- /dev/null
+++ b/middleware/util/recipes-common/collector/centos/build_srpm.data
@@ -0,0 +1,2 @@
+SRC_DIR="scripts"
+TIS_PATCH_VER=24
diff --git a/middleware/util/recipes-common/collector/centos/collector.spec b/middleware/util/recipes-common/collector/centos/collector.spec
new file mode 100644
index 0000000..40c51c5
--- /dev/null
+++ b/middleware/util/recipes-common/collector/centos/collector.spec
@@ -0,0 +1,66 @@
+Summary: CGCS Platform Data Collection Scripts Package
+Name: collector
+Version: 1.0
+Release: %{tis_patch_ver}%{?_tis_dist}
+License: Apache-2.0
+Group: base
+Packager: Wind River
+URL: unknown
+Source0: %{name}-%{version}.tar.gz
+
+%description
+This package contains scripts that implement data and log collection, which
+field support can execute to gather current state and runtime history for
+off-platform analysis and debug.
+
+%prep
+%setup
+
+%install
+mkdir -p %{buildroot}
+
+install -m 755 -d %{buildroot}%{_sysconfdir}/collect.d
+install -m 755 -d %{buildroot}%{_sysconfdir}/collect
+install -m 755 -d %{buildroot}/usr/local/sbin
+install -m 755 -d %{buildroot}/usr/local/bin
+install -m 755 -d %{buildroot}%{_sbindir}
+
+install -m 755 collect %{buildroot}/usr/local/sbin/collect
+install -m 755 collect_host %{buildroot}/usr/local/sbin/collect_host
+install -m 755 collect_date %{buildroot}/usr/local/sbin/collect_date
+install -m 755 collect_utils %{buildroot}/usr/local/sbin/collect_utils
+install -m 755 collect_parms %{buildroot}/usr/local/sbin/collect_parms
+install -m 755 collect_mask_passwords %{buildroot}/usr/local/sbin/collect_mask_passwords
+install -m 755 expect_done %{buildroot}/usr/local/sbin/expect_done
+
+install -m 755 collect_sysinv.sh %{buildroot}%{_sysconfdir}/collect.d/collect_sysinv
+install -m 755 collect_psqldb.sh %{buildroot}%{_sysconfdir}/collect.d/collect_psqldb
+install -m 755 collect_openstack.sh %{buildroot}%{_sysconfdir}/collect.d/collect_openstack
+install -m 755 collect_networking.sh %{buildroot}%{_sysconfdir}/collect.d/collect_networking
+install -m 755 collect_ceph.sh %{buildroot}%{_sysconfdir}/collect.d/collect_ceph
+install -m 755 collect_sm.sh %{buildroot}%{_sysconfdir}/collect.d/collect_sm
+install -m 755 collect_tc.sh %{buildroot}%{_sysconfdir}/collect.d/collect_tc
+install -m 755 collect_nfv_vim.sh %{buildroot}%{_sysconfdir}/collect.d/collect_nfv_vim
+install -m 755 collect_vswitch.sh %{buildroot}%{_sysconfdir}/collect.d/collect_vswitch
+install -m 755 collect_patching.sh %{buildroot}%{_sysconfdir}/collect.d/collect_patching
+install -m 755 collect_coredump.sh %{buildroot}%{_sysconfdir}/collect.d/collect_coredump
+install -m 755 collect_crash.sh %{buildroot}%{_sysconfdir}/collect.d/collect_crash
+install -m 755 collect_ima.sh %{buildroot}%{_sysconfdir}/collect.d/collect_ima
+
+install -m 755 etc.exclude %{buildroot}%{_sysconfdir}/collect/etc.exclude
+install -m 755 run.exclude %{buildroot}%{_sysconfdir}/collect/run.exclude
+
+ln -sf /usr/local/sbin/collect %{buildroot}/usr/local/bin/collect
+ln -sf /usr/local/sbin/collect %{buildroot}%{_sbindir}/collect
+
+%clean
+rm -rf %{buildroot}
+
+%files
+%license LICENSE
+%defattr(-,root,root,-)
+%{_sysconfdir}/collect/*
+%{_sysconfdir}/collect.d/*
+/usr/local/sbin/*
+/usr/local/bin/collect
+%{_sbindir}/collect
diff --git a/middleware/util/recipes-common/collector/scripts/LICENSE
b/middleware/util/recipes-common/collector/scripts/LICENSE new file mode 100644 index 0000000..d645695 --- /dev/null +++ b/middleware/util/recipes-common/collector/scripts/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/middleware/util/recipes-common/collector/scripts/collect b/middleware/util/recipes-common/collector/scripts/collect new file mode 100755 index 0000000..e4bb8a2 --- /dev/null +++ b/middleware/util/recipes-common/collector/scripts/collect @@ -0,0 +1,1267 @@ +#! /bin/bash +######################################################################## +# +# Copyright (c) 2014-2017 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# +######################################################################## +# +# Description: This script creates a tarball of logs and runtime +# configuration information for any of the following +# +# - current host ... collect +# - specified host ... collect hostname +# - group of hosts ... collect --list ... +# - all hosts ... collect --all +# +# Behavior : See print_help below. +# +# Inclusions : What is collected. +# +# - /var/log +# - /var/run (exclusions listed in /etc/collect/exclude.list) +# - area specific configuration and data -> ./var/extra +# - all databases in plain text ; except for ceilometer and keystone +# +# Additional collected info is expressed by the following runtime output. +# Generally, individual commands that display output have that output +# redirected to the appropriate info file in /scratch/var/extra +# +# wrsroot@controller-0:/scratch# sudo collect +# nodetype : controller +# Collector: /scratch +# Extra Dir: /scratch/var/extra +# Database : /scratch/database +# Tarball : /scratch/controller-0.20140318.232925.tgz +# ------------------------------------------------------------------------ +# controller-0: Process Info ......: /scratch/var/extra/process.info +# controller-0: Host Info .........: /scratch/var/extra/host.info +# controller-0: Memory Info .......: /scratch/var/extra/memory.info +# controller-0: Filesystem Info ...: /scratch/var/extra/filesystem.info +# controller-0: Bash History ......: /scratch/var/extra/history.info +# controller-0: Interrupt Info ....: /scratch/var/extra/interrupt.info +# controller-0: HA Info ...........: /scratch/var/extra/crm.info +# controller-0: CIB Admin Info ....: /scratch/var/extra/crm.xml +# controller-0: Mtce Info .........: /scratch/var/extra/mtce.info +# controller-0: Networking Info ...: /scratch/var/extra/networking.info +# controller-0: RabbitMQ Info .....: /scratch/var/extra/rabbitmq.info +# controller-0: Database Info .....: /scratch/var/extra/database.info +# controller-0: Dumping Database ..: /scratch/database/postgres.db.sql.txt +# controller-0: Dumping Database ..: /scratch/database/glance.db.sql.txt +# controller-0: Dumping Database ..: /scratch/database/nova.db.sql.txt +# controller-0: Dumping Database ..: /scratch/database/cinder.db.sql.txt +# controller-0: Dumping Database ..: /scratch/database/heat.db.sql.txt +# controller-0: Dumping Database ..: /scratch/database/neutron.db.sql.txt +# controller-0: Dumping Database ..: /scratch/database/sysinv.db.sql.txt +# controller-0: Creating Tarball ..: /scratch/controller-0.20140318.232925.tgz +# +# Tarball: /scratch/..tgz +# +# The script first collects the process, host, memory, +# filesystem, interrupt and HA 
+# information.
+# It then proceeds to call run-parts against the
+# /etc/collect.d directory, which contains service-level
+# collectors. Additional collectors can be added to that
+# collect.d directory and will be called automatically.
+#
+# Warning: Script currently must be run as wrsroot (it refuses
+#          to run as root). The collector scripts consider
+#          nodetype when deciding which commands to execute where.
+#
+##################################################################
+
+
+TOOL_NAME=collect
+TOOL_VER=2
+TOOL_REV=0
+
+# collect must be run as wrsroot ; refuse to run as root
+if [ ${UID} -eq 0 ]; then
+    echo "Error: Cannot run collect as 'root' user"
+    exit 1
+fi
+
+# pull in common utils and environment
+source /usr/local/sbin/collect_utils
+
+# get the host type
+nodetype=""
+subfunction=""
+PLATFORM_CONF=/etc/platform/platform.conf
+if [ -e ${PLATFORM_CONF} ] ; then
+    source ${PLATFORM_CONF}
+fi
+
+ACTIVE=false
+if [ "${nodetype}" == "controller" ] ; then
+    KEYRING_PATH="/opt/platform/.keyring"
+    if [ -e ${KEYRING_PATH} ] ; then
+        CRED=`find /opt/platform/.keyring -name .CREDENTIAL`
+        if [ ! -z "${CRED}" ] ; then
+            NOVAOPENRC="/etc/nova/openrc"
+            if [ -e ${NOVAOPENRC} ] ; then
+                ACTIVE=true
+                source ${NOVAOPENRC} 2>/dev/null 1>/dev/null
+            fi
+        fi
+    fi
+fi
+
+function clean_up()
+{
+    `reset`
+    echo ""
+}
+
+function control_c()
+{
+    echo ""
+    echo "... received exit signal ..."
+    clean_up
+    exit 0
+}
+
+# Handle exit signals
+trap control_c SIGINT
+trap control_c SIGTERM
+
+
+
+# static expect log level control ;
+# 0 = hide expect output
+# 1 = show expect output
+USER_LOG_MODE=0
+
+# static execution status 'return value'
+RETVAL=0
+
+# limit scp bandwidth to 10MB/s (raised from the original 1MB/s) ;
+# scp's -l option takes the limit in Kbit/s, hence $((10*8*1000))
+SCP_CMD="scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o PreferredAuthentications=password -o PubkeyAuthentication=no -l $((10*8*1000))"
+SCP_TIMEOUT="600"
+SSH_CMD="ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o PreferredAuthentications=password -o PubkeyAuthentication=no"
+NOWDATE=`date +"%Y%m%d.%H%M%S"`
+COLLECT_BASE_DIR="/scratch"
+collect_host="/usr/local/sbin/collect_host"
+CURR_DIR=`pwd`
+
+
+# common permission error strings
+pw_error="orry, try again"
+ac_error="ermission denied"
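# The leading letter of each string above is deliberately dropped so a
# single expect pattern matches both capitalizations ("Sorry"/"sorry",
# "Permission"/"permission"). A minimal sketch of the same substring
# match in plain bash (the message value is illustrative only):
msg="Sorry, try again"
case "${msg}" in
    *"${pw_error}"*) echo "password failure detected" ;;
esac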
+function print_help()
+{
+    echo ""
+    echo "Titanium Cloud Log Collection Tool, version ${TOOL_VER}.${TOOL_REV}"
+    echo ""
+    echo "Usage: ${TOOL_NAME} [COMMANDS ...] {options}"
+    echo ""
+    echo "Titanium Cloud 'collect' is used by the customer support organization"
+    echo " to collect logs and data for off system analysis."
+    echo ""
+    echo "Running collect will collect logs to /scratch/"
+    echo "on the host collect is run from. Use host names to specify which hosts to collect from."
+    echo ""
+    echo "Host data collection scope can be the current host, any single specified hostname,"
+    echo "a --list of hostnames or --all hosts in the system using a single command."
+    echo ""
+    echo "Optionally specify --start-date and/or --end-date options to limit"
+    echo " the date range and therefore size of the collect."
+    echo ""
+    echo "Optionally specify a --name prefix of the collected tar file."
+    echo ""
+    echo "With the command set specified, simply run collect as wrsroot and when"
+    echo "prompted provide the wrsroot sudo password and let collect handle the rest."
+    echo ""
+    echo "Scope Options:"
+    echo ""
+    echo " collect                                  ... collect logs for current host"
+    echo " collect host1                            ... collect logs for single named host"
+    echo " collect host1 host2 host3                ... collect logs for stacked host list"
+    echo " collect [--list | -l] host1 host2 host3  ... collect logs for list of named hosts"
+    echo " collect [--all | -a]                     ... collect data for all hosts"
+    echo ""
+    echo "Dated Collect:"
+    echo ""
+    echo "collect [--start-date | -s] YYYYMMDD  ... collection of logs on and after this date"
+    echo "collect [--end-date | -e] YYYYMMDD    ... collection of logs on and before this date"
+    echo ""
+    echo "Tarball Prefix:"
+    echo ""
+    echo "collect [--name | -n] {scope and date options}  ... specify the name prefix of the collect tarball"
+    echo ""
+    echo "Detailed Display:"
+    echo ""
+    echo "collect [--verbose | -v]  ... print details during collect"
+    echo ""
+    echo "Avoid password and security masking:"
+    echo ""
+    echo "collect [--skip-mask]  ... skip masking of collect data"
+    echo ""
+    echo "Examples:"
+    echo ""
+    echo "collect                                                      ... all logs for current host"
+    echo "collect --all                                                ... all logs from all hosts in the system"
+    echo "collect --all --start-date 20150101                          ... logs dated on and after Jan 1 2015 from all hosts"
+    echo "collect --all --start-date 20151101 --end-date 20160201     ... logs dated between Nov 1, 2015 and Feb 1 2016 from all hosts"
+    echo "collect --start-date 20151101 --end-date 20160201           ... only logs dated between Nov 1, 2015 and Feb 1 2016 for current host"
+    echo "collect --list controller-0 compute-0 storage-0             ... all logs from specified host list"
+    echo "collect --list controller-0 compute-1 --end-date 20160201   ... only logs dated on and before Feb 1 2016 for host list"
+    echo "collect --list controller-1 storage-0 --start-date 20160101 ... only logs dated on and after Jan 1 2016 for controller-1 and storage-0"
+    echo ""
+    exit 0
+}
+
+# command line argument variables ; defaulted
+DEBUG=false
+CLEAN=false
+VERBOSE=false
+SKIP_MASK=false
+
+# date variables
+STARTDATE="any"
+STARTTIME="any"
+ENDDATE="any"
+ENDTIME="any"
+GETSTARTDATE=false
+GETENDDATE=false
+
+# host selection variables
+LISTING=false
+ALLHOSTS=false
+HOSTS=1
+HOSTLIST=(${HOSTNAME})
+THISHOST=false
+
+COLLECT_TARNAME=""
+
+# clear multi option modes
+function clear_variable_args()
+{
+    LISTING=false
+    GETSTARTDATE=false
+    GETENDDATE=false
+}
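# Options such as --list, --start-date and --end-date put the parser below
# into a mode where subsequent bare arguments are consumed as hostnames or
# dates ; any new option must clear those modes, which is what
# clear_variable_args above is for. A toy illustration of the same
# state-machine idea (the argument values are illustrative only):
demo_listing=false
for arg in --list host-a host-b ; do
    case ${arg} in
        --list) demo_listing=true ;;
        *) [ "${demo_listing}" = true ] && echo "queued host: ${arg}" ;;
    esac
done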
+#
+# Utility function to print a status message and record the last error code
+#
+# Assumptions: Handles specific cases of invalid password and permission errors
+#              by exiting so as to avoid repeated errors during multi-host
+#              collection.
+#
+# $1 - status string
+# $2 - status code number
+#
+function print_status()
+{
+    local string=${1}
+    local code=${2}
+
+    logger -t ${COLLECT_TAG} "${string} (reason:${code})"
+
+    # if the status code is in the FAIL range ( less than WARNING ) then update RETVAL
+    if [ ${code} -lt ${WARN_WARNING} ] ; then
+        RETVAL=${code}
+    fi
+
+    if [ ${RETVAL} -eq ${FAIL_PASSWORD} ] ; then
+
+        echo "Invalid password ; exiting (${string})"
+        exit ${RETVAL}
+
+    elif [ ${RETVAL} -eq ${FAIL_PERMISSION} ] ; then
+
+        echo "Permission error ; exiting (${string})"
+        exit ${RETVAL}
+
+    elif [ ${RETVAL} -eq ${FAIL_UNREACHABLE} ] ; then
+
+        echo "${string} (reason:${code}:host unreachable)"
+
+    elif [ ${RETVAL} -eq ${FAIL_PERMISSION_SKIP} ] ; then
+
+        # note: FAIL_PERMISSION itself has already exited above
+        echo "${string} (reason:${code}:permission error)"
+
+    elif [ ${RETVAL} -eq ${FAIL_OUT_OF_SPACE} ] ; then
+
+        echo "${string} (reason:${code}) ; need to increase available space in host ${COLLECT_BASE_DIR}"
+
+    elif [ ${RETVAL} -eq ${FAIL_OUT_OF_SPACE_LOCAL} ] ; then
+
+        echo "${string} (reason:${code}) ; need to increase available space in ${HOSTNAME}:${COLLECT_BASE_DIR}"
+
+    elif [ ${RETVAL} -eq ${FAIL_INSUFFICIENT_SPACE} ] ; then
+
+        echo "${string} (reason:${code}) ; ${HOSTNAME}:${COLLECT_BASE_DIR} usage must be below ${MIN_PERCENT_SPACE_REQUIRED}%"
+
+    elif [ ${RETVAL} -ge ${FAIL_TIMEOUT} -a ${RETVAL} -le ${FAIL_TIMEOUT9} ] ; then
+
+        echo "${string} (reason:${code}:operation timeout)"
+
+    else
+        echo "${string} (reason:${code})"
+    fi
+}
+
+#
+# checks to see if the specified hostname is known
+# to inventory as a valid provisioned host
+#
+# $1 - this_hostname
+#
+function is_valid_host()
+{
+    local this_hostname=${1}
+
+    if [ "${this_hostname}" == "None" ] ; then
+        return ${FAIL_HOSTNAME}
+    elif [ "${this_hostname}" == "${HOSTNAME}" ] ; then
+        return $PASS
+    elif [ "${ACTIVE}" = true ] ; then
+        system host-show "${this_hostname}" 2>/dev/null 1>/dev/null
+        if [ ${?} -ne 0 ] ; then
+            return ${FAIL_HOSTNAME}
+        fi
+    else
+        print_status "Error: can only run collect for remote hosts on active controller" ${FAIL_INACTIVE}
+        exit ${FAIL_INACTIVE}
+    fi
+    return $PASS
+}
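# is_valid_host returns PASS (0) or a FAIL_* code from collect_utils ;
# callers below check ${?} and report through print_status. A minimal
# usage sketch of that convention (the hostname is illustrative only):
is_valid_host "controller-1"
rc=${?}
if [ ${rc} -ne 0 ] ; then
    print_status "Warning: cannot collect data from unknown host 'controller-1'" ${rc}
fi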
+
+# Parse the command line
+while [[ ${#} -gt 0 ]] ; do
+
+    key="${1}"
+
+    case $key in
+
+        -h|--help)
+            print_help
+            exit 0
+            ;;
+
+        -n|--name)
+            COLLECT_TARNAME=${2}_${NOWDATE}
+            clear_variable_args
+            shift
+            ;;
+
+        -v|--verbose)
+            VERBOSE=true
+            ;;
+
+        -c|--clean)
+            CLEAN=true
+            ;;
+
+        -l|--list)
+            if [[ ${#} -lt 2 ]] ; then
+                print_status "Error: empty host --list" ${FAIL}
+                exit ${FAIL}
+            fi
+            is_valid_host "${2}"
+            if [ ${?} -ne 0 ] ; then
+                print_status "Error: empty host --list or invalid first hostname" ${FAIL}
+                exit ${FAIL}
+            fi
+
+            HOSTLIST=(${2})
+            HOSTS=1
+            if [ "${2}" == "${HOSTNAME}" ] ; then
+                THISHOST=true
+            elif [ "${ACTIVE}" = false ] ; then
+                print_status "Error: can only run collect for remote hosts on active controller" ${FAIL_INACTIVE}
+                exit ${FAIL_INACTIVE}
+            fi
+            LISTING=true
+            GETSTARTDATE=false
+            GETENDDATE=false
+            shift
+            ;;
+
+        -a|--all|all)
+            if [ "${ACTIVE}" = false ] ; then
+                print_status "Error: can only run collect for remote hosts on active controller" ${FAIL_INACTIVE}
+                exit ${FAIL_INACTIVE}
+            fi
+            ALLHOSTS=true
+            HOSTLIST=(${HOSTNAME})
+            HOSTS=1
+            THISHOST=true
+            clear_variable_args
+            ;;
+
+        -s|--start-date)
+            STARTDATE="${2}"
+            LISTING=false
+            GETSTARTDATE=true
+            GETENDDATE=false
+            shift
+            ;;
+
+        -e|--end-date)
+            ENDDATE="${2}"
+            LISTING=false
+            GETSTARTDATE=false
+            GETENDDATE=true
+            shift
+            ;;
+
+        -d|--debug)
+            DEBUG=true
+            USER_LOG_MODE=1
+            clear_variable_args
+            ;;
+
+        --skip-mask)
+            # flag option ; takes no value, so no extra shift here
+            SKIP_MASK=true
+            ;;
+
+        *)
+            if [ "${LISTING}" = true ] ; then
+                is_valid_host ${key}
+                if [ ${?} -eq 0 ] ; then
+                    HOSTS=$((${HOSTS} + 1))
+                    HOSTLIST=( "${HOSTLIST[@]}" ${key} )
+                    if [ "${key}" == "${HOSTNAME}" ] ; then
+                        THISHOST=true
+                    fi
+                else
+                    # make the invalid hostname a warning only.
+                    # if we got here then at least the first hostname was valid
+                    print_status "Warning: cannot collect data from unknown host '${key}'" ${WARN_HOSTNAME}
+                fi
+            elif [ "${GETSTARTDATE}" = true ] ; then
+                dlog "accepting but ignoring legacy starttime specification"
+            elif [ "${GETENDDATE}" = true ] ; then
+                dlog "accepting but ignoring legacy endtime specification"
+            else
+                is_valid_host ${key}
+                RETVAL=${?}
+                if [ ${RETVAL} -eq 0 ] ; then
+                    HOSTLIST=${key}
+                    HOSTS=1
+                    LISTING=true
+                    if [ "${key}" == "${HOSTNAME}" ] ; then
+                        THISHOST=true
+                    fi
+                else
+                    print_status "Error: cannot collect data from unknown host '${key}'" ${RETVAL}
+                    exit ${RETVAL}
+                fi
+            fi
+            GETSTARTDATE=false
+            GETENDDATE=false
+            ;;
+    esac
+    shift # past argument or value
+done
+
+if [ ${RETVAL} -ne 0 ]; then
+    echo "command line parse error (${RETVAL})"
+    print_help
+    exit ${RETVAL}
+fi
+
+
+#
+# request the sudo password and use it for
+# all the expect driven requests below
+#
+read -s -p "[sudo] password for ${USER}:" pw
+echo ""
+
+# Although bash 'read' will handle sanitizing the password
+# input for the purposes of storing it in ${pw}, expect
+# will need certain special characters to be backslash
+# delimited
+pw=${pw//\\/\\\\}  # replace every '\' with '\\'
+pw=${pw//\]/\\\]}  # replace every ']' with '\]'
+pw=${pw//\[/\\\[}  # replace every '[' with '\['
+pw=${pw//$/\\$}    # replace every '$' with '\$'
+pw=${pw//\"/\\\"}  # replace every '"' with '\"'
+
+#
+# if the user specified the '--all' option then override
+# the current list and add them all from inventory.
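# A quick illustration of the escaping above (the password value is
# illustrative only): a literal '$' or '"' left unescaped would be
# interpolated by expect when ${pw} is expanded inside the heredocs.
demo_pw='a$b"c'
demo_pw=${demo_pw//$/\\$}
demo_pw=${demo_pw//\"/\\\"}
echo "${demo_pw}"   # prints: a\$b\"c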
+# +if [ "${ALLHOSTS}" = true ] ; then + + for foreign_host in $(system host-list | grep '[0-9]' | cut -d '|' -f 3 | tr -d ' ' | grep -v ${HOSTNAME}); do + if [ "${foreign_host}" != "None" ] ; then + HOSTS=$((${HOSTS} + 1)) + HOSTLIST=( "${HOSTLIST[@]}" ${foreign_host}) + dlog "Host:${HOSTS}: ${foreign_host}" + fi + done + +elif [ ${HOSTS} == 0 ] ; then + + HOSTLIST=${HOSTNAME} + THISHOST=true + COLLECT_TARNAME="${HOSTNAME}_${NOWDATE}" + +fi + +# Print Summary +if [ "${DEBUG}" == true ] ; then + + echo "HOSTLIST = <${HOSTLIST[@]}>" + echo "HOSTS = ${HOSTS}" + echo "ALLHOSTS = ${ALLHOSTS}" + echo "STARTDATE= ${STARTDATE}" + echo "ENDDATE = ${ENDDATE}" + + for hosts in "${HOSTLIST[@]}" ; do + echo "Host:${hosts}" + done + +elif [ ${HOSTS} -eq 0 ] ; then + + print_status "Error: no hosts specified" "${FAIL}" + exit ${FAIL} + +elif [ "${CLEAN}" == false ] ; then + + ilog "collecting data from ${HOSTS} host(s): ${HOSTLIST[@]}" + +else + + ilog "cleaning scratch space on ${HOSTLIST[@]}" + +fi + +# +# removes contents of the local /scratch directory +# +# $1 - host +# $2 - specified directory (always $COLLECT_BASE_DIR) +# +function clean_scratch_dir_local () +{ + local this_hostname=${1} + local directory=${2} + +/usr/bin/expect << EOF + log_user ${USER_LOG_MODE} + spawn bash -i + set timeout 60 + expect -re $ + send -- "sudo rm -rf ${directory}/*_????????.??????* ; cat ${cmd_done_file}\n" + expect { + "assword:" { send "${pw}\r" ; exp_continue } + "${cmd_done_sig}" { exit ${PASS} } + "annot remove" { exit ${FAIL_CLEANUP} } + "${pw_error}" { exit ${FAIL_PASSWORD} } + "${ac_error}" { exit ${FAIL_PERMISSION} } + timeout { exit ${FAIL_TIMEOUT} } + } +EOF + local rc=${?} + if [ ${rc} -ne ${PASS} ] ; then + print_status "Error: clean_scratch_dir_local ${this_hostname} failed" ${rc} + fi + return ${rc} +} + +# +# cleans the contents of the specified hosts's scratch dir +# +# $1 - this hostname +# $2 - specified directory (always $COLLECT_BASE_DIR) +# +function clean_scratch_dir_remote() +{ + local this_hostname=${1} + local directory=${2} + +/usr/bin/expect << EOF + log_user ${USER_LOG_MODE} + spawn bash -i + expect -re $ + set timeout 60 + send "${SSH_CMD} wrsroot@${this_hostname}\n" + expect { + "assword:" { + send "${pw}\r" + expect { + "${this_hostname}" { + set timeout 30 + expect -re $ + send "sudo rm -rf ${directory}/*_????????.??????* ; cat ${cmd_done_file}\n" + expect { + "assword:" { send -- "${pw}\r" ; exp_continue } + "${cmd_done_sig}" { exit ${PASS} } + "${cmd_done_file}: No such file or directory" { exit ${PASS} } + "annot remove" { exit ${FAIL_CLEANUP} } + "${pw_error}" { exit ${FAIL_PASSWORD} } + "${ac_error}" { exit ${FAIL_PERMISSION}} + timeout { exit ${FAIL_TIMEOUT3} } + } + } + timeout { exit ${FAIL_TIMEOUT1} } + } + } + "(yes/no)?" 
{ + send "yes\r" + exp_continue + } + "No route to host" { + exit ${FAIL_UNREACHABLE} + } + "Could not resolve hostname" { + exit ${FAIL_UNREACHABLE} + } + timeout { exit ${FAIL_TIMEOUT} } + } +EOF + local rc=${?} + if [ ${rc} -ne ${PASS} ] ; then + print_status "Error: clean_scratch_dir_remote ${this_hostname} failed" ${rc} + fi + return ${rc} +} + +# +# deletes a remote directory or file +# +# $1 - this hostname +# $2 - dir or file with full path +# +function delete_remote_dir_or_file() +{ + local this_hostname=${1} + local dir_or_file=${2} + +/usr/bin/expect << EOF + log_user ${USER_LOG_MODE} + spawn bash -i + expect -re $ + set timeout 60 + send "${SSH_CMD} wrsroot@${this_hostname}\n" + expect { + "assword:" { + send "${pw}\r" + expect { + "${this_hostname}:" { + set timeout 10 + expect -re $ + send "sudo rm -rf ${dir_or_file} ; cat ${cmd_done_file}\n" + expect { + "assword:" { send -- "${pw}\r" ; exp_continue } + "${cmd_done_sig}" { exit ${PASS} } + "${cmd_done_file}: No such file or directory" { exit ${PASS} } + "annot remove" { exit ${FAIL_CLEANUP} } + "${pw_error}" { exit ${FAIL_PASSWORD} } + "${ac_error}" { exit ${FAIL_PERMISSION}} + timeout { exit ${FAIL_TIMEOUT3} } + } + } + timeout { exit ${FAIL_TIMEOUT1} } + } + } + "(yes/no)?" { + send "yes\r" + exp_continue + } + "No route to host" { + exit ${FAIL_UNREACHABLE} + } + "Could not resolve hostname" { + exit ${FAIL_UNREACHABLE} + } + timeout { exit ${FAIL_TIMEOUT} } + } +EOF + local rc=${?} + if [ ${rc} -ne ${PASS} ] ; then + print_status "Error: delete_remote_dir_or_file ${this_hostname} failed" ${rc} + fi + return ${rc} +} + +HOST_COLLECT_ERROR_LOG="/tmp/host_collect_error.log" +# +# Fetch a file from a remote host using the global pw +# $1 - this hostname +# $2 - remote source path/filename +# $3 - local path destination +# +function get_file_from_host() +{ + local this_hostname=${1} + local remote_src=${2} + local local_dest=${3} + + remove_file_local ${HOST_COLLECT_ERROR_LOG} + +/usr/bin/expect << EOF + log_user ${USER_LOG_MODE} + spawn bash -i + set timeout ${SCP_TIMEOUT} + expect -re $ + send "${SCP_CMD} wrsroot@${this_hostname}:${remote_src} ${local_dest} 2>>${HOST_COLLECT_ERROR_LOG}\n" + expect { + "assword:" { + send "${pw}\r" + expect { + "100%" { exit ${PASS} } + "${pw_error}" { exit ${FAIL_PASSWORD} } + "${ac_error}" { exit ${FAIL_PERMISSION}} + timeout { exit ${FAIL_TIMEOUT1} } + } + } + "No route to host" { + exit ${FAIL_UNREACHABLE} + } + "Could not resolve hostname" { + exit ${FAIL_UNREACHABLE} + } + timeout { exit ${FAIL_TIMEOUT} } + } +EOF + local rc=${?} + if [ ${rc} -ne ${PASS} ] ; then + print_status "failed to get_file_from ${this_hostname}" ${rc} + else + # Look for "No space left on device" error + grep -q "${FAIL_OUT_OF_SPACE_STR}" ${HOST_COLLECT_ERROR_LOG} + if [ "$?" == "0" ] ; then + rc=${FAIL_OUT_OF_SPACE} + fi + fi + + remove_file_local ${HOST_COLLECT_ERROR_LOG} + + return ${rc} +} + +# +# Create the local dated collect dir where all +# the tarballs for this collect will get put. 
+#
+# Permissions are set to make it easy to copy
+# tarballs from a remote host into it
+#
+# $1 - the full directory path
+#
+function create_collect_dir_local()
+{
+    local dir=${1}
+
+/usr/bin/expect << EOF
+    log_user ${USER_LOG_MODE}
+    spawn bash -i
+    set timeout 10
+    expect -re $
+    send "sudo mkdir -m 775 -p ${dir} ; cat ${cmd_done_file}\n"
+    expect {
+        "assword:" {
+            send "${pw}\r"
+            expect {
+                "${cmd_done_sig}" { exit ${PASS} }
+                "${pw_error}"     { exit ${FAIL_PASSWORD} }
+                "${ac_error}"     { exit ${FAIL_PERMISSION} }
+                timeout           { exit ${FAIL_TIMEOUT1} }
+            }
+        }
+        "${cmd_done_sig}" { exit ${PASS} }
+        "${ac_error}"     { exit ${FAIL_PERMISSION} }
+        timeout           { exit ${FAIL_TIMEOUT} }
+    }
+EOF
+    local rc=${?}
+    if [ ${rc} -ne ${PASS} ] ; then
+        print_status "failed to create_collect_dir_local for ${dir}" ${rc}
+    fi
+    return ${rc}
+}
+
+#
+# Delete the specified file using sudo
+#
+# $1 - the file to be deleted, with full path specified
+#
+function remove_file_local()
+{
+    local local_file=${1}
+    local rc=${PASS}
+
+    if [ -e ${local_file} ] ; then
+
+/usr/bin/expect << EOF
+    log_user ${USER_LOG_MODE}
+    spawn bash -i
+    set timeout 10
+    expect -re $
+    send -- "sudo rm -f ${local_file} ; cat ${cmd_done_file}\n"
+    expect {
+        "assword:"        { send -- "${pw}\r" ; exp_continue }
+        "${cmd_done_sig}" { exit ${PASS} }
+        "annot remove"    { exit ${FAIL_CLEANUP} }
+        "${pw_error}"     { exit ${FAIL_PASSWORD} }
+        "${ac_error}"     { exit ${FAIL_PERMISSION} }
+        timeout           { exit ${FAIL_TIMEOUT} }
+    }
+EOF
+        local rc=${?}
+        if [ ${rc} -ne ${PASS} ] ; then
+            print_status "failed to remove_file_local ${local_file}" ${rc}
+        fi
+    fi
+    return ${rc}
+}
+
+#
+# Delete the specified directory using sudo
+#
+# $1 - the directory to be removed, with full path specified
+#
+function remove_dir_local()
+{
+    local dir=${1}
+
+/usr/bin/expect << EOF
+    log_user ${USER_LOG_MODE}
+    spawn bash -i
+    set timeout 10
+    expect -re $
+    send -- "sudo rm -rf ${dir} ; cat ${cmd_done_file}\n"
+    expect {
+        "assword:"        { send -- "${pw}\r" ; exp_continue }
+        "${cmd_done_sig}" { exit ${PASS} }
+        "annot remove"    { exit ${FAIL_CLEANUP} }
+        "${pw_error}"     { exit ${FAIL_PASSWORD} }
+        "${ac_error}"     { exit ${FAIL_PERMISSION} }
+        timeout           { exit ${FAIL_TIMEOUT} }
+    }
+EOF
+    local rc=${?}
+    if [ ${rc} -ne ${PASS} ] ; then
+        print_status "failed to remove_dir_local ${dir}" ${rc}
+    fi
+    return ${rc}
+}
+
+#
+# Move a file and change permissions using sudo
+#
+# $1 - src path/file
+# $2 - dest path/file
+#
+function move_file_local()
+{
+    local src=${1}
+    local dst=${2}
+
+/usr/bin/expect << EOF
+    log_user ${USER_LOG_MODE}
+    spawn bash -i
+    set timeout 10
+    expect -re $
+    send -- "sudo mv ${src} ${dst} ; cat ${cmd_done_file}\n"
+    expect {
+        "assword:"        { send -- "${pw}\r" ; exp_continue }
+        "${cmd_done_sig}" { exit ${PASS} }
+        "annot remove"    { exit ${FAIL_CLEANUP} }
+        "${pw_error}"     { exit ${FAIL_PASSWORD} }
+        "${ac_error}"     { exit ${FAIL_PERMISSION} }
+        timeout           { exit ${FAIL_TIMEOUT} }
+    }
+EOF
+    local rc=${?}
+    if [ ${rc} -ne ${PASS} ] ; then
+        print_status "failed to move_file_local ${src} to ${dst}" ${rc}
+    fi
+    return ${rc}
+}
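# Each helper above follows one pattern: spawn an interactive bash under
# /usr/bin/expect, issue a sudo command chained with "cat ${cmd_done_file}",
# and key success off seeing ${cmd_done_sig}. A stripped-down sketch of
# that pattern, assuming cmd_done_file, cmd_done_sig and the PASS/FAIL_*
# codes come from collect_utils (the function name is illustrative):
function run_sudo_cmd_demo()
{
    local cmd="${1}"

/usr/bin/expect << EOF
    log_user ${USER_LOG_MODE}
    spawn bash -i
    set timeout 10
    expect -re $
    send -- "sudo ${cmd} ; cat ${cmd_done_file}\n"
    expect {
        "assword:"        { send -- "${pw}\r" ; exp_continue }
        "${cmd_done_sig}" { exit ${PASS} }
        "${pw_error}"     { exit ${FAIL_PASSWORD} }
        "${ac_error}"     { exit ${FAIL_PERMISSION} }
        timeout           { exit ${FAIL_TIMEOUT} }
    }
EOF
    return ${?}
}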
+# Append the echoed collect done with collect duration and file size
+# ... done (HH:MM:SS xxM)
+function echo_stats()
+{
+    local secs=${1}
+    local file=${2}
+
+    echo -n " ($(date -d@${secs} -u +%H:%M:%S)"
+    if [ -e ${file} ] ; then
+        size=$(du -h ${file} | cut -f 1 2>/dev/null)
+        if [ $? -eq 0 ] ; then
+            printf " %5s)\n" "${size}"
+            return
+        fi
+    fi
+    echo ")"
+}
+
+
+# Handle clean command
+if [ "${CLEAN}" == true ] ; then
+    for host in "${HOSTLIST[@]}" ; do
+        if [ "${host}" != " " ] ; then
+
+            if [ "${host}" == "None" ] ; then
+                continue
+            elif [ "${host}" == "" ] ; then
+                continue
+            fi
+
+            echo -n "cleaning ${host}:${COLLECT_BASE_DIR} ... "
+            if [ "${host}" == "${HOSTNAME}" ] ; then
+                clean_scratch_dir_local ${host} ${COLLECT_BASE_DIR}
+                if [ ${?} -eq ${PASS} ] ; then
+                    echo "done"
+                fi
+            else
+                clean_scratch_dir_remote ${host} ${COLLECT_BASE_DIR}
+                if [ ${?} -eq ${PASS} ] ; then
+                    echo "done"
+                fi
+            fi
+            logger -t ${COLLECT_TAG} "user cleaned ${host}:${COLLECT_BASE_DIR} content"
+        fi
+    done
+    exit 0
+fi
+
+
+if [ ! -z ${COLLECT_TARNAME} ] ; then
+
+    # User specified tarname
+    COLLECT_NAME=${COLLECT_TARNAME}
+    COLLECT_DIR="${COLLECT_BASE_DIR}/${COLLECT_NAME}"
+    TARBALL_NAME="${COLLECT_DIR}.tar"
+    named="user-named"
+
+elif [ "${ALLHOSTS}" = true ] ; then
+
+    # All hosts bundle
+    COLLECT_NAME="ALL_NODES_${NOWDATE}"
+    COLLECT_DIR="${COLLECT_BASE_DIR}/${COLLECT_NAME}"
+    TARBALL_NAME="${COLLECT_DIR}.tar"
+    named="all-nodes"
+
+elif [ ${HOSTS} -eq 1 ] ; then
+
+    # Single host bundle
+    COLLECT_NAME="${HOSTLIST[0]}_${NOWDATE}"
+    COLLECT_DIR="${COLLECT_BASE_DIR}/${COLLECT_NAME}"
+    TARBALL_NAME="${COLLECT_DIR}.tar"
+    named="single-node"
+
+else
+
+    # Otherwise it's a multi host bundle
+    COLLECT_NAME="SELECT_NODES_${NOWDATE}"
+    COLLECT_DIR="${COLLECT_BASE_DIR}/${COLLECT_NAME}"
+    TARBALL_NAME="${COLLECT_DIR}.tar"
+    named="selected-node"
+
+fi
+
+#
+# Create the local collect directory where
+# the tarball(s) will be temporarily stored
+#
+create_collect_dir_local "${COLLECT_DIR}"
+
+declare COLLECT_START_TIME=${SECONDS}
+
+declare -i longest_hostname=0
+for host in "${HOSTLIST[@]}" ; do
+    len=${#host}
+    if [ $len -gt ${longest_hostname} ] ; then
+        longest_hostname=$len
+    fi
+done
+
+#
+# Loop over all the targeted hosts and
+# 1. run collect
+# 2. copy the tarball to $COLLECT_DIR
+#
+for host in "${HOSTLIST[@]}" ; do
+    if [ "${host}" != " " ] ; then
+
+        if [ "${host}" == "None" ] ; then
+            continue
+        elif [ "${host}" == "" ] ; then
+            continue
+        fi
+
+        HOST_START_TIME=${SECONDS}
+
+        TARNAME="${host}_${NOWDATE}"
+
+        # line up the host names
+        echo -n "collecting"
+        len=${#host}
+        for ((i=len;i>${COLLECT_ERROR_LOG} ; cat ${cmd_done_file})\n"
+    expect {
+        "assword:" {
+            send "${pw}\r"
+            expect {
+                "${cmd_done_sig}" { exit ${PASS} }
+                "${pw_error}"     { exit ${FAIL_PASSWORD} }
+                "${ac_error}"     { exit ${FAIL_PERMISSION} }
+                timeout           { exit ${FAIL_TIMEOUT1} }
+            }
+        }
+        timeout { exit ${FAIL_TIMEOUT} }
+    }
+EOF
+    RETVAL=${?}
+    if [ ${RETVAL} -ne ${PASS} ] ; then
+        collect_errors ${HOSTNAME}
+        print_status "failed to create ${TARBALL_NAME}" ${RETVAL}
+    else
+        collect_errors ${HOSTNAME}
+        RETVAL=$?
+        if [ ${RETVAL} -eq ${PASS} ] ; then
+            secs=$((SECONDS-COLLECT_START_TIME))
+            echo -n "done"
+            echo_stats $secs "${TARBALL_NAME}"
+            logger -t ${COLLECT_TAG} "created ${named} tarball ${TARBALL_NAME}"
+        else
+            echo "removing incomplete collect: ${TARBALL_NAME}"
+            remove_file_local "${TARBALL_NAME}"
+        fi
+    fi
+    remove_file_local ${COLLECT_ERROR_LOG}
+    remove_dir_local "${COLLECT_DIR}"
+
+# return to the caller's dir
+cd ${CURR_DIR}
+
+exit ${RETVAL}
diff --git a/middleware/util/recipes-common/collector/scripts/collect_ceph.sh b/middleware/util/recipes-common/collector/scripts/collect_ceph.sh
new file mode 100755
index 0000000..0898d8d
--- /dev/null
+++ b/middleware/util/recipes-common/collector/scripts/collect_ceph.sh
@@ -0,0 +1,83 @@
+#! /bin/bash
+#
+# Copyright (c) 2013-2014 Wind River Systems, Inc.
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+
+
+# Loads Up Utilities and Commands Variables
+source /usr/local/sbin/collect_parms
+source /usr/local/sbin/collect_utils
+
+SERVICE="ceph"
+LOGFILE="${extradir}/ceph.info"
+echo "${hostname}: Ceph Info .........: ${LOGFILE}"
+
+# note the inverted return convention: returns 0 (shell success) when
+# the management-ip service is NOT enabled-active, and 1 when it is
+function is_service_active()
+{
+    active=`sm-query service management-ip | grep "enabled-active"`
+    if [ -z "$active" ] ; then
+        return 0
+    else
+        return 1
+    fi
+}
+
+function exit_if_timeout()
+{
+    if [ "$?" = "124" ] ; then
+        echo "Exiting due to ceph command timeout" >> ${LOGFILE}
+        exit 0
+    fi
+}
+
+###############################################################################
+# Only Controller
+###############################################################################
+if [ "$nodetype" = "controller" ] ; then
+
+    # Using timeout with all ceph commands because commands can hang for
+    # minutes if the ceph cluster is down. If ceph is not configured, the
+    # commands return immediately.
+
+    delimiter ${LOGFILE} "ceph status"
+    timeout 30 ceph status >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG}
+    exit_if_timeout
+
+    delimiter ${LOGFILE} "ceph mon dump"
+    timeout 30 ceph mon dump >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG}
+    exit_if_timeout
+
+    delimiter ${LOGFILE} "ceph osd dump"
+    timeout 30 ceph osd dump >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG}
+    exit_if_timeout
+
+    delimiter ${LOGFILE} "ceph osd tree"
+    timeout 30 ceph osd tree >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG}
+    exit_if_timeout
+
+    delimiter ${LOGFILE} "ceph osd crush dump"
+    timeout 30 ceph osd crush dump >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG}
+    exit_if_timeout
+
+    # skip the remaining queries, which depend on active management
+    # services, when is_service_active reports inactive (returns 0)
+    is_service_active
+    if [ "$?" = "0" ] ; then
+        exit 0
+    fi
+
+    delimiter ${LOGFILE} "ceph df"
+    timeout 30 ceph df >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG}
+    exit_if_timeout
+
+    delimiter ${LOGFILE} "ceph osd df tree"
+    timeout 30 ceph osd df tree >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG}
+    exit_if_timeout
+
+    delimiter ${LOGFILE} "ceph health detail"
+    timeout 30 ceph health detail >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG}
+    exit_if_timeout
+
+fi
+
+exit 0
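Every ceph query above is wrapped in "timeout 30" because these commands can hang indefinitely when the cluster is down, and timeout(1) exits with status 124 when it has to kill the command. A minimal sketch of the same guard for any hang-prone collector command (the command name is illustrative only):

    timeout 30 some-hang-prone-command
    if [ "$?" = "124" ] ; then
        echo "command timed out ; skipping"
    fi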
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+
+
+# Loads Up Utilities and Commands Variables
+
+source /usr/local/sbin/collect_parms
+source /usr/local/sbin/collect_utils
+
+SERVICE="coredump"
+LOGFILE="${extradir}/${SERVICE}.info"
+
+
+COREDUMPDIR="/var/lib/systemd/coredump"
+
+echo "${hostname}: Core Dump Info ....: ${LOGFILE}"
+
+files=`ls ${COREDUMPDIR} | wc -l`
+if [ "${files}" == "0" ] ; then
+    echo "No core dumps" >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG}
+else
+    COMMAND="ls -lrtd ${COREDUMPDIR}/*"
+    delimiter ${LOGFILE} "${COMMAND}"
+    ${COMMAND} >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG}
+
+    COMMAND="md5sum ${COREDUMPDIR}/*"
+    delimiter ${LOGFILE} "${COMMAND}"
+    ${COMMAND} >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG}
+fi
+
+exit 0
diff --git a/middleware/util/recipes-common/collector/scripts/collect_crash.sh b/middleware/util/recipes-common/collector/scripts/collect_crash.sh
new file mode 100644
index 0000000..fc8c798
--- /dev/null
+++ b/middleware/util/recipes-common/collector/scripts/collect_crash.sh
@@ -0,0 +1,30 @@
+#! /bin/bash
+#
+# Copyright (c) 2016 Wind River Systems, Inc.
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+
+
+# Loads Up Utilities and Commands Variables
+
+source /usr/local/sbin/collect_parms
+source /usr/local/sbin/collect_utils
+
+SERVICE="crash"
+LOGFILE="${extradir}/${SERVICE}.info"
+
+
+CRASHDIR="/var/crash"
+
+echo "${hostname}: Kernel Crash Info .: ${LOGFILE}"
+
+COMMAND="find ${CRASHDIR}"
+delimiter ${LOGFILE} "${COMMAND}"
+${COMMAND} >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG}
+
+COMMAND="rsync -a --include=*.txt --include=*/ --exclude=* ${CRASHDIR} ${basedir}/var/"
+delimiter ${LOGFILE} "${COMMAND}"
+${COMMAND} >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG}
+
+exit 0
diff --git a/middleware/util/recipes-common/collector/scripts/collect_date b/middleware/util/recipes-common/collector/scripts/collect_date
new file mode 100755
index 0000000..22c62fb
--- /dev/null
+++ b/middleware/util/recipes-common/collector/scripts/collect_date
@@ -0,0 +1,1064 @@
+#!/bin/bash
+#######################################################################
+#
+# Copyright (c) 2017 Wind River Systems, Inc.
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+########################################################################
+#
+# This file is a new member of the Titanium Cloud "Log Collect Utility".
+# It implements the date-restricted collect feature.
+#
+# This file is invoked by collect_host when a date-restricted collect
+# is requested with the --start-date and/or --end-date options.
+#
+# This date-restricted collect service applies to /var/log and its
+# subdirectories only. The service decides whether a log file should be
+# included in a dated collect by looking at the logs at the head and
+# tail of each file in /var/log and its subdirectories. Those dates are
+# then compared to the user-specified date range. If a file is found to
+# contain logs within that date range then that file is included in the
+# collect. A valid log date prefix is "YYYY-MM-DD".
+#
+# Unfortunately, not all log files contain the correct date placement
+# and format. This feature implements special-case handling for many,
+# but not all, of such cases. To avoid accidental exclusion of a key
+# file, this feature will by default include log files whose log date
+# content could not be determined, provided the file's modification
+# date is after the specified start date.
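+#
+# A minimal sketch of that fallback rule as it is applied further below
+# ('f' is a hypothetical file name ; GNU stat is assumed):
+#
+#    FILE_DATE=$(stat -c %y "${f}" | cut -b 1-10)    # file date, YYYY-MM-DD
+#    date_to_int "${FILE_DATE}"
+#    [ ${__this_integer_date} -ge ${start_date} ]    # true means include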
+#
+# Note: local convention ; for example ${head_date} vs ${HEAD_DATE}
+#
+#       Lower case date variables contain integer values while
+#       Upper case date variables contain formatted string values of same.
+#
+# Calling sequence:
+#
+#    /usr/local/sbin/collect_date
+#    /usr/local/sbin/collect_date 20170701 20170901 /tmp/file.list true
+#
+########################################################################
+
+#
+# Import commands, variables and convenience functions available to
+# all collectors ; common and user defined.
+#
+source /usr/local/sbin/collect_utils
+
+# where to find the logs
+declare -r baselogdir="/var/log"
+
+# include / exclude labels
+declare -r INCLUDE_FILE="inc"
+declare -r EXCLUDE_FILE="exc"
+
+# a global reason string that is only valid
+# in the context of the file being looked at.
+declare __this_reason=""
+
+# setup defaults
+INC_FILE_LIST="/var/run/collect_include.list"
+EXC_FILE_LIST="/var/run/collect_exclude.list"
+NOD_FILE_LIST="/var/run/collect_nodate.list"
+
+BOT_DATE="2000-01-01" # beginning of time date
+bot_date=730032       # beginning of time date as an integer (see date_to_int)
+
+EOT_DATE="9999-12-31" # end of time date
+eot_date=3650038      # end of time date as an integer (see date_to_int)
+
+# manage debug mode
+DEBUG="${4}"
+set_debug_mode "${DEBUG}"
+echo "Debug Mode: ${DEBUG}"
+
+dlog "collect_date args: ${1} ${2} ${3} ${4} ${5}"
+
+#############################################################################
+#
+# 'track' is the main accounting procedure that manages file inclusions and
+#         exclusions as well as the metrics around all the parsed files.
+#
+#         It also reports accounting mismatch logs, if they occur (should
+#         not), and the file that started the mismatch (to assist in debug).
+#
+# $1 - filename
+# $2 - label
+#
+#############################################################################
+
+# accounting defaults
+declare -i file_count=0
+declare -i inc_file_count=0
+declare -i exc_file_count=0
+declare -i empty_file_count=0
+
+function track()
+{
+    local fn="${1}"
+    local label="${2}"
+
+    if [ -z "${fn}" ] ; then
+        elog "Ignoring call with empty filename"
+        return
+
+    elif [ "${label}" == "totals" ] ; then
+        ((file_count++))
+        return
+
+    elif [ "${label}" == "empty" ] ; then
+        ((empty_file_count++))
+        return
+
+    elif [ "${label}" == "${INCLUDE_FILE}" ] ; then
+        manage_file "${fn}" "${label}" "${__this_reason}"
+        ((inc_file_count++))
+
+    elif [ "${label}" == "${EXCLUDE_FILE}" ] ; then
+        manage_file "${fn}" "${label}" "${__this_reason}"
+        ((exc_file_count++))
+
+    else
+        elog "Unknown label '${label}'"
+
+    fi
+
+    sum=$((inc_file_count + exc_file_count))
+    if [ ${file_count} -ne ${sum} ] ; then
+        wlog "MISMATCH: ${file_count} != ${inc_file_count} + ${exc_file_count} - ${fn}"
+    fi
+}
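+
+# Illustrative 'track' call sequence (hypothetical file name), matching
+# how the main loop at the bottom of this script drives the accounting:
+#
+#    track "/var/log/example.log" "totals"            # count the file
+#    __this_reason="typical:case 1:in-range"
+#    track "/var/log/example.log" "${INCLUDE_FILE}"   # account the inclusion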
+############################################################################
+#
+# 'summary' is an accounting display procedure used to show the
+#           accounting results of the total number of files processed,
+#           number of empty files and most importantly the number of
+#           included and excluded files.
+#
+############################################################################
+
+function summary()
+{
+    dlog "Summary:"
+    dlog "Total Files: ${file_count}"
+    dlog "Empty Files: ${empty_file_count}"
+    dlog "Added Files: ${inc_file_count}"
+    dlog "Omitd Files: ${exc_file_count}"
+}
+
+#############################################################################
+#
+# 'date_to_int' converts a standard formatted YYYY-MM-DD string date
+#               to an integer and stores it in the __this_integer_date
+#               variable to be used in context on demand.
+#
+#############################################################################
+
+# short lived global integer date value updated by date_to_int utility
+declare -i __this_integer_date=""
+
+function date_to_int()
+{
+    local yy="${1:0:4}"
+    local mm="${1:5:2}"
+    local dd="${1:8:2}"
+
+    # handle leading zeros in month and day
+    # (a leading zero would otherwise be treated as octal in arithmetic)
+    if [ "${mm:0:1}" == "0" ] ; then
+        mm=${mm:1:1}
+    fi
+    if [ "${dd:0:1}" == "0" ] ; then
+        dd=${dd:1:1}
+    fi
+
+    # 365 days in a year, 31 days in a month, 1 day in a day
+    __this_integer_date=$((yy*365 + mm*31 + dd))
+}
+
+############################################################################
+#
+# 'create_list_file' removes an old/stale list file and creates a new
+#                    empty one with correct permissions.
+#
+############################################################################
+
+function create_list_file()
+{
+    local fn="${1}"
+    if [ -e "${fn}" ] ; then
+        rm -f "${fn}"
+    fi
+    touch "${fn}"
+    chmod 644 "${fn}"
+}
+
+########################################################################
+#
+# Handle the incoming 'start' and 'end' date formats defensively.
+#
+# If a date comes in with no dashes, as it would from the user's date
+# specification, then reformat it into the standard '-' delimited form ;
+# i.e. 20171002 becomes 2017-10-02.
+#
+# If it is verified to already be in the standard format just copy it in.
+#
+# Otherwise assume the start date is the beginning of time or the
+# end date is the end of time.
+
+# load up the start date string and integer representation
+if [ -z "${1}" ] ; then
+    START_DATE="${BOT_DATE}"
+elif [[ "${1}" =~ ^[0-9]{4}[0-9]{2}[0-9]{2}$ ]] ; then
+    START_DATE="${1:0:4}-${1:4:2}-${1:6:2}"
+elif [[ "${1}" =~ ^[0-9]{4}-[0-9]{2}-[0-9]{2}$ ]] ; then
+    START_DATE="${1}"
+else
+    START_DATE="${BOT_DATE}"
+fi
+
+# Convert the correct or corrected 'start' date to an integer value
+date_to_int "${START_DATE}"
+start_date=${__this_integer_date}
+
+
+# load up the end date string and integer representation
+if [ -z "${2}" ] ; then
+    END_DATE="${EOT_DATE}"
+elif [[ "${2}" =~ ^[0-9]{4}[0-9]{2}[0-9]{2}$ ]] ; then
+    END_DATE="${2:0:4}-${2:4:2}-${2:6:2}"
+elif [[ "${2}" =~ ^[0-9]{4}-[0-9]{2}-[0-9]{2}$ ]] ; then
+    END_DATE="${2}"
+else
+    END_DATE="${EOT_DATE}"
+fi
+
+# Convert the correct or corrected 'end' date to an integer value
+date_to_int "${END_DATE}"
+end_date=${__this_integer_date}
+
+# Handle the user error of specifying an end date that is before the start date
+if [ ${start_date} -gt ${end_date} ] ; then
+    wlog "invalid date range ; end date (${END_DATE}:${end_date}) is before start (${START_DATE}:${start_date})"
+    wlog "correcting to defaults: from ${START_DATE} to ${END_DATE}"
+    START_DATE="${BOT_DATE}"
+    END_DATE="${EOT_DATE}"
+    start_date=${bot_date}
+    end_date="${eot_date}"
+fi
+
+ilog "collecting log files containing logs dated ${START_DATE} to ${END_DATE} (inclusive)"
+
+
+if [ "${3}" == "" ] ; then
+    elog "dated collect include file list name not specified ... exiting"
+    exit 1
+else
+    VAR_LOG_INCLUDE_LIST=${3}
+fi
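+
+# Worked example of the conversion above ; the values follow the
+# yy*365 + mm*31 + dd formula, so they are ordinals for comparison only,
+# not calendar-exact day counts:
+#
+#    date_to_int "2017-07-01"    # __this_integer_date = 736423
+#    date_to_int "2017-09-01"    # __this_integer_date = 736485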
+create_list_file "${VAR_LOG_INCLUDE_LIST}"
+create_list_file "${INC_FILE_LIST}"
+create_list_file "${EXC_FILE_LIST}"
+create_list_file "${NOD_FILE_LIST}"
+
+# Declare and init the include and exclude debug lists.
+inclist=("")
+exclist=("")
+
+#############################################################################
+#
+# 'filedatelist' is a list of files that are known to not contain dated logs.
+#                Instead these files are included unless their file date is
+#                older than the specified start date.
+#
+#############################################################################
+
+filedatelist=("")
+filedatelist+=("/var/log/wtmp")
+filedatelist+=("/var/log/dmesg")
+filedatelist+=("/var/log/dmesg.old")
+filedatelist+=("/var/log/sm-trap.log")
+filedatelist+=("/var/log/sm-customer.log")
+filedatelist+=("/var/log/sm-customer.alarm")
+filedatelist+=("/var/log/sm-shutdown.log")
+filedatelist+=("/var/log/nfv-vim-events.log")
+filedatelist+=("/var/log/fm-customer.log")
+filedatelist+=("/var/log/fm-alarm.log")
+filedatelist+=("/var/log/lighttpd-access.log")
+filedatelist+=("/var/log/audit/audit.log")
+filedatelist+=("/var/log/rabbitmq/shutdown_log")
+filedatelist+=("/var/log/rabbitmq/startup_log")
+filedatelist+=("/var/log/rabbitmq/wait_log")
+filedatelist+=("/var/log/rabbitmq/rabbit@localhost.log")
+filedatelist+=("/var/log/nfv-vim-alarms.log")
+filedatelist+=("/var/log/vswitch.cmds.log")
+
+# This is a list of files to always include
+autoaddlist=("")
+autoaddlist+=("/var/log/collect.log")
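+
+# For illustration, with a specified range of 2017-07-01 .. 2017-09-01
+# the 'is_in_range' function below classifies sample head/tail dates as:
+#
+#    is_in_range "2017-07-15" "2017-08-01"    # case 1 : in-range
+#    is_in_range "2017-06-01" "2017-07-02"    # case 2 : in-range
+#    is_in_range "2017-06-01" "2017-10-01"    # case 3 : in-range
+#    is_in_range "2017-01-01" "2017-02-01"    # case 0 : out-of-range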
+#########################################################################
+#
+# 'is_in_range' returns true if the specified log file date range
+#               is within the bounded date range specified by the caller.
+#               Otherwise a false is returned.
+#
+# ${1} is HEAD_DATE and is the date of the first log in the file in context
+# ${2} is TAIL_DATE and is the date of the last log in the file in context
+#
+# expected date format is ... YYYY-MM-DD
+#
+# Calling Sequence is ... is_in_range HEAD_DATE TAIL_DATE
+#
+# There are several cases that are handled ;
+# see the case comments inline below.
+#
+#########################################################################
+
+function is_in_range()
+{
+    local HEAD_DATE="${1}"
+    local TAIL_DATE="${2}"
+    if [[ ${HEAD_DATE} =~ [0-9]{4}-[0-9]{2}-[0-9]{2} ]]; then
+
+        # Convert the date to an integer value
+        # to make the compare easier and faster
+        date_to_int "${HEAD_DATE}"
+        head_date=${__this_integer_date}
+
+        if [[ ${TAIL_DATE} =~ [0-9]{4}-[0-9]{2}-[0-9]{2} ]]; then
+
+            # Convert the date to an integer value
+            # to make the compare easier and faster
+            date_to_int "${TAIL_DATE}"
+            tail_date=${__this_integer_date}
+
+            in_range=false
+
+            # Case 0: the last log is before the start date or the first
+            #         log is after the end date
+            # if [[ "${TAIL_DATE}" < "${START_DATE}" || "${HEAD_DATE}" > "${END_DATE}" ]] ; then
+            if [ ${tail_date} -lt ${start_date} -o ${head_date} -gt ${end_date} ] ; then
+                __this_reason+=":case 0"
+                in_range=false
+
+            # Case 1: the head is after the start but before the end date
+            #         .... S ... head ... E ....
+            elif [ ${head_date} -ge ${start_date} -a ${head_date} -le ${end_date} ] ; then
+                __this_reason+=":case 1"
+                in_range=true
+
+            # Case 2: the tail is after the start but before the end date
+            #         .... S ... tail ... E ....
+            elif [ ${tail_date} -ge ${start_date} -a ${tail_date} -le ${end_date} ] ; then
+                __this_reason+=":case 2"
+                in_range=true
+
+            # Case 3: the log file date range spans the start and end dates
+            #         head S ... ... E tail
+            elif [ ${head_date} -le ${start_date} -a ${tail_date} -ge ${end_date} ] ; then
+                __this_reason+=":case 3"
+                in_range=true
+
+            else
+                __this_reason+=":default"
+            fi
+        else
+            __this_reason+=":invalid-tail-date"
+            # the tail date is unknown ;
+            # include this file as long as the head date is before the end date
+            if [ ${head_date} -lt ${end_date} ] ; then
+                in_range=true
+            else
+                in_range=false
+            fi
+        fi
+
+        if [ "${in_range}" = true ] ; then
+            __this_reason+=":in-range ${HEAD_DATE} to ${TAIL_DATE}"
+            true
+        else
+            __this_reason+=":out-of-range ${HEAD_DATE} to ${TAIL_DATE}"
+            false
+        fi
+        return
+    fi
+
+    __this_reason+=":date-format-error ${HEAD_DATE} to ${TAIL_DATE}"
+    true
+    return
+}
+
+###########################################################################
+#
+# Name       : want_this_file
+#
+# Description: This utility first compares the filename to known exception
+#              cases and handles them accordingly. Exception cases do look
+#              for the date but with different methods. Once the date info
+#              is or is not found then the choice to include the file or
+#              not follows the same general logic as the other cases below.
+#
+#              If not an exception case then it determines the file type
+#              and performs any preprocessing required. i.e. uncompressing
+#              the file and switching the filename to the uncompressed name.
+#              Data files or other unknown file types are automatically
+#              included without further data query by immediately returning
+#              true.
+#
+#              With an expected supported filename in hand this utility will
+#              extract the date-only (time not included) portion, the first
+#              10 characters of the first and last logs, and determine if
+#              this logfile has logs that fall within the specified date
+#              range.
+#
+# Returns    : If there is no valid date found then true is returned.
+#              If the file contains in-range logs then true is returned.
+#              If the file does not contain in-range logs then false is
+#              returned.
+#
+# Parameters : $1 is the full pathed log file name.
+#
+###########################################################################
+
+function want_this_file()
+{
+    local inc=true
+    local LOGFILE="${1}"
+    local filetype=$(file "${LOGFILE}")
+    local HEAD_DATE=""
+    local TAIL_DATE=""
+
+    for add in "${autoaddlist[@]}"
+    do
+        if [ "${add}" == "${LOGFILE}" ] ; then
+            __this_reason+="autoadd"
+            true
+            return
+        fi
+    done
+
+    ##########################################################################
+    # Exception Case: known free formatted log files.
+    ##########################################################################
+    #
+    # Some log files are known to not contain properly dated logs.
+    # Such files may just contain free-format strings of information.
+    #
+    # A list of such files is hard coded in filedatelist.
+    # TODO: consider making this a file that is loaded.
+    #
+    # Check to see if this file is in that list.
+    # Only exclude such files if their last modified date is before the
+    # start date.
+    #
+    ##########################################################################
+    for add in "${filedatelist[@]}"
+    do
+        if [ "${add}" == "${LOGFILE}" ] ; then
+            __this_reason+="filedate"
+
+            # Don't include empty files that are in the hard coded filedatelist
+            filetype=$(file "${LOGFILE}")
+            if [ ! -z "${filetype}" ] ; then
+                case ${filetype} in
+                    *empty*)
+                        __this_reason="empty"
+                        track "${LOGFILE}" "empty"
+                        false
+                        return
+                        ;;
+                    *)
+                        ;;
+                esac
+            fi
+
+            # get last modified date
+            FILE_DATE=$(stat -c %y "${LOGFILE}" | cut -b 1-10)
+            date_to_int "${FILE_DATE}"
+            if [ ${__this_integer_date} -ge ${start_date} ] ; then
+                __this_reason+=":in-range ${FILE_DATE}"
+                true
+            else
+                __this_reason+=":out-of-range ${FILE_DATE}"
+                false
+            fi
+            return
+        fi
+    done
+
+    # O.K. if we get here then this filename is not in the static list
+    if [ ! -z "${filetype}" ] ; then
+
+        case ${filetype} in
+
+            *directory*)
+                # Skip over a directory only path.
+                # No worries, the files in that directory will be handled.
+                __this_reason+="directory"
+                false
+                return
+                ;;
+
+            *ASCII*|*text*|*compressed*)
+
+                if [[ ${filetype} == *"compressed"* ]] ; then
+                    fileext=${LOGFILE##*.}
+                    case "${fileext}" in
+                        gz)
+                            tmpfile=$(mktemp)
+                            #__this_reason+="gzipped"
+                            zcat "${LOGFILE}" | head -5 > "$tmpfile"
+                            zcat "${LOGFILE}" | tail -5 >> "$tmpfile"
+
+                            # save the current compressed log filename
+                            # so that it can be restored after the
+                            # recursion call below
+                            LOGFILE_save="${LOGFILE}"
+                            want_this_file "$tmpfile"
+                            rc=${?}
+                            LOGFILE="${LOGFILE_save}"
+
+                            # cleanup ; get rid of the temp file
+                            rm -f "$tmpfile" 2>/dev/null
+                            if [ ${rc} -eq 0 ] ; then
+                                true
+                            else
+                                false
+                            fi
+                            return
+                            ;;
+                        tgz)
+                            __this_reason+="tarball"
+                            true
+                            return
+                            ;;
+                        *)
+                            __this_reason+="compress:[${fileext}]"
+                            true
+                            return
+                            ;;
+                    esac
+                fi
+
+                # Read the first log in the file
+                HEAD_DATE=$(head -1 "${LOGFILE}")
+
+                ##############################################################
+                # Minor Exception Case: empty/short first log
+                ##############################################################
+                #
+                # handle one empty or short first line by fetching the
+                # second log instead
+                #
+                ##############################################################
+
+                if [ ${#HEAD_DATE} -lt 10 ] ; then
+                    HEAD_DATE=$(head -2 "${LOGFILE}" | sed -n '2p' | cut -b 1-11)
+                fi
+
+
+                ##############################################################
+                # Typical Case: YYYY-MM-DD
+                ##############################################################
+                #
+                # check for the most typical date format.
+                #
+                ##############################################################
+
+                if [[ ${HEAD_DATE} =~ ^[0-9]{4}-[0-9]{2}-[0-9]{2} ]]; then
+                    __this_reason+="typical"
+                    TAIL_DATE=$(tail -1 "${LOGFILE}" | cut -b 1-11)
+                    if [[ ${TAIL_DATE} =~ ^[0-9]{4}-[0-9]{2}-[0-9]{2} ]]; then
+
+                        # a call to 'is_in_range' returns false (1) if this
+                        # file's logs are all outside the date range
+                        is_in_range "${HEAD_DATE:0:10}" "${TAIL_DATE:0:10}"
+                        if [ $? -eq 0 ] ; then
+                            true
+                        else
+                            false
+                        fi
+                        return
+
+                    else
+
+                        #######################################################
+                        # Exception Case: Unrecognized date format in last log
+                        #######################################################
+                        #
+                        # try the second last line. This case is typical in
+                        # cron.log in 15.12 where MAIL logs send a spurious
+                        # ')' as a second log. It also covers log files that
+                        # have blank lines between logs, leaving a blank line
+                        # as the last log.
+                        #
+                        # this exception tries the second last log instead.
+                        #
+                        #######################################################
+                        TAIL_DATE=$(tail -2 "${LOGFILE}" | sed -n '1p' | cut -b 1-11)
+                        if [[ ${TAIL_DATE} =~ ^[0-9]{4}-[0-9]{2}-[0-9]{2} ]]; then
+
+                            is_in_range "${HEAD_DATE:0:10}" "${TAIL_DATE:0:10}"
+                            if [ $? -eq 0 ] ; then
+                                true
+                            else
+                                false
+                            fi
+                            return
+
+                        else
+                            # the tail date could not be parsed ;
+                            # fall back to a head-date-only decision
+                            __this_reason+=":invalid-tail-date"
+
+                            date_to_int "${HEAD_DATE}"
+                            head_date=${__this_integer_date}
+
+                            # the tail date is unknown ;
+                            # include this file as long as the head date
+                            # is before the end date
+                            if [ ${head_date} -lt ${end_date} ] ; then
+                                true
+                            else
+                                false
+                            fi
+                            return
+                        fi
+                    fi
+
+                else
+
+                    ###########################################################
+                    # Exception Case 1: log date prefix starts with '['
+                    ###########################################################
+                    #
+                    # logdate starts with a '[' ... [2017-10-02
+                    #
+                    # In this case we just recognize it and increment past it
+                    # and then assume the last log will have the same format
+                    #
+                    ###########################################################
+
+                    if [ "${HEAD_DATE:0:1}" == "[" ] ; then
+                        __this_reason+="exception1"
+                        HEAD_DATE=${HEAD_DATE:1:11}
+                        if [[ ${HEAD_DATE} =~ ^[0-9]{4}-[0-9]{2}-[0-9]{2} ]]; then
+
+                            TAIL_DATE=$(tail -1 "${LOGFILE}" | cut -b 2-11)
+                            if [[ ${TAIL_DATE} =~ ^[0-9]{4}-[0-9]{2}-[0-9]{2} ]]; then
+                                __this_reason+=".1"
+                                is_in_range "${HEAD_DATE:0:10}" "${TAIL_DATE:0:10}"
+                                if [ $? -eq 0 ] ; then
+                                    true
+                                else
+                                    false
+                                fi
+                                return
+                            else
+                                TAIL_DATE=$(tail -1 "${LOGFILE}" | cut -b 1-10)
+                                if [[ ${TAIL_DATE} =~ ^[0-9]{4}-[0-9]{2}-[0-9]{2} ]]; then
+                                    __this_reason+=".2"
+                                    is_in_range "${HEAD_DATE:0:10}" "${TAIL_DATE:0:10}"
+                                    if [ $? -eq 0 ] ; then
+                                        true
+                                    else
+                                        false
+                                    fi
+                                    return
+
+                                else
+
+                                    if [ "${TAIL_DATE:0:1}" == "[" ] ; then
+                                        __this_reason+=".3"
+                                        TAIL_DATE=${TAIL_DATE:1:11}
+                                        if [[ ${TAIL_DATE} =~ ^[0-9]{4}-[0-9]{2}-[0-9]{2} ]]; then
+                                            is_in_range "${HEAD_DATE}" "${TAIL_DATE}"
+                                            if [ $? -eq 0 ] ; then
+                                                true
+                                            else
+                                                false
+                                            fi
+                                            return
+                                        else
+                                            __this_reason+=":invalid-tail-date"
+                                            true
+                                            return
+                                        fi
+                                    else
+                                        __this_reason+=":tail-date-not-found"
+                                        is_in_range "${HEAD_DATE}" "${EOT_DATE}"
+                                        if [ $? -eq 0 ] ; then
+                                            true
+                                        else
+                                            false
+                                        fi
+                                        return
+                                    fi
+                                fi
+                            fi
+                        else
+                            # /var/log/dmesg is typical of this case ;
+                            # no log date and many logs start with [ uptime]
+                            __this_reason+=":invalid-head-date"
+                            true
+                            return
+                        fi
+
+                    ###########################################################
+                    # Exception Case 2: journal.log handling
+                    ###########################################################
+                    #
+                    # the first log in the file contains the start and stop
+                    # dates :
+                    #
+                    # "-- Logs begin at Thu 2017-07-06 12:28:35 UTC, end at Thu 2017-07-06 12:33:31 UTC. --"
+                    #                      ^^^^^^^^^^                            ^^^^^^^^^^
+                    #
+                    # This exception case gets the head and tail log dates
+                    # from this first log.
+                    ###########################################################
+
+                    elif [ "${HEAD_DATE:0:13}" == "-- Logs begin" ] ; then
+                        __this_reason+="exception2"
+
+                        # need to get more of the line
+                        HEAD_DATE=$(head -1 "${LOGFILE}")
+
+                        is_in_range "${HEAD_DATE:21:10}" "${HEAD_DATE:57:10}"
+                        if [ $? -eq 0 ] ; then
+                            true
+                        else
+                            false
+                        fi
+                        return
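+
+                    # For the sample '-- Logs begin' line shown above, the
+                    # fixed substring offsets used in this case resolve as:
+                    #
+                    #    ${HEAD_DATE:21:10} -> "2017-07-06"  (begin date)
+                    #    ${HEAD_DATE:57:10} -> "2017-07-06"  (end date)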
+
+                    ###########################################################
+                    # Exception Case 3: openstack.log handling
+                    ###########################################################
+                    #
+                    # some logs like openstack.log have some logs that are
+                    # prefixed by 'keystone:log'. This case handles that.
+                    #
+                    ###########################################################
+                    elif [ "${HEAD_DATE:0:13}" == "keystone:log " ] ; then
+                        __this_reason+="exception3"
+
+                        # need to get more of the line
+                        HEAD_DATE="${HEAD_DATE:13:10}"
+                        TAIL_DATE=$(tail -1 "${LOGFILE}")
+
+                        if [ "${TAIL_DATE:0:13}" == "keystone:log " ] ; then
+                            TAIL_DATE="${TAIL_DATE:13:10}"
+                        else
+                            TAIL_DATE="${TAIL_DATE:0:10}"
+                        fi
+
+                        is_in_range "${HEAD_DATE}" "${TAIL_DATE}"
+                        if [ $? -eq 0 ] ; then
+                            true
+                        else
+                            false
+                        fi
+                        return
+
+                    else
+
+                        #######################################################
+                        # Exception Case 4: horizon.log
+                        #######################################################
+                        #
+                        # Search the first and last 30 logs for a valid date.
+                        # This should handle seeing a traceback at the head or
+                        # tail of the log file.
+                        #
+                        #######################################################
+                        __this_reason+="exception4"
+                        temp_head=$(head -30 "${LOGFILE}")
+                        for ((loop_head=1;loop_head<31;loop_head++))
+                        do
+                            HEAD_DATE=$(echo "${temp_head}" | sed -n "${loop_head}"p | cut -b 1-10)
+                            if [[ ${HEAD_DATE} =~ ^[0-9]{4}-[0-9]{2}-[0-9]{2} ]]; then
+                                temp_tail=$(tail -30 "${LOGFILE}")
+                                for ((loop_tail=1;loop_tail<31;loop_tail++))
+                                do
+                                    TAIL_DATE=$(echo "${temp_tail}" | sed -n ${loop_tail}p | cut -b 1-10)
+                                    if [[ ${TAIL_DATE} =~ ^[0-9]{4}-[0-9]{2}-[0-9]{2} ]]; then
+
+                                        is_in_range "${HEAD_DATE}" "${TAIL_DATE}"
+                                        if [ $? -eq 0 ] ; then
+                                            true
+                                        else
+                                            false
+                                        fi
+                                        return
+
+                                    fi
+                                done
+
+                                # default to including it if no date at
+                                # the end of the file is found
+                                true
+                                return
+                            fi
+                        done
+
+                        ######################################################
+                        # Exception Case 5:
+                        ######################################################
+                        #
+                        # Otherwise the file has no date or the date
+                        # format is unrecognized so just include the file
+                        # regardless of its date.
+                        #
+                        ######################################################
+                        __this_reason="nodate"
+                        true
+                        return
+                    fi
+                fi
+                ;;
+
+            *archive*)
+
+                # Archive files like .tar are not extracted.
+                # Instead an archive is only collected if its last modified
+                # date is after the start date.
+
+                __this_reason+="archive"
+                FILE_DATE=$(stat -c %y "${LOGFILE}" | cut -b 1-10)
+                date_to_int "${FILE_DATE}"
+                if [ ${__this_integer_date} -ge ${start_date} ] ; then
+                    __this_reason+=":in-range ${FILE_DATE}"
+                    true
+                else
+                    __this_reason+=":out-of-range ${FILE_DATE}"
+                    false
+                fi
+                return
+                ;;
+
+            *empty*)
+                __this_reason="empty"
+                track "${LOGFILE}" "empty"
+                false
+                return
+                ;;
+
+            *data*)
+                __this_reason="data"
+                true
+                return
+                ;;
+
+            *executable*)
+                __this_reason="executable"
+                true
+                return
+                ;;
+
+            # very short file (no magic)
+            *"very short file"*)
+                __this_reason="small"
+                true
+                return
+                ;;
+
+            *link*)
+                __this_reason="link"
+                false
+                return
+                ;;
+
+            *swap*)
+                __this_reason="swap"
+                false
+                return
+                ;;
+
+            *fifo*)
+                __this_reason="fifo"
+                false
+                return
+                ;;
+
+            *socket*)
+                __this_reason="socket"
+                false
+                return
+                ;;
+
+            *)
+                __this_reason="other"
+                true
+                return
+                ;;
+        esac
+    else
+        __this_reason="unknown"
+        wlog "Adding ${LOGFILE} ; unknown filetype"
+        true
+        return
+    fi
+
+    # catch all default
+    true
+    return
+}
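+
+# Note on the idiom above: want_this_file "returns" its boolean as the
+# function exit status (the bare 'true' / 'false' executed just before
+# each 'return'), so callers consume it via ${?} :
+#
+#    want_this_file "${logfile}"
+#    handle_response "${logfile}" "${?}"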
+#############################################################################
+#
+# 'manage_file' adds the specified file to either the 'include' or 'exclude'
+#               reason lists. In the include case the most important part of
+#               this function appends the filename to the file specified by
+#               "VAR_LOG_INCLUDE_LIST" which is the file that collect_host
+#               uses to know what files in /var/log need to be included in
+#               the collect tarball.
+#
+#############################################################################
+
+function manage_file()
+{
+    local filename="${1}"
+    local action="${2}"
+    local reason="${3}"
+
+    if [ "${action}" == "${EXCLUDE_FILE}" ] ; then
+        echo "${filename} excluded (${reason})" >> "${EXC_FILE_LIST}"
+    else
+        echo "${filename} included (${reason})" >> "${INC_FILE_LIST}"
+
+        # add the file to the list of files to be collected
+        echo "${filename}" >> ${VAR_LOG_INCLUDE_LIST}
+    fi
+
+    dlog "${action}: ${filename} (${reason})"
+}
+
+#############################################################################
+#
+# 'handle_response' adds or excludes the specified file based on
+#                   argument $2 being  0 - true  - include or
+#                                     !0 - false - exclude
+#
+# $1 - file
+# $2 - include control ( true or false )
+#
+#############################################################################
+
+function handle_response()
+{
+    local logfile="${1}"
+    local include="${2}"
+
+    if [ "${include}" -eq 0 ] ; then
+        inclist=("${inclist[@]}" ${logfile})
+        track "${logfile}" "${INCLUDE_FILE}"
+
+    else
+        exclist=("${exclist[@]}" ${logfile})
+        track "${logfile}" "${EXCLUDE_FILE}"
+    fi
+
+    # record any that have been tagged as 'nodate' as
+    # candidates for special handling.
+    if [[ "${__this_reason}" == *"nodate"* ]] ; then
+        echo "${logfile}" >> "${NOD_FILE_LIST}"
+    fi
+}
+
+###########################################################################
+###########################################################################
+#
+# Let's start looking at the files now ...
+#
+# Get all the files in the /var/log base dir (not the subdirectories)
+#
+###########################################################################
+###########################################################################
+
+# get a list of the files in "baselogdir" ; aka /var/log
+# we will look at the subdirectories later.
+dirlist+=$(find ${baselogdir} -mindepth 1 -maxdepth 1 -type f)
+
+#
+# Debug:
+#
+# To debug handling a specific file as a filelist override.
+# This clears the list in favor of the specific file specified as
+# argument 5 on the command line.
+#
+if [ "${5}" != "" ] ; then
+    dlog "Overriding dirlist with specified file:${5}"
+    dirlist=("${5}")
+fi
+
+# echo "${baselogdir} filelist: ... ${dirlist}..."
+for logfile in ${dirlist}
+do
+    # echo "File: ${logfile}"
+    __this_reason=""
+    track "${logfile}" "totals"
+    want_this_file "${logfile}"
+    handle_response "${logfile}" "${?}"
+done
+
+###########################################################################
+# Get all the files in baselogdir subdirectories                          #
+###########################################################################
+
+subdirlist=$(find ${baselogdir} -mindepth 1 -maxdepth 20 -type d)
+
+#
+# Debug:
+#
+# To debug handling a specific file that is in a /var/log subdirectory as
+# a filelist override.
+#
+if [ "${5}" != "" ] ; then
+    dlog "Overriding subdirlist with specified file:${5}"
+    subdirlist=("")
+fi
+
+# echo "${baselogdir} subdirlist ${subdirlist}..."
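+
+# Note: these loops iterate over unquoted command substitutions and so
+# rely on word splitting ; this assumes no whitespace in /var/log path
+# names, which holds for the platform logs handled here.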
+for logdir in ${subdirlist}
+do
+    __this_reason=""
+
+    # this find must find more than just its own dir
+    # so we compare to greater than one
+    if [ $(find "${logdir}" | wc -l) -gt 1 ]; then
+        for logfile in ${logdir}/*
+        do
+            __this_reason=""
+            track "$logfile" "totals"
+            want_this_file "$logfile"
+            handle_response "$logfile" "$?"
+        done
+    else
+        __this_reason="empty"
+        manage_file "${logdir}" "${EXCLUDE_FILE}" "empty directory"
+    fi
+done
+
+
+dlog "Include List: ${INC_FILE_LIST}"
+for inc in "${inclist[@]}"
+do
+    if [ ${#inc} -gt 2 ] ; then
+        dlog "including ${inc}"
+        # echo "${inc}" >> "${INC_FILE_LIST}.summary"
+    fi
+done
+
+
+dlog "Exclude List: ${EXC_FILE_LIST}"
+for exc in "${exclist[@]}"
+do
+    if [ ${#exc} -gt 2 ] ; then
+        dlog "excluding ${exc}"
+        # echo "${exc}" >> "${EXC_FILE_LIST}.summary"
+    fi
+done
+
+summary
+
+exit 0
diff --git a/middleware/util/recipes-common/collector/scripts/collect_host b/middleware/util/recipes-common/collector/scripts/collect_host
new file mode 100755
index 0000000..873b191
--- /dev/null
+++ b/middleware/util/recipes-common/collector/scripts/collect_host
@@ -0,0 +1,482 @@
+#! /bin/bash
+########################################################################
+#
+# Copyright (c) 2016 Wind River Systems, Inc.
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+########################################################################
+
+#
+# Import commands, variables and convenience functions available to
+# all collectors ; common and user defined.
+#
+source /usr/local/sbin/collect_utils
+
+KEYRING_PATH="/opt/platform/.keyring"
+if [ -e ${KEYRING_PATH} ] ; then
+    CRED=`find /opt/platform/.keyring -name .CREDENTIAL`
+    if [ ! -z "${CRED}" ] ; then
+        NOVAOPENRC="/etc/nova/openrc"
+        if [ -e ${NOVAOPENRC} ] ; then
+            source ${NOVAOPENRC} 2>/dev/null 1>/dev/null
+        fi
+    fi
+fi
+
+#
+# parse input parameters
+#
+COLLECT_NAME="${1}"
+DEBUG=${8}
+set_debug_mode ${DEBUG}
+
+# Calling parms
+#
+# 1 = collect name
+# 2 = start date option
+# 3 = start date
+# 4 = "any" (ignored - no longer used ; kept to support upgrades/downgrades)
+# 5 = end date option
+# 6 = end date
+# 7 = "any" (ignored - no longer used ; kept to support upgrades/downgrades)
+# 8 = debug mode
+logger -t ${COLLECT_TAG} "${0} ${1} ${2} ${3} ${4} ${5} ${6} ${7} ${8}"
+
+# parse out the start date/time if present
+STARTDATE_RANGE=false
+STARTDATE="any"
+if [ "${2}" == "${STARTDATE_OPTION}" ] ; then
+    if [ "${3}" != "any" -a ${#3} -gt 7 ] ; then
+        STARTDATE_RANGE=true
+        STARTDATE="${3}"
+    fi
+fi
+
+# parse out the end date/time if present
+ENDDATE_RANGE=false
+ENDDATE="any"
+if [ "${5}" == "${ENDDATE_OPTION}" ] ; then
+    if [ "${6}" != "any" -a ${#6} -gt 7 ] ; then
+        ENDDATE_RANGE=true
+        ENDDATE="${6}"
+    fi
+fi
+
+COLLECT_BASE_DIR="/scratch"
+EXTRA="var/extra"
+hostname="${HOSTNAME}"
+COLLECT_NAME_DIR="${COLLECT_BASE_DIR}/${COLLECT_NAME}"
+EXTRA_DIR="${COLLECT_NAME_DIR}/${EXTRA}"
+TARBALL="${COLLECT_NAME_DIR}.tgz"
+COLLECT_PATH="/etc/collect.d"
+RUN_EXCLUDE="/etc/collect/run.exclude"
+ETC_EXCLUDE="/etc/collect/etc.exclude"
+COLLECT_INCLUDE="/var/run /etc /root"
+FLIGHT_RECORDER_PATH="var/lib/sm/"
+FLIGHT_RECORDER_FILE="sm.eru.v1"
+VAR_LOG_INCLUDE_LIST="/tmp/${COLLECT_NAME}.lst"
+COLLECT_DIR_PCENT_CMD="df --output=pcent ${COLLECT_BASE_DIR}"
+COLLECT_DIR_USAGE_CMD="df -h ${COLLECT_BASE_DIR}"
+COLLECT_DATE="/usr/local/sbin/collect_date"
+
+function log_space()
+{
+    local msg=${1}
+
+    space="`${COLLECT_DIR_USAGE_CMD}`"
+    space1=`echo "${space}" | grep -v Filesystem`
+    ilog "${COLLECT_BASE_DIR} ${msg} ${space1}"
+}
+
+function space_precheck()
+{
+    space="`${COLLECT_DIR_PCENT_CMD}`"
+    space1=`echo "${space}" | grep -v Use`
+    size=`echo ${space1} | cut -f 1 -d '%'`
+    if [ ${size} -ge 0 -a ${size} -le 100 ] ; then
+        ilog "${COLLECT_BASE_DIR} is $size% full"
+        if [ ${size} -ge ${MIN_PERCENT_SPACE_REQUIRED} ] ; then
+            wlog "${HOSTNAME}:${COLLECT_BASE_DIR} does not have enough available space to perform collect"
+            wlog "${HOSTNAME}:${COLLECT_BASE_DIR} must be below ${MIN_PERCENT_SPACE_REQUIRED}% to perform collect"
+            wlog "Increase available space in ${HOSTNAME}:${COLLECT_BASE_DIR} and retry operation."
+            echo "${FAIL_INSUFFICIENT_SPACE_STR}"
+            exit ${FAIL_INSUFFICIENT_SPACE}
+        fi
+    else
+        wlog "unable to parse available space from '${COLLECT_DIR_PCENT_CMD}' output"
+    fi
+}
+
+space_precheck
+
+CURR_DIR=`pwd`
+mkdir -p ${COLLECT_NAME_DIR}
+cd ${COLLECT_NAME_DIR}
+
+# create the dump target extra-stuff directory
+mkdir -p ${EXTRA_DIR}
+
+RETVAL=0
+
+# Remove any previous collect error log.
+# Start this collect with an empty file.
+#
+# stderr is directed to this log during the collect process.
+# By searching this log after collect_host is run we can find
+# errors that occurred during collect.
+# The only real error that we care about right now is the
+#
+#    "No space left on device" error
+#
+rm -f ${COLLECT_ERROR_LOG}
+touch ${COLLECT_ERROR_LOG}
+chmod 644 ${COLLECT_ERROR_LOG}
+echo "`date '+%F %T'` :${COLLECT_NAME_DIR}" > ${COLLECT_ERROR_LOG}
+
+ilog "creating local collect tarball ${COLLECT_NAME_DIR}.tgz"
+
+################################################################################
+# Run collect scripts to check system status
+################################################################################
+function collect_parts()
+{
+    if [ -d ${COLLECT_PATH} ]; then
+        for i in ${COLLECT_PATH}/*; do
+            if [ -f $i ]; then
+                $i ${COLLECT_NAME_DIR} ${EXTRA_DIR} ${hostname}
+            fi
+        done
+    fi
+}
+
+
+function collect_extra()
+{
+    # dump process lists
+    LOGFILE="${EXTRA_DIR}/process.info"
+    echo "${hostname}: Process Info ......: ${LOGFILE}"
+
+    delimiter ${LOGFILE} "ps -e -H -o ..."
+    ${PROCESS_DETAIL_CMD} >> ${LOGFILE}
+
+    # Collect process and thread info (tree view)
+    delimiter ${LOGFILE} "pstree --arguments --ascii --long --show-pids"
+    pstree --arguments --ascii --long --show-pids >> ${LOGFILE}
+
+    # Collect process, thread and scheduling info (compute subfunction only)
+    # (also gets process 'affinity', which is useful on computes)
+    which ps-sched.sh >/dev/null 2>&1
+    if [ $? -eq 0 ]; then
+        delimiter ${LOGFILE} "ps-sched.sh"
+        ps-sched.sh >> ${LOGFILE}
+    fi
+
+    # Collect process, thread, scheduling, and elapsed time info.
+    # This has everything that ps-sched.sh does, except for the cpu
+    # affinity mask ; it adds: stime, etime, time, wchan and tty.
+    delimiter ${LOGFILE} "ps -eL -o pid,lwp,ppid,state,class,nice,rtprio,priority,psr,stime,etime,time,wchan:16,tty,comm,command"
+    ps -eL -o pid,lwp,ppid,state,class,nice,rtprio,priority,psr,stime,etime,time,wchan:16,tty,comm,command >> ${LOGFILE}
+
+    # Various host attributes
+    LOGFILE="${EXTRA_DIR}/host.info"
+    echo "${hostname}: Host Info .........: ${LOGFILE}"
+
+    # CGCS build info
+    delimiter ${LOGFILE} "${BUILD_INFO_CMD}"
+    ${BUILD_INFO_CMD} >> ${LOGFILE}
+
+    delimiter ${LOGFILE} "uptime"
+    uptime >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG}
+
+    delimiter ${LOGFILE} "cat /proc/cmdline"
+    cat /proc/cmdline >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG}
+
+    delimiter ${LOGFILE} "cat /proc/version"
+    cat /proc/version >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG}
+
+    delimiter ${LOGFILE} "cat /proc/cpuinfo"
+    cat /proc/cpuinfo >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG}
+
+    delimiter ${LOGFILE} "ip addr show"
+    ip addr show >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG}
+
+    delimiter ${LOGFILE} "lspci -nn"
+    lspci -nn >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG}
+
+    delimiter ${LOGFILE} "find /sys/kernel/iommu_groups/ -type l"
+    find /sys/kernel/iommu_groups/ -type l >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG}
+
+    # networking totals
+    delimiter ${LOGFILE} "cat /proc/net/dev"
+    cat /proc/net/dev >> ${LOGFILE}
+
+    delimiter ${LOGFILE} "dmidecode"
+    dmidecode >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG}
+
+    # summary of scheduler tunable settings
+    delimiter ${LOGFILE} "cat /proc/sched_debug | head -15"
+    cat /proc/sched_debug | head -15 >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG}
+
+    if [ "${SKIP_MASK}" = "true" ]; then
+        delimiter ${LOGFILE} "facter (excluding ssh info)"
+        facter | grep -iv '^ssh' >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG}
+    else
+        delimiter ${LOGFILE} "facter"
+        facter >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG}
+    fi
+
+    if [[ "$nodetype" == "compute" || "$subfunction" == *"compute"* ]] ; then
+        delimiter ${LOGFILE} "topology"
+        topology >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG}
+    fi
+
+    LOGFILE="${EXTRA_DIR}/memory.info"
+    echo "${hostname}: Memory Info .......: ${LOGFILE}"
+
+    delimiter ${LOGFILE} "cat /proc/meminfo"
+    cat /proc/meminfo >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG}
+
+    delimiter ${LOGFILE} "cat /sys/devices/system/node/node?/meminfo"
+    cat /sys/devices/system/node/node?/meminfo >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG}
+
+    delimiter ${LOGFILE} "cat /proc/slabinfo"
+    log_slabinfo ${LOGFILE} 2>>${COLLECT_ERROR_LOG}
+
+    delimiter ${LOGFILE} "ps -e -o ppid,pid,nlwp,rss:10,vsz:10,cmd --sort=-rss"
+    ps -e -o ppid,pid,nlwp,rss:10,vsz:10,cmd --sort=-rss >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG}
+
+    # mounted hugepages
+    delimiter ${LOGFILE} "lsof | grep /mnt/huge"
+    lsof | awk '($3 !~ /^[0-9]+$/ && /\/mnt\/huge/) || NR==1 {print $0;}' >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG}
+
+    # hugepages numa mapping
+    delimiter ${LOGFILE} "grep huge /proc/*/numa_maps"
+    grep -e " huge " /proc/*/numa_maps >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG}
+
+    # rootfs and tmpfs usage
+    delimiter ${LOGFILE} "df -h -H -T --local -t rootfs -t tmpfs"
+    df -h -H -T --local -t rootfs -t tmpfs >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG}
+
+    LOGFILE="${EXTRA_DIR}/filesystem.info"
+    echo "${hostname}: Filesystem Info ...: ${LOGFILE}"
+
+    # rootfs and tmpfs usage
+    delimiter ${LOGFILE} "df -h -H -T --local -t rootfs -t tmpfs"
+    df -h -H -T --local -t rootfs -t tmpfs >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG}
+
+    # disk space usage
+    delimiter ${LOGFILE} "df -h -H -T --local -t ext2 -t ext3 -t ext4 -t xfs --total"
+    df -h -H -T --local -t ext2 -t ext3 -t ext4 -t xfs --total >> ${LOGFILE}
2>>${COLLECT_ERROR_LOG} + + # disk inodes usage + delimiter ${LOGFILE} "df -h -H -T --local -i -t ext2 -t ext3 -t ext4 -t xfs --total" + df -h -H -T --local -i -t ext2 -t ext3 -t ext4 -t xfs --total >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG} + + # disks by-path values + delimiter ${LOGFILE} "ls -lR /dev/disk" + ls -lR /dev/disk >> ${LOGFILE} + + # disk summary (requires sudo/root) + delimiter ${LOGFILE} "fdisk -l" + fdisk -l >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG} + + delimiter ${LOGFILE} "cat /proc/scsi/scsi" + cat /proc/scsi/scsi >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG} + + # Controller specific stuff + if [ "$nodetype" = "controller" ] ; then + + delimiter ${LOGFILE} "cat /proc/drbd" + cat /proc/drbd >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG} + + delimiter ${LOGFILE} "/sbin/drbdadm dump" + /sbin/drbdadm dump >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG} + + fi + + # LVM summary + delimiter ${LOGFILE} "/usr/sbin/vgs --version ; /usr/sbin/pvs --version ; /usr/sbin/lvs --version" + /usr/sbin/vgs --version >> ${LOGFILE} + /usr/sbin/pvs --version >> ${LOGFILE} + /usr/sbin/lvs --version >> ${LOGFILE} + + delimiter ${LOGFILE} "/usr/sbin/vgs --all --options all" + /usr/sbin/vgs --all --options all >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG} + + delimiter ${LOGFILE} "/usr/sbin/pvs --all --options all" + /usr/sbin/pvs --all --options all >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG} + + delimiter ${LOGFILE} "/usr/sbin/lvs --all --options all" + /usr/sbin/lvs --all --options all >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG} + + # iSCSI Information + LOGFILE="${EXTRA_DIR}/iscsi.info" + echo "${hostname}: iSCSI Information ......: ${LOGFILE}" + + if [ "$nodetype" = "controller" ] ; then + # Controller- LIO exported initiators summary + delimiter ${LOGFILE} "targetcli ls" + targetcli ls >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG} + + # Controller - LIO sessions + delimiter ${LOGFILE} "targetcli sessions detail" + targetcli sessions detail >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG} + + elif [[ "$nodetype" == "compute" || "$subfunction" == *"compute"* ]] ; then + # Compute - iSCSI initiator information + collect_dir=${EXTRA_DIR}/iscsi_initiator_info + mkdir -p ${collect_dir} + cp -rf /run/iscsi-cache/nodes/* ${collect_dir} + find ${collect_dir} -type d -exec chmod 750 {} \; + + # Compute - iSCSI initiator active sessions + delimiter ${LOGFILE} "iscsiadm -m session" + iscsiadm -m session >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG} + + # Compute - iSCSI udev created nodes + delimiter ${LOGFILE} "ls -la /dev/disk/by-path | grep \"iqn\"" + ls -la /dev/disk/by-path | grep "iqn" >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG} + fi + + LOGFILE="${EXTRA_DIR}/history.info" + echo "${hostname}: Bash History ......: ${LOGFILE}" + + # history + delimiter ${LOGFILE} "cat /home/wrsroot/.bash_history" + cat /home/wrsroot/.bash_history >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG} + + LOGFILE="${EXTRA_DIR}/interrupt.info" + echo "${hostname}: Interrupt Info ....: ${LOGFILE}" + + # interrupts + delimiter ${LOGFILE} "cat /proc/interrupts" + cat /proc/interrupts >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG} + + delimiter ${LOGFILE} "cat /proc/softirqs" + cat /proc/softirqs >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG} + + # Controller specific stuff + if [ "$nodetype" = "controller" ] ; then + netstat -pan > ${EXTRA_DIR}/netstat.info + fi + + LOGFILE="${EXTRA_DIR}/blockdev.info" + echo "${hostname}: Block Devices Info : ${LOGFILE}" + + # Collect block devices - show all sda and cinder devices, and size + delimiter ${LOGFILE} "lsblk" + lsblk >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG} + + # Collect block device topology - 
show devices and which io-scheduler
+    delimiter ${LOGFILE} "lsblk --topology"
+    lsblk --topology >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG}
+
+    # Collect SCSI devices - show devices and cinder attaches, etc
+    delimiter ${LOGFILE} "lsblk --scsi"
+    lsblk --scsi >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG}
+}
+
+log_space "before collect ......:"
+
+collect_extra
+collect_parts
+
+#
+# Handle the collect-after and collect-range cases first, then the
+# collect-before case in the elif clause
+#
+VAR_LOG="/var/log"
+if [ -e /www/var/log ]; then
+    VAR_LOG="$VAR_LOG /www/var/log"
+fi
+
+rm -f ${VAR_LOG_INCLUDE_LIST}
+
+if [ "${STARTDATE_RANGE}" == true ] ; then
+    if [ "${ENDDATE_RANGE}" == false ] ; then
+        ilog "collecting $VAR_LOG files containing logs after ${STARTDATE}"
+        ${COLLECT_DATE} ${STARTDATE} ${ENDDATE} ${VAR_LOG_INCLUDE_LIST} ${DEBUG} ""
+    else
+        ilog "collecting $VAR_LOG files containing logs between ${STARTDATE} and ${ENDDATE}"
+        ${COLLECT_DATE} ${STARTDATE} ${ENDDATE} ${VAR_LOG_INCLUDE_LIST} ${DEBUG} ""
+    fi
+elif [ "${ENDDATE_RANGE}" == true ] ; then
+    STARTDATE="20130101"
+    ilog "collecting $VAR_LOG files containing logs before ${ENDDATE}"
+    ${COLLECT_DATE} ${STARTDATE} ${ENDDATE} ${VAR_LOG_INCLUDE_LIST} ${DEBUG} ""
+else
+    ilog "collecting all of $VAR_LOG"
+    find $VAR_LOG ! -empty > ${VAR_LOG_INCLUDE_LIST}
+fi
+
+# Add VM console.log
+for i in /etc/nova/instances/*/console.log; do
+    if [ -e "$i" ]; then
+        tmp=`dirname $i`
+        mkdir -p ${COLLECT_NAME_DIR}/$tmp
+        cp $i ${COLLECT_NAME_DIR}/$tmp
+    fi
+done
+
+log_space "before first tar ....:"
+
+(cd ${COLLECT_NAME_DIR} ; ${IONICE_CMD} ${NICE_CMD} ${TAR_CMD} ${COLLECT_NAME_DIR}/${COLLECT_NAME}.tar -T ${VAR_LOG_INCLUDE_LIST} -X ${RUN_EXCLUDE} -X ${ETC_EXCLUDE} ${COLLECT_INCLUDE} 2>>${COLLECT_ERROR_LOG} 1>>${COLLECT_ERROR_LOG} )
+
+log_space "after first tar .....:"
+
+(cd ${COLLECT_NAME_DIR} ; ${IONICE_CMD} ${NICE_CMD} ${UNTAR_CMD} ${COLLECT_NAME_DIR}/${COLLECT_NAME}.tar 2>>${COLLECT_ERROR_LOG} 1>>${COLLECT_ERROR_LOG} )
+
+log_space "after first untar ...:"
+
+rm -f ${COLLECT_NAME_DIR}/${COLLECT_NAME}.tar
+
+log_space "after delete tar ....:"
+
+if [ "${SKIP_MASK}" != "true" ]; then
+    # Run password masking before final tar
+    dlog "running /usr/local/sbin/collect_mask_passwords ${COLLECT_NAME_DIR} ${EXTRA_DIR}"
+    /usr/local/sbin/collect_mask_passwords ${COLLECT_NAME_DIR} ${EXTRA_DIR}
+    log_space "after passwd masking :"
+fi
+
+(cd ${COLLECT_BASE_DIR} ; ${IONICE_CMD} ${NICE_CMD} ${TAR_ZIP_CMD} ${COLLECT_NAME_DIR}.tgz ${COLLECT_NAME} 2>/dev/null 1>/dev/null )
+
+log_space "after first tarball .:"
+
+mkdir -p ${COLLECT_NAME_DIR}/${FLIGHT_RECORDER_PATH}
+
+(cd /${FLIGHT_RECORDER_PATH} ; ${TAR_ZIP_CMD} ${COLLECT_NAME_DIR}/${FLIGHT_RECORDER_PATH}/${FLIGHT_RECORDER_FILE}.tgz ./${FLIGHT_RECORDER_FILE} 2>>${COLLECT_ERROR_LOG} 1>>${COLLECT_ERROR_LOG})
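+
+# Packaging recap of the steps above:
+#   1. tar the /var/log include list plus ${COLLECT_INCLUDE} into
+#      ${COLLECT_NAME}.tar inside ${COLLECT_NAME_DIR}
+#   2. untar it in place so the masking pass can edit the staged copies
+#   3. optionally run collect_mask_passwords over the staged tree
+#   4. tgz the staged tree, then add the flight recorder archive to it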
+# Pull in an updated user.log which contains the most recent collect logs
+# ... being sure to exclude any out-of-space logs
+tail -30 /var/log/user.log | grep "COLLECT:" | grep -v "${FAIL_OUT_OF_SPACE_STR}" >> ${COLLECT_ERROR_LOG}
+cp -a ${COLLECT_LOG} ${COLLECT_LOG}.last
+cp -a ${COLLECT_ERROR_LOG} ${COLLECT_LOG}
+cp -a ${COLLECT_LOG} ${COLLECT_NAME_DIR}/var/log
+
+log_space "with flight data ....:"
+
+(cd ${COLLECT_BASE_DIR} ; ${IONICE_CMD} ${NICE_CMD} ${TAR_ZIP_CMD} ${COLLECT_NAME_DIR}.tgz ${COLLECT_NAME} 2>>${COLLECT_ERROR_LOG} 1>>${COLLECT_ERROR_LOG} )
+
+log_space "after collect .......:"
+
+rm -rf ${COLLECT_NAME_DIR}
+rm -f ${VAR_LOG_INCLUDE_LIST}
+
+log_space "after cleanup .......:"
+
+# Check for collect errors.
+# Only the out of space error is enough to fail this host's collect
+collect_errors ${HOSTNAME}
+RC=${?}
+
+rm -f ${COLLECT_ERROR_LOG}
+
+if [ ${RC} -ne 0 ] ; then
+    rm -f ${COLLECT_NAME_DIR}.tgz
+    ilog "${FAIL_OUT_OF_SPACE_STR} ${COLLECT_BASE_DIR}"
+else
+    ilog "collect of ${COLLECT_NAME_DIR}.tgz succeeded"
+    echo "${collect_done}"
+fi
diff --git a/middleware/util/recipes-common/collector/scripts/collect_ima.sh b/middleware/util/recipes-common/collector/scripts/collect_ima.sh
new file mode 100755
index 0000000..dd35bfa
--- /dev/null
+++ b/middleware/util/recipes-common/collector/scripts/collect_ima.sh
@@ -0,0 +1,60 @@
+#! /bin/bash
+#
+# Copyright (c) 2017 Wind River Systems, Inc.
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+
+
+# Loads Up Utilities and Commands Variables
+source /usr/local/sbin/collect_parms
+source /usr/local/sbin/collect_utils
+
+function is_extended_profile()
+{
+    if [ ! -n "${security_profile}" ] || [ "${security_profile}" != "extended" ]; then
+        return 0
+    else
+        return 1
+    fi
+}
+
+SERVICE="ima"
+LOGFILE="${extradir}/${SERVICE}.info"
+
+###############################################################################
+# All Node Types
+###############################################################################
+
+is_extended_profile
+if [ "$?" = "0" ] ; then
+    exit 0
+fi
+
+echo "${hostname}: IMA Info ..........: ${LOGFILE}"
+
+delimiter ${LOGFILE} "IMA Kernel Modules"
+lsmod | grep ima >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG}
+
+delimiter ${LOGFILE} "Auditd status"
+service auditd status >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG}
+ps -aux | grep audit >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG}
+
+mkdir -p ${extradir}/integrity 2>>${COLLECT_ERROR_LOG}
+
+delimiter ${LOGFILE} "IMA Runtime Measurement and Violations cache"
+if [ -d "/sys/kernel/security/ima" ]; then
+    ls /sys/kernel/security/ima >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG}
+    cp -rf /sys/kernel/security/ima ${extradir}/integrity 2>>${COLLECT_ERROR_LOG}
+else
+    echo "ERROR: IMA Securityfs directory does not exist!" >> ${LOGFILE}
+fi
+
+cp -rf /etc/modprobe.d/ima.conf ${extradir}/integrity 2>>${COLLECT_ERROR_LOG}
+cp -rf /etc/modprobe.d/integrity.conf ${extradir}/integrity 2>>${COLLECT_ERROR_LOG}
+cp -rf /etc/ima.policy ${extradir}/integrity 2>>${COLLECT_ERROR_LOG}
+
+# make sure all these collected files are world readable
+chmod -R 755 ${extradir}/integrity
+
+exit 0
diff --git a/middleware/util/recipes-common/collector/scripts/collect_mask_passwords b/middleware/util/recipes-common/collector/scripts/collect_mask_passwords
new file mode 100644
index 0000000..b7f0e24
--- /dev/null
+++ b/middleware/util/recipes-common/collector/scripts/collect_mask_passwords
@@ -0,0 +1,123 @@
+#! /bin/bash
+#
+# Copyright (c) 2017 Wind River Systems, Inc.
+# +# SPDX-License-Identifier: Apache-2.0 +# + +COLLECT_NAME_DIR=$1 +EXTRA_DIR=$2 + +# Strip the passwords from assorted config files +for conffile in \ + ${COLLECT_NAME_DIR}/etc/aodh/aodh.conf \ + ${COLLECT_NAME_DIR}/etc/ceilometer/ceilometer.conf \ + ${COLLECT_NAME_DIR}/etc/cinder/cinder.conf \ + ${COLLECT_NAME_DIR}/etc/fm.conf \ + ${COLLECT_NAME_DIR}/etc/glance/glance-api.conf \ + ${COLLECT_NAME_DIR}/etc/glance/glance-registry.conf \ + ${COLLECT_NAME_DIR}/etc/heat/heat.conf \ + ${COLLECT_NAME_DIR}/etc/ironic/ironic.conf \ + ${COLLECT_NAME_DIR}/etc/keystone/keystone.conf \ + ${COLLECT_NAME_DIR}/etc/magnum/magnum.conf \ + ${COLLECT_NAME_DIR}/etc/murano/murano.conf \ + ${COLLECT_NAME_DIR}/etc/neutron/metadata_agent.ini \ + ${COLLECT_NAME_DIR}/etc/neutron/neutron.conf \ + ${COLLECT_NAME_DIR}/etc/nfv/nfv_plugins/nfvi_plugins/config.ini \ + ${COLLECT_NAME_DIR}/etc/nova/nova.conf \ + ${COLLECT_NAME_DIR}/etc/nslcd.conf \ + ${COLLECT_NAME_DIR}/etc/openldap/slapd.conf.backup \ + ${COLLECT_NAME_DIR}/etc/openstack-dashboard/local_settings \ + ${COLLECT_NAME_DIR}/etc/panko/panko.conf \ + ${COLLECT_NAME_DIR}/etc/patching/patching.conf \ + ${COLLECT_NAME_DIR}/etc/proxy/nova-api-proxy.conf \ + ${COLLECT_NAME_DIR}/etc/rabbitmq/murano-rabbitmq.config \ + ${COLLECT_NAME_DIR}/etc/rabbitmq/rabbitmq.config \ + ${COLLECT_NAME_DIR}/etc/sysinv/api-paste.ini \ + ${COLLECT_NAME_DIR}/etc/sysinv/sysinv.conf \ + ${COLLECT_NAME_DIR}/var/extra/platform/sysinv/*/sysinv.conf.default \ + ${COLLECT_NAME_DIR}/etc/mtc.ini + +do + if [ ! -f $conffile ]; then + continue + fi + + sed -i -r 's/^(admin_password) *=.*/\1 = xxxxxx/; + s/^(auth_encryption_key) *=.*/\1 = xxxxxx/; + s/^(bindpw) .*/\1 xxxxxx/; + s/^(rootpw) .*/\1 xxxxxx/; + s/^(connection) *=.*/\1 = xxxxxx/; + s/^( *credentials) *=.*/\1 = xxxxxx/; + s/^(metadata_proxy_shared_secret) *=.*/\1 = xxxxxx/; + s/^(password) *=.*/\1 = xxxxxx/; + s/^(rabbit_password) *=.*/\1 = xxxxxx/; + s/^(sql_connection) *=.*/\1 = xxxxxx/; + s/^(stack_domain_admin_password) *=.*/\1 = xxxxxx/; + s/^(transport_url) *=.*/\1 = xxxxxx/; + s/^(SECRET_KEY) *=.*/\1 = xxxxxx/; + s/^(keystone_auth_pw) *=.*/\1 = xxxxxx/; + s/\{default_pass, <<\".*\">>\}/\{default_pass, <<\"xxxxxx\">>\}/' $conffile +done + +find ${COLLECT_NAME_DIR} -name server-cert.pem | xargs --no-run-if-empty rm -f +rm -rf ${COLLECT_NAME_DIR}/var/extra/platform/config/*/ssh_config +rm -f ${COLLECT_NAME_DIR}/var/extra/platform/.keyring/*/python_keyring/crypted_pass.cfg +rm -f ${COLLECT_NAME_DIR}/var/extra/platform/puppet/*/hieradata/secure*.yaml + +# Mask user passwords in sysinv db dump +if [ -f ${COLLECT_NAME_DIR}/var/extra/database/sysinv.db.sql.txt ]; then + sed -i -r '/COPY i_user/, /^--/ s/^(([^\t]*\t){10})[^\t]*(\t.*)/\1xxxxxx\3/; + /COPY i_community/, /^--/ s/^(([^\t]*\t){5})[^\t]*(\t.*)/\1xxxxxx\3/; + /COPY i_trap_destination/, /^--/ s/^(([^\t]*\t){6})[^\t]*(\t.*)/\1xxxxxx\3/; + s/(identity\t[^\t]*\tpassword\t)[^\t]*/\1xxxxxx/' \ + ${COLLECT_NAME_DIR}/var/extra/database/sysinv.db.sql.txt +fi + +# Mask passwords in host profiles +grep -rl '\"name\": \"password\"' ${COLLECT_NAME_DIR}/var/extra/platform/sysinv/ \ + | xargs --no-run-if-empty perl -i -e ' + $prev=""; + while (<>) + { + if (/\"name\": \"password\"/) + { + $prev =~ s/\"value\": \".*\"/\"value\": \"xxxxxx\"/; + } + print $prev; + $prev=$_; + } + print $prev;' + +# Cleanup snmp +sed -i -r 's/(rocommunity[^ ]*).*/\1 xxxxxx/' ${COLLECT_NAME_DIR}/var/extra/platform/config/*/snmp/* +sed -i -r 's/(trap2sink *[^ ]*).*/\1 xxxxxx/' 
${COLLECT_NAME_DIR}/var/extra/platform/config/*/snmp/*
+
+# Mask passwords in bash.log and history logs
+USER_HISTORY_FILES=$(find ${COLLECT_NAME_DIR} -type f -name .bash_history 2>/dev/null)
+sed -i -r 's/(snmp-comm-(delete|show)) *((\"[^\"]*\"|'\''[^'"'"']*'"'"'|[^ ]*) *){1,}/\1 xxxxxx/;
+           s/(snmp.*) *(--community|-c) *(\"[^\"]*\"|'\''[^'"'"']*'"'"'|[^ ]*)/\1 \2 xxxxxx/;
+           s/(password)=(\"[^\"]*\"|'\''[^'"'"']*'"'"'|[^ ]*)/\1=xxxxxx/;
+           s/(openstack.*) *(--password) *(\"[^\"]*\"|'\''[^'"'"']*'"'"'|[^ ]*)/\1 \2 xxxxxx/;
+           s/(ldapmodifyuser.*userPassword *)(\"[^\"]*\"|'\''[^'"'"']*'"'"'|[^ ]*)/\1 xxxxxx/' \
+    ${USER_HISTORY_FILES} \
+    ${COLLECT_NAME_DIR}/var/extra/history.info \
+    ${COLLECT_NAME_DIR}/var/log/bash.log \
+    ${COLLECT_NAME_DIR}/var/log/auth.log \
+    ${COLLECT_NAME_DIR}/var/log/ldapscripts.log
+
+for f in ${COLLECT_NAME_DIR}/var/log/bash.log.*.gz \
+         ${COLLECT_NAME_DIR}/var/log/auth.log.*.gz \
+         ${COLLECT_NAME_DIR}/var/log/ldapscripts.log.*.gz
+do
+    zgrep -qE 'snmp|password' $f || continue
+    gunzip $f
+    unzipped=${f%%.gz}
+    sed -i -r 's/(snmp-comm-(delete|show)) *((\"[^\"]*\"|'\''[^'"'"']*'"'"'|[^ ]*) *){1,}/\1 xxxxxx/;
+               s/(snmp.*) *(--community|-c) *(\"[^\"]*\"|'\''[^'"'"']*'"'"'|[^ ]*)/\1 \2 xxxxxx/;
+               s/(password)=(\"[^\"]*\"|'\''[^'"'"']*'"'"'|[^ ]*)/\1=xxxxxx/;
+               s/(openstack.*) *(--password) *(\"[^\"]*\"|'\''[^'"'"']*'"'"'|[^ ]*)/\1 \2 xxxxxx/;
+               s/(ldapmodifyuser.*userPassword *)(\"[^\"]*\"|'\''[^'"'"']*'"'"'|[^ ]*)/\1 xxxxxx/' $unzipped
+    gzip $unzipped
+done
+
diff --git a/middleware/util/recipes-common/collector/scripts/collect_networking.sh b/middleware/util/recipes-common/collector/scripts/collect_networking.sh
new file mode 100755
index 0000000..d606632
--- /dev/null
+++ b/middleware/util/recipes-common/collector/scripts/collect_networking.sh
@@ -0,0 +1,61 @@
+#! /bin/bash
+#
+# Copyright (c) 2013-2014 Wind River Systems, Inc.
+# +# SPDX-License-Identifier: Apache-2.0 +# + + +# Loads Up Utilities and Commands Variables + +source /usr/local/sbin/collect_parms +source /usr/local/sbin/collect_utils + +SERVICE="networking" +LOGFILE="${extradir}/${SERVICE}.info" +echo "${hostname}: Networking Info ...: ${LOGFILE}" + +############################################################################### +# All nodes +############################################################################### +delimiter ${LOGFILE} "ip -s link" +ip -s link >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG} + +delimiter ${LOGFILE} "ip -s addr" +ip -s addr >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG} + +delimiter ${LOGFILE} "ip -s neigh" +ip -s neigh >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG} + +delimiter ${LOGFILE} "ip rule" +ip rule >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG} + +delimiter ${LOGFILE} "ip route" +ip route >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG} + +delimiter ${LOGFILE} "iptables -L -v -x -n" +iptables -L -v -x -n >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG} + +delimiter ${LOGFILE} "iptables -L -v -x -n -t nat" +iptables -L -v -x -n -t nat >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG} + +delimiter ${LOGFILE} "iptables -L -v -x -n -t mangle" +iptables -L -v -x -n -t mangle >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG} + + +############################################################################### +# Only Compute +############################################################################### +if [[ "$nodetype" = "compute" || "$subfunction" == *"compute"* ]] ; then + NAMESPACES=($(ip netns)) + for NS in ${NAMESPACES[@]}; do + delimiter ${LOGFILE} "${NS}" + ip netns exec ${NS} ip -s link + ip netns exec ${NS} ip -s addr + ip netns exec ${NS} ip -s neigh + ip netns exec ${NS} ip route + ip netns exec ${NS} ip rule + done >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG} +fi + +exit 0 diff --git a/middleware/util/recipes-common/collector/scripts/collect_nfv_vim.sh b/middleware/util/recipes-common/collector/scripts/collect_nfv_vim.sh new file mode 100644 index 0000000..4ccd309 --- /dev/null +++ b/middleware/util/recipes-common/collector/scripts/collect_nfv_vim.sh @@ -0,0 +1,45 @@ +#! /bin/bash +# +# Copyright (c) 2013-2016 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + +# Loads Up Utilities and Commands Variables +source /usr/local/sbin/collect_parms +source /usr/local/sbin/collect_utils + +LOGFILE="${extradir}/nfv-vim.info" +echo "${hostname}: NFV-Vim Info ......: ${LOGFILE}" + +function is_service_active() +{ + active=`sm-query service vim | grep "enabled-active"` + if [ -z "$active" ] ; then + return 0 + else + return 1 + fi +} + +############################################################################### +# Only Controller +############################################################################### + +if [ "$nodetype" = "controller" ] ; then + is_service_active + if [ "$?" = "0" ] ; then + exit 0 + fi + + # Assumes that database_dir is unique in /etc/nfv/vim/config.ini + DATABASE_DIR=$(awk -F "=" '/database_dir/ {print $2}' /etc/nfv/vim/config.ini) + + SQLITE_DUMP="/usr/bin/sqlite3 ${DATABASE_DIR}/vim_db_v1 .dump" + + delimiter ${LOGFILE} "dump database" + timeout 30 ${SQLITE_DUMP} >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG} +fi + +exit 0 + diff --git a/middleware/util/recipes-common/collector/scripts/collect_openstack.sh b/middleware/util/recipes-common/collector/scripts/collect_openstack.sh new file mode 100755 index 0000000..743dd5d --- /dev/null +++ b/middleware/util/recipes-common/collector/scripts/collect_openstack.sh @@ -0,0 +1,75 @@ +#! 
/bin/bash +# +# Copyright (c) 2013-2014 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + + +# Loads Up Utilities and Commands Variables +source /usr/local/sbin/collect_parms +source /usr/local/sbin/collect_utils + +function is_service_active() +{ + active=`sm-query service rabbit-fs | grep "enabled-active"` + if [ -z "$active" ] ; then + return 0 + else + return 1 + fi +} + +SERVICE="openstack" +LOGFILE="${extradir}/${SERVICE}.info" +echo "${hostname}: Openstack Info ....: ${LOGFILE}" + +############################################################################### +# Only Controller +############################################################################### +if [ "$nodetype" = "controller" ] ; then + + is_service_active + if [ "$?" = "0" ] ; then + exit 0 + fi + +delimiter ${LOGFILE} "openstack project list" +openstack project list >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG} + +delimiter ${LOGFILE} "openstack user list" +openstack user list >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG} + +MQ_STATUS="rabbitmqctl status" +delimiter ${LOGFILE} "${MQ_STATUS} | grep -e '{memory' -A30" +${MQ_STATUS} 2>/dev/null | grep -e '{memory' -A30 >> ${LOGFILE} + +delimiter ${LOGFILE} "RabbitMQ Queue Info" +num_queues=$(rabbitmqctl list_queues | wc -l); ((num_queues-=2)) +num_bindings=$(rabbitmqctl list_bindings | wc -l); ((num_bindings-=2)) +num_exchanges=$(rabbitmqctl list_exchanges | wc -l); ((num_exchanges-=2)) +num_connections=$(rabbitmqctl list_connections | wc -l); ((num_connections-=2)) +num_channels=$(rabbitmqctl list_channels | wc -l); ((num_channels-=2)) +arr=($(rabbitmqctl list_queues messages consumers memory | \ + awk '/^[0-9]/ {a+=$1; b+=$2; c+=$3} END {print a, b, c}')) +messages=${arr[0]}; consumers=${arr[1]}; memory=${arr[2]} +printf "%6s %8s %9s %11s %8s %8s %9s %10s\n" "queues" "bindings" "exchanges" "connections" "channels" "messages" "consumers" "memory" >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG} +printf "%6d %8d %9d %11d %8d %8d %9d %10d\n" $num_queues $num_bindings $num_exchanges $num_connections $num_channels $messages $consumers $memory >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG} + + if [ -e /opt/cgcs/ceilometer/pipeline.yaml ] ; then + cp /opt/cgcs/ceilometer/pipeline.yaml ${extradir}/ceilometer_pipeline.yaml + fi +fi + + + +############################################################################### +# collect does not retrieve /etc/keystone dir +# Additional logic included to copy /etc/keystone directory +############################################################################### + +mkdir -p ${extradir}/../../etc/ +cp -R /etc/keystone/ ${extradir}/../../etc +chmod -R 755 ${extradir}/../../etc/keystone + +exit 0 diff --git a/middleware/util/recipes-common/collector/scripts/collect_parms b/middleware/util/recipes-common/collector/scripts/collect_parms new file mode 100644 index 0000000..6600150 --- /dev/null +++ b/middleware/util/recipes-common/collector/scripts/collect_parms @@ -0,0 +1,29 @@ +#! /bin/bash +# +# Copyright (c) 2013-2014 Wind River Systems, Inc. 
+# +# SPDX-License-Identifier: Apache-2.0 +# + + +#echo "defaults: $1-$2-$3-$4" + +if [ -z ${1} ] ; then + basedir=/scratch +else + basedir=$1 +fi + +if [ -z ${2} ] ; then + extradir=$basedir/var/extra +else + extradir=$2 +fi + +if [ -z ${3} ] ; then + hostname=$HOSTNAME +else + hostname=$3 +fi + +mkdir -p ${extradir} diff --git a/middleware/util/recipes-common/collector/scripts/collect_patching.sh b/middleware/util/recipes-common/collector/scripts/collect_patching.sh new file mode 100755 index 0000000..3d696d2 --- /dev/null +++ b/middleware/util/recipes-common/collector/scripts/collect_patching.sh @@ -0,0 +1,45 @@ +#! /bin/bash +# +# Copyright (c) 2013-2014 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + + +# Loads Up Utilities and Commands Variables +source /usr/local/sbin/collect_parms +source /usr/local/sbin/collect_utils + +SERVICE="patching" +LOGFILE="${extradir}/${SERVICE}.info" +echo "${hostname}: Patching Info .....: ${LOGFILE}" + +############################################################################### +# All nodes +############################################################################### +delimiter ${LOGFILE} "smart channel --show" +smart channel --show 2>>${COLLECT_ERROR_LOG} >> ${LOGFILE} + +############################################################################### +# Only Controller +############################################################################### +if [ "$nodetype" = "controller" ] ; then + + delimiter ${LOGFILE} "sw-patch query" + sw-patch query 2>>${COLLECT_ERROR_LOG} >> ${LOGFILE} + + delimiter ${LOGFILE} "sw-patch query-hosts" + sw-patch query-hosts 2>>${COLLECT_ERROR_LOG} >> ${LOGFILE} + + delimiter ${LOGFILE} "sw-patch query-hosts --debug" + sw-patch query-hosts --debug 2>>${COLLECT_ERROR_LOG} >> ${LOGFILE} + + delimiter ${LOGFILE} "find /opt/patching" + find /opt/patching 2>>${COLLECT_ERROR_LOG} >> ${LOGFILE} + + delimiter ${LOGFILE} "find /www/pages/updates" + find /www/pages/updates 2>>${COLLECT_ERROR_LOG} >> ${LOGFILE} + +fi + +exit 0 diff --git a/middleware/util/recipes-common/collector/scripts/collect_psqldb.sh b/middleware/util/recipes-common/collector/scripts/collect_psqldb.sh new file mode 100755 index 0000000..410aefa --- /dev/null +++ b/middleware/util/recipes-common/collector/scripts/collect_psqldb.sh @@ -0,0 +1,122 @@ +#! /bin/bash +# +# Copyright (c) 2013-2014 Wind River Systems, Inc. 
+# +# SPDX-License-Identifier: Apache-2.0 +# + + +# Loads Up Utilities and Commands Variables +source /usr/local/sbin/collect_parms +source /usr/local/sbin/collect_utils + +# postgres database commands +PSQL_CMD="sudo -u postgres psql --pset pager=off -q" +PG_DUMP_CMD="sudo -u postgres pg_dump" + +SERVICE="database" +DB_DIR="${extradir}/database" +LOGFILE="${extradir}/database.info" +echo "${hostname}: Database Info .....: ${LOGFILE}" + +function is_service_active() +{ + active=`sm-query service postgres | grep "enabled-active"` + if [ -z "$active" ] ; then + return 0 + else + return 1 + fi +} + +############################################################################### +# All node types +############################################################################### +mkdir -p ${DB_DIR} + +function log_database() +{ + db_list=( $(${PSQL_CMD} -t -c "SELECT datname FROM pg_database WHERE datistemplate = false;") ) + for db in "${db_list[@]}" + do + echo "postgres database: ${db}" + ${PSQL_CMD} -d ${db} -c " + SELECT + table_schema, + table_name, + pg_size_pretty(table_size) AS table_size, + pg_size_pretty(indexes_size) AS indexes_size, + pg_size_pretty(total_size) AS total_size, + live_tuples, + dead_tuples + FROM ( + SELECT + table_schema, + table_name, + pg_table_size(table_name) AS table_size, + pg_indexes_size(table_name) AS indexes_size, + pg_total_relation_size(table_name) AS total_size, + pg_stat_get_live_tuples(table_name::regclass) AS live_tuples, + pg_stat_get_dead_tuples(table_name::regclass) AS dead_tuples + FROM ( + SELECT + table_schema, + table_name + FROM information_schema.tables + WHERE table_schema='public' + AND table_type='BASE TABLE' + ) AS all_tables + ORDER BY total_size DESC + ) AS pretty_sizes; + " + done >> ${1} +} + + + +DB_EXT=db.sql.txt +function database_dump() +{ + mkdir -p ${DB_DIR} + db_list=( $(${PSQL_CMD} -t -c "SELECT datname FROM pg_database WHERE datistemplate = false;") ) + for DB in "${db_list[@]}" + do + if [ "$DB" != "keystone" -a "$DB" != "ceilometer" ] ; then + echo "${hostname}: Dumping Database ..: ${DB_DIR}/$DB.$DB_EXT" + (cd ${DB_DIR} ; sudo -u postgres pg_dump $DB > $DB.$DB_EXT) + fi + done +} + +############################################################################### +# Only Controller +############################################################################### + +if [ "$nodetype" = "controller" ] ; then + is_service_active + if [ "$?" = "0" ] ; then + exit 0 + fi + + # postgres DB sizes + delimiter ${LOGFILE} "formatted ${PSQL_CMD} -c" + ${PSQL_CMD} -c " + SELECT + pg_database.datname, + pg_database_size(pg_database.datname), + pg_size_pretty(pg_database_size(pg_database.datname)) + FROM pg_database + ORDER BY pg_database_size DESC; + " >> ${LOGFILE} + + # Number of postgres connections + delimiter ${LOGFILE} "ps -C postgres -o cmd=" + ps -C postgres -o cmd= >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG} + + delimiter ${LOGFILE} "call to log_database" + log_database ${LOGFILE} + + database_dump +fi + +exit 0 diff --git a/middleware/util/recipes-common/collector/scripts/collect_sm.sh b/middleware/util/recipes-common/collector/scripts/collect_sm.sh new file mode 100644 index 0000000..5f0f3c9 --- /dev/null +++ b/middleware/util/recipes-common/collector/scripts/collect_sm.sh @@ -0,0 +1,26 @@ +#! /bin/bash +# +# Copyright (c) 2013-2014 Wind River Systems, Inc. 
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+
+
+# Loads Up Utilities and Commands Variables
+source /usr/local/sbin/collect_parms
+source /usr/local/sbin/collect_utils
+
+SERVICE="sm"
+LOGFILE="${extradir}/sm.info"
+echo "${hostname}: Service Management : ${LOGFILE}"
+
+###############################################################################
+# Only Controller
+###############################################################################
+
+if [ "$nodetype" = "controller" ] ; then
+    kill -SIGUSR1 $(</var/run/sm.pid) 2>>${COLLECT_ERROR_LOG} >> ${LOGFILE}
+fi
+
+exit 0
diff --git a/middleware/util/recipes-common/collector/scripts/collect_sysinv.sh b/middleware/util/recipes-common/collector/scripts/collect_sysinv.sh
new file mode 100755
index 0000000..37ba26a
--- /dev/null
+++ b/middleware/util/recipes-common/collector/scripts/collect_sysinv.sh
@@ -0,0 +1,73 @@
+#! /bin/bash
+#
+# Copyright (c) 2013-2014 Wind River Systems, Inc.
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+
+
+# Loads Up Utilities and Commands Variables
+source /usr/local/sbin/collect_parms
+source /usr/local/sbin/collect_utils
+
+SERVICE="inventory"
+LOGFILE="${extradir}/${SERVICE}.info"
+RPMLOG="${extradir}/rpm.info"
+
+function is_service_active()
+{
+    active=`sm-query service management-ip | grep "enabled-active"`
+    if [ -z "$active" ] ; then
+        return 0
+    else
+        return 1
+    fi
+}
+
+###############################################################################
+# Only Controller
+###############################################################################
+if [ "$nodetype" = "controller" ] ; then
+
+    echo "${hostname}: Software Config ...: ${RPMLOG}"
+    # These go into the SERVICE.info file
+    delimiter ${RPMLOG} "rpm -qa"
+    rpm -qa >> ${RPMLOG}
+
+    is_service_active
+    if [ "$?" = "0" ] ; then
+        exit 0
+    fi
+
+    echo "${hostname}: System Inventory ..: ${LOGFILE}"
+
+    # These go into the SERVICE.info file
+    delimiter ${LOGFILE} "system host-list"
+    system host-list 2>>${COLLECT_ERROR_LOG} >> ${LOGFILE}
+
+    delimiter ${LOGFILE} "system service-list"
+    system service-list 2>>${COLLECT_ERROR_LOG} >> ${LOGFILE}
+
+    delimiter ${LOGFILE} "nova service-list"
+    nova service-list 2>>${COLLECT_ERROR_LOG} >> ${LOGFILE}
+
+    delimiter ${LOGFILE} "neutron host-list"
+    neutron host-list 2>>${COLLECT_ERROR_LOG} >> ${LOGFILE}
+
+    delimiter ${LOGFILE} "system host-port-list controller-0"
+    system host-port-list controller-0 2>>${COLLECT_ERROR_LOG} >> ${LOGFILE}
+
+    delimiter ${LOGFILE} "system host-port-list controller-1"
+    system host-port-list controller-1 2>>${COLLECT_ERROR_LOG} >> ${LOGFILE}
+
+    delimiter ${LOGFILE} "Dump all Instances"
+    nova list --fields name,status,OS-EXT-SRV-ATTR:host --all-tenant 2>>${COLLECT_ERROR_LOG} >> ${LOGFILE}
+
+    delimiter ${LOGFILE} "vm-topology"
+    timeout 60 vm-topology --show all 2>>${COLLECT_ERROR_LOG} >> ${LOGFILE}
+
+    cp -a /opt/platform ${extradir}
+fi
+
+
+exit 0
diff --git a/middleware/util/recipes-common/collector/scripts/collect_tc.sh b/middleware/util/recipes-common/collector/scripts/collect_tc.sh
new file mode 100755
index 0000000..160b970
--- /dev/null
+++ b/middleware/util/recipes-common/collector/scripts/collect_tc.sh
@@ -0,0 +1,82 @@
+#! /bin/bash
+#
+# Copyright (c) 2013-2014 Wind River Systems, Inc.
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+
+
+# Loads Up Utilities and Commands Variables
+source /usr/local/sbin/collect_parms
+source /usr/local/sbin/collect_utils
+
+SERVICE="tc"
+LOGFILE="${extradir}/tc.info"
+echo "${hostname}: Traffic Controls . 
: ${LOGFILE}" + +############################################################################### +# Interface Info +############################################################################### +delimiter ${LOGFILE} "cat /etc/network/interfaces" +if [ -f /etc/network/interfaces ]; then + cat /etc/network/interfaces >> ${LOGFILE} +else + echo "/etc/network/interfaces NOT FOUND" >> ${LOGFILE} +fi + +delimiter ${LOGFILE} "ip link" +ip link >> ${LOGFILE} + +for i in $(ip link | grep mtu | grep eth |awk '{print $2}' | sed 's#:##g'); +do + delimiter ${LOGFILE} "ethtool ${i}" + ethtool ${i} >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG} + + delimiter ${LOGFILE} "cat /sys/class/net/${i}/speed" + cat /sys/class/net/${i}/speed >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG} + + delimiter ${LOGFILE} "ethtool -S ${i}" + ethtool -S ${i} >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG} +done + +############################################################################### +# TC Configuration Script (/usr/local/bin/cgcs_tc_setup.sh) +############################################################################### +delimiter ${LOGFILE} "cat /usr/local/bin/cgcs_tc_setup.sh" +if [ -f /usr/local/bin/cgcs_tc_setup.sh ]; then + cat /usr/local/bin/cgcs_tc_setup.sh >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG} +else + echo "/usr/local/bin/cgcs_tc_setup.sh NOT FOUND" >> ${LOGFILE} +fi + +############################################################################### +# TC Configuration +############################################################################### +delimiter ${LOGFILE} "tc qdisc show" +tc qdisc show >> ${LOGFILE} + +for i in $(ip link | grep htb | awk '{print $2}' | sed 's#:##g'); +do + delimiter ${LOGFILE} "tc class show dev ${i}" + tc class show dev ${i} >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG} + + delimiter ${LOGFILE} "tc filter show dev ${i}" + tc filter show dev ${i} >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG} +done + +############################################################################### +# TC Statistics +############################################################################### +delimiter ${LOGFILE} "tc -s qdisc show" +tc -s qdisc show >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG} + +for i in $(ip link | grep htb | awk '{print $2}' | sed 's#:##g'); +do + delimiter ${LOGFILE} "tc -s class show dev ${i}" + tc -s class show dev ${i} >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG} + + delimiter ${LOGFILE} "tc -s filter show dev ${i}" + tc -s filter show dev ${i} >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG} +done + +exit 0 diff --git a/middleware/util/recipes-common/collector/scripts/collect_utils b/middleware/util/recipes-common/collector/scripts/collect_utils new file mode 100755 index 0000000..0e91013 --- /dev/null +++ b/middleware/util/recipes-common/collector/scripts/collect_utils @@ -0,0 +1,223 @@ +#! /bin/bash +# +# Copyright (c) 2013-2014 Wind River Systems, Inc. 
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+
+##########################################################################################
+
+DEBUG=false
+
+# Fail Codes
+PASS=0
+FAIL=1
+RETRY=2
+
+FAIL_NODETYPE=3
+
+FAIL_TIMEOUT=10
+FAIL_TIMEOUT1=11
+FAIL_TIMEOUT2=12
+FAIL_TIMEOUT3=13
+FAIL_TIMEOUT4=14
+FAIL_TIMEOUT5=15
+FAIL_TIMEOUT6=16
+FAIL_TIMEOUT7=17
+FAIL_TIMEOUT8=18
+FAIL_TIMEOUT9=19
+
+FAIL_PASSWORD=30
+FAIL_PERMISSION=31
+FAIL_CLEANUP=32
+FAIL_UNREACHABLE=33
+FAIL_HOSTNAME=34
+FAIL_INACTIVE=35
+FAIL_PERMISSION_SKIP=36
+FAIL_OUT_OF_SPACE=37
+FAIL_INSUFFICIENT_SPACE=38
+FAIL_OUT_OF_SPACE_LOCAL=39
+FAIL_CREATE=40
+
+# Warnings are above 200
+WARN_WARNING=200
+WARN_HOSTNAME=201
+
+# Failure Strings
+FAIL_OUT_OF_SPACE_STR="No space left on device"
+FAIL_INSUFFICIENT_SPACE_STR="Not enough space on device"
+
+# The minimum amount of % free space on /scratch to allow collect to proceed
+MIN_PERCENT_SPACE_REQUIRED=75
+
+# Log file path/names
+COLLECT_LOG=/var/log/collect.log
+COLLECT_ERROR_LOG=/tmp/collect_error.log
+
+# Load up the nova openrc file if possible
+KEYRING_PATH="/opt/platform/.keyring"
+if [ -e ${KEYRING_PATH} ] ; then
+    CRED=`find /opt/platform/.keyring -name .CREDENTIAL`
+    if [ ! -z "${CRED}" ] ; then
+        NOVAOPENRC="/etc/nova/openrc"
+        if [ -e ${NOVAOPENRC} ] ; then
+            ACTIVE=true
+            source ${NOVAOPENRC} 2>/dev/null 1>/dev/null
+        fi
+    fi
+fi
+
+# get the node and subfunction types
+nodetype=""
+subfunction=""
+PLATFORM_CONF=/etc/platform/platform.conf
+if [ -e ${PLATFORM_CONF} ] ; then
+    source ${PLATFORM_CONF}
+fi
+
+if [ "${nodetype}" != "controller" -a "${nodetype}" != "compute" -a "${nodetype}" != "storage" ] ; then
+    logger -t ${COLLECT_TAG} "could not identify nodetype ($nodetype)"
+    exit $FAIL_NODETYPE
+fi
+
+NODETYPE=$nodetype
+SUBFUNCTION=$subfunction
+
+# Setup an expect command completion file.
+# This is used to force serialization of expect +# sequences and highlight command completion +collect_done="collect done" +cmd_done_sig="expect done" +cmd_done_file="/usr/local/sbin/expect_done" + +# Compression Commands +TAR_ZIP_CMD="tar -cvzf" +TAR_UZIP_CMD="tar -xvzf" +TAR_CMD="tar -cvhf" +UNTAR_CMD="tar -xvf" +ZIP_CMD="gzip" +NICE_CMD="/usr/bin/nice -n19" +IONICE_CMD="/usr/bin/ionice -c2 -n7" +COLLECT_TAG="COLLECT" + +STARTDATE_OPTION="--start-date" +ENDDATE_OPTION="--end-date" + + +PROCESS_DETAIL_CMD="ps -e -H -o ruser,tid,pid,ppid,flags,stat,policy,rtprio,nice,priority,rss:10,vsz:10,sz:10,psr,stime,tty,cputime,wchan:14,cmd" +BUILD_INFO_CMD="cat /etc/build.info" + +################################################################################ +# Log Debug, Info or Error log message to syslog +################################################################################ +function log +{ + logger -t ${COLLECT_TAG} $@ +} + +function ilog +{ + echo "$@" + logger -t ${COLLECT_TAG} $@ + #logger -p local3.info -t ${COLLECT_TAG} $@ +} + +function elog +{ + echo "Error: $@" + logger -t ${COLLECT_TAG} $@ +} + +function wlog +{ + echo "Warning: $@" + logger -t ${COLLECT_TAG} $@ +} + +function set_debug_mode() +{ + DEBUG=${1} +} + +function dlog() +{ + if [ "$DEBUG" == true ] ; then + logger -t ${COLLECT_TAG} $@ + echo "Debug: $@" + fi +} + + +function delimiter() +{ + echo "--------------------------------------------------------------------" >> ${1} 2>>${COLLECT_ERROR_LOG} + echo "`date` : ${myhostname} : ${2}" >> ${1} 2>>${COLLECT_ERROR_LOG} + echo "--------------------------------------------------------------------" >> ${1} 2>>${COLLECT_ERROR_LOG} +} + +function log_slabinfo() +{ + PAGE_SIZE=$(getconf PAGE_SIZE) + cat /proc/slabinfo | awk -v page_size_B=${PAGE_SIZE} ' + BEGIN {page_KiB = page_size_B/1024; TOT_KiB = 0;} + (NF == 17) { + gsub(/[<>]/, ""); + printf("%-22s %11s %8s %8s %10s %12s %1s %5s %10s %12s %1s %12s %9s %11s %8s\n", + $2, $3, $4, $5, $6, $7, $8, $10, $11, $12, $13, $15, $16, $17, "KiB"); + } + (NF == 16) { + num_objs=$3; obj_per_slab=$5; pages_per_slab=$6; + KiB = (obj_per_slab > 0) ? page_KiB*num_objs/obj_per_slab*pages_per_slab : 0; + TOT_KiB += KiB; + printf("%-22s %11d %8d %8d %10d %12d %1s %5d %10d %12d %1s %12d %9d %11d %8d\n", + $1, $2, $3, $4, $5, $6, $7, $9, $10, $11, $12, $14, $15, $16, KiB); + } + END { + printf("%-22s %11s %8s %8s %10s %12s %1s %5s %10s %12s %1s %12s %9s %11s %8d\n", + "TOTAL", "-", "-", "-", "-", "-", ":", "-", "-", "-", ":", "-", "-", "-", TOT_KiB); + } + ' >> ${1} 2>>${COLLECT_ERROR_LOG} +} +########################################################################### +# +# Name : collect_errors +# +# Description: search COLLECT_ERROR_LOG for "No space left on device" logs +# Return 0 if no such logs are found. +# Return 1 if such logs are found +# +# Assumptions: Caller should assume a non-zero return as an indication of +# a corrupt or incomplete collect log +# +# Create logs and screen echos that record the error for the user. +# +# May look for other errors in the future +# +########################################################################### + +function collect_errors() +{ + local host=${1} + local RC=0 + + # Look for "No space left on device" error + grep -q "${FAIL_OUT_OF_SPACE_STR}" ${COLLECT_ERROR_LOG} + + if [ "$?" 
== "0" ] ; then + + string="failed to collect from ${host} (reason:${FAIL_OUT_OF_SPACE}:${FAIL_OUT_OF_SPACE_STR})" + + # /var/log/user.log it + logger -t ${COLLECT_TAG} "${string}" + + # logs that show up in the foreground + echo "${string}" + echo "Increase available space in ${host}:${COLLECT_BASE_DIR} and retry operation." + + # return error code + RC=1 + + fi + + return ${RC} +} diff --git a/middleware/util/recipes-common/collector/scripts/collect_vswitch.sh b/middleware/util/recipes-common/collector/scripts/collect_vswitch.sh new file mode 100644 index 0000000..e9bd2b8 --- /dev/null +++ b/middleware/util/recipes-common/collector/scripts/collect_vswitch.sh @@ -0,0 +1,108 @@ +#! /bin/bash +# +# Copyright (c) 2013-2014 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + + +# Loads Up Utilities and Commands Variables + +source /usr/local/sbin/collect_parms +source /usr/local/sbin/collect_utils + +SERVICE="vswitch" +LOGFILE="${extradir}/${SERVICE}.info" + +LIST_COMMANDS=( + # keep items sorted alphabetically + "address-list" + "dvr-mac-list" + "dvr-subnet-list" + "engine-list" + "engine-queue-list" + "filter-bindings-list" + "filter-rule-list" + "flow-group-list" + "flow-rule-list" + "igmp-group-list" + "igmp-interface-list" + "interface-list" + "lacp-interface-list" + "lacp-neighbour-list" + "lldp-agent-list" + "lldp-neighbour-list" + "mld-group-list" + "mld-interface-list" + "nat-list" + "neighbour-list" + "network-list" + "network-table-list" + "openflow-controller-list" + "openflow-errors-list" + "ovsdb-manager-list" + "ovsdb-monitor-list" + "port-list" + "route-list" + "router-list" + "router-list" + "snat-list" + "stream-list" + "vxlan-endpoint-list" + "vxlan-ip-endpoint-list" + "vxlan-peer-list") + +STATS_COMMANDS=( + # keep below items sorted alphabetically + "arp-stats-list" + "dvr-stats-list" + "engine-stats-list" + "filter-stats-list" + "flow-cache-stats-list" + "flow-event-stats-list" + "flow-switch-stats-list" + "flow-table-stats-list" + "icmp-stats-list" + "igmp-stats-list" + "interface-stats-list" + "ip-stats-list" + "ip-stats-list-errors" + "lacp-stats-list" + "lldp-stats-list" + "memory-stats-list" + "mld-stats-list" + "nat-stats-list" + "ndp-stats-list" + "network-stats-list" + "openflow-stats-list" + "port-queue-stats-list" + "port-rate-list" + "port-stats-list" + "snat-stats-list" + "udp-stats-list" + "vxlan-endpoint-stats-list") + +############################################################################### +# Only Compute Nodes +############################################################################### +if [[ "$nodetype" == "compute" || "$subfunction" == *"compute"* ]] ; then + + echo "${hostname}: AVS Info ..........: ${LOGFILE}" + + for COMMAND in ${LIST_COMMANDS[@]}; do + delimiter ${LOGFILE} "vshell ${COMMAND}" + vshell ${COMMAND} --expanded >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG} + done + + for COMMAND in ${STATS_COMMANDS[@]}; do + delimiter ${LOGFILE} "vshell ${COMMAND}" + vshell ${COMMAND} >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG} + done + + if [[ "$sdn_enabled" == "yes" ]] ; then + delimiter ${LOGFILE} "ovsdb-client dump" + ovsdb-client dump >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG} + fi +fi + +exit 0 diff --git a/middleware/util/recipes-common/collector/scripts/etc.exclude b/middleware/util/recipes-common/collector/scripts/etc.exclude new file mode 100644 index 0000000..d3f3827 --- /dev/null +++ b/middleware/util/recipes-common/collector/scripts/etc.exclude @@ -0,0 +1,40 @@ +/etc/cinder/volumes +/etc/nova/instances +/etc/postgresql 
+/etc/alternatives
+/etc/terminfo
+/etc/tempest
+/etc/security
+/etc/yum
+/etc/collect
+/etc/collect.d
+/etc/logrotate.d
+/etc/logrotate*
+/etc/keystone
+/etc/pam.d
+/etc/environment
+/etc/sudoers.d
+/etc/sudoers
+/etc/passwd
+/etc/passwd-
+/etc/shadow
+/etc/shadow-
+/etc/gshadow
+/etc/gshadow-
+/etc/group
+/etc/group-
+/etc/ssh
+/etc/X11
+/etc/bluetooth
+/etc/chatscripts
+/etc/cron*
+/etc/rc5.d
+/etc/rc4.d
+/etc/rc1.d
+/etc/rc2.d
+/etc/bash_completion.d
+/etc/pm
+/etc/systemd/system/*.mount
+/etc/systemd/system/ctrl-alt-del.target
+/etc/ssl
+/etc/mtc/tmp
diff --git a/middleware/util/recipes-common/collector/scripts/expect_done b/middleware/util/recipes-common/collector/scripts/expect_done
new file mode 100755
index 0000000..a846adb
--- /dev/null
+++ b/middleware/util/recipes-common/collector/scripts/expect_done
@@ -0,0 +1 @@
+expect done
diff --git a/middleware/util/recipes-common/collector/scripts/run.exclude b/middleware/util/recipes-common/collector/scripts/run.exclude
new file mode 100644
index 0000000..b1c1794
--- /dev/null
+++ b/middleware/util/recipes-common/collector/scripts/run.exclude
@@ -0,0 +1,12 @@
+/var/run/sanlock/sanlock.sock
+/var/run/tgtd.ipc_abstract_namespace.0
+/var/run/wdmd/wdmd.sock
+/var/run/acpid.socket
+/var/run/rpcbind.sock
+/var/run/libvirt/libvirt-sock-ro
+/var/run/libvirt/libvirt-sock
+/var/run/dbus/system_bus_socket
+/var/run/named-chroot
+/var/run/avahi-daemon
+/var/run/neutron/metadata_proxy
+/var/run/.vswitch
diff --git a/middleware/util/recipes-common/engtools/hostdata-collectors/README b/middleware/util/recipes-common/engtools/hostdata-collectors/README
new file mode 100644
index 0000000..a5174af
--- /dev/null
+++ b/middleware/util/recipes-common/engtools/hostdata-collectors/README
@@ -0,0 +1,18 @@
+The engineering tools are meant to be installed as a patch. Therefore, the RPM is generated as part
+of the build but is not included in the image. Assuming your development environment is fully set up,
+simply run patch-engtools.sh to generate the patch:
+
+In this directory ($MY_REPO/addons/wr-cgcs/layers/cgcs/middleware/util/recipes-common/engtools/hostdata-collectors),
+enter the command:
+>./patch-engtools.sh
+
+This generates ENGTOOLS-X.patch (X is the TiS release version), which can be applied via sw-patch.
+
+The patch is built with the --all-nodes option by default. This can be changed to a combination of the following:
+--controller, --compute, --storage, --controller-compute, and --compute-lowlatency.
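+
+For example (a sketch only; the patch ID suffix X is release-dependent and the exact
+apply/install steps depend on your target system), a typical flow after a build is:
+>./patch-engtools.sh
+>sw-patch upload ENGTOOLS-X.patch
+>sw-patch apply ENGTOOLS-X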
diff --git a/middleware/util/recipes-common/engtools/hostdata-collectors/centos/build_srpm.data b/middleware/util/recipes-common/engtools/hostdata-collectors/centos/build_srpm.data
new file mode 100644
index 0000000..81d4058
--- /dev/null
+++ b/middleware/util/recipes-common/engtools/hostdata-collectors/centos/build_srpm.data
@@ -0,0 +1,2 @@
+SRC_DIR="scripts"
+TIS_PATCH_VER=1
diff --git a/middleware/util/recipes-common/engtools/hostdata-collectors/centos/collect-engtools.spec b/middleware/util/recipes-common/engtools/hostdata-collectors/centos/collect-engtools.spec
new file mode 100644
index 0000000..91f2bb4
--- /dev/null
+++ b/middleware/util/recipes-common/engtools/hostdata-collectors/centos/collect-engtools.spec
@@ -0,0 +1,103 @@
+Summary: Host performance data collection tools package
+Name: engtools
+Version: 1.0
+Release: %{tis_patch_ver}%{?_tis_dist}
+License: Apache-2.0
+Group: Tools
+Packager: Wind River
+URL: http://www.windriver.com/
+BuildArch: noarch
+Source: %{name}-%{version}.tar.gz
+
+BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root
+
+Requires: iperf3
+
+%description
+This package contains data collection tools to monitor host performance.
+The tools are general-purpose engineering and debugging aids, covering
+overall memory, CPU occupancy, per-task CPU, per-task scheduling, and
+per-task I/O.
+
+# Don't try fancy stuff like debuginfo, which is useless on binary-only
+# packages. Don't strip binaries either.
+# Be sure buildpolicy is set to do nothing
+%define __spec_install_post %{nil}
+%define debug_package %{nil}
+%define __os_install_post %{_dbpath}/brp-compress
+%define _binaries_in_noarch_packages_terminate_build 0
+
+%define local_dir /usr/local
+%define local_bindir %{local_dir}/bin/
+%define local_initdir /etc/init.d/
+%define local_confdir /etc/engtools/
+%define local_systemddir /etc/systemd/system/
+
+%prep
+%setup -q
+
+%build
+# Empty section.
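+# Nothing to compile: this package ships interpreted bash/python/perl scripts,
+# so %build is intentionally empty and %install below just copies files.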
+
+%install
+mkdir -p %{buildroot}
+install -d -m 755 %{buildroot}%{local_bindir}
+# Installing additional tools, memtop, occtop and schedtop are already in the image
+install -m 755 buddyinfo.py %{buildroot}%{local_bindir}
+install -m 755 chewmem %{buildroot}%{local_bindir}
+# Installing data collection scripts
+install -m 755 ceph.sh %{buildroot}%{local_bindir}
+install -m 755 cleanup-engtools.sh %{buildroot}%{local_bindir}
+install -m 755 collect-engtools.sh %{buildroot}%{local_bindir}
+install -m 755 diskstats.sh %{buildroot}%{local_bindir}
+install -m 755 engtools_util.sh %{buildroot}%{local_bindir}
+install -m 755 filestats.sh %{buildroot}%{local_bindir}
+install -m 755 iostat.sh %{buildroot}%{local_bindir}
+install -m 755 linux_benchmark.sh %{buildroot}%{local_bindir}
+install -m 755 memstats.sh %{buildroot}%{local_bindir}
+install -m 755 netstats.sh %{buildroot}%{local_bindir}
+install -m 755 postgres.sh %{buildroot}%{local_bindir}
+install -m 755 rabbitmq.sh %{buildroot}%{local_bindir}
+install -m 755 remote/rbzip2-engtools.sh %{buildroot}%{local_bindir}
+install -m 755 remote/rstart-engtools.sh %{buildroot}%{local_bindir}
+install -m 755 remote/rstop-engtools.sh %{buildroot}%{local_bindir}
+install -m 755 remote/rsync-engtools-data.sh %{buildroot}%{local_bindir}
+install -m 755 slab.sh %{buildroot}%{local_bindir}
+install -m 755 ticker.sh %{buildroot}%{local_bindir}
+install -m 755 top.sh %{buildroot}%{local_bindir}
+install -m 755 vswitch.sh %{buildroot}%{local_bindir}
+install -m 755 live_stream.py %{buildroot}%{local_bindir}
+# Installing conf file
+install -d -m 755 %{buildroot}%{local_confdir}
+install -m 644 -p -D cfg/engtools.conf %{buildroot}%{local_confdir}
+# Installing init script
+install -d -m 755 %{buildroot}%{local_initdir}
+install -m 755 init.d/collect-engtools.sh %{buildroot}%{local_initdir}
+# Installing service file
+install -d -m 755 %{buildroot}%{local_systemddir}
+install -m 644 -p -D collect-engtools.service %{buildroot}%{local_systemddir}
+
+%clean
+rm -rf $RPM_BUILD_ROOT
+
+%files
+%license LICENSE
+%defattr(-,root,root,-)
+%{local_bindir}/*
+%{local_confdir}/*
+%{local_initdir}/*
+%{local_systemddir}/*
+
+%post
+/bin/systemctl enable collect-engtools.service > /dev/null 2>&1
+/bin/systemctl start collect-engtools.service > /dev/null 2>&1
+
+%preun
+#/bin/systemctl --no-reload disable collect-engtools.sh.service > /dev/null 2>&1
+#/bin/systemctl stop collect-engtools.sh.service > /dev/null 2>&1
+%systemd_preun collect-engtools.service
+
+%postun
+%systemd_postun_with_restart collect-engtools.service
diff --git a/middleware/util/recipes-common/engtools/hostdata-collectors/patch-engtools.sh b/middleware/util/recipes-common/engtools/hostdata-collectors/patch-engtools.sh
new file mode 100755
index 0000000..bf9a73d
--- /dev/null
+++ b/middleware/util/recipes-common/engtools/hostdata-collectors/patch-engtools.sh
@@ -0,0 +1,33 @@
+#!/bin/bash
+# Designer patches:
+# http://twiki.wrs.com/PBUeng/Patching
+
+if [ -z "$MY_WORKSPACE" ] || [ -z "$MY_REPO" ]; then
+    echo "Some dev environment variables are not set."
+    echo "Refer to http://wiki.wrs.com/PBUeng/CentOSBuildProcess for instructions."
+    exit 1
+fi
+
+ENGTOOLS=$(ls ${MY_WORKSPACE}/std/rpmbuild/RPMS/engtools*noarch.rpm 2>/dev/null)
+if [ $? -ne 0 ]; then
+    echo "Engtools RPM has not been built. Please run \"build-pkgs engtools\" first."
+ exit 1 +fi + +source ${MY_REPO}/addons/wr-cgcs/layers/cgcs/middleware/recipes-common/build-info/release-info.inc +#TiS_REL="16.10" +#PATCH_ID="ENGTOOLS-${TiS_REL}" +PATCH_ID="ENGTOOLS-${PLATFORM_RELEASE}" + +PWD=$(pwd) + +# Create CGCS Patch +cd ${MY_WORKSPACE} +PATCH_BUILD=${MY_REPO}/addons/wr-cgcs/layers/cgcs/extras.ND/scripts/patch_build.sh +${PATCH_BUILD} --id ${PATCH_ID} --reboot-required=N \ + --summary "System engineering data collection and analysis tools." \ + --desc "System engineering data collection and analysis tools." \ + --all-nodes ${ENGTOOLS} \ + --warn "Intended for system engineering use only." +cd ${PWD} +exit 0 diff --git a/middleware/util/recipes-common/engtools/hostdata-collectors/scripts/LICENSE b/middleware/util/recipes-common/engtools/hostdata-collectors/scripts/LICENSE new file mode 100644 index 0000000..d645695 --- /dev/null +++ b/middleware/util/recipes-common/engtools/hostdata-collectors/scripts/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!) The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/middleware/util/recipes-common/engtools/hostdata-collectors/scripts/buddyinfo.py b/middleware/util/recipes-common/engtools/hostdata-collectors/scripts/buddyinfo.py
new file mode 100644
index 0000000..2ccfd99
--- /dev/null
+++ b/middleware/util/recipes-common/engtools/hostdata-collectors/scripts/buddyinfo.py
@@ -0,0 +1,121 @@
+#!/usr/bin/env python
+# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 textwidth=79 autoindent
+
+"""
+Python source code
+Last modified: 15 Feb 2014 - 13:38
+Last author: lmwangi at gmail com
+Displays the available memory fragments
+by querying /proc/buddyinfo
+Example:
+# python buddyinfo.py
+"""
+import optparse
+import os
+import re
+from collections import defaultdict
+import logging
+
+
+class Logger:
+    def __init__(self, log_level):
+        self.log_level = log_level
+
+    def get_formatter(self):
+        return logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
+
+    def get_handler(self):
+        return logging.StreamHandler()
+
+    def get_logger(self):
+        """Returns a Logger instance for the specified module_name"""
+        logger = logging.getLogger('main')
+        logger.setLevel(self.log_level)
+        log_handler = self.get_handler()
+        log_handler.setFormatter(self.get_formatter())
+        logger.addHandler(log_handler)
+        return logger
+
+
+class BuddyInfo(object):
+    """BuddyInfo DAO"""
+    def __init__(self, logger):
+        super(BuddyInfo, self).__init__()
+        self.log = logger
+        self.buddyinfo = self.load_buddyinfo()
+
+    def parse_line(self, line):
+        line = line.strip()
+        self.log.debug("Parsing line: %s" % line)
+        parsed_line = re.match("Node\s+(?P<numa_node>\d+).*zone\s+(?P<zone>\w+)\s+(?P<nr_free>.*)", line).groupdict()
+        self.log.debug("Parsed line: %s" % parsed_line)
+        return parsed_line
+
+    def read_buddyinfo(self):
+        buddyhash = defaultdict(list)
+        buddyinfo = open("/proc/buddyinfo").readlines()
+        for line in map(self.parse_line, buddyinfo):
+            numa_node = int(line["numa_node"])
+            zone = line["zone"]
+            free_fragments = map(int, line["nr_free"].split())
+            max_order = len(free_fragments)
+            fragment_sizes = self.get_order_sizes(max_order)
+            usage_in_bytes = [block[0] * block[1] for block in zip(free_fragments, fragment_sizes)]
+            buddyhash[numa_node].append({
+                "zone": zone,
+                "nr_free": free_fragments,
+                "sz_fragment": fragment_sizes,
+                "usage": usage_in_bytes })
+        return buddyhash
+
+    def load_buddyinfo(self):
+        buddyhash = 
self.read_buddyinfo()
+        self.log.info(buddyhash)
+        return buddyhash
+
+    def page_size(self):
+        return os.sysconf("SC_PAGE_SIZE")
+
+    def get_order_sizes(self, max_order):
+        return [self.page_size() * 2**order for order in range(0, max_order)]
+
+    def __str__(self):
+        ret_string = ""
+        width = 20
+        for node in self.buddyinfo:
+            ret_string += "Node: %s\n" % node
+            for zoneinfo in self.buddyinfo.get(node):
+                ret_string += " Zone: %s\n" % zoneinfo.get("zone")
+                ret_string += " Free KiB in zone: %.2f\n" % (sum(zoneinfo.get("usage")) / (1024.0))
+                ret_string += '\t{0:{align}{width}} {1:{align}{width}} {2:{align}{width}}\n'.format(
+                        "Fragment size", "Free fragments", "Total available KiB",
+                        width=width,
+                        align="<")
+                for idx in range(len(zoneinfo.get("sz_fragment"))):
+                    ret_string += '\t{order:{align}{width}} {nr:{align}{width}} {usage:{align}{width}}\n'.format(
+                            width=width,
+                            align="<",
+                            order = zoneinfo.get("sz_fragment")[idx],
+                            nr = zoneinfo.get("nr_free")[idx],
+                            usage = zoneinfo.get("usage")[idx] / 1024.0)
+
+        return ret_string
+
+def main():
+    """Main function. Called when this file is run as a script"""
+    usage = "usage: %prog [options]"
+    parser = optparse.OptionParser(usage)
+    parser.add_option("-s", "--size", dest="size", choices=["B","K","M"],
+                      action="store", type="choice", help="Return results in bytes, kib, mib")
+
+    (options, args) = parser.parse_args()
+    logger = Logger(logging.DEBUG).get_logger()
+    logger.info("Starting....")
+    logger.info("Parsed options: %s" % options)
+    print logger
+    buddy = BuddyInfo(logger)
+    print buddy
+
+if __name__ == '__main__':
+    main()
+
diff --git a/middleware/util/recipes-common/engtools/hostdata-collectors/scripts/ceph.sh b/middleware/util/recipes-common/engtools/hostdata-collectors/scripts/ceph.sh
new file mode 100644
index 0000000..4001440
--- /dev/null
+++ b/middleware/util/recipes-common/engtools/hostdata-collectors/scripts/ceph.sh
@@ -0,0 +1,63 @@
+#!/bin/bash
+# Usage: ceph.sh [-p <period_mins>] [-i <interval_seconds>] [-c <cpulist>] [-h]
+TOOLBIN=$(dirname $0)
+
+# Initialize tools environment variables, and define common utility functions
+. ${TOOLBIN}/engtools_util.sh
+tools_init
+if [ $? -ne 0 ]; then
+    echo "FATAL, tools_init - could not setup environment"
+    exit 1
+fi
+
+# Enable use of INTERVAL_SEC sample interval
+OPT_USE_INTERVALS=1
+
+# Print key ceph statistics
+function print_ceph()
+{
+    print_separator
+    TOOL_HIRES_TIME
+
+    cmd='ceph -s'
+    ${ECHO} "# ${cmd}" ; ${cmd} ; ${ECHO}
+
+    cmd='ceph osd tree'
+    ${ECHO} "# ${cmd}" ; ${cmd} ; ${ECHO}
+
+    cmd='ceph df detail'
+    ${ECHO} "# ${cmd}" ; ${cmd} ; ${ECHO}
+}
+
+#-------------------------------------------------------------------------------
+# MAIN Program:
+#-------------------------------------------------------------------------------
+# Parse input options
+tools_parse_options "${@}"
+
+# Set affinity of current script
+CPULIST=""
+set_affinity ${CPULIST}
+
+LOG "collecting ${TOOLNAME} for ${PERIOD_MIN} minutes, with ${INTERVAL_SEC} second sample intervals."
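+
+# For example (hypothetical values): PERIOD_MIN=5 with INTERVAL_SEC=30 gives
+# REPEATS = 5*60/30 = 10 samples in the loop below, plus one final sample.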
+ +# Print tools generic tools header +tools_header + +# Calculate number of sample repeats based on overall interval and sampling interval +((REPEATS = PERIOD_MIN * 60 / INTERVAL_SEC)) + +for ((rep=1; rep <= REPEATS ; rep++)) +do + print_ceph + sleep ${INTERVAL_SEC} +done +print_ceph +LOG "done" + +# normal program exit +tools_cleanup 0 +exit 0 diff --git a/middleware/util/recipes-common/engtools/hostdata-collectors/scripts/cfg/engtools.conf b/middleware/util/recipes-common/engtools/hostdata-collectors/scripts/cfg/engtools.conf new file mode 100644 index 0000000..b2b940d --- /dev/null +++ b/middleware/util/recipes-common/engtools/hostdata-collectors/scripts/cfg/engtools.conf @@ -0,0 +1,77 @@ +# engtools configuration + +# You may comment out any unwanted fields under the Intervals section, but do not comment out any other configuration options as the python parsing utility will complain. Please follow the comments + +[LabConfiguration] +# Set this option to Y/N depending on the setup of your lab +CPE_LAB=N + +[LiveStream] +# Set this option to Y/N before patch creation to enable/disable live stats collection +ENABLE_LIVE_STREAM=Y + +# Set the duration of the live stream capture utility. Leave blank for continuous collection. Ex: 1s,1m,1h,1d +DURATION= + +[StaticCollection] +# Set this option to Y/N before patch creation to enable/disable static stats collection +ENABLE_STATIC_COLLECTION=Y + +[CollectInternal] +# controller external OAM interface used to communicate with remote server. If unset, the first interface from ifconfig will be used +CONTROLLER0_EXTERNAL_INTERFACE= +CONTROLLER1_EXTERNAL_INTERFACE= + +[RemoteServer] +# remote server influx and grafana info +INFLUX_IP=128.224.186.61 +INFLUX_PORT=8086 +INFLUX_DB= +GRAFANA_PORT=3000 + +# This key is created through Grafana. If deleted, a new key (with admin privileges) must be created and copied here +GRAFANA_API_KEY=eyJrIjoiSkR1SXcxbkVVckd1dW9PMHFKS0EzQ2hQWTd1YUhtSkIiLCJuIjoiZGJfY3JlYXRvciIsImlkIjoxfQ== + +[Intervals] +# Set the collection interval (in seconds) to be used in the live_stream.py script. If unset or commented out, that field will not be collected +memtop=10 +memstats=10 +occtop=10 +schedtop=10 +load_avg=3 +cpu_count=60 +diskstats=30 +iostat=10 +filestats=30 +netstats=10 +postgres=30 +rabbitmq=3600 +vswitch=30 + +[AdditionalOptions] +# Set this option to Y/N to enable/disable Openstack API GET/POST collection +API_REQUESTS=N + +# Set this option to Y/N to enable/disable the collection of all services and not just the ones listed below. Note that this hasn't been tested thoroughly +ALL_SERVICES=N + +# Set this option to Y/N to enable/disable fast postgres connections collection. By default, postgres connections use the same collection interval as postgres DB size (set above), this option will set the collection interval to 0 seconds while not affecting the above postgres collection interval +FAST_POSTGRES_CONNECTIONS=N + +# Set this option to Y/N to enable/disable automatic database deletion for InfluxDB and Grafana. 
As of now, this feature does not work with the engtools patch
+AUTO_DELETE_DB=N
+
+[ControllerServices]
+CONTROLLER_SERVICE_LIST=aodh-api aodh-listener aodh-notifier aodh-evaluator beam.smp ceilometer-api ceilometer-collector ceilometer-agent-notification ceilometer-mem-db ceph-mon ceph-rest-api ceph-alarm-manager cinder-api cinder-volume cinder-scheduler glance-api glance-registry heat-api heat-engine heat-api-cfn heat-api-cloudwatch hbsAgent ironic-api ironic-conductor keystone-all magnum-api magnum-conductor neutron-server nova-api nova-api-proxy nova-compute nova-scheduler nova-conductor nova-console-auth nova-novncproxy nova-placement-api panko-api sysinv-api sysinv-conductor postgres fmManager rabbitmq-server gunicorn snmpd patch-alarm-manager lighttpd sw-patch-controller-daemon nfv-vim nfv-vim-api nfv-vim-webserver slapd mtcAgent guestAgent
+
+[ComputeServices]
+COMPUTE_SERVICE_LIST=nova-compute neutron-dhcp-agent neutron-metadata-agent neutron-sriov-nic-agent kvm libvirtd guestServer host_agent
+
+[StorageServices]
+STORAGE_SERVICE_LIST=ceph-mon ceph-osd ceph-manager ceph-rest-api
+
+[RabbitmqServices]
+RABBITMQ_QUEUE_LIST=notifications.info versioned_notifications.info
+
+[CommonServices]
+COMMON_SERVICE_LIST=dnsmasq ceilometer-polling haproxy hwmond pmond rmond fsmond sw-patch-agent sysinv-agent syslog-ng hostwd iscsid io-monitor-manager acpid hbsClient logmgmt mtcClient mtcalarmd mtclogd sshd ntpd smartd sm sm-eru sm-watchdog sm-api ceilometer keyring cinder-rtstool
diff --git a/middleware/util/recipes-common/engtools/hostdata-collectors/scripts/chewmem b/middleware/util/recipes-common/engtools/hostdata-collectors/scripts/chewmem
new file mode 100644
index 0000000..03ed3d8
--- /dev/null
+++ b/middleware/util/recipes-common/engtools/hostdata-collectors/scripts/chewmem
@@ -0,0 +1,90 @@
+#!/usr/bin/perl
+# Usage:
+# ./chewmem.pl <MiB>
+
+# Description:
+# This will create a character array requiring "MiB" actual memory.
+# Summarize high-level memory usage.
+
+# Ideally we can demonstrate creating larger and larger
+# successful memory allocations until Avail is near 0.
+# It is very likely to trigger OOM Killer or cause reset
+# if we run completely out of memory.
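+#
+# Example (hypothetical size): hold roughly 512 MiB and report usage once per
+# second until interrupted with Ctrl-C:
+#   ./chewmem.pl 512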
+
+use warnings;
+use strict;
+use POSIX qw(strftime);
+
+sub show_memusage() {
+    our $count;
+    $::count++; $::count %= 15;
+
+    my $Ki = 1024.0;
+    my ($MemTotal, $MemFree, $Buffers, $Cached, $CommitLimit, $Committed_AS, $Slab, $SReclaimable);
+    # Process all entries of MEMINFO
+    my $file = '/proc/meminfo';
+    open(FILE, $file) || die "Cannot open file: $file ($!)";
+    MEMINFO_LOOP: while($_ = <FILE>) {
+        s/[\0\e\f\r\a]//g; chomp; # strip control characters if any
+        last MEMINFO_LOOP if (/^\s*$/); # end at blank-line
+        if (/\bMemTotal:\s+(\d+)\s+kB/) {
+            $MemTotal = $1; next MEMINFO_LOOP;
+        }
+        if (/\bMemFree:\s+(\d+)\s+kB/) {
+            $MemFree = $1; next MEMINFO_LOOP;
+        }
+        if (/\bBuffers:\s+(\d+)\s+kB/) {
+            $Buffers = $1; next MEMINFO_LOOP;
+        }
+        if (/\bCached:\s+(\d+)\s+kB/) {
+            $Cached = $1; next MEMINFO_LOOP;
+        }
+        if (/\bCommitLimit:\s+(\d+)\s+kB/) {
+            $CommitLimit = $1; next MEMINFO_LOOP;
+        }
+        if (/\bCommitted_AS:\s+(\d+)\s+kB/) {
+            $Committed_AS = $1; next MEMINFO_LOOP;
+        }
+        if (/\bSlab:\s+(\d+)\s+kB/) {
+            $Slab = $1; next MEMINFO_LOOP;
+        }
+        if (/\bSReclaimable:\s+(\d+)\s+kB/) {
+            $SReclaimable = $1; next MEMINFO_LOOP;
+        }
+    }
+    close(FILE);
+
+    my $Avail_MiB = ($MemFree + $Cached + $Buffers + $SReclaimable)/$Ki;
+    my $Strict_MiB = ($CommitLimit - $Committed_AS)/$Ki;
+    my $now = strftime "%Y-%m-%d %H:%M:%S", localtime();
+    if ($::count == 1) {
+        printf "%19s %6s %6s %6s %6s %6s %6s %6s %6s %6s\n",
+            'yyyy-mm-dd hh:mm:ss', 'Tot', 'Free', 'Ca', 'Buf', 'Slab', 'CAS', 'CLim', 'Avail', 'Strict';
+    }
+    printf "%19s %6.1f %6.1f %6.1f %6.1f %6.1f %6.1f %6.1f %6.1f %6.1f\n",
+        $now, $MemTotal/$Ki, $MemFree/$Ki, $Cached/$Ki, $Buffers/$Ki, $Slab/$Ki,
+        $Committed_AS/$Ki, $CommitLimit/$Ki, $Avail_MiB, $Strict_MiB;
+}
+
+#-------------------------------------------------------------------------------
+# MAIN PROGRAM
+# Autoflush output
+select(STDERR);
+$| = 1;
+select(STDOUT); # default
+$| = 1;
+
+my $MiB = $ARGV[0] ||=0.0;
+my $A = "A" x (1024*1024*$MiB/2);
+print "Allocating $MiB MiB character array.\n";
+while(1) {
+    sleep(1);
+    show_memusage();
+}
+exit 0;
+
+1;
diff --git a/middleware/util/recipes-common/engtools/hostdata-collectors/scripts/cleanup-engtools.sh b/middleware/util/recipes-common/engtools/hostdata-collectors/scripts/cleanup-engtools.sh
new file mode 100644
index 0000000..e1f1623
--- /dev/null
+++ b/middleware/util/recipes-common/engtools/hostdata-collectors/scripts/cleanup-engtools.sh
@@ -0,0 +1,57 @@
+#!/bin/bash
+# Purpose:
+# Some of the engtools scripts are not shutting down gracefully.
+
+# Define common utility functions
+TOOLBIN=$(dirname $0)
+. ${TOOLBIN}/engtools_util.sh
+if [ $UID -ne 0 ]; then
+    ERRLOG "Require sudo/root access."
+    exit 1
+fi
+
+declare -a TOOLS
+TOOLS=()
+TOOLS+=('collect-engtools.sh')
+TOOLS+=('ceph.sh')
+TOOLS+=('diskstats.sh')
+TOOLS+=('iostat.sh')
+TOOLS+=('rabbitmq.sh')
+TOOLS+=('ticker.sh')
+TOOLS+=('top.sh')
+TOOLS+=('memstats.sh')
+TOOLS+=('netstats.sh')
+TOOLS+=('postgres.sh')
+TOOLS+=('vswitch.sh')
+TOOLS+=('filestats.sh')
+TOOLS+=('live_stream.py')
+
+LOG "Cleanup engtools:"
+
+# Brute force methods (assume trouble with: service collect-engtools.sh stop)
+# ( be sure not to clobber /etc/init.d/collect-engtools.sh )
+LOG "kill processes brute force"
+pids=( $(pidof -x /usr/local/bin/collect-engtools.sh) )
+if [ ${#pids[@]} -ne 0 ]
+then
+    LOG "killing: ${pids[@]}"
+    for pid in ${pids[@]}
+    do
+        LOG "kill: [ ${pid} ] "
+        pkill -KILL -P ${pid}
+        kill -9 ${pid}
+    done
+    pkill -KILL iostat
+    pkill -KILL top
+else
+    LOG "no pids found"
+fi
+
+LOG "remove pidfiles"
+for TOOL in "${TOOLS[@]}"
+do
+    rm -f -v /var/run/${TOOL}.pid
+done
+LOG "done"
+
+exit 0
diff --git a/middleware/util/recipes-common/engtools/hostdata-collectors/scripts/collect-engtools.service b/middleware/util/recipes-common/engtools/hostdata-collectors/scripts/collect-engtools.service
new file mode 100644
index 0000000..9a68b2a
--- /dev/null
+++ b/middleware/util/recipes-common/engtools/hostdata-collectors/scripts/collect-engtools.service
@@ -0,0 +1,14 @@
+[Unit]
+Description=Engineering data collection tools to monitor host performance
+After=network.service
+
+[Service]
+Type=forking
+ExecStart=/etc/init.d/collect-engtools.sh start
+ExecStop=/etc/init.d/collect-engtools.sh stop
+ExecReload=/etc/init.d/collect-engtools.sh reload
+PIDFile=/var/run/collect-engtools.sh.pid
+Restart=always
+
+[Install]
+WantedBy=multi-user.target
diff --git a/middleware/util/recipes-common/engtools/hostdata-collectors/scripts/collect-engtools.sh b/middleware/util/recipes-common/engtools/hostdata-collectors/scripts/collect-engtools.sh
new file mode 100644
index 0000000..908c2b7
--- /dev/null
+++ b/middleware/util/recipes-common/engtools/hostdata-collectors/scripts/collect-engtools.sh
@@ -0,0 +1,334 @@
+#!/bin/bash
+# Usage:
+# collect-engtools.sh [-f] [-p <period_mins>] [-i <interval_seconds>] [-c <cpulist>] [-h]
+
+# Define common utility functions
+TOOLBIN=$(dirname $0)
+. ${TOOLBIN}/engtools_util.sh
+
+# ENABLE DEBUG (0=disable, 1=enable)
+OPT_DEBUG=0
+
+# Set options for a long soak (vs. a shorter collection)
+#OPT_SOAK=0 # long soak
+OPT_SOAK=1 # few hour soak
+#OPT_SOAK=2 # < hour soak
+
+# Define command to set nice + ionice
+CMD_IDLE=$( cmd_idle_priority )
+
+# Purge configuration options
+# - how much data may be created per cycle
+PURGE_HEADROOM_MB=100
+# - how much remaining space to leave
+PURGE_HEADROOM_PERCENT=15
+# - maximum size of data collection
+PURGE_MAXUSAGE_MB=1000
+
+# Affine to pinned cores
+AFFINE_PINNED=1
+
+# Line-buffer stream output (instead of buffered)
+STDBUF="stdbuf -oL"
+
+# Define some common durations
+DUR_60MIN_IN_SEC=$[60*60]
+DUR_30MIN_IN_SEC=$[30*60]
+DUR_15MIN_IN_SEC=$[15*60]
+DUR_10MIN_IN_SEC=$[10*60]
+DUR_5MIN_IN_SEC=$[5*60]
+DUR_1MIN_IN_SEC=$[1*60]
+
+# Global variables
+declare -a parallel_outfiles
+declare df_size_bytes
+declare df_avail_bytes
+declare du_used_bytes
+declare tgt_avail_bytes
+declare tgt_used_bytes
+
+# do_parallel_commands - launch parallel tools with separate output files
+function do_parallel_commands()
+{
+    parallel_outfiles=()
+    for elem in "${tlist[@]}"
+    do
+        tool=""; period=""; repeat=""; interval=""
+        my_hash="elem[*]"
+        local ${!my_hash}
+        if [ ! 
-z "${name}" ]; then + fname="${TOOL_DEST_DIR}/${HOSTNAME}_${timestamp}_${name}" + parallel_outfiles+=( $fname ) + LOG "collecting ${tool}, ${interval} second intervals, to: ${fname}" + if [ ! -z "${period}" ]; then + ${STDBUF} ${tool} -p ${period} -i ${interval} > ${fname} 2>/dev/null & + elif [ ! -z "${repeat}" ]; then + ${STDBUF} ${tool} --repeat=${repeat} --delay=${interval} > ${fname} 2>/dev/null & + fi + else + # run without file output (e.g., ticker) + ${STDBUF} ${tool} -p ${period} -i ${interval} 2>/dev/null & + fi + done +} + +# get_current_avail_usage() - get output destination file-system usage and +# availability. +# - updates: df_size_bytes, df_avail_bytes, du_used_bytes +function get_current_avail_usage() +{ + local -a df_arr_bytes=( $(df -P --block-size=1 ${TOOL_DEST_DIR} | awk 'NR==2 {print $2, $4}') ) + df_size_bytes=${df_arr_bytes[0]} + df_avail_bytes=${df_arr_bytes[1]} + du_used_bytes=$(du --block-size=1 ${TOOL_DEST_DIR} | awk 'NR==1 {print $1}') +} + +# purge_oldest_files() - remove oldest files based on file-system available space, +# and maximum collection size +function purge_oldest_files() +{ + # get current file-system usage + get_current_avail_usage + msg=$(printf "avail %d MB, headroom %d MB; used %d MB, max %d MB" \ + $[$df_avail_bytes/1024/1024] $[$tgt_avail_bytes/1024/1024] \ + $[$du_used_bytes/1024/1024] $[$tgt_used_bytes/1024/1024]) + LOG "usage: ${msg}" + + if [[ $df_avail_bytes -lt $tgt_avail_bytes ]] || \ + [[ $du_used_bytes -gt $tgt_used_bytes ]]; then + # wait for compression to complete + wait + + get_current_avail_usage + if [[ $df_avail_bytes -lt $tgt_avail_bytes ]]; then + msg=$(printf "purge: avail %d MB < target %d MB" \ + $[$df_avail_bytes/1024/1024] $[$tgt_avail_bytes/1024/1024] ) + LOG "purge: ${msg}" + fi + if [[ $du_used_bytes -gt $tgt_used_bytes ]]; then + msg=$(printf "purge: used %d MB > target %d MB" \ + $[$du_used_bytes/1024/1024] $[$tgt_used_bytes/1024/1024] ) + LOG "purge: ${msg}" + fi + else + return + fi + + # remove files in oldest time sorted order until we meet usage targets, + # incrementally updating usage as we remove files + for file in $( ls -rt ${TOOL_DEST_DIR}/${HOSTNAME}_* 2>/dev/null ) + do + if [[ $df_avail_bytes -ge $tgt_avail_bytes ]] && \ + [[ $du_used_bytes -le $tgt_used_bytes ]]; then + break + fi + + if [ ${OPT_DEBUG} -eq 1 ]; then + msg="purge: file=$file" + if [[ $df_avail_bytes -lt $tgt_avail_bytes ]]; then + msg="${msg}, < AVAIL" + fi + if [[ $du_used_bytes -gt $tgt_used_bytes ]]; then + msg="${msg}, > MAXUSAGE" + fi + LOG "${msg}" + fi + + sz_bytes=$(stat --printf="%s" $file) + ((df_avail_bytes += sz_bytes)) + ((du_used_bytes -= sz_bytes)) + rm -fv ${file} + done +} + +#------------------------------------------------------------------------------- +# MAIN Program: +#------------------------------------------------------------------------------- +# Read configuration variable file if it is present +NAME=collect-engtools.sh +[ -r /etc/default/$NAME ] && .
/etc/default/$NAME + +# Initialize tool +tools_init + +# Parse input options +tools_parse_options "${@}" + +# Set affinity of current script +CPULIST="" + +# Affine tools to NOVA pinned cores (i.e., non-cpu 0) +# - remove interference with cpu 0 +if [ "${AFFINE_PINNED}" -eq 1 ]; then + NOVA_CONF=/etc/nova/compute_extend.conf + if [ -f "${NOVA_CONF}" ]; then + source "${NOVA_CONF}" + CPULIST=${compute_pinned_cpulist} + else + CPULIST="" + fi +fi +set_affinity ${CPULIST} + +# Define output directory +if [[ "${HOSTNAME}" =~ "controller-" ]]; then + TOOL_DEST_DIR=/scratch/syseng_data/${HOSTNAME} +elif [[ "${HOSTNAME}" =~ "compute-" ]]; then + TOOL_DEST_DIR=/tmp/syseng_data/${HOSTNAME} +else + TOOL_DEST_DIR=/tmp/syseng_data/${HOSTNAME} +fi +mkdir -p ${TOOL_DEST_DIR} + +# Define daemon log output +timestamp=$( date +"%Y-%0m-%0e_%H%M" ) +DAEMON_OUT="${TOOL_DEST_DIR}/${HOSTNAME}_${timestamp}_${TOOLNAME}.log" + +# Redirect stdout and append to log if not connected to TTY +if test ! -t 1 ; then + exec 1>> ${DAEMON_OUT} +fi + +# Get current availability and usage +get_current_avail_usage + +# Calculate disk usage and availability purge targets +df_offset_bytes=$[$PURGE_HEADROOM_MB*1024*1024] +tgt_used_bytes=$[$PURGE_MAXUSAGE_MB*1024*1024] +((tgt_avail_bytes = df_size_bytes/100*PURGE_HEADROOM_PERCENT + df_offset_bytes)) + +# Set granularity based on duration +if [ $PERIOD_MIN -le 30 ]; then + GRAN_MIN=5 +else + GRAN_MIN=60 +fi + +# Adjust repeats and intervals based on GRAN_MIN granularity +PERIOD_MIN=$[($PERIOD_MIN+(GRAN_MIN-1))/GRAN_MIN*GRAN_MIN] +((REPEATS = PERIOD_MIN/GRAN_MIN)) +GRAN_MIN_IN_SEC=$[$GRAN_MIN*60] +if [ ${INTERVAL_SEC} -gt ${GRAN_MIN_IN_SEC} ]; then + INTERVAL_SEC=${GRAN_MIN_IN_SEC} +fi + +# Define tools and options +# [ JGAULD - need config file for customization; long soak vs specific tools ] +# [ Ideally sample < 5 second granularity, but files get big, and tool has cpu overhead ] +# [ Need < 5 second granularity to see cache pressure/flush issues ] +# [ Desire 60 sec interval for soak ] +if [ ${OPT_SOAK} -eq 1 ]; then + # Desire 60 second or greater interval for longer term data collections, + # otherwise collection files get too big. 
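+ # (Worked example of the GRAN_MIN rounding above, for illustration: with
+ # PERIOD_MIN=7 and GRAN_MIN=5, $[(7+4)/5*5] rounds the period up to 10
+ # minutes, and REPEATS becomes 10/5 = 2 collection cycles.)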
+ schedtop_interval=20 + occtop_interval=60 + memtop_interval=60 + netstats_interval=60 + # JGAULD: temporarily increase frequency to 1 min + postgres_interval=${DUR_1MIN_IN_SEC} + #postgres_interval=${DUR_15MIN_IN_SEC} + rabbitmq_interval=${DUR_15MIN_IN_SEC} + ceph_interval=${DUR_15MIN_IN_SEC} + diskstats_interval=${DUR_15MIN_IN_SEC} + memstats_interval=${DUR_15MIN_IN_SEC} + filestats_interval=${DUR_15MIN_IN_SEC} +elif [ ${OPT_SOAK} -eq 2 ]; then + # Assume much shorter collection (eg, < hours) + schedtop_interval=2 # i.e., 2 second interval + occtop_interval=2 # i.e., 2 second interval + memtop_interval=1 # i.e., 1 second interval + netstats_interval=30 # i.e., 30 second interval + postgres_interval=${DUR_5MIN_IN_SEC} + rabbitmq_interval=${DUR_5MIN_IN_SEC} + ceph_interval=${DUR_5MIN_IN_SEC} + diskstats_interval=${DUR_5MIN_IN_SEC} + memstats_interval=${DUR_5MIN_IN_SEC} + filestats_interval=${DUR_5MIN_IN_SEC} +else + # Assume shorter collection (eg, < a few hours) + schedtop_interval=5 # i.e., 5 second interval + occtop_interval=5 # i.e., 5 second interval + memtop_interval=5 # i.e., 5 second interval + netstats_interval=30 # i.e., 30 second interval + postgres_interval=${DUR_5MIN_IN_SEC} + rabbitmq_interval=${DUR_5MIN_IN_SEC} + ceph_interval=${DUR_5MIN_IN_SEC} + diskstats_interval=${DUR_5MIN_IN_SEC} + memstats_interval=${DUR_5MIN_IN_SEC} + filestats_interval=${DUR_5MIN_IN_SEC} +fi +schedtop_repeat=$[ $PERIOD_MIN * 60 / $schedtop_interval ] +occtop_repeat=$[ $PERIOD_MIN * 60 / $occtop_interval ] +memtop_repeat=$[ $PERIOD_MIN * 60 / $memtop_interval ] +netstats_repeat=$[ $PERIOD_MIN * 60 / $netstats_interval ] + +# Disable use of INTERVAL_SEC sample interval +OPT_USE_INTERVALS=0 + +# Define parallel engtools configuration +# - tool name, filename, and collection interval attributes +BINDIR=/usr/bin +LBINDIR=/usr/local/bin + +while IFS='' read -r line || [[ -n "$line" ]]; do + if [[ $line =~ 'ENABLE_STATIC_COLLECTION'* ]]; then + static_collection=${line:25:1} + fi +done < /etc/engtools/engtools.conf + +declare -a tlist +if [[ $static_collection == "Y" ]] || [[ $static_collection == "y" ]]; then + tlist+=( "tool=${LBINDIR}/top.sh name=top period=${PERIOD_MIN} interval=${DUR_1MIN_IN_SEC}" ) + tlist+=( "tool=${LBINDIR}/iostat.sh name=iostat period=${PERIOD_MIN} interval=${DUR_1MIN_IN_SEC}" ) + tlist+=( "tool=${LBINDIR}/netstats.sh name=netstats period=${PERIOD_MIN} interval=${netstats_interval}" ) + tlist+=( "tool=${BINDIR}/occtop name=occtop repeat=${occtop_repeat} interval=${occtop_interval}" ) + tlist+=( "tool=${BINDIR}/memtop name=memtop repeat=${memtop_repeat} interval=${memtop_interval}" ) + tlist+=( "tool=${BINDIR}/schedtop name=schedtop repeat=${schedtop_repeat} interval=${schedtop_interval}" ) + tlist+=( "tool=${LBINDIR}/diskstats.sh name=diskstats period=${PERIOD_MIN} interval=${diskstats_interval}" ) + tlist+=( "tool=${LBINDIR}/memstats.sh name=memstats period=${PERIOD_MIN} interval=${memstats_interval}" ) + tlist+=( "tool=${LBINDIR}/filestats.sh name=filestats period=${PERIOD_MIN} interval=${filestats_interval}" ) + if [[ "${HOSTNAME}" =~ "controller-" ]]; then + tlist+=( "tool=${LBINDIR}/ceph.sh name=ceph period=${PERIOD_MIN} interval=${ceph_interval}" ) + tlist+=( "tool=${LBINDIR}/postgres.sh name=postgres period=${PERIOD_MIN} interval=${postgres_interval}" ) + # tlist+=( "tool=${LBINDIR}/rabbitmq.sh name=rabbitmq period=${PERIOD_MIN} interval=${rabbitmq_interval}" ) + elif [[ "${HOSTNAME}" =~ "compute-" ]]; then + tlist+=( "tool=${LBINDIR}/vswitch.sh name=vswitch 
period=${PERIOD_MIN} interval=${DUR_1MIN_IN_SEC}" ) + fi +fi + +# ticker - shows progress on the screen +tlist+=( "tool=${LBINDIR}/ticker.sh name= period=${PERIOD_MIN} interval=${DUR_1MIN_IN_SEC}" ) + + +#------------------------------------------------------------------------------- +# Main loop +#------------------------------------------------------------------------------- +OPT_DEBUG=0 +REP=0 +while [[ ${TOOL_USR1_SIGNAL} -eq 0 ]] && + [[ ${OPT_FOREVER} -eq 1 || ${REP} -lt ${REPEATS} ]] +do + # increment loop counter + ((REP++)) + + # purge oldest files + purge_oldest_files + + # define filename timestamp + timestamp=$( date +"%Y-%0m-%0e_%H%M" ) + + # collect tools in parallel to separate output files + LOG "collecting ${TOOLNAME} at ${timestamp} for ${PERIOD_MIN} mins, repeat=${REP}" + do_parallel_commands + wait + + # Compress latest increment + LOG "compressing: ${parallel_outfiles[@]}" + ${CMD_IDLE} bzip2 -q -f ${parallel_outfiles[@]} 2>/dev/null & +done + +# wait for compression to complete +wait + +tools_cleanup 0 +exit 0 diff --git a/middleware/util/recipes-common/engtools/hostdata-collectors/scripts/diskstats.sh b/middleware/util/recipes-common/engtools/hostdata-collectors/scripts/diskstats.sh new file mode 100644 index 0000000..376dbf1 --- /dev/null +++ b/middleware/util/recipes-common/engtools/hostdata-collectors/scripts/diskstats.sh @@ -0,0 +1,122 @@ +#!/bin/bash +# Usage: diskstats.sh +TOOLBIN=$(dirname $0) + +# Initialize tools environment variables, and define common utility functions +. ${TOOLBIN}/engtools_util.sh +tools_init +if [ $? -ne 0 ]; then + echo "FATAL, tools_init - could not setup environment" + exit $? +fi + +# Enable use of INTERVAL_SEC sample interval +OPT_USE_INTERVALS=1 + +# Print disk summary +function print_disk() +{ + print_separator + TOOL_HIRES_TIME + + # NOTES: + # --total (grand-total) is a new option, but don't necessarily want to add tmpfs + # or dummy filesystems. 
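+ # (Illustration of the -h/-H distinction noted below: a 1 TB partition
+ # prints as roughly 932G under binary -h, but as 1.0T under SI -H.)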
+ # - use -H to print in SI (eg, GB, vs GiB) + # - can use -a to print all filesystems including dummy filesystems, but then + # there can be double-counting: + print_separator + cmd='df -h -H -T --local -t ext2 -t ext3 -t ext4 -t xfs --total' + ${ECHO} "Disk space usage ext2,ext3,ext4,xfs,tmpfs (SI):" + ${ECHO} "# ${cmd}" ; ${cmd} ; ${ECHO} + + print_separator + cmd='df -h -H -T --local -i -t ext2 -t ext3 -t ext4 -t xfs --total' + ${ECHO} "Disk inodes usage ext2,ext3,ext4,xfs,tmpfs (SI):" + ${ECHO} "# ${cmd}" ; ${cmd} ; ${ECHO} + + print_separator + cmd='drbd-overview' + ${ECHO} "drbd disk usage and status:" + ${ECHO} "# ${cmd}" ; ${cmd} ; ${ECHO} + + print_separator + cmd='lvs' + ${ECHO} "logical volumes usage and status:" + ${ECHO} "# ${cmd}" ; ${cmd} ; ${ECHO} + + print_separator + cmd='pvs' + ${ECHO} "physical volumes usage and status:" + ${ECHO} "# ${cmd}" ; ${cmd} ; ${ECHO} + + print_separator + cmd='vgs' + ${ECHO} "volume groups usage and status:" + ${ECHO} "# ${cmd}" ; ${cmd} ; ${ECHO} +} + +# Print disk static summary +function print_disk_static() +{ + print_separator + cmd='cat /proc/scsi/scsi' + ${ECHO} "Attached devices: ${cmd}" + ${cmd} + ${ECHO} + + # fdisk - requires sudo/root + print_separator + cmd='fdisk -l' + if [ $UID -eq 0 ]; then + ${ECHO} "List disk devices: ${cmd}" + ${cmd} + else + WARNLOG "Skipping cmd=${cmd}, root/sudo passwd required" + fi + ${ECHO} + + # parted - requires sudo/root + print_separator + cmd='parted -l' + if [ $UID -eq 0 ]; then + ${ECHO} "List disk devices: ${cmd}" + ${cmd} + else + WARNLOG "Skipping cmd=${cmd}, root/sudo passwd required" + fi + ${ECHO} +} + +#------------------------------------------------------------------------------- +# MAIN Program: +#------------------------------------------------------------------------------- +# Parse input options +tools_parse_options "${@}" + +# Set affinity of current script +CPULIST="" +set_affinity ${CPULIST} + +LOG "collecting ${TOOLNAME} for ${PERIOD_MIN} minutes, with ${INTERVAL_SEC} second sample intervals." + +# Print tools generic tools header +tools_header + +# Print static disk information +print_disk_static + +# Calculate number of sample repeats based on overall interval and sampling interval +((REPEATS = PERIOD_MIN * 60 / INTERVAL_SEC)) + +for ((rep=1; rep <= REPEATS ; rep++)) +do + print_disk + sleep ${INTERVAL_SEC} +done +print_disk +LOG "done" + +# normal program exit +tools_cleanup 0 +exit 0 diff --git a/middleware/util/recipes-common/engtools/hostdata-collectors/scripts/engtools_util.sh b/middleware/util/recipes-common/engtools/hostdata-collectors/scripts/engtools_util.sh new file mode 100644 index 0000000..311ccd2 --- /dev/null +++ b/middleware/util/recipes-common/engtools/hostdata-collectors/scripts/engtools_util.sh @@ -0,0 +1,478 @@ +#!/bin/bash +TOOLNAME=$(basename $0) +PIDFILE=/var/run/${TOOLNAME}.pid +TOOL_DEBUG=1 +TOOL_EXIT_SIGNAL=0 +TOOL_USR1_SIGNAL=0 +TOOL_USR2_SIGNAL=0 +TOOL_TTY=0 +if tty 1>/dev/null ; then + TOOL_TTY=1 +fi + +# [ JGAULD : SHOULD RENAME TO TOOL_X ] +OPT_USE_INTERVALS=0 +OPT_FOREVER=0 +PERIOD_MIN=5 +INTERVAL_SEC=60 +CPULIST=0 + +# Include lsb functions +if [ -d /lib/lsb ]; then +. /lib/lsb/init-functions +else +. 
/etc/init.d/functions +fi +# Lightweight replacement for pidofproc -p +function check_pidfile () +{ + local pidfile pid + + OPTIND=1 + while getopts p: opt ; do + case "$opt" in + p) + pidfile="$OPTARG" + ;; + esac + done + shift $(($OPTIND - 1)) + + read pid < "${pidfile}" + if [ -n "${pid:-}" ]; then + if $(kill -0 "${pid:-}" 2> /dev/null); then + echo "$pid" + return 0 + elif ps "${pid:-}" >/dev/null 2>&1; then + echo "$pid" + return 0 # program is running, but not owned by this user + else + return 1 # program is dead and /var/run pid file exists + fi + fi +} + +# tools_init - initialize tool resources +function tools_init () +{ + local rc=0 + local error=0 + TOOLNAME=$(basename $0) + + # Check for sufficient priviledges + if [ $UID -ne 0 ]; then + ERRLOG "${NAME} requires sudo/root access." + return 1 + fi + + # Check for essential binaries + ECHO=$(which echo 2>/dev/null) + rc=$? + if [ $rc -ne 0 ]; then + ECHO=echo # use bash built-in echo + ${ECHO} "FATAL, 'echo' not found, rc=$rc"; + error=$rc + fi + DATE=$(which date 2>/dev/null) + rc=$? + if [ $rc -ne 0 ]; then + ${ECHO} "FATAL, 'date' not found, rc=$rc"; + error=$rc + fi + + # Check for standard linux binaries, at least can use LOG functions now + # - these are used in tools_header + CAT=$(which cat 2>/dev/null) + rc=$? + if [ $rc -ne 0 ]; then + ERRLOG "'cat' not found, rc=$rc"; + error=$rc + fi + + ARCH=$(which arch 2>/dev/null) + rc=$? + if [ $rc -ne 0 ]; then + ERRLOG "'arch' not found, rc=$rc"; + error=$rc + fi + + SED=$(which sed 2>/dev/null) + rc=$? + if [ $rc -ne 0 ]; then + ERRLOG "'sed' not found, rc=$rc"; + error=$rc + fi + + GREP=$(which grep 2>/dev/null) + rc=$? + if [ $rc -ne 0 ]; then + ERRLOG "'grep' not found, rc=$rc"; + error=$rc + fi + + WC=$(which wc 2>/dev/null) + rc=$? + if [ $rc -ne 0 ]; then + ERRLOG "'wc' not found, rc=$rc"; + error=$rc + fi + + UNAME=$(which uname 2>/dev/null) + rc=$? + if [ $rc -ne 0 ]; then + ERRLOG "'uname' not found, rc=$rc"; + error=$rc + fi + + SORT=$(which sort 2>/dev/null) + rc=$? + if [ $rc -ne 0 ]; then + ERRLOG "'sort' not found, rc=$rc"; + error=$rc + fi + + TR=$(which tr 2>/dev/null) + rc=$? + if [ $rc -ne 0 ]; then + ERRLOG "'tr' not found, rc=$rc"; + error=$rc + fi + + AWK=$(which awk 2>/dev/null) + rc=$? + if [ $rc -ne 0 ]; then + ERRLOG "'awk' not found, rc=$rc"; + error=$rc + fi + + PKILL=$(which pkill 2>/dev/null) + rc=$? + if [ $rc -ne 0 ]; then + ERRLOG "'pkill' not found, rc=$rc"; + error=$rc + fi + + LS=$(which ls 2>/dev/null) + rc=$? + if [ $rc -ne 0 ]; then + ERRLOG "'ls' not found, rc=$rc"; + error=$rc + fi + + # The following block is needed for LSB systems such as Windriver Linux. + # The utility is not available on CentOS so comment it out. + # Generic utility, but may not be available + # LSB=$(which lsb_release 2>/dev/null) + # rc=$? 
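+ # (Sketch of how the check_pidfile helper above is typically consumed,
+ # assuming a pidfile already written by tools_init:
+ # if pid=$(check_pidfile -p "${PIDFILE}"); then LOG "running as ${pid}"; fi)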
+ # if [ $rc -ne 0 ]; then + # WARNLOG "'lsb_release' not found, rc=$rc"; + # fi + + # Let parent program decide what to do with the errors, + # give ominous warning + if [ $error -eq 1 ]; then + WARNLOG "possibly cannot continue, missing linux binaries" + fi + + # Check if tool was previously running + if [ -e ${PIDFILE} ]; then + # [ JGAULD - remove pidofproc() / LSB compatibility issue ] + if check_pidfile -p "${PIDFILE}" >/dev/null; then + ERRLOG "${PIDFILE} exists and ${TOOLNAME} is running" + return 1 + else + # remove pid file + WARNLOG "${PIDFILE} exists but ${TOOLNAME} is not running; cleaning up" + rm -f ${PIDFILE} + fi + fi + + # Create pid file + echo $$ > ${PIDFILE} + + # Setup trap handler - these signals trigger child shutdown and cleanup + trap tools_exit_handler INT HUP TERM EXIT + trap tools_usr1_handler USR1 + trap tools_usr2_handler USR2 + + return ${rc} +} + +# tools_cleanup() - terminate child processes +function tools_cleanup() { + # restore signal handling to default behaviour + trap - INT HUP TERM EXIT + trap - USR1 USR2 + + local VERBOSE_OPT='' + if [ "$1" -ne "0" ]; then + LOG "cleanup invoked with code: $1" + if [ ${TOOL_DEBUG} -ne 0 ]; then + VERBOSE_OPT='-v' + fi + fi + + + # stop all processes launched from this process + pkill -TERM -P $$ + if [ "$1" -ne "0" ]; then + sleep 1 + fi + + # OK, if the above didn't work, use force + pkill -KILL -P $$ + + # remove pid file + if [ -e ${PIDFILE} ]; then + rm -f ${VERBOSE_OPT} ${PIDFILE} + fi + exit $1 +} + +# tools_exit_handler() - exit handler routine +function tools_exit_handler() { + TOOL_EXIT_SIGNAL=1 + tools_cleanup 128 +} +# tools_usr1_handler() - USR1 handler routine +function tools_usr1_handler() { + TOOL_USR1_SIGNAL=1 + LOG "caught USR1" +} +# tools_usr2_handler() - USR2 handler routine +function tools_usr2_handler() { + TOOL_USR2_SIGNAL=1 + LOG "caught USR2" +} + +# LOG(), WARNLOG(), ERRLOG() - simple print log functions (not logger) +function LOG () +{ + local tstamp_H=$( date +"%Y-%0m-%0e %H:%M:%S" ) + echo "${tstamp_H} ${HOSTNAME} $0($$): $@"; +} +function LOG_NOCR () +{ + local tstamp_H=$( date +"%Y-%0m-%0e %H:%M:%S" ) + echo -n "${tstamp_H} ${HOSTNAME} $0($$): $@"; +} +function WARNLOG () { LOG "WARN $@"; } +function ERRLOG () { LOG "ERROR $@"; } + +# TOOL_HIRES_TIME() - easily parsed date/timestamp and hi-resolution uptime +function TOOL_HIRES_TIME() +{ + echo "time: " $( ${DATE} +"%a %F %H:%M:%S.%N %Z %z" ) "uptime: " $( cat /proc/uptime ) +} + +# set_affinity() - set affinity for current script if a CPULIST is defined +function set_affinity() { + local CPULIST=$1 + if [ -z "${CPULIST}" ]; then + return + fi + + # Set cpu affinity for current program + local TASKSET=$(which taskset 2>/dev/null) + if [ -x "${TASKSET}" ]; then + ${TASKSET} -pc ${CPULIST} $$ 2>/dev/null + fi +} + +# cmd_idle_priority() - command to set nice + ionice +function cmd_idle_priority() { + local NICE="" + local IONICE="" + + NICE=$( which nice 2>/dev/null ) + if [ $? -eq 0 ]; then + NICE="${NICE} -n 19" + else + NICE="" + fi + IONICE=$( which ionice 2>/dev/null ) + if [ $?
-eq 0 ]; then + IONICE="${IONICE} -c 3" + else + IONICE="" + fi + echo "${NICE} ${IONICE}" +} + + +# print_separator() - print a horizontal separation line '\u002d' is '-' +function print_separator () { + printf '\u002d%.s' {1..80} + printf '\n' +} + +# tools_header() - print out common GenWare tools header +function tools_header() { + local TOOLNAME=$(basename $0) + + # Get timestamp + #local tstamp=$( date +"%Y-%0m-%0e %H:%M:%S" 2>/dev/null ) + local tstamp=$( date --rfc-3339=ns | cut -c1-23 2>/dev/null ) + + # Linux Generic + local UPTIME=/proc/uptime + + # Get number of online cpus + local CPUINFO=/proc/cpuinfo + local online_cpus=$( cat ${CPUINFO} | grep -i ^processor | wc -l 2>/dev/null ) + + # Get load average, run-queue size, and number of threads + local LOADAVG=/proc/loadavg + local LDAVG=( `cat ${LOADAVG} | sed -e 's#[/]# #g' 2>/dev/null` ) + + # Get current architecture + local arch=$( uname -m ) + + # Determine processor name (there are many different formats... *sigh* ) + # - build up info from multiple lines + local processor='unk' + local NAME=$( cat ${CPUINFO} | grep \ + -e '^cpu\W\W:' \ + -e ^'cpu model' \ + -e ^'model name' \ + -e ^'system type' \ + -e ^Processor \ + -e ^[Mm]achine | \ + sort -u | awk 'BEGIN{FS=":";} {print $2;}' | \ + tr '\n' ' ' | tr -s [:blank:] 2>/dev/null ) + if [ ! -z "${NAME}" ]; then + processor=${NAME} + fi + + # Determine processor speed (abort grep after first match) + local speed='unk' + local BOGO=$( cat ${CPUINFO} | grep -m1 -e ^BogoMIPS -e ^bogomips | \ + awk 'BEGIN{FS=":";} {printf "%.1f", $2;}' 2>/dev/null ) + local MHZ=$( cat ${CPUINFO} | grep -m1 -e ^'cpu MHz' -e ^clock | \ + awk 'BEGIN{FS=":";} {printf "%.1f", $2;}' 2>/dev/null ) + local MHZ2=$( cat ${CPUINFO} | grep -m1 -e ^Cpu0ClkTck -e ^'cycle frequency' | \ + awk 'BEGIN{FS=":";} {printf "%.1f", $2/1.0E6;}' 2>/dev/null ) + if [ ! -z "${MHZ}" ]; then + speed=${MHZ} + elif [ ! -z "${MHZ2}" ]; then + speed=${MHZ2} + elif [ ! 
-z "${BOGO}" ]; then + speed=${BOGO} + fi + + # Determine OS and kernel version + local os_name=$( uname -s 2>/dev/null ) + local os_release=$( uname -r 2>/dev/null ) + + declare -a arr + + local dist_id="" + # Determine OS distribution ID + if [ "${lsb_pres}" == "yes" ]; then + arr=( $( lsb_release -i 2>/dev/null ) ) + dist_id=${arr[2]} + else + local dist_id=$(cat /etc/centos-release | awk '{print $1}' 2>/dev/null) + fi + + local dist_rel="" + if [ "${lsb_pres}" == "yes" ]; then + # Determine OS distribution release + arr=( $( cat /proc/version | awk '{print $3}' 2>/dev/null ) ) + local dist_rel=${arr[1]} + else + local dist_rel=$(cat /etc/centos-release | awk '{print $4}' 2>/dev/null) + fi + # Print generic header + echo "${TOOLNAME} -- ${tstamp} load average:${LDAVG[0]}, ${LDAVG[1]}, ${LDAVG[2]} runq:${LDAVG[3]} nproc:${LDAVG[4]}" + echo " host:${HOSTNAME} Distribution:${dist_id} ${dist_rel} ${os_name} ${os_release}" + echo " arch:${arch} processor:${processor} speed:${speed} MHz CPUs:${online_cpus}" +} + + + + +# tools_usage() - show generic tool usage +function tools_usage() { + if [ ${OPT_USE_INTERVALS} -eq 1 ]; then + echo "usage: ${TOOLNAME} [-f] [-p <period_mins>] [-i <interval_seconds>] [-c <cpulist>] [-h]" + else + echo "Usage: ${TOOLNAME} [-f] [-p <period_mins>] [-c <cpulist>] [-h]" + fi +} + +# tools_print_help() - print generic tool help +function tools_print_help() { + tools_usage + echo + echo "Options:"; + echo " -f : collect forever : default: none" + echo " -p <period_mins> : overall collection period (minutes) : default: ${DEFAULT_PERIOD_MIN}" + if [ ${OPT_USE_INTERVALS} -eq 1 ]; then + echo " -i <interval_seconds> : sample interval (seconds) : default: ${DEFAULT_INTERVAL_SEC}" + fi + echo " -c <cpulist> : cpu list where tool runs (e.g., 0-1,8) : default: none" + echo + if [ ${OPT_USE_INTERVALS} -eq 1 ]; then + echo "Example: collect 5 minute period, sample every 30 seconds interval" + echo " ${TOOLNAME} -p 5 -i 30" + else + echo "Example: collect 5 minute period" + echo " ${TOOLNAME} -p 5" + fi +} + +# tools_parse_options() -- parse common options for tools scripts +function tools_parse_options() { + # check for no arguments, print usage + if [ $# -eq "0" ]; then + tools_usage + tools_cleanup 0 + exit 0 + fi + + # parse the input arguments + while getopts "fp:i:c:h" Option + do + case $Option in + f) + OPT_FOREVER=1 + PERIOD_MIN=60 + ;; + p) PERIOD_MIN=$OPTARG ;; + i) + OPT_USE_INTERVALS=1 + INTERVAL_SEC=$OPTARG + ;; + c) CPULIST=$OPTARG ;; + h) + tools_print_help + tools_cleanup 0 + exit 0 + ;; + *) + tools_usage + tools_cleanup 0 + exit 0 + ;; + esac + done + + # validate input arguments + PERIOD_MAX=$[4*24*60] + INTERVAL_MAX=$[60*60] + + error=0 + if [[ ${PERIOD_MIN} -lt 1 || ${PERIOD_MIN} -gt ${PERIOD_MAX} ]]; then + echo "-p <period_mins> must be > 0 and <= ${PERIOD_MAX}." + error=1 + fi + if [[ ${INTERVAL_SEC} -lt 1 || ${INTERVAL_SEC} -gt ${INTERVAL_MAX} ]]; then + echo "-i <interval_seconds> must be > 0 and <= ${INTERVAL_MAX}." + error=1 + fi + if [ ${error} -eq 1 ]; then + tools_cleanup 0 + exit 1 + fi +} diff --git a/middleware/util/recipes-common/engtools/hostdata-collectors/scripts/filestats.sh b/middleware/util/recipes-common/engtools/hostdata-collectors/scripts/filestats.sh new file mode 100644 index 0000000..19d38a7 --- /dev/null +++ b/middleware/util/recipes-common/engtools/hostdata-collectors/scripts/filestats.sh @@ -0,0 +1,98 @@ +#!/bin/bash +# Usage: filestats.sh [-p <period_mins>] [-i <interval_seconds>] [-c <cpulist>] [-h] +TOOLBIN=$(dirname $0) + +# Initialize tools environment variables, and define common utility functions +. ${TOOLBIN}/engtools_util.sh +tools_init +if [ $?
-ne 0 ]; then + echo "FATAL, tools_init - could not setup environment" + exit $? +fi + +PAGE_SIZE=$(getconf PAGE_SIZE) + +# Enable use of INTERVAL_SEC sample interval +OPT_USE_INTERVALS=1 + + +function print_files() +{ + print_separator + TOOL_HIRES_TIME + + ${ECHO} "# ls -l /proc/*/fd" + sudo ls -l /proc/*/fd 2>/dev/null | awk \ + '$11 ~ /socket/ {a += 1} ; \ + $11 ~ /null/ {b += 1} ; \ + {c += 1} \ + END {\ + {printf "%-10s %-10s %-10s %-10s\n", "TOTAL", "FILES", "SOCKETS", "NULL PIPES"} \ + {printf "%-10s %-10s %-10s %-10s\n", c, c-(a+b) , a, b}}' + + ${ECHO} + + ${ECHO} "# lsof" + printf "%-7s %-7s %-6s %-6s %-6s %-6s %-6s %-6s %-6s %-6s %-6s %-6s %s\n" "PID" "TOTAL" "FD" "U" "W" "R" "CWD" "RTD" "TXT" "MEM" "DEL" "TCP" "CMD" + sudo lsof +c 15| awk '$3 !~ /^[0-9]+/{ {pids[$2]["COMMAND"]=$1}\ + {pids[$2]["PID"]=$2}\ + {pids[$2]["TOTAL"]+=1}\ + {pids[$2]["TCP"]+=($8=="TCP")? 1 : 0}\ + {($4 ~ /^[0-9][0-9]*[urw]/ )? \ + pids[$2][substr($4, length($4),1)]+=1 : pids[$2][$4]+=1} } + END { + { for (i in pids) \ + if(pids[i]["PID"]!="PID") { + {printf "%-7s %-7s %-6s %-6s %-6s %-6s %-6s %-6s %-6s %-6s %-6s %-6s %s\n", \ + pids[i]["PID"], \ + pids[i]["TOTAL"],\ + ((pids[i]["u"]!="")? pids[i]["u"] : 0) + ((pids[i]["w"]!="")? pids[i]["w"] : 0 )+ ((pids[i]["r"]!="")? pids[i]["r"] : 0),\ + (pids[i]["u"]!="")? pids[i]["u"] : 0,\ + (pids[i]["w"]!="")? pids[i]["w"] : 0,\ + (pids[i]["r"]!="")? pids[i]["r"] : 0,\ + (pids[i]["cwd"]!="")? pids[i]["cwd"] : 0,\ + (pids[i]["rtd"]!="")? pids[i]["rtd"] : 0,\ + (pids[i]["txt"]!="")? pids[i]["txt"] : 0,\ + (pids[i]["mem"]!="")? pids[i]["mem"] : 0,\ + (pids[i]["DEL"]!="")? pids[i]["DEL"] : 0,\ + (pids[i]["TCP"]!="")? pids[i]["TCP"] : 0,\ + pids[i]["COMMAND"]} }}}' | sort -n -r -k3 + + ${ECHO} + + ${ECHO} "# lsof -nP +L1" + sudo lsof -nP +L1 + ${ECHO} +} + + + +#------------------------------------------------------------------------------- +# MAIN Program: +#------------------------------------------------------------------------------- +# Parse input options +tools_parse_options "${@}" + +# Set affinity of current script +CPULIST="" +set_affinity ${CPULIST} + +LOG "collecting ${TOOLNAME} for ${PERIOD_MIN} minutes, with ${INTERVAL_SEC} second sample intervals." + +# Print tools generic tools header +tools_header + +# Calculate number of sample repeats based on overall interval and sampling interval +((REPEATS = PERIOD_MIN * 60 / INTERVAL_SEC)) + +for ((rep=1; rep <= REPEATS ; rep++)) +do + print_files + sleep ${INTERVAL_SEC} +done +print_files +LOG "done" + +# normal program exit +tools_cleanup 0 +exit 0 diff --git a/middleware/util/recipes-common/engtools/hostdata-collectors/scripts/init.d/collect-engtools.sh b/middleware/util/recipes-common/engtools/hostdata-collectors/scripts/init.d/collect-engtools.sh new file mode 100644 index 0000000..5bc7b6b --- /dev/null +++ b/middleware/util/recipes-common/engtools/hostdata-collectors/scripts/init.d/collect-engtools.sh @@ -0,0 +1,120 @@ +#!/bin/bash +### BEGIN INIT INFO +# Provides: collect-engtools +# Required-Start: $local_fs $network $syslog postgresql +# Required-Stop: $local_fs $network $syslog postgresql +# Default-Start: 2 3 4 5 +# Default-Stop: 0 1 6 +# Short-Description: initscript to launch engineering tools data collection daemon +# Description: initscript to launch engineering tools data collection daemon +# Blah. 
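+# Note: the systemd unit added earlier (collect-engtools.service) wraps this
+# script's start/stop, so this LSB header mainly serves non-systemd boots;
+# a typical SysV enable sequence, shown for illustration only:
+# chkconfig --add collect-engtools.sh && chkconfig collect-engtools.sh on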
+### END INIT INFO + +PATH=/sbin:/usr/sbin:/bin:/usr/bin +DESC="collect engtools service" +NAME="collect-engtools.sh" +DAEMON=/usr/local/bin/${NAME} +DAEMON_ARGS="-f" +PIDFILE=/var/run/${NAME}.pid +SCRIPTNAME=/etc/init.d/${NAME} +DEFAULTFILE=/etc/default/${NAME} + +# Exit if the package is not installed +[ -x "$DAEMON" ] || exit 0 +. /etc/init.d/functions +# Read configuration variable file if it is present +[ -r $DEFAULTFILE ] && . $DEFAULTFILE + +# Load the VERBOSE setting and other rcS variables +#. /lib/init/vars.sh + +# Define lsb fallback versions of: +# log_daemon_msg(), log_end_msg() +log_daemon_msg() { echo -n "${1:-}: ${2:-}"; } +log_end_msg() { echo "."; } + +# Use lsb functions to perform the operations. +if [ -f /lib/lsb/init-functions ]; then + . /lib/lsb/init-functions +fi + +# Check for sufficient privileges +# [ JGAULD : possibly provide user = 'operator' option instead... ] +if [ $UID -ne 0 ]; then + log_daemon_msg "Starting ${NAME} requires sudo/root access." + exit 1 +fi + +case $1 in + start) + if [ -e ${PIDFILE} ]; then + pid=$(pidof -x ${NAME}) + if test "${pid}" != "" + then + echo_success "${NAME} already running" + exit + fi + fi + + + log_daemon_msg "Starting ${NAME}" + if start-stop-daemon --start --background --quiet --oknodo --pidfile ${PIDFILE} \ + --exec ${DAEMON} -- ${DAEMON_ARGS} ; then + /usr/local/bin/live_stream.py & + log_end_msg 0 + else + log_end_msg 1 + fi + ;; + + stop) + if [ -e ${PIDFILE} ]; then + pids=$(pidof -x ${NAME}) + if [[ ! -z "${pids}" ]] + then + echo_success "Stopping ${NAME} [${pids}]" + start-stop-daemon --stop --quiet --oknodo --pidfile ${PIDFILE} --retry=TERM/3/KILL/5 + # [ JGAULD: none of the following should be necessary ] + /usr/local/bin/cleanup-engtools.sh + else + echo_failure "${NAME} is not running" + fi + else + echo_failure "${PIDFILE} does not exist" + fi + ;; + + restart) + $0 stop && sleep 2 && $0 start + ;; + + status) + if [ -e ${PIDFILE} ]; then + pid=$(pidof -x ${NAME}) + if test "${pid}" != "" + then + echo_success "${NAME} is running" + else + echo_success "${NAME} is not running" + fi + else + echo_success "${NAME} is not running" + fi + ;; + + reload) + if [ -e ${PIDFILE} ]; then + start-stop-daemon --stop --signal USR1 --quiet --pidfile ${PIDFILE} --name ${NAME} + echo_success "${NAME} reloaded successfully" + else + echo_success "${PIDFILE} does not exist" + fi + ;; + + *) + echo "Usage: $0 {start|stop|restart|reload|status}" + exit 2 + ;; +esac + +exit 0 diff --git a/middleware/util/recipes-common/engtools/hostdata-collectors/scripts/iostat.sh b/middleware/util/recipes-common/engtools/hostdata-collectors/scripts/iostat.sh new file mode 100644 index 0000000..04be90c --- /dev/null +++ b/middleware/util/recipes-common/engtools/hostdata-collectors/scripts/iostat.sh @@ -0,0 +1,49 @@ +#!/bin/bash +# Usage: iostat.sh [-p <period_mins>] [-i <interval_seconds>] [-c <cpulist>] [-h] +TOOLBIN=$(dirname $0) + +# Initialize tools environment variables, and define common utility functions +. ${TOOLBIN}/engtools_util.sh +tools_init +if [ $? -ne 0 ]; then + echo "FATAL, tools_init - could not setup environment" + exit $? +fi + +# Enable use of INTERVAL_SEC sample interval +OPT_USE_INTERVALS=1 + +IOSTAT=$( which iostat 2>/dev/null ) +if [ $?
-ne 0 ]; then + print_separator + WARNLOG "iostat not available" + tools_cleanup 0 +fi + +# MAIN Program: +#------------------------------------------------------------------------------- +# Parse input options +tools_parse_options "${@}" + +# Set affinity of current script +CPULIST="" +set_affinity ${CPULIST} + +LOG "collecting ${TOOLNAME} for ${PERIOD_MIN} minutes, with ${INTERVAL_SEC} second sample intervals." + +# Print tools generic tools header +tools_header + +# Calculate number of sample repeats based on overall interval and sampling interval +((REPEATS = PERIOD_MIN * 60 / INTERVAL_SEC)) +((REP = REPEATS + 1)) + +# Execute tool for specified duration +CMD="${IOSTAT} -k -x -t ${INTERVAL_SEC} ${REP}" +#LOG "CMD: ${CMD}" +${CMD} +LOG "done" + +# normal program exit +tools_cleanup 0 +exit 0 diff --git a/middleware/util/recipes-common/engtools/hostdata-collectors/scripts/linux_benchmark.sh b/middleware/util/recipes-common/engtools/hostdata-collectors/scripts/linux_benchmark.sh new file mode 100644 index 0000000..fb1d16d --- /dev/null +++ b/middleware/util/recipes-common/engtools/hostdata-collectors/scripts/linux_benchmark.sh @@ -0,0 +1,547 @@ +#!/bin/bash + +username="wrsroot" +password="Li69nux*" +test_duration="30" +wait_duration="5" +udp_find_0_frameloss="1" +udp_max_iter="20" +udp_granularity="100000" +result_dir="/home/${username}/benchmark_results" +summary_file="${result_dir}/benchmark_summary.xls" +host="" +remote="" +controllers=() +computes=() +nodes=() +max_compute_node="10" +interfaces=("") +# udp header total length: Ethernet header ( 14 ) + CRC ( 4 ) + IPv4 header ( 20 ) + UDP header ( 8 ) +udp_header_len="46" +# icmp header total length: ICMP header ( 8 ) + IPv4 header ( 20 ) +icmp_header_len="28" +frame_sizes=(64 128 256 512 1024 1280 1518) +ssh_opt="-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -q" +# ports used for different kind of traffics except hiprio. 
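+# (Worked example of the header accounting above: with udp_header_len=46, a
+# 64-byte target frame is generated with an iperf3 payload of 64-46 = 18
+# bytes, i.e. "iperf3 -u -l 18", as computed in iperf3_client_udp_start.)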
these are chosen randomly since they are not used +# 8000 - storage; 8001 - migration; 8002 - default; 8003 - drbd +controller_ports=(8000 8001 8002 8003) +compute_ports=(8000 8001 8002) +traffic_types=(storage migration default drbd) +flow_ids=(1:20 1:30 1:40 1:50) + +function exec_cmd () +{ + node="$1" + cmd="$2" + + if [[ "${node}" == *"${host}"* ]]; then + echo "$(bash -c "${cmd}")" + else + echo "$(ssh ${ssh_opt} ${username}@${node} "${cmd}")" + fi +} + +function iperf3_server_start () +{ + local server="$1" + local result="$2" + local port="$3" + local cmd="iperf3 -s" + + if [ "${port}" ]; then + cmd="${cmd} -p ${port}" + fi + cmd="nohup ${cmd} > ${result} 2>&1 &" + $(exec_cmd "${server}" "${cmd}") +} + +function iperf3_client_tcp_start () +{ + local result="${result_dir}/throughput" + local cmd="" + local client="$1" + local server="$2" + local port="$3" + + cmd="iperf3 -t ${test_duration} -c $(get_ip_addr "${server}")" + if [ "${port}" ]; then + cmd="${cmd} -p ${port} -O ${wait_duration}" + result="${result}_parallel_${port}" + else + result="${result}_tcp" + if [[ "${server}" == *"infra"* ]]; then + result="${result}_infra" + fi + fi + $(exec_cmd "${client}" "${cmd} > ${result} 2>&1") +} + +function iperf3_client_udp_start () +{ + local result="${result_dir}/throughput_udp" + local cmd="" + local client="$1" + local server="$2" + local frame_size="$3" + local bw="0" + + if [ "${4}" ]; then + bw="${4}" + fi + + cmd="iperf3 -u -t ${test_duration} -c $(get_ip_addr ${server})" + if [ ${frame_size} ]; then + cmd="${cmd} -l ${frame_size}" + result="${result}_$[${frame_size}+${udp_header_len}]" + fi + + if [[ ${server} == *"infra"* ]]; then + result="${result}_infra" + fi + + $(exec_cmd "${client}" "${cmd} -b ${bw} >> ${result} 2>&1" ) +} + +function iperf3_stop () +{ + local node="$1" + local cmd="pkill iperf3" + $(exec_cmd "${node}" "${cmd}") +} + +function get_ip_addr () +{ + arp -a | grep -oP "(?<=$1 \()[^)]*" | head -n 1 +} + +function throughput_tcp_test() +{ + for (( i = 0; i < ${#nodes[@]} ; i+=2 )); do + for interface in "${interfaces[@]}"; do + local interface_name="management" + local interface_suffix="" + local result_suffix="" + if [ "${interface}" == "infra" ]; then + interface_name="infrastructure" + interface_suffix="-infra" + result_suffix="_infra" + fi + local result_file="${result_dir}/throughput_tcp${result_suffix}" + printf "Running TCP throughput test between ${nodes[${i}]} and ${nodes[$[${i}+1]]}'s ${interface_name} network..." 
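+ # (Underlying commands, for reference: the server side runs "iperf3 -s"
+ # under nohup and the client runs "iperf3 -c <server-ip> -t 30" via the
+ # iperf3_* helpers defined above; test_duration defaults to 30 seconds.)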
+ iperf3_server_start ${nodes[$[${i}+1]]}${interface_suffix} ${result_file} + iperf3_client_tcp_start ${nodes[${i}]}${interface_suffix} ${nodes[$[${i}+1]]}${interface_suffix} + iperf3_stop ${nodes[$[${i}+1]]}${interface_suffix} + result=$(exec_cmd "${nodes[${i}]}" "awk '/sender/ {print \$7 \" \" \$8}' ${result_file}") + printf " Done (${result})\n" + done + done +} + +function throughput_udp_test () +{ + for (( i = 0; i < ${#nodes[@]} ; i+=2 )); do + for interface in "${interfaces[@]}"; do + local interface_name="management" + local interface_suffix="" + local result_suffix="" + if [ "${interface}" == "infra" ]; then + interface_name="infrastructure" + interface_suffix="-infra" + result_suffix="_infra" + fi + echo "Running UDP throughput test between ${nodes[${i}]} and ${nodes[$[${i}+1]]}'s ${interface_name} network" + for frame_size in "${frame_sizes[@]}"; do + local max_bw="0" + local min_bw="0" + local cur_bw="0" + local old_bw="0" + local result="" + local result_unit="" + local frame_loss="" + local max_result="" + local max_result_unit="" + local max_frame_loss="" + local result_file="${result_dir}/throughput_udp_${frame_size}${result_suffix}" + local iter="0" + local diff="" + printf "\tFrame size = ${frame_size}..." + while true; do + iperf3_server_start ${nodes[$[${i}+1]]}${interface_suffix} ${result_file} + iperf3_client_udp_start ${nodes[${i}]}${interface_suffix} ${nodes[$[${i}+1]]}${interface_suffix} $[${frame_size}-${udp_header_len}] ${cur_bw} + iperf3_stop ${nodes[$[${i}+1]]}${interface_suffix} + result=$(exec_cmd "${nodes[${i}]}" "awk '/%/ {print \$7}' ${result_file} | tail -n1") + result_unit=$(exec_cmd "${nodes[${i}]}" "awk '/%/ {print \$8}' ${result_file} | tail -n1") + frame_loss=$(exec_cmd "${nodes[${i}]}" "awk '/%/ {print \$12}' ${result_file} | tail -n1 | tr -d '()%'") + if [ "${udp_find_0_frameloss}" == "1" ]; then + if [ "${iter}" -eq "0" ]; then + max_result="${result}" + max_result_unit="${result_unit}" + max_frame_loss="${frame_loss}" + fi + if [ $(echo ${frame_loss} | grep e) ]; then + frame_loss="$(echo ${frame_loss} | sed 's/e/*10^/g;s/ /*/' )" + fi + if [ "$(echo "${frame_loss} > 0" | bc -l)" -eq "1" ]; then + max_bw="${result}" + if [ "${result_unit}" == "Kbits/sec" ]; then + max_bw="$(echo "(${max_bw} * 1000) / 1" | bc)" + elif [ "${result_unit}" == "Mbits/sec" ]; then + max_bw="$(echo "(${max_bw} * 1000000) / 1" | bc)" + elif [ "${result_unit}" == "Gbits/sec" ]; then + max_bw="$(echo "(${max_bw} * 1000000000) / 1" | bc)" + fi + else + if [ "${iter}" -eq "0" ]; then + break + else + min_bw="${result}" + if [ "${result_unit}" == "Kbits/sec" ]; then + min_bw="$(echo "(${min_bw} * 1000) / 1" | bc)" + elif [ "${result_unit}" == "Mbits/sec" ]; then + min_bw="$(echo "(${min_bw} * 1000000) / 1" | bc)" + elif [ "${result_unit}" == "Gbits/sec" ]; then + min_bw="$(echo "(${min_bw} * 1000000000) / 1" | bc)" + fi + fi + fi + old_bw="${cur_bw}" + cur_bw="$[(${max_bw} + ${min_bw}) / 2]" + diff="$(echo "$[${cur_bw} - ${old_bw}]" | tr -d '-')" + #break + ((iter++)) + if [ "${diff}" -lt "${udp_granularity}" ]; then + break + fi + if [ "${udp_max_iter}" -ne "0" ] && [ "${iter}" -ge "${udp_max_iter}" ]; then + break + fi + else + break + fi + done + if [ "${udp_find_0_frameloss}" == "1" ]; then + printf " Done (%s %s @ %s%% & %s %s @ %s%%)\n" "${max_result}" "${max_result_unit}" "${max_frame_loss}" "${result}" "${result_unit}" "${frame_loss}" + else + printf " Done (%s %s @ %s%%)\n" "${result}" "${result_unit}" "${frame_loss}" + fi + done + done + done +} + +function 
throughput_parallel_test () +{ + local dev="" + local ip_addr="" + local interface_name="" + local interface_suffix="" + local result_file="${result_dir}/throughput_parallel" + # get device name of the interface + if [ "${#interfaces[@]}" -gt "1" ]; then + interface_name="infrastructure" + interface_suffix="-infra" + ip_addr=$(ping -c1 ${host}-infra | awk -F'[()]' '/PING/{print $2}') + else + interface_name="management" + ip_addr=$(ping -c1 ${host} | awk -F'[()]' '/PING/{print $2}') + fi + dev=$(ifconfig | grep -B1 "inet ${ip_addr}" | awk '$1!="inet" && $1!="--" {print $1}') + + + # set all the filters + for node in ${nodes[@]}; do + local ports=("${controller_ports[@]}") + if [[ "${node}" == *"compute"* ]]; then + ports=("${compute_ports[@]}") + fi + for i in $(seq 0 $[${#ports[@]} - 1]); do + if [ ${traffic_types[i]} != "default" ]; then + tc_dport="tc filter add dev ${dev} protocol ip parent 1:0 prio 1 u32 match ip protocol 6 0xff match ip dport ${ports[i]} 0xffff flowid ${flow_ids[i]}" + tc_sport="tc filter add dev ${dev} protocol ip parent 1:0 prio 1 u32 match ip protocol 6 0xff match ip sport ${ports[i]} 0xffff flowid ${flow_ids[i]}" + $(exec_cmd "${node}" "echo ${password} | sudo -S bash -c '${tc_dport}; ${tc_sport}' > /dev/null 2>&1") + fi + done + done + + # run the tests + for (( i = 0; i < ${#nodes[@]} ; i+=2 )); do + local ports=("${controller_ports[@]}") + if [[ "${nodes[${i}]}" == *"compute"* ]]; then + ports=("${compute_ports[@]}") + fi + printf "Running parallel throughput test between ${nodes[${i}]} and ${nodes[$[${i}+1]]}'s ${interface_name} network..." + + # start the servers + for port in "${ports[@]}"; do + iperf3_server_start "${nodes[$[${i}+1]]}${interface_suffix}" "${result_file}_${port}" "${port}" + done + #start the clients + for port in "${controller_ports[@]}"; do + iperf3_client_tcp_start ${nodes[${i}]}${interface_suffix} ${nodes[$[${i}+1]]}${interface_suffix} ${port} & + done + sleep $[${test_duration} + ${wait_duration} + 1] + iperf3_stop ${nodes[$[${i}+1]]}${interface_suffix} + printf " Done\n" + + # get results + for j in $(seq 0 $[${#ports[@]} - 1]); do + result=$(exec_cmd "${nodes[${i}]}" "awk '/sender/ {print \$7 \" \" \$8}' ${result_file}_${ports[${j}]}") + printf "\t${traffic_types[$j]} = ${result}\n" + done + done + + # remove all the filters + for node in ${nodes[@]}; do + local handles=() + local ports=("${controller_ports[@]}") + if [[ "${node}" == *"compute"* ]]; then + ports=("${compute_ports[@]}") + fi + handles=($(exec_cmd "${node}" "/usr/sbin/tc filter show dev ${dev} | awk '/filter/ {print \$10}' | tail -n $[(${#ports[@]} - 1) * 2 ]")) + for handle in "${handles[@]}"; do + $(exec_cmd "${node}" "echo ${password} | sudo -S /usr/sbin/tc filter delete dev ${dev} parent 1: handle ${handle} prio 1 u32 > /dev/null 2>&1") + done + done +} + +function latency_test () +{ + for (( i = 0; i < ${#nodes[@]} ; i+=2 )); do + for interface in "${interfaces[@]}"; do + local interface_name="management" + local interface_suffix="" + local result_suffix="" + if [ "${interface}" == "infra" ]; then + interface_name="infrastructure" + interface_suffix="-infra" + result_suffix="_infra" + fi + echo "Running latency test between ${nodes[${i}]} and ${nodes[$[${i}+1]]}'s ${interface_name} network" + for frame_size in "${frame_sizes[@]}"; do + local result_file="${result_dir}/latency_${frame_size}${result_suffix}" + printf "\tFrame size = ${frame_size}..." 
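+ # (Sizing note: "ping -s" sets the ICMP payload, so frame_size-8 leaves
+ # room for the 8-byte ICMP header; a 64-byte frame maps to "-s 56", the
+ # classic default that produces 64-byte ICMP packets.)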
+ $(exec_cmd "${nodes[${i}]}" "ping -s $[${frame_size}-8] -w ${test_duration} -i 0.2 ${nodes[$[${i}+1]]}${interface_suffix} > ${result_file} 2>&1") + result=$(exec_cmd "${nodes[${i}]}" "awk '/rtt/ {print \$2 \" = \" \$4 \" \" \$5}' ${result_file}") + printf " Done (%s)\n" "${result}" + done + done + done +} + +function setup () +{ + for node in ${nodes[@]}; do + iperf3_stop "${node}" + $(exec_cmd "${node}" "rm -rf ${result_dir}; mkdir -p ${result_dir}") + done +} + +function get_remote_results () +{ + for node in ${nodes[@]}; do + if [ "${node}" != "${host}" ]; then + mkdir ${result_dir}/${node} + scp ${ssh_opt} ${username}@${node}:${result_dir}/* ${result_dir}/${node} > /dev/null 2>&1 + fi + done +} + +function get_interface_info () +{ + local dev="" + local ip_addr="" + printf "Network interfaces info\n" >> ${summary_file} + for interface in "${interfaces[@]}"; do + local interface_suffix="" + local interface_name="management" + if [ "${interface}" == "infra" ]; then + interface_name="infrastructure" + interface_suffix="-infra" + fi + ip_addr=$(ping -c1 ${host}${interface_suffix} | awk -F'[()]' '/PING/{print $2}') + dev=$(ifconfig | grep -B1 "inet ${ip_addr}" | awk '$1!="inet" && $1!="--" {print $1}') + printf "%s network interface\n" "${interface_name}" >> ${summary_file} + echo ${password} | sudo -S ethtool ${dev} >> ${summary_file} + done +} + +function generate_summary () +{ + local header="" + local result="" + local result_file="" + + printf "Summary\n\n" > ${summary_file} + printf "Throughput TCP\n" >> ${summary_file} + for (( i = 0; i < ${#nodes[@]} ; i+=2 )); do + for interface in "${interfaces[@]}"; do + local node_type="controller" + local interface_type="mgmt" + local result_suffix="" + if [[ "${nodes[${i}]}" == *"compute"* ]]; then + node_type="compute" + fi + if [ "${interface}" == "infra" ]; then + interface_type="infra" + result_suffix="_infra" + fi + header="${header},${node_type}'s ${interface_type}" + result_file="${result_dir}" + if [ ${node_type} == "compute" ]; then + result_file="${result_file}/${nodes[${i}]}" + fi + result_file="${result_file}/throughput_tcp${result_suffix}" + result="${result},$(awk '/sender/ {print $7 " " $8}' ${result_file})" + done + done + printf "%s\n%s\n\n" "${header}" "${result}" >> ${summary_file} + + printf "Throughput UDP\n" >> ${summary_file} + header=",frame,max throughput,max frameloss" + if [ "${udp_find_0_frameloss}" == "1" ]; then + header="${header},final throughput, final frameloss" + fi + for (( i = 0; i < ${#nodes[@]} ; i+=2 )); do + for interface in "${interfaces[@]}"; do + local node_type="controller" + local interface_type="mgmt" + local result_suffix="" + if [[ "${nodes[${i}]}" == *"compute"* ]]; then + node_type="compute" + fi + if [ "${interface}" == "infra" ]; then + interface_type="infra" + result_suffix="_infra" + fi + printf "%s's %s\n%s\n" "${node_type}" "${interface_type}" "${header}" >> ${summary_file} + result_file=${result_dir} + if [ ${node_type} == "compute" ]; then + result_file="${result_file}/${nodes[${i}]}" + fi + for frame in ${frame_sizes[@]}; do + result="${frame},$(awk '/%/ {print $7 " " $8}' ${result_file}/throughput_udp_${frame}${result_suffix} | head -n1),$(awk '/%/ {print $12}' ${result_file}/throughput_udp_${frame}${result_suffix} | head -n1 | tr -d '()')" + if [ "${udp_find_0_frameloss}" == "1" ]; then + result="${result},$(awk '/%/ {print $7 " " $8}' ${result_file}/throughput_udp_${frame}${result_suffix} | tail -n1),$(awk '/%/ {print $12}' ${result_file}/throughput_udp_${frame}${result_suffix} | 
tail -n1 | tr -d '()')" + fi + printf ",%s\n" "${result}" >> ${summary_file} + done + printf "\n" >> ${summary_file} + done + done + + printf "Parallel throughput result\n" >> ${summary_file} + header=",Node type" + for traffic_type in "${traffic_types[@]}"; do + header="${header},${traffic_type}" + done + printf "%s\n" "${header}" >> ${summary_file} + for (( i = 0; i < ${#nodes[@]} ; i+=2 )); do + local node_type="controller" + local ports=("${controller_ports[@]}") + if [[ "${nodes[${i}]}" == *"compute"* ]]; then + node_type="compute" + fi + result_file=${result_dir} + if [ ${node_type} == "compute" ]; then + ports=("${compute_ports[@]}") + result_file="${result_file}/${nodes[${i}]}" + fi + result=",${node_type}" + for port in "${ports[@]}"; do + result="${result},$(awk '/sender/ {print $7 " " $8}' ${result_file}/throughput_parallel_${port})" + done + printf "%s\n" "${result}" >> ${summary_file} + done + + printf "\nLatency result in ms\n" >> ${summary_file} + for (( i = 0; i < ${#nodes[@]} ; i+=2 )); do + for interface in "${interfaces[@]}"; do + local node_type="controller" + local interface_type="mgmt" + local result_suffix="" + if [[ "${nodes[${i}]}" == *"compute"* ]]; then + node_type="compute" + fi + if [ "${interface}" == "infra" ]; then + interface_type="infra" + result_suffix="_infra" + fi + printf "%s's %s network\n" "${node_type}" "${interface_type}" >> ${summary_file} + result_file=${result_dir} + if [ ${node_type} == "compute" ]; then + result_file="${result_file}/${nodes[${i}]}" + fi + result_file="${result_file}/latency" + printf ",frame size,%s\n" "$(awk '/rtt/ {print $2}' ${result_file}_${frame_sizes}${result_suffix} | tr '/' ',' )" >> ${summary_file} + for frame_size in "${frame_sizes[@]}"; do + printf ",%s,%s\n" "${frame_size}" "$(awk '/rtt/ {print $4}' ${result_file}_${frame_size}${result_suffix} | tr '/' ',' )" >> ${summary_file} + done + + printf "latency distribution\n" >> ${summary_file} + printf ",frame size" >> ${summary_file} + for (( j = 1; j < "20" ; j+=1 )); do + printf ",%s" "$(echo "scale=3;${j}/100" | bc | awk '{printf "%.3f", $0}')" >> ${summary_file} + done + printf "\n" >> ${summary_file} + for frame_size in "${frame_sizes[@]}"; do + printf ",%s" "${frame_size}" >> ${summary_file} + for (( j = 1; j < "20" ; j+=1 )); do + printf ",%s" "$(grep -c "time=$(echo "scale=2;${j}/100" | bc | awk '{printf "%.2f", $0}')" ${result_file}_${frame_size}${result_suffix})" >> ${summary_file} + done + printf "\n" >> ${summary_file} + done + printf "\n" >> ${summary_file} + done + done + + get_interface_info +} + +echo "Starting linux interface benchmark test. ($(date))" + +# find the nodes to test +host=${HOSTNAME} +if [ "${host}" == "controller-1" ]; then + remote="controller-0" +else + remote="controller-1" +fi + +# at least another controller needs to be reachable +ping -c1 ${remote} > /dev/null 2>&1 +if [ $? -eq 0 ]; then + controllers=(${host} ${remote}) + nodes+=("${controllers[@]}") +else + echo "Stopping test as ${remote} is not reachable" + exit 1 +fi + +# check if infrastructure interface is provisioned +ping -c1 "${remote}-infra" > /dev/null 2>&1 +if [ $? -eq 0 ]; then + echo "Infrastructure network is provisioned" + interfaces+=("infra") +fi + +# check if there are any compute nodes +for i in $(seq 0 $[${max_compute_node} - 1]); do + ping -c1 compute-${i} > /dev/null 2>&1 + if [ $? 
-eq 0 ]; then + computes+=("compute-${i}") + if [ ${#computes[@]} -ge "2" ]; then + nodes+=("${computes[@]}") + break + fi + fi +done + +setup +throughput_tcp_test +throughput_udp_test +throughput_parallel_test +latency_test +get_remote_results +generate_summary +echo "Linux interface benchmark test finished. ($(date))" + diff --git a/middleware/util/recipes-common/engtools/hostdata-collectors/scripts/live_stream.py b/middleware/util/recipes-common/engtools/hostdata-collectors/scripts/live_stream.py new file mode 100644 index 0000000..8192048 --- /dev/null +++ b/middleware/util/recipes-common/engtools/hostdata-collectors/scripts/live_stream.py @@ -0,0 +1,1578 @@ +#!/usr/bin/python + +""" +Copyright (c) 2017 Wind River Systems, Inc. + +SPDX-License-Identifier: Apache-2.0 +""" + +import os +import sys +import time +import datetime +import psutil +import fcntl +import logging +import ConfigParser +from multiprocessing import Process, cpu_count +from subprocess import Popen, PIPE +from collections import OrderedDict + + +# generates the required string for the areas where fields are not static +def generateString(meas, tag_n, tag_v, field_n, field_v): + base = "{},".format(meas) + try: + for i in range(len(tag_n)): + if i == len(tag_n) - 1: + # have space between tags and fields + base += "'{}'='{}' ".format(tag_n[i], str(tag_v[i])) + else: + # separate with commas + base += "'{}'='{}',".format(tag_n[i], str(tag_v[i])) + for i in range(len(field_v)): + if str(field_v[i]).replace(".", "").isdigit(): + if i == len(field_v) - 1: + base += "'{}'='{}'".format(field_n[i], str(field_v[i])) + else: + base += "'{}'='{}',".format(field_n[i], str(field_v[i])) + return base + except IndexError: + return None + + +# collects system memory information +def collectMemtop(influx_info, node, ci): + logging.basicConfig(filename="/tmp/livestream.log", filemode="a", format="%(asctime)s %(levelname)s %(message)s", level=logging.INFO) + logging.info("memtop data starting collection with a collection interval of {}s".format(ci["memtop"])) + measurement = "memtop" + tags = {"node": node} + MiB = 1024.0 + while True: + try: + fields = OrderedDict([("total", 0), ("used", 0), ("free", 0), ("cached", 0), ("buf", 0), ("slab", 0), ("cas", 0), ("clim", 0), ("dirty", 0), ("wback", 0), ("anon", 0), ("avail", 0)]) + with open("/proc/meminfo", "r") as f: + hps = 0 + # for each line in /proc/meminfo, match with element in fields + for line in f: + line = line.strip("\n").split() + if line[0].strip(":").startswith("MemTotal"): + # convert to from kibibytes to mibibytes + fields["total"] = float(line[1]) / MiB + elif line[0].strip(":").startswith("MemFree"): + fields["free"] = int(line[1]) / MiB + elif line[0].strip(":").startswith("MemAvailable"): + fields["avail"] = float(line[1]) / MiB + elif line[0].strip(":").startswith("Buffers"): + fields["buf"] = float(line[1]) / MiB + elif line[0].strip(":").startswith("Cached"): + fields["cached"] = float(line[1]) / MiB + elif line[0].strip(":").startswith("Slab"): + fields["slab"] = float(line[1]) / MiB + elif line[0].strip(":").startswith("CommitLimit"): + fields["clim"] = float(line[1]) / MiB + elif line[0].strip(":").startswith("Committed_AS"): + fields["cas"] = float(line[1]) / MiB + elif line[0].strip(":").startswith("Dirty"): + fields["dirty"] = float(line[1]) / MiB + elif line[0].strip(":").startswith("Writeback"): + fields["wback"] = float(line[1]) / MiB + elif line[0].strip(":").endswith("(anon)"): + fields["anon"] += float(line[1]) / MiB + elif 
line[0].strip(":").endswith("Hugepagesize"): + hps = float(line[1]) / MiB + fields["used"] = fields["total"] - fields["avail"] + f.close() + # get platform specific memory info + fields["platform_avail"] = 0 + fields["platform_hfree"] = 0 + for file in os.listdir("/sys/devices/system/node"): + if file.startswith("node"): + node_num = file.replace("node", "").strip("\n") + avail = hfree = 0 + with open("/sys/devices/system/node/{}/meminfo".format(file)) as f1: + for line in f1: + line = line.strip("\n").split() + if line[2].strip(":").startswith("MemFree") or line[2].strip(":").startswith("FilePages") or line[2].strip(":").startswith("SReclaimable"): + avail += float(line[3]) + elif line[2].strip(":").startswith("HugePages_Free"): + hfree = float(line[3]) * hps + fields["{}:avail".format(node_num)] = avail / MiB + fields["{}:hfree".format(node_num)] = hfree + # get platform sum + fields["platform_avail"] += avail / MiB + fields["platform_hfree"] += hfree + f1.close() + s = generateString(measurement, tags.keys(), tags.values(), fields.keys(), fields.values()) + if s is None: + good_string = False + else: + good_string = True + if good_string: + # send data to InfluxDB + p = Popen("curl -s -o /dev/null 'http://'{}':'{}'/write?db='{}'' --data-binary '{}'".format(influx_info[0], influx_info[1], influx_info[2], s), shell=True) + p.communicate() + time.sleep(ci["memtop"]) + except KeyboardInterrupt: + break + except Exception: + logging.error("memtop collection stopped unexpectedly with error: {}. Restarting process...".format(sys.exc_info())) + time.sleep(3) + + +# collects rss and vsz information +def collectMemstats(influx_info, node, ci, services, syseng_services, openstack_services, exclude_list, skip_list, collect_all): + logging.basicConfig(filename="/tmp/livestream.log", filemode="a", format="%(asctime)s %(levelname)s %(message)s", level=logging.INFO) + logging.info("memstats data starting collection with a collection interval of {}s".format(ci["memstats"])) + measurement = "memstats" + tags = {"node": node} + ps_output = None + influx_string = "" + while True: + try: + fields = {} + ps_output = Popen("exec ps -e -o rss,vsz,cmd", shell=True, stdout=PIPE) + # create dictionary of dictionaries + if collect_all is False: + for svc in services: + fields[svc] = {"rss": 0, "vsz": 0} + fields["static_syseng"] = {"rss": 0, "vsz": 0} + fields["live_syseng"] = {"rss": 0, "vsz": 0} + fields["total"] = {"rss": 0, "vsz": 0} + ps_output.stdout.readline() + while True: + # for each line in ps output, get rss and vsz info + line = ps_output.stdout.readline().strip("\n").split() + # if at end of output, send data + if not line: + break + else: + rss = float(line[0]) + vsz = float(line[1]) + # go through all command outputs + for i in range(2, len(line)): + # remove unwanted characters and borders from cmd name. 
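+ # (e.g. "(nginx:" -> "nginx"; the replace/strip/split chain below is
+ # what performs this normalization.)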
Ex: /usr/bin/example.py -> example.py + svc = line[i].replace("(", "").replace(")", "").strip(":").split("/")[-1].strip("\n") + if svc == "gunicorn": + gsvc = line[-1].replace("[", "").replace("]", "").strip("\n") + if gsvc == "public:application": + gsvc = "keystone-public" + elif gsvc == "admin:application": + gsvc = "keystone-admin" + gsvc = "gunicorn_{}".format(gsvc) + if gsvc not in fields: + fields[gsvc] = {"rss": rss, "vsz": vsz} + else: + fields[gsvc]["rss"] += rss + fields[gsvc]["vsz"] += vsz + + elif svc == "postgres": + if line[i + 1].startswith("-") is False and line[i + 1].startswith("_") is False and line[i + 1] != "psql": + psvc = "" + if line[i + 2] in openstack_services: + psvc = line[i + 2].strip("\n") + else: + for j in range(i + 1, len(line)): + psvc += "{}_".format(line[j].strip("\n")) + psvc = "postgres_{}".format(psvc).strip("_") + if psvc not in fields: + fields[psvc] = {"rss": rss, "vsz": vsz} + else: + fields[psvc]["rss"] += rss + fields[psvc]["vsz"] += vsz + + if collect_all is False: + if svc in services: + fields[svc]["rss"] += rss + fields[svc]["vsz"] += vsz + fields["total"]["rss"] += rss + fields["total"]["vsz"] += vsz + break + elif svc in syseng_services: + if svc == "live_stream.py": + fields["live_syseng"]["rss"] += rss + fields["live_syseng"]["vsz"] += vsz + else: + fields["static_syseng"]["rss"] += rss + fields["static_syseng"]["vsz"] += vsz + fields["total"]["rss"] += rss + fields["total"]["vsz"] += vsz + break + # Collect all services + else: + if svc in exclude_list or svc.startswith("-") or svc[0].isdigit() or svc.startswith("[") or svc.endswith("]"): + continue + elif svc in skip_list or svc.startswith("IPaddr"): + break + else: + if svc not in fields: + fields[svc] = {"rss": rss, "vsz": vsz} + else: + fields[svc]["rss"] += rss + fields[svc]["vsz"] += vsz + fields["total"]["rss"] += rss + fields["total"]["vsz"] += vsz + break + # send data to InfluxDB + for key in fields.keys(): + influx_string += "{},'{}'='{}','{}'='{}' '{}'='{}','{}'='{}'".format(measurement, "node", tags["node"], "service", key, "rss", fields[key]["rss"], "vsz", fields[key]["vsz"]) + "\n" + p = Popen("curl -s -o /dev/null 'http://'{}':'{}'/write?db='{}'' --data-binary '{}'".format(influx_info[0], influx_info[1], influx_info[2], influx_string), shell=True) + p.communicate() + influx_string = "" + ps_output.kill() + time.sleep(ci["memstats"]) + except KeyboardInterrupt: + if ps_output is not None: + ps_output.kill() + break + except Exception: + logging.error("memstats collection stopped unexpectedly with error: {}. 
Restarting process...".format(sys.exc_info())) + time.sleep(3) + + +# collects task cpu information +def collectSchedtop(influx_info, node, ci, services, syseng_services, openstack_services, exclude_list, skip_list, collect_all): + logging.basicConfig(filename="/tmp/livestream.log", filemode="a", format="%(asctime)s %(levelname)s %(message)s", level=logging.INFO) + logging.info("schedtop data starting collection with a collection interval of {}s".format(ci["schedtop"])) + measurement = "schedtop" + tags = {"node": node} + influx_string = "" + top_output = Popen("exec top -b -c -w 512 -d{}".format(ci["schedtop"]), shell=True, stdout=PIPE) + while True: + try: + fields = {} + pro = psutil.Process(top_output.pid) + # if process dies, restart it + if pro.status() == "zombie": + top_output.kill() + top_output = Popen("exec top -b -c -w 512 -d{}".format(ci["schedtop"]), shell=True, stdout=PIPE) + if collect_all is False: + for svc in services: + fields[svc] = 0 + fields["static_syseng"] = 0 + fields["live_syseng"] = 0 + fields["total"] = 0 + # check first line + line = top_output.stdout.readline() + if not line: + pass + else: + # skip header completely + for _ in range(6): + top_output.stdout.readline() + while True: + line = top_output.stdout.readline().strip("\n").split() + # if end of top output, leave this while loop + if not line: + break + else: + occ = float(line[8]) + # for each command listed, check if it matches one from the list + for i in range(11, len(line)): + # remove unwanted characters and borders from cmd name. Ex: /usr/bin/example.py -> example.py + svc = line[i].replace("(", "").replace(")", "").strip(":").split("/")[-1] + if svc == "gunicorn": + gsvc = line[-1].replace("[", "").replace("]", "").strip("\n") + if gsvc == "public:application": + gsvc = "keystone-public" + elif gsvc == "admin:application": + gsvc = "keystone-admin" + gsvc = "gunicorn_{}".format(gsvc) + if gsvc not in fields: + fields[gsvc] = occ + else: + fields[gsvc] += occ + + elif svc == "postgres": + if line[i + 1].startswith("-") is False and line[i + 1].startswith("_") is False and line[i + 1] != "psql": + psvc = "" + if line[i + 2] in openstack_services: + psvc = line[i + 2].strip("\n") + else: + for j in range(i + 1, len(line)): + psvc += "{}_".format(line[j].strip("\n")) + psvc = "postgres_{}".format(psvc).strip("_") + if psvc not in fields: + fields[psvc] = occ + else: + fields[psvc] += occ + + if collect_all is False: + if svc in services: + fields[svc] += occ + fields["total"] += occ + break + elif svc in syseng_services: + if svc == "live_stream.py": + fields["live_syseng"] += occ + else: + fields["static_syseng"] += occ + fields["total"] += occ + break + # Collect all services + else: + if svc in exclude_list or svc.startswith("-") or svc[0].isdigit() or svc.startswith("[") or svc.endswith("]"): + continue + elif svc in skip_list or svc.startswith("IPaddr"): + break + else: + if svc not in fields: + fields[svc] = occ + else: + fields[svc] += occ + fields["total"] += occ + break + for key in fields.keys(): + influx_string += "{},'{}'='{}','{}'='{}' '{}'='{}'".format(measurement, "node", tags["node"], "service", key, "occ", fields[key]) + "\n" + # send data to InfluxDB + p = Popen("curl -s -o /dev/null 'http://'{}':'{}'/write?db='{}'' --data-binary '{}'".format(influx_info[0], influx_info[1], influx_info[2], influx_string), shell=True) + p.communicate() + influx_string = "" + time.sleep(ci["schedtop"]) + except KeyboardInterrupt: + if top_output is not None: + top_output.kill() + break + except 
Exception: + logging.error("schedtop collection stopped unexpectedly with error: {}. Restarting process...".format(sys.exc_info())) + time.sleep(3) + + +# collects disk utilization information +def collectDiskstats(influx_info, node, ci): + logging.basicConfig(filename="/tmp/livestream.log", filemode="a", format="%(asctime)s %(levelname)s %(message)s", level=logging.INFO) + logging.info("diskstats data starting collection with a collection interval of {}s".format(ci["diskstats"])) + measurement = "diskstats" + tags = {"node": node, "file_system": None, "type": None, "mount": None} + fields = {"size": 0, "used": 0, "avail": 0, "usage": 0} + influx_string = "" + while True: + try: + parts = psutil.disk_partitions() + for i in parts: + # gather all partitions + tags["mount"] = str(i[1]).split("/")[-1] + # if mount == '', call it root + if tags["mount"] == "": + tags["mount"] = "root" + # skip boot + elif tags["mount"] == "boot": + continue + tags["file_system"] = str(i[0]).split("/")[-1] + tags["type"] = i[2] + u = psutil.disk_usage(i[1]) + fields["size"] = u[0] + fields["used"] = u[1] + fields["avail"] = u[2] + fields["usage"] = u[3] + influx_string += "{},'{}'='{}','{}'='{}','{}'='{}','{}'='{}' '{}'='{}','{}'='{}','{}'='{}','{}'='{}'".format(measurement, "node", tags["node"], "file_system", tags["file_system"], "type", tags["type"], "mount", tags["mount"], "size", fields["size"], "used", fields["used"], "avail", fields["avail"], "usage", fields["usage"]) + "\n" + p = Popen("curl -s -o /dev/null 'http://'{}':'{}'/write?db='{}'' --data-binary '{}'".format(influx_info[0], influx_info[1], influx_info[2], influx_string), shell=True) + p.communicate() + influx_string = "" + time.sleep(ci["diskstats"]) + except KeyboardInterrupt: + break + except Exception: + logging.error("diskstats collection stopped unexpectedly with error: {}. 
Restarting process...".format(sys.exc_info())) + time.sleep(3) + + +# collect device I/O information +def collectIostat(influx_info, node, ci): + logging.basicConfig(filename="/tmp/livestream.log", filemode="a", format="%(asctime)s %(levelname)s %(message)s", level=logging.INFO) + logging.info("iostat data starting collection with a collection interval of {}s".format(ci["iostat"])) + measurement = "iostat" + tags = {"node": node} + sector_size = 512.0 + influx_string = "" + while True: + try: + fields = {} + tmp = {} + tmp1 = {} + start = time.time() + # get initial values + for dev in os.listdir("/sys/block/"): + if dev.startswith("sr"): + continue + else: + fields[dev] = {"r/s": 0, "w/s": 0, "io/s": 0, "rkB/s": 0, "wkB/s": 0, "rrqms/s": 0, "wrqms/s": 0, "util": 0} + tmp[dev] = {"init_reads": 0, "init_reads_merged": 0, "init_read_sectors": 0, "init_read_wait": 0, "init_writes": 0, "init_writes_merged": 0, "init_write_sectors": 0, "init_write_wait": 0, "init_io_progress": 0, "init_io_time": 0, "init_wait_time": 0} + with open("/sys/block/{}/stat".format(dev), "r") as f: + # get initial readings + line = f.readline().strip("\n").split() + tmp[dev]["init_reads"] = int(line[0]) + tmp[dev]["init_reads_merged"] = int(line[1]) + tmp[dev]["init_read_sectors"] = int(line[2]) + tmp[dev]["init_read_wait"] = int(line[3]) + tmp[dev]["init_writes"] = int(line[4]) + tmp[dev]["init_writes_merged"] = int(line[5]) + tmp[dev]["init_write_sectors"] = int(line[6]) + tmp[dev]["init_write_wait"] = int(line[7]) + tmp[dev]["init_io_progress"] = int(line[8]) + tmp[dev]["init_io_time"] = int(line[9]) + tmp[dev]["init_wait_time"] = int(line[10]) + time.sleep(ci["iostat"]) + dt = time.time() - start + # get values again + for dev in os.listdir("/sys/block/"): + if dev.startswith("sr"): + continue + else: + # during a swact, some devices may not have been read in the initial reading. 
If found now, add them to dict + if dev not in fields: + fields[dev] = {"r/s": 0, "w/s": 0, "io/s": 0, "rkB/s": 0, "wkB/s": 0, "rrqms/s": 0, "wrqms/s": 0, "util": 0} + tmp1[dev] = {"reads": 0, "reads_merged": 0, "read_sectors": 0, "read_wait": 0, "writes": 0, "writes_merged": 0, "write_sectors": 0, "write_wait": 0, "io_progress": 0, "io_time": 0, "wait_time": 0} + with open("/sys/block/{}/stat".format(dev), "r") as f: + line = f.readline().strip("\n").split() + tmp1[dev]["reads"] = int(line[0]) + tmp1[dev]["reads_merged"] = int(line[1]) + tmp1[dev]["read_sectors"] = int(line[2]) + tmp1[dev]["read_wait"] = int(line[3]) + tmp1[dev]["writes"] = int(line[4]) + tmp1[dev]["writes_merged"] = int(line[5]) + tmp1[dev]["write_sectors"] = int(line[6]) + tmp1[dev]["write_wait"] = int(line[7]) + tmp1[dev]["io_progress"] = int(line[8]) + tmp1[dev]["io_time"] = int(line[9]) + tmp1[dev]["wait_time"] = int(line[10]) + # take difference and divide by delta t + for key in fields: + # if device was found in initial and second reading, do calculation + if key in tmp and key in tmp1: + fields[key]["r/s"] = abs(tmp1[key]["reads"] - tmp[key]["init_reads"]) / dt + fields[key]["w/s"] = abs(tmp1[key]["writes"] - tmp[key]["init_writes"]) / dt + fields[key]["rkB/s"] = abs(tmp1[key]["read_sectors"] - tmp[key]["init_read_sectors"]) * sector_size / dt / 1000 + fields[key]["wkB/s"] = abs(tmp1[key]["write_sectors"] - tmp[key]["init_write_sectors"]) * sector_size / dt / 1000 + fields[key]["rrqms/s"] = abs(tmp1[key]["reads_merged"] - tmp[key]["init_reads_merged"]) / dt + fields[key]["wrqms/s"] = abs(tmp1[key]["writes_merged"] - tmp[key]["init_writes_merged"]) / dt + fields[key]["io/s"] = fields[key]["r/s"] + fields[key]["w/s"] + fields[key]["rrqms/s"] + fields[key]["wrqms/s"] + fields[key]["util"] = abs(tmp1[key]["io_time"] - tmp[key]["init_io_time"]) / dt / 10 + influx_string += "{},'{}'='{}','{}'='{}' '{}'='{}','{}'='{}','{}'='{}','{}'='{}','{}'='{}','{}'='{}','{}'='{}','{}'='{}'".format(measurement, "node", tags["node"], "device", key, "r/s", fields[key]["r/s"], "w/s", fields[key]["w/s"], "rkB/s", fields[key]["rkB/s"], "wkB/s", fields[key]["wkB/s"], "rrqms/s", fields[key]["rrqms/s"], "wrqms/s", fields[key]["wrqms/s"], "io/s", fields[key]["io/s"], "util", fields[key]["util"]) + "\n" + # send data to InfluxDB + p = Popen("curl -s -o /dev/null 'http://'{}':'{}'/write?db='{}'' --data-binary '{}'".format(influx_info[0], influx_info[1], influx_info[2], influx_string), shell=True) + p.communicate() + influx_string = "" + except KeyboardInterrupt: + break + except Exception: + logging.error("iostat collection stopped unexpectedly with error: {}. 
Restarting process...".format(sys.exc_info())) + time.sleep(3) + + +# collects cpu load average information +def collectLoadavg(influx_info, node, ci): + logging.basicConfig(filename="/tmp/livestream.log", filemode="a", format="%(asctime)s %(levelname)s %(message)s", level=logging.INFO) + logging.info("load_avg data starting collection with a collection interval of {}s".format(ci["load_avg"])) + measurement = "load_avg" + tags = {"node": node} + fields = {"load_avg": 0} + while True: + try: + fields["load_avg"] = os.getloadavg()[0] + p = Popen("curl -s -o /dev/null 'http://'{}':'{}'/write?db='{}'' --data-binary '{},'{}'='{}' '{}'='{}''".format(influx_info[0], influx_info[1], influx_info[2], measurement, "node", tags["node"], "load_avg", fields["load_avg"]), shell=True) + p.communicate() + time.sleep(ci["load_avg"]) + except KeyboardInterrupt: + break + except Exception: + logging.error("load_avg collection stopped unexpectedly with error: {}. Restarting process...".format(sys.exc_info())) + time.sleep(3) + + +# collects cpu utilization information +def collectOcctop(influx_info, node, ci, pc): + logging.basicConfig(filename="/tmp/livestream.log", filemode="a", format="%(asctime)s %(levelname)s %(message)s", level=logging.INFO) + logging.info("occtop data starting collection with a collection interval of {}s".format(ci["occtop"])) + measurement = "occtop" + tags = {"node": node} + platform_cores = pc + influx_string = "" + while True: + try: + cpu = psutil.cpu_percent(percpu=True) + cpu_times = psutil.cpu_times_percent(percpu=True) + fields = {} + # sum all cpu percents + total = float(sum(cpu)) + sys_total = 0 + fields["platform_total"] = {"usage": 0, "system": 0} + cores = 0 + # for each core, get values and assign a tag + for el in cpu: + fields["usage"] = float(el) + fields["system"] = float(cpu_times[cores][2]) + sys_total += float(cpu_times[cores][2]) + tags["core"] = "core_{}".format(cores) + influx_string += "{},'{}'='{}','{}'='{}' '{}'='{}','{}'='{}'".format(measurement, "node", tags["node"], "core", tags["core"], "usage", fields["usage"], "system", fields["system"]) + "\n" + if len(platform_cores) > 0: + if cores in platform_cores: + fields["platform_total"]["usage"] += float(el) + fields["platform_total"]["system"] += float(cpu_times[cores][2]) + cores += 1 + # add usage and system total to influx string + if len(platform_cores) > 0: + influx_string += "{},'{}'='{}','{}'='{}' '{}'='{}','{}'='{}'".format(measurement, "node", tags["node"], "core", "platform_total", "usage", fields["platform_total"]["usage"], "system", fields["platform_total"]["system"]) + "\n" + influx_string += "{},'{}'='{}','{}'='{}' '{}'='{}','{}'='{}'".format(measurement, "node", tags["node"], "core", "total", "usage", total, "system", sys_total) + "\n" + # send data to Influx + p = Popen("curl -s -o /dev/null 'http://'{}':'{}'/write?db='{}'' --data-binary '{}'".format(influx_info[0], influx_info[1], influx_info[2], influx_string), shell=True) + p.communicate() + influx_string = "" + time.sleep(ci["occtop"]) + except KeyboardInterrupt: + break + except Exception: + logging.error("occtop collection stopped unexpectedly with error: {}. 
Restarting process...".format(sys.exc_info())) + time.sleep(3) + + +# collects network interface information +def collectNetstats(influx_info, node, ci): + logging.basicConfig(filename="/tmp/livestream.log", filemode="a", format="%(asctime)s %(levelname)s %(message)s", level=logging.INFO) + logging.info("netstats data starting collection with a collection interval of {}s".format(ci["netstats"])) + measurement = "netstats" + tags = {"node": node} + fields = {} + prev_fields = {} + Mbps = float(1000000 / 8) + influx_string = "" + while True: + try: + net = psutil.net_io_counters(pernic=True) + # get initial data for difference calculation + for key in net: + prev_fields[key] = {"tx_B": net[key][0], "rx_B": net[key][1], "tx_p": net[key][2], "rx_p": net[key][3]} + start = time.time() + time.sleep(ci["netstats"]) + net = psutil.net_io_counters(pernic=True) + # get new data for difference calculation + dt = time.time() - start + for key in net: + tx_B = (float(net[key][0]) - float(prev_fields[key]["tx_B"])) + tx_Mbps = tx_B / Mbps / dt + rx_B = (float(net[key][1]) - float(prev_fields[key]["rx_B"])) + rx_Mbps = rx_B / Mbps / dt + tx_pps = (float(net[key][2]) - float(prev_fields[key]["tx_p"])) / dt + rx_pps = (float(net[key][3]) - float(prev_fields[key]["rx_p"])) / dt + # ensure no division by zero + if rx_B > 0 and rx_pps > 0: + rx_packet_size = rx_B / rx_pps + else: + rx_packet_size = 0 + if tx_B > 0 and tx_pps > 0: + tx_packet_size = tx_B / tx_pps + else: + tx_packet_size = 0 + fields[key] = {"tx_mbps": tx_Mbps, "rx_mbps": rx_Mbps, "tx_pps": tx_pps, "rx_pps": rx_pps, "tx_packet_size": tx_packet_size, "rx_packet_size": rx_packet_size} + for key in fields: + influx_string += "{},'{}'='{}','{}'='{}' '{}'='{}','{}'='{}','{}'='{}','{}'='{}','{}'='{}','{}'='{}'".format(measurement, "node", tags["node"], "interface", key, "rx_mbps", fields[key]["rx_mbps"], "tx_mbps", fields[key]["tx_mbps"], "rx_pps", fields[key]["rx_pps"], "tx_pps", fields[key]["tx_pps"], "rx_packet_size", fields[key]["rx_packet_size"], "tx_packet_size", fields[key]["tx_packet_size"]) + "\n" + # send data to InfluxDB + p = Popen("curl -s -o /dev/null 'http://'{}':'{}'/write?db='{}'' --data-binary '{}'".format(influx_info[0], influx_info[1], influx_info[2], influx_string), shell=True) + p.communicate() + influx_string = "" + except KeyboardInterrupt: + break + except Exception: + logging.error("netstats collection stopped unexpectedly with error: {}. 
Restarting process...".format(sys.exc_info())) + time.sleep(3) + + +# collects postgres db size and postgres service size information +def collectPostgres(influx_info, node, ci): + logging.basicConfig(filename="/tmp/livestream.log", filemode="a", format="%(asctime)s %(levelname)s %(message)s", level=logging.INFO) + logging.info("postgres data starting collection with a collection interval of {}s".format(ci["postgres"])) + measurement = "postgres_db_size" + measurement1 = "postgres_svc_stats" + tags = {"node": node, "service": None, "table_schema": 0, "table": None} + fields = {"db_size": 0, "connections": 0} + fields1 = {"table_size": 0, "total_size": 0, "index_size": 0, "live_tuples": 0, "dead_tuples": 0} + postgres_output = postgres_output1 = None + influx_string = influx_string1 = "" + good_string = False + while True: + try: + # make sure this is active controller, otherwise postgres queries wont work + if isActiveController(): + while True: + # get list of databases and their sizes + postgres_output = Popen("sudo -u postgres psql --pset pager=off -q -t -c'SELECT datname, pg_database_size(datname) FROM pg_database WHERE datistemplate = false;'", shell=True, stdout=PIPE) + lines = postgres_output.stdout.read().replace(" ", "").strip().split("\n") + if lines == "" or lines is None: + postgres_output.kill() + break + else: + # for each database from the previous output + for line in lines: + if not line: + break + line = line.replace(" ", "").split("|") + tags["service"] = line[0] + fields["db_size"] = line[1] + # send DB size to InfluxDB + influx_string += "{},'{}'='{}','{}'='{}' '{}'='{}'".format(measurement, "node", tags["node"], "service", tags["service"], "db_size", fields["db_size"]) + "\n" + # get tables for each database + sql = "SELECT table_schema,table_name,pg_size_pretty(table_size) AS table_size,pg_size_pretty(indexes_size) AS indexes_size,pg_size_pretty(total_size) AS total_size,live_tuples,dead_tuples FROM (SELECT table_schema,table_name,pg_table_size(table_name) AS table_size,pg_indexes_size(table_name) AS indexes_size,pg_total_relation_size(table_name) AS total_size,pg_stat_get_live_tuples(table_name::regclass) AS live_tuples,pg_stat_get_dead_tuples(table_name::regclass) AS dead_tuples FROM (SELECT table_schema,table_name FROM information_schema.tables WHERE table_schema='public' AND table_type='BASE TABLE') AS all_tables ORDER BY total_size DESC) AS pretty_sizes;" + postgres_output1 = Popen('sudo -u postgres psql --pset pager=off -q -t -d{} -c"{}"'.format(line[0], sql), shell=True, stdout=PIPE) + lines = postgres_output1.stdout.read().replace(" ", "").strip().split("\n") + for line in lines: + if line == "": + continue + else: + line = line.replace(" ", "").split("|") + elements = list() + # ensures all data is present + if len(line) != 7: + good_string = False + break + else: + # do some conversions + for el in line: + if el.endswith("bytes"): + el = int(el.replace("bytes", "")) + elif el.endswith("kB"): + el = el.replace("kB", "") + el = int(el) * 1000 + elif el.endswith("MB"): + el = el.replace("MB", "") + el = int(el) * 1000000 + elif el.endswith("GB"): + el = el.replace("GB", "") + el = int(el) * 1000000000 + elements.append(el) + tags["table_schema"] = elements[0] + tags["table"] = elements[1] + fields1["table_size"] = int(elements[2]) + fields1["index_size"] = int(elements[3]) + fields1["total_size"] = int(elements[4]) + fields1["live_tuples"] = int(elements[5]) + fields1["dead_tuples"] = int(elements[6]) + influx_string1 += 
"{},'{}'='{}','{}'='{}','{}'='{}','{}'='{}' '{}'='{}','{}'='{}','{}'='{}','{}'='{}','{}'='{}'".format(measurement1, "node", tags["node"], "service", tags["service"], "table_schema", tags["table_schema"], "table", tags["table"], "table_size", fields1["table_size"], "index_size", fields1["index_size"], "total_size", fields1["total_size"], "live_tuples", fields1["live_tuples"], "dead_tuples", fields1["dead_tuples"]) + "\n" + good_string = True + if good_string: + # send table data to InfluxDB + p = Popen("curl -s -o /dev/null 'http://'{}':'{}'/write?db='{}'' --data-binary '{}'".format(influx_info[0], influx_info[1], influx_info[2], influx_string), shell=True) + p.communicate() + p = Popen("curl -s -o /dev/null 'http://'{}':'{}'/write?db='{}'' --data-binary '{}'".format(influx_info[0], influx_info[1], influx_info[2], influx_string1), shell=True) + p.communicate() + influx_string = influx_string1 = "" + time.sleep(ci["postgres"]) + postgres_output1.kill() + postgres_output.kill() + else: + time.sleep(20) + except KeyboardInterrupt: + if postgres_output is not None: + postgres_output.kill() + if postgres_output1 is not None: + postgres_output1.kill() + break + except Exception: + logging.error("postgres collection stopped unexpectedly with error: {}. Restarting process...".format(sys.exc_info())) + time.sleep(3) + + +# collect postgres connections information +def collectPostgresConnections(influx_info, node, ci, fast): + logging.basicConfig(filename="/tmp/livestream.log", filemode="a", format="%(asctime)s %(levelname)s %(message)s", level=logging.INFO) + if fast: + logging.info("postgres_connections data starting collection with a constant collection interval") + else: + logging.info("postgres_connections data starting collection with a collection interval of {}s".format(ci["postgres"])) + measurement = "postgres_connections" + tags = {"node": node, "service": None, "state": None} + connections_output = None + influx_string = "" + while True: + try: + # make sure this is active controller, otherwise postgres queries wont work + if isActiveController(): + while True: + fields = {} + # outputs a list of postgres dbs and their connections + connections_output = Popen("sudo -u postgres psql --pset pager=off -q -c 'SELECT datname,state,count(*) from pg_stat_activity group by datname,state;'", shell=True, stdout=PIPE) + line = connections_output.stdout.readline() + if line == "" or line is None: + break + # skip header + connections_output.stdout.readline() + while True: + line = connections_output.stdout.readline().strip("\n") + if not line: + break + else: + line = line.replace(" ", "").split("|") + if len(line) != 3: + continue + else: + svc = line[0] + connections = int(line[2]) + tags["service"] = svc + if svc not in fields: + fields[svc] = {"active": 0, "idle": 0, "other": 0} + if line[1] == "active": + fields[svc]["active"] = connections + elif line[1] == "idle": + fields[svc]["idle"] = connections + else: + fields[svc]["other"] = connections + influx_string += "{},'{}'='{}','{}'='{}','{}'='{}' '{}'='{}'".format(measurement, "node", tags["node"], "service", tags["service"], "state", "active", "connections", fields[svc]["active"]) + "\n" + influx_string += "{},'{}'='{}','{}'='{}','{}'='{}' '{}'='{}'".format(measurement, "node", tags["node"], "service", tags["service"], "state", "idle", "connections", fields[svc]["idle"]) + "\n" + influx_string += "{},'{}'='{}','{}'='{}','{}'='{}' '{}'='{}'".format(measurement, "node", tags["node"], "service", tags["service"], "state", "other", "connections", 
fields[svc]["other"]) + "\n" + + # send data to InfluxDB + p = Popen("curl -s -o /dev/null 'http://'{}':'{}'/write?db='{}'' --data-binary '{}'".format(influx_info[0], influx_info[1], influx_info[2], influx_string), shell=True) + p.communicate() + influx_string = "" + connections_output.kill() + if fast: + pass + else: + time.sleep(ci["postgres"]) + else: + time.sleep(20) + except KeyboardInterrupt: + if connections_output is not None: + connections_output.kill() + break + except Exception: + logging.error("postgres_connections collection stopped unexpectedly with error: {}. Restarting process...".format(sys.exc_info())) + time.sleep(3) + + +# collects rabbitmq information +def collectRabbitMq(influx_info, node, ci): + logging.basicConfig(filename="/tmp/livestream.log", filemode="a", format="%(asctime)s %(levelname)s %(message)s", level=logging.INFO) + logging.info("rabbitmq data starting collection with a collection interval of {}s".format(ci["rabbitmq"])) + measurement = "rabbitmq" + tags = OrderedDict([("node", node)]) + rabbitmq_output = None + while True: + try: + # make sure this is active controller, otherwise rabbit queries wont work + if isActiveController(): + while True: + fields = OrderedDict([]) + rabbitmq_output = Popen("sudo rabbitmqctl -n rabbit@localhost status", shell=True, stdout=PIPE) + # needed data starts where output = '{memory,[' + line = rabbitmq_output.stdout.readline() + # if no data is returned, exit + if line == "" or line is None: + rabbitmq_output.kill() + break + else: + line = rabbitmq_output.stdout.read().strip("\n").split("{memory,[") + if len(line) != 2: + rabbitmq_output.kill() + break + else: + # remove brackets from data + info = line[1].replace(" ", "").replace("{", "").replace("}", "").replace("\n", "").replace("[", "").replace("]", "").split(",") + for i in range(len(info) - 3): + if info[i].endswith("total"): + info[i] = info[i].replace("total", "memory_total") + # some data needs string manipulation + if info[i].startswith("clustering") or info[i].startswith("amqp"): + info[i] = "listeners_" + info[i] + if info[i].startswith("total_"): + info[i] = "descriptors_" + info[i] + if info[i].startswith("limit") or info[i].startswith("used"): + info[i] = "processes_" + info[i] + if info[i].replace("_", "").isalpha() and info[i + 1].isdigit(): + fields[info[i]] = info[i + 1] + s = generateString(measurement, tags.keys(), tags.values(), fields.keys(), fields.values()) + if s is None: + rabbitmq_output.kill() + else: + # send data to InfluxDB + p = Popen("curl -s -o /dev/null 'http://'{}':'{}'/write?db='{}'' --data-binary '{}'".format(influx_info[0], influx_info[1], influx_info[2], s), shell=True) + p.communicate() + time.sleep(ci["rabbitmq"]) + rabbitmq_output.kill() + else: + time.sleep(20) + except KeyboardInterrupt: + if rabbitmq_output is not None: + rabbitmq_output.kill() + break + except Exception: + logging.error("rabbitmq collection stopped unexpectedly with error: {}. 
Restarting process...".format(sys.exc_info()))
+            time.sleep(3)
+
+
+# collects rabbitmq messaging information
+def collectRabbitMqSvc(influx_info, node, ci, services):
+    logging.basicConfig(filename="/tmp/livestream.log", filemode="a", format="%(asctime)s %(levelname)s %(message)s", level=logging.INFO)
+    logging.info("rabbitmq_svc data starting collection with a collection interval of {}s".format(ci["rabbitmq"]))
+    measurement = "rabbitmq_svc"
+    tags = {"node": node, "service": None}
+    fields = {"messages": 0, "messages_ready": 0, "messages_unacknowledged": 0, "memory": 0, "consumers": 0}
+    rabbitmq_svc_output = None
+    good_string = False
+    influx_string = ""
+    while True:
+        try:
+            # make sure this is active controller, otherwise rabbit queries won't work
+            if isActiveController():
+                while True:
+                    rabbitmq_svc_output = Popen("sudo rabbitmqctl -n rabbit@localhost list_queues name messages messages_ready messages_unacknowledged memory consumers", shell=True, stdout=PIPE)
+                    # read the header line once; if no output is returned, exit
+                    first_line = rabbitmq_svc_output.stdout.readline()
+                    if first_line == "" or first_line is None:
+                        rabbitmq_svc_output.kill()
+                        break
+                    else:
+                        for line in rabbitmq_svc_output.stdout:
+                            line = line.split()
+                            if not line:
+                                break
+                            else:
+                                if len(line) != 6:
+                                    good_string = False
+                                    break
+                                else:
+                                    # read line and fill fields
+                                    if line[0] in services:
+                                        tags["service"] = line[0]
+                                        fields["messages"] = line[1]
+                                        fields["messages_ready"] = line[2]
+                                        fields["messages_unacknowledged"] = line[3]
+                                        fields["memory"] = line[4]
+                                        fields["consumers"] = line[5]
+                                        influx_string += "{},'{}'='{}','{}'='{}' '{}'='{}','{}'='{}','{}'='{}','{}'='{}','{}'='{}'".format(measurement, "node", tags["node"], "service", tags["service"], "messages", fields["messages"], "messages_ready", fields["messages_ready"], "messages_unacknowledged", fields["messages_unacknowledged"], "memory", fields["memory"], "consumers", fields["consumers"]) + "\n"
+                                        good_string = True
+                        if good_string:
+                            # send data to InfluxDB
+                            p = Popen("curl -s -o /dev/null 'http://'{}':'{}'/write?db='{}'' --data-binary '{}'".format(influx_info[0], influx_info[1], influx_info[2], influx_string), shell=True)
+                            p.communicate()
+                            influx_string = ""
+                        time.sleep(ci["rabbitmq"])
+                        rabbitmq_svc_output.kill()
+            else:
+                time.sleep(20)
+        except KeyboardInterrupt:
+            if rabbitmq_svc_output is not None:
+                rabbitmq_svc_output.kill()
+            break
+        except Exception:
+            logging.error("rabbitmq_svc collection stopped unexpectedly with error: {}. 
Restarting process...".format(sys.exc_info())) + time.sleep(3) + + +# collects open file information +def collectFilestats(influx_info, node, ci, services, syseng_services, exclude_list, skip_list, collect_all): + logging.basicConfig(filename="/tmp/livestream.log", filemode="a", format="%(asctime)s %(levelname)s %(message)s", level=logging.INFO) + logging.info("filestats data starting collection with a collection interval of {}s".format(ci["filestats"])) + measurement = "filestats" + tags = {"node": node} + influx_string = "" + while True: + try: + fields = {} + # fill dict with services from engtools.conf + if collect_all is False: + for svc in services: + fields[svc] = {"read/write": 0, "write": 0, "read": 0} + fields["static_syseng"] = {"read/write": 0, "write": 0, "read": 0} + fields["live_syseng"] = {"read/write": 0, "write": 0, "read": 0} + fields["total"] = {"read/write": 0, "write": 0, "read": 0} + for process in os.listdir("/proc/"): + if process.isdigit(): + # sometimes the process dies before reading its info + try: + svc = psutil.Process(int(process)).name() + svc = svc.split()[0].replace("(", "").replace(")", "").strip(":").split("/")[-1] + except Exception: + continue + if collect_all is False: + if svc in services: + try: + p = Popen("ls -l /proc/{}/fd".format(process), shell=True, stdout=PIPE) + p.stdout.readline() + while True: + line = p.stdout.readline().strip("\n").split() + if not line: + break + else: + priv = line[0] + if priv[1] == "r" and priv[2] == "w": + fields[svc]["read/write"] += 1 + fields["total"]["read/write"] += 1 + elif priv[1] == "r" and priv[2] != "w": + fields[svc]["read"] += 1 + fields["total"]["read"] += 1 + elif priv[1] != "r" and priv[2] == "w": + fields[svc]["write"] += 1 + fields["total"]["write"] += 1 + except Exception: + p.kill() + continue + p.kill() + + elif svc in syseng_services: + try: + p = Popen("ls -l /proc/{}/fd".format(process), shell=True, stdout=PIPE) + p.stdout.readline() + while True: + line = p.stdout.readline().strip("\n").split() + if not line: + break + else: + priv = line[0] + if svc == "live_stream.py": + if priv[1] == "r" and priv[2] == "w": + fields["live_syseng"]["read/write"] += 1 + fields["total"]["read/write"] += 1 + elif priv[1] == "r" and priv[2] != "w": + fields["live_syseng"]["read"] += 1 + fields["total"]["read"] += 1 + elif priv[1] != "r" and priv[2] == "w": + fields["live_syseng"]["write"] += 1 + fields["total"]["write"] += 1 + else: + if priv[1] == "r" and priv[2] == "w": + fields["static_syseng"]["read/write"] += 1 + fields["total"]["read/write"] += 1 + elif priv[1] == "r" and priv[2] != "w": + fields["static_syseng"]["read"] += 1 + fields["total"]["read"] += 1 + elif priv[1] != "r" and priv[2] == "w": + fields["static_syseng"]["write"] += 1 + fields["total"]["write"] += 1 + except Exception: + p.kill() + continue + p.kill() + + else: + # remove garbage processes + if svc in exclude_list or svc in skip_list or svc.startswith("-") or svc.endswith("-") or svc[0].isdigit() or svc[-1].isdigit() or svc[0].isupper(): + continue + elif svc not in fields: + fields[svc] = {"read/write": 0, "write": 0, "read": 0} + try: + p = Popen("ls -l /proc/{}/fd".format(process), shell=True, stdout=PIPE) + p.stdout.readline() + while True: + line = p.stdout.readline().strip("\n").split() + if not line: + break + else: + priv = line[0] + if priv[1] == "r" and priv[2] == "w": + fields[svc]["read/write"] += 1 + fields["total"]["read/write"] += 1 + elif priv[1] == "r" and priv[2] != "w": + fields[svc]["read"] += 1 + 
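+                                            # each /proc/<pid>/fd entry is a symlink, so "ls -l" prints a
+                                            # mode string such as "lrwx------": priv[1] and priv[2] are the
+                                            # owner read/write bits, giving the read/write, read-only and
+                                            # write-only buckets used in this if/elif chain.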
fields["total"]["read"] += 1 + elif priv[1] != "r" and priv[2] == "w": + fields[svc]["write"] += 1 + fields["total"]["write"] += 1 + if fields[svc]["read/write"] == 0 and fields[svc]["read"] == 0 and fields[svc]["write"] == 0: + del fields[svc] + except Exception: + p.kill() + continue + p.kill() + for key in fields.keys(): + influx_string += "{},'{}'='{}','{}'='{}' '{}'='{}','{}'='{}','{}'='{}'".format(measurement, "node", tags["node"], "service", key, "read/write", fields[key]["read/write"], "write", fields[key]["write"], "read", fields[key]["read"]) + "\n" + # send data to InfluxDB + p = Popen("curl -s -o /dev/null 'http://'{}':'{}'/write?db='{}'' --data-binary '{}'".format(influx_info[0], influx_info[1], influx_info[2], influx_string), shell=True) + p.communicate() + influx_string = "" + time.sleep(ci["filestats"]) + except KeyboardInterrupt: + break + except Exception: + logging.error("filestats collection stopped unexpectedly with error: {}. Restarting process...".format(sys.exc_info())) + time.sleep(3) + + +# collects vshell information +def collectVswitch(influx_info, node, ci): + logging.basicConfig(filename="/tmp/livestream.log", filemode="a", format="%(asctime)s %(levelname)s %(message)s", level=logging.INFO) + logging.info("vswitch data starting collection with a collection interval of {}s".format(ci["vswitch"])) + measurement = "vswitch" + tags = OrderedDict([("node", node), ("engine", 0)]) + tags1 = OrderedDict([("node", node), ("port", 0)]) + tags2 = OrderedDict([("node", node), ("interface", 0)]) + fields = OrderedDict([("cpuid", 0), ("rx_packets", 0), ("tx_packets", 0), ("rx_discard", 0), ("tx_discard", 0), ("tx_disabled", 0), ("tx_overflow", 0), ("tx_timeout", 0), ("usage", 0)]) + fields1 = OrderedDict([("rx_packets", 0), ("tx_packets", 0), ("rx_bytes", 0), ("tx_bytes", 0), ("tx_errors", 0), ("rx_errors", 0), ("rx_nombuf", 0)]) + fields2 = OrderedDict([("rx_packets", 0), ("tx_packets", 0), ("rx_bytes", 0), ("tx_bytes", 0), ("tx_errors", 0), ("rx_errors", 0), ("tx_discards", 0), ("rx_discards", 0), ("rx_floods", 0), ("rx_no_vlan", 0)]) + vshell_engine_stats_output = vshell_port_stats_output = vshell_interface_stats_output = None + influx_string = "" + while True: + try: + vshell_engine_stats_output = Popen("vshell engine-stats-list", shell=True, stdout=PIPE) + # skip first few lines + vshell_engine_stats_output.stdout.readline() + vshell_engine_stats_output.stdout.readline() + vshell_engine_stats_output.stdout.readline() + while True: + line = vshell_engine_stats_output.stdout.readline().replace("|", "").split() + if not line: + break + # skip lines like +++++++++++++++++++++++++++++ + elif line[0].startswith("+"): + continue + else: + # get info from output + i = 2 + tags["engine"] = line[1] + for key in fields: + fields[key] = line[i].strip("%") + i += 1 + influx_string += "{},'{}'='{}','{}'='{}' '{}'='{}','{}'='{}','{}'='{}','{}'='{}','{}'='{}','{}'='{}','{}'='{}','{}'='{}','{}'='{}'".format(measurement, tags.keys()[0], tags.values()[0], tags.keys()[1], tags.values()[1], fields.keys()[0], fields.values()[0], fields.keys()[1], fields.values()[1], fields.keys()[2], fields.values()[2], fields.keys()[3], fields.values()[3], fields.keys()[4], fields.values()[4], fields.keys()[5], fields.values()[5], fields.keys()[6], fields.values()[6], fields.keys()[7], fields.values()[7], fields.keys()[8], fields.values()[8]) + "\n" + vshell_engine_stats_output.kill() + vshell_port_stats_output = Popen("vshell port-stats-list", shell=True, stdout=PIPE) + 
vshell_port_stats_output.stdout.readline() + vshell_port_stats_output.stdout.readline() + vshell_port_stats_output.stdout.readline() + while True: + line = vshell_port_stats_output.stdout.readline().replace("|", "").split() + if not line: + break + elif line[0].startswith("+"): + continue + else: + i = 3 + tags1["port"] = line[1] + for key in fields1: + fields1[key] = line[i].strip("%") + i += 1 + influx_string += "{},'{}'='{}','{}'='{}' '{}'='{}','{}'='{}','{}'='{}','{}'='{}','{}'='{}','{}'='{}','{}'='{}'".format(measurement, tags1.keys()[0], tags1.values()[0], tags1.keys()[1], tags1.values()[1], fields1.keys()[0], fields1.values()[0], fields1.keys()[1], fields1.values()[1], fields1.keys()[2], fields1.values()[2], fields1.keys()[3], fields1.values()[3], fields1.keys()[4], fields1.values()[4], fields1.keys()[5], fields1.values()[5], fields1.keys()[6], fields1.values()[6]) + "\n" + vshell_port_stats_output.kill() + vshell_interface_stats_output = Popen("vshell interface-stats-list", shell=True, stdout=PIPE) + vshell_interface_stats_output.stdout.readline() + vshell_interface_stats_output.stdout.readline() + vshell_interface_stats_output.stdout.readline() + while True: + line = vshell_interface_stats_output.stdout.readline().replace("|", "").split() + if not line: + break + elif line[0].startswith("+"): + continue + else: + if line[2] == "ethernet" and line[3].startswith("eth"): + i = 4 + tags2["interface"] = line[3] + for key in fields2: + fields2[key] = line[i].strip("%") + i += 1 + influx_string += "{},'{}'='{}','{}'='{}' '{}'='{}','{}'='{}','{}'='{}','{}'='{}','{}'='{}','{}'='{}','{}'='{}','{}'='{}','{}'='{}','{}'='{}'".format(measurement, tags2.keys()[0], tags2.values()[0], tags2.keys()[1], tags2.values()[1], fields2.keys()[0], fields2.values()[0], fields2.keys()[1], fields2.values()[1], fields2.keys()[2], fields2.values()[2], fields2.keys()[3], fields2.values()[3], fields2.keys()[4], fields2.values()[4], fields2.keys()[5], fields2.values()[5], fields2.keys()[6], fields2.values()[6], fields2.keys()[7], fields2.values()[7], fields2.keys()[8], fields2.values()[8], fields2.keys()[9], fields2.values()[9]) + "\n" + else: + continue + vshell_interface_stats_output.kill() + # send data to InfluxDB + p = Popen("curl -s -o /dev/null 'http://'{}':'{}'/write?db='{}'' --data-binary '{}'".format(influx_info[0], influx_info[1], influx_info[2], influx_string), shell=True) + p.communicate() + influx_string = "" + time.sleep(ci["vswitch"]) + except KeyboardInterrupt: + if vshell_engine_stats_output is not None: + vshell_engine_stats_output.kill() + if vshell_port_stats_output is not None: + vshell_port_stats_output.kill() + if vshell_interface_stats_output is not None: + vshell_interface_stats_output.kill() + break + except Exception: + logging.error("vswitch collection stopped unexpectedly with error: {}. 
Restarting process...".format(sys.exc_info()))
+            time.sleep(3)
+
+
+# collects the number of cores
+def collectCpuCount(influx_info, node, ci):
+    logging.basicConfig(filename="/tmp/livestream.log", filemode="a", format="%(asctime)s %(levelname)s %(message)s", level=logging.INFO)
+    logging.info("cpu_count data starting collection with a collection interval of {}s".format(ci["cpu_count"]))
+    measurement = "cpu_count"
+    tags = {"node": node}
+    while True:
+        try:
+            fields = {"cpu_count": cpu_count()}
+            p = Popen("curl -s -o /dev/null 'http://'{}':'{}'/write?db='{}'' --data-binary '{},'{}'='{}' '{}'='{}''".format(influx_info[0], influx_info[1], influx_info[2], measurement, "node", tags["node"], "cpu_count", fields["cpu_count"]), shell=True)
+            p.communicate()
+            time.sleep(ci["cpu_count"])
+        except KeyboardInterrupt:
+            break
+        except Exception:
+            logging.error("cpu_count collection stopped unexpectedly with error: {}. Restarting process...".format(sys.exc_info()))
+            time.sleep(3)
+
+
+# collect API GET and POST requests/sec
+def collectApi(influx_info, node, ci, openstack_svcs):
+    logging.basicConfig(filename="/tmp/livestream.log", filemode="a", format="%(asctime)s %(levelname)s %(message)s", level=logging.INFO)
+    logging.info("api_request data starting collection with a 1s sampling interval")
+    measurement = "api_requests"
+    tags = {"node": node}
+    openstack_services = openstack_svcs
+    influx_string = ""
+    while True:
+        try:
+            fields = {}
+            tmp = {}
+            tmp1 = {}
+            # get initial values
+            for s in openstack_services:
+                fields[s] = {"get": 0, "post": 0}
+                tmp[s] = {"get": 0, "post": 0}
+                log = "/var/log/{0}/{0}-api.log".format(s)
+                if os.path.exists(log):
+                    if s == "ceilometer":
+                        p = Popen("awk '/INFO/ && /500/' {} | wc -l".format(log), shell=True, stdout=PIPE)
+                    else:
+                        p = Popen("awk '/INFO/ && /GET/' {} | wc -l".format(log), shell=True, stdout=PIPE)
+                    init_api_get = int(p.stdout.readline())
+                    tmp[s]["get"] = init_api_get
+                    p.kill()
+                    p = Popen("awk '/INFO/ && /POST/' {} | wc -l".format(log), shell=True, stdout=PIPE)
+                    init_api_post = int(p.stdout.readline())
+                    tmp[s]["post"] = init_api_post
+                    p.kill()
+            time.sleep(1)
+            # get new values
+            for s in openstack_services:
+                tmp1[s] = {"get": 0, "post": 0}
+                log = "/var/log/{0}/{0}-api.log".format(s)
+                if os.path.exists(log):
+                    if s == "ceilometer":
+                        p = Popen("awk '/INFO/ && /500/' {} | wc -l".format(log), shell=True, stdout=PIPE)
+                    else:
+                        p = Popen("awk '/INFO/ && /GET/' {} | wc -l".format(log), shell=True, stdout=PIPE)
+                    api_get = int(p.stdout.readline())
+                    tmp1[s]["get"] = api_get
+                    p.kill()
+                    p = Popen("awk '/INFO/ && /POST/' {} | wc -l".format(log), shell=True, stdout=PIPE)
+                    api_post = int(p.stdout.readline())
+                    tmp1[s]["post"] = api_post
+                    p.kill()
+            # take difference
+            for key in fields:
+                if (key in tmp and key in tmp1) and (tmp1[key]["get"] >= tmp[key]["get"]) and (tmp1[key]["post"] >= tmp[key]["post"]):
+                    fields[key]["get"] = (tmp1[key]["get"] - tmp[key]["get"])
+                    fields[key]["post"] = (tmp1[key]["post"] - tmp[key]["post"])
+                    influx_string += "{},'{}'='{}','{}'='{}' '{}'='{}','{}'='{}'".format(measurement, "node", tags["node"], "service", key, "get_requests", fields[key]["get"], "post_requests", fields[key]["post"]) + "\n"
+            p = Popen("curl -s -o /dev/null 'http://'{}':'{}'/write?db='{}'' --data-binary '{}'".format(influx_info[0], influx_info[1], influx_info[2], influx_string), shell=True)
+            p.communicate()
+            influx_string = ""
+        except KeyboardInterrupt:
+            break
+        except Exception:
+            logging.error("api_request collection stopped unexpectedly with error: {}. Restarting process...".format(sys.exc_info()))
+            time.sleep(3)
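+
+
+# Worked example (illustration only, not called by the collectors): collectApi
+# turns two cumulative log counts taken dt seconds apart into a request rate,
+# and skips the sample when the counter goes backwards (e.g. after a log
+# rotation). The helper name below is hypothetical.
+def _example_request_rate(count_before, count_after, dt=1.0):
+    # e.g. count_before=120, count_after=135, dt=1.0 -> 15.0 requests/sec
+    if count_after < count_before:
+        # counter reset; drop this sample, mirroring the >= guard in collectApi
+        return None
+    return (count_after - count_before) / float(dt)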
+
+
+# returns the cores dedicated to platform use
+def getPlatformCores(node, cpe):
+    if cpe is True or node.startswith("compute"):
+        logging.basicConfig(filename="/tmp/livestream.log", filemode="a", format="%(asctime)s %(levelname)s %(message)s", level=logging.INFO)
+        core_list = list()
+        try:
+            with open("/etc/nova/compute_reserved.conf", "r") as f:
+                for line in f:
+                    if line.startswith("PLATFORM_CPU_LIST"):
+                        core_list = line.split("=")[1].replace("\"", "").strip("\n").split(",")
+                        core_list = [int(x) for x in core_list]
+            return core_list
+        except Exception:
+            logging.warning("skipping platform specific collection for {} due to error: {}".format(node, sys.exc_info()))
+            return core_list
+    else:
+        return []
+
+
+# determine if controller is active/standby
+def isActiveController():
+    logging.basicConfig(filename="/tmp/livestream.log", filemode="a", format="%(asctime)s %(levelname)s %(message)s", level=logging.INFO)
+    o = None
+    try:
+        o = Popen("sm-dump", shell=True, stdout=PIPE)
+        o.stdout.readline()
+        o.stdout.readline()
+        # read line for active/standby
+        line = o.stdout.readline().strip("\n").split()
+        per = line[1]
+        o.kill()
+        if per == "active":
+            return True
+        else:
+            return False
+    except Exception:
+        if o is not None:
+            o.kill()
+        logging.error("sm-dump command could not be called properly. This is usually caused by a swact. Trying again on next call: {}".format(sys.exc_info()))
+        return False
+
+
+# checks whether the duration param has been set. If set, sleep; then kill processes upon waking up
+def checkDuration(duration):
+    logging.basicConfig(filename="/tmp/livestream.log", filemode="a", format="%(asctime)s %(levelname)s %(message)s", level=logging.INFO)
+    if duration is None:
+        return None
+    else:
+        time.sleep(duration)
+        print "Duration interval has ended. Killing processes now"
+        logging.warning("Duration interval has ended. Killing processes now")
+        raise KeyboardInterrupt
+
+
+# kill all processes and log each death
+def killProcesses(tasks):
+    logging.basicConfig(filename="/tmp/livestream.log", filemode="a", format="%(asctime)s %(levelname)s %(message)s", level=logging.INFO)
+    for t in tasks:
+        try:
+            logging.info("{} data stopped collection".format(str(t.name)))
+            t.terminate()
+        except Exception:
+            continue
+
+
+# create database in InfluxDB and add it to Grafana
+def createDB(influx_info, grafana_port, grafana_api_key):
+    logging.basicConfig(filename="/tmp/livestream.log", filemode="a", format="%(asctime)s %(levelname)s %(message)s", level=logging.INFO)
+    p = None
+    try:
+        logging.info("Adding database to InfluxDB and Grafana")
+        # create database in InfluxDB if not already created. 
Will NOT overwrite previous db + p = Popen("curl -s -XPOST 'http://'{}':'{}'/query' --data-urlencode 'q=CREATE DATABASE {}'".format(influx_info[0], influx_info[1], influx_info[2]), shell=True, stdout=PIPE) + response = p.stdout.read().strip("\n") + if response == "": + raise Exception("An error occurred while creating the database: Please make sure the Grafana and InfluxDB services are running") + else: + logging.info("InfluxDB response: {}".format(response)) + p.kill() + + # add database to Grafana + grafana_db = '{"name":"%s", "type":"influxdb", "url":"http://%s:%s", "access":"proxy", "isDefault":false, "database":"%s"}' % (influx_info[2], influx_info[0], influx_info[1], influx_info[2]) + p = Popen("curl -s 'http://{}:{}/api/datasources' -H 'Accept: application/json' -H 'Content-Type: application/json' -H 'Authorization: Bearer {}' --data-binary '{}'".format(influx_info[0], grafana_port, grafana_api_key, grafana_db), shell=True, stdout=PIPE) + response = p.stdout.read().strip("\n") + if response == "": + raise Exception("An error occurred while creating the database: Please make sure the Grafana and InfluxDB services are running") + else: + logging.info("Grafana response: {}".format(response)) + p.kill() + except KeyboardInterrupt: + if p is not None: + p.kill() + except Exception as e: + print e.message + sys.exit(0) + + +# delete database from InfluxDB and remove it from Grafana +def deleteDB(influx_info, grafana_port, grafana_api_key): + logging.basicConfig(filename="/tmp/livestream.log", filemode="a", format="%(asctime)s %(levelname)s %(message)s", level=logging.INFO) + p = None + try: + answer = str(raw_input("\nAre you sure you would like to delete {}? (Y/N): ".format(influx_info[2]))).lower() + except Exception: + answer = None + if answer is None or answer == "" or answer == "y" or answer == "yes": + try: + logging.info("Removing database from InfluxDB and Grafana") + print "Removing database from InfluxDB and Grafana. Please wait..." 
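+            # Removal below is a three-step flow: DROP DATABASE in InfluxDB, look
+            # up the Grafana datasource id for this database name, then DELETE
+            # that datasource. A successful InfluxDB 1.x drop typically returns a
+            # small JSON body (e.g. {"results":[{}]}) and the Grafana id lookup
+            # returns something like {"id":1}; an empty response means one of the
+            # services is unreachable, which is why it is treated as an error.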
+ # delete database from InfluxDB + p = Popen("curl -s -XPOST 'http://'{}':'{}'/query' --data-urlencode 'q=DROP DATABASE {}'".format(influx_info[0], influx_info[1], influx_info[2]), shell=True, stdout=PIPE) + response = p.stdout.read().strip("\n") + if response == "": + raise Exception("An error occurred while removing the database: Please make sure the Grafana and InfluxDB services are running") + else: + logging.info("InfluxDB response: {}".format(response)) + p.kill() + + # get database ID for db removal + p = Popen("curl -s -G 'http://{}:{}/api/datasources/id/{}' -H 'Accept: application/json' -H 'Content-Type: application/json' -H 'Authorization: Bearer {}'".format(influx_info[0], grafana_port, influx_info[2], grafana_api_key), shell=True, stdout=PIPE) + id = p.stdout.read().split(":")[1].strip("}") + if id == "": + raise Exception("An error occurred while removing the database: Could not determine the database ID") + p.kill() + + # remove database from Grafana + p = Popen("curl -s -XDELETE 'http://{}:{}/api/datasources/{}' -H 'Accept: application/json' -H 'Content-Type: application/json' -H 'Authorization: Bearer {}'".format(influx_info[0], grafana_port, id, grafana_api_key), shell=True, stdout=PIPE) + response = p.stdout.read().strip("\n") + if response == "": + raise Exception("An error occurred while removing the database: Please make sure the Grafana and InfluxDB services are running") + else: + logging.info("Grafana response: {}".format(response)) + p.kill() + except KeyboardInterrupt: + if p is not None: + p.kill() + except Exception as e: + print e.message + sys.exit(0) + + +# used for output log +def appendToFile(file, content): + with open(file, "a") as f: + fcntl.flock(f, fcntl.LOCK_EX) + f.write(content + '\n') + fcntl.flock(f, fcntl.LOCK_UN) + + +# main program +if __name__ == "__main__": + # make sure user is root + if os.geteuid() != 0: + print "Must be run as root!\n" + sys.exit(0) + + # initialize variables + cpe_lab = False + influx_ip = influx_port = influx_db = "" + external_if = "" + influx_info = list() + grafana_port = "" + grafana_api_key = "" + controller_services = list() + compute_services = list() + storage_services = list() + rabbit_services = list() + common_services = list() + services = {} + live_svc = ("live_stream.py",) + static_svcs = ("occtop", "memtop", "schedtop", "top.sh", "iostat.sh", "netstats.sh", "diskstats.sh", "memstats.sh", "filestats.sh", "ceph.sh", "postgres.sh", "rabbitmq.sh", "vswitch.sh") + collection_intervals = {"memtop": None, "memstats": None, "occtop": None, "schedtop": None, "load_avg": None, "cpu_count": None, "diskstats": None, "iostat": None, "filestats": None, "netstats": None, "postgres": None, "rabbitmq": None, "vswitch": None} + openstack_services = ("nova", "cinder", "aodh", "ceilometer", "heat", "glance", "ceph", "horizon", "keystone", "puppet", "sysinv", "neutron", "nova_api", "postgres") + # memstats, schedtop, and filestats must skip/exclude certain fields when collect_all is enabled. 
No need to collect this stuff + exclude_list = ("python", "python2", "bash", "perl", "sudo", "init") + skip_list = ("ps", "top", "sh", "", "curl", "awk", "wc", "sleep", "lsof", "cut", "grep", "ip", "tail", "su") + duration = None + unconverted_duration = "" + collect_api_requests = False + api_requests = "" + auto_delete_db = False + delete_db = "" + collect_all_services = False + all_services = "" + fast_postgres_connections = False + fast_postgres = "" + config = ConfigParser.ConfigParser() + + node = os.popen("hostname").read().strip("\n") + + # get info from engtools.conf + try: + conf_file = "" + if "engtools.conf" in tuple(os.listdir(os.getcwd())): + conf_file = os.getcwd() + "/engtools.conf" + elif "engtools.conf" in tuple(os.listdir("/etc/engtools/")): + conf_file = "/etc/engtools/engtools.conf" + config.read(conf_file) + if config.get("LabConfiguration", "CPE_LAB").lower() == "y" or config.get("LabConfiguration", "CPE_LAB").lower() == "yes": + cpe_lab = True + if node.startswith("controller"): + external_if = config.get("CollectInternal", "{}_EXTERNAL_INTERFACE".format(node.upper().replace("-", ""))) + influx_ip = config.get("RemoteServer", "INFLUX_IP") + influx_port = config.get("RemoteServer", "INFLUX_PORT") + influx_db = config.get("RemoteServer", "INFLUX_DB") + grafana_port = config.get("RemoteServer", "GRAFANA_PORT") + grafana_api_key = config.get("RemoteServer", "GRAFANA_API_KEY") + duration = config.get("LiveStream", "DURATION") + unconverted_duration = config.get("LiveStream", "DURATION") + api_requests = config.get("AdditionalOptions", "API_REQUESTS") + delete_db = config.get("AdditionalOptions", "AUTO_DELETE_DB") + all_services = config.get("AdditionalOptions", "ALL_SERVICES") + fast_postgres = config.get("AdditionalOptions", "FAST_POSTGRES_CONNECTIONS") + # additional options + if api_requests.lower() == "y" or api_requests.lower() == "yes": + collect_api_requests = True + if delete_db.lower() == "y" or delete_db.lower() == "yes": + auto_delete_db = True + if all_services.lower() == "y" or all_services.lower() == "yes": + collect_all_services = True + if fast_postgres.lower() == "y" or fast_postgres.lower() == "yes": + fast_postgres_connections = True + # convert duration into seconds + if duration == "": + duration = None + elif duration.endswith("s") or duration.endswith("S"): + duration = duration.strip("s") + duration = duration.strip("S") + duration = int(duration) + elif duration.endswith("m") or duration.endswith("M"): + duration = duration.strip("m") + duration = duration.strip("M") + duration = int(duration) * 60 + elif duration.endswith("h") or duration.endswith("H"): + duration = duration.strip("h") + duration = duration.strip("H") + duration = int(duration) * 3600 + elif duration.endswith("d") or duration.endswith("D"): + duration = duration.strip("d") + duration = duration.strip("D") + duration = int(duration) * 3600 * 24 + controller_services = tuple(config.get("ControllerServices", "CONTROLLER_SERVICE_LIST").split()) + compute_services = tuple(config.get("ComputeServices", "COMPUTE_SERVICE_LIST").split()) + storage_services = tuple(config.get("StorageServices", "STORAGE_SERVICE_LIST").split()) + rabbit_services = tuple(config.get("RabbitmqServices", "RABBITMQ_QUEUE_LIST").split()) + common_services = tuple(config.get("CommonServices", "COMMON_SERVICE_LIST").split()) + # get collection intervals + for i in config.options("Intervals"): + if config.get("Intervals", i) == "" or config.get("Intervals", i) is None: + collection_intervals[i] = None + else: + 
collection_intervals[i] = int(config.get("Intervals", i)) + except Exception: + print "An error has occurred when parsing the engtools.conf configuration file: {}".format(sys.exc_info()) + sys.exit(0) + + syseng_services = live_svc + static_svcs + if cpe_lab is True: + services["controller_services"] = controller_services + compute_services + storage_services + common_services + else: + controller_services += common_services + compute_services += common_services + storage_services += common_services + services["controller_services"] = controller_services + services["compute_services"] = compute_services + services["storage_services"] = storage_services + services["common_services"] = common_services + services["syseng_services"] = syseng_services + services["rabbit_services"] = rabbit_services + + influx_info.append(influx_ip) + influx_info.append(influx_port) + influx_info.append(influx_db) + + # add config options to log + with open("/tmp/livestream.log", "w") as e: + e.write("Configuration for {}:\n".format(node)) + e.write("-InfluxDB address: {}:{}\n".format(influx_ip, influx_port)) + e.write("-InfluxDB name: {}\n".format(influx_db)) + e.write("-CPE lab: {}\n".format(str(cpe_lab))) + e.write(("-Collect API requests: {}\n".format(str(collect_api_requests)))) + e.write(("-Collect all services: {}\n".format(str(collect_all_services)))) + e.write(("-Fast postgres connections: {}\n".format(str(fast_postgres_connections)))) + e.write(("-Automatic database removal: {}\n".format(str(auto_delete_db)))) + if duration is not None: + e.write("-Live stream duration: {}\n".format(unconverted_duration)) + e.close() + + # add POSTROUTING entry to NAT table + if cpe_lab is False: + # check controller-0 for NAT entry. If not there, add it + if node.startswith("controller"): + # use first interface if not specified in engtools.conf + if external_if == "" or external_if is None: + p = Popen("ifconfig", shell=True, stdout=PIPE) + external_if = p.stdout.readline().split(":")[0] + p.kill() + appendToFile("/tmp/livestream.log", "-External interface for {}: {}".format(node, external_if)) + # enable IP forwarding + p = Popen("sysctl -w net.ipv4.ip_forward=1 > /dev/null", shell=True) + p.communicate() + p = Popen("iptables -t nat -L --line-numbers", shell=True, stdout=PIPE) + tmp = [] + # entries need to be removed in reverse order + for line in p.stdout: + tmp.append(line.strip("\n")) + for line in reversed(tmp): + l = " ".join(line.strip("\n").split()[1:]) + # if an entry already exists, remove it + if l.startswith("MASQUERADE tcp -- anywhere"): + line_number = line.strip("\n").split()[0] + p1 = Popen("iptables -t nat -D POSTROUTING {}".format(line_number), shell=True) + p1.communicate() + p.kill() + appendToFile("/tmp/livestream.log", "-Adding NAT information to allow compute/storage nodes to communicate with remote server\n") + # add new entry for both InfluxDB and Grafana + p = Popen("iptables -t nat -A POSTROUTING -p tcp -o {} -d {} --dport {} -j MASQUERADE".format(external_if, influx_ip, influx_port), shell=True) + p.communicate() + p = Popen("iptables -t nat -A POSTROUTING -p tcp -o {} -d {} --dport {} -j MASQUERADE".format(external_if, influx_ip, grafana_port), shell=True) + p.communicate() + + appendToFile("/tmp/livestream.log", "\nStarting collection at {}\n".format(datetime.datetime.utcnow())) + tasks = [] + + createDB(influx_info, grafana_port, grafana_api_key) + + try: + node_type = str(node.split("-")[0]) + # if not a standard node, run the common functions with collect_all enabled + if node_type 
!= "controller" and node_type != "compute" and node_type != "storage": + node_type = "common" + collect_all_services = True + + if collection_intervals["memstats"] is not None: + p = Process(target=collectMemstats, args=(influx_info, node, collection_intervals, services["{}_services".format(node_type)], services["syseng_services"], openstack_services, exclude_list, skip_list, collect_all_services), name="memstats") + tasks.append(p) + p.start() + if collection_intervals["schedtop"] is not None: + p = Process(target=collectSchedtop, args=(influx_info, node, collection_intervals, services["{}_services".format(node_type)], services["syseng_services"], openstack_services, exclude_list, skip_list, collect_all_services), name="schedtop") + tasks.append(p) + p.start() + if collection_intervals["filestats"] is not None: + p = Process(target=collectFilestats, args=(influx_info, node, collection_intervals, services["{}_services".format(node_type)], services["syseng_services"], exclude_list, skip_list, collect_all_services), name="filestats") + tasks.append(p) + p.start() + if collection_intervals["occtop"] is not None: + p = Process(target=collectOcctop, args=(influx_info, node, collection_intervals, getPlatformCores(node, cpe_lab)), name="occtop") + tasks.append(p) + p.start() + if collection_intervals["load_avg"] is not None: + p = Process(target=collectLoadavg, args=(influx_info, node, collection_intervals), name="load_avg") + tasks.append(p) + p.start() + if collection_intervals["cpu_count"] is not None: + p = Process(target=collectCpuCount, args=(influx_info, node, collection_intervals), name="cpu_count") + tasks.append(p) + p.start() + if collection_intervals["memtop"] is not None: + p = Process(target=collectMemtop, args=(influx_info, node, collection_intervals), name="memtop") + tasks.append(p) + p.start() + if collection_intervals["diskstats"] is not None: + p = Process(target=collectDiskstats, args=(influx_info, node, collection_intervals), name="diskstats") + tasks.append(p) + p.start() + if collection_intervals["iostat"] is not None: + p = Process(target=collectIostat, args=(influx_info, node, collection_intervals), name="iostat") + tasks.append(p) + p.start() + if collection_intervals["netstats"] is not None: + p = Process(target=collectNetstats, args=(influx_info, node, collection_intervals), name="netstats") + tasks.append(p) + p.start() + if collect_api_requests is True and node_type == "controller": + p = Process(target=collectApi, args=(influx_info, node, collection_intervals, openstack_services), name="api_requests") + tasks.append(p) + p.start() + + if node_type == "controller": + if collection_intervals["postgres"] is not None: + p = Process(target=collectPostgres, args=(influx_info, node, collection_intervals), name="postgres") + tasks.append(p) + p.start() + p = Process(target=collectPostgresConnections, args=(influx_info, node, collection_intervals, fast_postgres_connections), name="postgres_connections") + tasks.append(p) + p.start() + if collection_intervals["rabbitmq"] is not None: + p = Process(target=collectRabbitMq, args=(influx_info, node, collection_intervals), name="rabbitmq") + tasks.append(p) + p.start() + p = Process(target=collectRabbitMqSvc, args=(influx_info, node, collection_intervals, services["rabbit_services"]), name="rabbitmq_svc") + tasks.append(p) + p.start() + + if node_type == "compute" or cpe_lab is True: + if collection_intervals["vswitch"] is not None: + p = Process(target=collectVswitch, args=(influx_info, node, collection_intervals), 
name="vswitch") + tasks.append(p) + p.start() + + print "Sending data to InfluxDB. Please tail /tmp/livestream.log" + + checkDuration(duration) + # give a small delay to ensure services have started + time.sleep(3) + for t in tasks: + os.wait() + except KeyboardInterrupt: + pass + finally: + # end here once duration param has ended or ctrl-c is pressed + appendToFile("/tmp/livestream.log", "\nEnding collection at {}\n".format(datetime.datetime.utcnow())) + if tasks is not None and len(tasks) > 0: + killProcesses(tasks) + if auto_delete_db is True: + deleteDB(influx_info, grafana_port, grafana_api_key) + sys.exit(0) diff --git a/middleware/util/recipes-common/engtools/hostdata-collectors/scripts/memstats.sh b/middleware/util/recipes-common/engtools/hostdata-collectors/scripts/memstats.sh new file mode 100644 index 0000000..664b10b --- /dev/null +++ b/middleware/util/recipes-common/engtools/hostdata-collectors/scripts/memstats.sh @@ -0,0 +1,112 @@ +#!/bin/bash +# Usage: memstats.sh [-p ] [-i ] [-c ] [-h] +TOOLBIN=$(dirname $0) + +# Initialize tools environment variables, and define common utility functions +. ${TOOLBIN}/engtools_util.sh +tools_init +if [ $? -ne 0 ]; then + echo "FATAL, tools_init - could not setup environment" + exit $? +fi + +PAGE_SIZE=$(getconf PAGE_SIZE) + +# Enable use of INTERVAL_SEC sample interval +OPT_USE_INTERVALS=1 + +# Print key networking device statistics +function print_memory() +{ + # Configuration for netcmds + MEMINFO=/proc/meminfo + NODEINFO=/sys/devices/system/node/node?/meminfo + BUDDYINFO=/proc/buddyinfo + SLABINFO=/proc/slabinfo + + print_separator + TOOL_HIRES_TIME + + ${ECHO} "# ${MEMINFO}" + ${CAT} ${MEMINFO} + ${ECHO} + + ${ECHO} "# ${NODEINFO}" + ${CAT} ${NODEINFO} + ${ECHO} + + ${ECHO} "# ${BUDDYINFO}" + ${CAT} ${BUDDYINFO} + ${ECHO} + + ${ECHO} "# PSS" + cat /proc/*/smaps 2>/dev/null | \ + awk '/^Pss:/ {a += $2;} END {printf "%d MiB\n", a/1024.0;}' + ${ECHO} + + # use old slabinfo format (i.e. slub not enabled in kernel) + ${ECHO} "# ${SLABINFO}" + ${CAT} ${SLABINFO} | \ + awk -v page_size_B=${PAGE_SIZE} ' +BEGIN {page_KiB = page_size_B/1024; TOT_KiB = 0;} +(NF == 17) { + gsub(/[<>]/, ""); + printf("%-22s %11s %8s %8s %10s %12s %1s %5s %10s %12s %1s %12s %9s %11s %8s\n", + $2, $3, $4, $5, $6, $7, $8, $10, $11, $12, $13, $15, $16, $17, "KiB"); +} +(NF == 16) { + num_objs=$3; obj_per_slab=$5; pages_per_slab=$6; + KiB = (obj_per_slab > 0) ? page_KiB*num_objs/obj_per_slab*pages_per_slab : 0; + TOT_KiB += KiB; + printf("%-22s %11d %8d %8d %10d %12d %1s %5d %10d %12d %1s %12d %9d %11d %8d\n", + $1, $2, $3, $4, $5, $6, $7, $9, $10, $11, $12, $14, $15, $16, KiB); +} +END { + printf("%-22s %11s %8s %8s %10s %12s %1s %5s %10s %12s %1s %12s %9s %11s %8d\n", + "TOTAL", "-", "-", "-", "-", "-", ":", "-", "-", "-", ":", "-", "-", "-", TOT_KiB); +} +' 2>/dev/null + ${ECHO} + + ${ECHO} "# disk usage: rootfs, tmpfs" + cmd='df -h -H -T --local -t rootfs -t tmpfs' + ${ECHO} "Disk space usage rootfs,tmpfs (SI):" + ${ECHO} "${cmd}" + ${cmd} + ${ECHO} + + CMD='ps -e -o ppid,pid,nlwp,rss:10,vsz:10,cmd --sort=-rss' + ${ECHO} "# ${CMD}" + ${CMD} + ${ECHO} +} + +#------------------------------------------------------------------------------- +# MAIN Program: +#------------------------------------------------------------------------------- +# Parse input options +tools_parse_options "${@}" + +# Set affinity of current script +CPULIST="" +set_affinity ${CPULIST} + +LOG "collecting ${TOOLNAME} for ${PERIOD_MIN} minutes, with ${INTERVAL_SEC} second sample intervals." 
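+
+# Worked example (hypothetical numbers) of the slab accounting done by the awk
+# block in print_memory() above: for a /proc/slabinfo row with num_objs=1220,
+# obj_per_slab=61 and pages_per_slab=1 on a system with 4 KiB pages:
+#   KiB = page_KiB * num_objs / obj_per_slab * pages_per_slab
+#       = 4 * 1220 / 61 * 1 = 80 KiB
+# i.e. objects are converted to slabs, slabs to pages, and pages to KiB.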
+ +# Print tools generic tools header +tools_header + +# Calculate number of sample repeats based on overall interval and sampling interval +((REPEATS = PERIOD_MIN * 60 / INTERVAL_SEC)) + +for ((rep=1; rep <= REPEATS ; rep++)) +do + print_memory + sleep ${INTERVAL_SEC} +done +print_memory +LOG "done" + +# normal program exit +tools_cleanup 0 +exit 0 diff --git a/middleware/util/recipes-common/engtools/hostdata-collectors/scripts/netstats.sh b/middleware/util/recipes-common/engtools/hostdata-collectors/scripts/netstats.sh new file mode 100644 index 0000000..4ed13b0 --- /dev/null +++ b/middleware/util/recipes-common/engtools/hostdata-collectors/scripts/netstats.sh @@ -0,0 +1,66 @@ +#!/bin/bash +# Usage: netstats.sh [-p ] [-i ] [-c ] [-h] +TOOLBIN=$(dirname $0) + +# Initialize tools environment variables, and define common utility functions +. ${TOOLBIN}/engtools_util.sh +tools_init +if [ $? -ne 0 ]; then + echo "FATAL, tools_init - could not setup environment" + exit $? +fi + +# Enable use of INTERVAL_SEC sample interval +OPT_USE_INTERVALS=1 + +# Print key networking device statistics +function print_netcmds() +{ + # Configuration for netcmds + DEV=/proc/net/dev + NETSTAT=/proc/net/netstat + + print_separator + TOOL_HIRES_TIME + + for net in \ + ${DEV} ${NETSTAT} + do + if [ -e "${net}" ] + then + ${ECHO} "# ${net}" + ${CAT} ${net} + ${ECHO} + fi + done +} + +#------------------------------------------------------------------------------- +# MAIN Program: +#------------------------------------------------------------------------------- +# Parse input options +tools_parse_options "${@}" + +# Set affinity of current script +CPULIST="" +set_affinity ${CPULIST} + +LOG "collecting ${TOOLNAME} for ${PERIOD_MIN} minutes, with ${INTERVAL_SEC} second sample intervals." + +# Print tools generic tools header +tools_header + +# Calculate number of sample repeats based on overall interval and sampling interval +((REPEATS = PERIOD_MIN * 60 / INTERVAL_SEC)) + +for ((rep=1; rep <= REPEATS ; rep++)) +do + print_netcmds + sleep ${INTERVAL_SEC} +done +print_netcmds +LOG "done" + +# normal program exit +tools_cleanup 0 +exit 0 diff --git a/middleware/util/recipes-common/engtools/hostdata-collectors/scripts/postgres.sh b/middleware/util/recipes-common/engtools/hostdata-collectors/scripts/postgres.sh new file mode 100644 index 0000000..9bcf8d1 --- /dev/null +++ b/middleware/util/recipes-common/engtools/hostdata-collectors/scripts/postgres.sh @@ -0,0 +1,141 @@ +#!/bin/bash +# Usage: postgres.sh [-p ] [-i ] [-c ] [-h] +TOOLBIN=$(dirname $0) + +# Initialize tools environment variables, and define common utility functions +. ${TOOLBIN}/engtools_util.sh +tools_init +if [ $? -ne 0 ]; then + echo "FATAL, tools_init - could not setup environment" + exit $? 
+fi + +# Enable use of INTERVAL_SEC sample interval +OPT_USE_INTERVALS=1 + +# Print key networking device statistics +function print_postgres() +{ + print_separator + TOOL_HIRES_TIME + + # postgressql command: set user, disable pagination, and be quiet + PSQL="sudo -u postgres psql --pset pager=off -q" + + # List postgres databases + db_list=( $(${PSQL} -t -c "SELECT datname FROM pg_database WHERE datistemplate = false;") ) + ${ECHO} "# postgres databases" + echo "db_list = ${db_list[@]}" + ${ECHO} + + # List sizes of all postgres databases (similar to "\l+") + ${ECHO} "# postgres database sizes" + ${PSQL} -c " +SELECT + pg_database.datname, + pg_database_size(pg_database.datname), + pg_size_pretty(pg_database_size(pg_database.datname)) +FROM pg_database +ORDER BY pg_database_size DESC; +" + + # For each database, list tables and their sizes (similar to "\dt+") + for db in "${db_list[@]}" + do + ${ECHO} "# postgres database: ${db}" + ${PSQL} -d ${db} -c " +SELECT + table_schema, + table_name, + pg_size_pretty(table_size) AS table_size, + pg_size_pretty(indexes_size) AS indexes_size, + pg_size_pretty(total_size) AS total_size, + live_tuples, + dead_tuples +FROM ( + SELECT + table_schema, + table_name, + pg_table_size(table_name) AS table_size, + pg_indexes_size(table_name) AS indexes_size, + pg_total_relation_size(table_name) AS total_size, + pg_stat_get_live_tuples(table_name::regclass) AS live_tuples, + pg_stat_get_dead_tuples(table_name::regclass) AS dead_tuples + FROM ( + SELECT + table_schema, + table_name + FROM information_schema.tables + WHERE table_schema='public' + AND table_type='BASE TABLE' + ) AS all_tables + ORDER BY total_size DESC +) AS pretty_sizes; +" + + ${ECHO} "# postgres database vacuum: ${db}" + ${PSQL} -d ${db} -c " +SELECT + relname, + n_live_tup, + n_dead_tup, + last_vacuum, + last_autovacuum, + last_analyze, + last_autoanalyze +FROM pg_stat_user_tables; +" + done + + # Specific table counts (This is very SLOW, look at "live tuples" instead) + # Number of keystone tokens + #${ECHO} "# keystone token count" + + # Number of postgres connections + ${ECHO} "# postgres database connections" + CONN=$(ps -C postgres -o cmd= | wc -l) + CONN_T=$(ps -C postgres -o cmd= | awk '/postgres: / {print $3}' | awk '{for(i=1;i<=NF;i++) a[$i]++} END {for(k in a) print k, a[k]}' | sort -k 2 -nr ) + ${ECHO} "connections total = ${CONN}" + ${ECHO} + ${ECHO} "connections breakdown:" + ${ECHO} "${CONN_T}" + ${ECHO} + + ${ECHO} "connections breakdown (query):" + ${PSQL} -c "SELECT datname,state,count(*) from pg_stat_activity group by datname,state;" + ${ECHO} + + ${ECHO} "connections idle age:" + ${PSQL} -c "SELECT datname,age(now(),state_change) from pg_stat_activity where state='idle';" + ${ECHO} +} + +#------------------------------------------------------------------------------- +# MAIN Program: +#------------------------------------------------------------------------------- +# Parse input options +tools_parse_options "${@}" + +# Set affinity of current script +CPULIST="" +set_affinity ${CPULIST} + +LOG "collecting ${TOOLNAME} for ${PERIOD_MIN} minutes, with ${INTERVAL_SEC} second sample intervals." 
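+
+# Example (sketch): the per-sample "connections breakdown (query)" section can
+# be reproduced manually with the same psql invocation used in print_postgres():
+#   sudo -u postgres psql --pset pager=off -q \
+#       -c "SELECT datname,state,count(*) from pg_stat_activity group by datname,state;"
+# which prints one row per (database, state) pair, e.g. "nova | idle | 12".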
+
+# Print tools generic tools header
+tools_header
+
+# Calculate number of sample repeats based on overall interval and sampling interval
+((REPEATS = PERIOD_MIN * 60 / INTERVAL_SEC))
+
+for ((rep=1; rep <= REPEATS ; rep++))
+do
+    print_postgres
+    sleep ${INTERVAL_SEC}
+done
+print_postgres
+LOG "done"
+
+# normal program exit
+tools_cleanup 0
+exit 0
diff --git a/middleware/util/recipes-common/engtools/hostdata-collectors/scripts/rabbitmq.sh b/middleware/util/recipes-common/engtools/hostdata-collectors/scripts/rabbitmq.sh
new file mode 100644
index 0000000..c588c16
--- /dev/null
+++ b/middleware/util/recipes-common/engtools/hostdata-collectors/scripts/rabbitmq.sh
@@ -0,0 +1,85 @@
+#!/bin/bash
+# Usage: rabbitmq.sh [-p ] [-i ] [-c ] [-h]
+TOOLBIN=$(dirname $0)
+
+# Initialize tools environment variables, and define common utility functions
+. ${TOOLBIN}/engtools_util.sh
+tools_init
+if [ $? -ne 0 ]; then
+    echo "FATAL, tools_init - could not setup environment"
+    exit $?
+fi
+
+# Enable use of INTERVAL_SEC sample interval
+OPT_USE_INTERVALS=1
+# Need this workaround
+MQOPT="-n rabbit@localhost"
+# Print key rabbitmq statistics
+function print_rabbitmq()
+{
+    print_separator
+    TOOL_HIRES_TIME
+
+    # IMPORTANT:
+    # - Difficulty getting rabbitmqctl to work from init.d script;
+    #   apparently it requires a pseudo-TTY, which is something you don't have
+    #   until post-init.
+    # - WORKAROUND: run command using 'sudo', even if you are 'root'
+
+    # Dump various rabbitmq related stats
+    MQ_STATUS="rabbitmqctl ${MQOPT} status"
+    ${ECHO} "# ${MQ_STATUS}"
+    sudo ${MQ_STATUS} | grep -e '{memory' -A30
+    ${ECHO}
+
+    # The following is useful in diagnosing rabbit memory leaks
+    # when end-users do not drain their queues (e.g. due to RPC timeout issues, etc.)
+    MQ_QUEUES="rabbitmqctl ${MQOPT} list_queues messages name pid messages_ready messages_unacknowledged memory consumers"
+    ${ECHO} "# ${MQ_QUEUES}"
+    sudo ${MQ_QUEUES}
+    ${ECHO}
+
+    num_queues=$(sudo rabbitmqctl ${MQOPT} list_queues | wc -l); ((num_queues-=2))
+    num_bindings=$(sudo rabbitmqctl ${MQOPT} list_bindings | wc -l); ((num_bindings-=2))
+    num_exchanges=$(sudo rabbitmqctl ${MQOPT} list_exchanges | wc -l); ((num_exchanges-=2))
+    num_connections=$(sudo rabbitmqctl ${MQOPT} list_connections | wc -l); ((num_connections-=2))
+    num_channels=$(sudo rabbitmqctl ${MQOPT} list_channels | wc -l); ((num_channels-=2))
+    arr=($(sudo rabbitmqctl ${MQOPT} list_queues messages consumers memory | \
+        awk '/^[0-9]/ {a+=$1; b+=$2; c+=$3} END {print a, b, c}'))
+    messages=${arr[0]}; consumers=${arr[1]}; memory=${arr[2]}
+    printf "%6s %8s %9s %11s %8s %8s %9s %10s\n" \
+        "queues" "bindings" "exchanges" "connections" "channels" "messages" "consumers" "memory"
+    printf "%6d %8d %9d %11d %8d %8d %9d %10d\n" \
+        $num_queues $num_bindings $num_exchanges $num_connections $num_channels $messages $consumers $memory
+    ${ECHO}
+}
+
+#-------------------------------------------------------------------------------
+# MAIN Program:
+#-------------------------------------------------------------------------------
+# Parse input options
+tools_parse_options "${@}"
+
+# Set affinity of current script
+CPULIST=""
+set_affinity ${CPULIST}
+
+LOG "collecting ${TOOLNAME} for ${PERIOD_MIN} minutes, with ${INTERVAL_SEC} second sample intervals."
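+
+# Note (assumption about rabbitmqctl output format): the "-2" adjustments in
+# print_rabbitmq() subtract the header and footer lines that rabbitmqctl wraps
+# around each listing, e.g.:
+#   Listing queues ...
+#   <one line per entry>
+#   ...done.
+# so "wc -l" minus 2 leaves the count of actual entries.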
+ +# Print tools generic tools header +tools_header + +# Calculate number of sample repeats based on overall interval and sampling interval +((REPEATS = PERIOD_MIN * 60 / INTERVAL_SEC)) + +for ((rep=1; rep <= REPEATS ; rep++)) +do + print_rabbitmq + sleep ${INTERVAL_SEC} +done +print_rabbitmq +LOG "done" + +# normal program exit +tools_cleanup 0 +exit 0 diff --git a/middleware/util/recipes-common/engtools/hostdata-collectors/scripts/remote/rbzip2-engtools.sh b/middleware/util/recipes-common/engtools/hostdata-collectors/scripts/remote/rbzip2-engtools.sh new file mode 100644 index 0000000..3d972c4 --- /dev/null +++ b/middleware/util/recipes-common/engtools/hostdata-collectors/scripts/remote/rbzip2-engtools.sh @@ -0,0 +1,46 @@ +#!/bin/bash +# Purpose: +# bzip2 compress engtools data on all nodes. + +# Define common utility functions +TOOLBIN=$(dirname $0) +. ${TOOLBIN}/engtools_util.sh +if [ $UID -eq 0 ]; then + ERRLOG "Do not start $0 using sudo/root access." + exit 1 +fi + +# environment for system commands +source /etc/nova/openrc + +declare -a CONTROLLER +declare -a COMPUTE +declare -a STORAGE +CONTROLLER=( $(system host-list | awk '(/controller/) {print $4;}') ) +COMPUTE=( $(system host-list | awk '(/compute/) {print $4;}') ) +STORAGE=( $(system host-list | awk '(/storage/) {print $4;}') ) + +LOG "Remote bzip2 engtools data on all blades:" +for blade in ${CONTROLLER[@]}; do + ping -c1 ${blade} 1>/dev/null 2>/dev/null + if [ $? -eq 0 ]; then + LOG "bzip2 on $blade:" + ssh -q -t -o StrictHostKeyChecking=no \ + ${blade} sudo bzip2 /scratch/syseng_data/${blade}/* + else + WARNLOG "cannot ping: ${blade}" + fi +done +for blade in ${STORAGE[@]} ${COMPUTE[@]} ; do + ping -c1 ${blade} 1>/dev/null 2>/dev/null + if [ $? -eq 0 ]; then + LOG "bzip2 on $blade:" + ssh -q -t -o StrictHostKeyChecking=no \ + ${blade} sudo bzip2 /tmp/syseng_data/${blade}/* + else + WARNLOG "cannot ping: ${blade}" + fi +done +LOG "done" + +exit 0 diff --git a/middleware/util/recipes-common/engtools/hostdata-collectors/scripts/remote/rstart-engtools.sh b/middleware/util/recipes-common/engtools/hostdata-collectors/scripts/remote/rstart-engtools.sh new file mode 100644 index 0000000..f3df76d --- /dev/null +++ b/middleware/util/recipes-common/engtools/hostdata-collectors/scripts/remote/rstart-engtools.sh @@ -0,0 +1,37 @@ +#!/bin/bash +# Purpose: +# Remote start engtools on all blades. + +# Define common utility functions +TOOLBIN=$(dirname $0) +. ${TOOLBIN}/engtools_util.sh +if [ $UID -eq 0 ]; then + ERRLOG "Do not start $0 using sudo/root access." + exit 1 +fi + +# environment for system commands +source /etc/nova/openrc + +declare -a BLADES +BLADES=( $(system host-list | awk '(/compute|controller|storage/) {print $4;}') ) + +LOG "Remote start engtools on all blades:" +for blade in ${BLADES[@]}; do + if [ "${blade}" == "${HOSTNAME}" ]; then + LOG "start on $blade:" + sudo service collect-engtools.sh start + else + ping -c1 ${blade} 1>/dev/null 2>/dev/null + if [ $? 
-eq 0 ]; then + LOG "start on $blade:" + ssh -q -t -o StrictHostKeyChecking=no \ + ${blade} sudo service collect-engtools.sh start + else + WARNLOG "cannot ping: ${blade}" + fi + fi +done +LOG "done" + +exit 0 diff --git a/middleware/util/recipes-common/engtools/hostdata-collectors/scripts/remote/rstop-engtools.sh b/middleware/util/recipes-common/engtools/hostdata-collectors/scripts/remote/rstop-engtools.sh new file mode 100644 index 0000000..1251ea8 --- /dev/null +++ b/middleware/util/recipes-common/engtools/hostdata-collectors/scripts/remote/rstop-engtools.sh @@ -0,0 +1,37 @@ +#!/bin/bash +# Purpose: +# Remote stop engtools on all blades. + +# Define common utility functions +TOOLBIN=$(dirname $0) +. ${TOOLBIN}/engtools_util.sh +if [ $UID -eq 0 ]; then + ERRLOG "Do not start $0 using sudo/root access." + exit 1 +fi + +# environment for system commands +source /etc/nova/openrc + +declare -a BLADES +BLADES=( $(system host-list | awk '(/compute|controller|storage/) {print $4;}') ) + +LOG "Remote stop engtools on all blades:" +for blade in ${BLADES[@]}; do + if [ "${blade}" == "${HOSTNAME}" ]; then + LOG "stop on $blade:" + sudo service collect-engtools.sh stop + else + ping -c1 ${blade} 1>/dev/null 2>/dev/null + if [ $? -eq 0 ]; then + LOG "stop on $blade:" + ssh -q -t -o StrictHostKeyChecking=no \ + ${blade} sudo service collect-engtools.sh stop + else + WARNLOG "cannot ping: ${blade}" + fi + fi +done +LOG "done" + +exit 0 diff --git a/middleware/util/recipes-common/engtools/hostdata-collectors/scripts/remote/rsync-engtools-data.sh b/middleware/util/recipes-common/engtools/hostdata-collectors/scripts/remote/rsync-engtools-data.sh new file mode 100644 index 0000000..6f82f47 --- /dev/null +++ b/middleware/util/recipes-common/engtools/hostdata-collectors/scripts/remote/rsync-engtools-data.sh @@ -0,0 +1,70 @@ +#!/bin/bash +# Purpose: +# rsync data from all nodes to backup location. + +# Define common utility functions +TOOLBIN=$(dirname $0) +. ${TOOLBIN}/engtools_util.sh +if [ $UID -eq 0 ]; then + ERRLOG "Do not start $0 using sudo/root access." + exit 1 +fi + +# environment for system commands +source /etc/nova/openrc + +declare -a BLADES +declare -a CONTROLLER +declare -a STORAGE +declare -a COMPUTE +BLADES=( $(system host-list | awk '(/compute|controller|storage/) {print $4;}') ) +CONTROLLER=( $(system host-list | awk '(/controller/) {print $4;}') ) +COMPUTE=( $(system host-list | awk '(/compute/) {print $4;}') ) +STORAGE=( $(system host-list | awk '(/storage/) {print $4;}') ) + +DEST=/opt/backups/syseng_data/ +if [[ "${HOSTNAME}" =~ "controller-" ]]; then + LOG "rsync DEST=${DEST}" +else + LOG "*ERROR* only run this on controller" + exit 1 +fi +sudo mkdir -p ${DEST} + +# rsync options +USER=wrsroot +RSYNC_OPT="-r -l --safe-links -h -P --stats --exclude=*.pyc" + +# Rsync data from multiple locations +LOG "rsync engtools data from all blades:" + +# controllers +SRC=/scratch/syseng_data/ +DEST=/opt/backups/syseng_data/ +for HOST in ${CONTROLLER[@]} +do + ping -c1 ${HOST} 1>/dev/null 2>/dev/null + if [ $? -eq 0 ]; then + LOG "rsync ${RSYNC_OPT} ${USER}@${HOST}:${SRC} ${DEST}" + sudo rsync ${RSYNC_OPT} ${USER}@${HOST}:${SRC} ${DEST} + else + WARNLOG "cannot ping: ${HOST}" + fi +done + +# computes & storage +SRC=/tmp/syseng_data/ +DEST=/opt/backups/syseng_data/ +for HOST in ${STORAGE[@]} ${COMPUTE[@]} +do + ping -c1 ${HOST} 1>/dev/null 2>/dev/null + if [ $? 
-eq 0 ]; then + LOG "rsync ${RSYNC_OPT} ${USER}@${HOST}:${SRC} ${DEST}" + sudo rsync ${RSYNC_OPT} ${USER}@${HOST}:${SRC} ${DEST} + else + WARNLOG "cannot ping: ${HOST}" + fi +done +LOG 'done' + +exit 0 diff --git a/middleware/util/recipes-common/engtools/hostdata-collectors/scripts/slab.sh b/middleware/util/recipes-common/engtools/hostdata-collectors/scripts/slab.sh new file mode 100644 index 0000000..70e9c05 --- /dev/null +++ b/middleware/util/recipes-common/engtools/hostdata-collectors/scripts/slab.sh @@ -0,0 +1,23 @@ +#!/bin/bash +PAGE_SIZE=$(getconf PAGE_SIZE) +cat /proc/slabinfo | awk -v page_size_B=${PAGE_SIZE} ' +BEGIN {page_KiB = page_size_B/1024; TOT_KiB = 0;} +(NF == 17) { + gsub(/[<>]/, ""); + printf("%-22s %11s %8s %8s %10s %12s %1s %5s %10s %12s %1s %12s %9s %11s %8s\n", + $2, $3, $4, $5, $6, $7, $8, $10, $11, $12, $13, $15, $16, $17, "KiB"); +} +(NF == 16) { + num_objs=$3; obj_per_slab=$5; pages_per_slab=$6; + KiB = (obj_per_slab > 0) ? page_KiB*num_objs/obj_per_slab*pages_per_slab : 0; + TOT_KiB += KiB; + printf("%-22s %11d %8d %8d %10d %12d %1s %5d %10d %12d %1s %12d %9d %11d %8d\n", + $1, $2, $3, $4, $5, $6, $7, $9, $10, $11, $12, $14, $15, $16, KiB); +} +END { + printf("%-22s %11s %8s %8s %10s %12s %1s %5s %10s %12s %1s %12s %9s %11s %8d\n", + "TOTAL", "-", "-", "-", "-", "-", ":", "-", "-", "-", ":", "-", "-", "-", TOT_KiB); +} +' 2>/dev/null + +exit 0 diff --git a/middleware/util/recipes-common/engtools/hostdata-collectors/scripts/ticker.sh b/middleware/util/recipes-common/engtools/hostdata-collectors/scripts/ticker.sh new file mode 100644 index 0000000..570cd52 --- /dev/null +++ b/middleware/util/recipes-common/engtools/hostdata-collectors/scripts/ticker.sh @@ -0,0 +1,50 @@ +#!/bin/bash +# Usage: ticker.sh [-p ] [-i ] [-c ] [-h] +TOOLBIN=$(dirname $0) + +# Initialize tools environment variables, and define common utility functions +. ${TOOLBIN}/engtools_util.sh +tools_init +if [ $? -ne 0 ]; then + echo "FATAL, tools_init - could not setup environment" + exit $? +fi + +# Enable use of INTERVAL_SEC sample interval +OPT_USE_INTERVALS=1 + +#------------------------------------------------------------------------------- +# MAIN Program: +#------------------------------------------------------------------------------- +# Parse input options +tools_parse_options "${@}" + +# Set affinity of current script +CPULIST="" +set_affinity ${CPULIST} + +# Calculate number of sample repeats based on overall interval and sampling interval +((REPEATS = PERIOD_MIN * 60 / INTERVAL_SEC)) +((REP_LOG = 10 * 60 / INTERVAL_SEC)) + +LOG_NOCR "collecting " +t=0 +for ((rep=1; rep <= REPEATS ; rep++)) +do + ((t++)) + sleep ${INTERVAL_SEC} + if [ ${t} -ge ${REP_LOG} ]; then + t=0 + echo "." + LOG_NOCR "collecting " + else + echo -n "." + fi +done +echo "." + +LOG "done" + +# normal program exit +tools_cleanup 0 +exit 0 diff --git a/middleware/util/recipes-common/engtools/hostdata-collectors/scripts/top.sh b/middleware/util/recipes-common/engtools/hostdata-collectors/scripts/top.sh new file mode 100644 index 0000000..45dff33 --- /dev/null +++ b/middleware/util/recipes-common/engtools/hostdata-collectors/scripts/top.sh @@ -0,0 +1,43 @@ +#!/bin/bash +# Usage: top.sh [-p ] [-i ] [-c ] [-h] +TOOLBIN=$(dirname $0) + +# Initialize tools environment variables, and define common utility functions +. ${TOOLBIN}/engtools_util.sh +tools_init +if [ $? -ne 0 ]; then + echo "FATAL, tools_init - could not setup environment" + exit $? 
+fi + +# Enable use of INTERVAL_SEC sample interval +OPT_USE_INTERVALS=1 + +#------------------------------------------------------------------------------- +# MAIN Program: +#------------------------------------------------------------------------------- +# Parse input options +tools_parse_options "${@}" + +# Set affinity of current script +CPULIST="" +set_affinity ${CPULIST} + +LOG "collecting ${TOOLNAME} for ${PERIOD_MIN} minutes, with ${INTERVAL_SEC} second sample intervals." + +# Print tools generic tools header +tools_header + +# Calculate number of sample repeats based on overall interval and sampling interval +((REPEATS = PERIOD_MIN * 60 / INTERVAL_SEC)) +((REP = REPEATS + 1)) + +# Execute tool for specified duration +CMD="top -b -c -H -n ${REP} -d ${INTERVAL_SEC}" +#LOG "CMD: ${CMD}" +${CMD} +LOG "done" + +# normal program exit +tools_cleanup 0 +exit 0 diff --git a/middleware/util/recipes-common/engtools/hostdata-collectors/scripts/vswitch.sh b/middleware/util/recipes-common/engtools/hostdata-collectors/scripts/vswitch.sh new file mode 100644 index 0000000..dae8bf7 --- /dev/null +++ b/middleware/util/recipes-common/engtools/hostdata-collectors/scripts/vswitch.sh @@ -0,0 +1,68 @@ +#!/bin/bash +# Usage: vswitch.sh [-p ] [-i ] [-c ] [-h] +TOOLBIN=$(dirname $0) + +# Initialize tools environment variables, and define common utility functions +. ${TOOLBIN}/engtools_util.sh +tools_init +if [ $? -ne 0 ]; then + echo "FATAL, tools_init - could not setup environment" + exit $? +fi + +# Enable use of INTERVAL_SEC sample interval +OPT_USE_INTERVALS=1 + +# Print key networking device statistics +function print_vswitch() +{ + print_separator + TOOL_HIRES_TIME + + cmd='vshell engine-list' + ${ECHO} "# ${cmd}" ; ${cmd} ; ${ECHO} + cmd='vshell engine-stats-list' + ${ECHO} "# ${cmd}" ; ${cmd} ; ${ECHO} + cmd='vshell port-list' + ${ECHO} "# ${cmd}" ; ${cmd} ; ${ECHO} + cmd='vshell port-stats-list' + ${ECHO} "# ${cmd}" ; ${cmd} ; ${ECHO} + cmd='vshell network-list' + ${ECHO} "# ${cmd}" ; ${cmd} ; ${ECHO} + cmd='vshell network-stats-list' + ${ECHO} "# ${cmd}" ; ${cmd} ; ${ECHO} + cmd='vshell interface-list' + ${ECHO} "# ${cmd}" ; ${cmd} ; ${ECHO} + cmd='vshell interface-stats-list' + ${ECHO} "# ${cmd}" ; ${cmd} ; ${ECHO} +} + +#------------------------------------------------------------------------------- +# MAIN Program: +#------------------------------------------------------------------------------- +# Parse input options +tools_parse_options "${@}" + +# Set affinity of current script +CPULIST="" +set_affinity ${CPULIST} + +LOG "collecting ${TOOLNAME} for ${PERIOD_MIN} minutes, with ${INTERVAL_SEC} second sample intervals." 
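+
+# The repeated cmd/echo pairs in print_vswitch() could equally be table-driven;
+# a minimal sketch (not part of the original tool):
+#   for c in 'vshell engine-list' 'vshell engine-stats-list' 'vshell port-list'; do
+#       ${ECHO} "# ${c}" ; ${c} ; ${ECHO}
+#   done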
+ +# Print tools generic tools header +tools_header + +# Calculate number of sample repeats based on overall interval and sampling interval +((REPEATS = PERIOD_MIN * 60 / INTERVAL_SEC)) + +for ((rep=1; rep <= REPEATS ; rep++)) +do + print_vswitch + sleep ${INTERVAL_SEC} +done +print_vswitch +LOG "done" + +# normal program exit +tools_cleanup 0 +exit 0 diff --git a/middleware/util/recipes-common/engtools/parsers/README b/middleware/util/recipes-common/engtools/parsers/README new file mode 100644 index 0000000..5400b89 --- /dev/null +++ b/middleware/util/recipes-common/engtools/parsers/README @@ -0,0 +1 @@ +SE tools wiki: http://wiki.wrs.com/PBUeng/InformationAboutSEToolsAndDataAnalysis diff --git a/middleware/util/recipes-common/engtools/parsers/common/cleanup-uncompressed.sh b/middleware/util/recipes-common/engtools/parsers/common/cleanup-uncompressed.sh new file mode 100755 index 0000000..3bd1fa2 --- /dev/null +++ b/middleware/util/recipes-common/engtools/parsers/common/cleanup-uncompressed.sh @@ -0,0 +1,33 @@ +#!/bin/bash + +#Copyright (c) 2016 Wind River Systems, Inc. +# +#SPDX-License-Identifier: Apache-2.0 +# +# This script removes uncompressed file. It can save a huge amount of disk space +# on the analysis server. Run this script after the very last time the data is parsed +# and BEFORE running parse-daily.sh script. +# If it is run after each intermediary parse, the download-data.sh script will download the +# uncompressed files again. + +if [ ! -f lab.conf ]; then + echo "Lab configuration file is missing." + echo "See http://wiki.wrs.com/PBUeng/TitaniumServerSysengToolsAndDataAnalysis for more info." + exit 1 +fi + +source ./lab.conf +YEAR=`date +'%Y'` + +files="${FILE_LIST// /, }" +read -p "Are you sure you want to remove all uncompressed $files files? [Y/N]: " -n 1 -r +echo +if [[ $REPLY =~ ^[Y]$ ]] +then + for FILE in ${FILE_LIST}; do + rm -v */*_${YEAR}-*${FILE} + done +else + echo "Remove request cancelled." +fi + diff --git a/middleware/util/recipes-common/engtools/parsers/common/csv-to-influx.py b/middleware/util/recipes-common/engtools/parsers/common/csv-to-influx.py new file mode 100755 index 0000000..54eeaff --- /dev/null +++ b/middleware/util/recipes-common/engtools/parsers/common/csv-to-influx.py @@ -0,0 +1,474 @@ +#!/usr/bin/env python + +""" +Copyright (c) 2017 Wind River Systems, Inc. + +SPDX-License-Identifier: Apache-2.0 + +This script is for parsing post-data analysis. It takes the csv files generated from the parser scripts and imports +the data to an influx database. All influx information should be specified in the lab.conf file. Please see the wiki +for more details. +""" + +import os +import sys +import time +import datetime +from optparse import OptionParser +from multiprocessing import Pool + + +# command line arguments +def init(): + parser = OptionParser() + parser.add_option("-a", "--all", dest="parse_all", action="store_true", default=False, help="use this option to parse all csv files for all nodes specified within lab.conf") + parser.add_option("-n", "--node", dest="node_list", action="append", type="string", help="the specific node(s) to be parsed, otherwise all nodes within lab.conf will be parsed") + parser.add_option("-f", "--file", dest="file_list", action="append", type="string", help="the specific csv file(s) to be parsed. Must use with the -n option. 
Ex: -n controller-0 -f postgres-conns.csv")
+    parser.add_option("-p", "--postgres_svc", dest="postgres_list", action="append", type="string", help="use this option to parse postgres CSV files given specific services. Ex: -p nova")
+    parser.add_option("-b", "--batch-size", dest="batch_size", action="store", type="int", default=100, help="Influx accepts data in batches. Use this option to change the batch size from the default value of 100. Note that Influx can time out if the batch size is too large")
+    (options, args) = parser.parse_args()
+    if len(sys.argv[1:]) == 0:
+        parser.print_help()
+        sys.exit(0)
+    else:
+        return options
+
+
+# converts given UTC time into epoch time
+def convertTime(file, node, start, lc, utcTime):
+    try:
+        # diskstats csv requires special work as no timestamp is provided
+        if file.startswith("diskstats"):
+            t = " ".join(start)
+            pattern = '%Y-%m-%d %H%M'
+            epoch = int(time.mktime(time.strptime(t, pattern)))
+            # add 15 minutes to current timestamp
+            epoch += 900 * lc
+        else:
+            if utcTime.endswith("AM"):
+                pattern = '%m/%d/%Y %H:%M:%S'
+                epoch = int(time.mktime(time.strptime(utcTime[:19], pattern)))
+            elif utcTime.endswith("PM"):
+                tmp = int(utcTime[11:13])
+                if tmp < 12:
+                    tmp += 12
+                str1 = utcTime[:11]
+                str2 = utcTime[13:19]
+                utcTime = str1 + str(tmp) + str2
+                pattern = '%m/%d/%Y %H:%M:%S'
+                epoch = int(time.mktime(time.strptime(utcTime, pattern)))
+            elif file.startswith("memstats") or file.startswith("filestats"):
+                pattern = '%Y-%m-%d %H:%M:%S'
+                epoch = int(time.mktime(time.strptime(utcTime[:19], pattern)))
+            else:
+                pattern = '%Y-%m-%d %H:%M:%S.%f'
+                epoch = int(time.mktime(time.strptime(utcTime[:23], pattern)))
+        return str(epoch)
+    except Exception as e:
+        appendToFile("/tmp/csv-to-influx.log", "Error: Issue converting time for {} for {}. Please check the csv and re-parse as some data may be incorrect\n-{}".format(file, node, e.message))
+        return None
+
+
+# go through each node folder to parse csv files
+def processFiles(path, node, options, influx_info):
+    prefixes = ["postgres-conns", "postgres", "memtop", "occtop", "iostat", "netstats", "rabbitmq", "schedtop", "vswitch", "filestats-summary", "memstats-summary", "diskstats"]
+    if options.file_list is None:
+        for file in os.listdir(path):
+            if file.endswith(".csv"):
+                if file.startswith(tuple(prefixes)):
+                    if options.parse_all is True or options.node_list is not None:
+                        parse(path, file, node, options, influx_info)
+                    elif options.postgres_list is not None:
+                        for svc in options.postgres_list:
+                            if svc in list(file.split("_")):
+                                parse(path, file, node, options, influx_info)
+                else:
+                    continue
+    # if -f option is used
+    elif options.file_list is not None:
+        for file in options.file_list:
+            parse(path, file, node, options, influx_info)
+
+    # let the log know when a thread has finished parsing a folder
+    appendToFile("/tmp/csv-to-influx.log", "-Process for {} finished parsing at {}".format(node, datetime.datetime.utcnow()))
+
+
+# parse the csv files and add data to influx
+# needs to be cleaned up
+def parse(path, file, node, options, influx_info):
+    file_loc = os.path.join(path, file)
+    # until able to access the file
+    while True:
+        if os.access(file_loc, os.R_OK):
+            try:
+                with open(file_loc, "r") as f:
+                    file_name = file.replace("-", "_").replace(".csv", "").replace("_{}".format(node.replace("-", "_")), "").strip("\n")
+                    appendToFile("/tmp/csv-to-influx.log", "Parsing {} for {}".format(file_name, node))
+                    header = f.readline().split(",")
+                    # vswitch CSV files have no headers...
+ if file_name.startswith("vswitch"): + if file_name.replace("vswitch_", "").split("_")[0] == "engine": + header = "date/time,id,cpuid,rx-packets,tx-packets,tx-disabled,tx-overflow,rx-discard,tx-discard,usage".split( + ",") + elif file_name.replace("vswitch_", "").split("_")[0] == "interface": + header = "date/time,rx-packets,tx-packets,rx-bytes,tx-bytes,tx-errors,rx-errors,tx-discards,rx-discards,rx-floods,rx-no-vlan".split( + ",") + elif file_name.replace("vswitch_", "").split("_")[0] == "port": + header = "date/time,rx-packets,tx-packets,rx-bytes,tx-bytes,tx-errors,rx-errors,rx-nombuf".split( + ",") + elif file_name.startswith("memstats"): + if header[0] != "Date": + header = "date/time,rss,vrz" + influx_string = "" + measurement = "" + tag_names = ["node"] + init_tags = [node] + line_count = 0 + batch = 0 + start_time = "" # used for diskstats + bad_string = False + # set tag information needed for influx. Each file needs different things + if file_name.startswith("postgres_conns"): + measurement = "postgres_connections" + elif file_name.startswith("postgres"): + if file_name.endswith("_size"): + measurement = "postgres_db_size" + service = file_name.replace("postgres_", "").replace("_size", "") + if service == "size": + service = "postgres" + tag_names = ["node", "service"] + init_tags = [node, service] + else: + measurement = "postgres_svc_stats" + service = file_name.replace("postgres_", "").split("_")[0] + tag_names = ["node", "service", "schema", "table"] + init_tags = [node, service] + elif file_name.startswith("memtop"): + if file_name == "memtop_detailed": + measurement = "memtop_detailed" + else: + measurement = "memtop" + elif file_name.startswith("occtop"): + if file_name == "occtop_detailed": + measurement = "occtop_detailed" + else: + measurement = "occtop" + elif file_name.startswith("iostat"): + measurement = "iostat" + tag_names = ["node", "device"] + init_tags = [node, header[1]] + elif file_name.startswith("netstats"): + measurement = "netstats" + interface = file.replace("{}-".format(measurement), "").replace("{}-".format(node), "").replace( + ".csv", "") + tag_names = ["node", "interface"] + init_tags = [node, interface] + elif file_name.startswith("rabbitmq"): + if file_name.endswith("info"): + measurement = "rabbitmq_svc" + service = file_name.replace("rabbitmq_", "") + tag_names = ["node", "service"] + init_tags = [node, service] + else: + measurement = "rabbitmq" + elif file_name.startswith("schedtop"): + measurement = "schedtop" + service = file_name.replace("schedtop_", "").replace("_", "-") + tag_names = ["node", "service"] + init_tags = [node, service] + elif file_name.startswith("vswitch"): + measurement = "vswitch" + identifier = file_name.replace("vswitch_", "").split("_") + tag_names = ["node", identifier[0]] + if identifier[0] == "engine": + init_tags = [node, "engine_id_{}".format(identifier[1])] + elif identifier[0] == "interface": + init_tags = [node, identifier[1]] + elif identifier[0] == "port": + init_tags = [node, "port_{}".format(identifier[1])] + elif file_name.startswith("filestats"): + measurement = "filestats" + service = file_name.replace("filestats_summary_", "").replace(".csv", "").replace("_", "-") + tag_names = ["node", "service"] + init_tags = [node, service] + elif file_name.startswith("memstats"): + measurement = "memstats" + service = file_name.replace("memstats_summary_", "").replace(".csv", "").replace("_", "-") + tag_names = ["node", "service"] + init_tags = [node, service] + elif file_name.startswith("diskstats"): + measurement 
= "diskstats" + mount = file_name.replace("diskstats_", "") + tag_names = ["node", "mount", "file_system", "type"] + init_tags = [node, mount] + # find the bz2 file with the earliest date + start = float('inf') + for t in os.listdir(path): + if t.startswith(node) and t.endswith("bz2"): + next = int( + str(t.replace("{}_".format(node), "")[2:15]).replace("-", "").replace("_", "")) + if next < start: + start = next + start_time = t.split("_")[1:3] + + # go through header, determine the fields, skip the tags + field_names = [] + for i in header: + j = i.lower().replace(" ", "_").replace("-", "_").replace("used(%)", "usage").replace("(%)", "").replace("(s)", "").strip(" ").strip("\n") + if j in tag_names or i in init_tags or j == 'pid' or j == 'name': + continue + else: + # for occtop core info + if j.isdigit(): + j = "core_{}".format(j) + field_names.append(j) + + # go through each line + bad_count = 0 + for lines in f: + line = lines.strip("\n").split(",") + timestamp = convertTime(file, node, start_time, line_count, line[0].strip("\n")) + if timestamp is None: + bad_count += 1 + if bad_count == 3: + bad_string = True + break + else: + continue + tag_values = init_tags + field_values = [] + line_count += 1 + batch += 1 + + # go through data in each line and determine whether it belongs to a tag or a field + for word in line: + word = word.strip("\n") + # is non-number, interface, or device, add to tags, otherwise add to fields + if word.replace("_", "").replace("-", "").replace(" ", "").isalpha() or (word in init_tags) or word.endswith(".info") or word.startswith("ext"): + tag_values.append(word) + elif word.startswith("/dev"): + tag_values.append(word.split("/")[-1]) + elif word.startswith("= options.batch_size: + writing = True + influx_string = "curl -s -i -o /dev/null -XPOST 'http://'{}':'{}'/write?db='{}'&precision=s' --data-binary '{}'".format(influx_info[0], influx_info[1], influx_info[2], influx_string.strip("\n")) + while writing: + begin = time.time() + os.system(influx_string + "\n") + end = time.time() + if end - begin >= 4.5: + appendToFile("/tmp/csv-to-influx.log", "Timeout warning: {} for {}. Retrying now".format(file_name, node)) + else: + batch = 0 + influx_string = "" + writing = False + # leave while loop due to incorrectly formatted csv data + if bad_string: + f.close() + break + else: + # get remainder of data from csv + if batch < options.batch_size: + writing = True + influx_string = "curl -s -i -o /dev/null -XPOST 'http://'{}':'{}'/write?db='{}'&precision=s' --data-binary '{}'".format(influx_info[0], influx_info[1], influx_info[2], influx_string.strip("\n")) + while writing: + begin = time.time() + os.system(influx_string + "\n") + end = time.time() + if end - begin >= 4.5: + appendToFile("/tmp/csv-to-influx.log", "Timeout warning: {} for {}. 
Retrying now".format(file_name, node)) + else: + writing = False + f.close() + appendToFile("/tmp/csv-to-influx.log", + "{} lines parsed in {} for {}".format(line_count, file_name, node)) + break + except IOError as e: + appendToFile("/tmp/csv-to-influx.log", "Error: Issue opening {}\n-{}".format(file_loc, e.message)) + except (KeyboardInterrupt, SystemExit): + sys.exit(0) + else: + appendToFile("/tmp/csv-to-influx.log", "Error: Could not access {}".format(file_loc)) + + +# generate http api string to send data to influx +def generateString(file, node, meas, tag_n, tag_v, field_n, field_v, lc, date): + base = "{},".format(meas) + try: + if file.startswith("diskstats"): + for i in range(len(tag_n)): + if i == len(tag_n)-1: + base = base + "'{}'='{}' ".format(tag_n[i], str(tag_v[i])) + else: + base = base + "'{}'='{}',".format(tag_n[i], str(tag_v[i])) + for i in range(len(field_v)): + if str(field_v[i]).replace(".", "").isdigit(): + if i == len(field_v)-1: + base = base + "'{}'='{}' {}".format(field_n[i], str(field_v[i]), date) + else: + base = base + "'{}'='{}',".format(field_n[i], str(field_v[i])) + else: + appendToFile("/tmp/csv-to-influx.log", "Error: Issue with line {} with {} for {}. Please check the csv and re-parse as some data may be incorrect".format(lc, file, node)) + return None + else: + for i in range(len(tag_n)): + if i == len(tag_n)-1: + base = base + "'{}'='{}' ".format(tag_n[i], str(tag_v[i])) + else: + base = base + "'{}'='{}',".format(tag_n[i], str(tag_v[i])) + for i in range(1, len(field_v)): + if str(field_v[i]).replace(".", "").isdigit(): + if i == len(field_v)-1: + base = base + "'{}'='{}' {}".format(field_n[i], str(field_v[i]), date) + else: + base = base + "'{}'='{}',".format(field_n[i], str(field_v[i])) + else: + appendToFile("/tmp/csv-to-influx.log", "Error: Issue with line {} with {} for {}. 
Please check the csv and re-parse as some data may be incorrect".format(lc, file, node)) + return None + return base + '\n' + except Exception as e: + appendToFile("/tmp/csv-to-influx.log", "Error: Issue with http api string with {} for {}\n-{}".format(file, node, e.message)) + return None + + +# append to error log +def appendToFile(file, content): + with open(file, "a") as f: + f.write(content + '\n') + + +# main method +if __name__ == "__main__": + # get command-line args + options = init() + controller_list = [] + compute_list = [] + storage_list = [] + influx_host = influx_port = influx_db = "" + influx_info = [] + pool_size = 0 + + # create the files + file = open("/tmp/csv-to-influx.log", "w") + file.close() + file = open("output.txt", "w") + file.close() + appendToFile("/tmp/csv-to-influx.log", "Starting parsing at {}".format(datetime.datetime.utcnow())) + appendToFile("/tmp/csv-to-influx.log", "----------------------------------------------") + + # get node and influx info from lab.conf + with open("lab.conf", "r") as lc: + for lines in lc: + line = lines.strip("\n") + if line.startswith("CONTROLLER_LIST"): + controller_list = list(line.strip(" ").split("="))[1].strip("\"").split(" ") + elif line.startswith("COMPUTE_LIST"): + compute_list = list(line.strip(" ").split("="))[1].strip("\"").split(" ") + elif line.startswith("STORAGE_LIST"): + storage_list = list(line.strip(" ").split("="))[1].strip("\"").split(" ") + elif line.startswith("INFLUX_HOST"): + influx_host = list(line.strip(" ").split("="))[1].strip("\"").split(" ")[0] + elif line.startswith("INFLUX_PORT"): + influx_port = list(line.strip(" ").split("="))[1].strip("\"").split(" ")[0] + elif line.startswith("INFLUX_DB"): + influx_db = list(line.strip(" ").split("="))[1].strip("\"").split(" ")[0] + break + lc.close() + + influx_info.append(influx_host) + influx_info.append(influx_port) + influx_info.append(influx_db) + + # if -n option is used, remove unneeded nodes + if options.node_list is not None: + tmp_controller_list = [] + tmp_compute_list = [] + tmp_storage_list = [] + for n in controller_list: + if n in options.node_list: + tmp_controller_list.append(n) + for n in compute_list: + if n in options.node_list: + tmp_compute_list.append(n) + for n in storage_list: + if n in options.node_list: + tmp_storage_list.append(n) + controller_list = tmp_controller_list + compute_list = tmp_compute_list + storage_list = tmp_storage_list + + pool_size = len(controller_list) + len(compute_list) + len(storage_list) + + if options.file_list is not None and options.parse_all is True: + print "You cannot use the -a option with the -f option" + sys.exit(0) + if options.postgres_list is not None and options.file_list is not None: + print "You cannot use the -p option with the -f option" + sys.exit(0) + if options.parse_all is True and options.node_list is not None: + print "You cannot use the -a option with the -n option. Ex: -n controller-0" + sys.exit(0) + if options.file_list is not None and options.node_list is None: + print "You must specify a node and a file. Ex: -n controller-0 -f postgres-conns.csv" + sys.exit(0) + + working_dir = os.getcwd() + pool = Pool(processes=pool_size) + proc_list = [] + + print "Sending data to InfluxDB. 
Please tail /tmp/csv-to-influx.log"
+
+    # create a process per node
+    if len(controller_list) > 0:
+        for i in range(len(controller_list)):
+            path = os.path.join(working_dir, controller_list[i])
+            proc_list.append(pool.apply_async(processFiles, (path, controller_list[i], options, influx_info,)))
+
+    if len(compute_list) > 0:
+        for i in range(len(compute_list)):
+            path = os.path.join(working_dir, compute_list[i])
+            proc_list.append(pool.apply_async(processFiles, (path, compute_list[i], options, influx_info,)))
+
+    if len(storage_list) > 0:
+        for i in range(len(storage_list)):
+            path = os.path.join(working_dir, storage_list[i])
+            proc_list.append(pool.apply_async(processFiles, (path, storage_list[i], options, influx_info,)))
+
+    pool.close()
+    pool.join()
diff --git a/middleware/util/recipes-common/engtools/parsers/common/download-data.sh b/middleware/util/recipes-common/engtools/parsers/common/download-data.sh
new file mode 100755
index 0000000..0c5726d
--- /dev/null
+++ b/middleware/util/recipes-common/engtools/parsers/common/download-data.sh
@@ -0,0 +1,43 @@
+#!/bin/bash
+
+#Copyright (c) 2016 Wind River Systems, Inc.
+#
+#SPDX-License-Identifier: Apache-2.0
+#
+# This script is used to download syseng data from all hosts to the analysis server
+# for post processing.
+# Syseng data are stored under /scratch/syseng_data on the controllers. Syseng data
+# for storage and compute hosts, which are stored under /tmp/syseng_data, are pulled
+# to the controllers via the script download-computes.sh and stored under
+# /opt/backups/tmp/syseng-data.
+#
+# This script is to be run after running download-computes.sh on one of the controllers.
+
+if [ ! -f lab.conf ]; then
+    echo "Lab configuration file is missing."
+    echo "See http://wiki.wrs.com/PBUeng/TitaniumServerSysengToolsAndDataAnalysis for more info."
+    exit 1
+fi
+
+source ./lab.conf
+
+rsync -azvh wrsroot@${CONTROLLER0_IP}:/scratch/syseng_data/* .
+rsync -azvh wrsroot@${CONTROLLER1_IP}:/scratch/syseng_data/* .
+
+rsync -azvh wrsroot@${CONTROLLER0_IP}:/opt/backups/tmp/syseng-data/* .
+rsync -azvh wrsroot@${CONTROLLER1_IP}:/opt/backups/tmp/syseng-data/* .
+
+# Compress the newly downloaded data files if they have not been compressed
+CURDIR=$(pwd)
+ALL_HOSTS="${CONTROLLER_LIST} ${STORAGE_LIST} ${COMPUTE_LIST}"
+
+for HOST in ${ALL_HOSTS}; do
+    if [ -e ${HOST} ]; then
+        echo "Compressing ${HOST}"
+        cd ${CURDIR}/${HOST}
+        bzip2 ${HOST}*
+        cd ${CURDIR}
+    else
+        echo "${HOST} not found"
+    fi
+done
diff --git a/middleware/util/recipes-common/engtools/parsers/common/parse-all.sh b/middleware/util/recipes-common/engtools/parsers/common/parse-all.sh
new file mode 100755
index 0000000..99d1e4e
--- /dev/null
+++ b/middleware/util/recipes-common/engtools/parsers/common/parse-all.sh
@@ -0,0 +1,376 @@
+#!/bin/bash
+
+#Copyright (c) 2016-2017 Wind River Systems, Inc.
+#
+#SPDX-License-Identifier: Apache-2.0
+#
+
+# This script is used to parse all stats data. It is designed to be called by either
+# parse-controllers.sh or parse-computes.sh and not used as a standalone script.
+# If the input node is a controller, it will parse controller specific postgres
+# and rabbitmq stats first. If the input node is a compute, it will parse the compute
+# specific vswitch stats first.
+# +# The following parsing steps are common to all hosts and are executed in the specified order: +# - Parse occtop +# - Parse memtop +# - Parse memstats (summary) +# - Parse netstats +# - Parse schedtop (summary) +# - Parse iostats +# - Parse diskstats +# - Parse filestats (summary) +# - Parse process level schedtop (optional step, configured in lab.conf) +# - Generate tarball + +if [[ $# != 1 ]]; then + echo "ERROR: This script is meant to be called by either parse-controllers.sh or parse-computes.sh script." + echo "To run it separately, copy the script to the host directory that contains *.bz2 files." + echo "It takes a single argument - the name of the host directory (e.g. ./parse-all.sh controller-0)." + exit 1 +fi + +source ../lab.conf +source ./host.conf + +PARSERDIR=$(dirname $0) +. ${PARSERDIR}/parse-util.sh + +NODE=$1 + +CURDATE=$(date) +DATESTAMP=$(date +%b-%d) + +function sedit() +{ + local FILETOSED=$1 + sed -i -e "s/ */ /g" ${FILETOSED} + sed -i -e "s/ /,/g" ${FILETOSED} + # Remove any trailing comma + sed -i "s/,$//" ${FILETOSED} +} + +function get_filename_from_mountname() +{ + local name=$1 + local fname + if test "${name#*"scratch"}" != "${name}"; then + fname="scratch" + elif test "${name#*"log"}" != "${name}"; then + fname="log" + elif test "${name#*"backup"}" != "${name}"; then + fname="backup" + elif test "${name#*"ceph/mon"}" != "${name}"; then + fname="cephmon" + elif test "${name#*"conversion"}" != "${name}"; then + fname="img-conversion" + elif test "${name#*"platform"}" != "${name}"; then + fname="platform" + elif test "${name#*"postgres"}" != "${name}"; then + fname="postgres" + elif test "${name#*"cgcs"}" != "${name}"; then + fname="cgcs" + elif test "${name#*"rabbitmq"}" != "${name}"; then + fname="rabbitmq" + elif test "${name#*"instances"}" != "${name}"; then + fname="pv" + elif test "${name#*"ceph/osd"}" != "${name}"; then + # The ceph disk partition has the following mount name convention + # /var/lib/ceph/osd/ceph-0 + fname=`basename ${name}` + fi + echo $fname +} + +function parse_process_schedtop_data() +{ + # Logic has been moved to a separate script so that parsing process level schedtop + # can be run either as part of parse-all.sh script or independently. + LOG "Process level schedtop parsing is turned on in lab.conf. Parsing schedtop detail..." + cd .. + ./parse-schedtop.sh ${NODE} + cd ${NODE} +} + +function parse_controller_specific() +{ + # Parsing Postgres data, removing data from previous run if there are any. Generate summary + # data for each database and detail data for specified tables + LOG "Parsing postgres data for ${NODE}" + if [ -z "${DATABASE_LIST}" ]; then + WARNLOG "DATABASE_LIST is not set in the lab.conf file. 
Use default setting" + DATABASE_LIST="cinder glance keystone nova neutron ceilometer heat sysinv aodh postgres nova_api" + fi + + for DB in ${DATABASE_LIST}; do + rm /tmp/${DB}*.csv + done + ../parse_postgres *postgres.bz2 >postgres-summary-${NODE}-${DATESTAMP}.txt + for DB in ${DATABASE_LIST}; do + cp /tmp/${DB}_size.csv postgres_${DB}_size.csv + done + for TABLE in ${TABLE_LIST}; do + cp /tmp/${TABLE}.csv postgres_${TABLE}.csv + done + + # Parsing RabbitMQ data + LOG "Parsing rabbitmq data for ${NODE}" + ../parse-rabbitmq.sh rabbitmq-${NODE}.csv + + for QUEUE in ${RABBITMQ_QUEUE_LIST}; do + # If node is not a controller node then parse-rabbitmq-queue.sh should skip + ../parse-rabbitmq-queue.sh rabbitmq-${QUEUE}-${NODE}.csv ${QUEUE} + done +} + +function parse_compute_specific() +{ + LOG "Parsing vswitch data for ${NODE}" + ../parse-vswitch.sh ${NODE} +} + +function parse_occtop_data() +{ + LOG "Parsing occtop data for ${NODE}" + bzcat *occtop.bz2 >occtop-${NODE}-${DATESTAMP}.txt + cp occtop-${NODE}-${DATESTAMP}.txt tmp.txt + sedit tmp.txt + # Get the highest column count + column_count=$(awk -F "," '{print NF}' tmp.txt | sort -nu | tail -n 1) + grep '^[0-9]' tmp.txt |cut -d, -f1,2 | awk -F "," '{print $1" "$2}' > tmpdate.txt + grep '^[0-9]' tmp.txt |cut -d, -f3-$column_count > tmpcore.txt + paste -d, tmpdate.txt tmpcore.txt > tmp2.txt + # Generate header based on the number of columns. The Date/Time column consists of date and time fields + header="Date/Time,Total" + count=$(($column_count-3)) + for i in $(seq 0 $(($count-1))); do + header="$header,$i" + done + + # Generate detailed CSV with Date/Time, Total CPU occupancy and individual core occupancies e.g. + # Date/Time,Total,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35 + # 2016-11-22 00:29:16.523,759.5,21.4,18.9,43.8,24.5,23.1,25.3,28.1,25.5,20.5,27.8,26.8,32.7,27.3,25.1,21.1,23.2,21.7,36.4,23.3,16.6,15.3,13.9,14.4,15.0,14.7,14.4,16.4,13.8,17.0,17.8,19.0,15.1,14.0,13.2,14.5,17.8 + echo "${header}" > occtop-${NODE}-detailed.csv + cat tmp2.txt >> occtop-${NODE}-detailed.csv + + # Generate simple CSV file which is used to generate host CPU occupancy chart. Platform cores are + # defined in the host.conf. The simple CSV contains only the Date/Time and Total platform CPU occupancy e.g. + # Date/Time,Total + # 2016-11-22 00:29:16.523,94.9 + # 2016-11-22 00:30:16.526,71.3 + + if [ -z "${PLATFORM_CPU_LIST}" ]; then + # A controller node in standard system. In this case, all cores are dedicated to platform use. + # Simply extract the Date/Time and Total CPU occupancy + cut -d, -f1,2 occtop-${NODE}-detailed.csv > occtop-${NODE}.csv + else + # A CPE, compute or storage node. The cores dedicated to platform use are specified in the config. 
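+        # Worked example, using the sample detailed row shown above: with
+        # PLATFORM_CPU_LIST="0 1", the loop below sums arr[2] and arr[3]
+        # (index = core + 2, skipping the Date/Time and Total columns), so
+        # "2016-11-22 00:29:16.523,759.5,21.4,18.9,..." is reduced to
+        # "2016-11-22 00:29:16.523,40.3".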
+ echo "Date/Time,Total" > occtop-${NODE}.csv + while read -r line || [[ -n "$line" ]]; do + IFS="," read -r -a arr <<< "${line}" + total=0 + for CORE in ${PLATFORM_CPU_LIST}; do + # Add 2 to the index as occupancy of each individual core starts after Date/Time and Total + idx=$(($CORE+2)) + total=`echo $total + ${arr[$idx]} | bc` + done + echo "${arr[0]},${total}" >> occtop-${NODE}.csv + done < tmp2.txt + fi + # Remove temporary files + rm tmp.txt tmp2.txt tmpdate.txt tmpcore.txt +} + +function parse_memtop_data() +{ + LOG "Parsing memtop data for ${NODE}" + bzcat *memtop.bz2 > memtop-${NODE}-${DATESTAMP}.txt + cp memtop-${NODE}-${DATESTAMP}.txt tmp.txt + sedit tmp.txt + + # After dumping all memtop bz2 output into one text file and in-place sed, grab only relevant data + # for CSV output. Generate both detailed and simple CSV files. Simple output will be used to generate + # chart. + grep '^[0-9]' tmp.txt | awk -F "," '{print $1" "$2","$3","$4","$5","$6","$7","$8","$9","$10","$11","$12","$13","$14","$15","$16","$17","$18}' > tmp2.txt + echo "Date/Time,Total,Used,Free,Cached,Buf,Slab,CAS,CLim,Dirty,WBack,Anon,Avail,0:Avail,0:HFree,1:Avail,1:HFree" > memtop-${NODE}-detailed.csv + cat tmp2.txt >> memtop-${NODE}-detailed.csv + echo "Date/Time,Total,Anon" > memtop-${NODE}.csv + cut -d, -f1-2,12 tmp2.txt >> memtop-${NODE}.csv + # Remove temporary files + rm tmp.txt tmp2.txt +} + +function parse_netstats_data() +{ + LOG "Parsing netstats data for ${NODE}" + # First generate the summary data then detail data for specified interfaces + ../parse_netstats *netstats.bz2 > netstats-summary-${NODE}-${DATESTAMP}.txt + if [ -z "${NETSTATS_INTERFACE_LIST}" ]; then + ERRLOG "NETSTATS_INTERFACE_LIST is not set in host.conf. Skipping detail netstats..." + else + for INTERFACE in ${NETSTATS_INTERFACE_LIST}; do + echo "Date/Time,Interface,Rx PPS,Rx Mbps,Rx Packet Size,Tx PPS,Tx Mbps,Tx Packet Size" > netstats-${NODE}-${INTERFACE}.csv + ../parse_netstats *netstats.bz2 | grep " ${INTERFACE} " > tmp.txt + sed -i -e "s/|/ /g" tmp.txt + sed -i -e "s/ */ /g;s/ */ /g" tmp.txt + sed -i -e "s/ /,/g" tmp.txt + # Remove the leading comma + sed -i 's/,//' tmp.txt + while read -r line || [[ -n "$line" ]]; do + IFS="," read -r -a arr <<< "${line}" + echo "${arr[8]} ${arr[9]},${arr[0]},${arr[2]},${arr[3]},${arr[4]},${arr[5]},${arr[6]},${arr[7]}" >> netstats-${NODE}-${INTERFACE}.csv + done < tmp.txt + done + rm tmp.txt + fi +} + +function parse_iostats_data() +{ + LOG "Parsing iostat data for ${NODE}" + if [ -z "${IOSTATS_DEVICE_LIST}" ]; then + ERRLOG "IOSTAT_DEVICE_LIST is not set in host.conf. Skipping iostats..." 
+ else + for DEVICE in ${IOSTATS_DEVICE_LIST}; do + # Add header to output csv file + echo "Date/Time,${DEVICE},rqm/s,wrqm/s,r/s,w/s,rkB/s,wkB/s,avgrq-sz,avgqu-sz,await,r_await,w_await,svctm,%util" > iostat-${NODE}-${DEVICE}.csv + # Dumping iostat content to tmp file + bzcat *iostat.bz2 | grep -E "/2015|/2016|/2017|${DEVICE}" | awk '{print $1","$2","$3","$4","$5","$6","$7","$8","$9","$10","$11","$12","$13","$14}' > tmp.txt + while IFS= read -r current + do + if test "${current#*Linux}" != "$current" + then + # Skip the line that contains the word "Linux" + continue + else + if test "${current#*$DEVICE}" == "$current" + then + # It's a date entry, look ahead + read -r next + if test "${next#*$DEVICE}" != "${next}" + then + # This next line contains the device stats + # Combine date and time fields + current="${current//2016,/2016 }" + current="${current//2017,/2017 }" + # Combine time and AM/PM fields + current="${current//,AM/ AM}" + current="${current//,PM/ PM}" + # Write both lines to intermediate file + echo "${current}" >> tmp2.txt + echo "${next}" >> tmp2.txt + fi + fi + fi + done < tmp.txt + mv tmp2.txt tmp.txt + # Combine the time and stats data into one line + # 11/22/2016 06:34:00 AM,,,,,,,,,,, + # dm-0,0.00,0.00,0.00,1.07,0.00,38.00,71.25,0.00,0.19,0.00,0.19,0.03,0.00 + paste -d "" - - < tmp.txt > tmp2.txt + # Remove empty fields, use "complement" option for contiguous fields/range + cut -d, -f2-11 --complement tmp2.txt > tmp.txt + # Write final content to output csv + cat tmp.txt >> iostat-${NODE}-${DEVICE}.csv + rm tmp.txt tmp2.txt + done + fi +} + +function parse_diskstats_data() +{ + LOG "Parsing diskstats data for ${NODE}" + + if [ -z "${DISKSTATS_FILESYSTEM_LIST}" ]; then + ERRLOG "DISKSTATS_FILESYSTEM_LIST is not set in host.conf. Skipping diskstats..." + else + for FS in ${DISKSTATS_FILESYSTEM_LIST}; do + fspair=(${FS//|/ }) + fsname=${fspair[0]} + mountname=${fspair[1]} + if [ ${mountname} == "/" ]; then + mountname=" /" + echo "File system,Type,Size,Used,Avail,Used(%)" > diskstats-${NODE}-root.csv + bzcat *diskstats.bz2 | grep $fsname | grep $mountname | grep G | awk '{print $1","$2","$3","$4","$5","$6}' >> diskstats-${NODE}-root.csv + else + fname=$(get_filename_from_mountname $mountname) + echo "File system,Type,Size,Used,Avail,Used(%)" > diskstats-${NODE}-$fname.csv + bzcat *diskstats.bz2 | grep $fsname | grep $mountname | grep G | awk '{print $1","$2","$3","$4","$5","$6}' >> diskstats-${NODE}-$fname.csv + fi + done + fi +} + +# Parsing starts here ... +LOG "Parsing ${NODE} files - ${CURDATE}" + +# Let's get the host specific parsing out of the way +if test "${NODE#*"controller"}" != "${NODE}"; then + parse_controller_specific +elif test "${NODE#*"compute"}" != "${NODE}"; then + parse_compute_specific +fi + +# Parsing CPU occtop data +parse_occtop_data + +# Parsing memtop data +parse_memtop_data + +# Parsing memstats data to generate the high level report. The most important piece of info is the list of +# hi-runners at the end of the file. If there is a leak, run parse-daily.sh script to generate the time +# series data for the offending processes only. Use process name, not PID as most Titanium Cloud processes have +# workers. +LOG "Parsing memstats summary for ${NODE}" +../parse_memstats --report *memstats.bz2 > memstats-summary-${NODE}-${DATESTAMP}.txt +#tar czf pidstats.tgz pid-*.csv +rm pid-*.csv + + +# Parsing netstats data +parse_netstats_data + +# Parsing schedtop data to generate the high level report. 
+
+# Parsing starts here ...
+LOG "Parsing ${NODE} files - ${CURDATE}"
+
+# Let's get the host specific parsing out of the way
+if test "${NODE#*"controller"}" != "${NODE}"; then
+    parse_controller_specific
+elif test "${NODE#*"compute"}" != "${NODE}"; then
+    parse_compute_specific
+fi
+
+# Parsing CPU occtop data
+parse_occtop_data
+
+# Parsing memtop data
+parse_memtop_data
+
+# Parsing memstats data to generate the high level report. The most important piece of info is the
+# list of hi-runners at the end of the file. If there is a leak, run the parse-daily.sh script to
+# generate the time series data for the offending processes only. Use the process name, not the
+# PID, as most Titanium Cloud processes have workers.
+LOG "Parsing memstats summary for ${NODE}"
+../parse_memstats --report *memstats.bz2 > memstats-summary-${NODE}-${DATESTAMP}.txt
+#tar czf pidstats.tgz pid-*.csv
+rm pid-*.csv
+
+# Parsing netstats data
+parse_netstats_data
+
+# Parsing schedtop data to generate the high level report. Leave the process level schedtop
+# parsing till the end as it is a long running task.
+LOG "Parsing schedtop summary for ${NODE}"
+FILES=$(ls *schedtop.bz2)
+../parse_schedtop ${FILES} > schedtop-summary-${NODE}-${DATESTAMP}.txt
+
+# Parsing iostats data
+parse_iostats_data
+
+# Parsing diskstats data
+parse_diskstats_data
+
+# Parsing filestats data to generate the high level report. If there is a file descriptor leak,
+# run the parse-daily.sh script to generate the time series data for the offending processes only.
+# Use the process name, not the PID, as most Titanium Cloud processes have workers.
+LOG "Parsing filestats summary for ${NODE}"
+../parse_filestats --all *filestats.bz2 > filestats-summary-${NODE}-${DATESTAMP}.txt
+
+# Parsing process level schedtop data. This is a long running task. To skip this step or generate
+# data for only specific processes, update the lab.conf and host.conf files.
+[[ ${GENERATE_PROCESS_SCHEDTOP} == Y ]] && parse_process_schedtop_data || WARNLOG "Parsing process level schedtop is skipped."
+
+# Done parsing for this host. If it is a controller host, wait for the postgres connection stats
+# parsing, which runs in parallel, to finish before creating the tar file.
+if test "${NODE#*"controller"}" != "${NODE}"; then
+    # If the postgres-conns.csv file has not been created yet, which is highly unlikely, wait a
+    # couple of minutes
+    [ ! -e postgres-conns.csv ] && sleep 120
+
+    # Now check the size of this file every 5 seconds to see if it is still being updated. Another
+    # option is to use inotify, which requires the extra inotify-tools package.
+    oldsize=0
+    newsize=0
+    while true
+    do
+        newsize=$(stat -c %s postgres-conns.csv)
+        if [ "$oldsize" == "$newsize" ]; then
+            break
+        fi
+        oldsize=$newsize
+        sleep 5
+    done
+fi
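+
+# As noted above, inotify is an alternative to polling the file size. With the
+# inotify-tools package installed, the polling loop could be replaced by something
+# like the following (untested sketch): the loop exits once no modify event arrives
+# within a 5 second window, i.e. once parse-postgres has stopped writing.
+#
+#     while inotifywait -qq -t 5 -e modify postgres-conns.csv; do
+#         :
+#     done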
+tar czf syseng-data-${NODE}-${DATESTAMP}.tgz *.csv *.txt
+LOG "Parsing stats data for ${NODE} completed!"
diff --git a/middleware/util/recipes-common/engtools/parsers/common/parse-controllers.sh b/middleware/util/recipes-common/engtools/parsers/common/parse-controllers.sh
new file mode 100755
index 0000000..61a77c5
--- /dev/null
+++ b/middleware/util/recipes-common/engtools/parsers/common/parse-controllers.sh
@@ -0,0 +1,52 @@
+#!/bin/bash
+
+#Copyright (c) 2016 Wind River Systems, Inc.
+#
+#SPDX-License-Identifier: Apache-2.0
+#
+
+# This script is used to parse stats data for controller/CPE hosts. For large office,
+# it is called by parse-everything.sh. For CPE, it should be called on its own.
+# The file lab.conf must exist with the CONTROLLER_LIST config parameter set for the script to run.
+# Usage: ./parse-controllers.sh
+
+PARSERDIR=$(dirname $0)
+. ${PARSERDIR}/parse-util.sh
+
+if [ ! -f lab.conf ]; then
+    echo "Lab configuration file is missing."
+    echo "See http://wiki.wrs.com/PBUeng/TitaniumServerSysengToolsAndDataAnalysis for more info."
+    exit 1
+fi
+
+source ./lab.conf
+
+if [ -z "${CONTROLLER_LIST}" ]; then
+    echo "ERROR: Controller list is not set in the lab.conf file. Exiting..."
+    exit 1
+fi
+
+for HOST in ${CONTROLLER_LIST}; do
+    LOG "Parsing stats data for controller host ${HOST}"
+    if [ -d ${HOST} ]; then
+        cd ${HOST}
+        bzip2 ${HOST}* > /dev/null 2>&1
+        ../parse-all.sh ${HOST} > /dev/null 2>&1 &
+        # Delay the next controller as both controllers write to /tmp
+        sleep 120
+        cd ..
+    else
+        ERRLOG "${HOST} does not exist. Parsing skipped."
+    fi
+done
+
+# Parsing postgres connection stats is a time-consuming step; run it in parallel with the
+# parse-all script.
+for HOST in ${CONTROLLER_LIST}; do
+    if [ -d ${HOST} ]; then
+        LOG "Parsing postgres connection stats data for controller host ${HOST}"
+        cd ${HOST}
+        ../parse-postgres.sh *postgres.bz2 > /dev/null 2>&1 &
+        cd ..
+    fi
+done
diff --git a/middleware/util/recipes-common/engtools/parsers/common/parse-daily.sh b/middleware/util/recipes-common/engtools/parsers/common/parse-daily.sh
new file mode 100755
index 0000000..7298441
--- /dev/null
+++ b/middleware/util/recipes-common/engtools/parsers/common/parse-daily.sh
@@ -0,0 +1,115 @@
+#!/bin/bash
+
+#Copyright (c) 2016 Wind River Systems, Inc.
+#
+#SPDX-License-Identifier: Apache-2.0
+#
+# The following script is used when either the memstats or filestats summary reports a
+# possible memory or file leak respectively. It can be run for a particular host or for
+# all hosts as configured in lab.conf.
+# Make sure to run the cleanup-uncompressed.sh script before running this script to remove
+# any uncompressed files, as the memstats/filestats parser can produce erroneous results if
+# both uncompressed and compressed versions of the same file are present.
+#
+# Usage:
+#   ./parse-daily.sh <parser name> <process name> to generate daily stats for all hosts
+#   ./parse-daily.sh <host name> <parser name> <process name> to generate daily stats for the
+#   specified host.
+#
+# e.g. >./parse-daily.sh memstats sm-eru
+#      >./parse-daily.sh controller-0 filestats postgres
+
+function print_usage()
+{
+    echo "Usage: ./parse-daily.sh <parser name> <process name> will parse daily data for all hosts."
+    echo "Usage: ./parse-daily.sh <host name> <parser name> <process name> will parse daily data for the specified host."
+    echo "Valid parsers for daily stats are: memstats & filestats."
+    exit 1
+}
+
+function parse_daily_stats()
+{
+    local PARSER_NAME=$1
+    local PROCESS_NAME=$2
+    local TMPFILE="tmp.txt"
+    # Insert the header in the summary csv file. The summary file is a concatenation of
+    # the daily files. If there is a large number of files, the parser may not have
+    # enough memory to process them all. The safest way is to parse one day at a time.
+    if [ ${PARSER_NAME} == "memstats" ]; then
+        local SUMMARYFILE=memstats-summary-${PROCESS_NAME}.csv
+        echo "Date,RSS,VSZ" > ${SUMMARYFILE}
+    else
+        local SUMMARYFILE=filestats-summary-${PROCESS_NAME}.csv
+        echo "Date,Read/Write,Write,Read" > ${SUMMARYFILE}
+    fi
+    # Get the list of dates for the memstats/filestats bz2 files in this directory.
+    # The filename convention is : _YYYY-MM-DD_