From 062ec89dbb52085dfc88331c6639c83101687e02 Mon Sep 17 00:00:00 2001
From: Scott Little
Date: Wed, 4 Sep 2019 10:14:28 -0400
Subject: [PATCH] Relocated some packages to repo 'utilities'

List of relocated subdirectories:

ceph/ceph-manager
ceph/python-cephclient
filesystem/nfscheck
logging/logmgmt
security/tpm2-openssl-engine
security/wrs-ssl
tools/collector
tools/engtools/hostdata-collectors
utilities/build-info
utilities/namespace-utils
utilities/pci-irq-affinity-agent
utilities/platform-util
utilities/tis-extensions
utilities/update-motd

Story: 2006166
Task: 35687
Depends-On: I665dc7fabbfffc798ad57843eb74dca16e7647a3
Change-Id: I2bf543a235507a4eff644a7feabd646a99d1474f
Signed-off-by: Scott Little
Depends-On: I85dda6d09028f57c1fb0f96e4bcd73ab9b9550be
Signed-off-by: Scott Little
---
 .zuul.yaml | 15 -
 ceph/ceph-manager/.gitignore | 6 -
 ceph/ceph-manager/LICENSE | 202 -
 ceph/ceph-manager/PKG-INFO | 13 -
 ceph/ceph-manager/centos/build_srpm.data | 3 -
 ceph/ceph-manager/centos/ceph-manager.spec | 84 -
 ceph/ceph-manager/ceph-manager/LICENSE | 202 -
 .../ceph-manager/ceph_manager/__init__.py | 5 -
 .../ceph-manager/ceph_manager/ceph.py | 161 -
 .../ceph-manager/ceph_manager/constants.py | 90 -
 .../ceph-manager/ceph_manager/exception.py | 79 -
 .../ceph-manager/ceph_manager/i18n.py | 15 -
 .../ceph-manager/ceph_manager/monitor.py | 883 ---
 .../ceph-manager/ceph_manager/server.py | 173 -
 .../ceph_manager/tests/__init__.py | 0
 ceph/ceph-manager/ceph-manager/setup.py | 19 -
 .../ceph-manager/test-requirements.txt | 10 -
 ceph/ceph-manager/ceph-manager/tox.ini | 24 -
 .../ceph-manager/files/ceph-manager.logrotate | 11 -
 ceph/ceph-manager/files/ceph-manager.service | 17 -
 ceph/ceph-manager/scripts/bin/ceph-manager | 17 -
 ceph/ceph-manager/scripts/init.d/ceph-manager | 103 -
 ceph/python-cephclient/centos/build_srpm.data | 2 -
 .../centos/python-cephclient.spec | 65 -
 .../python-cephclient/.gitignore | 2 -
 .../python-cephclient/LICENSE | 202 -
 .../python-cephclient/cephclient/__init__.py | 5 -
 .../python-cephclient/cephclient/client.py | 5793 -----
 .../python-cephclient/cephclient/exception.py | 100 -
 .../cephclient/tests/__init__.py | 5 -
 .../python-cephclient/cephclient/wrapper.py | 268 -
 .../python-cephclient/requirements.txt | 3 -
 .../python-cephclient/setup.py | 34 -
 .../python-cephclient/test-requirements.txt | 6 -
 .../python-cephclient/tox.ini | 19 -
 devstack/lib/integ | 31 +-
 devstack/settings | 1 -
 filesystem/nfscheck/LICENSE | 202 -
 filesystem/nfscheck/PKG-INFO | 14 -
 filesystem/nfscheck/centos/build_srpm.data | 2 -
 filesystem/nfscheck/centos/nfscheck.spec | 43 -
 filesystem/nfscheck/files/LICENSE | 202 -
 filesystem/nfscheck/files/nfscheck-init.sh | 79 -
 filesystem/nfscheck/files/nfscheck.service | 10 -
 filesystem/nfscheck/files/nfscheck.sh | 48 -
 logging/logmgmt/.gitignore | 6 -
 logging/logmgmt/LICENSE | 202 -
 logging/logmgmt/PKG-INFO | 13 -
 logging/logmgmt/centos/build_srpm.data | 4 -
 logging/logmgmt/centos/logmgmt.spec | 87 -
 logging/logmgmt/logmgmt/LICENSE | 202 -
 logging/logmgmt/logmgmt/logmgmt/__init__.py | 6 -
 logging/logmgmt/logmgmt/logmgmt/logmgmt.py | 271 -
 .../logmgmt/logmgmt/logmgmt/prepostrotate.py | 60 -
 logging/logmgmt/logmgmt/setup.py | 18 -
 logging/logmgmt/scripts/bin/logmgmt | 18 -
 .../logmgmt/scripts/bin/logmgmt_postrotate | 19 -
 logging/logmgmt/scripts/bin/logmgmt_prerotate | 19 -
 .../etc/systemd/system/logmgmt.service | 14 -
 logging/logmgmt/scripts/init.d/logmgmt | 97 -
 logging/logmgmt/scripts/pmon.d/logmgmt | 24 -
 security/tpm2-openssl-engine/PKG_INFO | 14 -
 .../centos/build_srpm.data | 2 -
 .../centos/tpm2-openssl-engine.spec | 39 -
 .../tpm2-openssl-engine/LICENSE | 57 -
 .../tpm2-openssl-engine/Makefile | 54 -
 .../tpm2-openssl-engine/create_tpm2_key.c | 479 --
 .../tpm2-openssl-engine/e_tpm2.c | 860 ---
 .../tpm2-openssl-engine/e_tpm2.h | 147 -
 .../tpm2-openssl-engine/e_tpm2_err.c | 170 -
 .../tpm2-openssl-engine/tpm2-asn.h | 121 -
 security/wrs-ssl/LICENSE | 202 -
 security/wrs-ssl/centos/build_srpm.data | 2 -
 security/wrs-ssl/centos/wrs-ssl.spec | 41 -
 security/wrs-ssl/files/tpmdevice-setup | 122 -
 security/wrs-ssl/server-csr.conf | 8 -
 tools/collector/LICENSE | 202 -
 tools/collector/centos/build_srpm.data | 2 -
 tools/collector/centos/collector.spec | 67 -
 tools/collector/scripts/LICENSE | 202 -
 tools/collector/scripts/collect | 1245 ----
 tools/collector/scripts/collect_ceph.sh | 81 -
 tools/collector/scripts/collect_coredump.sh | 35 -
 tools/collector/scripts/collect_crash.sh | 30 -
 tools/collector/scripts/collect_date | 1064 ---
 tools/collector/scripts/collect_fm.sh | 41 -
 tools/collector/scripts/collect_host | 487 --
 tools/collector/scripts/collect_ima.sh | 59 -
 .../collector/scripts/collect_mask_passwords | 123 -
 tools/collector/scripts/collect_networking.sh | 61 -
 tools/collector/scripts/collect_nfv_vim.sh | 44 -
 tools/collector/scripts/collect_openstack.sh | 68 -
 tools/collector/scripts/collect_ovs.sh | 35 -
 tools/collector/scripts/collect_parms | 29 -
 tools/collector/scripts/collect_patching.sh | 45 -
 tools/collector/scripts/collect_psqldb.sh | 117 -
 tools/collector/scripts/collect_sm.sh | 26 -
 tools/collector/scripts/collect_sysinv.sh | 72 -
 tools/collector/scripts/collect_tc.sh | 82 -
 tools/collector/scripts/collect_utils | 237 -
 tools/collector/scripts/etc.exclude | 40 -
 tools/collector/scripts/expect_done | 1 -
 tools/collector/scripts/run.exclude | 12 -
 tools/engtools/hostdata-collectors/README | 12 -
 .../centos/build_srpm.data | 2 -
 .../centos/collect-engtools.spec | 101 -
 .../hostdata-collectors/patch-engtools.sh | 33 -
 .../hostdata-collectors/scripts/LICENSE | 202 -
 .../hostdata-collectors/scripts/buddyinfo.py | 123 -
 .../hostdata-collectors/scripts/ceph.sh | 58 -
 .../scripts/cfg/engtools.conf | 99 -
 .../hostdata-collectors/scripts/chewmem | 86 -
 .../scripts/cleanup-engtools.sh | 54 -
 .../scripts/collect-engtools.service | 15 -
 .../scripts/collect-engtools.sh | 333 -
 .../hostdata-collectors/scripts/diskstats.sh | 119 -
 .../scripts/engtools_util.sh | 479 --
 .../hostdata-collectors/scripts/filestats.sh | 96 -
 .../scripts/init.d/collect-engtools.sh | 118 -
 .../hostdata-collectors/scripts/iostat.sh | 49 -
 .../scripts/linux_benchmark.sh | 533 --
 .../scripts/live_stream.py | 1600 -----
 .../hostdata-collectors/scripts/memstats.sh | 110 -
 .../hostdata-collectors/scripts/netstats.sh | 63 -
 .../hostdata-collectors/scripts/postgres.sh | 138 -
 .../hostdata-collectors/scripts/rabbitmq.sh | 83 -
 .../scripts/remote/rbzip2-engtools.sh | 46 -
 .../scripts/remote/rstart-engtools.sh | 37 -
 .../scripts/remote/rstop-engtools.sh | 37 -
 .../scripts/remote/rsync-engtools-data.sh | 68 -
 .../hostdata-collectors/scripts/slab.sh | 23 -
 .../hostdata-collectors/scripts/ticker.sh | 49 -
 .../hostdata-collectors/scripts/top.sh | 43 -
 .../hostdata-collectors/scripts/vswitch.sh | 66 -
 tox.ini | 7 +-
 utilities/build-info/PKG-INFO | 12 -
 utilities/build-info/build-info-1.0/LICENSE | 202 -
 .../build-info/build-info-1.0/collect.sh | 104 -
 utilities/build-info/centos/build-info.spec | 50 -
 utilities/build-info/centos/build_srpm | 130 -
 utilities/build-info/centos/build_srpm.data | 3 -
 utilities/build-info/release-info.inc | 12 -
 utilities/namespace-utils/LICENSE | 202 -
 .../namespace-utils/centos/build_srpm.data | 2 -
 .../centos/namespace-utils.spec | 35 -
 .../namespace-utils/namespace-utils/LICENSE | 202 -
 .../namespace-utils/namespace-utils/bashns.c | 59 -
 .../namespace-utils/umount-in-namespace | 25 -
 utilities/pci-irq-affinity-agent/PKG-INFO | 7 -
 .../centos/build_srpm.data | 3 -
 .../centos/pci-irq-affinity.spec | 70 -
 .../pci-irq-affinity-agent/files/LICENSE | 202 -
 .../pci-irq-affinity-agent/files/config.ini | 22 -
 .../pci-irq-affinity-agent/files/nova-sriov | 117 -
 .../files/pci-irq-affinity-agent | 105 -
 .../files/pci-irq-affinity-agent.conf | 10 -
 .../files/pci-irq-affinity-agent.service | 14 -
 .../pci_irq_affinity/__init__.py | 0
 .../pci_irq_affinity/affinity.py | 92 -
 .../pci_irq_affinity/agent.py | 206 -
 .../pci_irq_affinity/config.py | 45 -
 .../pci_irq_affinity/driver.py | 141 -
 .../pci_irq_affinity/guest.py | 265 -
 .../pci_irq_affinity/instance.py | 82 -
 .../pci_irq_affinity/pci_irq_affinity/log.py | 28 -
 .../pci_irq_affinity/nova_provider.py | 139 -
 .../pci_irq_affinity/utils.py | 291 -
 .../pci_irq_affinity/setup.py | 35 -
 .../platform-util/centos/build_srpm.data | 4 -
 .../platform-util/centos/platform-util.spec | 110 -
 utilities/platform-util/platform-util/LICENSE | 202 -
 .../platform-util/platform_util/__init__.py | 5 -
 .../platform-util/platform_util/i18n.py | 15 -
 .../platform_util/license/__init__.py | 5 -
 .../platform_util/license/constants.py | 66 -
 .../platform_util/license/exception.py | 40 -
 .../platform_util/license/license.py | 211 -
 .../platform-util/platform-util/setup.py | 20 -
 utilities/platform-util/scripts/LICENSE | 202 -
 .../platform-util/scripts/cgcs_tc_setup.sh | 518 --
 .../platform-util/scripts/connectivity_test | 58 -
 .../platform-util/scripts/log_functions.sh | 45 -
 .../platform-util/scripts/opt-platform.mount | 0
 .../scripts/opt-platform.service | 15 -
 .../scripts/patch-restart-haproxy | 94 -
 .../platform-util/scripts/patch-restart-mtce | 476 --
 .../scripts/patch-restart-processes | 555 --
 .../scripts/remotelogging_tc_setup.sh | 200 -
 utilities/tis-extensions/PKG-INFO | 13 -
 .../tis-extensions/centos/build_srpm.data | 2 -
 .../tis-extensions/centos/tis-extensions.spec | 60 -
 utilities/tis-extensions/files/LICENSE | 202 -
 .../tis-extensions/files/coredump-sysctl.conf | 4 -
 utilities/tis-extensions/files/coredump.conf | 8 -
 .../files/modules-load-vfio.conf | 1 -
 utilities/update-motd/LICENSE | 202 -
 utilities/update-motd/PKG-INFO | 14 -
 utilities/update-motd/centos/build_srpm.data | 2 -
 utilities/update-motd/centos/update-motd.spec | 62 -
 utilities/update-motd/files/LICENSE | 202 -
 .../files/apply_banner_customization | 25 -
 utilities/update-motd/files/customize-banner | 286 -
 .../files/install_banner_customization | 16 -
 utilities/update-motd/files/motd-footer | 16 -
 utilities/update-motd/files/motd-header | 16 -
 utilities/update-motd/files/motd-update | 15 -
 utilities/update-motd/files/motd-update.cron | 3 -
 utilities/update-motd/files/motd.head | 5 -
 208 files changed, 3 insertions(+), 28939 deletions(-)
 delete mode 100644 ceph/ceph-manager/.gitignore
 delete mode 100644 ceph/ceph-manager/LICENSE
 delete mode 100644 ceph/ceph-manager/PKG-INFO
 delete mode 100644 ceph/ceph-manager/centos/build_srpm.data
 delete mode 100644 ceph/ceph-manager/centos/ceph-manager.spec
 delete mode 100644 ceph/ceph-manager/ceph-manager/LICENSE
 delete mode 100644
ceph/ceph-manager/ceph-manager/ceph_manager/__init__.py delete mode 100644 ceph/ceph-manager/ceph-manager/ceph_manager/ceph.py delete mode 100644 ceph/ceph-manager/ceph-manager/ceph_manager/constants.py delete mode 100644 ceph/ceph-manager/ceph-manager/ceph_manager/exception.py delete mode 100644 ceph/ceph-manager/ceph-manager/ceph_manager/i18n.py delete mode 100644 ceph/ceph-manager/ceph-manager/ceph_manager/monitor.py delete mode 100644 ceph/ceph-manager/ceph-manager/ceph_manager/server.py delete mode 100644 ceph/ceph-manager/ceph-manager/ceph_manager/tests/__init__.py delete mode 100644 ceph/ceph-manager/ceph-manager/setup.py delete mode 100644 ceph/ceph-manager/ceph-manager/test-requirements.txt delete mode 100644 ceph/ceph-manager/ceph-manager/tox.ini delete mode 100644 ceph/ceph-manager/files/ceph-manager.logrotate delete mode 100644 ceph/ceph-manager/files/ceph-manager.service delete mode 100644 ceph/ceph-manager/scripts/bin/ceph-manager delete mode 100644 ceph/ceph-manager/scripts/init.d/ceph-manager delete mode 100644 ceph/python-cephclient/centos/build_srpm.data delete mode 100644 ceph/python-cephclient/centos/python-cephclient.spec delete mode 100644 ceph/python-cephclient/python-cephclient/.gitignore delete mode 100644 ceph/python-cephclient/python-cephclient/LICENSE delete mode 100644 ceph/python-cephclient/python-cephclient/cephclient/__init__.py delete mode 100644 ceph/python-cephclient/python-cephclient/cephclient/client.py delete mode 100644 ceph/python-cephclient/python-cephclient/cephclient/exception.py delete mode 100644 ceph/python-cephclient/python-cephclient/cephclient/tests/__init__.py delete mode 100644 ceph/python-cephclient/python-cephclient/cephclient/wrapper.py delete mode 100644 ceph/python-cephclient/python-cephclient/requirements.txt delete mode 100644 ceph/python-cephclient/python-cephclient/setup.py delete mode 100644 ceph/python-cephclient/python-cephclient/test-requirements.txt delete mode 100644 ceph/python-cephclient/python-cephclient/tox.ini delete mode 100644 filesystem/nfscheck/LICENSE delete mode 100644 filesystem/nfscheck/PKG-INFO delete mode 100644 filesystem/nfscheck/centos/build_srpm.data delete mode 100644 filesystem/nfscheck/centos/nfscheck.spec delete mode 100644 filesystem/nfscheck/files/LICENSE delete mode 100755 filesystem/nfscheck/files/nfscheck-init.sh delete mode 100644 filesystem/nfscheck/files/nfscheck.service delete mode 100644 filesystem/nfscheck/files/nfscheck.sh delete mode 100644 logging/logmgmt/.gitignore delete mode 100644 logging/logmgmt/LICENSE delete mode 100644 logging/logmgmt/PKG-INFO delete mode 100644 logging/logmgmt/centos/build_srpm.data delete mode 100644 logging/logmgmt/centos/logmgmt.spec delete mode 100644 logging/logmgmt/logmgmt/LICENSE delete mode 100644 logging/logmgmt/logmgmt/logmgmt/__init__.py delete mode 100644 logging/logmgmt/logmgmt/logmgmt/logmgmt.py delete mode 100644 logging/logmgmt/logmgmt/logmgmt/prepostrotate.py delete mode 100644 logging/logmgmt/logmgmt/setup.py delete mode 100644 logging/logmgmt/scripts/bin/logmgmt delete mode 100644 logging/logmgmt/scripts/bin/logmgmt_postrotate delete mode 100644 logging/logmgmt/scripts/bin/logmgmt_prerotate delete mode 100644 logging/logmgmt/scripts/etc/systemd/system/logmgmt.service delete mode 100644 logging/logmgmt/scripts/init.d/logmgmt delete mode 100644 logging/logmgmt/scripts/pmon.d/logmgmt delete mode 100644 security/tpm2-openssl-engine/PKG_INFO delete mode 100644 security/tpm2-openssl-engine/centos/build_srpm.data delete mode 100644 
security/tpm2-openssl-engine/centos/tpm2-openssl-engine.spec delete mode 100644 security/tpm2-openssl-engine/tpm2-openssl-engine/LICENSE delete mode 100644 security/tpm2-openssl-engine/tpm2-openssl-engine/Makefile delete mode 100644 security/tpm2-openssl-engine/tpm2-openssl-engine/create_tpm2_key.c delete mode 100644 security/tpm2-openssl-engine/tpm2-openssl-engine/e_tpm2.c delete mode 100644 security/tpm2-openssl-engine/tpm2-openssl-engine/e_tpm2.h delete mode 100644 security/tpm2-openssl-engine/tpm2-openssl-engine/e_tpm2_err.c delete mode 100644 security/tpm2-openssl-engine/tpm2-openssl-engine/tpm2-asn.h delete mode 100644 security/wrs-ssl/LICENSE delete mode 100644 security/wrs-ssl/centos/build_srpm.data delete mode 100644 security/wrs-ssl/centos/wrs-ssl.spec delete mode 100644 security/wrs-ssl/files/tpmdevice-setup delete mode 100644 security/wrs-ssl/server-csr.conf delete mode 100644 tools/collector/LICENSE delete mode 100644 tools/collector/centos/build_srpm.data delete mode 100644 tools/collector/centos/collector.spec delete mode 100644 tools/collector/scripts/LICENSE delete mode 100755 tools/collector/scripts/collect delete mode 100755 tools/collector/scripts/collect_ceph.sh delete mode 100644 tools/collector/scripts/collect_coredump.sh delete mode 100644 tools/collector/scripts/collect_crash.sh delete mode 100755 tools/collector/scripts/collect_date delete mode 100644 tools/collector/scripts/collect_fm.sh delete mode 100755 tools/collector/scripts/collect_host delete mode 100755 tools/collector/scripts/collect_ima.sh delete mode 100644 tools/collector/scripts/collect_mask_passwords delete mode 100755 tools/collector/scripts/collect_networking.sh delete mode 100644 tools/collector/scripts/collect_nfv_vim.sh delete mode 100755 tools/collector/scripts/collect_openstack.sh delete mode 100644 tools/collector/scripts/collect_ovs.sh delete mode 100644 tools/collector/scripts/collect_parms delete mode 100755 tools/collector/scripts/collect_patching.sh delete mode 100755 tools/collector/scripts/collect_psqldb.sh delete mode 100644 tools/collector/scripts/collect_sm.sh delete mode 100755 tools/collector/scripts/collect_sysinv.sh delete mode 100755 tools/collector/scripts/collect_tc.sh delete mode 100755 tools/collector/scripts/collect_utils delete mode 100644 tools/collector/scripts/etc.exclude delete mode 100755 tools/collector/scripts/expect_done delete mode 100644 tools/collector/scripts/run.exclude delete mode 100644 tools/engtools/hostdata-collectors/README delete mode 100644 tools/engtools/hostdata-collectors/centos/build_srpm.data delete mode 100644 tools/engtools/hostdata-collectors/centos/collect-engtools.spec delete mode 100755 tools/engtools/hostdata-collectors/patch-engtools.sh delete mode 100644 tools/engtools/hostdata-collectors/scripts/LICENSE delete mode 100644 tools/engtools/hostdata-collectors/scripts/buddyinfo.py delete mode 100644 tools/engtools/hostdata-collectors/scripts/ceph.sh delete mode 100644 tools/engtools/hostdata-collectors/scripts/cfg/engtools.conf delete mode 100644 tools/engtools/hostdata-collectors/scripts/chewmem delete mode 100644 tools/engtools/hostdata-collectors/scripts/cleanup-engtools.sh delete mode 100644 tools/engtools/hostdata-collectors/scripts/collect-engtools.service delete mode 100644 tools/engtools/hostdata-collectors/scripts/collect-engtools.sh delete mode 100644 tools/engtools/hostdata-collectors/scripts/diskstats.sh delete mode 100644 tools/engtools/hostdata-collectors/scripts/engtools_util.sh delete mode 100644 
tools/engtools/hostdata-collectors/scripts/filestats.sh delete mode 100644 tools/engtools/hostdata-collectors/scripts/init.d/collect-engtools.sh delete mode 100644 tools/engtools/hostdata-collectors/scripts/iostat.sh delete mode 100644 tools/engtools/hostdata-collectors/scripts/linux_benchmark.sh delete mode 100644 tools/engtools/hostdata-collectors/scripts/live_stream.py delete mode 100644 tools/engtools/hostdata-collectors/scripts/memstats.sh delete mode 100644 tools/engtools/hostdata-collectors/scripts/netstats.sh delete mode 100644 tools/engtools/hostdata-collectors/scripts/postgres.sh delete mode 100644 tools/engtools/hostdata-collectors/scripts/rabbitmq.sh delete mode 100644 tools/engtools/hostdata-collectors/scripts/remote/rbzip2-engtools.sh delete mode 100644 tools/engtools/hostdata-collectors/scripts/remote/rstart-engtools.sh delete mode 100644 tools/engtools/hostdata-collectors/scripts/remote/rstop-engtools.sh delete mode 100644 tools/engtools/hostdata-collectors/scripts/remote/rsync-engtools-data.sh delete mode 100644 tools/engtools/hostdata-collectors/scripts/slab.sh delete mode 100644 tools/engtools/hostdata-collectors/scripts/ticker.sh delete mode 100644 tools/engtools/hostdata-collectors/scripts/top.sh delete mode 100644 tools/engtools/hostdata-collectors/scripts/vswitch.sh delete mode 100644 utilities/build-info/PKG-INFO delete mode 100644 utilities/build-info/build-info-1.0/LICENSE delete mode 100755 utilities/build-info/build-info-1.0/collect.sh delete mode 100644 utilities/build-info/centos/build-info.spec delete mode 100755 utilities/build-info/centos/build_srpm delete mode 100644 utilities/build-info/centos/build_srpm.data delete mode 100644 utilities/build-info/release-info.inc delete mode 100644 utilities/namespace-utils/LICENSE delete mode 100644 utilities/namespace-utils/centos/build_srpm.data delete mode 100644 utilities/namespace-utils/centos/namespace-utils.spec delete mode 100644 utilities/namespace-utils/namespace-utils/LICENSE delete mode 100644 utilities/namespace-utils/namespace-utils/bashns.c delete mode 100644 utilities/namespace-utils/namespace-utils/umount-in-namespace delete mode 100644 utilities/pci-irq-affinity-agent/PKG-INFO delete mode 100644 utilities/pci-irq-affinity-agent/centos/build_srpm.data delete mode 100644 utilities/pci-irq-affinity-agent/centos/pci-irq-affinity.spec delete mode 100644 utilities/pci-irq-affinity-agent/files/LICENSE delete mode 100644 utilities/pci-irq-affinity-agent/files/config.ini delete mode 100755 utilities/pci-irq-affinity-agent/files/nova-sriov delete mode 100755 utilities/pci-irq-affinity-agent/files/pci-irq-affinity-agent delete mode 100644 utilities/pci-irq-affinity-agent/files/pci-irq-affinity-agent.conf delete mode 100644 utilities/pci-irq-affinity-agent/files/pci-irq-affinity-agent.service delete mode 100644 utilities/pci-irq-affinity-agent/pci_irq_affinity/pci_irq_affinity/__init__.py delete mode 100644 utilities/pci-irq-affinity-agent/pci_irq_affinity/pci_irq_affinity/affinity.py delete mode 100644 utilities/pci-irq-affinity-agent/pci_irq_affinity/pci_irq_affinity/agent.py delete mode 100644 utilities/pci-irq-affinity-agent/pci_irq_affinity/pci_irq_affinity/config.py delete mode 100644 utilities/pci-irq-affinity-agent/pci_irq_affinity/pci_irq_affinity/driver.py delete mode 100644 utilities/pci-irq-affinity-agent/pci_irq_affinity/pci_irq_affinity/guest.py delete mode 100644 utilities/pci-irq-affinity-agent/pci_irq_affinity/pci_irq_affinity/instance.py delete mode 100644 
utilities/pci-irq-affinity-agent/pci_irq_affinity/pci_irq_affinity/log.py delete mode 100644 utilities/pci-irq-affinity-agent/pci_irq_affinity/pci_irq_affinity/nova_provider.py delete mode 100644 utilities/pci-irq-affinity-agent/pci_irq_affinity/pci_irq_affinity/utils.py delete mode 100644 utilities/pci-irq-affinity-agent/pci_irq_affinity/setup.py delete mode 100644 utilities/platform-util/centos/build_srpm.data delete mode 100644 utilities/platform-util/centos/platform-util.spec delete mode 100644 utilities/platform-util/platform-util/LICENSE delete mode 100644 utilities/platform-util/platform-util/platform_util/__init__.py delete mode 100644 utilities/platform-util/platform-util/platform_util/i18n.py delete mode 100644 utilities/platform-util/platform-util/platform_util/license/__init__.py delete mode 100644 utilities/platform-util/platform-util/platform_util/license/constants.py delete mode 100644 utilities/platform-util/platform-util/platform_util/license/exception.py delete mode 100644 utilities/platform-util/platform-util/platform_util/license/license.py delete mode 100644 utilities/platform-util/platform-util/setup.py delete mode 100644 utilities/platform-util/scripts/LICENSE delete mode 100755 utilities/platform-util/scripts/cgcs_tc_setup.sh delete mode 100644 utilities/platform-util/scripts/connectivity_test delete mode 100644 utilities/platform-util/scripts/log_functions.sh delete mode 100644 utilities/platform-util/scripts/opt-platform.mount delete mode 100644 utilities/platform-util/scripts/opt-platform.service delete mode 100644 utilities/platform-util/scripts/patch-restart-haproxy delete mode 100755 utilities/platform-util/scripts/patch-restart-mtce delete mode 100755 utilities/platform-util/scripts/patch-restart-processes delete mode 100755 utilities/platform-util/scripts/remotelogging_tc_setup.sh delete mode 100644 utilities/tis-extensions/PKG-INFO delete mode 100644 utilities/tis-extensions/centos/build_srpm.data delete mode 100644 utilities/tis-extensions/centos/tis-extensions.spec delete mode 100644 utilities/tis-extensions/files/LICENSE delete mode 100644 utilities/tis-extensions/files/coredump-sysctl.conf delete mode 100644 utilities/tis-extensions/files/coredump.conf delete mode 100644 utilities/tis-extensions/files/modules-load-vfio.conf delete mode 100644 utilities/update-motd/LICENSE delete mode 100644 utilities/update-motd/PKG-INFO delete mode 100644 utilities/update-motd/centos/build_srpm.data delete mode 100644 utilities/update-motd/centos/update-motd.spec delete mode 100644 utilities/update-motd/files/LICENSE delete mode 100644 utilities/update-motd/files/apply_banner_customization delete mode 100644 utilities/update-motd/files/customize-banner delete mode 100644 utilities/update-motd/files/install_banner_customization delete mode 100644 utilities/update-motd/files/motd-footer delete mode 100644 utilities/update-motd/files/motd-header delete mode 100644 utilities/update-motd/files/motd-update delete mode 100644 utilities/update-motd/files/motd-update.cron delete mode 100644 utilities/update-motd/files/motd.head diff --git a/.zuul.yaml b/.zuul.yaml index e788bc813..9ef67301a 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -159,21 +159,6 @@ q-svc: false # Try this as a tox-based job with a minimal functional target in tox.ini -- job: - name: flock-devstack-integ - parent: flock-devstack-tox-base-min - timeout: 7800 - required-projects: - - starlingx/update - vars: - tox_envlist: functional - devstack_services: - # StarlingX services - platform-util: true - 
devstack_plugins: - integ: https://opendev.org/starlingx/integ - update: https://opendev.org/starlingx/update - - job: name: stx-integ-pylint parent: openstack-tox-pylint diff --git a/ceph/ceph-manager/.gitignore b/ceph/ceph-manager/.gitignore deleted file mode 100644 index 78868598f..000000000 --- a/ceph/ceph-manager/.gitignore +++ /dev/null @@ -1,6 +0,0 @@ -!.distro -.distro/centos7/rpmbuild/RPMS -.distro/centos7/rpmbuild/SRPMS -.distro/centos7/rpmbuild/BUILD -.distro/centos7/rpmbuild/BUILDROOT -.distro/centos7/rpmbuild/SOURCES/ceph-manager*tar.gz diff --git a/ceph/ceph-manager/LICENSE b/ceph/ceph-manager/LICENSE deleted file mode 100644 index d64569567..000000000 --- a/ceph/ceph-manager/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/ceph/ceph-manager/PKG-INFO b/ceph/ceph-manager/PKG-INFO deleted file mode 100644 index 5b6746d87..000000000 --- a/ceph/ceph-manager/PKG-INFO +++ /dev/null @@ -1,13 +0,0 @@ -Metadata-Version: 1.1 -Name: ceph-manager -Version: 1.0 -Summary: Handle Ceph API calls and provide status updates via alarms -Home-page: -Author: Windriver -Author-email: info@windriver.com -License: Apache-2.0 - -Description: Handle Ceph API calls and provide status updates via alarms - - -Platform: UNKNOWN diff --git a/ceph/ceph-manager/centos/build_srpm.data b/ceph/ceph-manager/centos/build_srpm.data deleted file mode 100644 index f951ada81..000000000 --- a/ceph/ceph-manager/centos/build_srpm.data +++ /dev/null @@ -1,3 +0,0 @@ -SRC_DIR="ceph-manager" -COPY_LIST_TO_TAR="files scripts" -TIS_PATCH_VER=5 diff --git a/ceph/ceph-manager/centos/ceph-manager.spec b/ceph/ceph-manager/centos/ceph-manager.spec deleted file mode 100644 index 068855826..000000000 --- a/ceph/ceph-manager/centos/ceph-manager.spec +++ /dev/null @@ -1,84 +0,0 @@ -Summary: Handle Ceph API calls and provide status updates via alarms -Name: ceph-manager -Version: 1.0 -Release: %{tis_patch_ver}%{?_tis_dist} -License: Apache-2.0 -Group: base -Packager: Wind River -URL: unknown -Source0: %{name}-%{version}.tar.gz - -BuildRequires: python-setuptools -BuildRequires: python2-pip -BuildRequires: python2-wheel -BuildRequires: systemd-units -BuildRequires: systemd-devel -Requires: sysinv - -%description -Handle Ceph API calls and provide status updates via alarms. 
-Handle sysinv RPC calls for long running Ceph API operations: -- cache tiering enable -- cache tiering disable - -%define local_bindir /usr/bin/ -%define local_etc_initd /etc/init.d/ -%define local_etc_logrotated /etc/logrotate.d/ -%define pythonroot /usr/lib64/python2.7/site-packages - -%define debug_package %{nil} - -%prep -%setup - -%build -%{__python} setup.py build -%py2_build_wheel - -%install -%{__python} setup.py install --root=$RPM_BUILD_ROOT \ - --install-lib=%{pythonroot} \ - --prefix=/usr \ - --install-data=/usr/share \ - --single-version-externally-managed -mkdir -p $RPM_BUILD_ROOT/wheels -install -m 644 dist/*.whl $RPM_BUILD_ROOT/wheels/ - -install -d -m 755 %{buildroot}%{local_etc_initd} -install -p -D -m 700 scripts/init.d/ceph-manager %{buildroot}%{local_etc_initd}/ceph-manager - -install -d -m 755 %{buildroot}%{local_bindir} -install -p -D -m 700 scripts/bin/ceph-manager %{buildroot}%{local_bindir}/ceph-manager - -install -d -m 755 %{buildroot}%{local_etc_logrotated} -install -p -D -m 644 files/ceph-manager.logrotate %{buildroot}%{local_etc_logrotated}/ceph-manager.logrotate - -install -d -m 755 %{buildroot}%{_unitdir} -install -m 644 -p -D files/%{name}.service %{buildroot}%{_unitdir}/%{name}.service - -%clean -rm -rf $RPM_BUILD_ROOT - -# Note: The package name is ceph-manager but the import name is ceph_manager so -# can't use '%{name}'. -%files -%defattr(-,root,root,-) -%doc LICENSE -%{local_bindir}/* -%{local_etc_initd}/* -%{_unitdir}/%{name}.service -%dir %{local_etc_logrotated} -%{local_etc_logrotated}/* -%dir %{pythonroot}/ceph_manager -%{pythonroot}/ceph_manager/* -%dir %{pythonroot}/ceph_manager-%{version}.0-py2.7.egg-info -%{pythonroot}/ceph_manager-%{version}.0-py2.7.egg-info/* - -%package wheels -Summary: %{name} wheels - -%description wheels -Contains python wheels for %{name} - -%files wheels -/wheels/* diff --git a/ceph/ceph-manager/ceph-manager/LICENSE b/ceph/ceph-manager/ceph-manager/LICENSE deleted file mode 100644 index d64569567..000000000 --- a/ceph/ceph-manager/ceph-manager/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. 
- - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/ceph/ceph-manager/ceph-manager/ceph_manager/__init__.py b/ceph/ceph-manager/ceph-manager/ceph_manager/__init__.py deleted file mode 100644 index 754a8f4ef..000000000 --- a/ceph/ceph-manager/ceph-manager/ceph_manager/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -# -# Copyright (c) 2016 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# diff --git a/ceph/ceph-manager/ceph-manager/ceph_manager/ceph.py b/ceph/ceph-manager/ceph-manager/ceph_manager/ceph.py deleted file mode 100644 index 68260ce26..000000000 --- a/ceph/ceph-manager/ceph-manager/ceph_manager/ceph.py +++ /dev/null @@ -1,161 +0,0 @@ -# -# Copyright (c) 2016-2018 Wind River Systems, Inc. 
-# -# SPDX-License-Identifier: Apache-2.0 -# - -from ceph_manager import exception -from ceph_manager.i18n import _LI -# noinspection PyUnresolvedReferences -from oslo_log import log as logging - - -LOG = logging.getLogger(__name__) - - -def osd_pool_set_quota(ceph_api, pool_name, max_bytes=0, max_objects=0): - """Set the quota for an OSD pool_name - - Setting max_bytes or max_objects to 0 will disable that quota param - :param pool_name: OSD pool_name - :param max_bytes: maximum bytes for OSD pool_name - :param max_objects: maximum objects for OSD pool_name - """ - - # Update quota if needed - prev_quota = osd_pool_get_quota(ceph_api, pool_name) - if prev_quota["max_bytes"] != max_bytes: - resp, b = ceph_api.osd_set_pool_quota(pool_name, 'max_bytes', - max_bytes, body='json') - if resp.ok: - LOG.info(_LI("Set OSD pool_name quota: " - "pool_name={}, max_bytes={}").format( - pool_name, max_bytes)) - else: - e = exception.CephPoolSetQuotaFailure( - pool=pool_name, name='max_bytes', - value=max_bytes, reason=resp.reason) - LOG.error(e) - raise e - if prev_quota["max_objects"] != max_objects: - resp, b = ceph_api.osd_set_pool_quota(pool_name, 'max_objects', - max_objects, - body='json') - if resp.ok: - LOG.info(_LI("Set OSD pool_name quota: " - "pool_name={}, max_objects={}").format( - pool_name, max_objects)) - else: - e = exception.CephPoolSetQuotaFailure( - pool=pool_name, name='max_objects', - value=max_objects, reason=resp.reason) - LOG.error(e) - raise e - - -def osd_pool_get_quota(ceph_api, pool_name): - resp, quota = ceph_api.osd_get_pool_quota(pool_name, body='json') - if not resp.ok: - e = exception.CephPoolGetQuotaFailure( - pool=pool_name, reason=resp.reason) - LOG.error(e) - raise e - else: - return {"max_objects": quota["output"]["quota_max_objects"], - "max_bytes": quota["output"]["quota_max_bytes"]} - - -def osd_pool_exists(ceph_api, pool_name): - response, body = ceph_api.osd_pool_get( - pool_name, "pg_num", body='json') - if response.ok: - return True - return False - - -def osd_pool_create(ceph_api, pool_name, pg_num, pgp_num): - # ruleset 0: is the default ruleset if no crushmap is loaded or - # the ruleset for the backing tier if loaded: - # Name: storage_tier_ruleset - ruleset = 0 - response, body = ceph_api.osd_pool_create( - pool_name, pg_num, pgp_num, pool_type="replicated", - ruleset=ruleset, body='json') - if response.ok: - LOG.info(_LI("Created OSD pool: " - "pool_name={}, pg_num={}, pgp_num={}, " - "pool_type=replicated, ruleset={}").format( - pool_name, pg_num, pgp_num, ruleset)) - else: - e = exception.CephPoolCreateFailure( - name=pool_name, reason=response.reason) - LOG.error(e) - raise e - - # Explicitly assign the ruleset to the pool on creation since it is - # ignored in the create call - response, body = ceph_api.osd_set_pool_param( - pool_name, "crush_ruleset", ruleset, body='json') - if response.ok: - LOG.info(_LI("Assigned crush ruleset to OS pool: " - "pool_name={}, ruleset={}").format( - pool_name, ruleset)) - else: - e = exception.CephPoolRulesetFailure( - name=pool_name, reason=response.reason) - LOG.error(e) - ceph_api.osd_pool_delete( - pool_name, pool_name, - sure='--yes-i-really-really-mean-it', - body='json') - raise e - - -def osd_pool_delete(ceph_api, pool_name): - """Delete an osd pool - - :param pool_name: pool name - """ - response, body = ceph_api.osd_pool_delete( - pool_name, pool_name, - sure='--yes-i-really-really-mean-it', - body='json') - if response.ok: - LOG.info(_LI("Deleted OSD pool {}").format(pool_name)) - else: - e = 
exception.CephPoolDeleteFailure( - name=pool_name, reason=response.reason) - LOG.warn(e) - raise e - - -def osd_set_pool_param(ceph_api, pool_name, param, value): - response, body = ceph_api.osd_set_pool_param( - pool_name, param, value, - force=None, body='json') - if response.ok: - LOG.info('OSD set pool param: ' - 'pool={}, name={}, value={}'.format( - pool_name, param, value)) - else: - raise exception.CephPoolSetParamFailure( - pool_name=pool_name, - param=param, - value=str(value), - reason=response.reason) - return response, body - - -def osd_get_pool_param(ceph_api, pool_name, param): - response, body = ceph_api.osd_get_pool_param( - pool_name, param, body='json') - if response.ok: - LOG.debug('OSD get pool param: ' - 'pool={}, name={}, value={}'.format( - pool_name, param, body['output'][param])) - else: - raise exception.CephPoolGetParamFailure( - pool_name=pool_name, - param=param, - reason=response.reason) - return body['output'][param] diff --git a/ceph/ceph-manager/ceph-manager/ceph_manager/constants.py b/ceph/ceph-manager/ceph-manager/ceph_manager/constants.py deleted file mode 100644 index 359a7381e..000000000 --- a/ceph/ceph-manager/ceph-manager/ceph_manager/constants.py +++ /dev/null @@ -1,90 +0,0 @@ -# -# Copyright (c) 2016-2018 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - -from ceph_manager.i18n import _ -# noinspection PyUnresolvedReferences -from sysinv.common import constants as sysinv_constants - -CEPH_POOL_OBJECT_GATEWAY_NAME_JEWEL = \ - sysinv_constants.CEPH_POOL_OBJECT_GATEWAY_NAME_JEWEL -CEPH_POOL_OBJECT_GATEWAY_NAME_HAMMER = \ - sysinv_constants.CEPH_POOL_OBJECT_GATEWAY_NAME_HAMMER -CEPH_POOLS = sysinv_constants.CEPH_POOLS -CEPH_REPLICATION_FACTOR = sysinv_constants.CEPH_REPLICATION_FACTOR_DEFAULT - -# Cache flush parameters -CACHE_FLUSH_OBJECTS_THRESHOLD = 1000 -CACHE_FLUSH_MIN_WAIT_OBJ_COUNT_DECREASE_SEC = 1 -CACHE_FLUSH_MAX_WAIT_OBJ_COUNT_DECREASE_SEC = 128 - -FM_ALARM_REASON_MAX_SIZE = 256 - -# TODO this will later change based on parsed health -# clock skew is vm malfunction, mon or osd is equipment mal -ALARM_CAUSE = 'equipment-malfunction' -ALARM_TYPE = 'equipment' - -# Ceph health check interval (in seconds) -CEPH_HEALTH_CHECK_INTERVAL = 60 - -# Ceph health statuses -CEPH_HEALTH_OK = 'HEALTH_OK' -CEPH_HEALTH_WARN = 'HEALTH_WARN' -CEPH_HEALTH_ERR = 'HEALTH_ERR' -CEPH_HEALTH_DOWN = 'CEPH_DOWN' - -# Statuses not reported by Ceph -CEPH_STATUS_CUSTOM = [CEPH_HEALTH_DOWN] - -SEVERITY = {CEPH_HEALTH_DOWN: 'critical', - CEPH_HEALTH_ERR: 'critical', - CEPH_HEALTH_WARN: 'warning'} - -SERVICE_AFFECTING = {CEPH_HEALTH_DOWN: True, - CEPH_HEALTH_ERR: True, - CEPH_HEALTH_WARN: False} - -# TODO this will later change based on parsed health -ALARM_REASON_NO_OSD = _('no OSDs') -ALARM_REASON_OSDS_DOWN = _('OSDs are down') -ALARM_REASON_OSDS_OUT = _('OSDs are out') -ALARM_REASON_OSDS_DOWN_OUT = _('OSDs are down/out') -ALARM_REASON_PEER_HOST_DOWN = _('peer host down') - -REPAIR_ACTION_MAJOR_CRITICAL_ALARM = _( - 'Ensure storage hosts from replication group are unlocked and available.' - 'Check if OSDs of each storage host are up and running.' 
-    'If problem persists, contact next level of support.')
-REPAIR_ACTION = _('If problem persists, contact next level of support.')
-
-SYSINV_CONDUCTOR_TOPIC = 'sysinv.conductor_manager'
-CEPH_MANAGER_TOPIC = 'sysinv.ceph_manager'
-SYSINV_CONFIG_FILE = '/etc/sysinv/sysinv.conf'
-
-# Titanium Cloud version strings
-TITANIUM_SERVER_VERSION_18_03 = '18.03'
-
-CEPH_HEALTH_WARN_REQUIRE_JEWEL_OSDS_NOT_SET = (
-    "all OSDs are running jewel or later but the "
-    "'require_jewel_osds' osdmap flag is not set")
-
-UPGRADE_COMPLETED = \
-    sysinv_constants.UPGRADE_COMPLETED
-UPGRADE_ABORTING = \
-    sysinv_constants.UPGRADE_ABORTING
-UPGRADE_ABORT_COMPLETING = \
-    sysinv_constants.UPGRADE_ABORT_COMPLETING
-UPGRADE_ABORTING_ROLLBACK = \
-    sysinv_constants.UPGRADE_ABORTING_ROLLBACK
-
-CEPH_FLAG_REQUIRE_JEWEL_OSDS = 'require_jewel_osds'
-
-# Tiers
-CEPH_CRUSH_TIER_SUFFIX = sysinv_constants.CEPH_CRUSH_TIER_SUFFIX
-SB_TIER_TYPE_CEPH = sysinv_constants.SB_TIER_TYPE_CEPH
-SB_TIER_SUPPORTED = sysinv_constants.SB_TIER_SUPPORTED
-SB_TIER_DEFAULT_NAMES = sysinv_constants.SB_TIER_DEFAULT_NAMES
-SB_TIER_CEPH_POOLS = sysinv_constants.SB_TIER_CEPH_POOLS
diff --git a/ceph/ceph-manager/ceph-manager/ceph_manager/exception.py b/ceph/ceph-manager/ceph-manager/ceph_manager/exception.py
deleted file mode 100644
index 8fc7c80e4..000000000
--- a/ceph/ceph-manager/ceph-manager/ceph_manager/exception.py
+++ /dev/null
@@ -1,79 +0,0 @@
-#
-# Copyright (c) 2016-2018 Wind River Systems, Inc.
-#
-# SPDX-License-Identifier: Apache-2.0
-#
-
-# noinspection PyUnresolvedReferences
-from ceph_manager.i18n import _
-from ceph_manager.i18n import _LW
-# noinspection PyUnresolvedReferences
-from oslo_log import log as logging
-
-
-LOG = logging.getLogger(__name__)
-
-
-class CephManagerException(Exception):
-    message = _("An unknown exception occurred.")
-
-    def __init__(self, message=None, **kwargs):
-        self.kwargs = kwargs
-        if not message:
-            try:
-                message = self.message % kwargs
-            except TypeError:
-                LOG.warn(_LW('Exception in string format operation'))
-                for name, value in kwargs.items():
-                    LOG.error("%s: %s" % (name, value))
-                # at least get the core message out if something happened
-                message = self.message
-        super(CephManagerException, self).__init__(message)
-
-
-class CephPoolSetQuotaFailure(CephManagerException):
-    message = _("Error setting the OSD pool "
-                "quota %(name)s for %(pool)s to "
-                "%(value)s") + ": %(reason)s"
-
-
-class CephPoolGetQuotaFailure(CephManagerException):
-    message = _("Error getting the OSD pool quota for "
                "%(pool)s") + ": %(reason)s"
-
-
-class CephPoolCreateFailure(CephManagerException):
-    message = _("Creating OSD pool %(name)s failed: %(reason)s")
-
-
-class CephPoolDeleteFailure(CephManagerException):
-    message = _("Deleting OSD pool %(name)s failed: %(reason)s")
-
-
-class CephPoolRulesetFailure(CephManagerException):
-    message = _("Assigning crush ruleset to OSD "
-                "pool %(name)s failed: %(reason)s")
-
-
-class CephPoolSetParamFailure(CephManagerException):
-    message = _("Cannot set Ceph OSD pool parameter: "
-                "pool_name=%(pool_name)s, param=%(param)s, value=%(value)s. "
-                "Reason: %(reason)s")
-
-
-class CephPoolGetParamFailure(CephManagerException):
-    message = _("Cannot get Ceph OSD pool parameter: "
-                "pool_name=%(pool_name)s, param=%(param)s. 
" - "Reason: %(reason)s") - - -class CephSetKeyFailure(CephManagerException): - message = _("Error setting the Ceph flag " - "'%(flag)s' %(extra)s: " - "response=%(response_status_code)s:%(response_reason)s, " - "status=%(status)s, output=%(output)s") - - -class CephApiFailure(CephManagerException): - message = _("API failure: " - "call=%(call)s, reason=%(reason)s") diff --git a/ceph/ceph-manager/ceph-manager/ceph_manager/i18n.py b/ceph/ceph-manager/ceph-manager/ceph_manager/i18n.py deleted file mode 100644 index 67977ceae..000000000 --- a/ceph/ceph-manager/ceph-manager/ceph_manager/i18n.py +++ /dev/null @@ -1,15 +0,0 @@ -# -# Copyright (c) 2016 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# -import oslo_i18n - -DOMAIN = 'ceph-manager' - -_translators = oslo_i18n.TranslatorFactory(domain=DOMAIN) -_ = _translators.primary - -_LI = _translators.log_info -_LW = _translators.log_warning -_LE = _translators.log_error diff --git a/ceph/ceph-manager/ceph-manager/ceph_manager/monitor.py b/ceph/ceph-manager/ceph-manager/ceph_manager/monitor.py deleted file mode 100644 index 16228e0fa..000000000 --- a/ceph/ceph-manager/ceph-manager/ceph_manager/monitor.py +++ /dev/null @@ -1,883 +0,0 @@ -# -# Copyright (c) 2013-2018 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - -import time - -# noinspection PyUnresolvedReferences -from fm_api import fm_api -# noinspection PyUnresolvedReferences -from fm_api import constants as fm_constants -# noinspection PyUnresolvedReferences -from oslo_log import log as logging - -# noinspection PyProtectedMember -from ceph_manager.i18n import _ -from ceph_manager.i18n import _LI -from ceph_manager.i18n import _LW -from ceph_manager.i18n import _LE - -from ceph_manager import constants -from ceph_manager import exception - -LOG = logging.getLogger(__name__) - - -# In 18.03 R5, ceph cache tiering was disabled and prevented from being -# re-enabled. 
When upgrading from 18.03 (R5) to R6 we need to remove the
-# cache-tier from the crushmap ceph-cache-tiering
-#
-# This class is needed only when upgrading from R5 to R6
-# TODO: remove it after 1st R6 release
-#
-class HandleUpgradesMixin(object):
-
-    def __init__(self, service):
-        self.service = service
-        self.wait_for_upgrade_complete = False
-
-    def setup(self, config):
-        self._set_upgrade(self.service.retry_get_software_upgrade_status())
-
-    def _set_upgrade(self, upgrade):
-        state = upgrade.get('state')
-        from_version = upgrade.get('from_version')
-        if (state
-                and state != constants.UPGRADE_COMPLETED
-                and from_version == constants.TITANIUM_SERVER_VERSION_18_03):
-
-            LOG.info(_LI("Wait for ceph upgrade to complete "
-                         "before monitoring cluster."))
-            self.wait_for_upgrade_complete = True
-
-    def set_flag_require_jewel_osds(self):
-        try:
-            response, body = self.service.ceph_api.osd_set_key(
-                constants.CEPH_FLAG_REQUIRE_JEWEL_OSDS,
-                body='json')
-            LOG.info(_LI("Set require_jewel_osds flag"))
-        except IOError as e:
-            raise exception.CephApiFailure(
-                call="osd_set_key",
-                reason=str(e))
-        else:
-            if not response.ok:
-                raise exception.CephSetKeyFailure(
-                    flag=constants.CEPH_FLAG_REQUIRE_JEWEL_OSDS,
-                    extra=_("needed to complete upgrade to Jewel"),
-                    response_status_code=response.status_code,
-                    response_reason=response.reason,
-                    status=body.get('status'),
-                    output=body.get('output'))
-
-    def filter_health_status(self, health):
-        health = self.auto_heal(health)
-        # filter out require_jewel_osds warning
-        #
-        if not self.wait_for_upgrade_complete:
-            return health
-        if health['health'] != constants.CEPH_HEALTH_WARN:
-            return health
-        if (constants.CEPH_HEALTH_WARN_REQUIRE_JEWEL_OSDS_NOT_SET
-                not in health['detail']):
-            return health
-        return self._remove_require_jewel_osds_warning(health)
-
-    def _remove_require_jewel_osds_warning(self, health):
-        reasons_list = []
-        for reason in health['detail'].split(';'):
-            reason = reason.strip()
-            if len(reason) == 0:
-                continue
-            if constants.CEPH_HEALTH_WARN_REQUIRE_JEWEL_OSDS_NOT_SET \
-                    in reason:
-                continue
-            reasons_list.append(reason)
-        if len(reasons_list) == 0:
-            health = {
-                'health': constants.CEPH_HEALTH_OK,
-                'detail': ''}
-        else:
-            health['detail'] = '; '.join(reasons_list)
-        return health
-
-    def auto_heal(self, health):
-        if (health['health'] == constants.CEPH_HEALTH_WARN
-                and (constants.CEPH_HEALTH_WARN_REQUIRE_JEWEL_OSDS_NOT_SET
-                     in health['detail'])):
-            try:
-                upgrade = self.service.get_software_upgrade_status()
-            except Exception as ex:
-                LOG.warn(_LW(
-                    "Getting software upgrade status failed "
-                    "with: %s. Skip auto-heal attempt "
-                    "(will retry on next ceph status poll).") % str(ex))
-                return health
-            state = upgrade.get('state')
-            # suppress require_jewel_osds in case upgrade is
-            # in progress but not completed or aborting
-            if (not self.wait_for_upgrade_complete
-                    and (upgrade.get('from_version')
-                         == constants.TITANIUM_SERVER_VERSION_18_03)
-                    and state not in [
-                        None,
-                        constants.UPGRADE_COMPLETED,
-                        constants.UPGRADE_ABORTING,
-                        constants.UPGRADE_ABORT_COMPLETING,
-                        constants.UPGRADE_ABORTING_ROLLBACK]):
-                self.wait_for_upgrade_complete = True
-            # set require_jewel_osds in case upgrade is
-            # not in progress or completed
-            if (state in [None, constants.UPGRADE_COMPLETED]):
-                LOG.warn(_LW(
-                    "No upgrade in progress or upgrade completed "
-                    "and require_jewel_osds health warning raised. "
-                    "Set require_jewel_osds flag."))
-                self.set_flag_require_jewel_osds()
-                health = self._remove_require_jewel_osds_warning(health)
-                LOG.info(_LI("Unsuppress require_jewel_osds health warning"))
-                self.wait_for_upgrade_complete = False
-            # unsuppress require_jewel_osds in case upgrade
-            # is aborting
-            if (state in [
-                    constants.UPGRADE_ABORTING,
-                    constants.UPGRADE_ABORT_COMPLETING,
-                    constants.UPGRADE_ABORTING_ROLLBACK]):
-                self.wait_for_upgrade_complete = False
-        return health
-
-
-class Monitor(HandleUpgradesMixin):
-
-    def __init__(self, service):
-        self.service = service
-        self.current_ceph_health = ""
-        self.tiers_size = {}
-        self.known_object_pool_name = None
-        self.primary_tier_name = constants.SB_TIER_DEFAULT_NAMES[
-            constants.SB_TIER_TYPE_CEPH] + constants.CEPH_CRUSH_TIER_SUFFIX
-        self.cluster_is_up = False
-        super(Monitor, self).__init__(service)
-
-    def setup(self, config):
-        super(Monitor, self).setup(config)
-
-    def run(self):
-        # Wait until Ceph cluster is up and we can get the fsid
-        while True:
-            try:
-                self.ceph_get_fsid()
-            except Exception:
-                LOG.exception(
-                    "Error getting fsid, will retry in %ss"
-                    % constants.CEPH_HEALTH_CHECK_INTERVAL)
-            if self.service.entity_instance_id:
-                break
-            time.sleep(constants.CEPH_HEALTH_CHECK_INTERVAL)
-
-        # Start monitoring ceph status
-        while True:
-            try:
-                self.ceph_poll_status()
-                self.ceph_poll_quotas()
-            except Exception:
-                LOG.exception(
-                    "Error running periodic monitoring of ceph status, "
-                    "will retry in %ss"
-                    % constants.CEPH_HEALTH_CHECK_INTERVAL)
-            time.sleep(constants.CEPH_HEALTH_CHECK_INTERVAL)
-
-    def ceph_get_fsid(self):
-        # Check whether an alarm has already been raised
-        self._get_current_alarms()
-        if self.current_health_alarm:
-            LOG.info(_LI("Current alarm: %s") %
-                     str(self.current_health_alarm.__dict__))
-
-        fsid = self._get_fsid()
-        if not fsid:
-            # Raise alarm - it will not have an entity_instance_id
-            self._report_fault({'health': constants.CEPH_HEALTH_DOWN,
-                                'detail': 'Ceph cluster is down.'},
-                               fm_constants.FM_ALARM_ID_STORAGE_CEPH)
-        else:
-            # Clear alarm with no entity_instance_id
-            self._clear_fault(fm_constants.FM_ALARM_ID_STORAGE_CEPH)
-            self.service.entity_instance_id = 'cluster=%s' % fsid
-
-    def ceph_poll_status(self):
-        # get previous data every time in case:
-        # * daemon restarted
-        # * alarm was cleared manually but stored as raised in daemon
-        self._get_current_alarms()
-        if self.current_health_alarm:
-            LOG.info(_LI("Current alarm: %s") %
-                     str(self.current_health_alarm.__dict__))
-
-        # get ceph health
-        health = self._get_health()
-        LOG.info(_LI("Current Ceph health: "
-                     "%(health)s detail: %(detail)s") % health)
-
-        health = self.filter_health_status(health)
-        if health['health'] != constants.CEPH_HEALTH_OK:
-            self._report_fault(health, fm_constants.FM_ALARM_ID_STORAGE_CEPH)
-            self._report_alarm_osds_health()
-        else:
-            self._clear_fault(fm_constants.FM_ALARM_ID_STORAGE_CEPH)
-            self.clear_all_major_critical()
-
-    def filter_health_status(self, health):
-        return super(Monitor, self).filter_health_status(health)
-
-    def ceph_poll_quotas(self):
-        self._get_current_alarms()
-        if self.current_quota_alarms:
-            LOG.info(_LI("Current quota alarms %s") %
-                     self.current_quota_alarms)
-
-        # Get the current size of each tier
-        previous_tiers_size = self.tiers_size
-        self.tiers_size = self._get_tiers_size()
-
-        # Make sure any removed tiers have the alarms cleared
-        for t in (set(previous_tiers_size) - set(self.tiers_size)):
-            self._clear_fault(fm_constants.FM_ALARM_ID_STORAGE_CEPH_FREE_SPACE,
-                              "{0}.tier={1}".format(
-                                  self.service.entity_instance_id,
-                                  t[:-len(constants.CEPH_CRUSH_TIER_SUFFIX)]))
-
-        # Check the quotas on each tier
-        for tier in self.tiers_size:
-            # Extract the tier name from the crush equivalent
-            tier_name = tier[:-len(constants.CEPH_CRUSH_TIER_SUFFIX)]
-
-            if self.tiers_size[tier] == 0:
-                LOG.info(_LI("'%s' tier cluster size not yet available")
-                         % tier_name)
-                continue
-
-            pools_quota_sum = 0
-            if tier == self.primary_tier_name:
-                for pool in constants.CEPH_POOLS:
-                    if (pool['pool_name'] ==
-                            constants.CEPH_POOL_OBJECT_GATEWAY_NAME_JEWEL or
-                            pool['pool_name'] ==
-                            constants.CEPH_POOL_OBJECT_GATEWAY_NAME_HAMMER):
-                        object_pool_name = self._get_object_pool_name()
-                        if object_pool_name is None:
-                            LOG.error("Rados gateway object data pool does "
-                                      "not exist.")
-                        else:
-                            pools_quota_sum += \
-                                self._get_osd_pool_quota(object_pool_name)
-                    else:
-                        pools_quota_sum += self._get_osd_pool_quota(
-                            pool['pool_name'])
-            else:
-                for pool in constants.SB_TIER_CEPH_POOLS:
-                    pool_name = "{0}-{1}".format(pool['pool_name'], tier_name)
-                    pools_quota_sum += self._get_osd_pool_quota(pool_name)
-
-            # Currently, there is only one pool on the additional tier(s),
-            # therefore allow a quota of 0
-            if (pools_quota_sum != self.tiers_size[tier] and
-                    pools_quota_sum != 0):
-                self._report_fault(
-                    {'tier_name': tier_name,
-                     'tier_eid': "{0}.tier={1}".format(
-                         self.service.entity_instance_id,
-                         tier_name)},
-                    fm_constants.FM_ALARM_ID_STORAGE_CEPH_FREE_SPACE)
-            else:
-                self._clear_fault(
-                    fm_constants.FM_ALARM_ID_STORAGE_CEPH_FREE_SPACE,
-                    "{0}.tier={1}".format(self.service.entity_instance_id,
-                                          tier_name))
-
-    # CEPH HELPERS
-
-    def _get_fsid(self):
-        try:
-            response, fsid = self.service.ceph_api.fsid(
-                body='text', timeout=30)
-        except IOError as e:
-            LOG.warning(_LW("ceph_api.fsid failed: %s") % str(e))
-            self.cluster_is_up = False
-            return None
-
-        if not response.ok:
-            LOG.warning(_LW("Get fsid failed: %s") % response.reason)
-            self.cluster_is_up = False
-            return None
-
-        self.cluster_is_up = True
-        return fsid.strip()
-
-    def _get_health(self):
-        try:
-            # we use text since it has all info
-            response, body = self.service.ceph_api.health(
-                body='text', timeout=30)
-        except IOError as e:
-            LOG.warning(_LW("ceph_api.health failed: %s") % str(e))
-            self.cluster_is_up = False
-            return {'health': constants.CEPH_HEALTH_DOWN,
-                    'detail': 'Ceph cluster is down.'}
-
-        if not response.ok:
-            LOG.warning(_LW("CEPH health check failed: %s") % response.reason)
-            health_info = [constants.CEPH_HEALTH_DOWN, response.reason]
-            self.cluster_is_up = False
-        else:
-            health_info = body.split(' ', 1)
-            self.cluster_is_up = True
-
-        health = health_info[0]
-
-        if len(health_info) > 1:
-            detail = health_info[1]
-        else:
-            detail = health_info[0]
-
-        return {'health': health.strip(),
-                'detail': detail.strip()}
-
-    def _get_object_pool_name(self):
-        if self.known_object_pool_name is None:
-            response, body = self.service.ceph_api.osd_pool_get(
-                constants.CEPH_POOL_OBJECT_GATEWAY_NAME_JEWEL,
-                "pg_num",
-                body='json')
-
-            if response.ok:
-                self.known_object_pool_name = \
-                    constants.CEPH_POOL_OBJECT_GATEWAY_NAME_JEWEL
-                return self.known_object_pool_name
-
-            response, body = self.service.ceph_api.osd_pool_get(
-                constants.CEPH_POOL_OBJECT_GATEWAY_NAME_HAMMER,
-                "pg_num",
-                body='json')
-
-            if response.ok:
-                self.known_object_pool_name = \
-                    constants.CEPH_POOL_OBJECT_GATEWAY_NAME_HAMMER
-                return self.known_object_pool_name
-
-        return self.known_object_pool_name
-
-    def _get_osd_pool_quota(self, pool_name):
-        try:
-            resp, quota = self.service.ceph_api.osd_get_pool_quota(
-                pool_name, body='json')
-        except IOError:
-            return 0
-
-        if not resp.ok:
-            LOG.error(_LE("Getting the quota for "
-                          "%(name)s pool failed: %(reason)s") %
-                      {"name": pool_name, "reason": resp.reason})
-            return 0
-        else:
-            try:
-                quota_gib = int(quota["output"]["quota_max_bytes"]) / (1024**3)
-                return quota_gib
-            except IOError:
-                return 0
-
-    # we have two root nodes 'cache-tier' and 'storage-tier'
-    # to calculate the space that is used by the pools, we must only
-    # use 'storage-tier'
-    # this function determines if a certain node is under a certain
-    # tree
-    def host_is_in_root(self, search_tree, node, root_name):
-        if node['type'] == 'root':
-            if node['name'] == root_name:
-                return True
-            else:
-                return False
-        return self.host_is_in_root(search_tree,
-                                    search_tree[node['parent']],
-                                    root_name)
-
-    # The information received from ceph is not properly
-    # structured for efficient parsing and searching, so
-    # it must be processed and transformed into a more
-    # structured form.
-    #
-    # Input received from ceph is an array of nodes with the
-    # following structure:
-    #   [{'id': <node_id>, 'children': <children_ids>, ....},
-    #    ...]
-    #
-    # We process this array and transform it into a dictionary
-    # (for efficient access). The transformed "search tree" is a
-    # dictionary with the following structure:
-    #   {<node_id>: {'children': <children_ids>}}
-    def _get_tiers_size(self):
-        try:
-            resp, body = self.service.ceph_api.osd_df(
-                body='json',
-                output_method='tree')
-        except IOError:
-            return 0
-        if not resp.ok:
-            LOG.error(_LE("Getting the cluster usage "
-                          "information failed: %(reason)s - "
-                          "%(body)s") % {"reason": resp.reason,
-                                         "body": body})
-            return {}
-
-        # A node is a crushmap element: root, chassis, host, osd. Create a
-        # dictionary for the nodes with the key as the id used for efficient
-        # searching through nodes.
-        #
-        # For example: storage-0's node has one child node => OSD 0
-        # {
-        #     "id": -4,
-        #     "name": "storage-0",
-        #     "type": "host",
-        #     "type_id": 1,
-        #     "reweight": -1.000000,
-        #     "kb": 51354096,
-        #     "kb_used": 1510348,
-        #     "kb_avail": 49843748,
-        #     "utilization": 2.941047,
-        #     "var": 1.480470,
-        #     "pgs": 0,
-        #     "children": [
-        #         0
-        #     ]
-        # },
-        search_tree = {}
-        for node in body['output']['nodes']:
-            search_tree[node['id']] = node
-
-        # Extract the tiers as we will return a dict for the size of each tier
-        tiers = {k: v for k, v in search_tree.items() if v['type'] == 'root'}
-
-        # For each tier, traverse the hierarchy from the root->chassis->host.
- # Sum the host sizes to determine the overall size of the tier - tier_sizes = {} - for tier in tiers.values(): - tier_size = 0 - for chassis_id in tier['children']: - chassis_size = 0 - chassis = search_tree[chassis_id] - for host_id in chassis['children']: - host = search_tree[host_id] - if (chassis_size == 0 or - chassis_size > host['kb']): - chassis_size = host['kb'] - tier_size += chassis_size / (1024**2) - tier_sizes[tier['name']] = tier_size - - return tier_sizes - - # ALARM HELPERS - - @staticmethod - def _check_storage_group(osd_tree, group_id, - hosts, osds, fn_report_alarm): - reasons = set() - degraded_hosts = set() - severity = fm_constants.FM_ALARM_SEVERITY_CRITICAL - for host_id in hosts: - if len(osds[host_id]) == 0: - reasons.add(constants.ALARM_REASON_NO_OSD) - degraded_hosts.add(host_id) - else: - for osd_id in osds[host_id]: - if osd_tree[osd_id]['status'] == 'up': - if osd_tree[osd_id]['reweight'] == 0.0: - reasons.add(constants.ALARM_REASON_OSDS_OUT) - degraded_hosts.add(host_id) - else: - severity = fm_constants.FM_ALARM_SEVERITY_MAJOR - elif osd_tree[osd_id]['status'] == 'down': - reasons.add(constants.ALARM_REASON_OSDS_DOWN) - degraded_hosts.add(host_id) - if constants.ALARM_REASON_OSDS_OUT in reasons \ - and constants.ALARM_REASON_OSDS_DOWN in reasons: - reasons.add(constants.ALARM_REASON_OSDS_DOWN_OUT) - reasons.remove(constants.ALARM_REASON_OSDS_OUT) - if constants.ALARM_REASON_OSDS_DOWN in reasons \ - and constants.ALARM_REASON_OSDS_DOWN_OUT in reasons: - reasons.remove(constants.ALARM_REASON_OSDS_DOWN) - reason = "/".join(list(reasons)) - if severity == fm_constants.FM_ALARM_SEVERITY_CRITICAL: - reason = "{} {}: {}".format( - fm_constants.ALARM_CRITICAL_REPLICATION, - osd_tree[group_id]['name'], - reason) - elif severity == fm_constants.FM_ALARM_SEVERITY_MAJOR: - reason = "{} {}: {}".format( - fm_constants.ALARM_MAJOR_REPLICATION, - osd_tree[group_id]['name'], - reason) - if len(degraded_hosts) == 0: - if len(hosts) < 2: - fn_report_alarm( - osd_tree[group_id]['name'], - "{} {}: {}".format( - fm_constants.ALARM_MAJOR_REPLICATION, - osd_tree[group_id]['name'], - constants.ALARM_REASON_PEER_HOST_DOWN), - fm_constants.FM_ALARM_SEVERITY_MAJOR) - elif len(degraded_hosts) == 1: - fn_report_alarm( - "{}.host={}".format( - osd_tree[group_id]['name'], - osd_tree[list(degraded_hosts)[0]]['name']), - reason, severity) - else: - fn_report_alarm( - osd_tree[group_id]['name'], - reason, severity) - - def _check_storage_tier(self, osd_tree, tier_name, fn_report_alarm): - for tier_id in osd_tree: - if osd_tree[tier_id]['type'] != 'root': - continue - if osd_tree[tier_id]['name'] != tier_name: - continue - for group_id in osd_tree[tier_id]['children']: - if osd_tree[group_id]['type'] != 'chassis': - continue - if not osd_tree[group_id]['name'].startswith('group-'): - continue - hosts = [] - osds = {} - for host_id in osd_tree[group_id]['children']: - if osd_tree[host_id]['type'] != 'host': - continue - hosts.append(host_id) - osds[host_id] = [] - for osd_id in osd_tree[host_id]['children']: - if osd_tree[osd_id]['type'] == 'osd': - osds[host_id].append(osd_id) - self._check_storage_group(osd_tree, group_id, hosts, - osds, fn_report_alarm) - break - - def _current_health_alarm_equals(self, reason, severity): - if not self.current_health_alarm: - return False - if getattr(self.current_health_alarm, 'severity', None) != severity: - return False - if getattr(self.current_health_alarm, 'reason_text', None) != reason: - return False - return True - - def 
_report_alarm_osds_health(self): - response, osd_tree = self.service.ceph_api.osd_tree(body='json') - if not response.ok: - LOG.error(_LE("Failed to retrieve Ceph OSD tree: " - "status_code: %(status_code)s, reason: %(reason)s") % - {"status_code": response.status_code, - "reason": response.reason}) - return - osd_tree = dict([(n['id'], n) for n in osd_tree['output']['nodes']]) - alarms = [] - - self._check_storage_tier(osd_tree, "storage-tier", - lambda *args: alarms.append(args)) - - old_alarms = {} - for alarm_id in [ - fm_constants.FM_ALARM_ID_STORAGE_CEPH_MAJOR, - fm_constants.FM_ALARM_ID_STORAGE_CEPH_CRITICAL]: - alarm_list = self.service.fm_api.get_faults_by_id(alarm_id) - if not alarm_list: - continue - for alarm in alarm_list: - if alarm.entity_instance_id not in old_alarms: - old_alarms[alarm.entity_instance_id] = [] - old_alarms[alarm.entity_instance_id].append( - (alarm.alarm_id, alarm.reason_text)) - - for peer_group, reason, severity in alarms: - if self._current_health_alarm_equals(reason, severity): - continue - alarm_critical_major = fm_constants.FM_ALARM_ID_STORAGE_CEPH_MAJOR - if severity == fm_constants.FM_ALARM_SEVERITY_CRITICAL: - alarm_critical_major = ( - fm_constants.FM_ALARM_ID_STORAGE_CEPH_CRITICAL) - entity_instance_id = ( - self.service.entity_instance_id + '.peergroup=' + peer_group) - alarm_already_exists = False - if entity_instance_id in old_alarms: - for alarm_id, old_reason in old_alarms[entity_instance_id]: - if (reason == old_reason and - alarm_id == alarm_critical_major): - # if the alarm is exactly the same, we don't need - # to recreate it - old_alarms[entity_instance_id].remove( - (alarm_id, old_reason)) - alarm_already_exists = True - elif (alarm_id == alarm_critical_major): - # if we change just the reason, then we just remove the - # alarm from the list so we don't remove it at the - # end of the function - old_alarms[entity_instance_id].remove( - (alarm_id, old_reason)) - - if (len(old_alarms[entity_instance_id]) == 0): - del old_alarms[entity_instance_id] - - # in case the alarm is exactly the same, we skip the alarm set - if alarm_already_exists is True: - continue - major_repair_action = constants.REPAIR_ACTION_MAJOR_CRITICAL_ALARM - fault = fm_api.Fault( - alarm_id=alarm_critical_major, - alarm_type=fm_constants.FM_ALARM_TYPE_4, - alarm_state=fm_constants.FM_ALARM_STATE_SET, - entity_type_id=fm_constants.FM_ENTITY_TYPE_CLUSTER, - entity_instance_id=entity_instance_id, - severity=severity, - reason_text=reason, - probable_cause=fm_constants.ALARM_PROBABLE_CAUSE_15, - proposed_repair_action=major_repair_action, - service_affecting=constants.SERVICE_AFFECTING['HEALTH_WARN']) - alarm_uuid = self.service.fm_api.set_fault(fault) - if alarm_uuid: - LOG.info(_LI( - "Created storage alarm %(alarm_uuid)s - " - "severity: %(severity)s, reason: %(reason)s, " - "service_affecting: %(service_affecting)s") % { - "alarm_uuid": str(alarm_uuid), - "severity": str(severity), - "reason": reason, - "service_affecting": str( - constants.SERVICE_AFFECTING['HEALTH_WARN'])}) - else: - LOG.error(_LE( - "Failed to create storage alarm - " - "severity: %(severity)s, reason: %(reason)s, " - "service_affecting: %(service_affecting)s") % { - "severity": str(severity), - "reason": reason, - "service_affecting": str( - constants.SERVICE_AFFECTING['HEALTH_WARN'])}) - - for entity_instance_id in old_alarms: - for alarm_id, old_reason in old_alarms[entity_instance_id]: - self.service.fm_api.clear_fault(alarm_id, entity_instance_id) - - @staticmethod - def 
_parse_reason(health): - """Parse reason strings received from Ceph""" - if health['health'] in constants.CEPH_STATUS_CUSTOM: - # Don't parse reason messages that we added - return "Storage Alarm Condition: %(health)s. %(detail)s" % health - - reasons_lst = health['detail'].split(';') - - parsed_reasons_text = "" - - # Check if PGs have issues - we can't safely store the entire message - # as it tends to be long - for reason in reasons_lst: - if "pgs" in reason: - parsed_reasons_text += "PGs are degraded/stuck or undersized" - break - - # Extract recovery status - parsed_reasons = [r.strip() for r in reasons_lst if 'recovery' in r] - if parsed_reasons: - parsed_reasons_text += ";" + ";".join(parsed_reasons) - - # We need to keep the most important parts of the messages when storing - # them to fm alarms, therefore text between [] brackets is truncated if - # max size is reached. - - # Add brackets, if needed - if len(parsed_reasons_text): - lbracket = " [" - rbracket = "]" - else: - lbracket = "" - rbracket = "" - - msg = {"head": "Storage Alarm Condition: ", - "tail": ". Please check 'ceph -s' for more details."} - max_size = constants.FM_ALARM_REASON_MAX_SIZE - \ - len(msg["head"]) - len(msg["tail"]) - - return ( - msg['head'] + - (health['health'] + lbracket - + parsed_reasons_text)[:max_size - 1] + - rbracket + msg['tail']) - - def _report_fault(self, health, alarm_id): - if alarm_id == fm_constants.FM_ALARM_ID_STORAGE_CEPH: - new_severity = constants.SEVERITY[health['health']] - new_reason_text = self._parse_reason(health) - new_service_affecting = \ - constants.SERVICE_AFFECTING[health['health']] - - # Raise or update alarm if necessary - if ((not self.current_health_alarm) or - (self.current_health_alarm.__dict__['severity'] != - new_severity) or - (self.current_health_alarm.__dict__['reason_text'] != - new_reason_text) or - (self.current_health_alarm.__dict__['service_affecting'] != - str(new_service_affecting))): - - fault = fm_api.Fault( - alarm_id=fm_constants.FM_ALARM_ID_STORAGE_CEPH, - alarm_type=fm_constants.FM_ALARM_TYPE_4, - alarm_state=fm_constants.FM_ALARM_STATE_SET, - entity_type_id=fm_constants.FM_ENTITY_TYPE_CLUSTER, - entity_instance_id=self.service.entity_instance_id, - severity=new_severity, - reason_text=new_reason_text, - probable_cause=fm_constants.ALARM_PROBABLE_CAUSE_15, - proposed_repair_action=constants.REPAIR_ACTION, - service_affecting=new_service_affecting) - - alarm_uuid = self.service.fm_api.set_fault(fault) - if alarm_uuid: - LOG.info(_LI( - "Created storage alarm %(alarm_uuid)s - " - "severity: %(severity)s, reason: %(reason)s, " - "service_affecting: %(service_affecting)s") % { - "alarm_uuid": alarm_uuid, - "severity": new_severity, - "reason": new_reason_text, - "service_affecting": new_service_affecting}) - else: - LOG.error(_LE( - "Failed to create storage alarm - " - "severity: %(severity)s, reason: %(reason)s " - "service_affecting: %(service_affecting)s") % { - "severity": new_severity, - "reason": new_reason_text, - "service_affecting": new_service_affecting}) - - # Log detailed reason for later analysis - if (self.current_ceph_health != health['health'] or - self.detailed_health_reason != health['detail']): - LOG.info(_LI("Ceph status changed: %(health)s " - "detailed reason: %(detail)s") % health) - self.current_ceph_health = health['health'] - self.detailed_health_reason = health['detail'] - - elif (alarm_id == fm_constants.FM_ALARM_ID_STORAGE_CEPH_FREE_SPACE and - not health['tier_eid'] in self.current_quota_alarms): - - quota_reason_text = 
("Quota/Space mismatch for the %s tier. The " - "sum of Ceph pool quotas does not match the " - "tier size." % health['tier_name']) - fault = fm_api.Fault( - alarm_id=fm_constants.FM_ALARM_ID_STORAGE_CEPH_FREE_SPACE, - alarm_state=fm_constants.FM_ALARM_STATE_SET, - entity_type_id=fm_constants.FM_ENTITY_TYPE_CLUSTER, - entity_instance_id=health['tier_eid'], - severity=fm_constants.FM_ALARM_SEVERITY_MINOR, - reason_text=quota_reason_text, - alarm_type=fm_constants.FM_ALARM_TYPE_7, - probable_cause=fm_constants.ALARM_PROBABLE_CAUSE_75, - proposed_repair_action=( - "Update ceph storage pool quotas to use all available " - "cluster space for the %s tier." % health['tier_name']), - service_affecting=False) - - alarm_uuid = self.service.fm_api.set_fault(fault) - if alarm_uuid: - LOG.info(_LI( - "Created storage quota storage alarm %(alarm_uuid)s. " - "Reason: %(reason)s") % { - "alarm_uuid": alarm_uuid, "reason": quota_reason_text}) - else: - LOG.error(_LE("Failed to create quota " - "storage alarm. Reason: %s") % quota_reason_text) - - def _clear_fault(self, alarm_id, entity_instance_id=None): - # Only clear alarm if there is one already raised - if (alarm_id == fm_constants.FM_ALARM_ID_STORAGE_CEPH and - self.current_health_alarm): - LOG.info(_LI("Clearing health alarm")) - self.service.fm_api.clear_fault( - fm_constants.FM_ALARM_ID_STORAGE_CEPH, - self.service.entity_instance_id) - elif (alarm_id == fm_constants.FM_ALARM_ID_STORAGE_CEPH_FREE_SPACE and - entity_instance_id in self.current_quota_alarms): - LOG.info(_LI("Clearing quota alarm with entity_instance_id %s") - % entity_instance_id) - self.service.fm_api.clear_fault( - fm_constants.FM_ALARM_ID_STORAGE_CEPH_FREE_SPACE, - entity_instance_id) - - def clear_critical_alarm(self, group_name): - alarm_list = self.service.fm_api.get_faults_by_id( - fm_constants.FM_ALARM_ID_STORAGE_CEPH_CRITICAL) - if alarm_list: - for alarm in range(len(alarm_list)): - group_id = alarm_list[alarm].entity_instance_id.find("group-") - group_instance_name = ( - "group-" + - alarm_list[alarm].entity_instance_id[group_id + 6]) - if group_name == group_instance_name: - self.service.fm_api.clear_fault( - fm_constants.FM_ALARM_ID_STORAGE_CEPH_CRITICAL, - alarm_list[alarm].entity_instance_id) - - def clear_all_major_critical(self, group_name=None): - # clear major alarms - alarm_list = self.service.fm_api.get_faults_by_id( - fm_constants.FM_ALARM_ID_STORAGE_CEPH_MAJOR) - if alarm_list: - for alarm in range(len(alarm_list)): - if group_name is not None: - group_id = ( - alarm_list[alarm].entity_instance_id.find("group-")) - group_instance_name = ( - "group-" + - alarm_list[alarm].entity_instance_id[group_id + 6]) - if group_name == group_instance_name: - self.service.fm_api.clear_fault( - fm_constants.FM_ALARM_ID_STORAGE_CEPH_MAJOR, - alarm_list[alarm].entity_instance_id) - else: - self.service.fm_api.clear_fault( - fm_constants.FM_ALARM_ID_STORAGE_CEPH_MAJOR, - alarm_list[alarm].entity_instance_id) - # clear critical alarms - alarm_list = self.service.fm_api.get_faults_by_id( - fm_constants.FM_ALARM_ID_STORAGE_CEPH_CRITICAL) - if alarm_list: - for alarm in range(len(alarm_list)): - if group_name is not None: - group_id = ( - alarm_list[alarm].entity_instance_id.find("group-")) - group_instance_name = ( - "group-" + - alarm_list[alarm].entity_instance_id[group_id + 6]) - if group_name == group_instance_name: - self.service.fm_api.clear_fault( - fm_constants.FM_ALARM_ID_STORAGE_CEPH_CRITICAL, - alarm_list[alarm].entity_instance_id) - else: - 
self.service.fm_api.clear_fault( - fm_constants.FM_ALARM_ID_STORAGE_CEPH_CRITICAL, - alarm_list[alarm].entity_instance_id) - - def _get_current_alarms(self): - """Retrieve currently raised alarm""" - self.current_health_alarm = self.service.fm_api.get_fault( - fm_constants.FM_ALARM_ID_STORAGE_CEPH, - self.service.entity_instance_id) - quota_faults = self.service.fm_api.get_faults_by_id( - fm_constants.FM_ALARM_ID_STORAGE_CEPH_FREE_SPACE) - if quota_faults: - self.current_quota_alarms = [f.entity_instance_id - for f in quota_faults] - else: - self.current_quota_alarms = [] diff --git a/ceph/ceph-manager/ceph-manager/ceph_manager/server.py b/ceph/ceph-manager/ceph-manager/ceph_manager/server.py deleted file mode 100644 index 49d9ca961..000000000 --- a/ceph/ceph-manager/ceph-manager/ceph_manager/server.py +++ /dev/null @@ -1,173 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# -# Copyright (c) 2016-2018 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - -# https://chrigl.de/posts/2014/08/27/oslo-messaging-example.html -# http://docs.openstack.org/developer/oslo.messaging/server.html - -import sys - -# noinspection PyUnresolvedReferences -import eventlet -# noinspection PyUnresolvedReferences -import oslo_messaging as messaging -# noinspection PyUnresolvedReferences -from fm_api import fm_api -# noinspection PyUnresolvedReferences -from oslo_config import cfg -# noinspection PyUnresolvedReferences -from oslo_log import log as logging -# noinspection PyUnresolvedReferences -from oslo_service import service -# noinspection PyUnresolvedReferences -from oslo_service.periodic_task import PeriodicTasks - -# noinspection PyUnresolvedReferences -from cephclient import wrapper - -from ceph_manager.monitor import Monitor -from ceph_manager import constants - -from ceph_manager.i18n import _LI -from ceph_manager.i18n import _LW -from retrying import retry - -eventlet.monkey_patch(all=True) - -CONF = cfg.CONF -CONF.register_opts([ - cfg.StrOpt('sysinv_api_bind_ip', - default='0.0.0.0', - help='IP for the Ceph Manager server to bind to')]) -CONF.logging_default_format_string = ( - '%(asctime)s.%(msecs)03d %(process)d ' - '%(levelname)s %(name)s [-] %(message)s') -logging.register_options(CONF) -logging.setup(CONF, __name__) -LOG = logging.getLogger(__name__) -CONF.rpc_backend = 'rabbit' - - -class RpcEndpoint(PeriodicTasks): - - def __init__(self, service=None): - self.service = service - - def get_primary_tier_size(self, _): - """Get the ceph size for the primary tier. - - returns: an int for the size (in GB) of the tier - """ - - tiers_size = self.service.monitor.tiers_size - primary_tier_size = tiers_size.get( - self.service.monitor.primary_tier_name, 0) - LOG.debug(_LI("Ceph cluster primary tier size: %s GB") % - str(primary_tier_size)) - return primary_tier_size - - def get_tiers_size(self, _): - """Get the ceph cluster tier sizes. - - returns: a dict of sizes (in GB) by tier name - """ - - tiers_size = self.service.monitor.tiers_size - LOG.debug(_LI("Ceph cluster tiers (size in GB): %s") % - str(tiers_size)) - return tiers_size - - def is_cluster_up(self, _): - """Report if the last health check was successful. - - This is an independent view of the cluster accessibility that can be - used by the sysinv conductor to gate ceph API calls which would timeout - and potentially block other operations. 
- - This view is only updated at the rate the monitor checks for a cluster - uuid or a health check (CEPH_HEALTH_CHECK_INTERVAL) - - returns: boolean True if last health check was successful else False - """ - return self.service.monitor.cluster_is_up - - -class SysinvConductorUpgradeApi(object): - def __init__(self): - self.sysinv_conductor = None - super(SysinvConductorUpgradeApi, self).__init__() - - def get_software_upgrade_status(self): - LOG.info(_LI("Getting software upgrade status from sysinv")) - cctxt = self.sysinv_conductor.prepare(timeout=2) - upgrade = cctxt.call({}, 'get_software_upgrade_status') - LOG.info(_LI("Software upgrade status: %s") % str(upgrade)) - return upgrade - - @retry(wait_fixed=1000, - retry_on_exception=lambda e: - LOG.warn(_LW( - "Getting software upgrade status failed " - "with: %s. Retrying... ") % str(e)) or True) - def retry_get_software_upgrade_status(self): - return self.get_software_upgrade_status() - - -class Service(SysinvConductorUpgradeApi, service.Service): - - def __init__(self, conf): - super(Service, self).__init__() - self.conf = conf - self.rpc_server = None - self.sysinv_conductor = None - self.ceph_api = None - self.entity_instance_id = '' - self.fm_api = fm_api.FaultAPIs() - self.monitor = Monitor(self) - self.config = None - self.config_desired = None - self.config_applied = None - - def start(self): - super(Service, self).start() - transport = messaging.get_transport(self.conf) - self.sysinv_conductor = messaging.RPCClient( - transport, - messaging.Target( - topic=constants.SYSINV_CONDUCTOR_TOPIC)) - - self.ceph_api = wrapper.CephWrapper( - endpoint='https://localhost:5001') - - # Get initial config from sysinv and send it to - # services that need it before starting them - self.rpc_server = messaging.get_rpc_server( - transport, - messaging.Target(topic=constants.CEPH_MANAGER_TOPIC, - server=self.conf.sysinv_api_bind_ip), - [RpcEndpoint(self)], - executor='eventlet') - self.rpc_server.start() - eventlet.spawn_n(self.monitor.run) - - def stop(self): - try: - self.rpc_server.stop() - self.rpc_server.wait() - except Exception: - pass - super(Service, self).stop() - - -def run_service(): - CONF(sys.argv[1:]) - logging.setup(CONF, "ceph-manager") - launcher = service.launch(CONF, Service(CONF), workers=1) - launcher.wait() - - -if __name__ == "__main__": - run_service() diff --git a/ceph/ceph-manager/ceph-manager/ceph_manager/tests/__init__.py b/ceph/ceph-manager/ceph-manager/ceph_manager/tests/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/ceph/ceph-manager/ceph-manager/setup.py b/ceph/ceph-manager/ceph-manager/setup.py deleted file mode 100644 index 40cf5012b..000000000 --- a/ceph/ceph-manager/ceph-manager/setup.py +++ /dev/null @@ -1,19 +0,0 @@ -#!/usr/bin/env python -# -# Copyright (c) 2013-2014, 2016 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - - -import setuptools - -setuptools.setup( - name='ceph_manager', - version='1.0.0', - description='CEPH manager', - license='Apache-2.0', - packages=['ceph_manager'], - entry_points={ - } -) diff --git a/ceph/ceph-manager/ceph-manager/test-requirements.txt b/ceph/ceph-manager/ceph-manager/test-requirements.txt deleted file mode 100644 index 1fdf20563..000000000 --- a/ceph/ceph-manager/ceph-manager/test-requirements.txt +++ /dev/null @@ -1,10 +0,0 @@ -# The order of packages is significant, because pip processes them in the order -# of appearance. 
Changing the order has an impact on the overall integration -# process, which may cause wedges in the gate later. - -mock -flake8 -eventlet -pytest -oslo.log -oslo.i18n \ No newline at end of file diff --git a/ceph/ceph-manager/ceph-manager/tox.ini b/ceph/ceph-manager/ceph-manager/tox.ini deleted file mode 100644 index cad30d506..000000000 --- a/ceph/ceph-manager/ceph-manager/tox.ini +++ /dev/null @@ -1,24 +0,0 @@ -# adapted from glance tox.ini - -[tox] -minversion = 1.6 -envlist = py27,pep8 -skipsdist = True -# tox does not work if the path to the workdir is too long, so move it to /tmp -toxworkdir = /tmp/{env:USER}_ceph_manager_tox - -[testenv] -setenv = VIRTUAL_ENV={envdir} -usedevelop = True -install_command = pip install -U --force-reinstall {opts} {packages} -deps = -r{toxinidir}/test-requirements.txt -commands = py.test {posargs} -whitelist_externals = bash -passenv = http_proxy HTTP_PROXY https_proxy HTTPS_PROXY no_proxy NO_PROXY - -[testenv:pep8] -commands = - flake8 {posargs} - -[flake8] -exclude = .venv,.git,.tox,dist,doc,etc,*glance/locale*,*lib/python*,*egg,build diff --git a/ceph/ceph-manager/files/ceph-manager.logrotate b/ceph/ceph-manager/files/ceph-manager.logrotate deleted file mode 100644 index 8d7a16ab1..000000000 --- a/ceph/ceph-manager/files/ceph-manager.logrotate +++ /dev/null @@ -1,11 +0,0 @@ -/var/log/ceph-manager.log { - nodateext - size 10M - start 1 - rotate 10 - missingok - notifempty - compress - delaycompress - copytruncate -} diff --git a/ceph/ceph-manager/files/ceph-manager.service b/ceph/ceph-manager/files/ceph-manager.service deleted file mode 100644 index e8bf26cf9..000000000 --- a/ceph/ceph-manager/files/ceph-manager.service +++ /dev/null @@ -1,17 +0,0 @@ -[Unit] -Description=Handle Ceph API calls and provide status updates via alarms -After=ceph.target - -[Service] -Type=forking -Restart=no -KillMode=process -RemainAfterExit=yes -ExecStart=/etc/rc.d/init.d/ceph-manager start -ExecStop=/etc/rc.d/init.d/ceph-manager stop -ExecReload=/etc/rc.d/init.d/ceph-manager reload -PIDFile=/var/run/ceph/ceph-manager.pid - -[Install] -WantedBy=multi-user.target - diff --git a/ceph/ceph-manager/scripts/bin/ceph-manager b/ceph/ceph-manager/scripts/bin/ceph-manager deleted file mode 100644 index 9aa4330db..000000000 --- a/ceph/ceph-manager/scripts/bin/ceph-manager +++ /dev/null @@ -1,17 +0,0 @@ -#!/usr/bin/env python -# -# Copyright (c) 2016 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - - -import sys - -try: - from ceph_manager.server import run_service -except EnvironmentError as e: - print >> sys.stderr, "Error importing ceph_manager: ", str(e) - sys.exit(1) - -run_service() diff --git a/ceph/ceph-manager/scripts/init.d/ceph-manager b/ceph/ceph-manager/scripts/init.d/ceph-manager deleted file mode 100644 index b0a0fc6c9..000000000 --- a/ceph/ceph-manager/scripts/init.d/ceph-manager +++ /dev/null @@ -1,103 +0,0 @@ -#!/bin/sh -# -# Copyright (c) 2013-2014, 2016 Wind River Systems, Inc. 
-# -# SPDX-License-Identifier: Apache-2.0 -# - - -### BEGIN INIT INFO -# Provides: ceph-manager -# Required-Start: $ceph -# Required-Stop: $ceph -# Default-Start: 2 3 4 5 -# Default-Stop: 0 1 6 -# Short-Description: Daemon for polling ceph status -# Description: Daemon for polling ceph status -### END INIT INFO - -DESC="ceph-manager" -DAEMON="/usr/bin/ceph-manager" -RUNDIR="/var/run/ceph" -PIDFILE=$RUNDIR/$DESC.pid - -CONFIGFILE="/etc/sysinv/sysinv.conf" -LOGFILE="/var/log/ceph-manager.log" - -start() -{ - if [ -e $PIDFILE ]; then - PIDDIR=/proc/$(cat $PIDFILE) - if [ -d ${PIDDIR} ]; then - echo "$DESC already running." - exit 0 - else - echo "Removing stale PID file $PIDFILE" - rm -f $PIDFILE - fi - fi - - echo -n "Starting $DESC..." - mkdir -p $RUNDIR - start-stop-daemon --start --quiet \ - --pidfile ${PIDFILE} --exec ${DAEMON} \ - --make-pidfile --background \ - -- --log-file=$LOGFILE --config-file=$CONFIGFILE - - if [ $? -eq 0 ]; then - echo "done." - else - echo "failed." - exit 1 - fi -} - -stop() -{ - echo -n "Stopping $DESC..." - start-stop-daemon --stop --quiet --pidfile $PIDFILE --retry 60 - if [ $? -eq 0 ]; then - echo "done." - else - echo "failed." - fi - rm -f $PIDFILE -} - -status() -{ - pid=`cat $PIDFILE 2>/dev/null` - if [ -n "$pid" ]; then - if ps -p $pid &> /dev/null ; then - echo "$DESC is running" - exit 0 - else - echo "$DESC is not running but has pid file" - exit 1 - fi - fi - echo "$DESC is not running" - exit 3 -} - -case "$1" in - start) - start - ;; - stop) - stop - ;; - restart|force-reload|reload) - stop - start - ;; - status) - status - ;; - *) - echo "Usage: $0 {start|stop|force-reload|restart|reload|status}" - exit 1 - ;; -esac - -exit 0 diff --git a/ceph/python-cephclient/centos/build_srpm.data b/ceph/python-cephclient/centos/build_srpm.data deleted file mode 100644 index e6b7ea231..000000000 --- a/ceph/python-cephclient/centos/build_srpm.data +++ /dev/null @@ -1,2 +0,0 @@ -SRC_DIR="python-cephclient" -TIS_PATCH_VER=0 diff --git a/ceph/python-cephclient/centos/python-cephclient.spec b/ceph/python-cephclient/centos/python-cephclient.spec deleted file mode 100644 index 0d531b6d9..000000000 --- a/ceph/python-cephclient/centos/python-cephclient.spec +++ /dev/null @@ -1,65 +0,0 @@ -Summary: Handle Ceph API calls and provide status updates via alarms -Name: python-cephclient -Version: 13.2.2.0 -Release: %{tis_patch_ver}%{?_tis_dist} -License: Apache-2.0 -Group: base -Packager: Wind River -URL: https://github.com/openstack/stx-integ/tree/master/ceph/python-cephclient/python-cephclient' -Source0: %{name}-%{version}.tar.gz - -BuildArch: noarch - -BuildRequires: python -BuildRequires: python2-pip -BuildRequires: python2-wheel - -Requires: python -Requires: python-ipaddress -Requires: python2-six -Requires: python2-requests - -Provides: python-cephclient - -%description -A client library in Python for Ceph Mgr RESTful plugin providing REST API -access to the cluster over an SSL-secured connection. Python API is compatible -with the old Python Ceph client at -https://github.com/dmsimard/python-cephclient that no longer works in Ceph -mimic because Ceph REST API component was removed. 
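For context, a minimal usage sketch of this client, based only on calls that appear elsewhere in this patch (the CephWrapper endpoint is the one server.py uses; fsid() and osd_get_pool_quota() and their (response, body) return convention come from monitor.py and ceph.py; the 'rbd' pool name and the printed fields are illustrative assumptions, not part of this patch):

    # Minimal sketch, assuming the API shown in the deleted ceph-manager code.
    from cephclient import wrapper

    ceph_api = wrapper.CephWrapper(endpoint='https://localhost:5001')

    # Text-format calls return (response, raw_text).
    response, fsid = ceph_api.fsid(body='text', timeout=30)
    if response.ok:
        print(fsid.strip())

    # JSON-format calls return (response, dict); the payload is under 'output'.
    response, quota = ceph_api.osd_get_pool_quota('rbd', body='json')
    if response.ok:
        print(quota['output']['quota_max_bytes'],
              quota['output']['quota_max_objects'])
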
- -%define debug_package %{nil} - -%prep -%autosetup -p 1 -n %{name}-%{version} - -rm -rf .pytest_cache -rm -rf python_cephclient.egg-info -rm -f requirements.txt - -%build -%{__python} setup.py build -%py2_build_wheel - -%install -%{__python2} setup.py install --skip-build --root %{buildroot} -mkdir -p $RPM_BUILD_ROOT/wheels -install -m 644 dist/*.whl $RPM_BUILD_ROOT/wheels/ - -%clean -rm -rf $RPM_BUILD_ROOT - -%files -%defattr(-,root,root,-) -%license LICENSE -%{python2_sitelib}/cephclient -%{python2_sitelib}/*.egg-info - -%package wheels -Summary: %{name} wheels - -%description wheels -Contains python wheels for %{name} - -%files wheels -/wheels/* diff --git a/ceph/python-cephclient/python-cephclient/.gitignore b/ceph/python-cephclient/python-cephclient/.gitignore deleted file mode 100644 index 2e436dc54..000000000 --- a/ceph/python-cephclient/python-cephclient/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -.pytest_cache -*.egg-info diff --git a/ceph/python-cephclient/python-cephclient/LICENSE b/ceph/python-cephclient/python-cephclient/LICENSE deleted file mode 100644 index d6e28015e..000000000 --- a/ceph/python-cephclient/python-cephclient/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright 2019 Wind River Systems, Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/ceph/python-cephclient/python-cephclient/cephclient/__init__.py b/ceph/python-cephclient/python-cephclient/cephclient/__init__.py deleted file mode 100644 index 0c941312c..000000000 --- a/ceph/python-cephclient/python-cephclient/cephclient/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -# -# Copyright (c) 2019 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# diff --git a/ceph/python-cephclient/python-cephclient/cephclient/client.py b/ceph/python-cephclient/python-cephclient/cephclient/client.py deleted file mode 100644 index 0728fa77d..000000000 --- a/ceph/python-cephclient/python-cephclient/cephclient/client.py +++ /dev/null @@ -1,5793 +0,0 @@ -# -# Copyright (c) 2019 Wind River Systems, Inc. 
-# -# SPDX-License-Identifier: Apache-2.0 -# - -import ipaddress -import json -import logging -import re -import requests -import six -import subprocess -import time - -from cephclient.exception import CephMonRestfulListKeysError -from cephclient.exception import CephMonRestfulJsonError -from cephclient.exception import CephMonRestfulMissingUserCredentials -from cephclient.exception import CephMgrDumpError -from cephclient.exception import CephMgrJsonError -from cephclient.exception import CephMgrMissingRestfulService -from cephclient.exception import CephClientFormatNotSupported -from cephclient.exception import CephClientResponseFormatNotImplemented -from cephclient.exception import CephClientInvalidChoice -from cephclient.exception import CephClientTypeError -from cephclient.exception import CephClientValueOutOfBounds -from cephclient.exception import CephClientInvalidPgid -from cephclient.exception import CephClientInvalidIPAddr -from cephclient.exception import CephClientInvalidOsdIdValue -from cephclient.exception import CephClientNoSuchUser -from cephclient.exception import CephClientIncorrectPassword - - -CEPH_MON_RESTFUL_USER = 'admin' -CEPH_MON_RESTFUL_SERVICE = 'restful' -CEPH_CLIENT_RETRY_COUNT = 2 -CEPH_CLIENT_RETRY_TIMEOUT_SEC = 5 -CEPH_CLI_TIMEOUT_SEC = 5 -API_SUPPORTED_RESPONSE_FORMATS = [ - 'text', 'json', 'xml', 'binary' -] - -LOG = logging.getLogger('ceph_client') -LOG.setLevel(logging.DEBUG) -ch = logging.StreamHandler() -ch.setLevel(logging.DEBUG) -ch.setFormatter(logging.Formatter( - '%(asctime)s %(levelname)s %(name)s %(message)s')) -LOG.addHandler(ch) - - -class CephClient(object): - - def __init__(self, - username=CEPH_MON_RESTFUL_USER, - password=None, - retry_count=CEPH_CLIENT_RETRY_COUNT, - retry_timeout=CEPH_CLIENT_RETRY_TIMEOUT_SEC): - self.username = username - self.password = password - self.check_certificate = True - self.service_url = None - # TODO: fix certificates - self._disable_certificate_checks() - self.session = None - self.retry_count = retry_count - self.retry_timeout = retry_timeout - - def _refresh_session(self): - self.session = requests.Session() - self.session.auth = (self.username, self.password) - - def _disable_certificate_checks(self): - self.check_certificate = False - requests.packages.urllib3.disable_warnings() - LOG.warning('skip checking server certificate') - - def _get_password(self): - try: - output = subprocess.check_output( - ('ceph restful list-keys ' - '--connect-timeout {}').format( - CEPH_CLI_TIMEOUT_SEC), - shell=True) - except subprocess.CalledProcessError as e: - raise CephMonRestfulListKeysError(str(e)) - try: - keys = json.loads(output) - except (KeyError, ValueError): - raise CephMonRestfulJsonError(output) - try: - self.password = keys[self.username] - except KeyError: - raise CephMonRestfulMissingUserCredentials(self.username) - - def _get_service_url(self): - try: - output = subprocess.check_output( - ('ceph mgr dump ' - '--connect-timeout {}').format( - CEPH_CLI_TIMEOUT_SEC), - shell=True) - except subprocess.CalledProcessError as e: - raise CephMgrDumpError(str(e)) - try: - status = json.loads(output) - except (KeyError, ValueError): - raise CephMgrJsonError(output) - try: - self.service_url = status["services"][CEPH_MON_RESTFUL_SERVICE] - except (KeyError, TypeError): - raise CephMgrMissingRestfulService( - status.get('services', '')) - - def _make_text_result(self, prefix, result): - if result.get('has_failed'): - assert(len(result['failed']) == 1) - response = requests.Response() - response.status_code = 
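# --- Editor's note: a minimal usage sketch for the client defined in
# this deleted module (not part of the original patch). Credentials
# are resolved lazily: the password comes from `ceph restful list-keys`
# and the endpoint from `ceph mgr dump`, so this assumes a node with
# the ceph CLI and a running mgr 'restful' module.
from cephclient.client import CephClient

client = CephClient()           # defaults to the 'admin' restful user
# No subprocess call happens yet; _get_password()/_get_service_url()
# run on the first request.
response, body = client.fsid()  # -> (requests.Response, {'status': ..., 'output': ...})
print(body['output'])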
requests.codes.internal_server_error - response.reason = result['failed'][0]['outs'].rstrip() - return response, response.reason - else: - assert(len(result['finished']) == 1) - response = requests.Response() - response.status_code = requests.codes.ok - response.reason = "OK" - return response, result['finished'][0]['outb'].rstrip() - - def _apply_json_result_workarounds(self, prefix, outb): - if prefix == 'osd crush tree': - # ceph mgr strangely adds a pair of square brackets at the end - while outb.endswith('][]'): - LOG.info("Trim 'osd crush tree' json response") - outb = outb[:-2] - return outb - - def _make_json_result(self, prefix, result): - if result.get('has_failed'): - assert(len(result['failed']) == 1) - response = requests.Response() - response.status_code = requests.codes.internal_server_error - response.reason = result['failed'][0]['outs'] - return response, dict( - status=result['failed'][0]['outs'], - output=result['failed'][0]['outb']) - else: - assert(len(result['finished']) == 1) - outb = result['finished'][0]['outb'] - outb = self._apply_json_result_workarounds(prefix, outb) - response = requests.Response() - response.status_code = requests.codes.ok - response.reason = "OK" - try: - return response, dict( - status=result['finished'][0]['outs'], - output=json.loads(outb or 'null')) - except (ValueError, TypeError): - raise CephMgrJsonError(outb) - - def _request(self, prefix, *args, **kwargs): - if not self.password: - self._get_password() - if not self.service_url: - self._get_service_url() - if not self.session: - self._refresh_session() - format = kwargs.get('body', 'json').lower() - if format not in API_SUPPORTED_RESPONSE_FORMATS: - raise CephClientFormatNotSupported( - prefix=prefix, format=format) - del kwargs['body'] - req_json = dict(kwargs) - req_json['format'] = format - assert('prefix' not in kwargs) - req_json['prefix'] = prefix - if 'timeout' in req_json: - timeout = req_json['timeout'] - del req_json['timeout'] - else: - timeout = None - LOG.info('Request params: url={}, json={}'.format( - self.service_url + 'request?wait=1', req_json)) - credit = self.retry_count + 1 - while credit > 0: - credit -= 1 - try: - result = self.session.post( - self.service_url + 'request?wait=1', - json=req_json, - verify=self.check_certificate, - timeout=timeout).json() - LOG.info('Result: {}'.format(result)) - if 'is_finished' in result: - self.session.delete( - self.service_url + 'request?id=' + result['id']) - else: - assert('message' in result) - if 'auth: No such user' in result['message']: - raise CephClientNoSuchUser(user=self.username) - elif 'auth: Incorrect password' in result['message']: - raise CephClientIncorrectPassword( - user=self.username) - break - except CephClientIncorrectPassword: - if not credit: - raise - LOG.warning('Incorrect password for user \'{}\'. ' - 'Fetch user password via list-keys ' - 'and retry.'.format(self.username)) - if self.retry_timeout > 0: - time.sleep(self.retry_timeout) - self._get_password() - self._refresh_session() - except (requests.ConnectionError, - requests.Timeout, - requests.HTTPError) as e: - if not credit: - raise IOError(str(e)) - LOG.warning( - 'Request error: {}. 
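# --- Editor's note: an illustrative sketch of the raw REST exchange
# that _request() wraps, with a hypothetical endpoint and password
# substituted for the values the client normally discovers itself.
import requests

service_url = 'https://controller-0:5001/'   # from `ceph mgr dump` (hypothetical)
session = requests.Session()
session.auth = ('admin', 'secret')           # from `ceph restful list-keys` (hypothetical)
result = session.post(service_url + 'request?wait=1',
                      json={'prefix': 'pg stat', 'format': 'json'},
                      verify=False).json()
# On success the command output is in result['finished'][0]['outb'];
# like _request() above, a polite caller then DELETEs request?id=<id>.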
' - 'Refresh restful service URL and retry'.format(e)) - if self.retry_timeout > 0: - time.sleep(self.retry_timeout) - self._get_service_url() - self._refresh_session() - if format == 'json': - return self._make_json_result(prefix, result) - elif format == 'text': - return self._make_text_result(prefix, result) - else: - raise CephClientResponseFormatNotImplemented( - format=format, reason=result["finished"][0]["outb"]) - - def pg_stat(self, body='json', timeout=None): - """show placement group status.""" - return self._request('pg stat', body=body, timeout=timeout) - - def pg_getmap(self, body='json', timeout=None): - """get binary pg map to -o/stdout""" - return self._request('pg getmap', body=body, timeout=timeout) - - PG_DUMP_DUMPCONTENTS_VALUES = \ - ['all', 'summary', 'sum', 'delta', 'pools', - 'osds', 'pgs', 'pgs_brief'] - - def pg_dump(self, dumpcontents=None, body='json', timeout=None): - """show human-readable versions of pg map (only 'all' valid with plain)""" - kwargs = dict(body=body, timeout=timeout) - if dumpcontents is not None: - if not isinstance(dumpcontents, six.string_types): - raise CephClientTypeError( - name='dumpcontents', - actual=type(dumpcontents), - expected=six.string_types) - supported = CephClient.PG_DUMP_DUMPCONTENTS_VALUES - if dumpcontents not in supported: - raise CephClientInvalidChoice( - function='pg_dump', - option='dumpcontents', - value=dumpcontents, - supported=', '.join(supported)) - if not isinstance(dumpcontents, list): - dumpcontents = [dumpcontents] - kwargs['dumpcontents'] = dumpcontents - return self._request('pg dump', **kwargs) - - PG_DUMP_JSON_DUMPCONTENTS_VALUES = [ - 'all', 'summary', 'sum', 'pools', 'osds', 'pgs'] - - def pg_dump_json(self, dumpcontents=None, body='json', timeout=None): - """show human-readable version of pg map in json only""" - kwargs = dict(body=body, timeout=timeout) - if dumpcontents is not None: - if not isinstance(dumpcontents, six.string_types): - raise CephClientTypeError( - name='dumpcontents', - actual=type(dumpcontents), - expected=six.string_types) - supported = CephClient.PG_DUMP_JSON_DUMPCONTENTS_VALUES - if dumpcontents not in supported: - raise CephClientInvalidChoice( - function='pg_dump_json', - option='dumpcontents', - value=dumpcontents, - supported=', '.join(supported)) - if not isinstance(dumpcontents, list): - dumpcontents = [dumpcontents] - kwargs['dumpcontents'] = dumpcontents - return self._request('pg dump_json', **kwargs) - - def pg_dump_pools_json(self, body='json', timeout=None): - """show pg pools info in json only""" - return self._request('pg dump_pools_json', - body=body, timeout=timeout) - - def pg_ls_by_pool(self, poolstr, states=None, - body='json', timeout=None): - """list pg with pool = [poolname]""" - kwargs = dict(body=body, timeout=timeout) - if not isinstance(poolstr, six.string_types): - raise CephClientTypeError( - name='poolstr', - actual=type(poolstr), - expected=six.string_types) - - kwargs['poolstr'] = poolstr - if states is not None: - if isinstance(states, list): - for item in states: - if not isinstance(item, six.string_types): - raise CephClientTypeError( - name='states', - actual=item, - expected='list of strings') - else: - if not isinstance(states, six.string_types): - raise CephClientTypeError( - name='states', - actual=type(states), - expected=six.string_types) - if not isinstance(states, list): - states = [states] - kwargs['states'] = states - return self._request('pg ls-by-pool', **kwargs) - - def pg_ls_by_primary(self, osd, pool=None, - states=None, 
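# --- Editor's note: usage sketch for the pg dump wrappers above.
# dumpcontents is validated against PG_DUMP_DUMPCONTENTS_VALUES before
# any request is sent; an unknown value raises CephClientInvalidChoice.
from cephclient.client import CephClient

client = CephClient()
response, body = client.pg_dump(dumpcontents='pgs_brief')
# body['output'] is the decoded JSON; its exact layout follows
# `ceph pg dump pgs_brief -f json` on the cluster's release.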
body='json', timeout=None): - """list pg with primary = [osd]""" - kwargs = dict(body=body, timeout=timeout) - if isinstance(osd, six.integer_types): - pass - elif isinstance(osd, six.string_types): - osd = osd.lower() - prefix = 'osd.' - if not osd.startswith(prefix): - raise CephClientInvalidOsdIdValue(osdid=osd) - osd = int(osd[len(prefix):]) - else: - raise CephClientTypeError( - name='osd', - actual=type(osd), - expected='int or string') - - kwargs['osd'] = osd - if pool is not None: - if not isinstance(pool, six.integer_types): - raise CephClientTypeError( - name='pool', - actual=type(pool), - expected=int) - kwargs['pool'] = pool - if states is not None: - if isinstance(states, list): - for item in states: - if not isinstance(item, six.string_types): - raise CephClientTypeError( - name='states', - actual=item, - expected='list of strings') - else: - if not isinstance(states, six.string_types): - raise CephClientTypeError( - name='states', - actual=type(states), - expected=six.string_types) - if not isinstance(states, list): - states = [states] - kwargs['states'] = states - return self._request('pg ls-by-primary', **kwargs) - - def pg_ls_by_osd(self, osd, pool=None, states=None, - body='json', timeout=None): - """list pg on osd [osd]""" - kwargs = dict(body=body, timeout=timeout) - if isinstance(osd, six.integer_types): - pass - elif isinstance(osd, six.string_types): - osd = osd.lower() - prefix = 'osd.' - if not osd.startswith(prefix): - raise CephClientInvalidOsdIdValue(osdid=osd) - osd = int(osd[len(prefix):]) - else: - raise CephClientTypeError( - name='osd', - actual=type(osd), - expected='int or string') - - kwargs['osd'] = osd - if pool is not None: - if not isinstance(pool, six.integer_types): - raise CephClientTypeError( - name='pool', - actual=type(pool), - expected=int) - kwargs['pool'] = pool - if states is not None: - if isinstance(states, list): - for item in states: - if not isinstance(item, six.string_types): - raise CephClientTypeError( - name='states', - actual=item, - expected='list of strings') - else: - if not isinstance(states, six.string_types): - raise CephClientTypeError( - name='states', - actual=type(states), - expected=six.string_types) - if not isinstance(states, list): - states = [states] - kwargs['states'] = states - return self._request('pg ls-by-osd', **kwargs) - - def pg_ls(self, pool=None, states=None, body='json', timeout=None): - """list pg with specific pool, osd, state""" - kwargs = dict(body=body, timeout=timeout) - if pool is not None: - if not isinstance(pool, six.integer_types): - raise CephClientTypeError( - name='pool', - actual=type(pool), - expected=int) - kwargs['pool'] = pool - if states is not None: - if isinstance(states, list): - for item in states: - if not isinstance(item, six.string_types): - raise CephClientTypeError( - name='states', - actual=item, - expected='list of strings') - else: - if not isinstance(states, six.string_types): - raise CephClientTypeError( - name='states', - actual=type(states), - expected=six.string_types) - if not isinstance(states, list): - states = [states] - kwargs['states'] = states - return self._request('pg ls', **kwargs) - - PG_DUMP_STUCK_STUCKOPS_VALUES = \ - ['inactive', 'unclean', 'stale', 'undersized', - 'degraded'] - - def pg_dump_stuck(self, stuckops=None, threshold=None, - body='json', timeout=None): - """show information about stuck pgs""" - kwargs = dict(body=body, timeout=timeout) - if stuckops is not None: - if not isinstance(stuckops, six.string_types): - raise CephClientTypeError( - 
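# --- Editor's note: the ls-by-osd/ls-by-primary wrappers accept an
# OSD as either an int or an 'osd.N' string; anything else raises
# CephClientTypeError, and a malformed name raises
# CephClientInvalidOsdIdValue. A sketch:
from cephclient.client import CephClient

client = CephClient()
client.pg_ls_by_osd(3)                      # plain id
client.pg_ls_by_osd('osd.3')                # normalized to 3 internally
client.pg_ls_by_osd(3, states=['active'])   # optional state filter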
name='stuckops', - actual=type(stuckops), - expected=six.string_types) - supported = CephClient.PG_DUMP_STUCK_STUCKOPS_VALUES - if stuckops not in supported: - raise CephClientInvalidChoice( - function='pg_dump_stuck', - option='stuckops', - value=stuckops, - supported=', '.join(supported)) - if not isinstance(stuckops, list): - stuckops = [stuckops] - kwargs['stuckops'] = stuckops - if threshold is not None: - if not isinstance(threshold, six.integer_types): - raise CephClientTypeError( - name='threshold', - actual=type(threshold), - expected=int) - kwargs['threshold'] = threshold - return self._request('pg dump_stuck', **kwargs) - - PG_DEBUG_DEBUGOP_VALUES = \ - ['unfound_objects_exist', 'degraded_pgs_exist'] - - def pg_debug(self, debugop, body='json', timeout=None): - """show debug info about pgs""" - kwargs = dict(body=body, timeout=timeout) - if not isinstance(debugop, six.string_types): - raise CephClientTypeError( - name='debugop', - actual=type(debugop), - expected=six.string_types) - supported = CephClient.PG_DEBUG_DEBUGOP_VALUES - if debugop not in supported: - raise CephClientInvalidChoice( - function='pg_debug', - option='debugop', - value=debugop, - supported=', '.join(supported)) - - kwargs['debugop'] = debugop - return self._request('pg debug', **kwargs) - - def pg_scrub(self, pgid, body='json', timeout=None): - """start scrub on """ - kwargs = dict(body=body, timeout=timeout) - if not isinstance(pgid, six.string_types): - raise CephClientTypeError( - name='pgid', - actual=type(pgid), - expected=six.string_types) - if not re.match(r'[0-9]+\.[0-9a-fA-F]+', pgid): - raise CephClientInvalidPgid( - name='pgid', - actual=pgid) - - kwargs['pgid'] = pgid - return self._request('pg scrub', **kwargs) - - def pg_deep_scrub(self, pgid, body='json', timeout=None): - """start deep-scrub on """ - kwargs = dict(body=body, timeout=timeout) - if not isinstance(pgid, six.string_types): - raise CephClientTypeError( - name='pgid', - actual=type(pgid), - expected=six.string_types) - if not re.match(r'[0-9]+\.[0-9a-fA-F]+', pgid): - raise CephClientInvalidPgid( - name='pgid', - actual=pgid) - - kwargs['pgid'] = pgid - return self._request('pg deep-scrub', **kwargs) - - def pg_repair(self, pgid, body='json', timeout=None): - """start repair on """ - kwargs = dict(body=body, timeout=timeout) - if not isinstance(pgid, six.string_types): - raise CephClientTypeError( - name='pgid', - actual=type(pgid), - expected=six.string_types) - if not re.match(r'[0-9]+\.[0-9a-fA-F]+', pgid): - raise CephClientInvalidPgid( - name='pgid', - actual=pgid) - - kwargs['pgid'] = pgid - return self._request('pg repair', **kwargs) - - def pg_force_recovery(self, pgid, body='json', timeout=None): - """force recovery of first""" - kwargs = dict(body=body, timeout=timeout) - if not isinstance(pgid, six.string_types): - raise CephClientTypeError( - name='pgid', - actual=type(pgid), - expected=six.string_types) - if not re.match(r'[0-9]+\.[0-9a-fA-F]+', pgid): - raise CephClientInvalidPgid( - name='pgid', - actual=pgid) - - if not isinstance(pgid, list): - pgid = [pgid] - kwargs['pgid'] = pgid - return self._request('pg force-recovery', **kwargs) - - def pg_force_backfill(self, pgid, body='json', timeout=None): - """force backfill of first""" - kwargs = dict(body=body, timeout=timeout) - if not isinstance(pgid, six.string_types): - raise CephClientTypeError( - name='pgid', - actual=type(pgid), - expected=six.string_types) - if not re.match(r'[0-9]+\.[0-9a-fA-F]+', pgid): - raise CephClientInvalidPgid( - name='pgid', - 
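# --- Editor's note: sketch of the pgid validation shared by the
# scrub/repair wrappers above. A pgid is '<pool>.<hex>', e.g. '1.2f';
# anything else fails the regex and raises CephClientInvalidPgid
# before the cluster is contacted.
from cephclient.client import CephClient
from cephclient.exception import CephClientInvalidPgid

client = CephClient()
client.pg_scrub('1.2f')               # matches [0-9]+\.[0-9a-fA-F]+
try:
    client.pg_scrub('not-a-pgid')
except CephClientInvalidPgid as e:
    print(e)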
actual=pgid) - - if not isinstance(pgid, list): - pgid = [pgid] - kwargs['pgid'] = pgid - return self._request('pg force-backfill', **kwargs) - - def pg_cancel_force_recovery(self, pgid, body='json', timeout=None): - """restore normal recovery priority of """ - kwargs = dict(body=body, timeout=timeout) - if not isinstance(pgid, six.string_types): - raise CephClientTypeError( - name='pgid', - actual=type(pgid), - expected=six.string_types) - if not re.match(r'[0-9]+\.[0-9a-fA-F]+', pgid): - raise CephClientInvalidPgid( - name='pgid', - actual=pgid) - - if not isinstance(pgid, list): - pgid = [pgid] - kwargs['pgid'] = pgid - return self._request('pg cancel-force-recovery', **kwargs) - - def pg_cancel_force_backfill(self, pgid, body='json', timeout=None): - """restore normal backfill priority of """ - kwargs = dict(body=body, timeout=timeout) - if not isinstance(pgid, six.string_types): - raise CephClientTypeError( - name='pgid', - actual=type(pgid), - expected=six.string_types) - if not re.match(r'[0-9]+\.[0-9a-fA-F]+', pgid): - raise CephClientInvalidPgid( - name='pgid', - actual=pgid) - - if not isinstance(pgid, list): - pgid = [pgid] - kwargs['pgid'] = pgid - return self._request('pg cancel-force-backfill', **kwargs) - - def osd_perf(self, body='json', timeout=None): - """print dump of OSD perf summary stats""" - return self._request('osd perf', body=body, timeout=timeout) - - OSD_DF_OUTPUT_METHOD_VALUES = ['plain', 'tree'] - - def osd_df(self, output_method=None, body='json', timeout=None): - """show OSD utilization""" - kwargs = dict(body=body, timeout=timeout) - if output_method is not None: - if not isinstance(output_method, six.string_types): - raise CephClientTypeError( - name='output_method', - actual=type(output_method), - expected=six.string_types) - supported = CephClient.OSD_DF_OUTPUT_METHOD_VALUES - if output_method not in supported: - raise CephClientInvalidChoice( - function='osd_df', - option='output_method', - value=output_method, - supported=', '.join(supported)) - kwargs['output_method'] = output_method - return self._request('osd df', **kwargs) - - def osd_blocked_by(self, body='json', timeout=None): - """print histogram of which OSDs are blocking their peers""" - return self._request('osd blocked-by', body=body, timeout=timeout) - - def osd_pool_stats(self, pool_name=None, body='json', timeout=None): - """obtain stats from all pools, or from specified pool""" - kwargs = dict(body=body, timeout=timeout) - if pool_name is not None: - if not isinstance(pool_name, six.string_types): - raise CephClientTypeError( - name='pool_name', - actual=type(pool_name), - expected=six.string_types) - kwargs['pool_name'] = pool_name - return self._request('osd pool stats', **kwargs) - - OSD_REWEIGHT_BY_UTILIZATION_NO_INCREASING_VALUES = ['--no-increasing'] - - def osd_reweight_by_utilization( - self, oload=None, max_change=None, max_osds=None, - no_increasing=None, body='json', timeout=None): - """reweight OSDs by utilization [overload-percentage-for-consideration,default 120] """ - kwargs = dict(body=body, timeout=timeout) - if oload is not None: - if not isinstance(oload, six.integer_types): - raise CephClientTypeError( - name='oload', - actual=type(oload), - expected=int) - kwargs['oload'] = oload - if max_change is not None: - if not isinstance(max_change, (six.integer_types, float)): - raise CephClientTypeError( - name='max_change', - actual=type(max_change), - expected=int) - kwargs['max_change'] = max_change - if max_osds is not None: - if not isinstance(max_osds, 
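# --- Editor's note: usage sketch for the utilization helpers above;
# output_method is restricted to OSD_DF_OUTPUT_METHOD_VALUES.
from cephclient.client import CephClient

client = CephClient()
response, body = client.osd_df(output_method='tree')
# body['output'] mirrors `ceph osd df tree -f json` (nodes/summary).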
six.integer_types): - raise CephClientTypeError( - name='max_osds', - actual=type(max_osds), - expected=int) - kwargs['max_osds'] = max_osds - if no_increasing is not None: - if not isinstance(no_increasing, six.string_types): - raise CephClientTypeError( - name='no_increasing', - actual=type(no_increasing), - expected=six.string_types) - supported = CephClient.OSD_REWEIGHT_BY_UTILIZATION_NO_INCREASING_VALUES # noqa E501 - if no_increasing not in supported: - raise CephClientInvalidChoice( - function='osd_reweight_by_utilization', - option='no_increasing', - value=no_increasing, - supported=', '.join(supported)) - kwargs['no_increasing'] = no_increasing - return self._request('osd reweight-by-utilization', **kwargs) - - OSD_TEST_REWEIGHT_BY_UTILIZATION_NO_INCREASING_VALUES = [ - '--no-increasing'] - - def osd_test_reweight_by_utilization( - self, oload=None, max_change=None, max_osds=None, - no_increasing=None, body='json', timeout=None): - """dry run of reweight OSDs by utilization [overload-percentage-for-consideration, default 120] """ - kwargs = dict(body=body, timeout=timeout) - if oload is not None: - if not isinstance(oload, six.integer_types): - raise CephClientTypeError( - name='oload', - actual=type(oload), - expected=int) - kwargs['oload'] = oload - if max_change is not None: - if not isinstance(max_change, (six.integer_types, float)): - raise CephClientTypeError( - name='max_change', - actual=type(max_change), - expected=int) - kwargs['max_change'] = max_change - if max_osds is not None: - if not isinstance(max_osds, six.integer_types): - raise CephClientTypeError( - name='max_osds', - actual=type(max_osds), - expected=int) - kwargs['max_osds'] = max_osds - if no_increasing is not None: - if not isinstance(no_increasing, six.string_types): - raise CephClientTypeError( - name='no_increasing', - actual=type(no_increasing), - expected=six.string_types) - supported = CephClient.OSD_TEST_REWEIGHT_BY_UTILIZATION_NO_INCREASING_VALUES # noqa E501 - if no_increasing not in supported: - raise CephClientInvalidChoice( - function='osd_test_reweight_by_utilization', - option='no_increasing', - value=no_increasing, - supported=', '.join(supported)) - kwargs['no_increasing'] = no_increasing - return self._request('osd test-reweight-by-utilization', **kwargs) - - def osd_reweight_by_pg( - self, oload=None, max_change=None, max_osds=None, pools=None, - body='json', timeout=None): - """reweight OSDs by PG distribution [overload-percentage-for-consideration, default 120] """ - kwargs = dict(body=body, timeout=timeout) - if oload is not None: - if not isinstance(oload, six.integer_types): - raise CephClientTypeError( - name='oload', - actual=type(oload), - expected=int) - kwargs['oload'] = oload - if max_change is not None: - if not isinstance(max_change, (six.integer_types, float)): - raise CephClientTypeError( - name='max_change', - actual=type(max_change), - expected=int) - kwargs['max_change'] = max_change - if max_osds is not None: - if not isinstance(max_osds, six.integer_types): - raise CephClientTypeError( - name='max_osds', - actual=type(max_osds), - expected=int) - kwargs['max_osds'] = max_osds - if pools is not None: - if isinstance(pools, list): - for item in pools: - if not isinstance(item, six.string_types): - raise CephClientTypeError( - name='pools', - actual=item, - expected='list of strings') - else: - if not isinstance(pools, six.string_types): - raise CephClientTypeError( - name='pools', - actual=type(pools), - expected=six.string_types) - if not isinstance(pools, list): - 
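# --- Editor's note: sketch of a dry-run reweight mirroring
# `ceph osd test-reweight-by-utilization`; when oload is omitted the
# cluster-side default (120) applies, and max_change may be int or float.
from cephclient.client import CephClient

client = CephClient()
response, body = client.osd_test_reweight_by_utilization(
    oload=120, max_change=0.05, max_osds=4)
print(body['status'])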
pools = [pools] - kwargs['pools'] = pools - return self._request('osd reweight-by-pg', **kwargs) - - def osd_test_reweight_by_pg( - self, oload=None, max_change=None, max_osds=None, pools=None, - body='json', timeout=None): - """dry run of reweight OSDs by PG distribution [overload-percentage-for-consideration, default 120] """ - kwargs = dict(body=body, timeout=timeout) - if oload is not None: - if not isinstance(oload, six.integer_types): - raise CephClientTypeError( - name='oload', - actual=type(oload), - expected=int) - kwargs['oload'] = oload - if max_change is not None: - if not isinstance(max_change, (six.integer_types, float)): - raise CephClientTypeError( - name='max_change', - actual=type(max_change), - expected=int) - kwargs['max_change'] = max_change - if max_osds is not None: - if not isinstance(max_osds, six.integer_types): - raise CephClientTypeError( - name='max_osds', - actual=type(max_osds), - expected=int) - kwargs['max_osds'] = max_osds - if pools is not None: - if isinstance(pools, list): - for item in pools: - if not isinstance(item, six.string_types): - raise CephClientTypeError( - name='pools', - actual=item, - expected='list of strings') - else: - if not isinstance(pools, six.string_types): - raise CephClientTypeError( - name='pools', - actual=type(pools), - expected=six.string_types) - if not isinstance(pools, list): - pools = [pools] - kwargs['pools'] = pools - return self._request('osd test-reweight-by-pg', **kwargs) - - def osd_safe_to_destroy(self, ids, body='json', timeout=None): - """check whether osd(s) can be safely destroyed without reducing data durability """ - kwargs = dict(body=body, timeout=timeout) - if isinstance(ids, list): - for item in ids: - if not isinstance(item, six.string_types): - raise CephClientTypeError( - name='ids', - actual=item, - expected='list of strings') - else: - if not isinstance(ids, six.string_types): - raise CephClientTypeError( - name='ids', - actual=type(ids), - expected=six.string_types) - - if not isinstance(ids, list): - ids = [ids] - kwargs['ids'] = ids - return self._request('osd safe-to-destroy', **kwargs) - - def osd_ok_to_stop(self, ids, body='json', timeout=None): - """check whether osd(s) can be safely stopped without reducing immediate data availability """ - kwargs = dict(body=body, timeout=timeout) - if isinstance(ids, list): - for item in ids: - if not isinstance(item, six.string_types): - raise CephClientTypeError( - name='ids', - actual=item, - expected='list of strings') - else: - if not isinstance(ids, six.string_types): - raise CephClientTypeError( - name='ids', - actual=type(ids), - expected=six.string_types) - - if not isinstance(ids, list): - ids = [ids] - kwargs['ids'] = ids - return self._request('osd ok-to-stop', **kwargs) - - def osd_scrub(self, who, body='json', timeout=None): - """initiate scrub on osd <who>, or use <all|any> to scrub all""" - kwargs = dict(body=body, timeout=timeout) - if not isinstance(who, six.string_types): - raise CephClientTypeError( - name='who', - actual=type(who), - expected=six.string_types) - - kwargs['who'] = who - return self._request('osd scrub', **kwargs) - - def osd_deep_scrub(self, who, body='json', timeout=None): - """initiate deep scrub on osd <who>, or use <all|any> to deep scrub all""" - kwargs = dict(body=body, timeout=timeout) - if not isinstance(who, six.string_types): - raise CephClientTypeError( - name='who', - actual=type(who), - expected=six.string_types) - - kwargs['who'] = who - return self._request('osd deep-scrub', **kwargs) - - def osd_repair(self, who, body='json',
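# --- Editor's note: a maintenance pre-check sketch built on the two
# safety probes above; ids are strings (or a list of strings).
from cephclient.client import CephClient

client = CephClient()
resp, stop_check = client.osd_ok_to_stop('0')
resp, destroy_check = client.osd_safe_to_destroy(['0', '1'])
# A 200 response means the mons consider the operation safe; on a 500
# the refusal reason is carried in the body's 'status' field.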
timeout=None): - """initiate repair on osd , or use to repair all""" - kwargs = dict(body=body, timeout=timeout) - if not isinstance(who, six.string_types): - raise CephClientTypeError( - name='who', - actual=type(who), - expected=six.string_types) - - kwargs['who'] = who - return self._request('osd repair', **kwargs) - - def service_dump(self, body='json', timeout=None): - """dump service map""" - return self._request('service dump', body=body, timeout=timeout) - - def service_status(self, body='json', timeout=None): - """dump service state""" - return self._request('service status', body=body, timeout=timeout) - - def config_show(self, who, key, body='json', timeout=None): - """Show running configuration""" - kwargs = dict(body=body, timeout=timeout) - if not isinstance(who, six.string_types): - raise CephClientTypeError( - name='who', - actual=type(who), - expected=six.string_types) - - kwargs['who'] = who - if not isinstance(key, six.string_types): - raise CephClientTypeError( - name='key', - actual=type(key), - expected=six.string_types) - - kwargs['key'] = key - return self._request('config show', **kwargs) - - def config_show_with_defaults(self, who, body='json', timeout=None): - """Show running configuration (including compiled-in defaults)""" - kwargs = dict(body=body, timeout=timeout) - if not isinstance(who, six.string_types): - raise CephClientTypeError( - name='who', - actual=type(who), - expected=six.string_types) - - kwargs['who'] = who - return self._request('config show-with-defaults', **kwargs) - - def pg_map(self, pgid, body='json', timeout=None): - """show mapping of pg to osds""" - kwargs = dict(body=body, timeout=timeout) - if not isinstance(pgid, six.string_types): - raise CephClientTypeError( - name='pgid', - actual=type(pgid), - expected=six.string_types) - if not re.match(r'[0-9]+\.[0-9a-fA-F]+', pgid): - raise CephClientInvalidPgid( - name='pgid', - actual=pgid) - - kwargs['pgid'] = pgid - return self._request('pg map', **kwargs) - - def osd_last_stat_seq(self, _id, body='json', timeout=None): - """get the last pg stats sequence number reported for this osd""" - kwargs = dict(body=body, timeout=timeout) - if isinstance(_id, six.integer_types): - pass - elif isinstance(_id, six.string_types): - _id = _id.lower() - prefix = 'osd.' 
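# --- Editor's note: usage sketch for the config wrappers above; the
# 'who' target names a daemon, e.g. 'osd.0' (hypothetical here).
from cephclient.client import CephClient

client = CephClient()
response, body = client.config_show('osd.0', 'osd_max_backfills')
print(body['output'])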
- if not _id.startswith(prefix): - raise CephClientInvalidOsdIdValue(osdid=_id) - _id = int(_id[len(prefix):]) - else: - raise CephClientTypeError( - name='_id', - actual=type(_id), - expected='int or string') - - kwargs['id'] = _id - return self._request('osd last-stat-seq', **kwargs) - - def auth_export(self, entity=None, body='json', timeout=None): - """write keyring for requested entity, or master keyring if none given""" - kwargs = dict(body=body, timeout=timeout) - if entity is not None: - if not isinstance(entity, six.string_types): - raise CephClientTypeError( - name='entity', - actual=type(entity), - expected=six.string_types) - kwargs['entity'] = entity - return self._request('auth export', **kwargs) - - def auth_get(self, entity, body='json', timeout=None): - """write keyring file with requested key""" - kwargs = dict(body=body, timeout=timeout) - if not isinstance(entity, six.string_types): - raise CephClientTypeError( - name='entity', - actual=type(entity), - expected=six.string_types) - - kwargs['entity'] = entity - return self._request('auth get', **kwargs) - - def auth_get_key(self, entity, body='json', timeout=None): - """display requested key""" - kwargs = dict(body=body, timeout=timeout) - if not isinstance(entity, six.string_types): - raise CephClientTypeError( - name='entity', - actual=type(entity), - expected=six.string_types) - - kwargs['entity'] = entity - return self._request('auth get-key', **kwargs) - - def auth_print_key(self, entity, body='json', timeout=None): - """display requested key""" - kwargs = dict(body=body, timeout=timeout) - if not isinstance(entity, six.string_types): - raise CephClientTypeError( - name='entity', - actual=type(entity), - expected=six.string_types) - - kwargs['entity'] = entity - return self._request('auth print-key', **kwargs) - - def auth_list(self, body='json', timeout=None): - """list authentication state""" - return self._request('auth list', body=body, timeout=timeout) - - def auth_ls(self, body='json', timeout=None): - """list authentication state""" - return self._request('auth ls', body=body, timeout=timeout) - - def auth_import(self, body='json', timeout=None): - """auth import: read keyring file from -i <path>""" - return self._request('auth import', body=body, timeout=timeout) - - def auth_add(self, entity, caps=None, body='json', timeout=None): - """add auth info for <entity> from input file, or random key if no input is given, and/or any caps specified in the command """ - kwargs = dict(body=body, timeout=timeout) - if not isinstance(entity, six.string_types): - raise CephClientTypeError( - name='entity', - actual=type(entity), - expected=six.string_types) - - kwargs['entity'] = entity - if caps is not None: - if isinstance(caps, list): - for item in caps: - if not isinstance(item, six.string_types): - raise CephClientTypeError( - name='caps', - actual=item, - expected='list of strings') - else: - if not isinstance(caps, six.string_types): - raise CephClientTypeError( - name='caps', - actual=type(caps), - expected=six.string_types) - if not isinstance(caps, list): - caps = [caps] - kwargs['caps'] = caps - return self._request('auth add', **kwargs) - - def auth_get_or_create_key( - self, entity, caps=None, body='json', timeout=None): - """get, or add, key for <entity> from system/caps pairs specified in the command. If key already exists, any given caps must match the existing caps for that key.
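# --- Editor's note: sketch of fetching a keyring secret via the auth
# wrappers above; entities follow ceph's 'type.id' naming and
# 'client.admin' is the usual bootstrap entity.
from cephclient.client import CephClient

client = CephClient()
response, body = client.auth_get_key('client.admin')
print(body['output']['key'])   # key field as in `ceph auth get-key -f json`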
""" - kwargs = dict(body=body, timeout=timeout) - if not isinstance(entity, six.string_types): - raise CephClientTypeError( - name='entity', - actual=type(entity), - expected=six.string_types) - - kwargs['entity'] = entity - if caps is not None: - if isinstance(caps, list): - for item in caps: - if not isinstance(item, six.string_types): - raise CephClientTypeError( - name='caps', - actual=item, - expected='list of strings') - else: - if not isinstance(caps, six.string_types): - raise CephClientTypeError( - name='caps', - actual=type(caps), - expected=six.string_types) - if not isinstance(caps, list): - caps = [caps] - kwargs['caps'] = caps - return self._request('auth get-or-create-key', **kwargs) - - def auth_get_or_create(self, entity, caps=None, - body='json', timeout=None): - """add auth info for from input file, or random key if no inputgiven, and/or any caps specified in the command """ - kwargs = dict(body=body, timeout=timeout) - if not isinstance(entity, six.string_types): - raise CephClientTypeError( - name='entity', - actual=type(entity), - expected=six.string_types) - - kwargs['entity'] = entity - if caps is not None: - if isinstance(caps, list): - for item in caps: - if not isinstance(item, six.string_types): - raise CephClientTypeError( - name='caps', - actual=item, - expected='list of strings') - else: - if not isinstance(caps, six.string_types): - raise CephClientTypeError( - name='caps', - actual=type(caps), - expected=six.string_types) - if not isinstance(caps, list): - caps = [caps] - kwargs['caps'] = caps - return self._request('auth get-or-create', **kwargs) - - def fs_authorize(self, filesystem, entity, caps, - body='json', timeout=None): - """add auth for to access file system based onfollowing directory and permissions pairs """ - kwargs = dict(body=body, timeout=timeout) - if not isinstance(filesystem, six.string_types): - raise CephClientTypeError( - name='filesystem', - actual=type(filesystem), - expected=six.string_types) - - kwargs['filesystem'] = filesystem - if not isinstance(entity, six.string_types): - raise CephClientTypeError( - name='entity', - actual=type(entity), - expected=six.string_types) - - kwargs['entity'] = entity - if isinstance(caps, list): - for item in caps: - if not isinstance(item, six.string_types): - raise CephClientTypeError( - name='caps', - actual=item, - expected='list of strings') - else: - if not isinstance(caps, six.string_types): - raise CephClientTypeError( - name='caps', - actual=type(caps), - expected=six.string_types) - - if not isinstance(caps, list): - caps = [caps] - kwargs['caps'] = caps - return self._request('fs authorize', **kwargs) - - def auth_caps(self, entity, caps, body='json', timeout=None): - """update caps for from caps specified in the command""" - kwargs = dict(body=body, timeout=timeout) - if not isinstance(entity, six.string_types): - raise CephClientTypeError( - name='entity', - actual=type(entity), - expected=six.string_types) - - kwargs['entity'] = entity - if isinstance(caps, list): - for item in caps: - if not isinstance(item, six.string_types): - raise CephClientTypeError( - name='caps', - actual=item, - expected='list of strings') - else: - if not isinstance(caps, six.string_types): - raise CephClientTypeError( - name='caps', - actual=type(caps), - expected=six.string_types) - - if not isinstance(caps, list): - caps = [caps] - kwargs['caps'] = caps - return self._request('auth caps', **kwargs) - - def auth_del(self, entity, body='json', timeout=None): - """delete all caps for """ - kwargs = 
dict(body=body, timeout=timeout) - if not isinstance(entity, six.string_types): - raise CephClientTypeError( - name='entity', - actual=type(entity), - expected=six.string_types) - - kwargs['entity'] = entity - return self._request('auth del', **kwargs) - - def auth_rm(self, entity, body='json', timeout=None): - """remove all caps for """ - kwargs = dict(body=body, timeout=timeout) - if not isinstance(entity, six.string_types): - raise CephClientTypeError( - name='entity', - actual=type(entity), - expected=six.string_types) - - kwargs['entity'] = entity - return self._request('auth rm', **kwargs) - - def compact(self, body='json', timeout=None): - """cause compaction of monitor's leveldb/rocksdb storage""" - return self._request('compact', body=body, timeout=timeout) - - def scrub(self, body='json', timeout=None): - """scrub the monitor stores""" - return self._request('scrub', body=body, timeout=timeout) - - def fsid(self, body='json', timeout=None): - """show cluster FSID/UUID""" - return self._request('fsid', body=body, timeout=timeout) - - def log(self, logtext, body='json', timeout=None): - """log supplied text to the monitor log""" - kwargs = dict(body=body, timeout=timeout) - if isinstance(logtext, list): - for item in logtext: - if not isinstance(item, six.string_types): - raise CephClientTypeError( - name='logtext', - actual=item, - expected='list of strings') - else: - if not isinstance(logtext, six.string_types): - raise CephClientTypeError( - name='logtext', - actual=type(logtext), - expected=six.string_types) - - if not isinstance(logtext, list): - logtext = [logtext] - kwargs['logtext'] = logtext - return self._request('log', **kwargs) - - LOG_LAST_LEVEL_VALUES = ['debug', 'info', 'sec', 'warn', 'error'] - - LOG_LAST_CHANNEL_VALUES = ['*', 'cluster', 'audit'] - - def log_last( - self, num=None, level=None, channel=None, body='json', - timeout=None): - """print last few lines of the cluster log""" - kwargs = dict(body=body, timeout=timeout) - if num is not None: - if not isinstance(num, six.integer_types): - raise CephClientTypeError( - name='num', - actual=type(num), - expected=int) - if num < 1: - raise CephClientValueOutOfBounds( - name='num', - actual=num, - min=1, - max='unlimited') - kwargs['num'] = num - if level is not None: - if not isinstance(level, six.string_types): - raise CephClientTypeError( - name='level', - actual=type(level), - expected=six.string_types) - supported = CephClient.LOG_LAST_LEVEL_VALUES - if level not in supported: - raise CephClientInvalidChoice( - function='log_last', - option='level', - value=level, - supported=', '.join(supported)) - kwargs['level'] = level - if channel is not None: - if not isinstance(channel, six.string_types): - raise CephClientTypeError( - name='channel', - actual=type(channel), - expected=six.string_types) - supported = CephClient.LOG_LAST_CHANNEL_VALUES - if channel not in supported: - raise CephClientInvalidChoice( - function='log_last', - option='channel', - value=channel, - supported=', '.join(supported)) - kwargs['channel'] = channel - return self._request('log last', **kwargs) - - def injectargs(self, injected_args, body='json', timeout=None): - """inject config arguments into monitor""" - kwargs = dict(body=body, timeout=timeout) - if isinstance(injected_args, list): - for item in injected_args: - if not isinstance(item, six.string_types): - raise CephClientTypeError( - name='injected_args', - actual=item, - expected='list of strings') - else: - if not isinstance(injected_args, six.string_types): - raise 
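# --- Editor's note: sketch for the cluster-log helper above; num must
# be >= 1, and level/channel are checked against LOG_LAST_LEVEL_VALUES
# and LOG_LAST_CHANNEL_VALUES before the request is sent.
from cephclient.client import CephClient

client = CephClient()
response, body = client.log_last(num=20, level='warn', channel='cluster')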
CephClientTypeError( - name='injected_args', - actual=type(injected_args), - expected=six.string_types) - - if not isinstance(injected_args, list): - injected_args = [injected_args] - kwargs['injected_args'] = injected_args - return self._request('injectargs', **kwargs) - - def status(self, body='json', timeout=None): - """show cluster status""" - return self._request('status', body=body, timeout=timeout) - - HEALTH_DETAIL_VALUES = ['detail'] - - def health(self, detail=None, body='json', timeout=None): - """show cluster health""" - kwargs = dict(body=body, timeout=timeout) - if detail is not None: - if not isinstance(detail, six.string_types): - raise CephClientTypeError( - name='detail', - actual=type(detail), - expected=six.string_types) - supported = CephClient.HEALTH_DETAIL_VALUES - if detail not in supported: - raise CephClientInvalidChoice( - function='health', - option='detail', - value=detail, - supported=', '.join(supported)) - kwargs['detail'] = detail - return self._request('health', **kwargs) - - def time_sync_status(self, body='json', timeout=None): - """show time sync status""" - return self._request('time-sync-status', - body=body, timeout=timeout) - - DF_DETAIL_VALUES = ['detail'] - - def df(self, detail=None, body='json', timeout=None): - """show cluster free space stats""" - kwargs = dict(body=body, timeout=timeout) - if detail is not None: - if not isinstance(detail, six.string_types): - raise CephClientTypeError( - name='detail', - actual=type(detail), - expected=six.string_types) - supported = CephClient.DF_DETAIL_VALUES - if detail not in supported: - raise CephClientInvalidChoice( - function='df', - option='detail', - value=detail, - supported=', '.join(supported)) - kwargs['detail'] = detail - return self._request('df', **kwargs) - - def report(self, tags=None, body='json', timeout=None): - """report full status of cluster, optional title tag strings""" - kwargs = dict(body=body, timeout=timeout) - if tags is not None: - if isinstance(tags, list): - for item in tags: - if not isinstance(item, six.string_types): - raise CephClientTypeError( - name='tags', - actual=item, - expected='list of strings') - else: - if not isinstance(tags, six.string_types): - raise CephClientTypeError( - name='tags', - actual=type(tags), - expected=six.string_types) - if not isinstance(tags, list): - tags = [tags] - kwargs['tags'] = tags - return self._request('report', **kwargs) - - def features(self, body='json', timeout=None): - """report of connected features""" - return self._request('features', body=body, timeout=timeout) - - def quorum_status(self, body='json', timeout=None): - """report status of monitor quorum""" - return self._request('quorum_status', body=body, timeout=timeout) - - def mon_status(self, body='json', timeout=None): - """report status of monitors""" - return self._request('mon_status', body=body, timeout=timeout) - - SYNC_FORCE_VALIDATE1_VALUES = ['--yes-i-really-mean-it'] - - SYNC_FORCE_VALIDATE2_VALUES = ['--i-know-what-i-am-doing'] - - def sync_force( - self, validate1=None, validate2=None, body='json', - timeout=None): - """force sync of and clear monitor store""" - kwargs = dict(body=body, timeout=timeout) - if validate1 is not None: - if not isinstance(validate1, six.string_types): - raise CephClientTypeError( - name='validate1', - actual=type(validate1), - expected=six.string_types) - supported = CephClient.SYNC_FORCE_VALIDATE1_VALUES - if validate1 not in supported: - raise CephClientInvalidChoice( - function='sync_force', - option='validate1', - 
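# --- Editor's note: sketch for the status helpers above; the only
# accepted detail value is the literal 'detail', mirroring
# `ceph health detail`.
from cephclient.client import CephClient

client = CephClient()
response, body = client.health(detail='detail')
print(body['output']['status'])   # e.g. 'HEALTH_OK', per `ceph health -f json`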
value=validate1, - supported=', '.join(supported)) - kwargs['validate1'] = validate1 - if validate2 is not None: - if not isinstance(validate2, six.string_types): - raise CephClientTypeError( - name='validate2', - actual=type(validate2), - expected=six.string_types) - supported = CephClient.SYNC_FORCE_VALIDATE2_VALUES - if validate2 not in supported: - raise CephClientInvalidChoice( - function='sync_force', - option='validate2', - value=validate2, - supported=', '.join(supported)) - kwargs['validate2'] = validate2 - return self._request('sync force', **kwargs) - - HEAP_HEAPCMD_VALUES = \ - ['dump', 'start_profiler', 'stop_profiler', - 'release', 'stats'] - - def heap(self, heapcmd, body='json', timeout=None): - """show heap usage info (available only if compiled with tcmalloc)""" - kwargs = dict(body=body, timeout=timeout) - if not isinstance(heapcmd, six.string_types): - raise CephClientTypeError( - name='heapcmd', - actual=type(heapcmd), - expected=six.string_types) - supported = CephClient.HEAP_HEAPCMD_VALUES - if heapcmd not in supported: - raise CephClientInvalidChoice( - function='heap', - option='heapcmd', - value=heapcmd, - supported=', '.join(supported)) - - kwargs['heapcmd'] = heapcmd - return self._request('heap', **kwargs) - - QUORUM_QUORUMCMD_VALUES = ['enter', 'exit'] - - def quorum(self, quorumcmd, body='json', timeout=None): - """enter or exit quorum""" - kwargs = dict(body=body, timeout=timeout) - if not isinstance(quorumcmd, six.string_types): - raise CephClientTypeError( - name='quorumcmd', - actual=type(quorumcmd), - expected=six.string_types) - supported = CephClient.QUORUM_QUORUMCMD_VALUES - if quorumcmd not in supported: - raise CephClientInvalidChoice( - function='quorum', - option='quorumcmd', - value=quorumcmd, - supported=', '.join(supported)) - - kwargs['quorumcmd'] = quorumcmd - return self._request('quorum', **kwargs) - - def tell(self, target, args, body='json', timeout=None): - """send a command to a specific daemon""" - kwargs = dict(body=body, timeout=timeout) - - kwargs['target'] = target - if isinstance(args, list): - for item in args: - if not isinstance(item, six.string_types): - raise CephClientTypeError( - name='args', - actual=item, - expected='list of strings') - else: - if not isinstance(args, six.string_types): - raise CephClientTypeError( - name='args', - actual=type(args), - expected=six.string_types) - - if not isinstance(args, list): - args = [args] - kwargs['args'] = args - return self._request('tell', **kwargs) - - def version(self, body='json', timeout=None): - """show mon daemon version""" - return self._request('version', body=body, timeout=timeout) - - NODE_LS__TYPE_VALUES = ['all', 'osd', 'mon', 'mds', 'mgr'] - - def node_ls(self, _type=None, body='json', timeout=None): - """list all nodes in cluster [type]""" - kwargs = dict(body=body, timeout=timeout) - if _type is not None: - if not isinstance(_type, six.string_types): - raise CephClientTypeError( - name='_type', - actual=type(_type), - expected=six.string_types) - supported = CephClient.NODE_LS__TYPE_VALUES - if _type not in supported: - raise CephClientInvalidChoice( - function='node_ls', - option='_type', - value=_type, - supported=', '.join(supported)) - kwargs['type'] = _type - return self._request('node ls', **kwargs) - - def mon_compact(self, body='json', timeout=None): - """cause compaction of monitor's leveldb/rocksdb storage""" - return self._request('mon compact', body=body, timeout=timeout) - - def mon_scrub(self, body='json', timeout=None): - """scrub the monitor 
stores""" - return self._request('mon scrub', body=body, timeout=timeout) - - MON_SYNC_FORCE_VALIDATE1_VALUES = ['--yes-i-really-mean-it'] - - MON_SYNC_FORCE_VALIDATE2_VALUES = ['--i-know-what-i-am-doing'] - - def mon_sync_force( - self, validate1=None, validate2=None, body='json', - timeout=None): - """force sync of and clear monitor store""" - kwargs = dict(body=body, timeout=timeout) - if validate1 is not None: - if not isinstance(validate1, six.string_types): - raise CephClientTypeError( - name='validate1', - actual=type(validate1), - expected=six.string_types) - supported = CephClient.MON_SYNC_FORCE_VALIDATE1_VALUES - if validate1 not in supported: - raise CephClientInvalidChoice( - function='mon_sync_force', - option='validate1', - value=validate1, - supported=', '.join(supported)) - kwargs['validate1'] = validate1 - if validate2 is not None: - if not isinstance(validate2, six.string_types): - raise CephClientTypeError( - name='validate2', - actual=type(validate2), - expected=six.string_types) - supported = CephClient.MON_SYNC_FORCE_VALIDATE2_VALUES - if validate2 not in supported: - raise CephClientInvalidChoice( - function='mon_sync_force', - option='validate2', - value=validate2, - supported=', '.join(supported)) - kwargs['validate2'] = validate2 - return self._request('mon sync force', **kwargs) - - def mon_metadata(self, _id=None, body='json', timeout=None): - """fetch metadata for mon """ - kwargs = dict(body=body, timeout=timeout) - if _id is not None: - if not isinstance(_id, six.string_types): - raise CephClientTypeError( - name='_id', - actual=type(_id), - expected=six.string_types) - kwargs['id'] = _id - return self._request('mon metadata', **kwargs) - - def mon_count_metadata(self, _property, body='json', timeout=None): - """count mons by metadata field property""" - kwargs = dict(body=body, timeout=timeout) - if not isinstance(_property, six.string_types): - raise CephClientTypeError( - name='_property', - actual=type(_property), - expected=six.string_types) - - kwargs['property'] = _property - return self._request('mon count-metadata', **kwargs) - - def mon_versions(self, body='json', timeout=None): - """check running versions of monitors""" - return self._request('mon versions', body=body, timeout=timeout) - - def versions(self, body='json', timeout=None): - """check running versions of ceph daemons""" - return self._request('versions', body=body, timeout=timeout) - - def mds_stat(self, body='json', timeout=None): - """show MDS status""" - return self._request('mds stat', body=body, timeout=timeout) - - def mds_dump(self, epoch=None, body='json', timeout=None): - """dump legacy MDS cluster info, optionally from epoch""" - kwargs = dict(body=body, timeout=timeout) - if epoch is not None: - if not isinstance(epoch, six.integer_types): - raise CephClientTypeError( - name='epoch', - actual=type(epoch), - expected=int) - if epoch < 0: - raise CephClientValueOutOfBounds( - name='epoch', - actual=epoch, - min=0, - max='unlimited') - kwargs['epoch'] = epoch - return self._request('mds dump', **kwargs) - - def fs_dump(self, epoch=None, body='json', timeout=None): - """dump all CephFS status, optionally from epoch""" - kwargs = dict(body=body, timeout=timeout) - if epoch is not None: - if not isinstance(epoch, six.integer_types): - raise CephClientTypeError( - name='epoch', - actual=type(epoch), - expected=int) - if epoch < 0: - raise CephClientValueOutOfBounds( - name='epoch', - actual=epoch, - min=0, - max='unlimited') - kwargs['epoch'] = epoch - return self._request('fs 
dump', **kwargs) - - def mds_getmap(self, epoch=None, body='json', timeout=None): - """get MDS map, optionally from epoch""" - kwargs = dict(body=body, timeout=timeout) - if epoch is not None: - if not isinstance(epoch, six.integer_types): - raise CephClientTypeError( - name='epoch', - actual=type(epoch), - expected=int) - if epoch < 0: - raise CephClientValueOutOfBounds( - name='epoch', - actual=epoch, - min=0, - max='unlimited') - kwargs['epoch'] = epoch - return self._request('mds getmap', **kwargs) - - def mds_metadata(self, who=None, body='json', timeout=None): - """fetch metadata for mds """ - kwargs = dict(body=body, timeout=timeout) - if who is not None: - if not isinstance(who, six.string_types): - raise CephClientTypeError( - name='who', - actual=type(who), - expected=six.string_types) - kwargs['who'] = who - return self._request('mds metadata', **kwargs) - - def mds_count_metadata(self, _property, body='json', timeout=None): - """count MDSs by metadata field property""" - kwargs = dict(body=body, timeout=timeout) - if not isinstance(_property, six.string_types): - raise CephClientTypeError( - name='_property', - actual=type(_property), - expected=six.string_types) - - kwargs['property'] = _property - return self._request('mds count-metadata', **kwargs) - - def mds_versions(self, body='json', timeout=None): - """check running versions of MDSs""" - return self._request('mds versions', body=body, timeout=timeout) - - def mds_tell(self, who, args, body='json', timeout=None): - """send command to particular mds""" - kwargs = dict(body=body, timeout=timeout) - if not isinstance(who, six.string_types): - raise CephClientTypeError( - name='who', - actual=type(who), - expected=six.string_types) - - kwargs['who'] = who - if isinstance(args, list): - for item in args: - if not isinstance(item, six.string_types): - raise CephClientTypeError( - name='args', - actual=item, - expected='list of strings') - else: - if not isinstance(args, six.string_types): - raise CephClientTypeError( - name='args', - actual=type(args), - expected=six.string_types) - - if not isinstance(args, list): - args = [args] - kwargs['args'] = args - return self._request('mds tell', **kwargs) - - def mds_compat_show(self, body='json', timeout=None): - """show mds compatibility settings""" - return self._request('mds compat show', body=body, timeout=timeout) - - def mds_stop(self, role, body='json', timeout=None): - """stop mds""" - kwargs = dict(body=body, timeout=timeout) - if not isinstance(role, six.string_types): - raise CephClientTypeError( - name='role', - actual=type(role), - expected=six.string_types) - - kwargs['role'] = role - return self._request('mds stop', **kwargs) - - def mds_deactivate(self, role, body='json', timeout=None): - """clean up specified MDS rank (use with `set max_mds` to shrink cluster)""" - kwargs = dict(body=body, timeout=timeout) - if not isinstance(role, six.string_types): - raise CephClientTypeError( - name='role', - actual=type(role), - expected=six.string_types) - - kwargs['role'] = role - return self._request('mds deactivate', **kwargs) - - def mds_set_max_mds(self, maxmds, body='json', timeout=None): - """set max MDS index""" - kwargs = dict(body=body, timeout=timeout) - if not isinstance(maxmds, six.integer_types): - raise CephClientTypeError( - name='maxmds', - actual=type(maxmds), - expected=int) - if maxmds < 0: - raise CephClientValueOutOfBounds( - name='maxmds', - actual=maxmds, - min=0, - max='unlimited') - - kwargs['maxmds'] = maxmds - return self._request('mds set_max_mds', 
**kwargs) - - MDS_SET_VAR_VALUES = \ - ['max_mds', 'max_file_size', 'inline_data', - 'allow_new_snaps', 'allow_multimds', - 'allow_multimds_snaps', 'allow_dirfrags'] - - def mds_set(self, var, val, confirm=None, body='json', timeout=None): - """set mds parameter to """ - kwargs = dict(body=body, timeout=timeout) - if not isinstance(var, six.string_types): - raise CephClientTypeError( - name='var', - actual=type(var), - expected=six.string_types) - supported = CephClient.MDS_SET_VAR_VALUES - if var not in supported: - raise CephClientInvalidChoice( - function='mds_set', - option='var', - value=var, - supported=', '.join(supported)) - - kwargs['var'] = var - if not isinstance(val, six.string_types): - raise CephClientTypeError( - name='val', - actual=type(val), - expected=six.string_types) - - kwargs['val'] = val - if confirm is not None: - if not isinstance(confirm, six.string_types): - raise CephClientTypeError( - name='confirm', - actual=type(confirm), - expected=six.string_types) - kwargs['confirm'] = confirm - return self._request('mds set', **kwargs) - - def mds_set_state(self, gid, state, body='json', timeout=None): - """set mds state of to """ - kwargs = dict(body=body, timeout=timeout) - if not isinstance(gid, six.integer_types): - raise CephClientTypeError( - name='gid', - actual=type(gid), - expected=int) - if gid < 0: - raise CephClientValueOutOfBounds( - name='gid', - actual=gid, - min=0, - max='unlimited') - - kwargs['gid'] = gid - if not isinstance(state, six.integer_types): - raise CephClientTypeError( - name='state', - actual=type(state), - expected=int) - if state < 0 or state > 20: - raise CephClientValueOutOfBounds( - name='state', - actual=state, - min=0, - max=20) - - kwargs['state'] = state - return self._request('mds set_state', **kwargs) - - def mds_fail(self, role_or_gid, body='json', timeout=None): - """Mark MDS failed: trigger a failover if a standby is available""" - kwargs = dict(body=body, timeout=timeout) - if not isinstance(role_or_gid, six.string_types): - raise CephClientTypeError( - name='role_or_gid', - actual=type(role_or_gid), - expected=six.string_types) - - kwargs['role_or_gid'] = role_or_gid - return self._request('mds fail', **kwargs) - - def mds_repaired(self, role, body='json', timeout=None): - """mark a damaged MDS rank as no longer damaged""" - kwargs = dict(body=body, timeout=timeout) - if not isinstance(role, six.string_types): - raise CephClientTypeError( - name='role', - actual=type(role), - expected=six.string_types) - - kwargs['role'] = role - return self._request('mds repaired', **kwargs) - - def mds_rm(self, gid, body='json', timeout=None): - """remove nonactive mds""" - kwargs = dict(body=body, timeout=timeout) - if not isinstance(gid, six.integer_types): - raise CephClientTypeError( - name='gid', - actual=type(gid), - expected=int) - if gid < 0: - raise CephClientValueOutOfBounds( - name='gid', - actual=gid, - min=0, - max='unlimited') - - kwargs['gid'] = gid - return self._request('mds rm', **kwargs) - - def mds_rmfailed(self, role, confirm=None, body='json', timeout=None): - """remove failed mds""" - kwargs = dict(body=body, timeout=timeout) - if not isinstance(role, six.string_types): - raise CephClientTypeError( - name='role', - actual=type(role), - expected=six.string_types) - - kwargs['role'] = role - if confirm is not None: - if not isinstance(confirm, six.string_types): - raise CephClientTypeError( - name='confirm', - actual=type(confirm), - expected=six.string_types) - kwargs['confirm'] = confirm - return self._request('mds 
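# --- Editor's note: sketch for the mds setters above; var is checked
# against MDS_SET_VAR_VALUES and val must be a string, so numeric
# settings are passed quoted.
from cephclient.client import CephClient

client = CephClient()
response, body = client.mds_set('max_mds', '2')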
-
-    def mds_cluster_down(self, body='json', timeout=None):
-        """take MDS cluster down"""
-        return self._request('mds cluster_down',
-                             body=body, timeout=timeout)
-
-    def mds_cluster_up(self, body='json', timeout=None):
-        """bring MDS cluster up"""
-        return self._request('mds cluster_up', body=body, timeout=timeout)
-
-    def mds_compat_rm_compat(self, feature, body='json', timeout=None):
-        """remove compatible feature"""
-        kwargs = dict(body=body, timeout=timeout)
-        if not isinstance(feature, six.integer_types):
-            raise CephClientTypeError(
-                name='feature',
-                actual=type(feature),
-                expected=int)
-        if feature < 0:
-            raise CephClientValueOutOfBounds(
-                name='feature',
-                actual=feature,
-                min=0,
-                max='unlimited')
-
-        kwargs['feature'] = feature
-        return self._request('mds compat rm_compat', **kwargs)
-
-    def mds_compat_rm_incompat(self, feature, body='json', timeout=None):
-        """remove incompatible feature"""
-        kwargs = dict(body=body, timeout=timeout)
-        if not isinstance(feature, six.integer_types):
-            raise CephClientTypeError(
-                name='feature',
-                actual=type(feature),
-                expected=int)
-        if feature < 0:
-            raise CephClientValueOutOfBounds(
-                name='feature',
-                actual=feature,
-                min=0,
-                max='unlimited')
-
-        kwargs['feature'] = feature
-        return self._request('mds compat rm_incompat', **kwargs)
-
-    def mds_add_data_pool(self, pool, body='json', timeout=None):
-        """add data pool <pool>"""
-        kwargs = dict(body=body, timeout=timeout)
-        if not isinstance(pool, six.string_types):
-            raise CephClientTypeError(
-                name='pool',
-                actual=type(pool),
-                expected=six.string_types)
-
-        kwargs['pool'] = pool
-        return self._request('mds add_data_pool', **kwargs)
-
-    def mds_rm_data_pool(self, pool, body='json', timeout=None):
-        """remove data pool <pool>"""
-        kwargs = dict(body=body, timeout=timeout)
-        if not isinstance(pool, six.string_types):
-            raise CephClientTypeError(
-                name='pool',
-                actual=type(pool),
-                expected=six.string_types)
-
-        kwargs['pool'] = pool
-        return self._request('mds rm_data_pool', **kwargs)
-
-    def mds_remove_data_pool(self, pool, body='json', timeout=None):
-        """remove data pool <pool>"""
-        kwargs = dict(body=body, timeout=timeout)
-        if not isinstance(pool, six.string_types):
-            raise CephClientTypeError(
-                name='pool',
-                actual=type(pool),
-                expected=six.string_types)
-
-        kwargs['pool'] = pool
-        return self._request('mds remove_data_pool', **kwargs)
-
-    MDS_NEWFS_SURE_VALUES = ['--yes-i-really-mean-it']
-
-    def mds_newfs(self, metadata, data, sure=None,
-                  body='json', timeout=None):
-        """make new filesystem using pools <metadata> and <data>"""
-        kwargs = dict(body=body, timeout=timeout)
-        if not isinstance(metadata, six.integer_types):
-            raise CephClientTypeError(
-                name='metadata',
-                actual=type(metadata),
-                expected=int)
-        if metadata < 0:
-            raise CephClientValueOutOfBounds(
-                name='metadata',
-                actual=metadata,
-                min=0,
-                max='unlimited')
-
-        kwargs['metadata'] = metadata
-        if not isinstance(data, six.integer_types):
-            raise CephClientTypeError(
-                name='data',
-                actual=type(data),
-                expected=int)
-        if data < 0:
-            raise CephClientValueOutOfBounds(
-                name='data',
-                actual=data,
-                min=0,
-                max='unlimited')
-
-        kwargs['data'] = data
-        if sure is not None:
-            if not isinstance(sure, six.string_types):
-                raise CephClientTypeError(
-                    name='sure',
-                    actual=type(sure),
-                    expected=six.string_types)
-            supported = CephClient.MDS_NEWFS_SURE_VALUES
-            if sure not in supported:
-                raise CephClientInvalidChoice(
-                    function='mds_newfs',
-                    option='sure',
-                    value=sure,
-                    supported=', '.join(supported))
-            kwargs['sure'] = sure
-        return self._request('mds newfs', **kwargs)
-
-    FS_NEW_FORCE_VALUES = ['--force']
-
-    FS_NEW_SURE_VALUES = ['--allow-dangerous-metadata-overlay']
-
-    def fs_new(self, fs_name, metadata, data, force=None,
-               sure=None, body='json', timeout=None):
-        """make new filesystem using named pools <metadata> and <data>"""
-        kwargs = dict(body=body, timeout=timeout)
-        if not isinstance(fs_name, six.string_types):
-            raise CephClientTypeError(
-                name='fs_name',
-                actual=type(fs_name),
-                expected=six.string_types)
-
-        kwargs['fs_name'] = fs_name
-        if not isinstance(metadata, six.string_types):
-            raise CephClientTypeError(
-                name='metadata',
-                actual=type(metadata),
-                expected=six.string_types)
-
-        kwargs['metadata'] = metadata
-        if not isinstance(data, six.string_types):
-            raise CephClientTypeError(
-                name='data',
-                actual=type(data),
-                expected=six.string_types)
-
-        kwargs['data'] = data
-        if force is not None:
-            if not isinstance(force, six.string_types):
-                raise CephClientTypeError(
-                    name='force',
-                    actual=type(force),
-                    expected=six.string_types)
-            supported = CephClient.FS_NEW_FORCE_VALUES
-            if force not in supported:
-                raise CephClientInvalidChoice(
-                    function='fs_new',
-                    option='force',
-                    value=force,
-                    supported=', '.join(supported))
-            kwargs['force'] = force
-        if sure is not None:
-            if not isinstance(sure, six.string_types):
-                raise CephClientTypeError(
-                    name='sure',
-                    actual=type(sure),
-                    expected=six.string_types)
-            supported = CephClient.FS_NEW_SURE_VALUES
-            if sure not in supported:
-                raise CephClientInvalidChoice(
-                    function='fs_new',
-                    option='sure',
-                    value=sure,
-                    supported=', '.join(supported))
-            kwargs['sure'] = sure
-        return self._request('fs new', **kwargs)
-
-    FS_RM_SURE_VALUES = ['--yes-i-really-mean-it']
-
-    def fs_rm(self, fs_name, sure=None, body='json', timeout=None):
-        """disable the named filesystem"""
-        kwargs = dict(body=body, timeout=timeout)
-        if not isinstance(fs_name, six.string_types):
-            raise CephClientTypeError(
-                name='fs_name',
-                actual=type(fs_name),
-                expected=six.string_types)
-
-        kwargs['fs_name'] = fs_name
-        if sure is not None:
-            if not isinstance(sure, six.string_types):
-                raise CephClientTypeError(
-                    name='sure',
-                    actual=type(sure),
-                    expected=six.string_types)
-            supported = CephClient.FS_RM_SURE_VALUES
-            if sure not in supported:
-                raise CephClientInvalidChoice(
-                    function='fs_rm',
-                    option='sure',
-                    value=sure,
-                    supported=', '.join(supported))
-            kwargs['sure'] = sure
-        return self._request('fs rm', **kwargs)
-
-    FS_RESET_SURE_VALUES = ['--yes-i-really-mean-it']
-
-    def fs_reset(self, fs_name, sure=None, body='json', timeout=None):
-        """disaster recovery only: reset to a single-MDS map"""
-        kwargs = dict(body=body, timeout=timeout)
-        if not isinstance(fs_name, six.string_types):
-            raise CephClientTypeError(
-                name='fs_name',
-                actual=type(fs_name),
-                expected=six.string_types)
-
-        kwargs['fs_name'] = fs_name
-        if sure is not None:
-            if not isinstance(sure, six.string_types):
-                raise CephClientTypeError(
-                    name='sure',
-                    actual=type(sure),
-                    expected=six.string_types)
-            supported = CephClient.FS_RESET_SURE_VALUES
-            if sure not in supported:
-                raise CephClientInvalidChoice(
-                    function='fs_reset',
-                    option='sure',
-                    value=sure,
-                    supported=', '.join(supported))
-            kwargs['sure'] = sure
-        return self._request('fs reset', **kwargs)
-
-    def fs_ls(self, body='json', timeout=None):
-        """list filesystems"""
-        return self._request('fs ls', body=body, timeout=timeout)
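The dangerous options are deliberately not booleans: fs_new() and fs_rm() accept only the exact sentinel strings collected in FS_NEW_FORCE_VALUES, FS_NEW_SURE_VALUES and FS_RM_SURE_VALUES, so a caller has to spell out the confirmation. A sketch, reusing the assumed client from above; the filesystem and pool names are placeholders:

    client.fs_new('cephfs', 'cephfs_metadata', 'cephfs_data',
                  force='--force')
    client.fs_rm('cephfs', sure='--yes-i-really-mean-it')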
-
-    def fs_get(self, fs_name, body='json', timeout=None):
-        """get info about one filesystem"""
-        kwargs = dict(body=body, timeout=timeout)
-        if not isinstance(fs_name, six.string_types):
-            raise CephClientTypeError(
-                name='fs_name',
-                actual=type(fs_name),
-                expected=six.string_types)
-
-        kwargs['fs_name'] = fs_name
-        return self._request('fs get', **kwargs)
-
-    FS_SET_VAR_VALUES = \
-        ['max_mds', 'max_file_size', 'allow_new_snaps',
-         'inline_data', 'cluster_down',
-         'allow_dirfrags', 'balancer',
-         'standby_count_wanted', 'session_timeout',
-         'session_autoclose', 'down', 'joinable',
-         'min_compat_client']
-
-    def fs_set(self, fs_name, var, val, confirm=None,
-               body='json', timeout=None):
-        """set fs parameter <var> to <val>"""
-        kwargs = dict(body=body, timeout=timeout)
-        if not isinstance(fs_name, six.string_types):
-            raise CephClientTypeError(
-                name='fs_name',
-                actual=type(fs_name),
-                expected=six.string_types)
-
-        kwargs['fs_name'] = fs_name
-        if not isinstance(var, six.string_types):
-            raise CephClientTypeError(
-                name='var',
-                actual=type(var),
-                expected=six.string_types)
-        supported = CephClient.FS_SET_VAR_VALUES
-        if var not in supported:
-            raise CephClientInvalidChoice(
-                function='fs_set',
-                option='var',
-                value=var,
-                supported=', '.join(supported))
-
-        kwargs['var'] = var
-        if not isinstance(val, six.string_types):
-            raise CephClientTypeError(
-                name='val',
-                actual=type(val),
-                expected=six.string_types)
-
-        kwargs['val'] = val
-        if confirm is not None:
-            if not isinstance(confirm, six.string_types):
-                raise CephClientTypeError(
-                    name='confirm',
-                    actual=type(confirm),
-                    expected=six.string_types)
-            kwargs['confirm'] = confirm
-        return self._request('fs set', **kwargs)
-
-    FS_FLAG_SET_FLAG_NAME_VALUES = ['enable_multiple']
-
-    FS_FLAG_SET_CONFIRM_VALUES = ['--yes-i-really-mean-it']
-
-    def fs_flag_set(self, flag_name, val, confirm=None,
-                    body='json', timeout=None):
-        """Set a global CephFS flag"""
-        kwargs = dict(body=body, timeout=timeout)
-        if not isinstance(flag_name, six.string_types):
-            raise CephClientTypeError(
-                name='flag_name',
-                actual=type(flag_name),
-                expected=six.string_types)
-        supported = CephClient.FS_FLAG_SET_FLAG_NAME_VALUES
-        if flag_name not in supported:
-            raise CephClientInvalidChoice(
-                function='fs_flag_set',
-                option='flag_name',
-                value=flag_name,
-                supported=', '.join(supported))
-
-        kwargs['flag_name'] = flag_name
-        if not isinstance(val, six.string_types):
-            raise CephClientTypeError(
-                name='val',
-                actual=type(val),
-                expected=six.string_types)
-
-        kwargs['val'] = val
-        if confirm is not None:
-            if not isinstance(confirm, six.string_types):
-                raise CephClientTypeError(
-                    name='confirm',
-                    actual=type(confirm),
-                    expected=six.string_types)
-            supported = CephClient.FS_FLAG_SET_CONFIRM_VALUES
-            if confirm not in supported:
-                raise CephClientInvalidChoice(
-                    function='fs_flag_set',
-                    option='confirm',
-                    value=confirm,
-                    supported=', '.join(supported))
-            kwargs['confirm'] = confirm
-        return self._request('fs flag set', **kwargs)
-
-    def fs_add_data_pool(self, fs_name, pool, body='json', timeout=None):
-        """add data pool <pool>"""
-        kwargs = dict(body=body, timeout=timeout)
-        if not isinstance(fs_name, six.string_types):
-            raise CephClientTypeError(
-                name='fs_name',
-                actual=type(fs_name),
-                expected=six.string_types)
-
-        kwargs['fs_name'] = fs_name
-        if not isinstance(pool, six.string_types):
-            raise CephClientTypeError(
-                name='pool',
-                actual=type(pool),
-                expected=six.string_types)
-
-        kwargs['pool'] = pool
-        return self._request('fs add_data_pool', **kwargs)
-
-    def fs_rm_data_pool(self, fs_name, pool, body='json', timeout=None):
-        """remove data pool <pool>"""
-        kwargs = dict(body=body, timeout=timeout)
-        if not isinstance(fs_name, six.string_types):
-            raise CephClientTypeError(
-                name='fs_name',
-                actual=type(fs_name),
-                expected=six.string_types)
-
-        kwargs['fs_name'] = fs_name
-        if not isinstance(pool, six.string_types):
-            raise CephClientTypeError(
-                name='pool',
-                actual=type(pool),
-                expected=six.string_types)
-
-        kwargs['pool'] = pool
-        return self._request('fs rm_data_pool', **kwargs)
-
-    def fs_set_default(self, fs_name, body='json', timeout=None):
-        """set the default to the named filesystem"""
-        kwargs = dict(body=body, timeout=timeout)
-        if not isinstance(fs_name, six.string_types):
-            raise CephClientTypeError(
-                name='fs_name',
-                actual=type(fs_name),
-                expected=six.string_types)
-
-        kwargs['fs_name'] = fs_name
-        return self._request('fs set_default', **kwargs)
-
-    def mon_dump(self, epoch=None, body='json', timeout=None):
-        """dump formatted monmap (optionally from epoch)"""
-        kwargs = dict(body=body, timeout=timeout)
-        if epoch is not None:
-            if not isinstance(epoch, six.integer_types):
-                raise CephClientTypeError(
-                    name='epoch',
-                    actual=type(epoch),
-                    expected=int)
-            if epoch < 0:
-                raise CephClientValueOutOfBounds(
-                    name='epoch',
-                    actual=epoch,
-                    min=0,
-                    max='unlimited')
-            kwargs['epoch'] = epoch
-        return self._request('mon dump', **kwargs)
-
-    def mon_stat(self, body='json', timeout=None):
-        """summarize monitor status"""
-        return self._request('mon stat', body=body, timeout=timeout)
-
-    def mon_getmap(self, epoch=None, body='json', timeout=None):
-        """get monmap"""
-        kwargs = dict(body=body, timeout=timeout)
-        if epoch is not None:
-            if not isinstance(epoch, six.integer_types):
-                raise CephClientTypeError(
-                    name='epoch',
-                    actual=type(epoch),
-                    expected=int)
-            if epoch < 0:
-                raise CephClientValueOutOfBounds(
-                    name='epoch',
-                    actual=epoch,
-                    min=0,
-                    max='unlimited')
-            kwargs['epoch'] = epoch
-        return self._request('mon getmap', **kwargs)
-
-    def mon_add(self, name, addr, body='json', timeout=None):
-        """add new monitor named <name> at <addr>"""
-        kwargs = dict(body=body, timeout=timeout)
-        if not isinstance(name, six.string_types):
-            raise CephClientTypeError(
-                name='name',
-                actual=type(name),
-                expected=six.string_types)
-
-        kwargs['name'] = name
-        try:
-            ipaddress.ip_address(addr)
-        except ValueError:
-            raise CephClientInvalidIPAddr(
-                name='addr',
-                actual=addr)
-
-        kwargs['addr'] = addr
-        return self._request('mon add', **kwargs)
-
-    def mon_rm(self, name, body='json', timeout=None):
-        """remove monitor named <name>"""
-        kwargs = dict(body=body, timeout=timeout)
-        if not isinstance(name, six.string_types):
-            raise CephClientTypeError(
-                name='name',
-                actual=type(name),
-                expected=six.string_types)
-
-        kwargs['name'] = name
-        return self._request('mon rm', **kwargs)
-
-    def mon_remove(self, name, body='json', timeout=None):
-        """remove monitor named <name>"""
-        kwargs = dict(body=body, timeout=timeout)
-        if not isinstance(name, six.string_types):
-            raise CephClientTypeError(
-                name='name',
-                actual=type(name),
-                expected=six.string_types)
-
-        kwargs['name'] = name
-        return self._request('mon remove', **kwargs)
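mon_add() is one of the few wrappers here that validates a value's format rather than just its type: the address must parse with ipaddress.ip_address(), otherwise CephClientInvalidIPAddr is raised locally and nothing is sent. A sketch; the monitor names and address are placeholders:

    from cephclient.exception import CephClientInvalidIPAddr

    client.mon_add('mon-c', '192.168.204.4')   # parses, request is sent
    try:
        client.mon_add('mon-d', 'not-an-ip')
    except CephClientInvalidIPAddr:
        pass                                   # rejected before dispatch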
-
-    MON_FEATURE_LS_WITH_VALUE_VALUES = ['--with-value']
-
-    def mon_feature_ls(self, with_value=None, body='json', timeout=None):
-        """list available mon map features to be set/unset"""
-        kwargs = dict(body=body, timeout=timeout)
-        if with_value is not None:
-            if not isinstance(with_value, six.string_types):
-                raise CephClientTypeError(
-                    name='with_value',
-                    actual=type(with_value),
-                    expected=six.string_types)
-            supported = \
-                CephClient.MON_FEATURE_LS_WITH_VALUE_VALUES
-            if with_value not in supported:
-                raise CephClientInvalidChoice(
-                    function='mon_feature_ls',
-                    option='with_value',
-                    value=with_value,
-                    supported=', '.join(supported))
-            kwargs['with_value'] = with_value
-        return self._request('mon feature ls', **kwargs)
-
-    MON_FEATURE_SET_SURE_VALUES = ['--yes-i-really-mean-it']
-
-    def mon_feature_set(self, feature_name, sure=None,
-                        body='json', timeout=None):
-        """set provided feature on mon map"""
-        kwargs = dict(body=body, timeout=timeout)
-        if not isinstance(feature_name, six.string_types):
-            raise CephClientTypeError(
-                name='feature_name',
-                actual=type(feature_name),
-                expected=six.string_types)
-
-        kwargs['feature_name'] = feature_name
-        if sure is not None:
-            if not isinstance(sure, six.string_types):
-                raise CephClientTypeError(
-                    name='sure',
-                    actual=type(sure),
-                    expected=six.string_types)
-            supported = CephClient.MON_FEATURE_SET_SURE_VALUES
-            if sure not in supported:
-                raise CephClientInvalidChoice(
-                    function='mon_feature_set',
-                    option='sure',
-                    value=sure,
-                    supported=', '.join(supported))
-            kwargs['sure'] = sure
-        return self._request('mon feature set', **kwargs)
-
-    def osd_stat(self, body='json', timeout=None):
-        """print summary of OSD map"""
-        return self._request('osd stat', body=body, timeout=timeout)
-
-    def osd_dump(self, epoch=None, body='json', timeout=None):
-        """print summary of OSD map"""
-        kwargs = dict(body=body, timeout=timeout)
-        if epoch is not None:
-            if not isinstance(epoch, six.integer_types):
-                raise CephClientTypeError(
-                    name='epoch',
-                    actual=type(epoch),
-                    expected=int)
-            if epoch < 0:
-                raise CephClientValueOutOfBounds(
-                    name='epoch',
-                    actual=epoch,
-                    min=0,
-                    max='unlimited')
-            kwargs['epoch'] = epoch
-        return self._request('osd dump', **kwargs)
-
-    OSD_TREE_STATES_VALUES = ['up', 'down', 'in', 'out', 'destroyed']
-
-    def osd_tree(self, epoch=None, states=None, body='json', timeout=None):
-        """print OSD tree"""
-        kwargs = dict(body=body, timeout=timeout)
-        if epoch is not None:
-            if not isinstance(epoch, six.integer_types):
-                raise CephClientTypeError(
-                    name='epoch',
-                    actual=type(epoch),
-                    expected=int)
-            if epoch < 0:
-                raise CephClientValueOutOfBounds(
-                    name='epoch',
-                    actual=epoch,
-                    min=0,
-                    max='unlimited')
-            kwargs['epoch'] = epoch
-        if states is not None:
-            if not isinstance(states, six.string_types):
-                raise CephClientTypeError(
-                    name='states',
-                    actual=type(states),
-                    expected=six.string_types)
-            supported = CephClient.OSD_TREE_STATES_VALUES
-            if states not in supported:
-                raise CephClientInvalidChoice(
-                    function='osd_tree',
-                    option='states',
-                    value=states,
-                    supported=', '.join(supported))
-            if not isinstance(states, list):
-                states = [states]
-            kwargs['states'] = states
-        return self._request('osd tree', **kwargs)
-
-    OSD_TREE_FROM_STATES_VALUES = [
-        'up', 'down', 'in', 'out', 'destroyed']
-
-    def osd_tree_from(
-            self, bucket, epoch=None, states=None, body='json',
-            timeout=None):
-        """print OSD tree in bucket"""
-        kwargs = dict(body=body, timeout=timeout)
-        if not isinstance(bucket, six.string_types):
-            raise CephClientTypeError(
-                name='bucket',
-                actual=type(bucket),
-                expected=six.string_types)
-
-        kwargs['bucket'] = bucket
-        if epoch is not None:
-            if not isinstance(epoch, six.integer_types):
-                raise CephClientTypeError(
-                    name='epoch',
-                    actual=type(epoch),
-                    expected=int)
-            if epoch < 0:
-                raise CephClientValueOutOfBounds(
-                    name='epoch',
-                    actual=epoch,
-                    min=0,
-                    max='unlimited')
-            kwargs['epoch'] = epoch
-        if states is not None:
-            if not isinstance(states, six.string_types):
-                raise CephClientTypeError(
-                    name='states',
-                    actual=type(states),
-                    expected=six.string_types)
-            supported = CephClient.OSD_TREE_FROM_STATES_VALUES
-            if states not in supported:
-                raise CephClientInvalidChoice(
-                    function='osd_tree_from',
-                    option='states',
-                    value=states,
-                    supported=', '.join(supported))
-            if not isinstance(states, list):
-                states = [states]
-            kwargs['states'] = states
-        return self._request('osd tree-from', **kwargs)
-
-    def osd_ls(self, epoch=None, body='json', timeout=None):
-        """show all OSD ids"""
-        kwargs = dict(body=body, timeout=timeout)
-        if epoch is not None:
-            if not isinstance(epoch, six.integer_types):
-                raise CephClientTypeError(
-                    name='epoch',
-                    actual=type(epoch),
-                    expected=int)
-            if epoch < 0:
-                raise CephClientValueOutOfBounds(
-                    name='epoch',
-                    actual=epoch,
-                    min=0,
-                    max='unlimited')
-            kwargs['epoch'] = epoch
-        return self._request('osd ls', **kwargs)
-
-    def osd_getmap(self, epoch=None, body='json', timeout=None):
-        """get OSD map"""
-        kwargs = dict(body=body, timeout=timeout)
-        if epoch is not None:
-            if not isinstance(epoch, six.integer_types):
-                raise CephClientTypeError(
-                    name='epoch',
-                    actual=type(epoch),
-                    expected=int)
-            if epoch < 0:
-                raise CephClientValueOutOfBounds(
-                    name='epoch',
-                    actual=epoch,
-                    min=0,
-                    max='unlimited')
-            kwargs['epoch'] = epoch
-        return self._request('osd getmap', **kwargs)
-
-    def osd_getcrushmap(self, epoch=None, body='json', timeout=None):
-        """get CRUSH map"""
-        kwargs = dict(body=body, timeout=timeout)
-        if epoch is not None:
-            if not isinstance(epoch, six.integer_types):
-                raise CephClientTypeError(
-                    name='epoch',
-                    actual=type(epoch),
-                    expected=int)
-            if epoch < 0:
-                raise CephClientValueOutOfBounds(
-                    name='epoch',
-                    actual=epoch,
-                    min=0,
-                    max='unlimited')
-            kwargs['epoch'] = epoch
-        return self._request('osd getcrushmap', **kwargs)
-
-    def osd_getmaxosd(self, body='json', timeout=None):
-        """show largest OSD id"""
-        return self._request('osd getmaxosd', body=body, timeout=timeout)
-
-    def osd_ls_tree(self, name, epoch=None, body='json', timeout=None):
-        """show OSD ids under bucket <name> in the CRUSH map"""
-        kwargs = dict(body=body, timeout=timeout)
-        if not isinstance(name, six.string_types):
-            raise CephClientTypeError(
-                name='name',
-                actual=type(name),
-                expected=six.string_types)
-
-        kwargs['name'] = name
-        if epoch is not None:
-            if not isinstance(epoch, six.integer_types):
-                raise CephClientTypeError(
-                    name='epoch',
-                    actual=type(epoch),
-                    expected=int)
-            if epoch < 0:
-                raise CephClientValueOutOfBounds(
-                    name='epoch',
-                    actual=epoch,
-                    min=0,
-                    max='unlimited')
-            kwargs['epoch'] = epoch
-        return self._request('osd ls-tree', **kwargs)
-
-    def osd_find(self, _id, body='json', timeout=None):
-        """find osd <id> in the CRUSH map and show its location"""
-        kwargs = dict(body=body, timeout=timeout)
-        if isinstance(_id, six.integer_types):
-            pass
-        elif isinstance(_id, six.string_types):
-            _id = _id.lower()
-            prefix = 'osd.'
-            if not _id.startswith(prefix):
-                raise CephClientInvalidOsdIdValue(osdid=_id)
-            _id = int(_id[len(prefix):])
-        else:
-            raise CephClientTypeError(
-                name='_id',
-                actual=type(_id),
-                expected='int or string')
-
-        kwargs['id'] = _id
-        return self._request('osd find', **kwargs)
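osd_find() (like the other OSD-id wrappers that follow) normalises its id argument: a plain int passes through, while a string must carry the 'osd.' prefix and is lowercased and stripped to its numeric part; anything else raises CephClientInvalidOsdIdValue. Sketch, with the same assumed client:

    client.osd_find(3)          # numeric id
    client.osd_find('osd.3')    # same OSD, prefixed form
    client.osd_find('OSD.3')    # lowercased first, also accepted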
-
-    def osd_metadata(self, _id=None, body='json', timeout=None):
-        """fetch metadata for osd {id} (default all)"""
-        kwargs = dict(body=body, timeout=timeout)
-        if _id is not None:
-            if isinstance(_id, six.integer_types):
-                pass
-            elif isinstance(_id, six.string_types):
-                _id = _id.lower()
-                prefix = 'osd.'
-                if not _id.startswith(prefix):
-                    raise CephClientInvalidOsdIdValue(osdid=_id)
-                _id = int(_id[len(prefix):])
-            else:
-                raise CephClientTypeError(
-                    name='_id',
-                    actual=type(_id),
-                    expected='int or string')
-            kwargs['id'] = _id
-        return self._request('osd metadata', **kwargs)
-
-    def osd_count_metadata(self, _property, body='json', timeout=None):
-        """count OSDs by metadata field property"""
-        kwargs = dict(body=body, timeout=timeout)
-        if not isinstance(_property, six.string_types):
-            raise CephClientTypeError(
-                name='_property',
-                actual=type(_property),
-                expected=six.string_types)
-
-        kwargs['property'] = _property
-        return self._request('osd count-metadata', **kwargs)
-
-    def osd_versions(self, body='json', timeout=None):
-        """check running versions of OSDs"""
-        return self._request('osd versions', body=body, timeout=timeout)
-
-    def osd_map(self, pool, _object, nspace=None,
-                body='json', timeout=None):
-        """find pg for <object> in <pool> with [namespace]"""
-        kwargs = dict(body=body, timeout=timeout)
-        if not isinstance(pool, six.string_types):
-            raise CephClientTypeError(
-                name='pool',
-                actual=type(pool),
-                expected=six.string_types)
-
-        kwargs['pool'] = pool
-
-        kwargs['object'] = _object
-        if nspace is not None:
-            if not isinstance(nspace, six.string_types):
-                raise CephClientTypeError(
-                    name='nspace',
-                    actual=type(nspace),
-                    expected=six.string_types)
-            kwargs['nspace'] = nspace
-        return self._request('osd map', **kwargs)
-
-    def osd_lspools(self, auid=None, body='json', timeout=None):
-        """list pools"""
-        kwargs = dict(body=body, timeout=timeout)
-        if auid is not None:
-            if not isinstance(auid, six.integer_types):
-                raise CephClientTypeError(
-                    name='auid',
-                    actual=type(auid),
-                    expected=int)
-            kwargs['auid'] = auid
-        return self._request('osd lspools', **kwargs)
-
-    def osd_crush_rule_list(self, body='json', timeout=None):
-        """list crush rules"""
-        return self._request('osd crush rule list',
-                             body=body, timeout=timeout)
-
-    def osd_crush_rule_ls(self, body='json', timeout=None):
-        """list crush rules"""
-        return self._request('osd crush rule ls',
-                             body=body, timeout=timeout)
-
-    def osd_crush_rule_ls_by_class(
-            self, _class, body='json', timeout=None):
-        """list all crush rules that reference the same <class>"""
-        kwargs = dict(body=body, timeout=timeout)
-        if not isinstance(_class, six.string_types):
-            raise CephClientTypeError(
-                name='_class',
-                actual=type(_class),
-                expected=six.string_types)
-
-        kwargs['class'] = _class
-        return self._request('osd crush rule ls-by-class', **kwargs)
-
-    def osd_crush_rule_dump(self, name=None, body='json', timeout=None):
-        """dump crush rule <name> (default all)"""
-        kwargs = dict(body=body, timeout=timeout)
-        if name is not None:
-            if not isinstance(name, six.string_types):
-                raise CephClientTypeError(
-                    name='name',
-                    actual=type(name),
-                    expected=six.string_types)
-            kwargs['name'] = name
-        return self._request('osd crush rule dump', **kwargs)
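'osd crush rule list' and 'osd crush rule ls' are aliases at the Ceph level, and both are exposed here for completeness. A sketch; 'replicated_rule' is an assumption (it is merely the usual default rule name on Luminous-era clusters):

    rules = client.osd_crush_rule_ls()
    detail = client.osd_crush_rule_dump(name='replicated_rule')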
-
-    def osd_crush_dump(self, body='json', timeout=None):
-        """dump crush map"""
-        return self._request('osd crush dump', body=body, timeout=timeout)
-
-    def osd_crush_add_bucket(
-            self, name, _type, args=None, body='json', timeout=None):
-        """add no-parent (probably root) crush bucket <name> of type <type> to
-        location <args...>
-        """
-        kwargs = dict(body=body, timeout=timeout)
-        if not isinstance(name, six.string_types):
-            raise CephClientTypeError(
-                name='name',
-                actual=type(name),
-                expected=six.string_types)
-
-        kwargs['name'] = name
-        if not isinstance(_type, six.string_types):
-            raise CephClientTypeError(
-                name='_type',
-                actual=type(_type),
-                expected=six.string_types)
-
-        kwargs['type'] = _type
-        if args is not None:
-            if isinstance(args, list):
-                for item in args:
-                    if not isinstance(item, six.string_types):
-                        raise CephClientTypeError(
-                            name='args',
-                            actual=item,
-                            expected='list of strings')
-            else:
-                if not isinstance(args, six.string_types):
-                    raise CephClientTypeError(
-                        name='args',
-                        actual=type(args),
-                        expected=six.string_types)
-            if not isinstance(args, list):
-                args = [args]
-            kwargs['args'] = args
-        return self._request('osd crush add-bucket', **kwargs)
-
-    def osd_crush_rename_bucket(
-            self, srcname, dstname, body='json', timeout=None):
-        """rename bucket <srcname> to <dstname>"""
-        kwargs = dict(body=body, timeout=timeout)
-        if not isinstance(srcname, six.string_types):
-            raise CephClientTypeError(
-                name='srcname',
-                actual=type(srcname),
-                expected=six.string_types)
-
-        kwargs['srcname'] = srcname
-        if not isinstance(dstname, six.string_types):
-            raise CephClientTypeError(
-                name='dstname',
-                actual=type(dstname),
-                expected=six.string_types)
-
-        kwargs['dstname'] = dstname
-        return self._request('osd crush rename-bucket', **kwargs)
-
-    def osd_crush_set(self, _id, weight, args, body='json', timeout=None):
-        """update crushmap position and weight for <name> to <weight> with
-        location <args...>
-        """
-        kwargs = dict(body=body, timeout=timeout)
-        if isinstance(_id, six.integer_types):
-            pass
-        elif isinstance(_id, six.string_types):
-            _id = _id.lower()
-            prefix = 'osd.'
-            if not _id.startswith(prefix):
-                raise CephClientInvalidOsdIdValue(osdid=_id)
-            _id = int(_id[len(prefix):])
-        else:
-            raise CephClientTypeError(
-                name='_id',
-                actual=type(_id),
-                expected='int or string')
-
-        kwargs['id'] = _id
-        if not isinstance(weight, (six.integer_types, float)):
-            raise CephClientTypeError(
-                name='weight',
-                actual=type(weight),
-                expected=int)
-        if weight < 0.0:
-            raise CephClientValueOutOfBounds(
-                name='weight',
-                actual=weight,
-                min=0.0,
-                max='unlimited')
-
-        kwargs['weight'] = weight
-        if isinstance(args, list):
-            for item in args:
-                if not isinstance(item, six.string_types):
-                    raise CephClientTypeError(
-                        name='args',
-                        actual=item,
-                        expected='list of strings')
-        else:
-            if not isinstance(args, six.string_types):
-                raise CephClientTypeError(
-                    name='args',
-                    actual=type(args),
-                    expected=six.string_types)
-
-        if not isinstance(args, list):
-            args = [args]
-        kwargs['args'] = args
-        return self._request('osd crush set', **kwargs)
-
-    def osd_crush_add(self, _id, weight, args, body='json', timeout=None):
-        """add or update crushmap position and weight for <name> with <weight> and location <args...>"""
-        kwargs = dict(body=body, timeout=timeout)
-        if isinstance(_id, six.integer_types):
-            pass
-        elif isinstance(_id, six.string_types):
-            _id = _id.lower()
-            prefix = 'osd.'
-            if not _id.startswith(prefix):
-                raise CephClientInvalidOsdIdValue(osdid=_id)
-            _id = int(_id[len(prefix):])
-        else:
-            raise CephClientTypeError(
-                name='_id',
-                actual=type(_id),
-                expected='int or string')
-
-        kwargs['id'] = _id
-        if not isinstance(weight, (six.integer_types, float)):
-            raise CephClientTypeError(
-                name='weight',
-                actual=type(weight),
-                expected=int)
-        if weight < 0.0:
-            raise CephClientValueOutOfBounds(
-                name='weight',
-                actual=weight,
-                min=0.0,
-                max='unlimited')
-
-        kwargs['weight'] = weight
-        if isinstance(args, list):
-            for item in args:
-                if not isinstance(item, six.string_types):
-                    raise CephClientTypeError(
-                        name='args',
-                        actual=item,
-                        expected='list of strings')
-        else:
-            if not isinstance(args, six.string_types):
-                raise CephClientTypeError(
-                    name='args',
-                    actual=type(args),
-                    expected=six.string_types)
-
-        if not isinstance(args, list):
-            args = [args]
-        kwargs['args'] = args
-        return self._request('osd crush add', **kwargs)
-
-    def osd_crush_set_all_straw_buckets_to_straw2(
-            self, body='json', timeout=None):
-        """convert all CRUSH current straw buckets to use the straw2 algorithm"""
-        return self._request(
-            'osd crush set-all-straw-buckets-to-straw2', body=body,
-            timeout=timeout)
-
-    def osd_crush_set_device_class(
-            self, _class, ids, body='json', timeout=None):
-        """set the <class> of the osd(s) <id> [<id>...],
-        or use <all|any> to set
-        all.
-        """
-        kwargs = dict(body=body, timeout=timeout)
-        if not isinstance(_class, six.string_types):
-            raise CephClientTypeError(
-                name='_class',
-                actual=type(_class),
-                expected=six.string_types)
-
-        kwargs['class'] = _class
-        if isinstance(ids, list):
-            for item in ids:
-                if not isinstance(item, six.string_types):
-                    raise CephClientTypeError(
-                        name='ids',
-                        actual=item,
-                        expected='list of strings')
-        else:
-            if not isinstance(ids, six.string_types):
-                raise CephClientTypeError(
-                    name='ids',
-                    actual=type(ids),
-                    expected=six.string_types)
-
-        if not isinstance(ids, list):
-            ids = [ids]
-        kwargs['ids'] = ids
-        return self._request('osd crush set-device-class', **kwargs)
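ids may be a single string or a list of strings; a scalar is wrapped into a one-element list before dispatch, so both spellings below reach the same command. The device-class names and OSD ids are placeholders:

    client.osd_crush_set_device_class('ssd', ['0', '1'])
    client.osd_crush_set_device_class('hdd', '2')   # becomes ['2']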
""" - kwargs = dict(body=body, timeout=timeout) - if isinstance(ids, list): - for item in ids: - if not isinstance(item, six.string_types): - raise CephClientTypeError( - name='ids', - actual=item, - expected='list of strings') - else: - if not isinstance(ids, six.string_types): - raise CephClientTypeError( - name='ids', - actual=type(ids), - expected=six.string_types) - - if not isinstance(ids, list): - ids = [ids] - kwargs['ids'] = ids - return self._request('osd crush rm-device-class', **kwargs) - - def osd_crush_class_rename( - self, srcname, dstname, body='json', timeout=None): - """rename crush device class to """ - kwargs = dict(body=body, timeout=timeout) - if not isinstance(srcname, six.string_types): - raise CephClientTypeError( - name='srcname', - actual=type(srcname), - expected=six.string_types) - - kwargs['srcname'] = srcname - if not isinstance(dstname, six.string_types): - raise CephClientTypeError( - name='dstname', - actual=type(dstname), - expected=six.string_types) - - kwargs['dstname'] = dstname - return self._request('osd crush class rename', **kwargs) - - def osd_crush_create_or_move( - self, _id, weight, args, body='json', timeout=None): - """create entry or move existing entry for at/to location """ - kwargs = dict(body=body, timeout=timeout) - if isinstance(_id, six.integer_types): - pass - elif isinstance(_id, six.string_types): - _id = _id.lower() - prefix = 'osd.' - if not _id.startswith(prefix): - raise CephClientInvalidOsdIdValue(osdid=_id) - _id = int(_id[len(prefix):]) - else: - raise CephClientTypeError( - name='_id', - actual=type(_id), - expected='int or string') - - kwargs['id'] = _id - if not isinstance(weight, (six.integer_types, float)): - raise CephClientTypeError( - name='weight', - actual=type(weight), - expected=int) - if weight < 0.0: - raise CephClientValueOutOfBounds( - name='weight', - actual=weight, - min=0.0, - max='unlimited') - - kwargs['weight'] = weight - if isinstance(args, list): - for item in args: - if not isinstance(item, six.string_types): - raise CephClientTypeError( - name='args', - actual=item, - expected='list of strings') - else: - if not isinstance(args, six.string_types): - raise CephClientTypeError( - name='args', - actual=type(args), - expected=six.string_types) - - if not isinstance(args, list): - args = [args] - kwargs['args'] = args - return self._request('osd crush create-or-move', **kwargs) - - def osd_crush_move(self, name, args, body='json', timeout=None): - """move existing entry for to location """ - kwargs = dict(body=body, timeout=timeout) - if not isinstance(name, six.string_types): - raise CephClientTypeError( - name='name', - actual=type(name), - expected=six.string_types) - - kwargs['name'] = name - if isinstance(args, list): - for item in args: - if not isinstance(item, six.string_types): - raise CephClientTypeError( - name='args', - actual=item, - expected='list of strings') - else: - if not isinstance(args, six.string_types): - raise CephClientTypeError( - name='args', - actual=type(args), - expected=six.string_types) - - if not isinstance(args, list): - args = [args] - kwargs['args'] = args - return self._request('osd crush move', **kwargs) - - OSD_CRUSH_SWAP_BUCKET_FORCE_VALUES = ['--yes-i-really-mean-it'] - - def osd_crush_swap_bucket( - self, source, dest, force=None, body='json', timeout=None): - """swap existing bucket contents from (orphan) bucket and """ - kwargs = dict(body=body, timeout=timeout) - if not isinstance(source, six.string_types): - raise CephClientTypeError( - name='source', - 
-
-        kwargs['source'] = source
-        if not isinstance(dest, six.string_types):
-            raise CephClientTypeError(
-                name='dest',
-                actual=type(dest),
-                expected=six.string_types)
-
-        kwargs['dest'] = dest
-        if force is not None:
-            if not isinstance(force, six.string_types):
-                raise CephClientTypeError(
-                    name='force',
-                    actual=type(force),
-                    expected=six.string_types)
-            supported = CephClient.OSD_CRUSH_SWAP_BUCKET_FORCE_VALUES
-            if force not in supported:
-                raise CephClientInvalidChoice(
-                    function='osd_crush_swap_bucket',
-                    option='force',
-                    value=force,
-                    supported=', '.join(supported))
-            kwargs['force'] = force
-        return self._request('osd crush swap-bucket', **kwargs)
-
-    def osd_crush_link(self, name, args, body='json', timeout=None):
-        """link existing entry for <name> under location <args...>"""
-        kwargs = dict(body=body, timeout=timeout)
-        if not isinstance(name, six.string_types):
-            raise CephClientTypeError(
-                name='name',
-                actual=type(name),
-                expected=six.string_types)
-
-        kwargs['name'] = name
-        if isinstance(args, list):
-            for item in args:
-                if not isinstance(item, six.string_types):
-                    raise CephClientTypeError(
-                        name='args',
-                        actual=item,
-                        expected='list of strings')
-        else:
-            if not isinstance(args, six.string_types):
-                raise CephClientTypeError(
-                    name='args',
-                    actual=type(args),
-                    expected=six.string_types)
-
-        if not isinstance(args, list):
-            args = [args]
-        kwargs['args'] = args
-        return self._request('osd crush link', **kwargs)
-
-    def osd_crush_rm(self, name, ancestor=None, body='json', timeout=None):
-        """remove <name> from crush map (everywhere, or just at <ancestor>)"""
-        kwargs = dict(body=body, timeout=timeout)
-        if not isinstance(name, six.string_types):
-            raise CephClientTypeError(
-                name='name',
-                actual=type(name),
-                expected=six.string_types)
-
-        kwargs['name'] = name
-        if ancestor is not None:
-            if not isinstance(ancestor, six.string_types):
-                raise CephClientTypeError(
-                    name='ancestor',
-                    actual=type(ancestor),
-                    expected=six.string_types)
-            kwargs['ancestor'] = ancestor
-        return self._request('osd crush rm', **kwargs)
-
-    def osd_crush_remove(self, name, ancestor=None,
-                         body='json', timeout=None):
-        """remove <name> from crush map (everywhere, or just at <ancestor>)"""
-        kwargs = dict(body=body, timeout=timeout)
-        if not isinstance(name, six.string_types):
-            raise CephClientTypeError(
-                name='name',
-                actual=type(name),
-                expected=six.string_types)
-
-        kwargs['name'] = name
-        if ancestor is not None:
-            if not isinstance(ancestor, six.string_types):
-                raise CephClientTypeError(
-                    name='ancestor',
-                    actual=type(ancestor),
-                    expected=six.string_types)
-            kwargs['ancestor'] = ancestor
-        return self._request('osd crush remove', **kwargs)
-
-    def osd_crush_unlink(self, name, ancestor=None,
-                         body='json', timeout=None):
-        """unlink <name> from crush map (everywhere, or just at <ancestor>)"""
-        kwargs = dict(body=body, timeout=timeout)
-        if not isinstance(name, six.string_types):
-            raise CephClientTypeError(
-                name='name',
-                actual=type(name),
-                expected=six.string_types)
-
-        kwargs['name'] = name
-        if ancestor is not None:
-            if not isinstance(ancestor, six.string_types):
-                raise CephClientTypeError(
-                    name='ancestor',
-                    actual=type(ancestor),
-                    expected=six.string_types)
-            kwargs['ancestor'] = ancestor
-        return self._request('osd crush unlink', **kwargs)
-
-    def osd_crush_reweight_all(self, body='json', timeout=None):
-        """recalculate the weights for the tree to ensure they sum correctly"""
-        return self._request('osd crush reweight-all',
-                             body=body, timeout=timeout)
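After bucket surgery such as the rm/remove/unlink calls above, reweight-all recalculates the tree's subtree sums. A sketch with placeholder bucket names:

    client.osd_crush_unlink('host-1', ancestor='rack-1')
    client.osd_crush_reweight_all()   # re-sum the tree weights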
-
-    def osd_crush_reweight(self, name, weight, body='json', timeout=None):
-        """change <name>'s weight to <weight> in crush map"""
-        kwargs = dict(body=body, timeout=timeout)
-        if not isinstance(name, six.string_types):
-            raise CephClientTypeError(
-                name='name',
-                actual=type(name),
-                expected=six.string_types)
-
-        kwargs['name'] = name
-        if not isinstance(weight, (six.integer_types, float)):
-            raise CephClientTypeError(
-                name='weight',
-                actual=type(weight),
-                expected=int)
-        if weight < 0.0:
-            raise CephClientValueOutOfBounds(
-                name='weight',
-                actual=weight,
-                min=0.0,
-                max='unlimited')
-
-        kwargs['weight'] = weight
-        return self._request('osd crush reweight', **kwargs)
-
-    def osd_crush_reweight_subtree(
-            self, name, weight, body='json', timeout=None):
-        """change all leaf items beneath <name> to <weight> in crush map"""
-        kwargs = dict(body=body, timeout=timeout)
-        if not isinstance(name, six.string_types):
-            raise CephClientTypeError(
-                name='name',
-                actual=type(name),
-                expected=six.string_types)
-
-        kwargs['name'] = name
-        if not isinstance(weight, (six.integer_types, float)):
-            raise CephClientTypeError(
-                name='weight',
-                actual=type(weight),
-                expected=int)
-        if weight < 0.0:
-            raise CephClientValueOutOfBounds(
-                name='weight',
-                actual=weight,
-                min=0.0,
-                max='unlimited')
-
-        kwargs['weight'] = weight
-        return self._request('osd crush reweight-subtree', **kwargs)
-
-    OSD_CRUSH_TUNABLES_PROFILE_VALUES = \
-        ['legacy', 'argonaut', 'bobtail', 'firefly',
-         'hammer', 'jewel', 'optimal', 'default']
-
-    def osd_crush_tunables(self, profile, body='json', timeout=None):
-        """set crush tunables values to <profile>"""
-        kwargs = dict(body=body, timeout=timeout)
-        if not isinstance(profile, six.string_types):
-            raise CephClientTypeError(
-                name='profile',
-                actual=type(profile),
-                expected=six.string_types)
-        supported = CephClient.OSD_CRUSH_TUNABLES_PROFILE_VALUES
-        if profile not in supported:
-            raise CephClientInvalidChoice(
-                function='osd_crush_tunables',
-                option='profile',
-                value=profile,
-                supported=', '.join(supported))
-
-        kwargs['profile'] = profile
-        return self._request('osd crush tunables', **kwargs)
-
-    OSD_CRUSH_SET_TUNABLE_TUNABLE_VALUES = ['straw_calc_version']
-
-    def osd_crush_set_tunable(
-            self, tunable, value, body='json', timeout=None):
-        """set crush tunable <tunable> to <value>"""
-        kwargs = dict(body=body, timeout=timeout)
-        if not isinstance(tunable, six.string_types):
-            raise CephClientTypeError(
-                name='tunable',
-                actual=type(tunable),
-                expected=six.string_types)
-        supported = CephClient.OSD_CRUSH_SET_TUNABLE_TUNABLE_VALUES
-        if tunable not in supported:
-            raise CephClientInvalidChoice(
-                function='osd_crush_set_tunable',
-                option='tunable',
-                value=tunable,
-                supported=', '.join(supported))
-
-        kwargs['tunable'] = tunable
-        if not isinstance(value, six.integer_types):
-            raise CephClientTypeError(
-                name='value',
-                actual=type(value),
-                expected=int)
-
-        kwargs['value'] = value
-        return self._request('osd crush set-tunable', **kwargs)
-
-    OSD_CRUSH_GET_TUNABLE_TUNABLE_VALUES = ['straw_calc_version']
-
-    def osd_crush_get_tunable(self, tunable, body='json', timeout=None):
-        """get crush tunable <tunable>"""
-        kwargs = dict(body=body, timeout=timeout)
-        if not isinstance(tunable, six.string_types):
-            raise CephClientTypeError(
-                name='tunable',
-                actual=type(tunable),
-                expected=six.string_types)
-        supported = CephClient.OSD_CRUSH_GET_TUNABLE_TUNABLE_VALUES
-        if tunable not in supported:
-            raise CephClientInvalidChoice(
-                function='osd_crush_get_tunable',
-                option='tunable',
-                value=tunable,
-                supported=', '.join(supported))
-
-        kwargs['tunable'] = tunable
-        return self._request('osd crush get-tunable', **kwargs)
-
-    def osd_crush_show_tunables(self, body='json', timeout=None):
-        """show current crush tunables"""
-        return self._request('osd crush show-tunables',
-                             body=body, timeout=timeout)
-
-    OSD_CRUSH_RULE_CREATE_SIMPLE_MODE_VALUES = ['firstn', 'indep']
-
-    def osd_crush_rule_create_simple(
-            self, name, root, _type, mode=None, body='json', timeout=None):
-        """create crush rule <name> to start from <root>, replicate across
-        buckets of type <type>, using a choose mode of <firstn|indep>
-        (default firstn; indep best for erasure pools)
-        """
-        kwargs = dict(body=body, timeout=timeout)
-        if not isinstance(name, six.string_types):
-            raise CephClientTypeError(
-                name='name',
-                actual=type(name),
-                expected=six.string_types)
-
-        kwargs['name'] = name
-        if not isinstance(root, six.string_types):
-            raise CephClientTypeError(
-                name='root',
-                actual=type(root),
-                expected=six.string_types)
-
-        kwargs['root'] = root
-        if not isinstance(_type, six.string_types):
-            raise CephClientTypeError(
-                name='_type',
-                actual=type(_type),
-                expected=six.string_types)
-
-        kwargs['type'] = _type
-        if mode is not None:
-            if not isinstance(mode, six.string_types):
-                raise CephClientTypeError(
-                    name='mode',
-                    actual=type(mode),
-                    expected=six.string_types)
-            supported = CephClient.OSD_CRUSH_RULE_CREATE_SIMPLE_MODE_VALUES
-            if mode not in supported:
-                raise CephClientInvalidChoice(
-                    function='osd_crush_rule_create_simple',
-                    option='mode',
-                    value=mode,
-                    supported=', '.join(supported))
-            kwargs['mode'] = mode
-        return self._request('osd crush rule create-simple', **kwargs)
-
-    def osd_crush_rule_create_replicated(
-            self, name, root, _type, _class=None, body='json',
-            timeout=None):
-        """create crush rule <name> for replicated pool to start from <root>,
-        replicate across buckets of type <type>, using a choose mode of
-        <firstn|indep> (default firstn; indep best for erasure pools)
-        """
-        kwargs = dict(body=body, timeout=timeout)
-        if not isinstance(name, six.string_types):
-            raise CephClientTypeError(
-                name='name',
-                actual=type(name),
-                expected=six.string_types)
-
-        kwargs['name'] = name
-        if not isinstance(root, six.string_types):
-            raise CephClientTypeError(
-                name='root',
-                actual=type(root),
-                expected=six.string_types)
-
-        kwargs['root'] = root
-        if not isinstance(_type, six.string_types):
-            raise CephClientTypeError(
-                name='_type',
-                actual=type(_type),
-                expected=six.string_types)
-
-        kwargs['type'] = _type
-        if _class is not None:
-            if not isinstance(_class, six.string_types):
-                raise CephClientTypeError(
-                    name='_class',
-                    actual=type(_class),
-                    expected=six.string_types)
-            kwargs['class'] = _class
-        return self._request('osd crush rule create-replicated', **kwargs)
-
-    def osd_crush_rule_create_erasure(
-            self, name, profile=None, body='json', timeout=None):
-        """create crush rule <name> for erasure coded pool created with <profile>
-        (default default)
-        """
-        kwargs = dict(body=body, timeout=timeout)
-        if not isinstance(name, six.string_types):
-            raise CephClientTypeError(
-                name='name',
-                actual=type(name),
-                expected=six.string_types)
-
-        kwargs['name'] = name
-        if profile is not None:
-            if not isinstance(profile, six.string_types):
-                raise CephClientTypeError(
-                    name='profile',
-                    actual=type(profile),
-                    expected=six.string_types)
-            kwargs['profile'] = profile
-        return self._request('osd crush rule create-erasure', **kwargs)
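A sketch of the rule-creation wrappers; the rule, root and class names are placeholders, and note the leading-underscore spellings _type and _class that avoid shadowing Python built-ins:

    client.osd_crush_rule_create_replicated(
        'ssd_rule', 'default', 'host', _class='ssd')
    client.osd_crush_rule_create_erasure('ec_rule')  # default profile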
-
-    def osd_crush_rule_rm(self, name, body='json', timeout=None):
-        """remove crush rule <name>"""
-        kwargs = dict(body=body, timeout=timeout)
-        if not isinstance(name, six.string_types):
-            raise CephClientTypeError(
-                name='name',
-                actual=type(name),
-                expected=six.string_types)
-
-        kwargs['name'] = name
-        return self._request('osd crush rule rm', **kwargs)
-
-    def osd_crush_rule_rename(
-            self, srcname, dstname, body='json', timeout=None):
-        """rename crush rule <srcname> to <dstname>"""
-        kwargs = dict(body=body, timeout=timeout)
-        if not isinstance(srcname, six.string_types):
-            raise CephClientTypeError(
-                name='srcname',
-                actual=type(srcname),
-                expected=six.string_types)
-
-        kwargs['srcname'] = srcname
-        if not isinstance(dstname, six.string_types):
-            raise CephClientTypeError(
-                name='dstname',
-                actual=type(dstname),
-                expected=six.string_types)
-
-        kwargs['dstname'] = dstname
-        return self._request('osd crush rule rename', **kwargs)
-
-    OSD_CRUSH_TREE_SHADOW_VALUES = ['--show-shadow']
-
-    def osd_crush_tree(self, shadow=None, body='json', timeout=None):
-        """dump crush buckets and items in a tree view"""
-        kwargs = dict(body=body, timeout=timeout)
-        if shadow is not None:
-            if not isinstance(shadow, six.string_types):
-                raise CephClientTypeError(
-                    name='shadow',
-                    actual=type(shadow),
-                    expected=six.string_types)
-            supported = CephClient.OSD_CRUSH_TREE_SHADOW_VALUES
-            if shadow not in supported:
-                raise CephClientInvalidChoice(
-                    function='osd_crush_tree',
-                    option='shadow',
-                    value=shadow,
-                    supported=', '.join(supported))
-            kwargs['shadow'] = shadow
-        return self._request('osd crush tree', **kwargs)
-
-    def osd_crush_ls(self, node, body='json', timeout=None):
-        """list items beneath a node in the CRUSH tree"""
-        kwargs = dict(body=body, timeout=timeout)
-        if not isinstance(node, six.string_types):
-            raise CephClientTypeError(
-                name='node',
-                actual=type(node),
-                expected=six.string_types)
-
-        kwargs['node'] = node
-        return self._request('osd crush ls', **kwargs)
-
-    def osd_crush_class_ls(self, body='json', timeout=None):
-        """list all crush device classes"""
-        return self._request('osd crush class ls',
-                             body=body, timeout=timeout)
-
-    def osd_crush_class_ls_osd(self, _class, body='json', timeout=None):
-        """list all osds belonging to the specific <class>"""
-        kwargs = dict(body=body, timeout=timeout)
-        if not isinstance(_class, six.string_types):
-            raise CephClientTypeError(
-                name='_class',
-                actual=type(_class),
-                expected=six.string_types)
-
-        kwargs['class'] = _class
-        return self._request('osd crush class ls-osd', **kwargs)
-
-    def osd_crush_weight_set_ls(self, body='json', timeout=None):
-        """list crush weight sets"""
-        return self._request('osd crush weight-set ls',
-                             body=body, timeout=timeout)
-
-    def osd_crush_weight_set_dump(self, body='json', timeout=None):
-        """dump crush weight sets"""
-        return self._request('osd crush weight-set dump',
-                             body=body, timeout=timeout)
-
-    def osd_crush_weight_set_create_compat(
-            self, body='json', timeout=None):
-        """create a default backward-compatible weight-set"""
-        return self._request(
-            'osd crush weight-set create-compat', body=body,
-            timeout=timeout)
-
-    OSD_CRUSH_WEIGHT_SET_CREATE_MODE_VALUES = ['flat', 'positional']
-
-    def osd_crush_weight_set_create(
-            self, pool, mode, body='json', timeout=None):
-        """create a weight-set for a given pool"""
-        kwargs = dict(body=body, timeout=timeout)
-        if not isinstance(pool, six.string_types):
-            raise CephClientTypeError(
-                name='pool',
-                actual=type(pool),
-                expected=six.string_types)
-
-        kwargs['pool'] = pool
-        if not isinstance(mode, six.string_types):
-            raise CephClientTypeError(
-                name='mode',
-                actual=type(mode),
-                expected=six.string_types)
-        supported = \
-            CephClient.OSD_CRUSH_WEIGHT_SET_CREATE_MODE_VALUES
-        if mode not in supported:
-            raise CephClientInvalidChoice(
-                function='osd_crush_weight_set_create',
-                option='mode',
-                value=mode,
-                supported=', '.join(supported))
-
-        kwargs['mode'] = mode
-        return self._request('osd crush weight-set create', **kwargs)
-
-    def osd_crush_weight_set_rm(self, pool, body='json', timeout=None):
-        """remove the weight-set for a given pool"""
-        kwargs = dict(body=body, timeout=timeout)
-        if not isinstance(pool, six.string_types):
-            raise CephClientTypeError(
-                name='pool',
-                actual=type(pool),
-                expected=six.string_types)
-
-        kwargs['pool'] = pool
-        return self._request('osd crush weight-set rm', **kwargs)
-
-    def osd_crush_weight_set_rm_compat(self, body='json', timeout=None):
-        """remove the backward-compatible weight-set"""
-        return self._request(
-            'osd crush weight-set rm-compat', body=body, timeout=timeout)
-
-    def osd_crush_weight_set_reweight(
-            self, pool, item, weight, body='json', timeout=None):
-        """set weight for an item (bucket or osd) in a pool's weight-set"""
-        kwargs = dict(body=body, timeout=timeout)
-        if not isinstance(pool, six.string_types):
-            raise CephClientTypeError(
-                name='pool',
-                actual=type(pool),
-                expected=six.string_types)
-
-        kwargs['pool'] = pool
-        if not isinstance(item, six.string_types):
-            raise CephClientTypeError(
-                name='item',
-                actual=type(item),
-                expected=six.string_types)
-
-        kwargs['item'] = item
-        if not isinstance(weight, (six.integer_types, float)):
-            raise CephClientTypeError(
-                name='weight',
-                actual=type(weight),
-                expected=int)
-        if weight < 0.0:
-            raise CephClientValueOutOfBounds(
-                name='weight',
-                actual=weight,
-                min=0.0,
-                max='unlimited')
-
-        if not isinstance(weight, list):
-            weight = [weight]
-        kwargs['weight'] = weight
-        return self._request('osd crush weight-set reweight', **kwargs)
-
-    def osd_crush_weight_set_reweight_compat(
-            self, item, weight, body='json', timeout=None):
-        """set weight for an item (bucket or osd) in the backward-compatible
-        weight-set
-        """
-        kwargs = dict(body=body, timeout=timeout)
-        if not isinstance(item, six.string_types):
-            raise CephClientTypeError(
-                name='item',
-                actual=type(item),
-                expected=six.string_types)
-
-        kwargs['item'] = item
-        if not isinstance(weight, (six.integer_types, float)):
-            raise CephClientTypeError(
-                name='weight',
-                actual=type(weight),
-                expected=int)
-        if weight < 0.0:
-            raise CephClientValueOutOfBounds(
-                name='weight',
-                actual=weight,
-                min=0.0,
-                max='unlimited')
-
-        if not isinstance(weight, list):
-            weight = [weight]
-        kwargs['weight'] = weight
-        return self._request(
-            'osd crush weight-set reweight-compat', **kwargs)
-
-    def osd_setmaxosd(self, newmax, body='json', timeout=None):
-        """set new maximum osd value"""
-        kwargs = dict(body=body, timeout=timeout)
-        if not isinstance(newmax, six.integer_types):
-            raise CephClientTypeError(
-                name='newmax',
-                actual=type(newmax),
-                expected=int)
-        if newmax < 0:
-            raise CephClientValueOutOfBounds(
-                name='newmax',
-                actual=newmax,
-                min=0,
-                max='unlimited')
-
-        kwargs['newmax'] = newmax
-        return self._request('osd setmaxosd', **kwargs)
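Weight-set usage sketch; the pool name is a placeholder. Note that osd_crush_weight_set_reweight() accepts a scalar weight and wraps it into the list form the command expects:

    client.osd_crush_weight_set_create('mypool', 'positional')
    client.osd_crush_weight_set_reweight('mypool', 'osd.0', 0.8)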
-
-    def osd_set_full_ratio(self, ratio, body='json', timeout=None):
-        """set usage ratio at which OSDs are marked full"""
-        kwargs = dict(body=body, timeout=timeout)
-        if not isinstance(ratio, (six.integer_types, float)):
-            raise CephClientTypeError(
-                name='ratio',
-                actual=type(ratio),
-                expected=int)
-        if ratio < 0.0 or ratio > 1.0:
-            raise CephClientValueOutOfBounds(
-                name='ratio',
-                actual=ratio,
-                min=0.0,
-                max=1.0)
-
-        kwargs['ratio'] = ratio
-        return self._request('osd set-full-ratio', **kwargs)
-
-    def osd_set_backfillfull_ratio(self, ratio, body='json', timeout=None):
-        """set usage ratio at which OSDs are marked too full to backfill"""
-        kwargs = dict(body=body, timeout=timeout)
-        if not isinstance(ratio, (six.integer_types, float)):
-            raise CephClientTypeError(
-                name='ratio',
-                actual=type(ratio),
-                expected=int)
-        if ratio < 0.0 or ratio > 1.0:
-            raise CephClientValueOutOfBounds(
-                name='ratio',
-                actual=ratio,
-                min=0.0,
-                max=1.0)
-
-        kwargs['ratio'] = ratio
-        return self._request('osd set-backfillfull-ratio', **kwargs)
-
-    def osd_set_nearfull_ratio(self, ratio, body='json', timeout=None):
-        """set usage ratio at which OSDs are marked near-full"""
-        kwargs = dict(body=body, timeout=timeout)
-        if not isinstance(ratio, (six.integer_types, float)):
-            raise CephClientTypeError(
-                name='ratio',
-                actual=type(ratio),
-                expected=int)
-        if ratio < 0.0 or ratio > 1.0:
-            raise CephClientValueOutOfBounds(
-                name='ratio',
-                actual=ratio,
-                min=0.0,
-                max=1.0)
-
-        kwargs['ratio'] = ratio
-        return self._request('osd set-nearfull-ratio', **kwargs)
-
-    def osd_get_require_min_compat_client(self, body='json', timeout=None):
-        """get the minimum client version we will maintain compatibility with"""
-        return self._request(
-            'osd get-require-min-compat-client', body=body,
-            timeout=timeout)
-
-    OSD_SET_REQUIRE_MIN_COMPAT_CLIENT_SURE_VALUES = [
-        '--yes-i-really-mean-it']
-
-    def osd_set_require_min_compat_client(
-            self, version, sure=None, body='json', timeout=None):
-        """set the minimum client version we will maintain compatibility with"""
-        kwargs = dict(body=body, timeout=timeout)
-        if not isinstance(version, six.string_types):
-            raise CephClientTypeError(
-                name='version',
-                actual=type(version),
-                expected=six.string_types)
-
-        kwargs['version'] = version
-        if sure is not None:
-            if not isinstance(sure, six.string_types):
-                raise CephClientTypeError(
-                    name='sure',
-                    actual=type(sure),
-                    expected=six.string_types)
-            supported = CephClient.OSD_SET_REQUIRE_MIN_COMPAT_CLIENT_SURE_VALUES  # noqa E501
-            if sure not in supported:
-                raise CephClientInvalidChoice(
-                    function='osd_set_require_min_compat_client',
-                    option='sure',
-                    value=sure,
-                    supported=', '.join(supported))
-            kwargs['sure'] = sure
-        return self._request('osd set-require-min-compat-client', **kwargs)
-
-    def osd_pause(self, body='json', timeout=None):
-        """pause osd"""
-        return self._request('osd pause', body=body, timeout=timeout)
-
-    def osd_unpause(self, body='json', timeout=None):
-        """unpause osd"""
-        return self._request('osd unpause', body=body, timeout=timeout)
-
-    def osd_erasure_code_profile_set(
-            self, name, profile=None, body='json', timeout=None):
-        """create erasure code profile <name> with [<key[=value]> ...] pairs. Add
-        a --force at the end to override an existing profile (VERY DANGEROUS)
-        """
-        kwargs = dict(body=body, timeout=timeout)
-        if not isinstance(name, six.string_types):
-            raise CephClientTypeError(
-                name='name',
-                actual=type(name),
-                expected=six.string_types)
-
-        kwargs['name'] = name
-        if profile is not None:
-            if isinstance(profile, list):
-                for item in profile:
-                    if not isinstance(item, six.string_types):
-                        raise CephClientTypeError(
-                            name='profile',
-                            actual=item,
-                            expected='list of strings')
-            else:
-                if not isinstance(profile, six.string_types):
-                    raise CephClientTypeError(
-                        name='profile',
-                        actual=type(profile),
-                        expected=six.string_types)
-            if not isinstance(profile, list):
-                profile = [profile]
-            kwargs['profile'] = profile
-        return self._request('osd erasure-code-profile set', **kwargs)
-
-    def osd_erasure_code_profile_get(
-            self, name, body='json', timeout=None):
-        """get erasure code profile <name>"""
-        kwargs = dict(body=body, timeout=timeout)
-        if not isinstance(name, six.string_types):
-            raise CephClientTypeError(
-                name='name',
-                actual=type(name),
-                expected=six.string_types)
-
-        kwargs['name'] = name
-        return self._request('osd erasure-code-profile get', **kwargs)
-
-    def osd_erasure_code_profile_rm(self, name, body='json', timeout=None):
-        """remove erasure code profile <name>"""
-        kwargs = dict(body=body, timeout=timeout)
-        if not isinstance(name, six.string_types):
-            raise CephClientTypeError(
-                name='name',
-                actual=type(name),
-                expected=six.string_types)
-
-        kwargs['name'] = name
-        return self._request('osd erasure-code-profile rm', **kwargs)
-
-    def osd_erasure_code_profile_ls(self, body='json', timeout=None):
-        """list all erasure code profiles"""
-        return self._request(
-            'osd erasure-code-profile ls', body=body, timeout=timeout)
-
-    OSD_SET_KEY_VALUES = \
-        ['full', 'pause', 'noup', 'nodown', 'noout',
-         'noin', 'nobackfill', 'norebalance',
-         'norecover', 'noscrub', 'nodeep-scrub',
-         'notieragent', 'nosnaptrim', 'sortbitwise',
-         'recovery_deletes', 'require_jewel_osds',
-         'require_kraken_osds']
-
-    OSD_SET_SURE_VALUES = ['--yes-i-really-mean-it']
-
-    def osd_set(self, key, sure=None, body='json', timeout=None):
-        """set <key>"""
-        kwargs = dict(body=body, timeout=timeout)
-        if not isinstance(key, six.string_types):
-            raise CephClientTypeError(
-                name='key',
-                actual=type(key),
-                expected=six.string_types)
-        supported = CephClient.OSD_SET_KEY_VALUES
-        if key not in supported:
-            raise CephClientInvalidChoice(
-                function='osd_set',
-                option='key',
-                value=key,
-                supported=', '.join(supported))
-
-        kwargs['key'] = key
-        if sure is not None:
-            if not isinstance(sure, six.string_types):
-                raise CephClientTypeError(
-                    name='sure',
-                    actual=type(sure),
-                    expected=six.string_types)
-            supported = CephClient.OSD_SET_SURE_VALUES
-            if sure not in supported:
-                raise CephClientInvalidChoice(
-                    function='osd_set',
-                    option='sure',
-                    value=sure,
-                    supported=', '.join(supported))
-            kwargs['sure'] = sure
-        return self._request('osd set', **kwargs)
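A typical use of the flag wrappers is fencing off recovery during planned maintenance; 'noout' is one of the OSD_SET_KEY_VALUES, and the matching osd_unset() wrapper is defined just below:

    client.osd_set('noout')      # e.g. before a host reboot
    # ... maintenance window ...
    client.osd_unset('noout')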
function='osd_unset', - option='key', - value=key, - supported=', '.join(supported)) - - kwargs['key'] = key - return self._request('osd unset', **kwargs) - - OSD_REQUIRE_OSD_RELEASE_RELEASE_VALUES = ['luminous', 'mimic'] - - OSD_REQUIRE_OSD_RELEASE_SURE_VALUES = ['--yes-i-really-mean-it'] - - def osd_require_osd_release( - self, release, sure=None, body='json', timeout=None): - """set the minimum allowed OSD release to participate in the cluster""" - kwargs = dict(body=body, timeout=timeout) - if not isinstance(release, six.string_types): - raise CephClientTypeError( - name='release', - actual=type(release), - expected=six.string_types) - supported = CephClient.OSD_REQUIRE_OSD_RELEASE_RELEASE_VALUES - if release not in supported: - raise CephClientInvalidChoice( - function='osd_require_osd_release', - option='release', - value=release, - supported=', '.join(supported)) - - kwargs['release'] = release - if sure is not None: - if not isinstance(sure, six.string_types): - raise CephClientTypeError( - name='sure', - actual=type(sure), - expected=six.string_types) - supported = CephClient.OSD_REQUIRE_OSD_RELEASE_SURE_VALUES - if sure not in supported: - raise CephClientInvalidChoice( - function='osd_require_osd_release', - option='sure', - value=sure, - supported=', '.join(supported)) - kwargs['sure'] = sure - return self._request('osd require-osd-release', **kwargs) - - def osd_down(self, ids, body='json', timeout=None): - """set osd(s) [...] down, or use to set all osds down""" - kwargs = dict(body=body, timeout=timeout) - if isinstance(ids, list): - for item in ids: - if not isinstance(item, six.string_types): - raise CephClientTypeError( - name='ids', - actual=item, - expected='list of strings') - else: - if not isinstance(ids, six.string_types): - raise CephClientTypeError( - name='ids', - actual=type(ids), - expected=six.string_types) - - if not isinstance(ids, list): - ids = [ids] - kwargs['ids'] = ids - return self._request('osd down', **kwargs) - - def osd_out(self, ids, body='json', timeout=None): - """set osd(s) [...] out, or use to set all osds out""" - kwargs = dict(body=body, timeout=timeout) - if isinstance(ids, list): - for item in ids: - if not isinstance(item, six.string_types): - raise CephClientTypeError( - name='ids', - actual=item, - expected='list of strings') - else: - if not isinstance(ids, six.string_types): - raise CephClientTypeError( - name='ids', - actual=type(ids), - expected=six.string_types) - - if not isinstance(ids, list): - ids = [ids] - kwargs['ids'] = ids - return self._request('osd out', **kwargs) - - def osd_in(self, ids, body='json', timeout=None): - """set osd(s) [...] 
in, can use <any|all> to automatically set all previously out osds in
-        """
-        kwargs = dict(body=body, timeout=timeout)
-        if isinstance(ids, list):
-            for item in ids:
-                if not isinstance(item, six.string_types):
-                    raise CephClientTypeError(
-                        name='ids',
-                        actual=item,
-                        expected='list of strings')
-        else:
-            if not isinstance(ids, six.string_types):
-                raise CephClientTypeError(
-                    name='ids',
-                    actual=type(ids),
-                    expected=six.string_types)
-
-        if not isinstance(ids, list):
-            ids = [ids]
-        kwargs['ids'] = ids
-        return self._request('osd in', **kwargs)
-
-    def osd_rm(self, ids, body='json', timeout=None):
-        """remove osd(s) <ids> [<ids>...], or use <any|all> to remove all osds"""
-        kwargs = dict(body=body, timeout=timeout)
-        if isinstance(ids, list):
-            for item in ids:
-                if not isinstance(item, six.string_types):
-                    raise CephClientTypeError(
-                        name='ids',
-                        actual=item,
-                        expected='list of strings')
-        else:
-            if not isinstance(ids, six.string_types):
-                raise CephClientTypeError(
-                    name='ids',
-                    actual=type(ids),
-                    expected=six.string_types)
-
-        if not isinstance(ids, list):
-            ids = [ids]
-        kwargs['ids'] = ids
-        return self._request('osd rm', **kwargs)
-
-    def osd_add_noup(self, ids, body='json', timeout=None):
-        """mark osd(s) <ids> [<ids>...] as noup, or use <any|all> to mark
-        all osds as noup
-        """
-        kwargs = dict(body=body, timeout=timeout)
-        if isinstance(ids, list):
-            for item in ids:
-                if not isinstance(item, six.string_types):
-                    raise CephClientTypeError(
-                        name='ids',
-                        actual=item,
-                        expected='list of strings')
-        else:
-            if not isinstance(ids, six.string_types):
-                raise CephClientTypeError(
-                    name='ids',
-                    actual=type(ids),
-                    expected=six.string_types)
-
-        if not isinstance(ids, list):
-            ids = [ids]
-        kwargs['ids'] = ids
-        return self._request('osd add-noup', **kwargs)
-
-    def osd_add_nodown(self, ids, body='json', timeout=None):
-        """mark osd(s) <ids> [<ids>...] as nodown, or use <any|all> to mark
-        all osds as nodown
-        """
-        kwargs = dict(body=body, timeout=timeout)
-        if isinstance(ids, list):
-            for item in ids:
-                if not isinstance(item, six.string_types):
-                    raise CephClientTypeError(
-                        name='ids',
-                        actual=item,
-                        expected='list of strings')
-        else:
-            if not isinstance(ids, six.string_types):
-                raise CephClientTypeError(
-                    name='ids',
-                    actual=type(ids),
-                    expected=six.string_types)
-
-        if not isinstance(ids, list):
-            ids = [ids]
-        kwargs['ids'] = ids
-        return self._request('osd add-nodown', **kwargs)
-
-    def osd_add_noin(self, ids, body='json', timeout=None):
-        """mark osd(s) <ids> [<ids>...] as noin, or use <any|all> to mark
-        all osds as noin
-        """
-        kwargs = dict(body=body, timeout=timeout)
-        if isinstance(ids, list):
-            for item in ids:
-                if not isinstance(item, six.string_types):
-                    raise CephClientTypeError(
-                        name='ids',
-                        actual=item,
-                        expected='list of strings')
-        else:
-            if not isinstance(ids, six.string_types):
-                raise CephClientTypeError(
-                    name='ids',
-                    actual=type(ids),
-                    expected=six.string_types)
-
-        if not isinstance(ids, list):
-            ids = [ids]
-        kwargs['ids'] = ids
-        return self._request('osd add-noin', **kwargs)
-
-    def osd_add_noout(self, ids, body='json', timeout=None):
-        """mark osd(s) <ids> [<ids>...] as noout, or use <any|all> to mark
-        all osds as noout
-        """
-        kwargs = dict(body=body, timeout=timeout)
-        if isinstance(ids, list):
-            for item in ids:
-                if not isinstance(item, six.string_types):
-                    raise CephClientTypeError(
-                        name='ids',
-                        actual=item,
-                        expected='list of strings')
-        else:
-            if not isinstance(ids, six.string_types):
-                raise CephClientTypeError(
-                    name='ids',
-                    actual=type(ids),
-                    expected=six.string_types)
-
-        if not isinstance(ids, list):
-            ids = [ids]
-        kwargs['ids'] = ids
-        return self._request('osd add-noout', **kwargs)
-
-    def osd_rm_noup(self, ids, body='json', timeout=None):
-        """allow osd(s) <ids> [<ids>...] to be marked up (if they are
-        currently marked as noup), can use <any|all> to automatically
-        filter out all noup osds
-        """
-        kwargs = dict(body=body, timeout=timeout)
-        if isinstance(ids, list):
-            for item in ids:
-                if not isinstance(item, six.string_types):
-                    raise CephClientTypeError(
-                        name='ids',
-                        actual=item,
-                        expected='list of strings')
-        else:
-            if not isinstance(ids, six.string_types):
-                raise CephClientTypeError(
-                    name='ids',
-                    actual=type(ids),
-                    expected=six.string_types)
-
-        if not isinstance(ids, list):
-            ids = [ids]
-        kwargs['ids'] = ids
-        return self._request('osd rm-noup', **kwargs)
-
-    def osd_rm_nodown(self, ids, body='json', timeout=None):
-        """allow osd(s) <ids> [<ids>...] to be marked down (if they are
-        currently marked as nodown), can use <any|all> to automatically
-        filter out all nodown osds
-        """
-        kwargs = dict(body=body, timeout=timeout)
-        if isinstance(ids, list):
-            for item in ids:
-                if not isinstance(item, six.string_types):
-                    raise CephClientTypeError(
-                        name='ids',
-                        actual=item,
-                        expected='list of strings')
-        else:
-            if not isinstance(ids, six.string_types):
-                raise CephClientTypeError(
-                    name='ids',
-                    actual=type(ids),
-                    expected=six.string_types)
-
-        if not isinstance(ids, list):
-            ids = [ids]
-        kwargs['ids'] = ids
-        return self._request('osd rm-nodown', **kwargs)
-
-    def osd_rm_noin(self, ids, body='json', timeout=None):
-        """allow osd(s) <ids> [<ids>...] to be marked in (if they are
-        currently marked as noin), can use <any|all> to automatically
-        filter out all noin osds
-        """
-        kwargs = dict(body=body, timeout=timeout)
-        if isinstance(ids, list):
-            for item in ids:
-                if not isinstance(item, six.string_types):
-                    raise CephClientTypeError(
-                        name='ids',
-                        actual=item,
-                        expected='list of strings')
-        else:
-            if not isinstance(ids, six.string_types):
-                raise CephClientTypeError(
-                    name='ids',
-                    actual=type(ids),
-                    expected=six.string_types)
-
-        if not isinstance(ids, list):
-            ids = [ids]
-        kwargs['ids'] = ids
-        return self._request('osd rm-noin', **kwargs)
-
-    def osd_rm_noout(self, ids, body='json', timeout=None):
-        """allow osd(s) <ids> [<ids>...] to be marked out (if they are
-        currently marked as noout), can use <any|all> to automatically
-        filter out all noout osds
-        """
-        kwargs = dict(body=body, timeout=timeout)
-        if isinstance(ids, list):
-            for item in ids:
-                if not isinstance(item, six.string_types):
-                    raise CephClientTypeError(
-                        name='ids',
-                        actual=item,
-                        expected='list of strings')
-        else:
-            if not isinstance(ids, six.string_types):
-                raise CephClientTypeError(
-                    name='ids',
-                    actual=type(ids),
-                    expected=six.string_types)
-
-        if not isinstance(ids, list):
-            ids = [ids]
-        kwargs['ids'] = ids
-        return self._request('osd rm-noout', **kwargs)
-
-    def osd_reweight(self, _id, weight, body='json', timeout=None):
-        """reweight osd to 0.0 < <weight> < 1.0"""
-        kwargs = dict(body=body, timeout=timeout)
-        if isinstance(_id, six.integer_types):
-            pass
-        elif isinstance(_id, six.string_types):
-            _id = _id.lower()
-            prefix = 'osd.'
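#
# A minimal sketch of the OSD id forms this normalization accepts,
# assuming a hypothetical connected `client` instance of this class
# (the instance name is not from the original source):
#
#     client.osd_reweight(3, 0.8)        # bare integer id
#     client.osd_reweight('OSD.3', 0.8)  # 'osd.N' string, case-insensitive
#     client.osd_reweight('3', 0.8)      # raises CephClientInvalidOsdIdValue
#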
- if not _id.startswith(prefix): - raise CephClientInvalidOsdIdValue(osdid=_id) - _id = int(_id[len(prefix):]) - else: - raise CephClientTypeError( - name='_id', - actual=type(_id), - expected='int or string') - - kwargs['id'] = _id - if not isinstance(weight, (six.integer_types, float)): - raise CephClientTypeError( - name='weight', - actual=type(weight), - expected=int) - if weight < 0.0 or weight > 1.0: - raise CephClientValueOutOfBounds( - name='weight', - actual=weight, - min=0.0, - max=1.0) - - kwargs['weight'] = weight - return self._request('osd reweight', **kwargs) - - def osd_reweightn(self, weights, body='json', timeout=None): - """reweight osds with {: ,...})""" - kwargs = dict(body=body, timeout=timeout) - if not isinstance(weights, six.string_types): - raise CephClientTypeError( - name='weights', - actual=type(weights), - expected=six.string_types) - - kwargs['weights'] = weights - return self._request('osd reweightn', **kwargs) - - OSD_FORCE_CREATE_PG_SURE_VALUES = ['--yes-i-really-mean-it'] - - def osd_force_create_pg(self, pgid, sure=None, - body='json', timeout=None): - """force creation of pg """ - kwargs = dict(body=body, timeout=timeout) - if not isinstance(pgid, six.string_types): - raise CephClientTypeError( - name='pgid', - actual=type(pgid), - expected=six.string_types) - if not re.match(r'[0-9]+\.[0-9a-fA-F]+', pgid): - raise CephClientInvalidPgid( - name='pgid', - actual=pgid) - - kwargs['pgid'] = pgid - if sure is not None: - if not isinstance(sure, six.string_types): - raise CephClientTypeError( - name='sure', - actual=type(sure), - expected=six.string_types) - supported = CephClient.OSD_FORCE_CREATE_PG_SURE_VALUES - if sure not in supported: - raise CephClientInvalidChoice( - function='osd_force_create_pg', - option='sure', - value=sure, - supported=', '.join(supported)) - kwargs['sure'] = sure - return self._request('osd force-create-pg', **kwargs) - - def osd_pg_temp(self, pgid, _id=None, body='json', timeout=None): - """set pg_temp mapping pgid:[ [...]] (developers only)""" - kwargs = dict(body=body, timeout=timeout) - if not isinstance(pgid, six.string_types): - raise CephClientTypeError( - name='pgid', - actual=type(pgid), - expected=six.string_types) - if not re.match(r'[0-9]+\.[0-9a-fA-F]+', pgid): - raise CephClientInvalidPgid( - name='pgid', - actual=pgid) - - kwargs['pgid'] = pgid - if _id is not None: - if isinstance(_id, six.integer_types): - pass - elif isinstance(_id, six.string_types): - _id = _id.lower() - prefix = 'osd.' - if not _id.startswith(prefix): - raise CephClientInvalidOsdIdValue(osdid=_id) - _id = int(_id[len(prefix):]) - else: - raise CephClientTypeError( - name='_id', - actual=type(_id), - expected='int or string') - if not isinstance(_id, list): - _id = [_id] - kwargs['id'] = _id - return self._request('osd pg-temp', **kwargs) - - def osd_pg_upmap(self, pgid, _id, body='json', timeout=None): - """set pg_upmap mapping :[ [...]] (developers only)""" - kwargs = dict(body=body, timeout=timeout) - if not isinstance(pgid, six.string_types): - raise CephClientTypeError( - name='pgid', - actual=type(pgid), - expected=six.string_types) - if not re.match(r'[0-9]+\.[0-9a-fA-F]+', pgid): - raise CephClientInvalidPgid( - name='pgid', - actual=pgid) - - kwargs['pgid'] = pgid - if isinstance(_id, six.integer_types): - pass - elif isinstance(_id, six.string_types): - _id = _id.lower() - prefix = 'osd.' 
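#
# The pg id arguments in these methods are validated against the pattern
# r'[0-9]+\.[0-9a-fA-F]+', i.e. "<pool-number>.<hex-pg-id>". A minimal
# sketch, assuming a hypothetical `client` instance:
#
#     client.osd_force_create_pg('1.ab')     # pool 1, pg 0xab -- accepted
#     client.osd_force_create_pg('garbage')  # raises CephClientInvalidPgid
#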
- if not _id.startswith(prefix): - raise CephClientInvalidOsdIdValue(osdid=_id) - _id = int(_id[len(prefix):]) - else: - raise CephClientTypeError( - name='_id', - actual=type(_id), - expected='int or string') - - if not isinstance(_id, list): - _id = [_id] - kwargs['id'] = _id - return self._request('osd pg-upmap', **kwargs) - - def osd_rm_pg_upmap(self, pgid, body='json', timeout=None): - """clear pg_upmap mapping for (developers only)""" - kwargs = dict(body=body, timeout=timeout) - if not isinstance(pgid, six.string_types): - raise CephClientTypeError( - name='pgid', - actual=type(pgid), - expected=six.string_types) - if not re.match(r'[0-9]+\.[0-9a-fA-F]+', pgid): - raise CephClientInvalidPgid( - name='pgid', - actual=pgid) - - kwargs['pgid'] = pgid - return self._request('osd rm-pg-upmap', **kwargs) - - def osd_pg_upmap_items(self, pgid, _id, body='json', timeout=None): - """set pg_upmap_items mapping :{ to , [...]} (developersonly) """ - kwargs = dict(body=body, timeout=timeout) - if not isinstance(pgid, six.string_types): - raise CephClientTypeError( - name='pgid', - actual=type(pgid), - expected=six.string_types) - if not re.match(r'[0-9]+\.[0-9a-fA-F]+', pgid): - raise CephClientInvalidPgid( - name='pgid', - actual=pgid) - - kwargs['pgid'] = pgid - if isinstance(_id, six.integer_types): - pass - elif isinstance(_id, six.string_types): - _id = _id.lower() - prefix = 'osd.' - if not _id.startswith(prefix): - raise CephClientInvalidOsdIdValue(osdid=_id) - _id = int(_id[len(prefix):]) - else: - raise CephClientTypeError( - name='_id', - actual=type(_id), - expected='int or string') - - if not isinstance(_id, list): - _id = [_id] - kwargs['id'] = _id - return self._request('osd pg-upmap-items', **kwargs) - - def osd_rm_pg_upmap_items(self, pgid, body='json', timeout=None): - """clear pg_upmap_items mapping for (developers only)""" - kwargs = dict(body=body, timeout=timeout) - if not isinstance(pgid, six.string_types): - raise CephClientTypeError( - name='pgid', - actual=type(pgid), - expected=six.string_types) - if not re.match(r'[0-9]+\.[0-9a-fA-F]+', pgid): - raise CephClientInvalidPgid( - name='pgid', - actual=pgid) - - kwargs['pgid'] = pgid - return self._request('osd rm-pg-upmap-items', **kwargs) - - def osd_primary_temp(self, pgid, _id, body='json', timeout=None): - """set primary_temp mapping pgid:|-1 (developers only)""" - kwargs = dict(body=body, timeout=timeout) - if not isinstance(pgid, six.string_types): - raise CephClientTypeError( - name='pgid', - actual=type(pgid), - expected=six.string_types) - if not re.match(r'[0-9]+\.[0-9a-fA-F]+', pgid): - raise CephClientInvalidPgid( - name='pgid', - actual=pgid) - - kwargs['pgid'] = pgid - if isinstance(_id, six.integer_types): - pass - elif isinstance(_id, six.string_types): - _id = _id.lower() - prefix = 'osd.' - if not _id.startswith(prefix): - raise CephClientInvalidOsdIdValue(osdid=_id) - _id = int(_id[len(prefix):]) - else: - raise CephClientTypeError( - name='_id', - actual=type(_id), - expected='int or string') - - kwargs['id'] = _id - return self._request('osd primary-temp', **kwargs) - - def osd_primary_affinity(self, _id, weight, body='json', timeout=None): - """adjust osd primary-affinity from 0.0 <= <= 1.0""" - kwargs = dict(body=body, timeout=timeout) - if isinstance(_id, six.integer_types): - pass - elif isinstance(_id, six.string_types): - _id = _id.lower() - prefix = 'osd.' 
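#
# Weight-style arguments (osd_reweight, osd_primary_affinity) share one
# guard: numeric type first, then the [0.0, 1.0] bounds. A minimal
# sketch, assuming a hypothetical `client` instance:
#
#     client.osd_primary_affinity('osd.2', 0.5)  # accepted
#     client.osd_primary_affinity('osd.2', 1.5)  # raises
#                                                # CephClientValueOutOfBounds
#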
-            if not _id.startswith(prefix):
-                raise CephClientInvalidOsdIdValue(osdid=_id)
-            _id = int(_id[len(prefix):])
-        else:
-            raise CephClientTypeError(
-                name='_id',
-                actual=type(_id),
-                expected='int or string')
-
-        kwargs['id'] = _id
-        if not isinstance(weight, (six.integer_types, float)):
-            raise CephClientTypeError(
-                name='weight',
-                actual=type(weight),
-                expected=int)
-        if weight < 0.0 or weight > 1.0:
-            raise CephClientValueOutOfBounds(
-                name='weight',
-                actual=weight,
-                min=0.0,
-                max=1.0)
-
-        kwargs['weight'] = weight
-        return self._request('osd primary-affinity', **kwargs)
-
-    OSD_DESTROY_SURE_VALUES = ['--yes-i-really-mean-it']
-
-    def osd_destroy(self, _id, sure=None, body='json', timeout=None):
-        """mark osd as being destroyed. Keeps the ID intact (allowing
-        reuse), but removes cephx keys, config-key data and lockbox
-        keys, rendering data permanently unreadable.
-        """
-        kwargs = dict(body=body, timeout=timeout)
-        if isinstance(_id, six.integer_types):
-            pass
-        elif isinstance(_id, six.string_types):
-            _id = _id.lower()
-            prefix = 'osd.'
-            if not _id.startswith(prefix):
-                raise CephClientInvalidOsdIdValue(osdid=_id)
-            _id = int(_id[len(prefix):])
-        else:
-            raise CephClientTypeError(
-                name='_id',
-                actual=type(_id),
-                expected='int or string')
-
-        kwargs['id'] = _id
-        if sure is not None:
-            if not isinstance(sure, six.string_types):
-                raise CephClientTypeError(
-                    name='sure',
-                    actual=type(sure),
-                    expected=six.string_types)
-            supported = CephClient.OSD_DESTROY_SURE_VALUES
-            if sure not in supported:
-                raise CephClientInvalidChoice(
-                    function='osd_destroy',
-                    option='sure',
-                    value=sure,
-                    supported=', '.join(supported))
-            kwargs['sure'] = sure
-        return self._request('osd destroy', **kwargs)
-
-    OSD_PURGE_NEW_SURE_VALUES = ['--yes-i-really-mean-it']
-
-    def osd_purge_new(self, _id, sure=None, body='json', timeout=None):
-        """purge all traces of an OSD that was partially created but
-        never started
-        """
-        kwargs = dict(body=body, timeout=timeout)
-        if isinstance(_id, six.integer_types):
-            pass
-        elif isinstance(_id, six.string_types):
-            _id = _id.lower()
-            prefix = 'osd.'
-            if not _id.startswith(prefix):
-                raise CephClientInvalidOsdIdValue(osdid=_id)
-            _id = int(_id[len(prefix):])
-        else:
-            raise CephClientTypeError(
-                name='_id',
-                actual=type(_id),
-                expected='int or string')
-
-        kwargs['id'] = _id
-        if sure is not None:
-            if not isinstance(sure, six.string_types):
-                raise CephClientTypeError(
-                    name='sure',
-                    actual=type(sure),
-                    expected=six.string_types)
-            supported = CephClient.OSD_PURGE_NEW_SURE_VALUES
-            if sure not in supported:
-                raise CephClientInvalidChoice(
-                    function='osd_purge_new',
-                    option='sure',
-                    value=sure,
-                    supported=', '.join(supported))
-            kwargs['sure'] = sure
-        return self._request('osd purge-new', **kwargs)
-
-    OSD_PURGE_SURE_VALUES = ['--yes-i-really-mean-it']
-
-    def osd_purge(self, _id, sure=None, body='json', timeout=None):
-        """purge all osd data from the monitors. Combines `osd destroy`,
-        `osd rm`, and `osd crush rm`.
-        """
-        kwargs = dict(body=body, timeout=timeout)
-        if isinstance(_id, six.integer_types):
-            pass
-        elif isinstance(_id, six.string_types):
-            _id = _id.lower()
-            prefix = 'osd.'
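#
# The destructive calls (osd_destroy, osd_purge_new, osd_purge, osd_lost)
# share the confirmation-flag pattern above: the request only carries the
# confirmation when the caller passes the exact literal. A minimal
# sketch, assuming a hypothetical `client` instance:
#
#     client.osd_purge(3)                                 # flag omitted
#     client.osd_purge(3, sure='--yes-i-really-mean-it')  # confirmed
#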
- if not _id.startswith(prefix): - raise CephClientInvalidOsdIdValue(osdid=_id) - _id = int(_id[len(prefix):]) - else: - raise CephClientTypeError( - name='_id', - actual=type(_id), - expected='int or string') - - kwargs['id'] = _id - if sure is not None: - if not isinstance(sure, six.string_types): - raise CephClientTypeError( - name='sure', - actual=type(sure), - expected=six.string_types) - supported = CephClient.OSD_PURGE_SURE_VALUES - if sure not in supported: - raise CephClientInvalidChoice( - function='osd_purge', - option='sure', - value=sure, - supported=', '.join(supported)) - kwargs['sure'] = sure - return self._request('osd purge', **kwargs) - - OSD_LOST_SURE_VALUES = ['--yes-i-really-mean-it'] - - def osd_lost(self, _id, sure=None, body='json', timeout=None): - """mark osd as permanently lost. THIS DESTROYS DATA IF NO MORE REPLICAS EXIST, BE CAREFUL """ - kwargs = dict(body=body, timeout=timeout) - if isinstance(_id, six.integer_types): - pass - elif isinstance(_id, six.string_types): - _id = _id.lower() - prefix = 'osd.' - if not _id.startswith(prefix): - raise CephClientInvalidOsdIdValue(osdid=_id) - _id = int(_id[len(prefix):]) - else: - raise CephClientTypeError( - name='_id', - actual=type(_id), - expected='int or string') - - kwargs['id'] = _id - if sure is not None: - if not isinstance(sure, six.string_types): - raise CephClientTypeError( - name='sure', - actual=type(sure), - expected=six.string_types) - supported = CephClient.OSD_LOST_SURE_VALUES - if sure not in supported: - raise CephClientInvalidChoice( - function='osd_lost', - option='sure', - value=sure, - supported=', '.join(supported)) - kwargs['sure'] = sure - return self._request('osd lost', **kwargs) - - def osd_create(self, uuid=None, _id=None, body='json', timeout=None): - """create new osd (with optional UUID and ID)""" - kwargs = dict(body=body, timeout=timeout) - if uuid is not None: - - kwargs['uuid'] = uuid - if _id is not None: - if isinstance(_id, six.integer_types): - pass - elif isinstance(_id, six.string_types): - _id = _id.lower() - prefix = 'osd.' - if not _id.startswith(prefix): - raise CephClientInvalidOsdIdValue(osdid=_id) - _id = int(_id[len(prefix):]) - else: - raise CephClientTypeError( - name='_id', - actual=type(_id), - expected='int or string') - kwargs['id'] = _id - return self._request('osd create', **kwargs) - - def osd_new(self, uuid, _id=None, body='json', timeout=None): - """Create a new OSD. If supplied, the `id` to be replaced needs to existand have been previously destroyed. Reads secrets from JSON file via `-i ` (see man page). """ - kwargs = dict(body=body, timeout=timeout) - kwargs['uuid'] = uuid - if _id is not None: - if isinstance(_id, six.integer_types): - pass - elif isinstance(_id, six.string_types): - _id = _id.lower() - prefix = 'osd.' 
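#
# osd_create and osd_new accept the optional id either as an int or as an
# 'osd.N' string and forward the integer form to the REST API. A minimal
# sketch, assuming a hypothetical `client` instance and a placeholder
# uuid:
#
#     client.osd_new('00000000-0000-0000-0000-000000000001', _id='osd.5')
#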
- if not _id.startswith(prefix): - raise CephClientInvalidOsdIdValue(osdid=_id) - _id = int(_id[len(prefix):]) - else: - raise CephClientTypeError( - name='_id', - actual=type(_id), - expected='int or string') - kwargs['id'] = _id - return self._request('osd new', **kwargs) - - OSD_BLACKLIST_BLACKLISTOP_VALUES = ['add', 'rm'] - - def osd_blacklist( - self, blacklistop, addr, expire=None, body='json', - timeout=None): - """add (optionally until seconds from now) or remove fromblacklist """ - kwargs = dict(body=body, timeout=timeout) - if not isinstance(blacklistop, six.string_types): - raise CephClientTypeError( - name='blacklistop', - actual=type(blacklistop), - expected=six.string_types) - supported = CephClient.OSD_BLACKLIST_BLACKLISTOP_VALUES - if blacklistop not in supported: - raise CephClientInvalidChoice( - function='osd_blacklist', - option='blacklistop', - value=blacklistop, - supported=', '.join(supported)) - - kwargs['blacklistop'] = blacklistop - - kwargs['addr'] = addr - if expire is not None: - if not isinstance(expire, (six.integer_types, float)): - raise CephClientTypeError( - name='expire', - actual=type(expire), - expected=int) - if expire < 0.0: - raise CephClientValueOutOfBounds( - name='expire', - actual=expire, - min=0.0, - max='unlimited') - kwargs['expire'] = expire - return self._request('osd blacklist', **kwargs) - - def osd_blacklist_ls(self, body='json', timeout=None): - """show blacklisted clients""" - return self._request('osd blacklist ls', - body=body, timeout=timeout) - - def osd_blacklist_clear(self, body='json', timeout=None): - """clear all blacklisted clients""" - return self._request('osd blacklist clear', - body=body, timeout=timeout) - - def osd_pool_mksnap(self, pool, snap, body='json', timeout=None): - """make snapshot in """ - kwargs = dict(body=body, timeout=timeout) - if not isinstance(pool, six.string_types): - raise CephClientTypeError( - name='pool', - actual=type(pool), - expected=six.string_types) - - kwargs['pool'] = pool - if not isinstance(snap, six.string_types): - raise CephClientTypeError( - name='snap', - actual=type(snap), - expected=six.string_types) - - kwargs['snap'] = snap - return self._request('osd pool mksnap', **kwargs) - - def osd_pool_rmsnap(self, pool, snap, body='json', timeout=None): - """remove snapshot from """ - kwargs = dict(body=body, timeout=timeout) - if not isinstance(pool, six.string_types): - raise CephClientTypeError( - name='pool', - actual=type(pool), - expected=six.string_types) - - kwargs['pool'] = pool - if not isinstance(snap, six.string_types): - raise CephClientTypeError( - name='snap', - actual=type(snap), - expected=six.string_types) - - kwargs['snap'] = snap - return self._request('osd pool rmsnap', **kwargs) - - OSD_POOL_LS_DETAIL_VALUES = ['detail'] - - def osd_pool_ls(self, detail=None, body='json', timeout=None): - """list pools""" - kwargs = dict(body=body, timeout=timeout) - if detail is not None: - if not isinstance(detail, six.string_types): - raise CephClientTypeError( - name='detail', - actual=type(detail), - expected=six.string_types) - supported = CephClient.OSD_POOL_LS_DETAIL_VALUES - if detail not in supported: - raise CephClientInvalidChoice( - function='osd_pool_ls', - option='detail', - value=detail, - supported=', '.join(supported)) - kwargs['detail'] = detail - return self._request('osd pool ls', **kwargs) - - OSD_POOL_CREATE_POOL_TYPE_VALUES = ['replicated', 'erasure'] - - def osd_pool_create( - self, pool, pg_num, pgp_num=None, pool_type=None, - erasure_code_profile=None, 
rule=None, - expected_num_objects=None, body='json', timeout=None): - """create pool""" - kwargs = dict(body=body, timeout=timeout) - if not isinstance(pool, six.string_types): - raise CephClientTypeError( - name='pool', - actual=type(pool), - expected=six.string_types) - - kwargs['pool'] = pool - if not isinstance(pg_num, six.integer_types): - raise CephClientTypeError( - name='pg_num', - actual=type(pg_num), - expected=int) - if pg_num < 0: - raise CephClientValueOutOfBounds( - name='pg_num', - actual=pg_num, - min=0, - max='unlimited') - - kwargs['pg_num'] = pg_num - if pgp_num is not None: - if not isinstance(pgp_num, six.integer_types): - raise CephClientTypeError( - name='pgp_num', - actual=type(pgp_num), - expected=int) - if pgp_num < 0: - raise CephClientValueOutOfBounds( - name='pgp_num', - actual=pgp_num, - min=0, - max='unlimited') - kwargs['pgp_num'] = pgp_num - if pool_type is not None: - if not isinstance(pool_type, six.string_types): - raise CephClientTypeError( - name='pool_type', - actual=type(pool_type), - expected=six.string_types) - supported = CephClient.OSD_POOL_CREATE_POOL_TYPE_VALUES - if pool_type not in supported: - raise CephClientInvalidChoice( - function='osd_pool_create', - option='pool_type', - value=pool_type, - supported=', '.join(supported)) - kwargs['pool_type'] = pool_type - if erasure_code_profile is not None: - if not isinstance(erasure_code_profile, six.string_types): - raise CephClientTypeError( - name='erasure_code_profile', - actual=type(erasure_code_profile), - expected=six.string_types) - kwargs['erasure_code_profile'] = erasure_code_profile - if rule is not None: - if not isinstance(rule, six.string_types): - raise CephClientTypeError( - name='rule', - actual=type(rule), - expected=six.string_types) - kwargs['rule'] = rule - if expected_num_objects is not None: - if not isinstance(expected_num_objects, six.integer_types): - raise CephClientTypeError( - name='expected_num_objects', - actual=type(expected_num_objects), - expected=int) - kwargs['expected_num_objects'] = expected_num_objects - return self._request('osd pool create', **kwargs) - - def osd_pool_delete(self, pool, pool2=None, - sure=None, body='json', timeout=None): - """delete pool""" - kwargs = dict(body=body, timeout=timeout) - if not isinstance(pool, six.string_types): - raise CephClientTypeError( - name='pool', - actual=type(pool), - expected=six.string_types) - - kwargs['pool'] = pool - if pool2 is not None: - if not isinstance(pool2, six.string_types): - raise CephClientTypeError( - name='pool2', - actual=type(pool2), - expected=six.string_types) - kwargs['pool2'] = pool2 - if sure is not None: - if not isinstance(sure, six.string_types): - raise CephClientTypeError( - name='sure', - actual=type(sure), - expected=six.string_types) - kwargs['sure'] = sure - return self._request('osd pool delete', **kwargs) - - def osd_pool_rm(self, pool, pool2=None, sure=None, - body='json', timeout=None): - """remove pool""" - kwargs = dict(body=body, timeout=timeout) - if not isinstance(pool, six.string_types): - raise CephClientTypeError( - name='pool', - actual=type(pool), - expected=six.string_types) - - kwargs['pool'] = pool - if pool2 is not None: - if not isinstance(pool2, six.string_types): - raise CephClientTypeError( - name='pool2', - actual=type(pool2), - expected=six.string_types) - kwargs['pool2'] = pool2 - if sure is not None: - if not isinstance(sure, six.string_types): - raise CephClientTypeError( - name='sure', - actual=type(sure), - expected=six.string_types) - kwargs['sure'] = 
sure - return self._request('osd pool rm', **kwargs) - - def osd_pool_rename(self, srcpool, destpool, - body='json', timeout=None): - """rename to """ - kwargs = dict(body=body, timeout=timeout) - if not isinstance(srcpool, six.string_types): - raise CephClientTypeError( - name='srcpool', - actual=type(srcpool), - expected=six.string_types) - - kwargs['srcpool'] = srcpool - if not isinstance(destpool, six.string_types): - raise CephClientTypeError( - name='destpool', - actual=type(destpool), - expected=six.string_types) - - kwargs['destpool'] = destpool - return self._request('osd pool rename', **kwargs) - - OSD_POOL_GET_VAR_VALUES = \ - ['size', 'min_size', 'pg_num', 'pgp_num', - 'crush_rule', 'hashpspool', 'nodelete', - 'nopgchange', 'nosizechange', - 'write_fadvise_dontneed', 'noscrub', - 'nodeep-scrub', 'hit_set_type', - 'hit_set_period', 'hit_set_count', - 'hit_set_fpp', 'use_gmt_hitset', 'auid', - 'target_max_objects', 'target_max_bytes', - 'cache_target_dirty_ratio', - 'cache_target_dirty_high_ratio', - 'cache_target_full_ratio', - 'cache_min_flush_age', 'cache_min_evict_age', - 'erasure_code_profile', - 'min_read_recency_for_promote', 'all', - 'min_write_recency_for_promote', 'fast_read', - 'hit_set_grade_decay_rate', - 'hit_set_search_last_n', 'scrub_min_interval', - 'scrub_max_interval', 'deep_scrub_interval', - 'recovery_priority', 'recovery_op_priority', - 'scrub_priority', 'compression_mode', - 'compression_algorithm', - 'compression_required_ratio', - 'compression_max_blob_size', - 'compression_min_blob_size', 'csum_type', - 'csum_min_block', 'csum_max_block', - 'allow_ec_overwrites'] - - def osd_pool_get(self, pool, var, body='json', timeout=None): - """get pool parameter """ - kwargs = dict(body=body, timeout=timeout) - if not isinstance(pool, six.string_types): - raise CephClientTypeError( - name='pool', - actual=type(pool), - expected=six.string_types) - - kwargs['pool'] = pool - if not isinstance(var, six.string_types): - raise CephClientTypeError( - name='var', - actual=type(var), - expected=six.string_types) - supported = CephClient.OSD_POOL_GET_VAR_VALUES - if var not in supported: - raise CephClientInvalidChoice( - function='osd_pool_get', - option='var', - value=var, - supported=', '.join(supported)) - - kwargs['var'] = var - return self._request('osd pool get', **kwargs) - - OSD_POOL_SET_VAR_VALUES = \ - ['size', 'min_size', 'pg_num', 'pgp_num', - 'crush_rule', 'hashpspool', 'nodelete', - 'nopgchange', 'nosizechange', - 'write_fadvise_dontneed', 'noscrub', - 'nodeep-scrub', 'hit_set_type', - 'hit_set_period', 'hit_set_count', - 'hit_set_fpp', 'use_gmt_hitset', - 'target_max_bytes', 'target_max_objects', - 'cache_target_dirty_ratio', - 'cache_target_dirty_high_ratio', - 'cache_target_full_ratio', - 'cache_min_flush_age', 'cache_min_evict_age', - 'auid', 'min_read_recency_for_promote', - 'min_write_recency_for_promote', 'fast_read', - 'hit_set_grade_decay_rate', - 'hit_set_search_last_n', 'scrub_min_interval', - 'scrub_max_interval', 'deep_scrub_interval', - 'recovery_priority', 'recovery_op_priority', - 'scrub_priority', 'compression_mode', - 'compression_algorithm', - 'compression_required_ratio', - 'compression_max_blob_size', - 'compression_min_blob_size', 'csum_type', - 'csum_min_block', 'csum_max_block', - 'allow_ec_overwrites'] - - OSD_POOL_SET_FORCE_VALUES = ['--yes-i-really-mean-it'] - - def osd_pool_set(self, pool, var, val, force=None, - body='json', timeout=None): - """set pool parameter to """ - kwargs = dict(body=body, timeout=timeout) - if not 
isinstance(pool, six.string_types): - raise CephClientTypeError( - name='pool', - actual=type(pool), - expected=six.string_types) - - kwargs['pool'] = pool - if not isinstance(var, six.string_types): - raise CephClientTypeError( - name='var', - actual=type(var), - expected=six.string_types) - supported = CephClient.OSD_POOL_SET_VAR_VALUES - if var not in supported: - raise CephClientInvalidChoice( - function='osd_pool_set', - option='var', - value=var, - supported=', '.join(supported)) - - kwargs['var'] = var - if not isinstance(val, six.string_types): - raise CephClientTypeError( - name='val', - actual=type(val), - expected=six.string_types) - - kwargs['val'] = val - if force is not None: - if not isinstance(force, six.string_types): - raise CephClientTypeError( - name='force', - actual=type(force), - expected=six.string_types) - supported = CephClient.OSD_POOL_SET_FORCE_VALUES - if force not in supported: - raise CephClientInvalidChoice( - function='osd_pool_set', - option='force', - value=force, - supported=', '.join(supported)) - kwargs['force'] = force - return self._request('osd pool set', **kwargs) - - OSD_POOL_SET_QUOTA_FIELD_VALUES = ['max_objects', 'max_bytes'] - - def osd_pool_set_quota(self, pool, field, val, - body='json', timeout=None): - """set object or byte limit on pool""" - kwargs = dict(body=body, timeout=timeout) - if not isinstance(pool, six.string_types): - raise CephClientTypeError( - name='pool', - actual=type(pool), - expected=six.string_types) - - kwargs['pool'] = pool - if not isinstance(field, six.string_types): - raise CephClientTypeError( - name='field', - actual=type(field), - expected=six.string_types) - supported = CephClient.OSD_POOL_SET_QUOTA_FIELD_VALUES - if field not in supported: - raise CephClientInvalidChoice( - function='osd_pool_set_quota', - option='field', - value=field, - supported=', '.join(supported)) - - kwargs['field'] = field - if not isinstance(val, six.string_types): - raise CephClientTypeError( - name='val', - actual=type(val), - expected=six.string_types) - - kwargs['val'] = val - return self._request('osd pool set-quota', **kwargs) - - def osd_pool_get_quota(self, pool, body='json', timeout=None): - """obtain object or byte limits for pool""" - kwargs = dict(body=body, timeout=timeout) - if not isinstance(pool, six.string_types): - raise CephClientTypeError( - name='pool', - actual=type(pool), - expected=six.string_types) - - kwargs['pool'] = pool - return self._request('osd pool get-quota', **kwargs) - - OSD_POOL_APPLICATION_ENABLE_FORCE_VALUES = ['--yes-i-really-mean-it'] - - def osd_pool_application_enable( - self, pool, app, force=None, body='json', timeout=None): - """enable use of an application [cephfs,rbd,rgw] on pool """ - kwargs = dict(body=body, timeout=timeout) - if not isinstance(pool, six.string_types): - raise CephClientTypeError( - name='pool', - actual=type(pool), - expected=six.string_types) - - kwargs['pool'] = pool - if not isinstance(app, six.string_types): - raise CephClientTypeError( - name='app', - actual=type(app), - expected=six.string_types) - - kwargs['app'] = app - if force is not None: - if not isinstance(force, six.string_types): - raise CephClientTypeError( - name='force', - actual=type(force), - expected=six.string_types) - supported = CephClient.OSD_POOL_APPLICATION_ENABLE_FORCE_VALUES - if force not in supported: - raise CephClientInvalidChoice( - function='osd_pool_application_enable', - option='force', - value=force, - supported=', '.join(supported)) - kwargs['force'] = force - return 
self._request('osd pool application enable', **kwargs) - - OSD_POOL_APPLICATION_DISABLE_FORCE_VALUES = ['--yes-i-really-mean-it'] - - def osd_pool_application_disable( - self, pool, app, force=None, body='json', timeout=None): - """disables use of an application on pool """ - kwargs = dict(body=body, timeout=timeout) - if not isinstance(pool, six.string_types): - raise CephClientTypeError( - name='pool', - actual=type(pool), - expected=six.string_types) - - kwargs['pool'] = pool - if not isinstance(app, six.string_types): - raise CephClientTypeError( - name='app', - actual=type(app), - expected=six.string_types) - - kwargs['app'] = app - if force is not None: - if not isinstance(force, six.string_types): - raise CephClientTypeError( - name='force', - actual=type(force), - expected=six.string_types) - supported = CephClient.OSD_POOL_APPLICATION_DISABLE_FORCE_VALUES - if force not in supported: - raise CephClientInvalidChoice( - function='osd_pool_application_disable', - option='force', - value=force, - supported=', '.join(supported)) - kwargs['force'] = force - return self._request('osd pool application disable', **kwargs) - - def osd_pool_application_set( - self, pool, app, key, value, body='json', timeout=None): - """sets application metadata key to on pool """ - kwargs = dict(body=body, timeout=timeout) - if not isinstance(pool, six.string_types): - raise CephClientTypeError( - name='pool', - actual=type(pool), - expected=six.string_types) - - kwargs['pool'] = pool - if not isinstance(app, six.string_types): - raise CephClientTypeError( - name='app', - actual=type(app), - expected=six.string_types) - - kwargs['app'] = app - if not isinstance(key, six.string_types): - raise CephClientTypeError( - name='key', - actual=type(key), - expected=six.string_types) - - kwargs['key'] = key - if not isinstance(value, six.string_types): - raise CephClientTypeError( - name='value', - actual=type(value), - expected=six.string_types) - - kwargs['value'] = value - return self._request('osd pool application set', **kwargs) - - def osd_pool_application_rm( - self, pool, app, key, body='json', timeout=None): - """removes application metadata key on pool """ - kwargs = dict(body=body, timeout=timeout) - if not isinstance(pool, six.string_types): - raise CephClientTypeError( - name='pool', - actual=type(pool), - expected=six.string_types) - - kwargs['pool'] = pool - if not isinstance(app, six.string_types): - raise CephClientTypeError( - name='app', - actual=type(app), - expected=six.string_types) - - kwargs['app'] = app - if not isinstance(key, six.string_types): - raise CephClientTypeError( - name='key', - actual=type(key), - expected=six.string_types) - - kwargs['key'] = key - return self._request('osd pool application rm', **kwargs) - - def osd_pool_application_get( - self, pool, app=None, key=None, body='json', timeout=None): - """get value of key of application on pool """ - kwargs = dict(body=body, timeout=timeout) - if not isinstance(pool, six.string_types): - raise CephClientTypeError( - name='pool', - actual=type(pool), - expected=six.string_types) - - kwargs['pool'] = pool - if app is not None: - if not isinstance(app, six.string_types): - raise CephClientTypeError( - name='app', - actual=type(app), - expected=six.string_types) - kwargs['app'] = app - if key is not None: - if not isinstance(key, six.string_types): - raise CephClientTypeError( - name='key', - actual=type(key), - expected=six.string_types) - kwargs['key'] = key - return self._request('osd pool application get', **kwargs) - - def 
osd_utilization(self, body='json', timeout=None):
-        """get basic pg distribution stats"""
-        return self._request('osd utilization', body=body, timeout=timeout)
-
-    OSD_TIER_ADD_FORCE_NONEMPTY_VALUES = ['--force-nonempty']
-
-    def osd_tier_add(
-            self, pool, tierpool, force_nonempty=None, body='json',
-            timeout=None):
-        """add the tier <tierpool> (the second one) to base pool <pool>
-        (the first one)
-        """
-        kwargs = dict(body=body, timeout=timeout)
-        if not isinstance(pool, six.string_types):
-            raise CephClientTypeError(
-                name='pool',
-                actual=type(pool),
-                expected=six.string_types)
-
-        kwargs['pool'] = pool
-        if not isinstance(tierpool, six.string_types):
-            raise CephClientTypeError(
-                name='tierpool',
-                actual=type(tierpool),
-                expected=six.string_types)
-
-        kwargs['tierpool'] = tierpool
-        if force_nonempty is not None:
-            if not isinstance(force_nonempty, six.string_types):
-                raise CephClientTypeError(
-                    name='force_nonempty',
-                    actual=type(force_nonempty),
-                    expected=six.string_types)
-            supported = CephClient.OSD_TIER_ADD_FORCE_NONEMPTY_VALUES
-            if force_nonempty not in supported:
-                raise CephClientInvalidChoice(
-                    function='osd_tier_add',
-                    option='force_nonempty',
-                    value=force_nonempty,
-                    supported=', '.join(supported))
-            kwargs['force_nonempty'] = force_nonempty
-        return self._request('osd tier add', **kwargs)
-
-    def osd_tier_rm(self, pool, tierpool, body='json', timeout=None):
-        """remove the tier <tierpool> (the second one) from base pool
-        <pool> (the first one)
-        """
-        kwargs = dict(body=body, timeout=timeout)
-        if not isinstance(pool, six.string_types):
-            raise CephClientTypeError(
-                name='pool',
-                actual=type(pool),
-                expected=six.string_types)
-
-        kwargs['pool'] = pool
-        if not isinstance(tierpool, six.string_types):
-            raise CephClientTypeError(
-                name='tierpool',
-                actual=type(tierpool),
-                expected=six.string_types)
-
-        kwargs['tierpool'] = tierpool
-        return self._request('osd tier rm', **kwargs)
-
-    def osd_tier_remove(self, pool, tierpool, body='json', timeout=None):
-        """remove the tier <tierpool> (the second one) from base pool
-        <pool> (the first one)
-        """
-        kwargs = dict(body=body, timeout=timeout)
-        if not isinstance(pool, six.string_types):
-            raise CephClientTypeError(
-                name='pool',
-                actual=type(pool),
-                expected=six.string_types)
-
-        kwargs['pool'] = pool
-        if not isinstance(tierpool, six.string_types):
-            raise CephClientTypeError(
-                name='tierpool',
-                actual=type(tierpool),
-                expected=six.string_types)
-
-        kwargs['tierpool'] = tierpool
-        return self._request('osd tier remove', **kwargs)
-
-    OSD_TIER_CACHE_MODE_MODE_VALUES = \
-        ['none', 'writeback', 'forward', 'readonly',
-         'readforward', 'proxy', 'readproxy']
-
-    OSD_TIER_CACHE_MODE_SURE_VALUES = ['--yes-i-really-mean-it']
-
-    def osd_tier_cache_mode(
-            self, pool, mode, sure=None, body='json', timeout=None):
-        """specify the caching mode for cache tier <pool>"""
-        kwargs = dict(body=body, timeout=timeout)
-        if not isinstance(pool, six.string_types):
-            raise CephClientTypeError(
-                name='pool',
-                actual=type(pool),
-                expected=six.string_types)
-
-        kwargs['pool'] = pool
-        if not isinstance(mode, six.string_types):
-            raise CephClientTypeError(
-                name='mode',
-                actual=type(mode),
-                expected=six.string_types)
-        supported = CephClient.OSD_TIER_CACHE_MODE_MODE_VALUES
-        if mode not in supported:
-            raise CephClientInvalidChoice(
-                function='osd_tier_cache_mode',
-                option='mode',
-                value=mode,
-                supported=', '.join(supported))
-
-        kwargs['mode'] = mode
-        if sure is not None:
-            if not isinstance(sure, six.string_types):
-                raise CephClientTypeError(
-                    name='sure',
-                    actual=type(sure),
expected=six.string_types) - supported = CephClient.OSD_TIER_CACHE_MODE_SURE_VALUES - if sure not in supported: - raise CephClientInvalidChoice( - function='osd_tier_cache_mode', - option='sure', - value=sure, - supported=', '.join(supported)) - kwargs['sure'] = sure - return self._request('osd tier cache-mode', **kwargs) - - def osd_tier_set_overlay( - self, pool, overlaypool, body='json', timeout=None): - """set the overlay pool for base pool to be """ - kwargs = dict(body=body, timeout=timeout) - if not isinstance(pool, six.string_types): - raise CephClientTypeError( - name='pool', - actual=type(pool), - expected=six.string_types) - - kwargs['pool'] = pool - if not isinstance(overlaypool, six.string_types): - raise CephClientTypeError( - name='overlaypool', - actual=type(overlaypool), - expected=six.string_types) - - kwargs['overlaypool'] = overlaypool - return self._request('osd tier set-overlay', **kwargs) - - def osd_tier_rm_overlay(self, pool, body='json', timeout=None): - """remove the overlay pool for base pool """ - kwargs = dict(body=body, timeout=timeout) - if not isinstance(pool, six.string_types): - raise CephClientTypeError( - name='pool', - actual=type(pool), - expected=six.string_types) - - kwargs['pool'] = pool - return self._request('osd tier rm-overlay', **kwargs) - - def osd_tier_remove_overlay(self, pool, body='json', timeout=None): - """remove the overlay pool for base pool """ - kwargs = dict(body=body, timeout=timeout) - if not isinstance(pool, six.string_types): - raise CephClientTypeError( - name='pool', - actual=type(pool), - expected=six.string_types) - - kwargs['pool'] = pool - return self._request('osd tier remove-overlay', **kwargs) - - def osd_tier_add_cache(self, pool, tierpool, size, - body='json', timeout=None): - - """add a cache (the second one) of size to existingpool (the first one) """ - kwargs = dict(body=body, timeout=timeout) - if not isinstance(pool, six.string_types): - raise CephClientTypeError( - name='pool', - actual=type(pool), - expected=six.string_types) - - kwargs['pool'] = pool - if not isinstance(tierpool, six.string_types): - raise CephClientTypeError( - name='tierpool', - actual=type(tierpool), - expected=six.string_types) - - kwargs['tierpool'] = tierpool - if not isinstance(size, six.integer_types): - raise CephClientTypeError( - name='size', - actual=type(size), - expected=int) - if size < 0: - raise CephClientValueOutOfBounds( - name='size', - actual=size, - min=0, - max='unlimited') - - kwargs['size'] = size - return self._request('osd tier add-cache', **kwargs) - - def config_key_get(self, key, body='json', timeout=None): - """get """ - kwargs = dict(body=body, timeout=timeout) - if not isinstance(key, six.string_types): - raise CephClientTypeError( - name='key', - actual=type(key), - expected=six.string_types) - - kwargs['key'] = key - return self._request('config-key get', **kwargs) - - def config_key_set(self, key, val=None, body='json', timeout=None): - """set to value """ - kwargs = dict(body=body, timeout=timeout) - if not isinstance(key, six.string_types): - raise CephClientTypeError( - name='key', - actual=type(key), - expected=six.string_types) - - kwargs['key'] = key - if val is not None: - if not isinstance(val, six.string_types): - raise CephClientTypeError( - name='val', - actual=type(val), - expected=six.string_types) - kwargs['val'] = val - return self._request('config-key set', **kwargs) - - def config_key_put(self, key, val=None, body='json', timeout=None): - """put , value """ - kwargs = dict(body=body, 
timeout=timeout) - if not isinstance(key, six.string_types): - raise CephClientTypeError( - name='key', - actual=type(key), - expected=six.string_types) - - kwargs['key'] = key - if val is not None: - if not isinstance(val, six.string_types): - raise CephClientTypeError( - name='val', - actual=type(val), - expected=six.string_types) - kwargs['val'] = val - return self._request('config-key put', **kwargs) - - def config_key_del(self, key, body='json', timeout=None): - """delete """ - kwargs = dict(body=body, timeout=timeout) - if not isinstance(key, six.string_types): - raise CephClientTypeError( - name='key', - actual=type(key), - expected=six.string_types) - - kwargs['key'] = key - return self._request('config-key del', **kwargs) - - def config_key_rm(self, key, body='json', timeout=None): - """rm """ - kwargs = dict(body=body, timeout=timeout) - if not isinstance(key, six.string_types): - raise CephClientTypeError( - name='key', - actual=type(key), - expected=six.string_types) - - kwargs['key'] = key - return self._request('config-key rm', **kwargs) - - def config_key_exists(self, key, body='json', timeout=None): - """check for 's existence""" - kwargs = dict(body=body, timeout=timeout) - if not isinstance(key, six.string_types): - raise CephClientTypeError( - name='key', - actual=type(key), - expected=six.string_types) - - kwargs['key'] = key - return self._request('config-key exists', **kwargs) - - def config_key_list(self, body='json', timeout=None): - """list keys""" - return self._request('config-key list', body=body, timeout=timeout) - - def config_key_ls(self, body='json', timeout=None): - """list keys""" - return self._request('config-key ls', body=body, timeout=timeout) - - def config_key_dump(self, key=None, body='json', timeout=None): - """dump keys and values (with optional prefix)""" - kwargs = dict(body=body, timeout=timeout) - if key is not None: - if not isinstance(key, six.string_types): - raise CephClientTypeError( - name='key', - actual=type(key), - expected=six.string_types) - kwargs['key'] = key - return self._request('config-key dump', **kwargs) - - def mgr_dump(self, epoch=None, body='json', timeout=None): - """dump the latest MgrMap""" - kwargs = dict(body=body, timeout=timeout) - if epoch is not None: - if not isinstance(epoch, six.integer_types): - raise CephClientTypeError( - name='epoch', - actual=type(epoch), - expected=int) - if epoch < 0: - raise CephClientValueOutOfBounds( - name='epoch', - actual=epoch, - min=0, - max='unlimited') - kwargs['epoch'] = epoch - return self._request('mgr dump', **kwargs) - - def mgr_fail(self, who, body='json', timeout=None): - """treat the named manager daemon as failed""" - kwargs = dict(body=body, timeout=timeout) - if not isinstance(who, six.string_types): - raise CephClientTypeError( - name='who', - actual=type(who), - expected=six.string_types) - - kwargs['who'] = who - return self._request('mgr fail', **kwargs) - - def mgr_module_ls(self, body='json', timeout=None): - """list active mgr modules""" - return self._request('mgr module ls', body=body, timeout=timeout) - - def mgr_services(self, body='json', timeout=None): - """list service endpoints provided by mgr modules""" - return self._request('mgr services', body=body, timeout=timeout) - - MGR_MODULE_ENABLE_FORCE_VALUES = ['--force'] - - def mgr_module_enable(self, module, force=None, - body='json', timeout=None): - """enable mgr module""" - kwargs = dict(body=body, timeout=timeout) - if not isinstance(module, six.string_types): - raise CephClientTypeError( - 
name='module', - actual=type(module), - expected=six.string_types) - - kwargs['module'] = module - if force is not None: - if not isinstance(force, six.string_types): - raise CephClientTypeError( - name='force', - actual=type(force), - expected=six.string_types) - supported = CephClient.MGR_MODULE_ENABLE_FORCE_VALUES - if force not in supported: - raise CephClientInvalidChoice( - function='mgr_module_enable', - option='force', - value=force, - supported=', '.join(supported)) - kwargs['force'] = force - return self._request('mgr module enable', **kwargs) - - def mgr_module_disable(self, module, body='json', timeout=None): - """disable mgr module""" - kwargs = dict(body=body, timeout=timeout) - if not isinstance(module, six.string_types): - raise CephClientTypeError( - name='module', - actual=type(module), - expected=six.string_types) - - kwargs['module'] = module - return self._request('mgr module disable', **kwargs) - - def mgr_metadata(self, who=None, body='json', timeout=None): - """dump metadata for all daemons or a specific daemon""" - kwargs = dict(body=body, timeout=timeout) - if who is not None: - if not isinstance(who, six.string_types): - raise CephClientTypeError( - name='who', - actual=type(who), - expected=six.string_types) - kwargs['who'] = who - return self._request('mgr metadata', **kwargs) - - def mgr_count_metadata(self, _property, body='json', timeout=None): - """count ceph-mgr daemons by metadata field property""" - kwargs = dict(body=body, timeout=timeout) - if not isinstance(_property, six.string_types): - raise CephClientTypeError( - name='_property', - actual=type(_property), - expected=six.string_types) - - kwargs['property'] = _property - return self._request('mgr count-metadata', **kwargs) - - def mgr_versions(self, body='json', timeout=None): - """check running versions of ceph-mgr daemons""" - return self._request('mgr versions', body=body, timeout=timeout) - - def config_set(self, who, name, value, body='json', timeout=None): - """Set a configuration option for one or more entities""" - kwargs = dict(body=body, timeout=timeout) - if not isinstance(who, six.string_types): - raise CephClientTypeError( - name='who', - actual=type(who), - expected=six.string_types) - - kwargs['who'] = who - if not isinstance(name, six.string_types): - raise CephClientTypeError( - name='name', - actual=type(name), - expected=six.string_types) - - kwargs['name'] = name - if not isinstance(value, six.string_types): - raise CephClientTypeError( - name='value', - actual=type(value), - expected=six.string_types) - - kwargs['value'] = value - return self._request('config set', **kwargs) - - def config_rm(self, who, name, body='json', timeout=None): - """Clear a configuration option for one or more entities""" - kwargs = dict(body=body, timeout=timeout) - if not isinstance(who, six.string_types): - raise CephClientTypeError( - name='who', - actual=type(who), - expected=six.string_types) - - kwargs['who'] = who - if not isinstance(name, six.string_types): - raise CephClientTypeError( - name='name', - actual=type(name), - expected=six.string_types) - - kwargs['name'] = name - return self._request('config rm', **kwargs) - - def config_get(self, who, key, body='json', timeout=None): - """Show configuration option(s) for an entity""" - kwargs = dict(body=body, timeout=timeout) - if not isinstance(who, six.string_types): - raise CephClientTypeError( - name='who', - actual=type(who), - expected=six.string_types) - - kwargs['who'] = who - if not isinstance(key, six.string_types): - raise 
CephClientTypeError( - name='key', - actual=type(key), - expected=six.string_types) - - kwargs['key'] = key - return self._request('config get', **kwargs) - - def config_dump(self, body='json', timeout=None): - """Show all configuration option(s)""" - return self._request('config dump', body=body, timeout=timeout) - - def config_help(self, key, body='json', timeout=None): - """Describe a configuration option""" - kwargs = dict(body=body, timeout=timeout) - if not isinstance(key, six.string_types): - raise CephClientTypeError( - name='key', - actual=type(key), - expected=six.string_types) - - kwargs['key'] = key - return self._request('config help', **kwargs) - - def config_assimilate_conf(self, body='json', timeout=None): - """Assimilate options from a conf, and return a new, minimal conf file""" - return self._request('config assimilate-conf', - body=body, timeout=timeout) - - def config_log(self, num, body='json', timeout=None): - """Show recent history of config changes""" - kwargs = dict(body=body, timeout=timeout) - if not isinstance(num, six.integer_types): - raise CephClientTypeError( - name='num', - actual=type(num), - expected=int) - - kwargs['num'] = num - return self._request('config log', **kwargs) - - def config_reset(self, num, body='json', timeout=None): - """Revert configuration to previous state""" - kwargs = dict(body=body, timeout=timeout) - if not isinstance(num, six.integer_types): - raise CephClientTypeError( - name='num', - actual=type(num), - expected=int) - - kwargs['num'] = num - return self._request('config reset', **kwargs) diff --git a/ceph/python-cephclient/python-cephclient/cephclient/exception.py b/ceph/python-cephclient/python-cephclient/cephclient/exception.py deleted file mode 100644 index 03d2d0271..000000000 --- a/ceph/python-cephclient/python-cephclient/cephclient/exception.py +++ /dev/null @@ -1,100 +0,0 @@ -# -# Copyright (c) 2019 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - - -class CephClientException(Exception): - message = "generic ceph client exception" - - def __init__(self, *args, **kwargs): - if "message" not in kwargs: - try: - message = self.message.format(*args, **kwargs) - except Exception: # noqa - message = '{}, args:{}, kwargs: {}'.format( - self.message, args, kwargs) - else: - message = kwargs["message"] - super(CephClientException, self).__init__(message) - - -class CephMonRestfulListKeysError(CephClientException): - message = "Failed to get ceph-mgr restful plugin keys. {}" - - -class CephMonRestfulJsonError(CephClientException): - message = "Failed to decode ceph-mgr restful plugin JSON response: {}" - - -class CephMonRestfulMissingUserCredentials(CephClientException): - message = "Failed to get ceph-mgr restful plugin credentials for user: {}" - - -class CephMgrDumpError(CephClientException): - message = "Failed to get ceph manager info. {}" - - -class CephMgrJsonError(CephClientException): - message = "Failed to decode ceph manager JSON response: {}" - - -class CephMgrMissingRestfulService(CephClientException): - message = "Missing restful service. Available services: {}" - - -class CephClientFormatNotSupported(CephClientException): - message = "Command '{prefix}' does not support request format '{format}'" - - -class CephClientResponseFormatNotImplemented(CephClientException): - message = ("Can't decode response. Support for '{format}' format " - "is not implemented. 
Response: {reason}") - - -class CephClientFunctionNotImplemented(CephClientException): - message = "Function '{name}' is not implemented" - - -class CephClientInvalidChoice(CephClientException): - message = ("Function '{function}' does not support option " - "{option}='{value}'. Supported values are: {supported}") - - -class CephClientTypeError(CephClientException): - message = ("Expecting option '{name}' of type {expected}. " - "Got {actual} instead") - - -class CephClientValueOutOfBounds(CephClientException): - message = ("Argument '{name}' should be within range: {min} .. {max} " - ". Got value '{actual}' instead") - - -class CephClientInvalidPgid(CephClientException): - message = ("Argument '{name}' is not a valid Ceph PG id. Expected " - "n.xxx where n is an int > 0, xxx is a hex number > 0. " - "Got value '{actual}' instead") - - -class CephClientInvalidIPAddr(CephClientException): - message = ("Argument '{name}' should be a valid IPv4 or IPv6 address. " - "Got value '{actual}' instead") - - -class CephClientInvalidOsdIdValue(CephClientException): - message = ("Invalid OSD ID value '{osdid}'. Should start with 'osd.'") - - -class CephClientInvalidOsdIdType(CephClientException): - message = ("Invalid OSD ID type for '{osdid}'. " - "Expected integer or 'osd.NNN'") - - -class CephClientNoSuchUser(CephClientException): - message = ("No such user '{user}'.") - - -class CephClientIncorrectPassword(CephClientException): - message = ("Incorrect password for user '{user}'.") diff --git a/ceph/python-cephclient/python-cephclient/cephclient/tests/__init__.py b/ceph/python-cephclient/python-cephclient/cephclient/tests/__init__.py deleted file mode 100644 index 0c941312c..000000000 --- a/ceph/python-cephclient/python-cephclient/cephclient/tests/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -# -# Copyright (c) 2019 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# diff --git a/ceph/python-cephclient/python-cephclient/cephclient/wrapper.py b/ceph/python-cephclient/python-cephclient/cephclient/wrapper.py deleted file mode 100644 index 126edcad1..000000000 --- a/ceph/python-cephclient/python-cephclient/cephclient/wrapper.py +++ /dev/null @@ -1,268 +0,0 @@ -# -# Copyright (c) 2019 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# -import six - -from cephclient.client import CephClient -from cephclient.exception import CephClientFunctionNotImplemented -from cephclient.exception import CephClientInvalidOsdIdValue -from cephclient.exception import CephClientTypeError - - -class CephWrapper(CephClient): - - def __init__(self, endpoint=''): - super(CephWrapper, self).__init__() - - def auth_import(self, body='json', timeout=None): - raise CephClientFunctionNotImplemented(name='auth_import') - - def _sanitize_osdid_to_str(self, _id): - if isinstance(_id, six.string_types): - prefix = 'osd.' - if not _id.startswith(prefix): - try: - int(_id) - except ValueError: - raise CephClientInvalidOsdIdValue( - osdid=_id) - _id = prefix + _id - elif isinstance(_id, six.integer_types): - _id = 'osd.{}'.format(_id) - else: - raise CephClientInvalidOsdIdValue( - osdid=_id) - return _id - - def _sanitize_osdid_to_int(self, _id): - if isinstance(_id, six.string_types): - prefix = 'osd.' 
-            if _id.startswith(prefix):
-                _id = _id[len(prefix):]
-            try:
-                _id = int(_id)
-            except ValueError:
-                raise CephClientInvalidOsdIdValue(
-                    osdid=_id)
-        elif not isinstance(_id, six.integer_types):
-            raise CephClientInvalidOsdIdValue(
-                osdid=_id)
-        return _id
-
-    def osd_create(self, uuid, body='json', timeout=None, params=None):
-        """create new osd (with optional UUID and ID)
-
-        Notes:
-        1. osd create declares it accepts osd id as string but only works when
-           given an integer value; it automatically generates an ID otherwise
-           instead of using the one provided by 'osd create id=...'
-
-        2. old cephclient passes osd id through params dictionary
-        """
-        kwargs = dict(uuid=uuid, body=body, timeout=timeout)
-        try:
-            kwargs['id'] = self._sanitize_osdid_to_int(params['id'])
-        except (KeyError, TypeError):
-            pass
-        return self._request('osd create', **kwargs)
-
-    def osd_rm(self, ids, body='json', timeout=None):
-        """remove osd(s) <ids> [<ids>...], or use <any|all> to remove all osds"""
-        if isinstance(ids, list):
-            ids = [self._sanitize_osdid_to_str(_id)
-                   for _id in ids]
-        else:
-            ids = self._sanitize_osdid_to_str(ids)
-        return super(CephWrapper, self).osd_rm(
-            ids=ids, body=body, timeout=timeout)
-
-    def osd_remove(self, ids, body='json', timeout=None):
-        return self.osd_rm(ids, body=body, timeout=timeout)
-
-    def osd_down(self, ids, body='json', timeout=None):
-        """set osd(s) <ids> [<ids>...] down, or use <any|all> to set all osds down"""
-        if isinstance(ids, list):
-            ids = [self._sanitize_osdid_to_str(_id)
-                   for _id in ids]
-        else:
-            ids = self._sanitize_osdid_to_str(ids)
-        return super(CephWrapper, self).osd_down(
-            ids=ids, body=body, timeout=timeout)
-
-    OSD_CRUSH_TREE_CONVERTED_FIELDS = [
-        'crush_weight', 'depth', 'id', 'name', 'type', 'type_id']
-
-    def _osd_crush_tree_convert_node(self, node):
-        return {k: node[k] for k in self.OSD_CRUSH_TREE_CONVERTED_FIELDS
-                if k in node}
-
-    def _osd_crush_tree_populate_tree(self, node, node_map):
-        children = node.get('children')
-        node = self._osd_crush_tree_convert_node(node)
-        if node['type'] != 'osd':
-            node['items'] = []
-            for _id in children:
-                node['items'].append(
-                    self._osd_crush_tree_populate_tree(
-                        node_map[_id], node_map))
-        return node
-
-    def osd_crush_tree(self, shadow=None, body='json', timeout=None):
-        """dump crush buckets and items in a tree view"""
-        response, _body = super(CephWrapper, self).osd_crush_tree(
-            shadow=shadow, body=body, timeout=timeout)
-        trees = []
-        if response.ok and body == 'json' \
-                and 'output' in _body:
-            node_map = {}
-            root_nodes = []
-            for node in _body['output']:
-                node_map[node['id']] = node
-                if node['type'] == 'root':
-                    root_nodes.append(node)
-            for root in root_nodes:
-                trees.append(
-                    self._osd_crush_tree_populate_tree(
-                        root, node_map))
-            _body['output'] = trees
-        return response, _body
-
-    def _osd_crush_rule_by_ruleset(self, ruleset, timeout=None):
-        response, _body = self.osd_crush_rule_dump(
-            body='json', timeout=timeout)
-        if not response.ok:
-            return response, _body
-        name = None
-        for rule in _body['output']:
-            if rule.get('ruleset') == ruleset:
-                name = rule.get('rule_name')
-        _body['output'] = dict(rule=name)
-        return response, _body
-
-    def _osd_crush_ruleset_by_rule(self, rule, timeout=None):
-        response, _body = self.osd_crush_rule_dump(
-            name=rule, body='json', timeout=timeout)
-        return response, _body
-
-    def osd_pool_create(self, pool, pg_num, pgp_num=None, pool_type=None,
-                        erasure_code_profile=None, ruleset=None,
-                        expected_num_objects=None, body='json', timeout=None):
-        """create pool
-
-        Notes:
-        1. map 'ruleset' to 'rule' (assuming 1:1 correspondence)
-        """
-        response, _body = self._osd_crush_rule_by_ruleset(ruleset)
-        if not response.ok:
-            return response, _body
-        rule = _body['output']['rule']
-        return super(CephWrapper, self).osd_pool_create(
-            pool, pg_num, pgp_num=pgp_num, pool_type=pool_type,
-            erasure_code_profile=erasure_code_profile, rule=rule,
-            expected_num_objects=expected_num_objects, body=body,
-            timeout=timeout)
-
-    def osd_get_pool_param(self, pool, var, body='json', timeout=None):
-        """get pool parameter <var>"""
-        if var == 'crush_ruleset':
-            response, _body = super(CephWrapper, self).osd_pool_get(
-                pool, 'crush_rule', body='json', timeout=timeout)
-            if response.ok:
-                rule = _body['output']['crush_rule']
-                del _body['output']['crush_rule']
-                response, _body = self._osd_crush_ruleset_by_rule(
-                    rule, timeout=timeout)
-                if response.ok:
-                    _body['output'] = dict(
-                        crush_ruleset=_body['output']['ruleset'])
-            return response, _body
-        else:
-            return super(CephWrapper, self).osd_pool_get(
-                pool, var, body=body, timeout=timeout)
-
-    def osd_pool_set(self, pool, var, val, force=None,
-                     body='json', timeout=None):
-        """set pool parameter <var> to <val>"""
-        return super(CephWrapper, self).osd_pool_set(
-            pool=pool, var=var, val=str(val),
-            force=force, body=body, timeout=timeout)
-
-    def osd_set_pool_param(self, pool, var, val, force=None,
-                           body='json', timeout=None):
-        """set pool parameter <var> to <val>"""
-        if var == 'crush_ruleset':
-            var = 'crush_rule'
-            response, _body = self._osd_crush_rule_by_ruleset(
-                val, timeout=timeout)
-            if not response.ok:
-                return response, _body
-            val = _body['output']['rule']
-        return super(CephWrapper, self).osd_pool_set(
-            pool, var, str(val), force=None,
-            body=body, timeout=timeout)
-
-    def osd_get_pool_quota(self, pool, body='json', timeout=None):
-        """obtain object or byte limits for pool"""
-        return super(CephWrapper, self).osd_pool_get_quota(
-            pool, body=body, timeout=timeout)
-
-    def osd_set_pool_quota(self, pool, field, val, body='json', timeout=None):
-        """set object or byte limit on pool"""
-        return super(CephWrapper, self).osd_pool_set_quota(
-            pool, field, str(val), body=body, timeout=timeout)
-
-    def osd_pool_set_quota(self, pool, field, val,
-                           body='json', timeout=None):
-        """set object or byte limit on pool"""
-        return super(CephWrapper, self).osd_pool_set_quota(
-            pool=pool, field=field, val=str(val),
-            body=body, timeout=timeout)
-
-    def _auth_convert_caps(self, caps):
-        if caps:
-            if not isinstance(caps, dict):
-                raise CephClientTypeError(
-                    name='caps',
-                    actual=type(caps),
-                    expected=dict)
-            _caps = []
-            for key, value in list(caps.items()):
-                _caps.append(key)
-                _caps.append(value)
-            caps = _caps
-        return caps
-
-    def auth_add(self, entity, caps=None, body='json', timeout=None):
-        """add auth info for <entity> from input file, or random key if no input is given, and/or any caps specified in the command"""
-        caps = self._auth_convert_caps(caps)
-        return super(CephWrapper, self).auth_add(
-            entity, caps=caps, body=body, timeout=timeout)
-
-    def auth_caps(self, entity, caps, body='json', timeout=None):
-        """update caps for <name> from caps specified in the command"""
-        caps = self._auth_convert_caps(caps)
-        return super(CephWrapper, self).auth_caps(
-            entity, caps=caps, body=body, timeout=timeout)
-
-    def auth_get_or_create(self, entity, caps=None, body='json', timeout=None):
-        """add auth info for <entity> from input file, or random key if no input given, and/or any caps specified in the command"""
-        caps = self._auth_convert_caps(caps)
-        return super(CephWrapper, self).auth_get_or_create(
-            entity, caps, body=body, timeout=timeout)
-
-    def auth_get_or_create_key(self, entity, caps=None,
-                               body='json', timeout=None):
-        """get, or add, key for <entity> from system/caps pairs specified in the command. If key already exists, any given caps must match the existing caps for that key."""
-        caps = self._auth_convert_caps(caps)
-        response, _body = super(CephWrapper, self).auth_get_or_create_key(
-            entity, caps, body=body, timeout=timeout)
-        if response.ok:
-            _body['output'] = _body['output']
-        return response, _body
-
-    def osd_set_key(self, key, sure=None, body='json', timeout=None):
-        """set <key>"""
-        return self.osd_set(key, sure=sure, body=body, timeout=timeout)
diff --git a/ceph/python-cephclient/python-cephclient/requirements.txt b/ceph/python-cephclient/python-cephclient/requirements.txt
deleted file mode 100644
index ea6e06dce..000000000
--- a/ceph/python-cephclient/python-cephclient/requirements.txt
+++ /dev/null
@@ -1,3 +0,0 @@
-ipaddress
-requests
-six
diff --git a/ceph/python-cephclient/python-cephclient/setup.py b/ceph/python-cephclient/python-cephclient/setup.py
deleted file mode 100644
index c8368e319..000000000
--- a/ceph/python-cephclient/python-cephclient/setup.py
+++ /dev/null
@@ -1,34 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright (c) 2019 Wind River Systems, Inc.
-#
-# SPDX-License-Identifier: Apache-2.0
-#
-import setuptools
-
-setuptools.setup(
-    name='python-cephclient',
-    packages=['cephclient'],
-    version='13.2.2.0',
-    url='https://github.com/openstack/stx-integ/tree/master/ceph/python-cephclient/python-cephclient',  # noqa E501
-    author='Daniel Badea',
-    author_email='daniel.badea@windriver.com',
-    description=(
-        'A client library in Python for Ceph Mgr RESTful plugin '
-        'providing REST API access to the cluster over an SSL-secured '
-        'connection. Python API is compatible with the old Python '
-        'Ceph client at https://github.com/dmsimard/python-cephclient '
-        'that no longer works in Ceph mimic because the Ceph REST API '
-        'component was removed.'),
-    license='Apache-2.0',
-    keywords='ceph rest api ceph-rest-api client library',
-    install_requires=['ipaddress', 'requests', 'six'],
-    classifiers=[
-        'License :: OSI Approved :: Apache Software License',
-        'Development Status :: 3 - Alpha',
-        'Intended Audience :: Developers',
-        'Intended Audience :: System Administrators',
-        'Intended Audience :: Information Technology',
-        'Programming Language :: Python',
-        'Topic :: Utilities'
-    ])
diff --git a/ceph/python-cephclient/python-cephclient/test-requirements.txt b/ceph/python-cephclient/python-cephclient/test-requirements.txt
deleted file mode 100644
index 7f5733341..000000000
--- a/ceph/python-cephclient/python-cephclient/test-requirements.txt
+++ /dev/null
@@ -1,6 +0,0 @@
-# The order of packages is significant, because pip processes them in the order
-# of appearance. Changing the order has an impact on the overall integration
-# process, which may cause wedges in the gate later.
-
-flake8
-pytest
diff --git a/ceph/python-cephclient/python-cephclient/tox.ini b/ceph/python-cephclient/python-cephclient/tox.ini
deleted file mode 100644
index c948b7560..000000000
--- a/ceph/python-cephclient/python-cephclient/tox.ini
+++ /dev/null
@@ -1,19 +0,0 @@
-[tox]
-envlist = py27,pep8
-skipsdist = True
-toxworkdir = /tmp/{env:USER}_ceph_manager_tox
-
-[testenv]
-setenv = VIRTUAL_ENV={envdir}
-usedevelop = True
-install_command = pip install --no-binary :all: --upgrade --force-reinstall {opts} {packages}
-deps = -r{toxinidir}/test-requirements.txt
-commands = py.test {posargs}
-whitelist_externals = bash
-
-[testenv:pep8]
-commands =
-    flake8 {posargs}
-
-[flake8]
-exclude = .venv,.git,.tox,dist,doc,*lib/python*,*egg,build
diff --git a/devstack/lib/integ b/devstack/lib/integ
index 017becc96..3681cc332 100644
--- a/devstack/lib/integ
+++ b/devstack/lib/integ
@@ -19,8 +19,6 @@ set -o xtrace
 
 STXINTEG_DIR=${GITDIR[$STX_INTEG_NAME]}
 
-PLATFORM_UTIL_DIR=$STXINTEG_DIR/utilities/platform-util
-
 # STX_INST_DIR should be a non-root-writable place to install build artifacts
 STX_INST_DIR=${STX_INST_DIR:-/usr/local}
 STX_BIN_DIR=${STX_BIN_DIR:-$STX_INST_DIR/bin}
@@ -49,34 +47,7 @@ function init_integ {
 
 function install_integ {
     # Install the service
-    if is_service_enabled platform-util; then
-        install_platform_util
-    fi
-}
-
-function install_platform_util {
-    pushd $PLATFORM_UTIL_DIR/platform-util
-    sudo python setup.py install \
-        --root=/ \
-        --install-lib=$PYTHON_SITE_DIR \
-        --prefix=/usr \
-        --install-data=/usr/share \
-        --single-version-externally-managed
-    popd
-
-    local systemddir=/etc/systemd
-    $STX_SUDO install -m 755 -d ${STX_SBIN_DIR}
-    $STX_SUDO install -m 755 $PLATFORM_UTIL_DIR/scripts/patch-restart-mtce ${STX_SBIN_DIR}
-    $STX_SUDO install -m 755 $PLATFORM_UTIL_DIR/scripts/patch-restart-processes ${STX_SBIN_DIR}
-    $STX_SUDO install -m 755 $PLATFORM_UTIL_DIR/scripts/patch-restart-haproxy ${STX_SBIN_DIR}
-
-    $STX_SUDO install -m 755 $PLATFORM_UTIL_DIR/scripts/cgcs_tc_setup.sh ${STX_BIN_DIR}
-    $STX_SUDO install -m 755 $PLATFORM_UTIL_DIR/scripts/remotelogging_tc_setup.sh ${STX_BIN_DIR}
-    $STX_SUDO install -m 755 $PLATFORM_UTIL_DIR/scripts/connectivity_test ${STX_BIN_DIR}
-
-    # sudo install -m 755 $PLATFORM_UTIL_DIR/scripts/opt-platform.mount ${systemddir}/system
-    # sudo install -m 755 $PLATFORM_UTIL_DIR/scripts/opt-platform.service ${systemddir}/system
-    # sudo install -m 755 $PLATFORM_UTIL_DIR/scripts/memcached.service ${systemddir}/system
+    :
 }
 
 function start_integ {
diff --git a/devstack/settings b/devstack/settings
index 6c7d75dd9..ebe1af4d4 100644
--- a/devstack/settings
+++ b/devstack/settings
@@ -6,7 +6,6 @@
 # https://docs.openstack.org/devstack/latest/plugins.html#plugin-sh-contract
 
 # Services
-# platform-util
 
 # Defaults
 # --------
diff --git a/filesystem/nfscheck/LICENSE b/filesystem/nfscheck/LICENSE
deleted file mode 100644
index d64569567..000000000
--- a/filesystem/nfscheck/LICENSE
+++ /dev/null
@@ -1,202 +0,0 @@
- - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
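For readers of this relocation patch, the CephWrapper removed above (ceph/python-cephclient) is the surface most callers used. A minimal usage sketch, assuming a reachable ceph-mgr restful endpoint and the credential discovery that CephClient performs; the pool name and OSD ids are illustrative only:

    from cephclient.wrapper import CephWrapper
    from cephclient.exception import CephClientException

    ceph = CephWrapper()
    try:
        # OSD ids may be given as int or as 'osd.N'; the wrapper
        # normalizes both forms before issuing the request.
        response, body = ceph.osd_down(['osd.1', 2])
        if response.ok:
            print(body['output'])
        # 'crush_ruleset' is transparently mapped to 'crush_rule'
        # for Ceph releases that replaced rulesets with named rules.
        response, body = ceph.osd_get_pool_param('rbd', 'crush_ruleset')
    except CephClientException as exc:
        print(exc)

Every call returns a (response, body) pair, which is what lets the wrapper keep the calling convention of the old python-cephclient that the setup.py description says it replaces.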
diff --git a/filesystem/nfscheck/PKG-INFO b/filesystem/nfscheck/PKG-INFO deleted file mode 100644 index 0800a1145..000000000 --- a/filesystem/nfscheck/PKG-INFO +++ /dev/null @@ -1,14 +0,0 @@ -Metadata-Version: 1.1 -Name: nfscheck -Version: 1.0 -Summary: NFS Audit -Home-page: -Author: -Author-email: -License: Apache-2.0 - -Description: -NFS Audit - - -Platform: UNKNOWN diff --git a/filesystem/nfscheck/centos/build_srpm.data b/filesystem/nfscheck/centos/build_srpm.data deleted file mode 100644 index 4498c84d5..000000000 --- a/filesystem/nfscheck/centos/build_srpm.data +++ /dev/null @@ -1,2 +0,0 @@ -COPY_LIST="LICENSE files/*" -TIS_PATCH_VER=0 diff --git a/filesystem/nfscheck/centos/nfscheck.spec b/filesystem/nfscheck/centos/nfscheck.spec deleted file mode 100644 index 4ba09bd0e..000000000 --- a/filesystem/nfscheck/centos/nfscheck.spec +++ /dev/null @@ -1,43 +0,0 @@ -Name: nfscheck -Version: 1.0 -Release: %{tis_patch_ver}%{?_tis_dist} -Summary: NFS Audit - -Group: base -License: Apache-2.0 -URL: unknown -Source0: nfscheck.sh -Source1: nfscheck.service -Source2: LICENSE - -Requires: systemd -Requires: util-linux - -%description -NFS Audit - - -%prep - - -%build - - -%install -install -d -m 755 %{buildroot}/usr/bin/ -install -m 755 %{SOURCE0} %{buildroot}/usr/bin/nfscheck.sh - -install -d -m 755 %{buildroot}/usr/lib/systemd/system/ -install -m 664 %{SOURCE1} %{buildroot}/usr/lib/systemd/system/nfscheck.service - -%post -/usr/bin/systemctl enable nfscheck.service >/dev/null 2>&1 - -%files -%license ../SOURCES/LICENSE -/usr/bin/* -/usr/lib/systemd/system/* - - -%changelog - diff --git a/filesystem/nfscheck/files/LICENSE b/filesystem/nfscheck/files/LICENSE deleted file mode 100644 index d64569567..000000000 --- a/filesystem/nfscheck/files/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). 
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/filesystem/nfscheck/files/nfscheck-init.sh b/filesystem/nfscheck/files/nfscheck-init.sh deleted file mode 100755 index 53b1fccea..000000000 --- a/filesystem/nfscheck/files/nfscheck-init.sh +++ /dev/null @@ -1,79 +0,0 @@ -#!/bin/sh -# -# Copyright (c) 2013-2014 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - - -# chkconfig: 345 99 10 - -### BEGIN INIT INFO -# Provides: nfscheck -# Required-Start: $syslog -# Required-Stop: $syslog -# Default-Start: 2 3 5 -# Default-Stop: 0 1 6 -# Short-Description: nfscheck -# Description: NFS Audit -### END INIT INFO - -DESC="nfscheck" -DAEMON="/usr/bin/nfscheck" -PIDFILE="/var/run/nfscheck.pid" - -start() -{ - if [ -e $PIDFILE ]; then - PIDDIR=/proc/$(cat $PIDFILE) - if [ -d ${PIDDIR} ]; then - echo "$DESC already running." - exit 1 - else - echo "Removing stale PID file $PIDFILE" - rm -f $PIDFILE - fi - fi - - echo -n "Starting $DESC..." 
- - start-stop-daemon --start --quiet --background \ - --pidfile ${PIDFILE} --make-pidfile --exec ${DAEMON} - - if [ $? -eq 0 ]; then - echo "done." - else - echo "failed." - fi -} - -stop() -{ - echo -n "Stopping $DESC..." - start-stop-daemon --stop --quiet --pidfile $PIDFILE - if [ $? -eq 0 ]; then - echo "done." - else - echo "failed." - fi - rm -f $PIDFILE -} - -case "$1" in - start) - start - ;; - stop) - stop - ;; - restart|force-reload) - stop - start - ;; - *) - echo "Usage: $0 {start|stop|force-reload|restart}" - exit 1 - ;; -esac - -exit 0 diff --git a/filesystem/nfscheck/files/nfscheck.service b/filesystem/nfscheck/files/nfscheck.service deleted file mode 100644 index e83300e8e..000000000 --- a/filesystem/nfscheck/files/nfscheck.service +++ /dev/null @@ -1,10 +0,0 @@ -[Unit] -Description=nfscheck -After=syslog.target network.target nfs-mountd.service sw-patch.service - -[Service] -Type=simple -ExecStart=/bin/sh /usr/bin/nfscheck.sh - -[Install] -WantedBy=multi-user.target diff --git a/filesystem/nfscheck/files/nfscheck.sh b/filesystem/nfscheck/files/nfscheck.sh deleted file mode 100644 index 3b0679138..000000000 --- a/filesystem/nfscheck/files/nfscheck.sh +++ /dev/null @@ -1,48 +0,0 @@ -#!/bin/bash -# -# Copyright (c) 2013-2014 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - - -# The following script tests the NFS mount in order to log when it is hung - -MOUNT=/opt/platform -previous=1 -delay=60 - -while : ; do - # First, check that it's actually an NFS mount - mount | grep -q $MOUNT - if [ $? -ne 0 ]; then - logger -t NFSCHECK "$MOUNT is not mounted" - previous=1 - sleep $delay - continue - fi - - ls $MOUNT >/dev/null 2>&1 & - - sleep $delay - - # At this point, jobs will either report no jobs (empty) or Done, - # unless the job is still running/hung - rc=$(jobs) - if [[ -z "$rc" || $rc =~ "Done" ]]; then - # NFS is successful - if [ $previous -ne 0 ]; then - logger -t NFSCHECK "NFS test of $MOUNT is ok" - previous=0 - fi - else - # Keep waiting until the job is done - while ! [[ -z "$rc" || $rc =~ "Done" ]]; do - logger -t NFSCHECK "NFS test of $MOUNT is failed" - previous=1 - sleep $delay - rc=$(jobs) - done - fi -done - diff --git a/logging/logmgmt/.gitignore b/logging/logmgmt/.gitignore deleted file mode 100644 index d2679b469..000000000 --- a/logging/logmgmt/.gitignore +++ /dev/null @@ -1,6 +0,0 @@ -!.distro -.distro/centos7/rpmbuild/RPMS -.distro/centos7/rpmbuild/SRPMS -.distro/centos7/rpmbuild/BUILD -.distro/centos7/rpmbuild/BUILDROOT -.distro/centos7/rpmbuild/SOURCES/logmgmt*tar.gz diff --git a/logging/logmgmt/LICENSE b/logging/logmgmt/LICENSE deleted file mode 100644 index d64569567..000000000 --- a/logging/logmgmt/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
- [... remainder of the standard Apache License 2.0 text (202 lines), identical to the other LICENSE files removed by this patch ...]
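For context on the nfscheck.sh audit removed above: it detects a hung NFS mount by backgrounding an ls against the mount and polling the shell's jobs table, logging a missing mount separately. The same idea sketched in Python with a subprocess timeout; this is an illustration, not code from this tree, though /opt/platform and the 60-second delay mirror the script:

    import subprocess

    MOUNT = '/opt/platform'  # the mount point the removed script audits

    def nfs_responsive(timeout=60):
        """Return True if a directory listing of MOUNT completes in time."""
        try:
            subprocess.run(['ls', MOUNT],
                           stdout=subprocess.DEVNULL,
                           stderr=subprocess.DEVNULL,
                           timeout=timeout,
                           check=True)
            return True
        except subprocess.TimeoutExpired:
            # The listing hung: the NFS server is likely wedged.
            return False
        except subprocess.CalledProcessError:
            # Mount point missing or unreadable; a different failure mode.
            return False

The timeout path is what separates a wedged NFS server from an ordinary failure, which is the same distinction the shell script draws by inspecting jobs after its sleep.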
diff --git a/logging/logmgmt/PKG-INFO b/logging/logmgmt/PKG-INFO deleted file mode 100644 index 46c15a9e8..000000000 --- a/logging/logmgmt/PKG-INFO +++ /dev/null @@ -1,13 +0,0 @@ -Metadata-Version: 1.1 -Name: logmgmt -Version: 1.0 -Summary: Management of /var/log filesystem -Home-page: -Author: Windriver -Author-email: info@windriver.com -License: Apache-2.0 - -Description: Management of /var/log filesystem - - -Platform: UNKNOWN diff --git a/logging/logmgmt/centos/build_srpm.data b/logging/logmgmt/centos/build_srpm.data deleted file mode 100644 index 9e65d5468..000000000 --- a/logging/logmgmt/centos/build_srpm.data +++ /dev/null @@ -1,4 +0,0 @@ -SRC_DIR="logmgmt" -COPY_LIST_TO_TAR="scripts" -COPY_LIST="$SRC_DIR/LICENSE" -TIS_PATCH_VER=4 diff --git a/logging/logmgmt/centos/logmgmt.spec b/logging/logmgmt/centos/logmgmt.spec deleted file mode 100644 index c52fcd10d..000000000 --- a/logging/logmgmt/centos/logmgmt.spec +++ /dev/null @@ -1,87 +0,0 @@ -Summary: Management of /var/log filesystem -Name: logmgmt -Version: 1.0 -Release: %{tis_patch_ver}%{?_tis_dist} -License: Apache-2.0 -Group: base -Packager: Wind River -URL: unknown -Source0: %{name}-%{version}.tar.gz -Source1: LICENSE - -BuildRequires: python-setuptools -BuildRequires: python2-pip -BuildRequires: python2-wheel -BuildRequires: systemd-devel -Requires: systemd -Requires: python-daemon - -%description -Management of /var/log filesystem - -%define local_bindir /usr/bin/ -%define local_etc_initd /etc/init.d/ -%define local_etc_pmond /etc/pmon.d/ -%define pythonroot /usr/lib64/python2.7/site-packages - -%define debug_package %{nil} - -%prep -%setup - -# Remove bundled egg-info -rm -rf *.egg-info - -%build -%{__python} setup.py build -%py2_build_wheel - -%install -%{__python} setup.py install --root=$RPM_BUILD_ROOT \ - --install-lib=%{pythonroot} \ - --prefix=/usr \ - --install-data=/usr/share \ - --single-version-externally-managed -mkdir -p $RPM_BUILD_ROOT/wheels -install -m 644 dist/*.whl $RPM_BUILD_ROOT/wheels/ - -install -d -m 755 %{buildroot}%{local_bindir} -install -p -D -m 700 scripts/bin/logmgmt %{buildroot}%{local_bindir}/logmgmt -install -p -D -m 700 scripts/bin/logmgmt_postrotate %{buildroot}%{local_bindir}/logmgmt_postrotate -install -p -D -m 700 scripts/bin/logmgmt_prerotate %{buildroot}%{local_bindir}/logmgmt_prerotate - -install -d -m 755 %{buildroot}%{local_etc_initd} -install -p -D -m 700 scripts/init.d/logmgmt %{buildroot}%{local_etc_initd}/logmgmt - -install -d -m 755 %{buildroot}%{local_etc_pmond} -install -p -D -m 644 scripts/pmon.d/logmgmt %{buildroot}%{local_etc_pmond}/logmgmt - -install -p -D -m 664 scripts/etc/systemd/system/logmgmt.service %{buildroot}%{_unitdir}/logmgmt.service - -%post -/usr/bin/systemctl enable logmgmt.service >/dev/null 2>&1 - -%clean -rm -rf $RPM_BUILD_ROOT - -%files -%defattr(-,root,root,-) -%doc LICENSE -%{local_bindir}/* -%{local_etc_initd}/* -%dir %{local_etc_pmond} -%{local_etc_pmond}/* -%{_unitdir}/logmgmt.service -%dir %{pythonroot}/%{name} -%{pythonroot}/%{name}/* -%dir %{pythonroot}/%{name}-%{version}.0-py2.7.egg-info -%{pythonroot}/%{name}-%{version}.0-py2.7.egg-info/* - -%package wheels -Summary: %{name} wheels - -%description wheels -Contains python wheels for %{name} - -%files wheels -/wheels/* diff --git a/logging/logmgmt/logmgmt/LICENSE b/logging/logmgmt/logmgmt/LICENSE deleted file mode 100644 index d64569567..000000000 --- a/logging/logmgmt/logmgmt/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - 
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. 
Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/logging/logmgmt/logmgmt/logmgmt/__init__.py b/logging/logmgmt/logmgmt/logmgmt/__init__.py deleted file mode 100644 index 0da84c8ca..000000000 --- a/logging/logmgmt/logmgmt/logmgmt/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -""" -Copyright (c) 2014 Wind River Systems, Inc. - -SPDX-License-Identifier: Apache-2.0 - -""" diff --git a/logging/logmgmt/logmgmt/logmgmt/logmgmt.py b/logging/logmgmt/logmgmt/logmgmt/logmgmt.py deleted file mode 100644 index fe5e83c30..000000000 --- a/logging/logmgmt/logmgmt/logmgmt/logmgmt.py +++ /dev/null @@ -1,271 +0,0 @@ -""" -Copyright (c) 2014 Wind River Systems, Inc. - -SPDX-License-Identifier: Apache-2.0 - -""" - -################### -# IMPORTS -################### -from __future__ import absolute_import -import logging -import logging.handlers -import time -import os -import subprocess -import glob -import re -import sys - -from daemon import runner - -from logmgmt import prepostrotate - -################### -# CONSTANTS -################### -LOG_DIR = '/var/lib/logmgmt' -LOG_FILE = LOG_DIR + '/logmgmt.log' -PID_FILE = '/var/run/logmgmt.pid' -LOG_FILE_MAX_BYTES = 1024 * 1024 -LOG_FILE_BACKUP_COUNT = 5 - -PERCENT_FREE_CRITICAL = 10 -PERCENT_FREE_MAJOR = 20 - -LOGROTATE_PERIOD = 600 # Every ten minutes - - -################### -# METHODS -################### -def start_polling(): - logmgmt_daemon = LogMgmtDaemon() - logmgmt_runner = runner.DaemonRunner(logmgmt_daemon) - logmgmt_runner.daemon_context.umask = 0o022 - logmgmt_runner.do_action() - - -def handle_exception(exc_type, exc_value, exc_traceback): - """Exception handler to log any uncaught exceptions""" - logging.error("Uncaught exception", - exc_info=(exc_type, exc_value, exc_traceback)) - sys.__excepthook__(exc_type, exc_value, exc_traceback) - - -################### -# CLASSES -################### -class LogMgmtDaemon(): - """Daemon process representation of the /var/log monitoring program""" - def __init__(self): - # Daemon-specific init - self.stdin_path = '/dev/null' - self.stdout_path = '/dev/null' - self.stderr_path = '/dev/null' - self.pidfile_path = PID_FILE - self.pidfile_timeout = 5 - - self.monitored_files = [] - self.unmonitored_files = [] - - self.last_logrotate = 0 - self.last_check = 0 - - def configure_logging(self, level=logging.DEBUG): - my_exec = os.path.basename(sys.argv[0]) - - if not os.path.exists(LOG_DIR): - os.mkdir(LOG_DIR, 0o755) - - log_format = '%(asctime)s: ' \ - + my_exec + '[%(process)s]: ' \ - + '%(filename)s(%(lineno)s): ' \ - + '%(levelname)s: %(message)s' - - fmt = logging.Formatter(fmt=log_format) - - # Use python's log rotation, rather than logrotate - handler = logging.handlers.RotatingFileHandler( - LOG_FILE, - maxBytes=LOG_FILE_MAX_BYTES, - backupCount=LOG_FILE_BACKUP_COUNT) - - my_logger = logging.getLogger() - my_logger.setLevel(level) - - handler.setFormatter(fmt) - handler.setLevel(level) - my_logger.addHandler(handler) - - # Log uncaught exceptions to file - sys.excepthook = handle_exception - - def run(self): - self.configure_logging() - - while True: - self.check_var_log() - - # run/poll every 1 min - time.sleep(60) - - def 
get_percent_free(self): - usage = os.statvfs('/var/log') - return ((usage.f_bavail * 100) / usage.f_blocks) - - def get_monitored_files(self): - self.monitored_files = [] - - try: - output = subprocess.check_output(['/usr/sbin/logrotate', '-d', '/etc/logrotate.conf'], - stderr=subprocess.STDOUT) - - for line in output.split('\n'): - fields = line.split() - if len(fields) > 0 and fields[0] == "considering": - self.monitored_files.extend(glob.glob(fields[2])) - self.monitored_files.extend(glob.glob(fields[2] + '.[0-9].gz')) - self.monitored_files.extend(glob.glob(fields[2] + '.[0-9][0-9].gz')) - self.monitored_files.extend(glob.glob(fields[2] + '.[0-9]')) - self.monitored_files.extend(glob.glob(fields[2] + '.[0-9][0-9]')) - except: - logging.error('Failed to determine monitored files') - raise - - def get_unmonitored_files(self): - self.unmonitored_files = [] - - try: - output = subprocess.check_output(['find', '/var/log', '-type', 'f']) - - for fname in output.split('\n'): - if fname in self.monitored_files: - continue - - # Ignore some files - if ('/var/log/puppet' in fname - or '/var/log/dmesg' in fname - or '/var/log/rabbitmq' in fname - or '/var/log/lastlog' in fname): - continue - - if os.path.exists(fname): - self.unmonitored_files.append(fname) - - except: - logging.error('Failed to determine unmonitored files') - - def purge_files(self, index): - pattern = re.compile('.*\.([0-9]*)\.gz') - for fname in sorted(self.monitored_files): - result = pattern.match(fname) - if result: - if int(result.group(1)) >= index: - logging.info("Purging file: %s" % fname) - try: - os.remove(fname) - except OSError as e: - logging.error('Failed to remove file: %s', e) - - def run_logrotate(self): - self.last_logrotate = int(time.time()) - try: - subprocess.check_call(['/usr/sbin/logrotate', '/etc/logrotate.conf']) - except: - logging.error('Failed logrotate') - - def run_logrotate_forced(self): - self.last_logrotate = int(time.time()) - try: - subprocess.check_call(['/usr/sbin/logrotate', '-f', '/etc/logrotate.conf']) - except: - logging.error('Failed logrotate -f') - - def timecheck(self): - # If we're more than a couple of mins since the last timecheck, - # there could have been a large time correction, which would skew - # our timing. Reset the logrotate timestamp to ensure we don't miss anything - now = int(time.time()) - - if self.last_check > now or (now - self.last_check) > 120: - self.last_logrotate = 0 - - self.last_check = now - - def check_var_log(self): - self.timecheck() - - try: - prepostrotate.ensure_bash_log_locked_down() - except Exception: - logging.exception('Failed to ensure bash.log is locked down') - - pf = self.get_percent_free() - - if pf > PERCENT_FREE_CRITICAL: - # We've got more than 10% free space, so just run logrotate every ten minutes - now = int(time.time()) - if self.last_logrotate > now or (now - self.last_logrotate) > LOGROTATE_PERIOD: - logging.info("Running logrotate") - self.run_logrotate() - - return - - logging.warning("Reached critical disk usage for /var/log: %d%% free" % pf) - - # We're running out of disk space, so we need to start deleting files - try: - for index in range(20, 11, -1): - logging.info("/var/log is %d%% free. Purging rotated .%d.gz files to free space" % (pf, index)) - self.get_monitored_files() - self.purge_files(index) - pf = self.get_percent_free() - - if pf >= PERCENT_FREE_MAJOR: - # We've freed up enough space. Do a logrotate and leave - logging.info("/var/log is %d%% free.
Running logrotate" % pf) - self.run_logrotate() - return - except Exception: - logging.exception('Failed purging rotated files') - - # We still haven't freed up enough space, so try a logrotate - logging.info("/var/log is %d%% free. Running logrotate" % pf) - self.run_logrotate() - - pf = self.get_percent_free() - if pf >= PERCENT_FREE_MAJOR: - return - - # Try a forced rotate - logging.info("/var/log is %d%% free. Running forced logrotate" % pf) - self.run_logrotate_forced() - - pf = self.get_percent_free() - if pf >= PERCENT_FREE_MAJOR: - return - - # Start deleting unmonitored files - try: - self.get_monitored_files() - self.get_unmonitored_files() - logging.info("/var/log is %d%% free. Deleting unmonitored files to free space" % pf) - for fname in sorted(self.unmonitored_files, key=os.path.getsize, reverse=True): - logging.info("Deleting unmonitored file: %s" % fname) - try: - os.remove(fname) - except OSError as e: - logging.error('Failed to remove file: %s', e) - pf = self.get_percent_free() - if pf >= PERCENT_FREE_MAJOR: - logging.info("/var/log is %d%% free." % pf) - return - except Exception: - logging.exception('Failed checking unmonitored files') - - # Nothing else to be done - logging.info("/var/log is %d%% free." % pf) - return - diff --git a/logging/logmgmt/logmgmt/logmgmt/prepostrotate.py b/logging/logmgmt/logmgmt/logmgmt/prepostrotate.py deleted file mode 100644 index 242c79546..000000000 --- a/logging/logmgmt/logmgmt/logmgmt/prepostrotate.py +++ /dev/null @@ -1,60 +0,0 @@ -""" -Copyright (c) 2017 Wind River Systems, Inc. - -SPDX-License-Identifier: Apache-2.0 - -""" - -################### -# IMPORTS -################### - -import array -import fcntl -import struct -import glob - -EXT2_APPEND_FL = 0x00000020 -EXT4_EXTENTS_FL = 0x00080000 - -EXT_IOC_SETFLAGS = 0x40086602 -EXT_IOC_GETFLAGS = 0x80086601 - - -def _is_file_append_only(filename): - buf = array.array('h', [0]) - with open(filename, 'r') as f: - fcntl.ioctl(f.fileno(), EXT_IOC_GETFLAGS, buf) - has_append_only = (buf.tolist()[0] & EXT2_APPEND_FL) == EXT2_APPEND_FL - return has_append_only - - -def _set_file_attrs(filename, attrs): - flags = struct.pack('i', attrs) - with open(filename, 'r') as f: - fcntl.ioctl(f.fileno(), EXT_IOC_SETFLAGS, flags) - - -def chattr_add_append_only(filename): - _set_file_attrs(filename, EXT2_APPEND_FL | EXT4_EXTENTS_FL) - - -def chattr_remove_append_only(filename): - _set_file_attrs(filename, EXT4_EXTENTS_FL) - - -def prerotate(): - for filename in glob.glob("/var/log/bash.log*"): - if _is_file_append_only(filename): - chattr_remove_append_only(filename) - - -def postrotate(): - for filename in glob.glob("/var/log/bash.log*"): - if not _is_file_append_only(filename): - chattr_add_append_only(filename) - - -def ensure_bash_log_locked_down(): - # need the same functionality as postrotate - postrotate() diff --git a/logging/logmgmt/logmgmt/setup.py b/logging/logmgmt/logmgmt/setup.py deleted file mode 100644 index deab3ddcd..000000000 --- a/logging/logmgmt/logmgmt/setup.py +++ /dev/null @@ -1,18 +0,0 @@ -#!/usr/bin/env python - -""" -Copyright (c) 2014 Wind River Systems, Inc.
- -SPDX-License-Identifier: Apache-2.0 - -""" - -import setuptools - -setuptools.setup(name='logmgmt', - version='1.0.0', - description='logmgmt', - license='Apache-2.0', - packages=['logmgmt'], - entry_points={} - ) diff --git a/logging/logmgmt/scripts/bin/logmgmt b/logging/logmgmt/scripts/bin/logmgmt deleted file mode 100644 index bb02df905..000000000 --- a/logging/logmgmt/scripts/bin/logmgmt +++ /dev/null @@ -1,18 +0,0 @@ -#!/usr/bin/env python - -""" -Copyright (c) 2014 Wind River Systems, Inc. - -SPDX-License-Identifier: Apache-2.0 - -""" - -import sys - -try: - from logmgmt import logmgmt -except EnvironmentError as e: - print >> sys.stderr, "Error importing logmgmt: ", str(e) - sys.exit(1) - -logmgmt.start_polling() diff --git a/logging/logmgmt/scripts/bin/logmgmt_postrotate b/logging/logmgmt/scripts/bin/logmgmt_postrotate deleted file mode 100644 index dfdb097c2..000000000 --- a/logging/logmgmt/scripts/bin/logmgmt_postrotate +++ /dev/null @@ -1,19 +0,0 @@ -#!/usr/bin/env python - -""" -Copyright (c) 2014 Wind River Systems, Inc. - -SPDX-License-Identifier: Apache-2.0 - -""" - -import sys - -try: - from logmgmt import prepostrotate -except EnvironmentError as e: - print >> sys.stderr, "Error importing prepostrotate: ", str(e) - sys.exit(1) - -prepostrotate.postrotate() - diff --git a/logging/logmgmt/scripts/bin/logmgmt_prerotate b/logging/logmgmt/scripts/bin/logmgmt_prerotate deleted file mode 100644 index f641d61b7..000000000 --- a/logging/logmgmt/scripts/bin/logmgmt_prerotate +++ /dev/null @@ -1,19 +0,0 @@ -#!/usr/bin/env python - -""" -Copyright (c) 2014 Wind River Systems, Inc. - -SPDX-License-Identifier: Apache-2.0 - -""" - -import sys - -try: - from logmgmt import prepostrotate -except EnvironmentError as e: - print >> sys.stderr, "Error importing prepostrotate: ", str(e) - sys.exit(1) - -prepostrotate.prerotate() - diff --git a/logging/logmgmt/scripts/etc/systemd/system/logmgmt.service b/logging/logmgmt/scripts/etc/systemd/system/logmgmt.service deleted file mode 100644 index 8fdc05bab..000000000 --- a/logging/logmgmt/scripts/etc/systemd/system/logmgmt.service +++ /dev/null @@ -1,14 +0,0 @@ -[Unit] -Description=Titanium Cloud Log Management -After=network.target syslog-ng.service iscsid.service sw-patch.service -Before=config.service pmon.service - -[Service] -Type=forking -ExecStart=/etc/init.d/logmgmt start -ExecStop=/etc/init.d/logmgmt stop -ExecReload=/etc/init.d/logmgmt restart -PIDFile=/var/run/logmgmt.pid - -[Install] -WantedBy=multi-user.target diff --git a/logging/logmgmt/scripts/init.d/logmgmt b/logging/logmgmt/scripts/init.d/logmgmt deleted file mode 100644 index 4a8eea961..000000000 --- a/logging/logmgmt/scripts/init.d/logmgmt +++ /dev/null @@ -1,97 +0,0 @@ -#!/bin/sh -# -# Copyright (c) 2014 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# -### BEGIN INIT INFO -# Provides: logmgmt -# Default-Start: 2 3 4 5 -# Default-Stop: 0 1 6 -# Short-Description: Daemon for /var/log management -# Description: Daemon for /var/log management -### END INIT INFO - -DESC="logmgmt" -DAEMON="/usr/bin/logmgmt" -RUNDIR="/var/run" -PIDFILE=$RUNDIR/$DESC.pid - -start() -{ - if [ -e $PIDFILE ]; then - PID=$(cat $PIDFILE) - PIDDIR=/proc/${PID} - if [ -n "${PID}" -a -d ${PIDDIR} ]; then - echo "$DESC already running." - exit 0 - else - echo "Removing stale PID file $PIDFILE" - rm -f $PIDFILE - fi - fi - - echo -n "Starting $DESC..." - mkdir -p $RUNDIR - start-stop-daemon --start --quiet \ - --pidfile ${PIDFILE} --exec ${DAEMON} start - - #--make-pidfile - - if [ $? 
-eq 0 ]; then - echo "done." - else - echo "failed." - exit 1 - fi -} - -stop() -{ - echo -n "Stopping $DESC..." - start-stop-daemon --stop --quiet --pidfile $PIDFILE - if [ $? -eq 0 ]; then - echo "done." - else - echo "failed." - fi - rm -f $PIDFILE -} - -status() -{ - pid=`cat $PIDFILE 2>/dev/null` - if [ -n "$pid" ]; then - if ps -p $pid &>/dev/null ; then - echo "$DESC is running" - exit 0 - else - echo "$DESC is not running but has pid file" - exit 1 - fi - fi - echo "$DESC is not running" - exit 3 -} - -case "$1" in - start) - start - ;; - stop) - stop - ;; - restart|force-reload|reload) - stop - start - ;; - status) - status - ;; - *) - echo "Usage: $0 {start|stop|force-reload|restart|reload|status}" - exit 1 - ;; -esac - -exit 0 diff --git a/logging/logmgmt/scripts/pmon.d/logmgmt b/logging/logmgmt/scripts/pmon.d/logmgmt deleted file mode 100644 index 5e12bf291..000000000 --- a/logging/logmgmt/scripts/pmon.d/logmgmt +++ /dev/null @@ -1,24 +0,0 @@ -; -; Copyright (c) 2014-2016 Wind River Systems, Inc. -; -; SPDX-License-Identifier: Apache-2.0 -; -[process] -process = logmgmt -pidfile = /var/run/logmgmt.pid -script = /etc/init.d/logmgmt -style = lsb ; ocf or lsb -severity = minor ; Process failure severity - ; critical : host is failed - ; major : host is degraded - ; minor : log is generated -restarts = 5 ; Number of back to back unsuccessful restarts before severity assertion -interval = 10 ; Number of seconds to wait between back-to-back unsuccessful restarts -debounce = 20 ; Number of seconds the process needs to run before declaring - ; it as running O.K. after a restart. - ; Time after which back-to-back restart count is cleared. -startuptime = 10 ; Seconds to wait after process start before starting the debounce monitor -mode = passive ; Monitoring mode: passive (default) or active - ; passive: process death monitoring (default: always) - ; active: heartbeat monitoring, i.e. request / response messaging - diff --git a/security/tpm2-openssl-engine/PKG_INFO b/security/tpm2-openssl-engine/PKG_INFO deleted file mode 100644 index 9f29aad37..000000000 --- a/security/tpm2-openssl-engine/PKG_INFO +++ /dev/null @@ -1,14 +0,0 @@ -Metadata-Version: 1.1 -Name: tpm2-openssl-engine -Version: 1.0 -Summary: TPM 2.0 Openssl Engine -Home-page: -Author: Windriver -Author-email: info@windriver.com -License: openssl - -Description: Titanium Control's TPM 2.0 OpenSSL Engine. Leveraged by - Titanium applications to provide secure TLS Decryption and Signing - capabilities to Titanium host applications. - -Platform: UNKNOWN diff --git a/security/tpm2-openssl-engine/centos/build_srpm.data b/security/tpm2-openssl-engine/centos/build_srpm.data deleted file mode 100644 index 9c445bbb7..000000000 --- a/security/tpm2-openssl-engine/centos/build_srpm.data +++ /dev/null @@ -1,2 +0,0 @@ -SRC_DIR="tpm2-openssl-engine" -TIS_PATCH_VER=2 diff --git a/security/tpm2-openssl-engine/centos/tpm2-openssl-engine.spec b/security/tpm2-openssl-engine/centos/tpm2-openssl-engine.spec deleted file mode 100644 index b2719a1e4..000000000 --- a/security/tpm2-openssl-engine/centos/tpm2-openssl-engine.spec +++ /dev/null @@ -1,39 +0,0 @@ -Name: tpm2-openssl-engine -Version: 1.0 -Release: %{tis_patch_ver}%{?_tis_dist} -Summary: TPM 2.0 Openssl Engine -License: openssl -Group: base -Packager: Wind River -URL: unknown - -Source0: %{name}-%{version}.tar.gz - -BuildRequires: openssl-devel -BuildRequires: openssl -BuildRequires: tss2-devel -Requires: tss2 - -%description -TPM 2.0 OpenSSL engine. 
Leveraged by applications -to provide secure TLS Decryption and Signing capabilities - -%prep -%setup -q - -%build -make %{?_smp_mflags} - -%install -make install ENGINEDIR=%{buildroot}/%{_libdir}/openssl/engines UTILDIR=%{buildroot}/usr/sbin - - -%files -%license LICENSE - -%defattr(-,root,root,-) - -%{_libdir}/openssl/engines/libtpm2.so -/usr/sbin/create_tpm2_key - - diff --git a/security/tpm2-openssl-engine/tpm2-openssl-engine/LICENSE b/security/tpm2-openssl-engine/tpm2-openssl-engine/LICENSE deleted file mode 100644 index 0adcabd4d..000000000 --- a/security/tpm2-openssl-engine/tpm2-openssl-engine/LICENSE +++ /dev/null @@ -1,57 +0,0 @@ -OpenSSL License -==================================================================== -Copyright (c) 1998-2007 The OpenSSL Project. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions -are met: - -1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - -2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - -3. All advertising materials mentioning features or use of this - software must display the following acknowledgment: - "This product includes software developed by the OpenSSL Project - for use in the OpenSSL Toolkit. (http://www.openssl.org/)" - -4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to - endorse or promote products derived from this software without - prior written permission. For written permission, please contact - openssl-core@openssl.org. - -5. Products derived from this software may not be called "OpenSSL" - nor may "OpenSSL" appear in their names without prior written - permission of the OpenSSL Project. - -6. Redistributions of any form whatsoever must retain the following - acknowledgment: - "This product includes software developed by the OpenSSL Project - for use in the OpenSSL Toolkit (http://www.openssl.org/)" - -THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY -EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR -ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT -NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) -HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, -STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED -OF THE POSSIBILITY OF SUCH DAMAGE. -==================================================================== - -This product includes cryptographic software written by Eric Young -(eay@cryptsoft.com). This product includes software written by Tim -Hudson (tjh@cryptsoft.com). -This product is inspired by the original TPM 1.2 openssl engine written -by Kent Yoder for the Trousers Project. 
This product -includes TPM key blob ASN-1 encoding scheme from James Bottomley - - diff --git a/security/tpm2-openssl-engine/tpm2-openssl-engine/Makefile b/security/tpm2-openssl-engine/tpm2-openssl-engine/Makefile deleted file mode 100644 index b6d1f8f3c..000000000 --- a/security/tpm2-openssl-engine/tpm2-openssl-engine/Makefile +++ /dev/null @@ -1,54 +0,0 @@ -# -# Copyright (c) 2013-2017 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - -#### Installation options -ENGINEDIR= -UTILDIR= - -#### Toolchain options -CC = gcc -LD = $(CC) - -#### Debug flags (typically "-g"). -# Those flags only feed CFLAGS so it is not mandatory to use this form. -DEBUG_CFLAGS = -g -O2 -Werror -Wno-unused-parameter -Wno-missing-braces - -#### Compiler-specific flags that may be used to disable some negative over- -# optimization or to silence some warnings. -fno-strict-aliasing is needed with -# gcc >= 4.4. -SPEC_CFLAGS = -fno-strict-aliasing - -#### Common CFLAGS -CFLAGS = $(DEBUG_CFLAGS) $(SPEC_CFLAGS) - -#### Common LDFLAGS -LDFLAGS = -g - -DYNAMIC_ENGINE=libtpm2.so -UTIL=create_tpm2_key - -INCLUDES+=-I${SYSTEM_DIR}/usr/include/ -LDFLAGS +=-lcrypto -lc -ltss -SRCS += e_tpm2.c e_tpm2_err.c -HEADERS += e_tpm2.h - -OBJS = $(SRCS:.c=.o) - -all: $(DYNAMIC_ENGINE) ${UTIL} - -${UTIL}: $(OBJS) - $(CC) -Wall ${CFLAGS} ${INCLUDES} create_tpm2_key.c ${LDFLAGS} -o ${UTIL} - -$(DYNAMIC_ENGINE): $(OBJS) - $(CC) -Wall ${CFLAGS} ${INCLUDES} ${LDFLAGS} -fPIC -c ${SRCS} - $(CC) -shared -Wl,-soname,${DYNAMIC_ENGINE} ${LDFLAGS} -o ${DYNAMIC_ENGINE} $(OBJS) - -install: all - install -D -m 755 ${DYNAMIC_ENGINE} ${ENGINEDIR}/${DYNAMIC_ENGINE} - install -D -m 755 ${UTIL} ${UTILDIR}/${UTIL} - -clean: - $(RM) *.o *.so *.so.0 diff --git a/security/tpm2-openssl-engine/tpm2-openssl-engine/create_tpm2_key.c b/security/tpm2-openssl-engine/tpm2-openssl-engine/create_tpm2_key.c deleted file mode 100644 index 06c854b7d..000000000 --- a/security/tpm2-openssl-engine/tpm2-openssl-engine/create_tpm2_key.c +++ /dev/null @@ -1,479 +0,0 @@ -/* - * Copyright (c) 2017 Wind River Systems, Inc. -* -* SPDX-License-Identifier: Apache-2.0 -* - */ -/* ==================================================================== - * - * Copyright (c) 1999-2001 The OpenSSL Project. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * - * 3. All advertising materials mentioning features or use of this - * software must display the following acknowledgment: - * "This product includes software developed by the OpenSSL Project - * for use in the OpenSSL Toolkit. (http://www.OpenSSL.org/)" - * - * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to - * endorse or promote products derived from this software without - * prior written permission. For written permission, please contact - * licensing@OpenSSL.org. - * - * 5. Products derived from this software may not be called "OpenSSL" - * nor may "OpenSSL" appear in their names without prior written - * permission of the OpenSSL Project. - * - * 6. 
Redistributions of any form whatsoever must retain the following - * acknowledgment: - * "This product includes software developed by the OpenSSL Project - * for use in the OpenSSL Toolkit (http://www.OpenSSL.org/)" - * - * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY - * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR - * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED - * OF THE POSSIBILITY OF SUCH DAMAGE. - * ==================================================================== - * - * This product includes cryptographic software written by Eric Young - * (eay@cryptsoft.com). This product includes software written by Tim - * Hudson (tjh@cryptsoft.com). - * This product is inspired by the original TPM 1.2 openssl engine written - * by Kent Yoder for the Trousers Project. This product - * includes TPM key blob ASN-1 encoding scheme from James Bottomley - * - * - */ -#include <stdio.h> -#include <getopt.h> -#include <string.h> -#include <strings.h> -#include <stdlib.h> - -#include <openssl/evp.h> -#include <openssl/pem.h> -#include <openssl/rsa.h> -#include <openssl/bn.h> -#include <openssl/err.h> - -#include <tss2/tss.h> -#include <tss2/tssutils.h> -#include <tss2/tssmarshal.h> -#include <tss2/tssresponsecode.h> - -#include "tpm2-asn.h" - -static struct option long_options[] = { - {"auth", 0, 0, 'a'}, - {"help", 0, 0, 'h'}, - {"name-scheme", 1, 0, 'n'}, - {"parent-handle", 1, 0, 'p'}, - {"wrap", 1, 0, 'w'}, - {0, 0, 0, 0} -}; - -static TPM_ALG_ID name_alg = TPM_ALG_SHA256; -static int name_alg_size = SHA256_DIGEST_SIZE; - -void -usage(char *argv0) -{ - fprintf(stderr, "\t%s: create a TPM key and write it to disk\n" - "\tusage: %s [options] <filename>\n\n" - "\tOptions:\n" - "\t\t-a|--auth require a password for the key [NO]\n" - "\t\t-h|--help print this help message\n" - "\t\t-n|--name-scheme name algorithm to use sha1 [sha256] sha384 sha512\n" - "\t\t-p|--parent-handle persistent handle of parent key\n" - "\t\t-w|--wrap [file] wrap an existing openssl PEM key\n", - argv0, argv0); - exit(-1); -} - -void tpm2_error(TPM_RC rc, const char *reason) -{ - const char *msg, *submsg, *num; - - fprintf(stderr, "%s failed with %d\n", reason, rc); - TSS_ResponseCode_toString(&msg, &submsg, &num, rc); - fprintf(stderr, "%s%s%s\n", msg, submsg, num); -} - -void -openssl_print_errors() -{ - ERR_load_ERR_strings(); - ERR_load_crypto_strings(); - ERR_print_errors_fp(stderr); -} - -int -openssl_write_tpmfile(const char *file, BYTE *pubkey, int pubkey_len, - BYTE *privkey, int privkey_len, int empty_auth, - TPM_HANDLE parent) -{ - TSSLOADABLE tssl; - BIO *outb; - - /* clear structure so as not to have to set optional parameters */ - memset(&tssl, 0, sizeof(tssl)); - if ((outb = BIO_new_file(file, "w")) == NULL) { - fprintf(stderr, "Error opening file for write: %s\n", file); - return 1; - } - tssl.type = OBJ_txt2obj(OID_loadableKey, 1); - tssl.emptyAuth = empty_auth; - if ((parent & 0xff000000) == 0x81000000) { - tssl.parent = ASN1_INTEGER_new(); - ASN1_INTEGER_set(tssl.parent, parent); - } - tssl.pubkey = ASN1_OCTET_STRING_new(); - ASN1_STRING_set(tssl.pubkey, pubkey, pubkey_len); - tssl.privkey = ASN1_OCTET_STRING_new(); -
ASN1_STRING_set(tssl.privkey, privkey, privkey_len); - - PEM_write_bio_TSSLOADABLE(outb, &tssl); - BIO_free(outb); - return 0; -} - -EVP_PKEY * -openssl_read_key(char *filename) -{ - BIO *b = NULL; - EVP_PKEY *pkey; - - b = BIO_new_file(filename, "r"); - if (b == NULL) { - fprintf(stderr, "Error opening file for read: %s\n", filename); - return NULL; - } - - if ((pkey = PEM_read_bio_PrivateKey(b, NULL, PEM_def_callback, NULL)) == NULL) { - fprintf(stderr, "Reading key %s from disk failed.\n", filename); - openssl_print_errors(); - } - BIO_free(b); - - return pkey; -} - -void tpm2_public_template_rsa(TPMT_PUBLIC *pub) -{ - pub->type = TPM_ALG_RSA; - pub->nameAlg = name_alg; - /* note: all our keys are decrypt only. This is because - * we use the TPM2_RSA_Decrypt operation for both signing - * and decryption (see e_tpm2.c for details) */ - pub->objectAttributes.val = TPMA_OBJECT_NODA | - TPMA_OBJECT_DECRYPT | - TPMA_OBJECT_SIGN | - TPMA_OBJECT_USERWITHAUTH; - pub->authPolicy.t.size = 0; - pub->parameters.rsaDetail.symmetric.algorithm = TPM_ALG_NULL; - pub->parameters.rsaDetail.scheme.scheme = TPM_ALG_NULL; -} - -TPM_RC openssl_to_tpm_public_rsa(TPMT_PUBLIC *pub, EVP_PKEY *pkey) -{ - RSA *rsa = EVP_PKEY_get1_RSA(pkey); - const BIGNUM *n, *e; - int size = RSA_size(rsa); - unsigned long exp; - - if (size > MAX_RSA_KEY_BYTES) - return TPM_RC_KEY_SIZE; - -#if OPENSSL_VERSION_NUMBER < 0x10100000 - n = rsa->n; - e = rsa->e; -#else - RSA_get0_key(rsa, &n, &e, NULL); -#endif - exp = BN_get_word(e); - /* TPM limitations means exponents must be under a word in size */ - if (exp == 0xffffffffL) - return TPM_RC_KEY_SIZE; - tpm2_public_template_rsa(pub); - pub->parameters.rsaDetail.keyBits = size*8; - if (exp == 0x10001) - pub->parameters.rsaDetail.exponent = 0; - else - pub->parameters.rsaDetail.exponent = exp; - - pub->unique.rsa.t.size = BN_bn2bin(n, pub->unique.rsa.t.buffer); - - return 0; -} - -TPM_RC openssl_to_tpm_public(TPM2B_PUBLIC *pub, EVP_PKEY *pkey) -{ - TPMT_PUBLIC *tpub = &pub->publicArea; - pub->size = sizeof(*pub); - - switch (EVP_PKEY_type(pkey->type)) { - case EVP_PKEY_RSA: - return openssl_to_tpm_public_rsa(tpub, pkey); - default: - break; - } - return TPM_RC_ASYMMETRIC; -} - -TPM_RC openssl_to_tpm_private_rsa(TPMT_SENSITIVE *s, EVP_PKEY *pkey) -{ - const BIGNUM *q; - TPM2B_PRIVATE_KEY_RSA *t2brsa = &s->sensitive.rsa; - RSA *rsa = EVP_PKEY_get1_RSA(pkey); - -#if OPENSSL_VERSION_NUMBER < 0x10100000 - q = rsa->q; -#else - const BIGNUM *p; - - RSA_get0_factors(rsa, &p, &q); -#endif - - if (!q) - return TPM_RC_ASYMMETRIC; - - s->sensitiveType = TPM_ALG_RSA; - s->seedValue.b.size = 0; - - t2brsa->t.size = BN_bn2bin(q, t2brsa->t.buffer); - return 0; -} - -TPM_RC openssl_to_tpm_private(TPMT_SENSITIVE *priv, EVP_PKEY *pkey) -{ - switch (EVP_PKEY_type(pkey->type)) { - case EVP_PKEY_RSA: - return openssl_to_tpm_private_rsa(priv, pkey); - default: - break; - } - return TPM_RC_ASYMMETRIC; -} - -TPM_RC wrap_key(TPM2B_PRIVATE *priv, const char *password, EVP_PKEY *pkey) -{ - TPMT_SENSITIVE s; - TPM2B_SENSITIVE b; - BYTE *buf; - int32_t size; - TPM_RC rc; - - memset(&b, 0, sizeof(b)); - memset(&s, 0, sizeof(s)); - - openssl_to_tpm_private(&s, pkey); - - if (password) { - int len = strlen(password); - - memcpy(s.authValue.b.buffer, password, len); - s.authValue.b.size = len; - } else { - s.authValue.b.size = 0; - } - size = sizeof(s); - buf = b.b.buffer; - rc = TSS_TPMT_SENSITIVE_Marshal(&s, &b.b.size, &buf, &size); - if (rc) - tpm2_error(rc, "TSS_TPMT_SENSITIVE_Marshal"); - - size = sizeof(*priv); - buf = priv->b.buffer; -
priv->b.size = 0; - /* no encryption means innerIntegrity and outerIntegrity are - * absent, so the TPM2B_PRIVATE is a TPMT_SENSITIVE*/ - rc = TSS_TPM2B_PRIVATE_Marshal((TPM2B_PRIVATE *)&b, &priv->b.size, &buf, &size); - if (rc) - tpm2_error(rc, "TSS_TPM2B_PRIVATE_Marshal"); - - return TPM_RC_ASYMMETRIC; -} - -int main(int argc, char **argv) -{ - char *filename, c, *wrap = NULL, *auth = NULL; - int option_index; - const char *reason; - TSS_CONTEXT *tssContext = NULL; - TPM_HANDLE parent = 0; - TPM_RC rc = 0; - BYTE pubkey[sizeof(TPM2B_PUBLIC)],privkey[sizeof(TPM2B_PRIVATE)], *buffer; - uint16_t pubkey_len, privkey_len; - int32_t size = 0; - TPM2B_PUBLIC *pub; - TPM2B_PRIVATE *priv; - - - while (1) { - option_index = 0; - c = getopt_long(argc, argv, "n:ap:hw:", - long_options, &option_index); - if (c == -1) - break; - - switch (c) { - case 'a': - auth = malloc(128); - break; - case 'h': - usage(argv[0]); - break; - case 'n': - if (!strcasecmp("sha1", optarg)) { - name_alg = TPM_ALG_SHA1; - name_alg_size = SHA1_DIGEST_SIZE; - } else if (!strcasecmp("sha256", optarg)) { - /* default, do nothing */ - } else if (!strcasecmp("sha384", optarg)) { - name_alg = TPM_ALG_SHA384; - name_alg_size = SHA384_DIGEST_SIZE; -#ifdef TPM_ALG_SHA512 - } else if (!strcasecmp("sha512", optarg)) { - name_alg = TPM_ALG_SHA512; - name_alg_size = SHA512_DIGEST_SIZE; -#endif - } else { - usage(argv[0]); - } - break; - case 'p': - parent = strtol(optarg, NULL, 16); - break; - case 'w': - wrap = optarg; - break; - default: - usage(argv[0]); - break; - } - } - - filename = argv[argc - 1]; - - if (argc < 2) - usage(argv[0]); - - if (!wrap) { - fprintf(stderr, "wrap is a compulsory option\n"); - usage(argv[0]); - } - - if (!parent) { - fprintf(stderr, "parent handle is a compulsory option\n"); - usage(argv[0]); - } - - if (parent && (parent & 0xff000000) != 0x81000000) { - fprintf(stderr, "you must specify a persistent parent handle\n"); - usage(argv[0]); - } - - if (auth) { - if (EVP_read_pw_string(auth, 128, "Enter TPM key authority: ", 1)) { - fprintf(stderr, "Passwords do not match\n"); - exit(1); - } - } - - rc = TSS_Create(&tssContext); - if (rc) { - reason = "TSS_Create"; - goto out_err; - } - - /* - * avoid using the device TCTI as that will bind - * exclusively to the TPM device.
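(From the option handling above, --wrap and --parent-handle are effectively mandatory and the parent must be a persistent handle in the 0x81xxxxxx range. A hypothetical invocation, with the handle and file names as placeholders rather than values from this repo:

    # wrap an existing openssl RSA PEM key under persistent parent 0x81000001,
    # prompt for a key password (-a), and write the resulting TPM key blob
    create_tpm2_key -a -p 81000001 -w server-key.pem server-key.tpm

The handle is parsed with strtol(optarg, NULL, 16), so it may be given with or without a 0x prefix.)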
Instead - * use the Kernel TPM Resource Manager as that - * allows concurrent access - * - * N.B: This assumes that the kernel-modules-tpm - * pkg is installed with the modified tpm_crb KLM - */ - rc = TSS_SetProperty(tssContext, TPM_DEVICE, "/dev/tpmrm0"); - if (rc) { - reason = "TSS_SetProperty: TPM_USE_RESOURCE_MANAGER"; - goto out_err; - } - - if (wrap) { - Import_In iin; - Import_Out iout; - EVP_PKEY *pkey; - - /* may be needed to decrypt the key */ - OpenSSL_add_all_ciphers(); - pkey = openssl_read_key(wrap); - if (!pkey) { - reason = "unable to read key"; - goto out_delete; - } - - iin.parentHandle = parent; - iin.encryptionKey.t.size = 0; - openssl_to_tpm_public(&iin.objectPublic, pkey); - /* set random iin.symSeed */ - iin.inSymSeed.t.size = 0; - iin.symmetricAlg.algorithm = TPM_ALG_NULL; - wrap_key(&iin.duplicate, auth, pkey); - openssl_to_tpm_public(&iin.objectPublic, pkey); - rc = TSS_Execute(tssContext, - (RESPONSE_PARAMETERS *)&iout, - (COMMAND_PARAMETERS *)&iin, - NULL, - TPM_CC_Import, - TPM_RS_PW, NULL, 0, - TPM_RH_NULL, NULL, 0, - TPM_RH_NULL, NULL, 0, - TPM_RH_NULL, NULL, 0); - if (rc) { - reason = "TPM2_Import"; - goto out_flush; - } - pub = &iin.objectPublic; - priv = &iout.outPrivate; - } - - buffer = pubkey; - pubkey_len = 0; - size = sizeof(pubkey); - TSS_TPM2B_PUBLIC_Marshal(pub, &pubkey_len, &buffer, &size); - buffer = privkey; - privkey_len = 0; - size = sizeof(privkey); - TSS_TPM2B_PRIVATE_Marshal(priv, &privkey_len, &buffer, &size); - openssl_write_tpmfile(filename, pubkey, pubkey_len, privkey, privkey_len, auth == NULL, parent); - TSS_Delete(tssContext); - exit(0); - - out_flush: - out_delete: - TSS_Delete(tssContext); - out_err: - tpm2_error(rc, reason); - - exit(1); -} diff --git a/security/tpm2-openssl-engine/tpm2-openssl-engine/e_tpm2.c b/security/tpm2-openssl-engine/tpm2-openssl-engine/e_tpm2.c deleted file mode 100644 index 488f6a682..000000000 --- a/security/tpm2-openssl-engine/tpm2-openssl-engine/e_tpm2.c +++ /dev/null @@ -1,860 +0,0 @@ -/* - * Copyright (c) 2017 Wind River Systems, Inc. -* -* SPDX-License-Identifier: Apache-2.0 -* - */ -/* ==================================================================== - * Copyright (c) 1999-2001 The OpenSSL Project. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * - * 3. All advertising materials mentioning features or use of this - * software must display the following acknowledgment: - * "This product includes software developed by the OpenSSL Project - * for use in the OpenSSL Toolkit. (http://www.OpenSSL.org/)" - * - * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to - * endorse or promote products derived from this software without - * prior written permission. For written permission, please contact - * licensing@OpenSSL.org. - * - * 5. Products derived from this software may not be called "OpenSSL" - * nor may "OpenSSL" appear in their names without prior written - * permission of the OpenSSL Project. - * - * 6. 
Redistributions of any form whatsoever must retain the following - * acknowledgment: - * "This product includes software developed by the OpenSSL Project - * for use in the OpenSSL Toolkit (http://www.OpenSSL.org/)" - * - * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY - * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR - * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED - * OF THE POSSIBILITY OF SUCH DAMAGE. - * ==================================================================== - * - * This product includes cryptographic software written by Eric Young - * (eay@cryptsoft.com). This product includes software written by Tim - * Hudson (tjh@cryptsoft.com). - * This product is inspired by the original TPM 1.2 openssl engine written - * by Kent Yoder for the Trousers Project. This product - * includes TPM key blob ASN-1 encoding scheme from James Bottomley - * - * - */ - -#include <stdio.h> -#include <string.h> -#include <strings.h> -#include <stdlib.h> - -#include <openssl/crypto.h> -#include <openssl/dso.h> -#include <openssl/engine.h> -#include <openssl/evp.h> -#include <openssl/objects.h> -#include <openssl/pem.h> -#include <openssl/rsa.h> -#include <openssl/bn.h> -#include <openssl/err.h> - -#include "e_tpm2.h" - -#include "tpm2-asn.h" - -//IMPLEMENT_ASN1_FUNCTIONS(TSSLOADABLE) - -/* IBM TSS2 library functions */ -static const char *TPM_F_File_ReadStructure = "TSS_File_ReadStructure"; -static const char *TPM_F_Context_Create = "TSS_Create"; -static const char *TPM_F_Context_Close = "TSS_Delete"; -static const char *TPM_F_TPM_Execute = "TSS_Execute"; -static const char *TPM_F_Hash_Generate = "TSS_Hash_Generate"; -static const char *TPM_F_Structure_Marshal = "TSS_Structure_Marshal"; -static const char *TPM_F_PrivateKey_Unmarshal = "TPM2B_PRIVATE_Unmarshal"; -static const char *TPM_F_PublicKey_Unmarshal = "TPM2B_PUBLIC_Unmarshal"; -static const char *TPM_F_Set_Property = "TSS_SetProperty"; - -/* engine specific functions */ -static int tpm_engine_destroy(ENGINE *); -static int tpm_engine_init(ENGINE *); -static int tpm_engine_finish(ENGINE *); -static int tpm_engine_ctrl(ENGINE *, int, long, void *, void (*)()); -static EVP_PKEY *tpm_engine_load_key(ENGINE *, const char *, UI_METHOD *, void *); -static int tpm_engine_flush_key_context(TPMI_DH_OBJECT hKey); - -#ifndef OPENSSL_NO_RSA -/* rsa functions */ -static int tpm_rsa_init(RSA *rsa); -static int tpm_rsa_finish(RSA *rsa); -static int tpm_rsa_priv_dec(int, const unsigned char *, unsigned char *, RSA *, int); -static int tpm_rsa_priv_enc(int, const unsigned char *, unsigned char *, RSA *, int); -#endif - - -/* The definitions for control commands specific to this engine */ -#define TPM_CMD_SO_PATH ENGINE_CMD_BASE -static const ENGINE_CMD_DEFN tpm_cmd_defns[] = { - {TPM_CMD_SO_PATH, - "SO_PATH", - "Specifies the path to the libtpm2.so shared library", - ENGINE_CMD_FLAG_STRING}, - {0, NULL, NULL, 0} -}; - -// for now we will only overwrite the RSA decryption -// operation to go over TPM 2.0.
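(Once the engine is built and installed -- the spec file above drops libtpm2.so into the OpenSSL engines directory -- a client reaches this decrypt path through the standard engine interface. An illustrative call from the openssl CLI, assuming a key blob produced by create_tpm2_key:

    # -keyform engine hands the key file to the engine's load_privkey hook
    # (tpm_engine_load_key below); the private half never leaves the TPM
    openssl rsautl -engine tpm2 -keyform engine -inkey server-key.tpm \
        -decrypt -in secret.bin -out secret.txt

The file names here are placeholders, not paths from this repo.)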
-// Add additional hooks as new use cases pop up -#ifndef OPENSSL_NO_RSA -static RSA_METHOD tpm_rsa = { - "TPM 2.0 RSA method", // name - NULL, // rsa_pub_enc (encrypt) - NULL, // rsa_pub_dec (verify arbitrary data) - tpm_rsa_priv_enc, // rsa_priv_enc (sign) - tpm_rsa_priv_dec, // rsa_priv_dec (decrypt) - NULL, // rsa_mod_exp - BN_mod_exp_mont, // bn_mod_exp - tpm_rsa_init, // init - tpm_rsa_finish, // free - (RSA_FLAG_SIGN_VER | RSA_FLAG_NO_BLINDING | RSA_FLAG_EXT_PKEY), - NULL, // app_data - NULL, /* sign */ // rsa_sign - NULL, /* verify */ // rsa_verify - NULL // rsa_keygen -}; -#endif - -/* Constants used when creating the ENGINE */ -static const char *engine_tpm_id = "tpm2"; -static const char *engine_tpm_name = "TPM 2.0 hardware engine support for"; -static const char *TPM_LIBNAME = "tpm2"; - -static TSS_CONTEXT *hContext = NULL_HCONTEXT; -static TPMI_DH_OBJECT hKey = NULL_HKEY; -/* variables used to get/set CRYPTO_EX_DATA values */ -int ex_app_data = TPM_ENGINE_EX_DATA_UNINIT; - -/* This is a process-global DSO handle used for loading and unloading - * the TSS library. NB: This is only set (or unset) during an - * init() or finish() call (reference counts permitting) and they're - * operating with global locks, so this should be thread-safe - * implicitly. */ - -static DSO *tpm_dso = NULL; - -/* These are the function pointers that are (un)set when the library has - * successfully (un)loaded. */ -static unsigned int (*p_tpm2_File_ReadStructure)(); -static unsigned int (*p_tpm2_Context_Create)(); -static unsigned int (*p_tpm2_Context_Close)(); -static unsigned int (*p_tpm2_TPM_Execute)(); -static unsigned int (*p_tpm2_Hash_Generate)(); -static unsigned int (*p_tpm2_Structure_Marshal)(); -static unsigned int (*p_tpm2_TPM_PrivateKey_Unmarshal)(); -static unsigned int (*p_tpm2_TPM_PublicKey_Unmarshal)(); -static unsigned int (*p_tpm2_Set_Property)(); - - -/* This internal function is used by ENGINE_tpm() and possibly by the - * "dynamic" ENGINE support too */ -static int bind_helper(ENGINE * e) -{ -#ifndef OPENSSL_NO_RSA - const RSA_METHOD *meth1; -#endif - if (!ENGINE_set_id(e, engine_tpm_id) || - !ENGINE_set_name(e, engine_tpm_name) || -#ifndef OPENSSL_NO_RSA - !ENGINE_set_RSA(e, &tpm_rsa) || -#endif - !ENGINE_set_destroy_function(e, tpm_engine_destroy) || - !ENGINE_set_init_function(e, tpm_engine_init) || - !ENGINE_set_finish_function(e, tpm_engine_finish) || - !ENGINE_set_ctrl_function(e, tpm_engine_ctrl) || - !ENGINE_set_load_privkey_function(e, tpm_engine_load_key) || - !ENGINE_set_cmd_defns(e, tpm_cmd_defns)) - return 0; - -#ifndef OPENSSL_NO_RSA - /* We know that the "PKCS1_SSLeay()" functions hook properly - * to the tpm-specific mod_exp and mod_exp_crt so we use - * those functions. NB: We don't use ENGINE_openssl() or - * anything "more generic" because something like the RSAref - * code may not hook properly, and if you own one of these - * cards then you have the right to do RSA operations on it - * anyway!
*/ - meth1 = RSA_PKCS1_SSLeay(); - if (meth1) - { - tpm_rsa.rsa_mod_exp = meth1->rsa_mod_exp; - tpm_rsa.rsa_pub_enc = meth1->rsa_pub_enc; - tpm_rsa.rsa_pub_dec = meth1->rsa_pub_dec; - } -#endif - - /* Ensure the tpm error handling is set up */ - ERR_load_TPM_strings(); - return 1; -} - -static ENGINE *engine_tpm(void) -{ - ENGINE *ret = ENGINE_new(); - if (!ret) - return NULL; - if (!bind_helper(ret)) { - ENGINE_free(ret); - return NULL; - } - return ret; -} - -void ENGINE_load_tpm(void) -{ - /* Copied from eng_[openssl|dyn].c */ - ENGINE *toadd = engine_tpm(); - if (!toadd) - return; - ENGINE_add(toadd); - ENGINE_free(toadd); - ERR_clear_error(); -} - -/* Destructor (complements the "ENGINE_tpm()" constructor) */ -static int tpm_engine_destroy(ENGINE * e) -{ - /* Unload the tpm error strings so any error state including our - * functs or reasons won't lead to a segfault (they simply get displayed - * without corresponding string data because none will be found). */ - ERR_unload_TPM_strings(); - return 1; -} - -/* initialisation function */ -static int tpm_engine_init(ENGINE * e) -{ - void (*p1) (); - void (*p2) (); - void (*p3) (); - void (*p4) (); - void (*p5) (); - void (*p6) (); - void (*p7) (); - void (*p8) (); - void (*p9) (); - TPM_RC result; - - if (tpm_dso != NULL) { - TSSerr(TPM_F_TPM_ENGINE_INIT, TPM_R_ALREADY_LOADED); - return 1; - } - - if ((tpm_dso = DSO_load(NULL, TPM_LIBNAME, NULL, 0)) == NULL) { - TSSerr(TPM_F_TPM_ENGINE_INIT, TPM_R_DSO_FAILURE); - goto err; - } - - if (!(p1 = DSO_bind_func(tpm_dso, TPM_F_File_ReadStructure)) || - !(p2 = DSO_bind_func(tpm_dso, TPM_F_Context_Create)) || - !(p3 = DSO_bind_func(tpm_dso, TPM_F_Context_Close)) || - !(p4 = DSO_bind_func(tpm_dso, TPM_F_TPM_Execute)) || - !(p5 = DSO_bind_func(tpm_dso, TPM_F_Hash_Generate)) || - !(p6 = DSO_bind_func(tpm_dso, TPM_F_Structure_Marshal)) || - !(p7 = DSO_bind_func(tpm_dso, TPM_F_PrivateKey_Unmarshal)) || - !(p8 = DSO_bind_func(tpm_dso, TPM_F_PublicKey_Unmarshal)) || - !(p9 = DSO_bind_func(tpm_dso, TPM_F_Set_Property)) - ) { - TSSerr(TPM_F_TPM_ENGINE_INIT, TPM_R_DSO_FAILURE); - goto err; - } - - /* Copy the pointers */ - p_tpm2_File_ReadStructure = (unsigned int (*) ()) p1; - p_tpm2_Context_Create = (unsigned int (*) ()) p2; - p_tpm2_Context_Close = (unsigned int (*) ()) p3; - p_tpm2_TPM_Execute = (unsigned int (*) ()) p4; - p_tpm2_Hash_Generate = (unsigned int (*) ()) p5; - p_tpm2_Structure_Marshal = (unsigned int (*) ()) p6; - p_tpm2_TPM_PrivateKey_Unmarshal = (unsigned int (*) ()) p7; - p_tpm2_TPM_PublicKey_Unmarshal = (unsigned int (*) ()) p8; - p_tpm2_Set_Property = (unsigned int (*) ()) p9; - - if ((result = p_tpm2_Context_Create(&hContext))) { - TSSerr(TPM_F_TPM_ENGINE_INIT, TPM_R_UNIT_FAILURE); - goto err; - } - - /* - * avoid using the tpm0 device TCTI as that will bind - * exclusively to the TPM device. 
Instead - * use the Kernel TPM Resource Manager as that - * allows concurrent access - * - * N.B: This assumes that the kernel-modules-tpm - * pkg is installed with the modified tpm_crb KLM - */ - if ((result = p_tpm2_Set_Property(hContext, - TPM_INTERFACE_TYPE, "dev"))) { - DBG("Failed to set Resource Manager in context (%p): rc %d", - hContext, (int)result); - TSSerr(TPM_F_TPM_ENGINE_INIT, TPM_R_UNIT_FAILURE); - goto err; - } - - if ((result = p_tpm2_Set_Property(hContext, - TPM_DEVICE, "/dev/tpmrm0"))) { - DBG("Failed to set Resource Manager in context (%p): rc %d", - hContext, (int)result); - TSSerr(TPM_F_TPM_ENGINE_INIT, TPM_R_UNIT_FAILURE); - goto err; - } - - return 1; -err: - if (hContext != NULL_HCONTEXT) { - p_tpm2_Context_Close(hContext); - hContext = NULL_HCONTEXT; - } - - if (tpm_dso) { - DSO_free(tpm_dso); - tpm_dso = NULL; - } - - p_tpm2_File_ReadStructure = NULL; - p_tpm2_Context_Create = NULL; - p_tpm2_Context_Close = NULL; - p_tpm2_TPM_Execute = NULL; - p_tpm2_Hash_Generate = NULL; - p_tpm2_Structure_Marshal = NULL; - p_tpm2_TPM_PrivateKey_Unmarshal = NULL; - p_tpm2_TPM_PublicKey_Unmarshal = NULL; - p_tpm2_Set_Property = NULL; - - return 0; -} - -static int tpm_engine_finish(ENGINE * e) -{ - if (tpm_dso == NULL) { - TSSerr(TPM_F_TPM_ENGINE_FINISH, TPM_R_NOT_LOADED); - return 0; - } - - if (hKey != NULL_HKEY) { - tpm_engine_flush_key_context(hKey); - hKey = NULL_HKEY; - } - - if (hContext != NULL_HCONTEXT) { - p_tpm2_Context_Close(hContext); - hContext = NULL_HCONTEXT; - } - - if (!DSO_free(tpm_dso)) { - TSSerr(TPM_F_TPM_ENGINE_FINISH, TPM_R_DSO_FAILURE); - return 0; - } - tpm_dso = NULL; - - return 1; -} - -int fill_out_rsa_object(RSA *rsa, TPMT_PUBLIC *pub, TPMI_DH_OBJECT hKey) -{ - struct rsa_app_data *app_data; - unsigned long exp; - - if ((app_data = OPENSSL_malloc(sizeof(struct rsa_app_data))) == NULL) { - TSSerr(TPM_F_TPM_FILL_RSA_OBJECT, ERR_R_MALLOC_FAILURE); - return 0; - } - - /* set e in the RSA object */ - if (!rsa->e && ((rsa->e = BN_new()) == NULL)) { - TSSerr(TPM_F_TPM_FILL_RSA_OBJECT, ERR_R_MALLOC_FAILURE); - return 0; - } - - if (pub->parameters.rsaDetail.exponent == 0) - exp = 65537; - else - exp = pub->parameters.rsaDetail.exponent; - - if (!BN_set_word(rsa->e, exp)) { - TSSerr(TPM_F_TPM_FILL_RSA_OBJECT, TPM_R_REQUEST_FAILED); - BN_free(rsa->e); - return 0; - } - - /* set n in the RSA object */ - if (!rsa->n && ((rsa->n = BN_new()) == NULL)) { - TSSerr(TPM_F_TPM_FILL_RSA_OBJECT, ERR_R_MALLOC_FAILURE); - BN_free(rsa->e); - return 0; - } - - if (!BN_bin2bn(pub->unique.rsa.t.buffer, pub->unique.rsa.t.size, - rsa->n)) { - TSSerr(TPM_F_TPM_FILL_RSA_OBJECT, ERR_R_MALLOC_FAILURE); - BN_free(rsa->e); - BN_free(rsa->n); - return 0; - } - -#if OPENSSL_VERSION_NUMBER >= 0x10100000 - RSA_set0_key(rsa, rsa->n, rsa->e, NULL); -#endif - - DBG("Setting hKey(0x%x) in RSA object", hKey); - - memset(app_data, 0, sizeof(struct rsa_app_data)); - app_data->hKey = hKey; - RSA_set_ex_data(rsa, ex_app_data, app_data); - - return 1; -} - -static int tpm_engine_flush_key_context(TPMI_DH_OBJECT hKey) -{ - TPM_RC rc; - FlushContext_In input; - - if (hKey == NULL_HKEY) { - TSSerr(TPM_F_TPM_FLUSH_OBJECT_CONTEXT, TPM_R_INVALID_KEY); - return -1; - } - input.flushHandle = hKey; - - if ((rc = p_tpm2_TPM_Execute(hContext, - NULL, - (COMMAND_PARAMETERS *)&input, - NULL, - TPM_CC_FlushContext, - TPM_RH_NULL, NULL, 0))) { - DBG("Context Flush Failed: Ret code %d", rc); - TSSerr(TPM_F_TPM_FLUSH_OBJECT_CONTEXT, - TPM_R_REQUEST_FAILED); - return -1; - } - - return 0; -} - -static EVP_PKEY 
static EVP_PKEY *tpm_engine_load_key(ENGINE *e, const char *key_id, - UI_METHOD *ui, void *cb_data) -{ - RSA *rsa; - EVP_PKEY *pkey; - BIO *bf; - char oid[128]; - TPM_RC rc; - TSSLOADABLE *tssl; // the TPM key - Load_In input; - Load_Out output; - - const char *parentPassword = NULL; - TPMI_SH_AUTH_SESSION sessionHandle0 = TPM_RS_PW; - unsigned int sessionAttributes0 = 0; - TPMI_SH_AUTH_SESSION sessionHandle1 = TPM_RH_NULL; - unsigned int sessionAttributes1 = 0; - TPMI_SH_AUTH_SESSION sessionHandle2 = TPM_RH_NULL; - unsigned int sessionAttributes2 = 0; - - - if (!key_id) { - TSSerr(TPM_F_TPM_ENGINE_LOAD_KEY, - ERR_R_PASSED_NULL_PARAMETER); - return NULL; - } - - // check if the file exists - if ((bf = BIO_new_file(key_id, "r")) == NULL) { - TSSerr(TPM_F_TPM_ENGINE_LOAD_KEY, - TPM_R_FILE_NOT_FOUND); - return NULL; - } - - tssl = PEM_read_bio_TSSLOADABLE(bf, NULL, NULL, NULL); - BIO_free(bf); - - - if (!tssl) { - TSSerr(TPM_F_TPM_ENGINE_LOAD_KEY, - TPM_R_FILE_READ_FAILED); - goto load_err; - } - - if (OBJ_obj2txt(oid, sizeof(oid), tssl->type, 1) == 0) { - TSSerr(TPM_F_TPM_ENGINE_LOAD_KEY, TPM_R_FILE_READ_FAILED); - goto load_err; - } - - if (strcmp(OID_loadableKey, oid) == 0) { - DBG ("TSSL key type is of a format that can be loaded into TPM 2.0"); - } else if (strcmp(OID_12Key, oid) == 0) { - TSSerr(TPM_F_TPM_ENGINE_LOAD_KEY, - TPM_R_TPM_1_2_KEY); - goto load_err; - } else if (strcmp(OID_importableKey, oid) == 0) { - TSSerr(TPM_F_TPM_ENGINE_LOAD_KEY, - TPM_R_KEY_UNSUPPORTED); - goto load_err; - } else { - TSSerr(TPM_F_TPM_ENGINE_LOAD_KEY, TPM_R_KEY_UNRECOGNIZED); - /* no TPM object has been loaded yet, so bail out without - * flushing any key context */ - goto load_err; - } - - // since this TPM key was wrapped in the Endorsement - // Key hierarchy and its handle was persisted, we will - // specify that as the Parent Handle for the Load operation - if (!tssl->parent) { - TSSerr(TPM_F_TPM_ENGINE_LOAD_KEY, TPM_R_KEY_NO_PARENT_HANDLE); - goto load_err; - } - - input.parentHandle = ASN1_INTEGER_get(tssl->parent); - DBG ("Got parent handle 0x%x", input.parentHandle); - // unmarshal the public and private key portions from - // within the TPM ASN1 key blob - p_tpm2_TPM_PrivateKey_Unmarshal(&input.inPrivate, - &(tssl->privkey->data), - &(tssl->privkey->length)); - p_tpm2_TPM_PublicKey_Unmarshal(&input.inPublic, - &(tssl->pubkey->data), - &(tssl->pubkey->length), - FALSE); - if ((rc = p_tpm2_TPM_Execute(hContext, - (RESPONSE_PARAMETERS *)&output, - (COMMAND_PARAMETERS *)&input, - NULL, - TPM_CC_Load, - sessionHandle0, - parentPassword, - sessionAttributes0, - sessionHandle1, - NULL, - sessionAttributes1, - sessionHandle2, - NULL, - sessionAttributes2, - TPM_RH_NULL, NULL, 0))) { - DBG("Context Load Failed: Ret code %08x", rc); - TSSerr(TPM_F_TPM_ENGINE_LOAD_KEY, - TPM_R_REQUEST_FAILED); - goto load_err; - } - hKey = output.objectHandle; - - /* create the new objects to return */ - if ((pkey = EVP_PKEY_new()) == NULL) { - goto err; - } - pkey->type = EVP_PKEY_RSA; - - if ((rsa = RSA_new()) == NULL) { - EVP_PKEY_free(pkey); - goto err; - } - rsa->meth = &tpm_rsa; - /* call our local init function here */ - rsa->meth->init(rsa); - pkey->pkey.rsa = rsa; - - if (!fill_out_rsa_object(rsa, - &input.inPublic.publicArea, - hKey)) { - /* pkey owns rsa at this point, so freeing pkey releases both */ - EVP_PKEY_free(pkey); - goto err; - } - - EVP_PKEY_assign_RSA(pkey, rsa); - return pkey; - -err: - tpm_engine_flush_key_context(hKey); - hKey = NULL_HKEY; - TSSerr(TPM_F_TPM_ENGINE_LOAD_KEY, ERR_R_MALLOC_FAILURE); - -load_err: - //TSSLOADABLE_free(tssl); - return NULL; -}
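To show the caller's side of tpm_engine_load_key(), a minimal, hypothetical application snippet follows; the engine id "tpm2" and the blob path are assumptions for illustration:

    #include <openssl/engine.h>
    #include <openssl/evp.h>

    EVP_PKEY *load_tpm_key(const char *blob_path)
    {
        ENGINE *e = ENGINE_by_id("tpm2");   /* assumed engine id */
        EVP_PKEY *pkey = NULL;

        if (e == NULL)
            return NULL;
        if (!ENGINE_init(e)) {              /* runs tpm_engine_init() */
            ENGINE_free(e);
            return NULL;
        }
        /* dispatches to tpm_engine_load_key() above */
        pkey = ENGINE_load_private_key(e, blob_path, NULL, NULL);
        ENGINE_free(e);                     /* drop the structural reference */
        return pkey;
    }

The functional reference taken by ENGINE_init() is deliberately kept in this sketch, since tpm_engine_finish() closes the TSS context and flushes the key handle; a real caller would pair it with ENGINE_finish() once it is done using the key.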
static int tpm_engine_ctrl(ENGINE * e, int cmd, long i, void *p, void (*f) ()) -{ - int initialised = ((tpm_dso == NULL) ? 0 : 1); - switch (cmd) { - case TPM_CMD_SO_PATH: - if (p == NULL) { - TSSerr(TPM_F_TPM_ENGINE_CTRL, - ERR_R_PASSED_NULL_PARAMETER); - return 0; - } - if (initialised) { - TSSerr(TPM_F_TPM_ENGINE_CTRL, - TPM_R_ALREADY_LOADED); - return 0; - } - TPM_LIBNAME = (const char *) p; - return 1; - default: - break; - } - TSSerr(TPM_F_TPM_ENGINE_CTRL, TPM_R_CTRL_COMMAND_NOT_IMPLEMENTED); - return 0; -} - -static int tpm_rsa_init(RSA *rsa) -{ - if (ex_app_data == TPM_ENGINE_EX_DATA_UNINIT) - ex_app_data = RSA_get_ex_new_index(0, NULL, NULL, NULL, NULL); - - if (ex_app_data == TPM_ENGINE_EX_DATA_UNINIT) { - TSSerr(TPM_F_TPM_RSA_INIT, TPM_R_REQUEST_FAILED); - return 0; - } - - return 1; -} - -static int tpm_rsa_finish(RSA *rsa) -{ - struct rsa_app_data *app_data = RSA_get_ex_data(rsa, ex_app_data); - - OPENSSL_free(app_data); - - return 1; -} - -static int tpm_rsa_priv_dec(int flen, - const unsigned char *from, - unsigned char *to, - RSA *rsa, - int padding) -{ - struct rsa_app_data *app_data = RSA_get_ex_data(rsa, ex_app_data); - TPM_RC result; - UINT32 out_len; - int rv; - RSA_Decrypt_In input; - RSA_Decrypt_Out output; - // the parent object is not password protected - // but it may be in the future. - const char *parentPassword = NULL; - TPMI_SH_AUTH_SESSION sessionHandle0 = TPM_RS_PW; - unsigned int sessionAttributes0 = 0; - TPMI_SH_AUTH_SESSION sessionHandle1 = TPM_RH_NULL; - unsigned int sessionAttributes1 = 0; - TPMI_SH_AUTH_SESSION sessionHandle2 = TPM_RH_NULL; - unsigned int sessionAttributes2 = 0; - - - if (!app_data) { - TSSerr(TPM_F_TPM_RSA_PRIV_DEC, TPM_R_NO_APP_DATA); - if ((rv = RSA_PKCS1_SSLeay()->rsa_priv_dec(flen, from, to, rsa, - padding)) < 0) { - TSSerr(TPM_F_TPM_RSA_PRIV_DEC, TPM_R_REQUEST_FAILED); - } - - return rv; - } - - // hKey is the handle of the private key that is used for decrypt - if (app_data->hKey == NULL_HKEY) { - TSSerr(TPM_F_TPM_RSA_PRIV_DEC, TPM_R_INVALID_KEY); - return 0; - } - /* handle of the private key that will perform rsa decrypt */ - input.keyHandle = app_data->hKey; - - // fill in the TPM2B_PUBLIC_KEY_RSA structure with the - // cipher text and cipher length - { - input.label.t.size = 0; - input.cipherText.t.size = flen; - memcpy(input.cipherText.t.buffer, from, flen); - } - - /* - * Table 157 - Definition of {RSA} TPMT_RSA_DECRYPT Structure: - * we MAY set the input scheme to TPM_ALG_NULL to allow - * for the encryption algorithm prescribed in the digital - * certificate to be used for encryption - */ - input.inScheme.scheme = TPM_ALG_RSAES; /* TPM_ALG_NULL; */ - - // decrypt this cipher text using the private key stored inside - // tpm and referenced by hKey - if ((result = p_tpm2_TPM_Execute(hContext, - (RESPONSE_PARAMETERS *)&output, - (COMMAND_PARAMETERS *)&input, - NULL, - TPM_CC_RSA_Decrypt, - sessionHandle0, - parentPassword, - sessionAttributes0, - sessionHandle1, - NULL, - sessionAttributes1, - sessionHandle2, - NULL, - sessionAttributes2, - TPM_RH_NULL, NULL, 0))) { - DBG("RSA Decrypt Failed: Ret code %d", result); - TSSerr(TPM_F_TPM_RSA_PRIV_DEC, TPM_R_REQUEST_FAILED); - return 0; - } - DBG ("Doing RSA Decryption"); - - // marshal the output TPM2B structure to return the decrypted - // data and its length - rv = p_tpm2_Structure_Marshal(&to, &out_len, - &output.message, - (MarshalFunction_t) - TSS_TPM2B_PUBLIC_KEY_RSA_Marshal); - if (rv == 0) { - DBG("writing out %d bytes of decrypted data", out_len); - return out_len; - } - return 0; -}
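As a usage sketch of the decrypt path just shown (hypothetical, with pkey assumed to come from ENGINE_load_private_key() as above), a plain RSA decrypt call dispatches into tpm_rsa_priv_dec() through the RSA method table, and RSA_PKCS1_PADDING matches the TPM_ALG_RSAES scheme selected there:

    #include <openssl/evp.h>
    #include <openssl/rsa.h>

    int tpm_decrypt(EVP_PKEY *pkey, const unsigned char *ct, int ct_len,
                    unsigned char *out)
    {
        RSA *rsa = EVP_PKEY_get1_RSA(pkey);  /* takes its own reference */
        int len = -1;

        if (rsa != NULL) {
            /* routed to rsa->meth->rsa_priv_dec, i.e. tpm_rsa_priv_dec() */
            len = RSA_private_decrypt(ct_len, ct, out, rsa,
                                      RSA_PKCS1_PADDING);
            RSA_free(rsa);
        }
        return len;  /* plaintext length, or -1 on failure */
    }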
static int tpm_rsa_priv_enc(int flen, - const unsigned char *from, - unsigned char *to, - RSA *rsa, - int padding) -{ - struct rsa_app_data *app_data = RSA_get_ex_data(rsa, ex_app_data); - TPM_RC result = 0; - UINT32 sig_len; - int rv; - RSA_Decrypt_In input; - RSA_Decrypt_Out output; - // the parent object is not password protected - // but it may be in the future. - const char *parentPassword = NULL; - TPMI_SH_AUTH_SESSION sessionHandle0 = TPM_RS_PW; - unsigned int sessionAttributes0 = 0; - TPMI_SH_AUTH_SESSION sessionHandle1 = TPM_RH_NULL; - unsigned int sessionAttributes1 = 0; - TPMI_SH_AUTH_SESSION sessionHandle2 = TPM_RH_NULL; - unsigned int sessionAttributes2 = 0; - - if (!app_data) { - TSSerr(TPM_F_TPM_RSA_PRIV_ENC, TPM_R_NO_APP_DATA); - if ((rv = RSA_PKCS1_SSLeay()->rsa_priv_enc(flen, from, to, rsa, - padding)) < 0) { - TSSerr(TPM_F_TPM_RSA_PRIV_ENC, TPM_R_REQUEST_FAILED); - } - return rv; - } - - if (padding != RSA_PKCS1_PADDING) { - TSSerr(TPM_F_TPM_RSA_PRIV_ENC, TPM_R_INVALID_PADDING_TYPE); - return 0; - } - - // hKey is the handle to the private key that is used for signing - if (app_data->hKey == NULL_HKEY) { - TSSerr(TPM_F_TPM_RSA_PRIV_ENC, TPM_R_INVALID_KEY); - return 0; - } - /* handle of the private key that will perform signing */ - input.keyHandle = app_data->hKey; - - /* - * Table 145 - Definition of TPMT_SIG_SCHEME inscheme: - * we will set the input scheme to TPM_ALG_NULL to allow - * for the hash algorithm prescribed in the digital certificate - * to be used for signing. - * - * Note that we are using a Decryption operation instead of - * a TPM 2.0 Sign operation because of a serious limitation in the - * IBM TSS that it will only sign digests which it has hashed itself, - * i.e. the hash has a corresponding TPM_ST_HASHCHECK validation - * ticket in TPM memory. In short, the TPM will only sign digests - * whose hash algorithm OID it recognizes. - * - * We will therefore specify a Decryption operation with our - * own padding applied up to the RSA block size and specify - * a TPM_ALG_NULL hashing scheme so that the decrypt operation - * effectively becomes a raw private-key (encrypt) operation - */ - input.inScheme.scheme = TPM_ALG_NULL; - - /* digest to be signed */ - int size = RSA_size(rsa); - input.cipherText.t.size = size; - RSA_padding_add_PKCS1_type_1(input.cipherText.t.buffer, - size, from, flen); - input.label.t.size = 0; - - // sign this digest using the private key stored inside - // tpm and referenced by hKey - if ((result = p_tpm2_TPM_Execute(hContext, - (RESPONSE_PARAMETERS *)&output, - (COMMAND_PARAMETERS *)&input, - NULL, - TPM_CC_RSA_Decrypt, - sessionHandle0, - parentPassword, - sessionAttributes0, - sessionHandle1, - NULL, - sessionAttributes1, - sessionHandle2, - NULL, - sessionAttributes2, - TPM_RH_NULL, NULL, 0))) { - DBG("RSA Sign Failed: Ret code %d", result); - TSSerr(TPM_F_TPM_RSA_PRIV_ENC, TPM_R_REQUEST_FAILED); - return 0; - } - - // the digest has now been signed - sig_len = output.message.t.size; - memcpy(to, output.message.t.buffer, sig_len); - - DBG("writing out %d bytes as a signature", sig_len); - return sig_len; -}
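Besides ENGINE_load_tpm(), the engine can also be pulled in through OpenSSL's generic "dynamic" engine, which ends up calling the bind_fn() defined next. A sketch follows; the shared-object path is purely an assumption:

    #include <openssl/engine.h>

    ENGINE *load_dynamic_tpm_engine(void)
    {
        ENGINE *e;

        ENGINE_load_dynamic();
        if ((e = ENGINE_by_id("dynamic")) == NULL)
            return NULL;
        /* SO_PATH/LOAD are ctrl commands of the built-in dynamic engine */
        if (!ENGINE_ctrl_cmd_string(e, "SO_PATH",
                                    "/usr/lib64/openssl/engines/libtpm2.so",
                                    0) ||
            !ENGINE_ctrl_cmd_string(e, "LOAD", NULL, 0)) {
            ENGINE_free(e);
            return NULL;
        }
        return e;
    }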
/* This stuff is needed if this ENGINE is being compiled into a self-contained - * shared-library. - */ -static int bind_fn(ENGINE * e, const char *id) -{ - if (id && (strcmp(id, engine_tpm_id) != 0)) { - TSSerr(TPM_F_TPM_BIND_FN, TPM_R_ID_INVALID); - return 0; - } - if (!bind_helper(e)) { - TSSerr(TPM_F_TPM_BIND_FN, TPM_R_REQUEST_FAILED); - return 0; - } - return 1; -} - -IMPLEMENT_DYNAMIC_CHECK_FN() -IMPLEMENT_DYNAMIC_BIND_FN(bind_fn) diff --git a/security/tpm2-openssl-engine/tpm2-openssl-engine/e_tpm2.h b/security/tpm2-openssl-engine/tpm2-openssl-engine/e_tpm2.h deleted file mode 100644 index 9b8f7a500..000000000 --- a/security/tpm2-openssl-engine/tpm2-openssl-engine/e_tpm2.h +++ /dev/null @@ -1,147 +0,0 @@ -/* - * Copyright (c) 2017 Wind River Systems, Inc. -* -* SPDX-License-Identifier: Apache-2.0 -* - */ -/* ==================================================================== - * Copyright (c) 1999-2001 The OpenSSL Project. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * - * 3. All advertising materials mentioning features or use of this - * software must display the following acknowledgment: - * "This product includes software developed by the OpenSSL Project - * for use in the OpenSSL Toolkit. (http://www.OpenSSL.org/)" - * - * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to - * endorse or promote products derived from this software without - * prior written permission. For written permission, please contact - * licensing@OpenSSL.org. - * - * 5. Products derived from this software may not be called "OpenSSL" - * nor may "OpenSSL" appear in their names without prior written - * permission of the OpenSSL Project. - * - * 6. Redistributions of any form whatsoever must retain the following - * acknowledgment: - * "This product includes software developed by the OpenSSL Project - * for use in the OpenSSL Toolkit (http://www.OpenSSL.org/)" - * - * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY - * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR - * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED - * OF THE POSSIBILITY OF SUCH DAMAGE. - * ==================================================================== - * - * This product includes cryptographic software written by Eric Young - * (eay@cryptsoft.com). This product includes software written by Tim - * Hudson (tjh@cryptsoft.com). - * This product is inspired by the original TPM 1.2 openssl engine written - * by Kent Yoder for the Trousers Project.
This product - * includes TPM key blob ASN-1 encoding scheme from James Bottomley - * - * - */ - -#ifndef _E_TPM_H -#define _E_TPM_H - -#include -#include -#include -#include -#include -#include - -#define TPM_LIB_NAME "tpm2 engine" - -#define NULL_HCONTEXT NULL -#define NULL_HKEY 0 - -void ERR_load_TPM_strings(void); -void ERR_unload_TPM_strings(void); -void ERR_TSS_error(int function, int reason, char *file, int line); - -#define TSSerr(f,r) ERR_TSS_error((f),(r),__FILE__,__LINE__) -#define DBG(x, ...) fprintf(stderr, "DEBUG %s:%d %s " x "\n", __FILE__,__LINE__,__FUNCTION__,##__VA_ARGS__) - -/* Error codes for the TPM functions. */ - -/* Function codes. */ -#define TPM_F_TPM_ENGINE_CTRL 100 -#define TPM_F_TPM_ENGINE_FINISH 101 -#define TPM_F_TPM_ENGINE_INIT 102 -#define TPM_F_TPM_RSA_PRIV_ENC 103 -#define TPM_F_TPM_RSA_PRIV_DEC 104 -#define TPM_F_TPM_RSA_FINISH 105 -#define TPM_F_TPM_RSA_INIT 106 -#define TPM_F_TPM_ENGINE_LOAD_KEY 107 -#define TPM_F_TPM_BIND_FN 108 -#define TPM_F_TPM_FILL_RSA_OBJECT 109 -#define TPM_F_TPM_FLUSH_OBJECT_CONTEXT 110 - -/* Reason codes. */ -#define TPM_R_ALREADY_LOADED 100 -#define TPM_R_CTRL_COMMAND_NOT_IMPLEMENTED 101 -#define TPM_R_DSO_FAILURE 102 -#define TPM_R_MEXP_LENGTH_TO_LARGE 103 -#define TPM_R_MISSING_KEY_COMPONENTS 104 -#define TPM_R_NOT_INITIALISED 105 -#define TPM_R_NOT_LOADED 106 -#define TPM_R_OPERANDS_TOO_LARGE 107 -#define TPM_R_OUTLEN_TO_LARGE 108 -#define TPM_R_REQUEST_FAILED 109 -#define TPM_R_UNDERFLOW_CONDITION 110 -#define TPM_R_UNDERFLOW_KEYRECORD 111 -#define TPM_R_UNIT_FAILURE 112 -#define TPM_R_INVALID_KEY_SIZE 113 -#define TPM_R_BN_CONVERSION_FAILED 114 -#define TPM_R_INVALID_EXPONENT 115 -#define TPM_R_REQUEST_TOO_BIG 116 -#define TPM_R_NO_APP_DATA 117 -#define TPM_R_INVALID_ENC_SCHEME 118 -#define TPM_R_INVALID_MSG_SIZE 119 -#define TPM_R_INVALID_PADDING_TYPE 120 -#define TPM_R_INVALID_KEY 121 -#define TPM_R_FILE_NOT_FOUND 122 -#define TPM_R_FILE_READ_FAILED 123 -#define TPM_R_ID_INVALID 124 -#define TPM_R_TPM_1_2_KEY 125 -#define TPM_R_KEY_UNSUPPORTED 126 -#define TPM_R_KEY_UNRECOGNIZED 127 -#define TPM_R_KEY_NO_PARENT_HANDLE 128 - -/* structure pointed to by the RSA object's app_data pointer. - * this is used to tag TPM meta data in the RSA object and - * use that to distinguish between a vanilla Openssl RSA object - * and a TPM RSA object - */ -struct rsa_app_data -{ - TPMI_DH_OBJECT hKey; - // add additional meta data as need be -}; - -#define TPM_ENGINE_EX_DATA_UNINIT -1 -#define RSA_PKCS1_OAEP_PADDING_SIZE (2 * SHA_DIGEST_LENGTH + 2) - -#endif diff --git a/security/tpm2-openssl-engine/tpm2-openssl-engine/e_tpm2_err.c b/security/tpm2-openssl-engine/tpm2-openssl-engine/e_tpm2_err.c deleted file mode 100644 index 6a584a965..000000000 --- a/security/tpm2-openssl-engine/tpm2-openssl-engine/e_tpm2_err.c +++ /dev/null @@ -1,170 +0,0 @@ -/* - * Copyright (c) 2017 Wind River Systems, Inc. -* -* SPDX-License-Identifier: Apache-2.0 -* - */ -/* ==================================================================== - * Copyright (c) 1999-2001 The OpenSSL Project. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * 2. 
Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * - * 3. All advertising materials mentioning features or use of this - * software must display the following acknowledgment: - * "This product includes software developed by the OpenSSL Project - * for use in the OpenSSL Toolkit. (http://www.OpenSSL.org/)" - * - * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to - * endorse or promote products derived from this software without - * prior written permission. For written permission, please contact - * licensing@OpenSSL.org. - * - * 5. Products derived from this software may not be called "OpenSSL" - * nor may "OpenSSL" appear in their names without prior written - * permission of the OpenSSL Project. - * - * 6. Redistributions of any form whatsoever must retain the following - * acknowledgment: - * "This product includes software developed by the OpenSSL Project - * for use in the OpenSSL Toolkit (http://www.OpenSSL.org/)" - * - * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY - * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR - * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED - * OF THE POSSIBILITY OF SUCH DAMAGE. - * ==================================================================== - * - * This product includes cryptographic software written by Eric Young - * (eay@cryptsoft.com). This product includes software written by Tim - * Hudson (tjh@cryptsoft.com). - * This product is inspired by the original TPM 1.2 openssl engine written - * by Kent Yoder for the Trousers Project. 
This product - * includes TPM key blob ASN-1 encoding scheme from James Bottomley - * - * - */ -#include - -#include -#include -#include - -#include "e_tpm2.h" - -/* BEGIN ERROR CODES */ -#ifndef OPENSSL_NO_ERR -static ERR_STRING_DATA TPM_str_functs[] = { - {ERR_PACK(0, TPM_F_TPM_ENGINE_CTRL, 0), "TPM_ENGINE_CTRL"}, - {ERR_PACK(0, TPM_F_TPM_ENGINE_FINISH, 0), "TPM_ENGINE_FINISH"}, - {ERR_PACK(0, TPM_F_TPM_ENGINE_INIT, 0), "TPM_ENGINE_INIT"}, - {ERR_PACK(0, TPM_F_TPM_RSA_PRIV_ENC, 0), "TPM_RSA_PRIV_ENC"}, - {ERR_PACK(0, TPM_F_TPM_RSA_PRIV_DEC, 0), "TPM_RSA_PRIV_DEC"}, - {ERR_PACK(0, TPM_F_TPM_RSA_FINISH, 0), "TPM_RSA_FINISH"}, - {ERR_PACK(0, TPM_F_TPM_RSA_INIT, 0), "TPM_RSA_INIT"}, - {ERR_PACK(0, TPM_F_TPM_ENGINE_LOAD_KEY, 0), "TPM_ENGINE_LOAD_KEY"}, - {ERR_PACK(0, TPM_F_TPM_BIND_FN, 0), "TPM_BIND_FN"}, - {ERR_PACK(0, TPM_F_TPM_FILL_RSA_OBJECT, 0), "TPM_FILL_RSA_OBJECT"}, - {ERR_PACK(0, TPM_F_TPM_FLUSH_OBJECT_CONTEXT, 0), "TPM_FLUSH_OBJECT_CONTEXT"}, - {0, NULL} -}; - -static ERR_STRING_DATA TPM_str_reasons[] = { - {TPM_R_ALREADY_LOADED, "already loaded"}, - {TPM_R_CTRL_COMMAND_NOT_IMPLEMENTED, "ctrl command not implemented"}, - {TPM_R_DSO_FAILURE, "dso failure"}, - {TPM_R_MISSING_KEY_COMPONENTS, "missing key components"}, - {TPM_R_NOT_INITIALISED, "not initialised"}, - {TPM_R_NOT_LOADED, "not loaded"}, - {TPM_R_OPERANDS_TOO_LARGE, "operands too large"}, - {TPM_R_OUTLEN_TO_LARGE, "outlen too large"}, - {TPM_R_REQUEST_FAILED, "request failed"}, - {TPM_R_REQUEST_TOO_BIG, "requested number of random bytes > 4096"}, - {TPM_R_UNDERFLOW_CONDITION, "underflow condition"}, - {TPM_R_UNDERFLOW_KEYRECORD, "underflow keyrecord"}, - {TPM_R_UNIT_FAILURE, "unit failure"}, - {TPM_R_INVALID_KEY_SIZE, "invalid key size"}, - {TPM_R_BN_CONVERSION_FAILED, "bn conversion failed"}, - {TPM_R_INVALID_EXPONENT, "invalid exponent"}, - {TPM_R_NO_APP_DATA, "no app data in RSA object"}, - {TPM_R_INVALID_ENC_SCHEME, "invalid encryption scheme"}, - {TPM_R_INVALID_MSG_SIZE, "invalid message size to sign"}, - {TPM_R_INVALID_PADDING_TYPE, "invalid padding type"}, - {TPM_R_INVALID_KEY, "invalid key"}, - {TPM_R_FILE_NOT_FOUND, "file to load not found"}, - {TPM_R_FILE_READ_FAILED, "failed reading the key file"}, - {TPM_R_ID_INVALID, "engine id doesn't match"}, - {TPM_R_TPM_1_2_KEY, "tpm 1.2 key format not supported"}, - {TPM_R_KEY_UNSUPPORTED, "unsupported TPM key format"}, - {TPM_R_KEY_UNRECOGNIZED, "unrecognized TPM key format"}, - {TPM_R_KEY_NO_PARENT_HANDLE, "TPM key has no parent handle"}, - {0, NULL} -}; - -#endif - -static ERR_STRING_DATA TPM_lib_name[] = { - {0, TPM_LIB_NAME}, - {0, NULL} -}; - - -static int TPM_lib_error_code = 0; -static int TPM_error_init = 1; - -void ERR_load_TPM_strings(void) -{ - if (TPM_lib_error_code == 0) { - TPM_lib_error_code = ERR_get_next_error_library(); - DBG("TPM_lib_error_code is %d", TPM_lib_error_code); - } - - if (TPM_error_init) { - TPM_error_init = 0; -#ifndef OPENSSL_NO_ERR - ERR_load_strings(TPM_lib_error_code, TPM_str_functs); - ERR_load_strings(TPM_lib_error_code, TPM_str_reasons); -#endif - TPM_lib_name[0].error = ERR_PACK(TPM_lib_error_code, 0, 0); - ERR_load_strings(0, TPM_lib_name); - } -} - -void ERR_unload_TPM_strings(void) -{ - if (TPM_error_init == 0) { -#ifndef OPENSSL_NO_ERR - ERR_unload_strings(TPM_lib_error_code, TPM_str_functs); - ERR_unload_strings(TPM_lib_error_code, TPM_str_reasons); -#endif - - ERR_unload_strings(0, TPM_lib_name); - TPM_error_init = 1; - } -} - -void ERR_TSS_error(int function, int reason, char *file, int line) -{ - if (TPM_lib_error_code == 0) -
TPM_lib_error_code = ERR_get_next_error_library(); - - ERR_PUT_error(TPM_lib_error_code, function, reason, file, line); -} - diff --git a/security/tpm2-openssl-engine/tpm2-openssl-engine/tpm2-asn.h b/security/tpm2-openssl-engine/tpm2-openssl-engine/tpm2-asn.h deleted file mode 100644 index 20c8c074c..000000000 --- a/security/tpm2-openssl-engine/tpm2-openssl-engine/tpm2-asn.h +++ /dev/null @@ -1,121 +0,0 @@ -/* - * Copyright (c) 2017 Wind River Systems, Inc. -* -* SPDX-License-Identifier: Apache-2.0 -* - */ -/* ==================================================================== - * Copyright (c) 1999-2001 The OpenSSL Project. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * - * 3. All advertising materials mentioning features or use of this - * software must display the following acknowledgment: - * "This product includes software developed by the OpenSSL Project - * for use in the OpenSSL Toolkit. (http://www.OpenSSL.org/)" - * - * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to - * endorse or promote products derived from this software without - * prior written permission. For written permission, please contact - * licensing@OpenSSL.org. - * - * 5. Products derived from this software may not be called "OpenSSL" - * nor may "OpenSSL" appear in their names without prior written - * permission of the OpenSSL Project. - * - * 6. Redistributions of any form whatsoever must retain the following - * acknowledgment: - * "This product includes software developed by the OpenSSL Project - * for use in the OpenSSL Toolkit (http://www.OpenSSL.org/)" - * - * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY - * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR - * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED - * OF THE POSSIBILITY OF SUCH DAMAGE. - * ==================================================================== - * - * This product includes cryptographic software written by Eric Young - * (eay@cryptsoft.com). This product includes software written by Tim - * Hudson (tjh@cryptsoft.com). - * This product is inspired by the original TPM 1.2 openssl engine written - * by Kent Yoder for the Trousers Project. This product - * includes TPM key blob ASN-1 encoding scheme from James Bottomley - * - * - */ - -#ifndef _TPM2_ASN_H -#define _TPM2_ASN_H - -#include -#include - -/* - * Define the format of a TPM key file. 
The current format covers - * both TPM1.2 keys as well as symmetrically encrypted private keys - * produced by TSS2_Import and the TPM2 format public key which - * contains things like the policy but which is cryptographically tied - * to the private key. - * - * TPMKey ::= SEQUENCE { - * type OBJECT IDENTIFIER - * emptyAuth [0] EXPLICIT BOOLEAN OPTIONAL - * parent [1] EXPLICIT INTEGER OPTIONAL - * pubkey [2] EXPLICIT OCTET STRING OPTIONAL - * privkey OCTET STRING - * } - */ - -typedef struct { - ASN1_OBJECT *type; - ASN1_BOOLEAN emptyAuth; - ASN1_INTEGER *parent; - ASN1_OCTET_STRING *pubkey; - ASN1_OCTET_STRING *privkey; -} TSSLOADABLE; - -/* the type OIDs are in the TCG namespace 2.23.133; we choose an - * unoccupied child (10) for the key type file and three values: - * 1 : TPM 1.2 key - * 2 : Key that is directly loadable - * 3 : Key that must first be imported then loaded - */ -#define OID_12Key "2.23.133.10.1" -#define OID_loadableKey "2.23.133.10.2" -#define OID_importableKey "2.23.133.10.3" - -ASN1_SEQUENCE(TSSLOADABLE) = { - ASN1_SIMPLE(TSSLOADABLE, type, ASN1_OBJECT), - ASN1_EXP_OPT(TSSLOADABLE, emptyAuth, ASN1_BOOLEAN, 0), - ASN1_EXP_OPT(TSSLOADABLE, parent, ASN1_INTEGER, 1), - ASN1_EXP_OPT(TSSLOADABLE, pubkey, ASN1_OCTET_STRING, 2), - ASN1_SIMPLE(TSSLOADABLE, privkey, ASN1_OCTET_STRING) -} ASN1_SEQUENCE_END(TSSLOADABLE) - -IMPLEMENT_ASN1_FUNCTIONS(TSSLOADABLE) -//DECLARE_ASN1_FUNCTIONS(TSSLOADABLE) - -/* This is the PEM guard tag */ -#define TSSLOADABLE_PEM_STRING "TSS2 KEY BLOB" - -static IMPLEMENT_PEM_write_bio(TSSLOADABLE, TSSLOADABLE, TSSLOADABLE_PEM_STRING, TSSLOADABLE) -static IMPLEMENT_PEM_read_bio(TSSLOADABLE, TSSLOADABLE, TSSLOADABLE_PEM_STRING, TSSLOADABLE) - -#endif diff --git a/security/wrs-ssl/LICENSE b/security/wrs-ssl/LICENSE deleted file mode 100644 index d64569567..000000000 --- a/security/wrs-ssl/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below).
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/security/wrs-ssl/centos/build_srpm.data b/security/wrs-ssl/centos/build_srpm.data deleted file mode 100644 index b2285b4cb..000000000 --- a/security/wrs-ssl/centos/build_srpm.data +++ /dev/null @@ -1,2 +0,0 @@ -COPY_LIST="$PKG_BASE/files/* $PKG_BASE/LICENSE $PKG_BASE/server-csr.conf" -TIS_PATCH_VER=13 diff --git a/security/wrs-ssl/centos/wrs-ssl.spec b/security/wrs-ssl/centos/wrs-ssl.spec deleted file mode 100644 index f4983978e..000000000 --- a/security/wrs-ssl/centos/wrs-ssl.spec +++ /dev/null @@ -1,41 +0,0 @@ -Summary: wrs-ssl version 1.0.0-r2 -Name: wrs-ssl -Version: 1.0.0 -Release: %{tis_patch_ver}%{?_tis_dist} -License: Apache-2.0 -Group: base -Packager: Wind River -URL: unknown -BuildRequires: openssl - -Source0: LICENSE -Source1: server-csr.conf -Source2: tpmdevice-setup - -%description -Wind River Security - -%install -rm -rf $RPM_BUILD_ROOT - -RPM_BUILD_DIR_PKG="%{name}-%{version}" -mkdir -p $RPM_BUILD_DIR_PKG -CSRCONF="$RPM_BUILD_DIR_PKG/server-csr.conf" -PEMFILE="$RPM_BUILD_DIR_PKG/self-signed-server-cert.pem" -cp %{SOURCE1} $CSRCONF -# generate a self-signed default certificate -/usr/bin/openssl req -new -x509 -sha256 -keyout $PEMFILE -out $PEMFILE -days 365 -nodes -config $CSRCONF -mkdir -p $RPM_BUILD_ROOT/%{_sysconfdir}/ssl/private -install -m 400 $PEMFILE $RPM_BUILD_ROOT/%{_sysconfdir}/ssl/private/self-signed-server-cert.pem - -mkdir -p $RPM_BUILD_ROOT/%{_sbindir} -install -m 700 %{SOURCE2} $RPM_BUILD_ROOT/%{_sbindir}/tpmdevice-setup - -mkdir -p $RPM_BUILD_ROOT/%{_defaultdocdir}/%{name}-%{version} -install -m 644 %{SOURCE0} $RPM_BUILD_ROOT/%{_defaultdocdir}/%{name}-%{version} - -%files -%defattr(-,root,root,-) -%{_sysconfdir}/* -%{_sbindir}/* -%{_defaultdocdir}/%{name}-%{version} diff --git a/security/wrs-ssl/files/tpmdevice-setup b/security/wrs-ssl/files/tpmdevice-setup deleted file mode 100644 index e47e989b3..000000000 --- a/security/wrs-ssl/files/tpmdevice-setup +++ /dev/null @@ -1,122 +0,0 @@ -#!/bin/bash -# -# Copyright (c) 2013-2017 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - -# TPM setup (both active controller and remote) - -export TPM_INTERFACE_TYPE=dev - -CERTIFICATE_FILE="server-cert.pem" -LOGFILE="/etc/ssl/private/.install.log" -ORIGINAL_KEY=$1 -TPM_OBJECT_CONTEXT=$2 -PUBLIC_KEY=$3 -TPM_KEY_HIERARCHY_HANDLE=0x81010002 - -if [ -z "$ORIGINAL_KEY" ] || [ -z "$TPM_OBJECT_CONTEXT" ] || [ -z "$PUBLIC_KEY" ]; then - echo "ERROR: Missing required parameters" - echo "USAGE: $0 <original_key> <tpm_object_context> <public_key>" - exit 1 -fi - -CERTIFICATE_DIR=$(dirname "${ORIGINAL_KEY}") -export TPM_DATA_DIR=$CERTIFICATE_DIR - -# TPM specific environment -TPM_OBJECT_NAME="$CERTIFICATE_DIR/key.blob.name" -RESOURCEMGR_DEFAULT_PORT="2323" - -### Helper functions ### - -# Echoes an error and exits with the provided error code -# Input : error message ($1), ret code ($2) -# Output : None -# Note : If no retcode is provided, exits with 1 -error_exit () { - echo "$1" - # remove previous object context - rm -f $TPM_OBJECT_CONTEXT &> /dev/null - exit "${2:-1}" -} - -# func: checkTPMTools -# check if the appropriate TPM2.0-tools are installed -# -# Input : None -# Output : None -checkTPMTools () { -declare -a helper_scripts=("tss2_createprimary" - "tss2_importpem" - "tss2_getcapability" - "tss2_load" - "tss2_contextsave" - "tss2_evictcontrol" - "tss2_flushcontext" - "create_tpm2_key") -for src in "${helper_scripts[@]}"; do - if ! type "$src" &>/dev/null; then - error_exit "ERROR: Cannot find $src. 
Needed for TPM configuration" - fi -done -} - -### Main ### -# remove previous object context -rm -f $TPM_OBJECT_CONTEXT &> /dev/null -rm -f $CERTIFICATE_DIR/*.bin &> /dev/null - -tpmCheck=`lsmod | grep "tpm" -c` -[ "$tpmCheck" -ne 0 ] || error_exit "TPM Kernel Module not found. Check BIOS/Kernel configuration" - -# Ensure that the appropriate TPM tool utilities are -# installed on the system -checkTPMTools - -# Confirm that this is a TPM 2.0 device -TPM_VERSION=`tss2_getcapability -cap 6 | grep TPM_PT_FAMILY_INDICATOR | awk '{print $4}' | xxd -r -p` -if [ "$TPM_VERSION" != "2.0" ]; then - error_exit "ERROR: TPM Device is not version 2.0 compatible" -fi - -# Clear the NV -# as well as all stale transient handles in -# the endorsement hierarchy. -tss2_clear -hi l - -# Create the Endorsement Primary Key hierarchy which will be used -# for wrapping the private key. Use RSA as the primary key encryption -# and SHA 256 for hashing. Allow TPM to output the object -# handle as a file context -PRIMARY_HANDLE=`tss2_createprimary -hi e -rsa -halg sha256 | grep "Handle" | awk '{print $2}'` -[ ! -z "$PRIMARY_HANDLE" ] || error_exit "Unable to create TPM Key Hierarchy" -PRIMARY_HANDLE="0x$PRIMARY_HANDLE" - -# The object context will be lost over node reboots, and needs to -# be persistently stored in TPM NV. -# evict the persistent handle if it exists previously -tss2_evictcontrol -hi o -ho $TPM_KEY_HIERARCHY_HANDLE -hp $TPM_KEY_HIERARCHY_HANDLE -tss2_evictcontrol -hi o -ho $PRIMARY_HANDLE -hp $TPM_KEY_HIERARCHY_HANDLE >> $LOGFILE -[ $? -eq 0 ] || error_exit "Unable to persist Key Hierarchy in TPM memory" - -tss2_flushcontext -ha $PRIMARY_HANDLE - -# wrap the original private key in TPM's Endorsement key hierarchy -# this will generate a TSS key blob in ASN 1 encoding -create_tpm2_key -p $TPM_KEY_HIERARCHY_HANDLE -w $ORIGINAL_KEY $TPM_OBJECT_CONTEXT >> $LOGFILE -[ $? -eq 0 ] || error_exit "Unable to wrap provided private key into TPM Key Hierarchy" - -# the apps will also need to the public key, place it in -# the certificate dirpath -mv $PUBLIC_KEY $CERTIFICATE_DIR/$CERTIFICATE_FILE - -# ensure that the TPM object and the public cert are only readable by root -chown root $CERTIFICATE_DIR/$CERTIFICATE_FILE $TPM_OBJECT_CONTEXT -chmod 0600 $CERTIFICATE_DIR/$CERTIFICATE_FILE $TPM_OBJECT_CONTEXT - -# remove all sysinv key copy artifacts -rm -f $ORIGINAL_KEY "${ORIGINAL_KEY}.sysinv" "${PUBLIC_KEY}.sysinv" &> /dev/null - -exit 0 diff --git a/security/wrs-ssl/server-csr.conf b/security/wrs-ssl/server-csr.conf deleted file mode 100644 index ac9fdb513..000000000 --- a/security/wrs-ssl/server-csr.conf +++ /dev/null @@ -1,8 +0,0 @@ -[ req ] -default_bits = 1024 -distinguished_name = req_distinguished_name -prompt = no - -[ req_distinguished_name ] -CN = StarlingX - diff --git a/tools/collector/LICENSE b/tools/collector/LICENSE deleted file mode 100644 index d64569567..000000000 --- a/tools/collector/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. 
- - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/tools/collector/centos/build_srpm.data b/tools/collector/centos/build_srpm.data deleted file mode 100644 index abb9f2e28..000000000 --- a/tools/collector/centos/build_srpm.data +++ /dev/null @@ -1,2 +0,0 @@ -SRC_DIR="scripts" -TIS_PATCH_VER=30 diff --git a/tools/collector/centos/collector.spec b/tools/collector/centos/collector.spec deleted file mode 100644 index 4de41526c..000000000 --- a/tools/collector/centos/collector.spec +++ /dev/null @@ -1,67 +0,0 @@ -Summary: CGCS Platform Data Collection Scripts Package -Name: collector -Version: 1.0 -Release: %{tis_patch_ver}%{?_tis_dist} -License: Apache-2.0 -Group: base -Packager: Wind River -URL: unknown -Source0: %{name}-%{version}.tar.gz - -%description -This package provides scripts that implement data and log collection, which -field support can execute to gather the current state and runtime history -for off-platform analysis and debug. - -%prep -%setup - -%install -mkdir -p %{buildroot} - -install -m 755 -d %{buildroot}%{_sysconfdir}/collect.d -install -m 755 -d %{buildroot}%{_sysconfdir}/collect -install -m 755 -d %{buildroot}/usr/local/sbin -install -m 755 -d %{buildroot}/usr/local/bin -install -m 755 -d %{buildroot}%{_sbindir} - -install -m 755 collect %{buildroot}/usr/local/sbin/collect -install -m 755 collect_host %{buildroot}/usr/local/sbin/collect_host -install -m 755 collect_date %{buildroot}/usr/local/sbin/collect_date -install -m 755 collect_utils %{buildroot}/usr/local/sbin/collect_utils -install -m 755 collect_parms %{buildroot}/usr/local/sbin/collect_parms -install -m 755 collect_mask_passwords %{buildroot}/usr/local/sbin/collect_mask_passwords -install -m 755 expect_done %{buildroot}/usr/local/sbin/expect_done - -install -m 755 collect_sysinv.sh %{buildroot}%{_sysconfdir}/collect.d/collect_sysinv -install -m 755 collect_psqldb.sh %{buildroot}%{_sysconfdir}/collect.d/collect_psqldb -install -m 755 collect_openstack.sh %{buildroot}%{_sysconfdir}/collect.d/collect_openstack -install -m 755 collect_networking.sh %{buildroot}%{_sysconfdir}/collect.d/collect_networking -install -m 755 collect_ceph.sh %{buildroot}%{_sysconfdir}/collect.d/collect_ceph -install -m 755 collect_sm.sh %{buildroot}%{_sysconfdir}/collect.d/collect_sm -install -m 755 collect_tc.sh %{buildroot}%{_sysconfdir}/collect.d/collect_tc -install -m 755 collect_nfv_vim.sh %{buildroot}%{_sysconfdir}/collect.d/collect_nfv_vim -install -m 755 collect_ovs.sh %{buildroot}%{_sysconfdir}/collect.d/collect_ovs -install -m 755 collect_patching.sh %{buildroot}%{_sysconfdir}/collect.d/collect_patching -install -m 755 collect_coredump.sh %{buildroot}%{_sysconfdir}/collect.d/collect_coredump -install -m 755 collect_crash.sh %{buildroot}%{_sysconfdir}/collect.d/collect_crash -install -m 755 collect_ima.sh %{buildroot}%{_sysconfdir}/collect.d/collect_ima -install -m 755 collect_fm.sh %{buildroot}%{_sysconfdir}/collect.d/collect_fm - -install -m 755 etc.exclude %{buildroot}%{_sysconfdir}/collect/etc.exclude -install -m 755 run.exclude %{buildroot}%{_sysconfdir}/collect/run.exclude - -ln -sf /usr/local/sbin/collect %{buildroot}/usr/local/bin/collect -ln -sf /usr/local/sbin/collect %{buildroot}%{_sbindir}/collect - -%clean -rm -rf %{buildroot} - -%files -%license LICENSE -%defattr(-,root,root,-) -%{_sysconfdir}/collect/* -%{_sysconfdir}/collect.d/* -/usr/local/sbin/* -/usr/local/bin/collect -%{_sbindir}/collect diff --git a/tools/collector/scripts/LICENSE b/tools/collector/scripts/LICENSE deleted file mode 100644 index d64569567..000000000 --- a/tools/collector/scripts/LICENSE +++
/dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. 
Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/tools/collector/scripts/collect b/tools/collector/scripts/collect deleted file mode 100755 index 88e373ea6..000000000 --- a/tools/collector/scripts/collect +++ /dev/null @@ -1,1245 +0,0 @@ -#! /bin/bash -######################################################################## -# -# Copyright (c) 2014-2017 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# -######################################################################## -# -# Description: This script creates a tarball of logs and runtime -# configuration information for any of the following -# -# - current host ... collect -# - specified host ... collect hostname -# - group of hosts ... collect --list ... -# - all hosts ... collect --all -# -# Behavior : See print_help below. -# -# Inclusions : What is collected. -# -# - /var/log -# - /var/run (exclusions listed in /etc/collect/exclude.list) -# - area specific configuration and data -> ./var/extra -# - all databases in plain text ; except for ceilometer and keystone -# -# Additional collected info is expressed by the following runtime output. -# Generally, individual commands that display output have that output -# redirected to the appropriate info file in /scratch/var/extra -# -# sysadmin@controller-0:/scratch# sudo collect -# nodetype : controller -# Collector: /scratch -# Extra Dir: /scratch/var/extra -# Database : /scratch/database -# Tarball : /scratch/controller-0.20140318.232925.tgz -# ------------------------------------------------------------------------ -# controller-0: Process Info ......: /scratch/var/extra/process.info -# controller-0: Host Info .........: /scratch/var/extra/host.info -# controller-0: Memory Info .......: /scratch/var/extra/memory.info -# controller-0: Filesystem Info ...: /scratch/var/extra/filesystem.info -# controller-0: Bash History ......: /scratch/var/extra/history.info -# controller-0: Interrupt Info ....: /scratch/var/extra/interrupt.info -# controller-0: HA Info ...........: /scratch/var/extra/crm.info -# controller-0: CIB Admin Info ....: /scratch/var/extra/crm.xml -# controller-0: Mtce Info .........: /scratch/var/extra/mtce.info -# controller-0: Networking Info ...: /scratch/var/extra/networking.info -# controller-0: RabbitMQ Info .....: /scratch/var/extra/rabbitmq.info -# controller-0: Database Info .....: /scratch/var/extra/database.info -# controller-0: Dumping Database ..: /scratch/database/postgres.db.sql.txt -# controller-0: Dumping Database ..: /scratch/database/glance.db.sql.txt -# controller-0: Dumping Database ..: /scratch/database/nova.db.sql.txt -# controller-0: Dumping Database ..: /scratch/database/cinder.db.sql.txt -# controller-0: Dumping Database ..: /scratch/database/heat.db.sql.txt -# controller-0: Dumping Database ..: /scratch/database/neutron.db.sql.txt -# controller-0: Dumping Database ..: /scratch/database/sysinv.db.sql.txt -# controller-0: Creating Tarball ..: /scratch/controller-0.20140318.232925.tgz -# -# Tarball: /scratch/..tgz -# -# The script first collects the process, host, memory, -# filesystem, interrupt and HA information. 
-# It then proceeds to call run-parts against the -# /etc/collect.d directory which contains service level -# collectors. Additional collectors can be added to that -# collect.d directory and will be called automatically -# (a minimal plugin sketch follows this header). -# -# Warning: Script must be run as the 'sysadmin' user, not root. -# The collector scripts consider nodetype when deciding -# which commands to execute where. -# -##################################################################
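# ----------------------------------------------------------------------
# Illustrative sketch (not from the original patch): a minimal
# /etc/collect.d plugin, modeled on the collectors deleted later in this
# patch (e.g. collect_ceph.sh). The plugin name "example" and the probed
# command are hypothetical; collect_parms, collect_utils, delimiter,
# ${extradir}, ${hostname} and ${COLLECT_ERROR_LOG} are the conventions
# those collectors already use.
#
#   #! /bin/bash
#   source /usr/local/sbin/collect_parms
#   source /usr/local/sbin/collect_utils
#
#   SERVICE="example"
#   LOGFILE="${extradir}/${SERVICE}.info"
#   echo "${hostname}: Example Info ......: ${LOGFILE}"
#
#   # frame each command's output with a delimiter banner
#   delimiter ${LOGFILE} "uptime"
#   uptime >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG}
#
#   exit 0
# ----------------------------------------------------------------------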
TOOL_NAME=collect -TOOL_VER=2 -TOOL_REV=0 - -# collect must be run as sysadmin -if [ ${UID} -eq 0 ]; then - echo "Error: Cannot run collect as 'root' user" - exit 1 -fi - -# pull in common utils and environment -source /usr/local/sbin/collect_utils -source_openrc_if_needed - -function clean_up() -{ - `reset` - echo "" -} - -function control_c() -{ - echo "" - echo "... received exit signal ..." - clean_up - exit 0 -} - -# Handle exit signals -trap control_c SIGINT -trap control_c SIGTERM - - - -# static expect log level control ; -# 0 = hide expect output -# 1 = show expect output -USER_LOG_MODE=0 - -# static execution status 'return value' -RETVAL=0 - -# limit scp bandwidth to 10MB/s ; scp's -l option takes Kbit/s -SCP_CMD="scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o PreferredAuthentications=password -o PubkeyAuthentication=no -l $((10*8*1000))" -SCP_TIMEOUT="600" -SSH_CMD="ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o PreferredAuthentications=password -o PubkeyAuthentication=no" -NOWDATE=`date +"%Y%m%d.%H%M%S"` -COLLECT_BASE_DIR="/scratch" -collect_host="/usr/local/sbin/collect_host" -CURR_DIR=`pwd` - - -# common permission error strings -pw_error="orry, try again" -ac_error="ermission denied" - -function print_help() -{ - echo "" - echo "Titanium Cloud Log Collection Tool, version ${TOOL_VER}.${TOOL_REV}" - echo "" - echo "Usage: ${TOOL_NAME} [COMMANDS ...] {options}" - echo "" - echo "Titanium Cloud 'collect' is used by the customer support organization" - echo " to collect logs and data for off system analysis." - echo "" - echo "Running collect will collect logs to /scratch/" - echo "on the host collect is run from. Use host names to specify which hosts to collect from." - echo "" - echo "Host data collection scope can be the current host, any single specified hostname," - echo "a --list of hostnames or --all hosts in the system using a single command." - echo "" - echo "Optionally specify --start-date and/or --end-date options to limit" - echo " the date range and therefore size of the collect." - echo "" - echo "Optionally specify a --name prefix of the collected tar file." - echo "" - echo "With the command set specified, simply run collect as sysadmin and when" - echo "prompted provide the sysadmin sudo password and let collect handle the rest." - echo "" - echo "Scope Options:" - echo "" - echo " collect ... collect logs for current host" - echo " collect host1 ... collect logs for single named host" - echo " collect host1 host2 host3 ... collect logs for stacked host list" - echo " collect [--list | -l] host1 host2 host3 ... collect logs for list of named hosts" - echo " collect [--all | -a] ... collect data for all hosts" - echo "" - echo "Dated Collect:" - echo "" - echo "collect [--start-date | -s] YYYYMMDD ... collection of logs on and after this date" - echo "collect [--end-date | -e] YYYYMMDD ... collection of logs on and before this date" - echo "" - echo "Tarball Prefix:" - echo "" - echo "collect [--name | -n] {scope and date options} ... specify the name prefix of the collect tarball" - echo "" - echo "Detailed Display:" - echo "" - echo "collect [--verbose | -v] ... print details during collect" - echo "" - echo "Avoid password and security masking:" - echo "" - echo "collect [--skip-mask] ... skip masking of collect data" - echo "" - echo "Examples:" - echo "" - echo "collect ... all logs for current host" - echo "collect --all ... all logs from all hosts in the system" - echo "collect --all --start-date 20150101 ... logs dated on and after Jan 1 2015 from all hosts" - echo "collect --all --start-date 20151101 --end-date 20160201 ... logs dated between Nov 1, 2015 and Feb 1 2016 from all hosts" - echo "collect --start-date 20151101 --end-date 20160201 ... only logs dated between Nov 1, 2015 and Feb 1 2016 for current host" - echo "collect --list controller-0 worker-0 storage-0 ... all logs from specified host list" - echo "collect --list controller-0 worker-1 --end-date 20160201 ... only logs dated on and before Feb 1 2016 for host list" - echo "collect --list controller-1 storage-0 --start-date 20160101 ... only logs dated on and after Jan 1 2016 for controller-1 and storage-0" - echo "" - exit 0 -} - -# command line argument variables ; defaulted -DEBUG=false -CLEAN=false -VERBOSE=false -SKIP_MASK=false - -# date variables -STARTDATE="any" -STARTTIME="any" -ENDDATE="any" -ENDTIME="any" -GETSTARTDATE=false -GETENDDATE=false - -# host selection variables -LISTING=false -ALLHOSTS=false -HOSTS=1 -HOSTLIST=(${HOSTNAME}) -THISHOST=false - -COLLECT_TARNAME="" - -# clear multi option modes -function clear_variable_args() -{ - LISTING=false - GETSTARTDATE=false - GETENDDATE=false -} - -# -# Utility function to print a status message and record the last error code -# -# Assumptions: Handles specific cases of invalid password and permission errors -# by exiting so as to avoid repeated errors during multi-host -# collection.
-# -# $1 - status string -# $2 - status code number -# -function print_status() -{ - local string=${1} - local code=${2} - - logger -t ${COLLECT_TAG} "${string} (reason:${code})" - - # if the status code is in the FAIL range ( less than WARNING ) then update RETVAL - if [ ${code} -lt ${WARN_WARNING} ] ; then - RETVAL=${code} - fi - - if [ ${RETVAL} -eq ${FAIL_PASSWORD} ] ; then - - echo "Invalid password ; exiting (${string})" - exit ${RETVAL} - - elif [ ${RETVAL} -eq ${FAIL_PERMISSION} ] ; then - - echo "Permission error ; exiting (${string})" - exit ${RETVAL} - - elif [ ${RETVAL} -eq ${FAIL_UNREACHABLE} ] ; then - - echo "${string} (reason:${code}:host unreachable)" - - elif [ ${RETVAL} -eq ${FAIL_PERMISSION_SKIP} -o ${RETVAL} -eq ${FAIL_PERMISSION} ] ; then - - echo "${string} (reason:${code}:permission error)" - - elif [ ${RETVAL} -eq ${FAIL_OUT_OF_SPACE} ] ; then - - echo "${string} (reason:${code}) ; need to increase available space in host ${COLLECT_BASE_DIR}" - - elif [ ${RETVAL} -eq ${FAIL_OUT_OF_SPACE_LOCAL} ] ; then - - echo "${string} (reason:${code}) ; need to increase available space in ${HOSTNAME}:${COLLECT_BASE_DIR}" - - elif [ ${RETVAL} -eq ${FAIL_INSUFFICIENT_SPACE} ] ; then - - echo "${string} (reason:${code}) ; ${HOSTNAME}:${COLLECT_BASE_DIR} usage must be below ${MIN_PERCENT_SPACE_REQUIRED}%" - - elif [ ${RETVAL} -ge ${FAIL_TIMEOUT} -a ${RETVAL} -le ${FAIL_TIMEOUT9} ] ; then - - echo "${string} (reason:${code}:operation timeout)" - - else - echo "${string} (reason:${code})" - fi -} - -# -# checks to see if the specified hostname is known -# to inventory as a valid provisioned host - -# $1 - this_hostname - -function is_valid_host() -{ - local this_hostname=${1} - - if [ "${this_hostname}" == "None" ] ; then - return ${FAIL_HOSTNAME} - elif [ "${this_hostname}" == "${HOSTNAME}" ] ; then - return $PASS - elif [ "${ACTIVE}" = true ] ; then - system host-show "${this_hostname}" 2>/dev/null 1>/dev/null - if [ ${?} -ne 0 ] ; then - return ${FAIL_HOSTNAME} - fi - else - print_status "Error: can only run collect for remote hosts on active controller" ${FAIL_INACTIVE} - exit ${FAIL_INACTIVE} - fi - return $PASS -} - - -# Parse the command line -while [[ ${#} -gt 0 ]] ; do - - key="${1}" - - case $key in - - -h|--help) - print_help - exit 0 - ;; - - -n|--name) - COLLECT_TARNAME=${2}_${NOWDATE} - clear_variable_args - shift - ;; - - -v|--verbose) - VERBOSE=true - ;; - - -c|--clean) - CLEAN=true - ;; - - -l|--list) - if [[ ${#} -lt 2 ]] ; then - print_status "Error: empty host --list" ${FAIL} - exit ${FAIL} - fi - is_valid_host "${2}" - if [ ${?} -ne 0 ] ; then - print_status "Error: empty host --list or invalid first hostname" ${FAIL} - exit ${FAIL} - fi - - HOSTLIST=(${2}) - HOSTS=1 - if [ "${2}" == "${HOSTNAME}" ] ; then - THISHOST=true - elif [ "${ACTIVE}" = false ] ; then - print_status "Error: can only run collect for remote hosts on active controller" ${FAIL_INACTIVE} - exit ${FAIL_INACTIVE} - fi - LISTING=true - GETSTARTDATE=false - GETENDDATE=false - shift - ;; - - -a|--all|all) - if [ "${ACTIVE}" = false ] ; then - print_status "Error: can only run collect for remote hosts on active controller" ${FAIL_INACTIVE} - exit ${FAIL_INACTIVE} - fi - ALLHOSTS=true - HOSTLIST=(${HOSTNAME}) - HOSTS=1 - THISHOST=true - clear_variable_args - ;; - - -s|--start-date) - STARTDATE="${2}" - LISTING=false - GETSTARTDATE=true - GETENDDATE=false - shift - ;; - - -e|--end-date) - ENDDATE="${2}" - LISTING=false - GETSTARTDATE=false - GETENDDATE=true - shift - ;; - - -d|--debug) - DEBUG=true - 
USER_LOG_MODE=1 - clear_variable_args - ;; - - --skip-mask) - SKIP_MASK=true - shift - ;; - - *) - if [ "${LISTING}" = true ] ; then - is_valid_host ${key} - if [ ${?} -eq 0 ] ; then - HOSTS=$((${HOSTS} + 1)) - HOSTLIST=( "${HOSTLIST[@]}" ${key} ) - if [ "${key}" == "${HOSTNAME}" ] ; then - THISHOST=true - fi - else - # make the invalid hostname a warning only. - # if we got here then at least the first hostname was valid - print_status "Warning: cannot collect data from unknown host '${key}'" ${WARN_HOSTNAME} - fi - elif [ "${GETSTARTDATE}" = true ] ; then - dlog "accepting but ignoring legacy starttime specification" - elif [ "${GETENDDATE}" = true ] ; then - dlog "accepting but ignoring legacy endtime specification" - else - is_valid_host ${key} - RETVAL=${?} - if [ ${RETVAL} -eq 0 ] ; then - HOSTLIST=${key} - HOSTS=1 - LISTING=true - if [ "${key}" == "${HOSTNAME}" ] ; then - THISHOST=true - fi - else - print_status "Error: cannot collect data from unknown host '${key}'" ${RETVAL} - exit ${RETVAL} - fi - fi - GETSTARTDATE=false - GETENDDATE=false - ;; - esac - shift # past argument or value -done - -if [ ${RETVAL} -ne 0 ]; then - echo "command line parse error (${RETVAL})" - print_help - exit ${RETVAL} -fi - - -# -# request root password and use it for -# all the expect driven requests below -# -read -s -p "[sudo] password for ${USER}:" pw -echo "" - -# Although bash 'read' will handle sanitizing the password -# input for the purposes of storing it in ${pw}, expect -# will need certain special characters to be backslash -# delimited -pw=${pw/\\/\\\\} # replace '\' with '\\' -pw=${pw/\]/\\\]} # replace ']' with '\]' -pw=${pw/\[/\\\[} # replace '[' with '\[' -pw=${pw/$/\\$} # replace '$' with '\$' -pw=${pw/\"/\\\"} # replace '"' with '\"' - -# -# if the user specified the '--all' option then override -# the current list and add them all from inventory. 
-# -if [ "${ALLHOSTS}" = true ] ; then - - for foreign_host in $(system host-list | grep '[0-9]' | cut -d '|' -f 3 | tr -d ' ' | grep -v ${HOSTNAME}); do - if [ "${foreign_host}" != "None" ] ; then - HOSTS=$((${HOSTS} + 1)) - HOSTLIST=( "${HOSTLIST[@]}" ${foreign_host}) - dlog "Host:${HOSTS}: ${foreign_host}" - fi - done - -elif [ ${HOSTS} == 0 ] ; then - - HOSTLIST=${HOSTNAME} - THISHOST=true - COLLECT_TARNAME="${HOSTNAME}_${NOWDATE}" - -fi - -# Print Summary -if [ "${DEBUG}" == true ] ; then - - echo "HOSTLIST = <${HOSTLIST[@]}>" - echo "HOSTS = ${HOSTS}" - echo "ALLHOSTS = ${ALLHOSTS}" - echo "STARTDATE= ${STARTDATE}" - echo "ENDDATE = ${ENDDATE}" - - for hosts in "${HOSTLIST[@]}" ; do - echo "Host:${hosts}" - done - -elif [ ${HOSTS} -eq 0 ] ; then - - print_status "Error: no hosts specified" "${FAIL}" - exit ${FAIL} - -elif [ "${CLEAN}" == false ] ; then - - ilog "collecting data from ${HOSTS} host(s): ${HOSTLIST[@]}" - -else - - ilog "cleaning scratch space on ${HOSTLIST[@]}" - -fi - -# -# removes contents of the local /scratch directory -# -# $1 - host -# $2 - specified directory (always $COLLECT_BASE_DIR) -# -function clean_scratch_dir_local () -{ - local this_hostname=${1} - local directory=${2} - -/usr/bin/expect << EOF - log_user ${USER_LOG_MODE} - spawn bash -i - set timeout 60 - expect -re $ - send -- "sudo rm -rf ${directory}/*_????????.??????* ; cat ${cmd_done_file}\n" - expect { - "assword:" { send "${pw}\r" ; exp_continue } - "${cmd_done_sig}" { exit ${PASS} } - "annot remove" { exit ${FAIL_CLEANUP} } - "${pw_error}" { exit ${FAIL_PASSWORD} } - "${ac_error}" { exit ${FAIL_PERMISSION} } - timeout { exit ${FAIL_TIMEOUT} } - } -EOF - local rc=${?} - if [ ${rc} -ne ${PASS} ] ; then - print_status "Error: clean_scratch_dir_local ${this_hostname} failed" ${rc} - fi - return ${rc} -} - -# -# cleans the contents of the specified hosts's scratch dir -# -# $1 - this hostname -# $2 - specified directory (always $COLLECT_BASE_DIR) -# -function clean_scratch_dir_remote() -{ - local this_hostname=${1} - local directory=${2} - -/usr/bin/expect << EOF - log_user ${USER_LOG_MODE} - spawn bash -i - expect -re $ - set timeout 60 - send "${SSH_CMD} sysadmin@${this_hostname}\n" - expect { - "assword:" { - send "${pw}\r" - expect { - "${this_hostname}" { - set timeout 30 - expect -re $ - send "sudo rm -rf ${directory}/*_????????.??????* ; cat ${cmd_done_file}\n" - expect { - "assword:" { send -- "${pw}\r" ; exp_continue } - "${cmd_done_sig}" { exit ${PASS} } - "${cmd_done_file}: No such file or directory" { exit ${PASS} } - "annot remove" { exit ${FAIL_CLEANUP} } - "${pw_error}" { exit ${FAIL_PASSWORD} } - "${ac_error}" { exit ${FAIL_PERMISSION}} - timeout { exit ${FAIL_TIMEOUT3} } - } - } - timeout { exit ${FAIL_TIMEOUT1} } - } - } - "(yes/no)?" 
{ - send "yes\r" - exp_continue - } - "No route to host" { - exit ${FAIL_UNREACHABLE} - } - "Could not resolve hostname" { - exit ${FAIL_UNREACHABLE} - } - timeout { exit ${FAIL_TIMEOUT} } - } -EOF - local rc=${?} - if [ ${rc} -ne ${PASS} ] ; then - print_status "Error: clean_scratch_dir_remote ${this_hostname} failed" ${rc} - fi - return ${rc} -} - -# -# deletes a remote directory or file -# -# $1 - this hostname -# $2 - dir or file with full path -# -function delete_remote_dir_or_file() -{ - local this_hostname=${1} - local dir_or_file=${2} - -/usr/bin/expect << EOF - log_user ${USER_LOG_MODE} - spawn bash -i - expect -re $ - set timeout 60 - send "${SSH_CMD} sysadmin@${this_hostname}\n" - expect { - "assword:" { - send "${pw}\r" - expect { - "${this_hostname}:" { - set timeout 10 - expect -re $ - send "sudo rm -rf ${dir_or_file} ; cat ${cmd_done_file}\n" - expect { - "assword:" { send -- "${pw}\r" ; exp_continue } - "${cmd_done_sig}" { exit ${PASS} } - "${cmd_done_file}: No such file or directory" { exit ${PASS} } - "annot remove" { exit ${FAIL_CLEANUP} } - "${pw_error}" { exit ${FAIL_PASSWORD} } - "${ac_error}" { exit ${FAIL_PERMISSION}} - timeout { exit ${FAIL_TIMEOUT3} } - } - } - timeout { exit ${FAIL_TIMEOUT1} } - } - } - "(yes/no)?" { - send "yes\r" - exp_continue - } - "No route to host" { - exit ${FAIL_UNREACHABLE} - } - "Could not resolve hostname" { - exit ${FAIL_UNREACHABLE} - } - timeout { exit ${FAIL_TIMEOUT} } - } -EOF - local rc=${?} - if [ ${rc} -ne ${PASS} ] ; then - print_status "Error: delete_remote_dir_or_file ${this_hostname} failed" ${rc} - fi - return ${rc} -} - -HOST_COLLECT_ERROR_LOG="/tmp/host_collect_error.log" -# -# Fetch a file from a remote host using the global pw -# $1 - this hostname -# $2 - remote source path/filename -# $3 - local path destination -# -function get_file_from_host() -{ - local this_hostname=${1} - local remote_src=${2} - local local_dest=${3} - - remove_file_local ${HOST_COLLECT_ERROR_LOG} - -/usr/bin/expect << EOF - log_user ${USER_LOG_MODE} - spawn bash -i - set timeout ${SCP_TIMEOUT} - expect -re $ - send "${SCP_CMD} sysadmin@${this_hostname}:${remote_src} ${local_dest} 2>>${HOST_COLLECT_ERROR_LOG}\n" - expect { - "assword:" { - send "${pw}\r" - expect { - "100%" { exit ${PASS} } - "${pw_error}" { exit ${FAIL_PASSWORD} } - "${ac_error}" { exit ${FAIL_PERMISSION}} - timeout { exit ${FAIL_TIMEOUT1} } - } - } - "No route to host" { - exit ${FAIL_UNREACHABLE} - } - "Could not resolve hostname" { - exit ${FAIL_UNREACHABLE} - } - timeout { exit ${FAIL_TIMEOUT} } - } -EOF - local rc=${?} - if [ ${rc} -ne ${PASS} ] ; then - print_status "failed to get_file_from ${this_hostname}" ${rc} - else - # Look for "No space left on device" error - grep -q "${FAIL_OUT_OF_SPACE_STR}" ${HOST_COLLECT_ERROR_LOG} - if [ "$?" == "0" ] ; then - rc=${FAIL_OUT_OF_SPACE} - fi - fi - - remove_file_local ${HOST_COLLECT_ERROR_LOG} - - return ${rc} -} - -# -# Create the local dated collect dir where all -# the tarballs for this collect will get put. 
-# -# Permissions are set to make it easy to copy -# tarballs from remote host into -# -# $1 - the full path of the dir -# -function create_collect_dir_local() -{ - local dir=${1} - -/usr/bin/expect << EOF - log_user ${USER_LOG_MODE} - spawn bash -i - set timeout 10 - expect -re $ - send "sudo mkdir -m 775 -p ${dir} ; cat ${cmd_done_file}\n" - expect { - "assword:" { - send "${pw}\r" - expect { - "${cmd_done_sig}" { exit ${PASS} } - "${pw_error}" { exit ${FAIL_PASSWORD} } - "${ac_error}" { exit ${FAIL_PERMISSION}} - timeout { exit ${FAIL_TIMEOUT1} } - } - } - "${cmd_done_sig}" { exit ${PASS} } - "${ac_error}" { exit ${FAIL_PERMISSION}} - timeout { exit ${FAIL_TIMEOUT} } - } -EOF - local rc=${?} - if [ ${rc} -ne ${PASS} ] ; then - print_status "failed to create_collect_dir_local for ${dir}" ${rc} - fi - return ${rc} -} - -# -# Delete the specified file using sudo -# -# $1 - the file to be deleted with full path specified -# -function remove_file_local() -{ - local local_file=${1} - local rc=${PASS} - - if [ -e ${local_file} ] ; then - -/usr/bin/expect << EOF - log_user ${USER_LOG_MODE} - spawn bash -i - set timeout 10 - expect -re $ - send -- "sudo rm -f ${local_file} ; cat ${cmd_done_file}\n" - expect { - "assword:" { send -- "${pw}\r" ; exp_continue } - "${cmd_done_sig}" { exit ${PASS} } - "annot remove" { exit ${FAIL_CLEANUP} } - "${pw_error}" { exit ${FAIL_PASSWORD} } - "${ac_error}" { exit ${FAIL_PERMISSION} } - timeout { exit ${FAIL_TIMEOUT} } - } -EOF - local rc=${?} - if [ ${rc} -ne ${PASS} ] ; then - print_status "failed to remove_file_local ${local_file}" ${rc} - fi - fi - return ${rc} -} - -# -# Delete the specified directory using sudo -# -# $1 - the directory to be removed with full path specified -# -function remove_dir_local() -{ - local dir=${1} - -/usr/bin/expect << EOF - log_user ${USER_LOG_MODE} - spawn bash -i - set timeout 10 - expect -re $ - send -- "sudo rm -rf ${dir} ; cat ${cmd_done_file}\n" - expect { - "assword:" { send -- "${pw}\r" ; exp_continue } - "${cmd_done_sig}" { exit ${PASS} } - "annot remove" { exit ${FAIL_CLEANUP} } - "${pw_error}" { exit ${FAIL_PASSWORD} } - "${ac_error}" { exit ${FAIL_PERMISSION} } - timeout { exit ${FAIL_TIMEOUT} } - } -EOF - local rc=${?} - if [ ${rc} -ne ${PASS} ] ; then - print_status "failed to remove_dir_local ${dir}" ${rc} - fi - return ${rc} -} - -# -# Move a file and change permissions using sudo -# -# $1 - src path/file -# $2 - dest path/file -# -function move_file_local() -{ - local src=${1} - local dst=${2} - -/usr/bin/expect << EOF - log_user ${USER_LOG_MODE} - spawn bash -i - set timeout 10 - expect -re $ - send -- "sudo mv ${src} ${dst} ; cat ${cmd_done_file}\n" - expect { - "assword:" { send -- "${pw}\r" ; exp_continue } - "${cmd_done_sig}" { exit ${PASS} } - "annot remove" { exit ${FAIL_CLEANUP} } - "${pw_error}" { exit ${FAIL_PASSWORD} } - "${ac_error}" { exit ${FAIL_PERMISSION} } - timeout { exit ${FAIL_TIMEOUT} } - } -EOF - local rc=${?} - if [ ${rc} -ne ${PASS} ] ; then - print_status "failed to move_file_local ${src} to ${dst}" ${rc} - fi - return ${rc} -}
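# ----------------------------------------------------------------------
# Editorial note: the sudo helpers above all share one expect pattern:
# spawn an interactive bash, issue the sudo command followed by
# 'cat ${cmd_done_file}', answer any "assword:" prompt with the cached
# ${pw}, and treat the sentinel ${cmd_done_sig} as success (the sentinel
# variables come from the sourced environment). A stripped-down sketch
# of the pattern, with <some command> as a placeholder:
#
#   /usr/bin/expect << EOF
#       log_user ${USER_LOG_MODE}
#       spawn bash -i
#       expect -re $
#       send -- "sudo <some command> ; cat ${cmd_done_file}\n"
#       expect {
#           "assword:"        { send -- "${pw}\r" ; exp_continue }
#           "${cmd_done_sig}" { exit ${PASS} }
#           timeout           { exit ${FAIL_TIMEOUT} }
#       }
#   EOF
#
# Matching on "assword:" rather than "Password:" covers both upper and
# lower case prompts.
# ----------------------------------------------------------------------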
-# Append the echoed collect done with collect duration and file size -# ... done (HH:MM:SS xxM) -function echo_stats() -{ - local secs=${1} - local file=${2} - - echo -n " ($(date -d@${secs} -u +%H:%M:%S)" - if [ -e ${file} ] ; then - size=$(du -h ${file} | cut -f 1 2>/dev/null) - if [ $? -eq 0 ] ; then - printf " %5s)\n" "${size}" - return - fi - fi - echo ")" -} - - -# Handle clean command -if [ "${CLEAN}" == true ] ; then - for host in "${HOSTLIST[@]}" ; do - if [ "${host}" != " " ] ; then - - if [ "${host}" == "None" ] ; then - continue - elif [ "${host}" == "" ] ; then - continue - fi - - echo -n "cleaning ${host}:${COLLECT_BASE_DIR} ... " - if [ "${host}" == "${HOSTNAME}" ] ; then - clean_scratch_dir_local ${host} ${COLLECT_BASE_DIR} - if [ ${?} -eq ${PASS} ] ; then - echo "done" - fi - else - clean_scratch_dir_remote ${host} ${COLLECT_BASE_DIR} - if [ ${?} -eq ${PASS} ] ; then - echo "done" - fi - fi - logger -t ${COLLECT_TAG} "user cleaned ${host}:${COLLECT_BASE_DIR} content" - fi - done - exit 0 -fi - - -if [ ! -z ${COLLECT_TARNAME} ] ; then - - # User specified tarname - COLLECT_NAME=${COLLECT_TARNAME} - COLLECT_DIR="${COLLECT_BASE_DIR}/${COLLECT_NAME}" - TARBALL_NAME="${COLLECT_DIR}.tar" - named="user-named" - -elif [ "${ALLHOSTS}" = true ] ; then - - # All hosts bundle - COLLECT_NAME="ALL_NODES_${NOWDATE}" - COLLECT_DIR="${COLLECT_BASE_DIR}/${COLLECT_NAME}" - TARBALL_NAME="${COLLECT_DIR}.tar" - named="all-nodes" - - -elif [ ${HOSTS} -eq 1 ] ; then - - # Single host bundle - COLLECT_NAME="${HOSTLIST[0]}_${NOWDATE}" - COLLECT_DIR="${COLLECT_BASE_DIR}/${COLLECT_NAME}" - TARBALL_NAME="${COLLECT_DIR}.tar" - named="single-node" - -else - - # Otherwise it's a multi host bundle - COLLECT_NAME="SELECT_NODES_${NOWDATE}" - COLLECT_DIR="${COLLECT_BASE_DIR}/${COLLECT_NAME}" - TARBALL_NAME="${COLLECT_DIR}.tar" - named="selected-node" - -fi - -# -# Create the local collect directory where -# the tarball(s) will be temporarily stored -# -create_collect_dir_local "${COLLECT_DIR}" - -declare COLLECT_START_TIME=${SECONDS} - -declare -i longest_hostname=0 -for host in "${HOSTLIST[@]}" ; do - len=${#host} - if [ $len -gt ${longest_hostname} ] ; then - longest_hostname=$len - fi -done - -# -# Loop over all the targeted hosts and -# 1. run collect -# 2. copy the tarball to $COLLECT_DIR -# -for host in "${HOSTLIST[@]}" ; do - if [ "${host}" != " " ] ; then - - if [ "${host}" == "None" ] ; then - continue - elif [ "${host}" == "" ] ; then - continue - fi - - HOST_START_TIME=${SECONDS} - - TARNAME="${host}_${NOWDATE}" - - # line up the host names - echo -n "collecting" - len=${#host} - for ((i=len;i>${COLLECT_ERROR_LOG} ; cat ${cmd_done_file})\n" - expect { - "assword:" { - send "${pw}\r" - expect { - "${cmd_done_sig}" { exit ${PASS} } - "${pw_error}" { exit ${FAIL_PASSWORD} } - "${ac_error}" { exit ${FAIL_PERMISSION} } - timeout { exit ${FAIL_TIMEOUT1} } - } - } - timeout { exit ${FAIL_TIMEOUT} } - } -EOF - RETVAL=${?} - if [ ${RETVAL} -ne ${PASS} ] ; then - collect_errors ${HOSTNAME} - print_status "failed to create ${TARBALL_NAME}" ${RETVAL} - else - collect_errors ${HOSTNAME} - RETVAL=$? - if [ ${RETVAL} -eq ${PASS} ] ; then - secs=$((SECONDS-COLLECT_START_TIME)) - echo -n "done" - echo_stats $secs "${TARBALL_NAME}" - logger -t ${COLLECT_TAG} "created ${named} tarball ${TARBALL_NAME}" - else - echo "removing incomplete collect: ${TARBALL_NAME}" - remove_file_local "${TARBALL_NAME}" - fi - fi - remove_file_local ${COLLECT_ERROR_LOG} - remove_dir_local "${COLLECT_DIR}" - -# return to caller's dir -cd ${CURR_DIR} - -exit ${RETVAL} diff --git a/tools/collector/scripts/collect_ceph.sh b/tools/collector/scripts/collect_ceph.sh deleted file mode 100755 index 1a5863ed1..000000000 --- a/tools/collector/scripts/collect_ceph.sh +++ /dev/null @@ -1,81 +0,0 @@ -#!
/bin/bash -# -# Copyright (c) 2013-2014 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - - -# Loads Up Utilities and Commands Variables -source /usr/local/sbin/collect_parms -source /usr/local/sbin/collect_utils - -SERVICE="ceph" -LOGFILE="${extradir}/ceph.info" -echo "${hostname}: Ceph Info .........: ${LOGFILE}" - -function is_service_active { - active=`sm-query service management-ip | grep "enabled-active"` - if [ -z "$active" ] ; then - return 0 - else - return 1 - fi -} - -function exit_if_timeout { - if [ "$?" = "124" ] ; then - echo "Exiting due to ceph command timeout" >> ${LOGFILE} - exit 0 - fi -} - -############################################################################### -# Only Controller -############################################################################### -if [ "$nodetype" = "controller" ] ; then - - # Using timeout with all ceph commands because commands can hang for - # minutes if the ceph cluster is down. If ceph is not configured, the - # commands return immediately. - - delimiter ${LOGFILE} "ceph status" - timeout 30 ceph status >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG} - exit_if_timeout - - delimiter ${LOGFILE} "ceph mon dump" - timeout 30 ceph mon dump >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG} - exit_if_timeout - - delimiter ${LOGFILE} "ceph osd dump" - timeout 30 ceph osd dump >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG} - exit_if_timeout - - delimiter ${LOGFILE} "ceph osd tree" - timeout 30 ceph osd tree >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG} - exit_if_timeout - - delimiter ${LOGFILE} "ceph osd crush dump" - timeout 30 ceph osd crush dump >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG} - exit_if_timeout - - is_service_active - if [ "$?" = "0" ] ; then - exit 0 - fi - - delimiter ${LOGFILE} "ceph df" - timeout 30 ceph df >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG} - exit_if_timeout - - delimiter ${LOGFILE} "ceph osd df tree" - timeout 30 ceph osd df tree >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG} - exit_if_timeout - - delimiter ${LOGFILE} "ceph health detail" - timeout 30 ceph health detail >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG} - exit_if_timeout - -fi - -exit 0 diff --git a/tools/collector/scripts/collect_coredump.sh b/tools/collector/scripts/collect_coredump.sh deleted file mode 100644 index 7614909f4..000000000 --- a/tools/collector/scripts/collect_coredump.sh +++ /dev/null @@ -1,35 +0,0 @@ -#! /bin/bash -# -# Copyright (c) 2013-2014 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - - -# Loads Up Utilities and Commands Variables - -source /usr/local/sbin/collect_parms -source /usr/local/sbin/collect_utils - -SERVICE="coredump" -LOGFILE="${extradir}/${SERVICE}.info" - - -COREDUMPDIR="/var/lib/systemd/coredump" - -echo "${hostname}: Core Dump Info ....: ${LOGFILE}" - -files=`ls ${COREDUMPDIR} | wc -l` -if [ "${files}" == "0" ] ; then - echo "No core dumps" >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG} -else - COMMAND="ls -lrtd ${COREDUMPDIR}/*" - delimiter ${LOGFILE} "${COMMAND}" - ${COMMAND} >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG} - - COMMAND="md5sum ${COREDUMPDIR}/*" - delimiter ${LOGFILE} "${COMMAND}" - ${COMMAND} >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG} -fi - -exit 0 diff --git a/tools/collector/scripts/collect_crash.sh b/tools/collector/scripts/collect_crash.sh deleted file mode 100644 index fc8c7982c..000000000 --- a/tools/collector/scripts/collect_crash.sh +++ /dev/null @@ -1,30 +0,0 @@ -#! /bin/bash -# -# Copyright (c) 2016 Wind River Systems, Inc. 
-# -# SPDX-License-Identifier: Apache-2.0 -# - - -# Loads Up Utilities and Commands Variables - -source /usr/local/sbin/collect_parms -source /usr/local/sbin/collect_utils - -SERVICE="crash" -LOGFILE="${extradir}/${SERVICE}.info" - - -CRASHDIR="/var/crash" - -echo "${hostname}: Kernel Crash Info .: ${LOGFILE}" - -COMMAND="find ${CRASHDIR}" -delimiter ${LOGFILE} "${COMMAND}" -${COMMAND} >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG} - -COMMAND="rsync -a --include=*.txt --include=*/ --exclude=* ${CRASHDIR} ${basedir}/var/" -delimiter ${LOGFILE} "${COMMAND}" -${COMMAND} >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG} - -exit 0 diff --git a/tools/collector/scripts/collect_date b/tools/collector/scripts/collect_date deleted file mode 100755 index 22c62fb1b..000000000 --- a/tools/collector/scripts/collect_date +++ /dev/null @@ -1,1064 +0,0 @@ -#!/bin/bash -####################################################################### -# -# Copyright (c) 2017 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# -######################################################################## -# -# This file is a new member of the Titanium Cloud "Log Collect Utility". -# It implements the date-restricted collect enhancement. -# -# This file is invoked by collect_host when a date restricted -# collect using the --start-date and/or --end-date options is requested. -# -# This new date restricted collect service applies to /var/log and its -# subdirectories only. This service determines if a log file is to be -# included in a dated collect by looking at the logs at the head and tail -# of the files and subdirectories in /var/log. Those dates are then -# compared to the user specified date range. If a file is determined to -# contain logs within that date range then that file is included in the -# collect. A valid log date prefix is "YYYY-MM-DD". -# -# Unfortunately, not all log files contain the correct date placement and -# format. This feature has implemented special case handling for many, but -# not all, such cases. To avoid accidental exclusion of a key file, this -# feature will by default include log files whose log date content could -# not be determined if its file date is after the specified start date. -# -# Note: local convention, example ${head_date} vs ${HEAD_DATE} -# -# Lower case date variables contain integer values while -# Upper case date variables contain formatted string values of same. -# -# Calling sequence (argument placeholders inferred from the argument -# handling below): -# -# /usr/local/sbin/collect_date <start-date> <end-date> <include-file-list> <debug> -# /usr/local/sbin/collect_date 20170701 20170901 /tmp/file.list true -# -######################################################################## - -# -# Import commands, variables and convenience functions available to -# all collectors ; common and user defined. -# -source /usr/local/sbin/collect_utils - -# where to find the logs -declare -r baselogdir="/var/log" - -# include / exclude labels -declare -r INCLUDE_FILE="inc" -declare -r EXCLUDE_FILE="exc" - -# a global reason string that is only valid -# in the context of the file being looked at.
-declare __this_reason="" - -# setup defaults -INC_FILE_LIST="/var/run/collect_include.list" -EXC_FILE_LIST="/var/run/collect_exclude.list" -NOD_FILE_LIST="/var/run/collect_nodate.list" - -BOT_DATE="2000-01-01" # beginning of time date -bot_date=730013 # beginning of time date as integer - -EOT_DATE="9999-12-31" # end of time date -eot_date=3649810 # end of time date as integer - -# manage debug mode -DEBUG="${4}" -set_debug_mode "${DEBUG}" -echo "Debug Mode: ${DEBUG}" - -dlog "collect_date args: ${1} ${2} ${3} ${4} ${5}" - -############################################################################# -# -# 'track' is the main accounting procedure that manages file inclusions and -# exclusions as well as the metrics around all the parsed files. -# -# It also reports accounting mismatch logs, if they occur (should not) -# and the file that started the mismatch (to assist in debug). -# -# $1 - filename -# $2 - label -# -############################################################################# - -# accounting defaults -declare -i file_count=0 -declare -i inc_file_count=0 -declare -i exc_file_count=0 -declare -i empty_file_count=0 - -function track() -{ - local fn="${1}" - local label="${2}" - - if [ -z "${fn}" ] ; then - elog "Ignoring call with empty filename" - return - - elif [ "${label}" == "totals" ] ; then - ((file_count++)) - return - - elif [ "${label}" == "empty" ] ; then - ((empty_file_count++)) - return - - elif [ "${label}" == "${INCLUDE_FILE}" ] ; then - manage_file "${fn}" "${label}" "${__this_reason}" - ((inc_file_count++)) - - elif [ "${label}" == "${EXCLUDE_FILE}" ] ; then - manage_file "${fn}" "${label}" "${__this_reason}" - ((exc_file_count++)) - - else - elog "Unknown label '${label}'" - - fi - - sum=$((inc_file_count + exc_file_count)) - if [ ${file_count} -ne ${sum} ] ; then - wlog "MISMATCH: ${file_count} != ${inc_file_count} + ${exc_file_count} - ${fn}" - fi -} - -############################################################################ -# -# 'summary' is an accounting display procedure used to show the -# accounting results of the total number of files processed, -# number of empty files and most importantly the number of -# included or excluded files. -# -############################################################################ - -function summary() -{ - dlog "Summary:" - dlog "Total Files: ${file_count}" - dlog "Empty Files: ${empty_file_count}" - dlog "Added Files: ${inc_file_count}" - dlog "Omitd Files: ${exc_file_count}" -} - -############################################################################# -# -# 'date_to_int' converts a standard formatted YYYY-MM-DD string date -# to an integer and stores it in __this_integer_date variable -# to be used in context on demand. -# -############################################################################# - -# short lived global integer date value updated by date_to_int utility -declare -i __this_integer_date="" - -function date_to_int() -{ - local yy="${1:0:4}" - local mm="${1:5:2}" - local dd="${1:8:2}" - - # handle leading zeros in month and day - if [ "${mm:0:1}" == "0" ] ; then - mm=${mm:1:1} - fi - if [ "${dd:0:1}" == "0" ] ; then - dd=${dd:1:1} - fi - - # 365 days in a year, 31 days in a month, 1 day in a day - __this_integer_date=$((yy*365 + mm*31 + dd)) -}
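# ----------------------------------------------------------------------
# Worked example (editorial, not in the original file): the encoding is
# a simple ordinal, e.g.
#   2017-10-02  ->  2017*365 + 10*31 + 2 = 736517
# which turns date range checks into plain integer compares. Note that
# mm*31 + dd can reach 12*31 + 31 = 403, which is greater than 365, so
# dates within a few days of a year boundary can compare slightly out
# of order; a known limitation of this simple encoding.
# ----------------------------------------------------------------------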
-############################################################################ -# -# 'create_list_file' removes an old/stale list file and creates a new empty -# one with correct permissions. -# -############################################################################ - -function create_list_file() -{ - local fn="${1}" - if [ -e "${fn}" ] ; then - rm -f "${fn}" - fi - touch "${fn}" - chmod 644 "${fn}" -} - -######################################################################## -# -# Handle the incoming 'start' and 'end' date format defensively. -# -# If the date comes with no dashes, as it would from the user's -# date specification, then set it up like the standard date delimited with '-' -# i.e. 20171002 is updated to 2017-10-02. -# -# If verified to be in the standard format just copy in. -# -# Otherwise assume the start date is from the beginning of time or -# the end date is the end of time. - -# load up the start date string and integer representation -if [ -z "${1}" ] ; then - START_DATE="${BOT_DATE}" -elif [[ "${1}" =~ [0-9]{4}[0-9]{2}[0-9]{2} ]] ; then - START_DATE="${1:0:4}-${1:4:2}-${1:6:2}" -elif [[ "${1}" =~ [0-9]{4}-[0-9]{2}-[0-9]{2} ]] ; then - START_DATE="${1}" -else - START_DATE="${BOT_DATE}" -fi - -# Convert the correct or corrected 'start' date to an integer value -date_to_int "${START_DATE}" -start_date=${__this_integer_date} - - -# load up the end date string and integer representation -if [ -z "${2}" ] ; then - END_DATE="${EOT_DATE}" -elif [[ "${2}" =~ [0-9]{4}[0-9]{2}[0-9]{2} ]] ; then - END_DATE="${2:0:4}-${2:4:2}-${2:6:2}" -elif [[ "${2}" =~ [0-9]{4}-[0-9]{2}-[0-9]{2} ]] ; then - END_DATE="${2}" -else - END_DATE="${EOT_DATE}" -fi - -# Convert the correct or corrected 'end' date to an integer value -date_to_int "${END_DATE}" -end_date=${__this_integer_date} - -# Handle user error of specifying an end date that is before the start date -if [ ${start_date} -gt ${end_date} ] ; then - wlog "invalid date range ; end date (${END_DATE}:${end_date}) is before start (${START_DATE}:${start_date})" - START_DATE="${BOT_DATE}" - END_DATE="${EOT_DATE}" - start_date=${bot_date} - end_date="${eot_date}" - wlog "correcting to defaults: from ${START_DATE} to ${END_DATE}" -fi - -ilog "collecting log files containing logs dated ${START_DATE} to ${END_DATE} (inclusive)" - - -if [ "${3}" == "" ] ; then - elog "dated collect include file list name not specified ... exiting" - exit 1 -else - VAR_LOG_INCLUDE_LIST=${3} -fi - -create_list_file "${VAR_LOG_INCLUDE_LIST}" -create_list_file "${INC_FILE_LIST}" -create_list_file "${EXC_FILE_LIST}" -create_list_file "${NOD_FILE_LIST}" - -# Declare and init the include and exclude debug lists. -inclist=("") -exclist=("")
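# ----------------------------------------------------------------------
# Usage note (editorial): both accepted date forms normalize to the same
# window, so these two invocations are equivalent (the list-file path is
# whatever collect_host passes in):
#
#   /usr/local/sbin/collect_date 20170701 20170901 /tmp/file.list true
#   /usr/local/sbin/collect_date 2017-07-01 2017-09-01 /tmp/file.list true
#
# An empty or unparseable date falls back to BOT_DATE / EOT_DATE, and an
# inverted range is reset to those defaults as handled above.
# ----------------------------------------------------------------------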
-############################################################################# -# -# 'filedatelist' is a list of files that are known to not contain dated logs. -# Instead these files are included unless their file date is -# older than the specified start date. -# -############################################################################# - -filedatelist=("") -filedatelist+=("/var/log/wtmp") -filedatelist+=("/var/log/dmesg") -filedatelist+=("/var/log/dmesg.old") -filedatelist+=("/var/log/sm-trap.log") -filedatelist+=("/var/log/sm-customer.log") -filedatelist+=("/var/log/sm-customer.alarm") -filedatelist+=("/var/log/sm-shutdown.log") -filedatelist+=("/var/log/nfv-vim-events.log") -filedatelist+=("/var/log/fm-customer.log") -filedatelist+=("/var/log/fm-alarm.log") -filedatelist+=("/var/log/lighttpd-access.log") -filedatelist+=("/var/log/audit/audit.log") -filedatelist+=("/var/log/rabbitmq/shutdown_log") -filedatelist+=("/var/log/rabbitmq/startup_log") -filedatelist+=("/var/log/rabbitmq/wait_log") -filedatelist+=("/var/log/rabbitmq/rabbit@localhost.log") -filedatelist+=("/var/log/nfv-vim-alarms.log") -filedatelist+=("/var/log/vswitch.cmds.log") - -# This is a list of files to always include -autoaddlist=("") -autoaddlist+=("/var/log/collect.log") - -######################################################################### -# -# 'is_in_range' returns true if the specified log file date range -# is within the bounded date range specified by the caller. -# Otherwise a false is returned. -# -# ${1} is HEAD_DATE and is the date of the first log of the file in context -# ${2} is TAIL_DATE and is the date of the last log in the file in context -# -# expected date format is ... YYYY-MM-DD -# -# Calling Sequence is ... is_in_range HEAD_DATE TAIL_DATE -# -# There are several cases that are handled ; -# see the case comments inline below. -# -######################################################################### - -function is_in_range() -{ - local HEAD_DATE="${1}" - local TAIL_DATE="${2}" - if [[ ${HEAD_DATE} =~ [0-9]{4}-[0-9]{2}-[0-9]{2} ]]; then - - # Convert the date to an integer value - # to make the compare easier and faster - date_to_int "${HEAD_DATE}" - head_date=${__this_integer_date} - - if [[ ${TAIL_DATE} =~ [0-9]{4}-[0-9]{2}-[0-9]{2} ]]; then - - # Convert the date to an integer value - # to make the compare easier and faster - date_to_int "${TAIL_DATE}" - tail_date=${__this_integer_date} - - in_range=false - - # Case 0: the last log is before the start date or the first log is after the end date - # if [[ "${TAIL_DATE}" < "${START_DATE}" || "${HEAD_DATE}" > "${END_DATE}" ]] ; then - if [ ${tail_date} -lt ${start_date} -o ${head_date} -gt ${end_date} ] ; then - __this_reason+=":case 0" - in_range=false - - # Case 1: the head is after the start but before the end date - # .... S ... head ... E .... - elif [ ${head_date} -ge ${start_date} -a ${head_date} -le ${end_date} ] ; then - __this_reason+=":case 1" - in_range=true - - # Case 2: the tail is after the start but before the end date - # .... S ... tail ... E .... - elif [ ${tail_date} -ge ${start_date} -a ${tail_date} -le ${end_date} ] ; then - __this_reason+=":case 2" - in_range=true - - # Case 3: log file date range spans the start and end dates - # head S ... ... E tail - elif [ ${head_date} -le ${start_date} -a ${tail_date} -ge ${end_date} ] ; then - __this_reason+=":case 3" - in_range=true - - else - __this_reason+=":default" - fi - else - __this_reason+=":invalid-tail-date" - # so the tail date is unknown ; - # include this file as long as the head date is before the end date - if [ ${head_date} -lt ${end_date} ] ; then - in_range=true - else - in_range=false - fi - fi - - if [ "${in_range}" = true ] ; then - __this_reason+=":in-range ${HEAD_DATE} to ${TAIL_DATE}" - true - else - __this_reason+=":out-of-range ${HEAD_DATE} to ${TAIL_DATE}" - false - fi - return - fi - - __this_reason+=":date-format-error ${HEAD_DATE} to ${TAIL_DATE}" - true - return -}
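# ----------------------------------------------------------------------
# Worked example (editorial): with START_DATE=2017-01-01 and
# END_DATE=2017-01-31, the cases above classify as follows:
#
#   is_in_range 2016-06-15 2017-01-10   -> case 2, in-range   (tail inside)
#   is_in_range 2017-01-05 2017-02-20   -> case 1, in-range   (head inside)
#   is_in_range 2016-06-01 2017-03-01   -> case 3, in-range   (spans both)
#   is_in_range 2017-02-02 2017-02-10   -> case 0, out-of-range
# ----------------------------------------------------------------------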
-########################################################################### -# -# Name : want_this_file -# -# Description: This utility first compares the filename to known exception -# cases and handles them accordingly. Exception cases do look -# for the date but with different methods. Once the date info -# is or is not found then the choice of whether or not to include -# it follows the same general logic as below. -# -# If not an exception case then it determines the file type -# and performs any preprocessing required. i.e. uncompressing -# the file and switching the filename to the uncompressed name. -# Data files or other unknown file types are automatically -# included without further data query by immediately returning -# true. -# -# With an expected supported filename in hand this utility will -# extract the date-only (time not included) portion, the first -# 10 characters of the first and last logs, and determine if this -# logfile has logs that fall within the specified date range. -# -# Returns : If there is no valid date found then true is returned. -# If file contains in range logs then true is returned. -# If file does not contain in range logs then false is returned. -# -# Parameters : $1 is the full pathed log file name. -# -# $1 - the filename of the file to check the date for -# -########################################################################### - -function want_this_file() -{ - local inc=true - local LOGFILE="${1}" - local filetype=$(file "${LOGFILE}") - local HEAD_DATE="" - local TAIL_DATE="" - - for add in "${autoaddlist[@]}" - do - if [ "${add}" == "${LOGFILE}" ] ; then - __this_reason+="autoadd" - true - return - fi - done - - ########################################################################## - # Exception Case: known free formatted log files. - ########################################################################## - # - # Some log files are known to not contain properly dated logs. - # Such files may just contain free format strings of information. - # - # A list of such files is hard coded in filedatelist. - # TODO: consider making this a file that is loaded. - # - # Check to see if this file is in that list. - # Only exclude such files if their last modified date is before the start date. - # - ########################################################################## - for add in "${filedatelist[@]}" - do - if [ "${add}" == "${LOGFILE}" ] ; then - __this_reason+="filedate" - - # Don't include empty files that are in the hard coded filedatelist - filetype=$(file "${LOGFILE}") - if [ ! -z "${filetype}" ] ; then - case ${filetype} in - *empty*) - __this_reason="empty" - track "${LOGFILE}" "empty" - false - return - ;; - *) - ;; - esac - fi - - # get last modified date - FILE_DATE=$(stat -c %y "${LOGFILE}" | cut -b 1-10) - date_to_int "${FILE_DATE}" - if [ ${__this_integer_date} -ge ${start_date} ] ; then - __this_reason+=":in-range ${FILE_DATE}" - true - else - __this_reason+=":out-of-range ${FILE_DATE}" - false - fi - return - fi - done - - # O.K. if we get here then this filename is not in the static list
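# ----------------------------------------------------------------------
# Editorial note on the compressed-file handling in the case statement
# below (a sketch, not part of the original script): rotated .gz logs
# are date-checked without full decompression by sampling a few lines
# from each end into a temp file and recursing on that sample, e.g.:
#
#   tmp=$(mktemp)
#   zcat /var/log/kern.log.1.gz | head -5 >  "$tmp"   # first few logs
#   zcat /var/log/kern.log.1.gz | tail -5 >> "$tmp"   # last few logs
#   want_this_file "$tmp"                             # date-check sample
#   rm -f "$tmp"
#
# The log file path above is hypothetical.
# ----------------------------------------------------------------------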
-if [ ! -z "${filetype}" ] ; then - - case ${filetype} in - - *directory*) - # Skip over a directory only path. - # No worries, the files in that directory will be handled. - __this_reason+="directory" - false - return - ;; - - *ASCII*|*text*|*compressed*) - - if [[ ${filetype} == *"compressed"* ]] ; then - fileext=${LOGFILE##*.} - case "${fileext}" in - gz) - tmpfile=$(mktemp) - #__this_reason+="gzipped" - zcat "${LOGFILE}" | head -5 > "$tmpfile" - zcat "${LOGFILE}" | tail -5 >> "$tmpfile" - - # save the current compressed log filename - # so that it can be restored after the - # recursion call below - LOGFILE_save="${LOGFILE}" - want_this_file "$tmpfile" - rc=${?} - LOGFILE="${LOGFILE_save}" - - # cleanup ; get rid of the temp file - rm -f "$tmpfile" 2>/dev/null - if [ ${rc} -eq 0 ] ; then - true - else - false - fi - return - ;; - tgz) - __this_reason+="tarball" - true - return - ;; - *) - __this_reason+="compress:[${fileext}]" - true - return - ;; - esac - fi - - # Read the first log in the file - HEAD_DATE=$(head -1 "${LOGFILE}") - - ############################################################## - # Minor Exception Case: empty/short first log - ############################################################## - # - # handle one empty or short first line by fetching the second log - # - ############################################################## - - if [ ${#HEAD_DATE} -lt 10 ] ; then - HEAD_DATE=$(head -2 "${LOGFILE}" | sed -n '2p' | cut -b 1-11) - fi - - - ############################################################## - # Typical Case: YYYY-MM-DD - ############################################################## - # - # check for the most typical date format. - # - ############################################################## - - if [[ ${HEAD_DATE} =~ ^[0-9]{4}-[0-9]{2}-[0-9]{2} ]]; then - __this_reason+="typical" - TAIL_DATE=$(tail -1 "${LOGFILE}" | cut -b 1-11) - if [[ ${TAIL_DATE} =~ ^[0-9]{4}-[0-9]{2}-[0-9]{2} ]]; then - - # a call to 'is_in_range' returns false (1) if this - # file's logs are all outside the date range - is_in_range "${HEAD_DATE:0:10}" "${TAIL_DATE:0:10}" - if [ $? -eq 0 ] ; then - true - else - false - fi - return - - else - - ####################################################### - # Exception Case: Unrecognized date format in last log - ####################################################### - # - # try the second last line. This case is typical in - # cron.log in 15.12 MAIL logs which send a spurious ')' - # as a second log. Also if the log file has auto blank - # lines between logs leaving a blank line as the last - # log. - # - # this exception tries the second last log instead. - # - ####################################################### - TAIL_DATE=$(tail -2 "${LOGFILE}" | sed -n '1p' | cut -b 1-11) - if [[ ${TAIL_DATE} =~ ^[0-9]{4}-[0-9]{2}-[0-9]{2} ]]; then - - is_in_range "${HEAD_DATE:0:10}" "${TAIL_DATE:0:10}" - if [ $? -eq 0 ] ; then - true - else - false - fi - return - - else - # default to true if the dates could not be parsed - __this_reason+=":invalid-tail-date" - - date_to_int "${HEAD_DATE}" - head_date=${__this_integer_date} - - # so the tail date is unknown ;
-                # Read the first log in the file
-                HEAD_DATE=$(head -1 "${LOGFILE}")
-
-                ##############################################################
-                # Minor Exception Case: empty/short first log
-                ##############################################################
-                #
-                # handle one empty or short first line by fetching the
-                # second log instead
-                #
-                ##############################################################
-
-                if [ ${#HEAD_DATE} -lt 10 ] ; then
-                    HEAD_DATE=$(head -2 "${LOGFILE}" | sed -n '2p' | cut -b 1-11)
-                fi
-
-                ##############################################################
-                # Typical Case: YYYY-MM-DD
-                ##############################################################
-                #
-                # check for the most typical date format.
-                #
-                ##############################################################
-
-                if [[ ${HEAD_DATE} =~ ^[0-9]{4}-[0-9]{2}-[0-9]{2} ]]; then
-                    __this_reason+="typical"
-                    TAIL_DATE=$(tail -1 "${LOGFILE}" | cut -b 1-11)
-                    if [[ ${TAIL_DATE} =~ ^[0-9]{4}-[0-9]{2}-[0-9]{2} ]]; then
-
-                        # a call to 'is_in_range' returns false (1) if this
-                        # file's logs are all outside the date range
-                        is_in_range "${HEAD_DATE:0:10}" "${TAIL_DATE:0:10}"
-                        if [ $? -eq 0 ] ; then
-                            true
-                        else
-                            false
-                        fi
-                        return
-
-                    else
-
-                        #######################################################
-                        # Exception Case: Unrecognized date format in last log
-                        #######################################################
-                        #
-                        # Try the second last line. This case is typical of
-                        # cron.log and of 15.12 MAIL logs, which send a
-                        # spurious ')' as a second log. Also seen when the
-                        # log file has blank lines between logs, leaving a
-                        # blank line as the last log.
-                        #
-                        # This exception tries the second last log instead.
-                        #
-                        #######################################################
-                        TAIL_DATE=$(tail -2 "${LOGFILE}" | sed -n '1p' | cut -b 1-11)
-                        if [[ ${TAIL_DATE} =~ ^[0-9]{4}-[0-9]{2}-[0-9]{2} ]]; then
-
-                            is_in_range "${HEAD_DATE:0:10}" "${TAIL_DATE:0:10}"
-                            if [ $? -eq 0 ] ; then
-                                true
-                            else
-                                false
-                            fi
-                            return
-
-                        else
-                            # default to true if the dates could not be parsed
-                            __this_reason+=":invalid-tail-date"
-
-                            date_to_int "${HEAD_DATE}"
-                            head_date=${__this_integer_date}
-
-                            # so the tail date is unknown.
-                            # include this file as long as the head date is
-                            # before the end date
-                            if [ ${head_date} -lt ${end_date} ] ; then
-                                true
-                            else
-                                false
-                            fi
-                            return
-                        fi
-                    fi
-
-                else
-
-                    ###########################################################
-                    # Exception Case 1: log date prefix starts with '['
-                    ###########################################################
-                    #
-                    # logdate starts with a '[' ... [2017-10-02
-                    #
-                    # In this case we just recognize it, increment past it,
-                    # and then assume the last log will have the same format.
-                    #
-                    ###########################################################
-
-                    if [ "${HEAD_DATE:0:1}" == "[" ] ; then
-                        __this_reason+="exception1"
-                        HEAD_DATE=${HEAD_DATE:1:11}
-                        if [[ ${HEAD_DATE} =~ ^[0-9]{4}-[0-9]{2}-[0-9]{2} ]]; then
-
-                            TAIL_DATE=$(tail -1 "${LOGFILE}" | cut -b 2-11)
-                            if [[ ${TAIL_DATE} =~ ^[0-9]{4}-[0-9]{2}-[0-9]{2} ]]; then
-                                __this_reason+=".1"
-                                is_in_range "${HEAD_DATE:0:10}" "${TAIL_DATE:0:10}"
-                                if [ $? -eq 0 ] ; then
-                                    true
-                                else
-                                    false
-                                fi
-                                return
-                            else
-                                TAIL_DATE=$(tail -1 "${LOGFILE}" | cut -b 1-10)
-                                if [[ ${TAIL_DATE} =~ ^[0-9]{4}-[0-9]{2}-[0-9]{2} ]]; then
-                                    __this_reason+=".2"
-                                    is_in_range "${HEAD_DATE:0:10}" "${TAIL_DATE:0:10}"
-                                    if [ $? -eq 0 ] ; then
-                                        true
-                                    else
-                                        false
-                                    fi
-                                    return
-
-                                else
-
-                                    if [ "${TAIL_DATE:0:1}" == "[" ] ; then
-                                        __this_reason+=".3"
-                                        TAIL_DATE=${TAIL_DATE:1:11}
-                                        if [[ ${TAIL_DATE} =~ ^[0-9]{4}-[0-9]{2}-[0-9]{2} ]]; then
-                                            is_in_range "${HEAD_DATE}" "${TAIL_DATE}"
-                                            if [ $? -eq 0 ] ; then
-                                                true
-                                            else
-                                                false
-                                            fi
-                                            return
-                                        else
-                                            __this_reason+=":invalid-tail-date"
-                                            true
-                                            return
-                                        fi
-                                    else
-                                        __this_reason+=":tail-date-not-found"
-                                        is_in_range "${HEAD_DATE}" "${EOT_DATE}"
-                                        if [ $? -eq 0 ] ; then
-                                            true
-                                        else
-                                            false
-                                        fi
-                                        return
-                                    fi
-                                fi
-                            fi
-                        else
-                            # /var/log/dmesg is typical of this case:
-                            # no log date, and many logs start with [ uptime]
-                            __this_reason+=":invalid-head-date"
-                            true
-                            return
-                        fi
-
-                    ###########################################################
-                    # Exception Case 2: journal.log handling
-                    ###########################################################
-                    #
-                    # first log in file contains start and stop date
-                    #
-                    # "-- Logs begin at Thu 2017-07-06 12:28:35 UTC, end at Thu 2017-07-06 12:33:31 UTC. --"
-                    #                      ^^^^^^^^^^                          ^^^^^^^^^^
-                    #
-                    # This exception case gets the head and tail log date from
-                    # this first log.
-                    ###########################################################
-
-                    elif [ "${HEAD_DATE:0:13}" == "-- Logs begin" ] ; then
-                        __this_reason+="exception2"
-
-                        # need to get more of the line
-                        HEAD_DATE=$(head -1 "${LOGFILE}")
-
-                        is_in_range "${HEAD_DATE:21:10}" "${HEAD_DATE:57:10}"
-                        if [ $? -eq 0 ] ; then
-                            true
-                        else
-                            false
-                        fi
-                        return
-
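Exception 2 leans on the journald banner's fixed layout: the begin and end dates always sit at character offsets 21 and 57 of the first line. A standalone sketch of that slicing, using the banner quoted in the comment above:

    #!/bin/bash
    # Sketch: slice the begin/end dates out of a journal banner by offset.
    banner="-- Logs begin at Thu 2017-07-06 12:28:35 UTC, end at Thu 2017-07-06 12:33:31 UTC. --"

    begin_date=${banner:21:10}   # "2017-07-06" - date after "begin at Thu "
    end_date=${banner:57:10}     # "2017-07-06" - date after "end at Thu "
    echo "journal covers ${begin_date} .. ${end_date}"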
-                    ###########################################################
-                    # Exception Case 3: keystone:log prefix handling
-                    ###########################################################
-                    #
-                    # some logs, like openstack.log, contain logs that are
-                    # prefixed by keystone:log. This case handles that.
-                    #
-                    ###########################################################
-                    elif [ "${HEAD_DATE:0:13}" == "keystone:log " ] ; then
-                        __this_reason+="exception3"
-
-                        # need to get more of the line
-                        HEAD_DATE="${HEAD_DATE:13:10}"
-                        TAIL_DATE=$(tail -1 "${LOGFILE}")
-
-                        if [ "${TAIL_DATE:0:13}" == "keystone:log " ] ; then
-                            TAIL_DATE="${TAIL_DATE:13:10}"
-                        else
-                            TAIL_DATE="${TAIL_DATE:0:10}"
-                        fi
-
-                        is_in_range "${HEAD_DATE}" "${TAIL_DATE}"
-                        if [ $? -eq 0 ] ; then
-                            true
-                        else
-                            false
-                        fi
-                        return
-
-                    else
-
-                        #######################################################
-                        # Exception Case 4: horizon.log
-                        #######################################################
-                        #
-                        # Search the first and last 30 logs for a valid date.
-                        # This should handle seeing a traceback at the head
-                        # or tail of the log file.
-                        #
-                        #######################################################
-                        __this_reason+="exception4"
-                        temp_head=$(head -30 "${LOGFILE}")
-                        for ((loop_head=1;loop_head<31;loop_head++))
-                        do
-                            HEAD_DATE=$(echo "${temp_head}" | sed -n "${loop_head}p" | cut -b 1-10)
-                            if [[ ${HEAD_DATE} =~ ^[0-9]{4}-[0-9]{2}-[0-9]{2} ]]; then
-                                temp_tail=$(tail -30 "${LOGFILE}")
-                                for ((loop_tail=1;loop_tail<31;loop_tail++))
-                                do
-                                    TAIL_DATE=$(echo "${temp_tail}" | sed -n "${loop_tail}p" | cut -b 1-10)
-                                    if [[ ${TAIL_DATE} =~ ^[0-9]{4}-[0-9]{2}-[0-9]{2} ]]; then
-
-                                        is_in_range "${HEAD_DATE}" "${TAIL_DATE}"
-                                        if [ $? -eq 0 ] ; then
-                                            true
-                                        else
-                                            false
-                                        fi
-                                        return
-
-                                    fi
-                                done
-
-                                # default to including it if no date at
-                                # the end of the file is found
-                                true
-                                return
-                            fi
-                        done
-
-                        ######################################################
-                        # Exception Case 5:
-                        ######################################################
-                        #
-                        # Otherwise the file has no date, or the date format
-                        # is unrecognized, so just include the file
-                        # regardless of its date.
-                        #
-                        ######################################################
-                        __this_reason="nodate"
-                        true
-                        return
-                    fi
-                fi
-                ;;
-
-            *archive*)
-
-                # Archive files like .tar are not extracted.
-                # Instead they are only collected if their last modified
-                # date is on or after the start date.
-
-                __this_reason+="archive"
-                FILE_DATE=$(stat -c %y "${LOGFILE}" | cut -b 1-10)
-                date_to_int "${FILE_DATE}"
-                if [ ${__this_integer_date} -ge ${start_date} ] ; then
-                    __this_reason+=":in-range ${FILE_DATE}"
-                    true
-                else
-                    __this_reason+=":out-of-range ${FILE_DATE}"
-                    false
-                fi
-                return
-                ;;
-
-            *empty*)
-                __this_reason="empty"
-                track "${LOGFILE}" "empty"
-                false
-                return
-                ;;
-
-            *data*)
-                __this_reason="data"
-                true
-                return
-                ;;
-
-            *executable*)
-                __this_reason="executable"
-                true
-                return
-                ;;
-
-            # very short file (no magic)
-            *"very short file"*)
-                __this_reason="small"
-                true
-                return
-                ;;
-
-            *link*)
-                __this_reason="link"
-                false
-                return
-                ;;
-
-            *swap*)
-                __this_reason="swap"
-                false
-                return
-                ;;
-
-            *fifo*)
-                __this_reason="fifo"
-                false
-                return
-                ;;
-
-            *socket*)
-                __this_reason="socket"
-                false
-                return
-                ;;
-
-            *)
-                __this_reason="other"
-                true
-                return
-                ;;
-        esac
-    else
-        __this_reason="unknown"
-        wlog "Adding ${LOGFILE} ; unknown filetype"
-        true
-        return
-    fi
-
-    # catch all default
-    true
-    return
-}
-
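At its core, want_this_file is a case statement over the magic type reported by file(1), with only text-like files getting a date inspection. The dispatch skeleton in miniature, with the categories trimmed to a representative few; the default target path is a hypothetical example:

    #!/bin/bash
    # Sketch: route a file by the type string that file(1) reports.
    f="${1:-/var/log/example.log}"   # hypothetical default target

    case $(file "$f") in
        *directory*)                 echo "skip: directory"            ;;
        *empty*)                     echo "skip: empty"                ;;
        *ASCII*|*text*|*compressed*) echo "inspect: dated log file"    ;;
        *)                           echo "include: unclassified file" ;;
    esac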
-#############################################################################
-#
-# 'manage_file' adds the specified file to either the 'include' or 'exclude'
-#               reason lists. In the include case the most important part of
-#               this function appends the filename to the file specified by
-#               "VAR_LOG_INCLUDE_LIST" which is the file that collect_host
-#               uses to know what files in /var/log need to be included in
-#               the collect tarball.
-#
-#############################################################################
-
-function manage_file()
-{
-    local filename="${1}"
-    local action="${2}"
-    local reason="${3}"
-
-    if [ "${action}" == "${EXCLUDE_FILE}" ] ; then
-        echo "${filename} excluded (${reason})" >> "${EXC_FILE_LIST}"
-    else
-        echo "${filename} included (${reason})" >> "${INC_FILE_LIST}"
-
-        # add the file to the list of files to be collected
-        echo "${filename}" >> ${VAR_LOG_INCLUDE_LIST}
-    fi
-
-    dlog "${action}: ${filename} (${reason})"
-}
-
-#############################################################################
-#
-# 'handle_response' includes or excludes the specified file based on
-#                   argument $2 being 0 - true - include or
-#                                    !0 - false - exclude
-#
-#                   $1 - file
-#                   $2 - include control ( true or false )
-#
-#############################################################################
-
-function handle_response()
-{
-    local logfile="${1}"
-    local include="${2}"
-
-    if [ "${include}" -eq 0 ] ; then
-        inclist=("${inclist[@]}" ${logfile})
-        track "${logfile}" "${INCLUDE_FILE}"
-
-    else
-        exclist=("${exclist[@]}" ${logfile})
-        track "${logfile}" "${EXCLUDE_FILE}"
-    fi
-
-    # record any that have been tagged as 'nodate' as
-    # candidates for special handling.
-    if [[ "${__this_reason}" == *"nodate"* ]] ; then
-        echo "${logfile}" >> "${NOD_FILE_LIST}"
-    fi
-}
-
-###########################################################################
-###########################################################################
-#
-# Let's start looking at the files now ...
-#
-# Get all the files in /var/log base dir (not the subdirectories)
-#
-###########################################################################
-###########################################################################
-
-# get a list of the files in "baselogdir" ; aka /var/log
-# will look at the sub directories later.
-dirlist+=$(find ${baselogdir} -mindepth 1 -maxdepth 1 -type f)
-
-#
-# Debug:
-#
-# To debug handling a specific file as a filelist override.
-# This clears the list in favor of the specific file specified as
-# argument 5 on the command line.
-#
-if [ "${5}" != "" ] ; then
-    dlog "Overriding dirlist with specified file:${5}"
-    dirlist=("${5}")
-fi
-
-# echo "${baselogdir} filelist: ... ${dirlist}..."
-for logfile in ${dirlist}
-do
-    # echo "File: ${logfile}"
-    __this_reason=""
-    track "${logfile}" "totals"
-    want_this_file "${logfile}"
-    handle_response "${logfile}" "${?}"
-done
-
-###########################################################################
-# Get all the files in baselogdir subdirectories                          #
-###########################################################################
-
-subdirlist=$(find ${baselogdir} -mindepth 1 -maxdepth 20 -type d)
-
-#
-# Debug:
-#
-# To debug handling a specific file that is in a /var/log subdirectory as a
-# filelist override.
-#
-if [ "${5}" != "" ] ; then
-    dlog "Overriding subdirlist with specified file:${5}"
-    subdirlist=("")
-fi
-
-# echo "${baselogdir} subdirlist ${subdirlist}..."
-for logdir in ${subdirlist}
-do
-    __this_reason=""
-
-    # this find must find more than just its own dir
-    # so we compare to greater than one
-    if [ $(find "${logdir}" | wc -l) -gt 1 ]; then
-        for logfile in ${logdir}/*
-        do
-            __this_reason=""
-            track "$logfile" "totals"
-            want_this_file "$logfile"
-            handle_response "$logfile" "$?"
- done - else - __this_reason="empty" - manage_file "${logdir}" "${EXCLUDE_FILE}" "empty directory" - fi -done - - -dlog "Include List: ${INC_FILE_LIST}" -for inc in "${inclist[@]}" -do - if [ ${#inc} -gt 2 ] ; then - dlog "including ${inc}" - # echo "${inc}" >> "${INC_FILE_LIST}.summary" - fi -done - - -dlog "Exclude List: ${EXC_FILE_LIST}" -for exc in "${exclist[@]}" -do - if [ ${#exc} -gt 2 ] ; then - dlog "excluding ${exc}" - # echo "${exc}" >> "${EXC_FILE_LIST}.summary" - fi -done - -summary - -exit 0 diff --git a/tools/collector/scripts/collect_fm.sh b/tools/collector/scripts/collect_fm.sh deleted file mode 100644 index 4ef489a4b..000000000 --- a/tools/collector/scripts/collect_fm.sh +++ /dev/null @@ -1,41 +0,0 @@ -#! /bin/bash -# -# SPDX-License-Identifier: Apache-2.0 -# - - -# Loads Up Utilities and Commands Variables - -source /usr/local/sbin/collect_parms -source /usr/local/sbin/collect_utils - -SERVICE="alarms" -LOGFILE="${extradir}/${SERVICE}.info" - -function is_service_active { - active=`sm-query service management-ip | grep "enabled-active"` - if [ -z "$active" ] ; then - return 0 - else - return 1 - fi -} - -############################################################################### -# Only Controller -############################################################################### -if [ "$nodetype" = "controller" ] ; then - - is_service_active - if [ "$?" = "0" ] ; then - exit 0 - fi - - echo "${hostname}: System Alarm List .: ${LOGFILE}" - - # These go into the SERVICE.info file - delimiter ${LOGFILE} "fm alarm-list" - fm alarm-list 2>>${COLLECT_ERROR_LOG} >> ${LOGFILE} -fi - -exit 0 diff --git a/tools/collector/scripts/collect_host b/tools/collector/scripts/collect_host deleted file mode 100755 index e94177ac1..000000000 --- a/tools/collector/scripts/collect_host +++ /dev/null @@ -1,487 +0,0 @@ -#! /bin/bash -######################################################################## -# -# Copyright (c) 2016-2019 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# -######################################################################## - -# make these platform.conf variables global. -# values are loaded in source_openrc_if_needed. -export nodetype="" -export subfunction="" -export system_type="" -export security_profile="" -export sdn_enabled="" -export region_config="" -export vswitch_type="" -export system_mode="" -export sw_version="" - -# assume this is not the active controller until learned -export ACTIVE=false - -# -# Import commands, variables and convenience functions available to -# all collectors ; common and user defined. 
-# -source /usr/local/sbin/collect_utils -source_openrc_if_needed - -# -# parse input parameters -# -COLLECT_NAME="${1}" -DEBUG=${8} -set_debug_mode ${DEBUG} - -# Calling parms -# -# 1 = collect name -# 2 = start date option -# 3 = start date -# 4 = "any" (ignored - no longer used ; kept to support upgrades/downgrades) -# 5 = end date option -# 6 = end date -# 7 = "any" (ignored - no longer used ; kept to support upgrades/downgrades) -# 8 = debug mode -logger -t ${COLLECT_TAG} "${0} ${1} ${2} ${3} ${4} ${5} ${6} ${7} ${8}" - -# parse out the start data/time data if it is present -STARTDATE_RANGE=false -STARTDATE="any" -if [ "${2}" == "${STARTDATE_OPTION}" ] ; then - if [ "${3}" != "any" -a ${#3} -gt 7 ] ; then - STARTDATE_RANGE=true - STARTDATE="${3}" - fi -fi - -# parse out the end date/time if it is present -ENDDATE_RANGE=false -ENDDATE="any" -if [ "${5}" == "${ENDDATE_OPTION}" ] ; then - if [ "${6}" != "any" -a ${#6} -gt 7 ] ; then - ENDDATE_RANGE=true - ENDDATE="${6}" - fi -fi - -COLLECT_BASE_DIR="/scratch" -EXTRA="var/extra" -hostname="${HOSTNAME}" -COLLECT_NAME_DIR="${COLLECT_BASE_DIR}/${COLLECT_NAME}" -EXTRA_DIR="${COLLECT_NAME_DIR}/${EXTRA}" -TARBALL="${COLLECT_NAME_DIR}.tgz" -COLLECT_PATH="/etc/collect.d" -RUN_EXCLUDE="/etc/collect/run.exclude" -ETC_EXCLUDE="/etc/collect/etc.exclude" -COLLECT_INCLUDE="/var/run /etc /root" -FLIGHT_RECORDER_PATH="var/lib/sm/" -FLIGHT_RECORDER_FILE="sm.eru.v1" -VAR_LOG_INCLUDE_LIST="/tmp/${COLLECT_NAME}.lst" -COLLECT_DIR_PCENT_CMD="df --output=pcent ${COLLECT_BASE_DIR}" -COLLECT_DIR_USAGE_CMD="df -h ${COLLECT_BASE_DIR}" -COLLECT_DATE="/usr/local/sbin/collect_date" - -function log_space() -{ - local msg=${1} - - space="`${COLLECT_DIR_USAGE_CMD}`" - space1=`echo "${space}" | grep -v Filesystem` - ilog "${COLLECT_BASE_DIR} ${msg} ${space1}" -} - -function space_precheck() -{ - space="`${COLLECT_DIR_PCENT_CMD}`" - space1=`echo "${space}" | grep -v Use` - size=`echo ${space1} | cut -f 1 -d '%'` - if [ ${size} -ge 0 -a ${size} -le 100 ] ; then - ilog "${COLLECT_BASE_DIR} is $size% full" - if [ ${size} -ge ${MIN_PERCENT_SPACE_REQUIRED} ] ; then - wlog "${HOSTNAME}:${COLLECT_BASE_DIR} does not have enough available space in to perform collect" - wlog "${HOSTNAME}:${COLLECT_BASE_DIR} must be below ${MIN_PERCENT_SPACE_REQUIRED}% to perform collect" - wlog "Increase available space in ${HOSTNAME}:${COLLECT_BASE_DIR} and retry operation." - echo "${FAIL_INSUFFICIENT_SPACE_STR}" - exit ${FAIL_INSUFFICIENT_SPACE} - fi - else - wlog "unable to parse available space from '${COLLECT_DIR_PCENT_CMD}' output" - fi -} - -space_precheck - -CURR_DIR=`pwd` -mkdir -p ${COLLECT_NAME_DIR} -cd ${COLLECT_NAME_DIR} - -# create dump target extra-stuff directory -mkdir -p ${EXTRA_DIR} - -RETVAL=0 - -# Remove any previous collect error log. -# Start this collect with an empty file. -# -# stderr is directed to this log during the collect process. -# By searching this log after collect_host is run we can find -# errors that occured during collect. 
-# The only real error that we care about right now is the -# -# "No space left on device" error -# -rm -f ${COLLECT_ERROR_LOG} -touch ${COLLECT_ERROR_LOG} -chmod 644 ${COLLECT_ERROR_LOG} -echo "`date '+%F %T'` :${COLLECT_NAME_DIR}" > ${COLLECT_ERROR_LOG} - -ilog "creating local collect tarball ${COLLECT_NAME_DIR}.tgz" - -################################################################################ -# Run collect scripts to check system status -################################################################################ -function collect_parts() -{ - if [ -d ${COLLECT_PATH} ]; then - for i in ${COLLECT_PATH}/*; do - if [ -f $i ]; then - $i ${COLLECT_NAME_DIR} ${EXTRA_DIR} ${hostname} - fi - done - fi -} - - -function collect_extra() -{ - # dump process lists - LOGFILE="${EXTRA_DIR}/process.info" - echo "${hostname}: Process Info ......: ${LOGFILE}" - - delimiter ${LOGFILE} "ps -e -H -o ..." - ${PROCESS_DETAIL_CMD} >> ${LOGFILE} - - # Collect process and thread info (tree view) - delimiter ${LOGFILE} "pstree --arguments --ascii --long --show-pids" - pstree --arguments --ascii --long --show-pids >> ${LOGFILE} - - # Collect process, thread and scheduling info (worker subfunction only) - # (also gets process 'affinity' which is useful on workers; - which ps-sched.sh >/dev/null 2>&1 - if [ $? -eq 0 ]; then - delimiter ${LOGFILE} "ps-sched.sh" - ps-sched.sh >> ${LOGFILE} - fi - - # Collect process, thread and scheduling, and elapsed time - # This has everything that ps-sched.sh does, except for cpu affinity mask, - # adds: stime,etime,time,wchan,tty). - delimiter ${LOGFILE} "ps -eL -o pid,lwp,ppid,state,class,nice,rtprio,priority,psr,stime,etime,time,wchan:16,tty,comm,command" - ps -eL -o pid,lwp,ppid,state,class,nice,rtprio,priority,psr,stime,etime,time,wchan:16,tty,comm,command >> ${LOGFILE} - - # Various host attributes - LOGFILE="${EXTRA_DIR}/host.info" - echo "${hostname}: Host Info .........: ${LOGFILE}" - - # CGCS build info - delimiter ${LOGFILE} "${BUILD_INFO_CMD}" - ${BUILD_INFO_CMD} >> ${LOGFILE} - - delimiter ${LOGFILE} "uptime" - uptime >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG} - - delimiter ${LOGFILE} "cat /proc/cmdline" - cat /proc/cmdline >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG} - - delimiter ${LOGFILE} "cat /proc/version" - cat /proc/version >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG} - - delimiter ${LOGFILE} "cat /proc/cpuinfo" - cat /proc/cpuinfo >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG} - - delimiter ${LOGFILE} "ip addr show" - ip addr show >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG} - - delimiter ${LOGFILE} "lspci -nn" - lspci -nn >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG} - - delimiter ${LOGFILE} "find /sys/kernel/iommu_groups/ -type l" - find /sys/kernel/iommu_groups/ -type l >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG} - - # networking totals - delimiter ${LOGFILE} "cat /proc/net/dev" - cat /proc/net/dev >> ${LOGFILE} - - delimiter ${LOGFILE} "dmidecode" - dmidecode >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG} - - # summary of scheduler tunable settings - delimiter ${LOGFILE} "cat /proc/sched_debug | head -15" - cat /proc/sched_debug | head -15 >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG} - - if [ "${SKIP_MASK}" = "true" ]; then - delimiter ${LOGFILE} "facter (excluding ssh info)" - facter | grep -iv '^ssh' >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG} - else - delimiter ${LOGFILE} "facter" - facter >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG} - fi - - if [[ "$nodetype" == "worker" || "$subfunction" == *"worker"* ]] ; then - delimiter ${LOGFILE} "topology" - topology >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG} - fi - - 
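Every section of collect_extra below follows the same two-step rhythm: write a banner naming the command, then append the command's output to the same .info file. A self-contained sketch of that reporting pattern; the delimiter helper here is a simplified stand-in for the real one defined in collect_utils, and the report path is a hypothetical example:

    #!/bin/bash
    # Sketch: banner-then-append reporting, as used for each .info section.
    LOGFILE="/tmp/example.info"      # hypothetical report file

    delimiter()
    {
        # write a banner naming the command whose output follows
        echo "--------------------------------------------------" >> "${1}"
        echo "`date` : `hostname` : ${2}" >> "${1}"
        echo "--------------------------------------------------" >> "${1}"
    }

    delimiter "${LOGFILE}" "uptime"
    uptime >> "${LOGFILE}" 2>/dev/null

    delimiter "${LOGFILE}" "cat /proc/meminfo"
    cat /proc/meminfo >> "${LOGFILE}" 2>/dev/null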
LOGFILE="${EXTRA_DIR}/memory.info" - echo "${hostname}: Memory Info .......: ${LOGFILE}" - - delimiter ${LOGFILE} "cat /proc/meminfo" - cat /proc/meminfo >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG} - - delimiter ${LOGFILE} "cat /sys/devices/system/node/node?/meminfo" - cat /sys/devices/system/node/node?/meminfo >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG} - - delimiter ${LOGFILE} "cat /proc/slabinfo" - log_slabinfo ${LOGFILE} 2>>${COLLECT_ERROR_LOG} - - delimiter ${LOGFILE} "ps -e -o ppid,pid,nlwp,rss:10,vsz:10,cmd --sort=-rss" - ps -e -o ppid,pid,nlwp,rss:10,vsz:10,cmd --sort=-rss >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG} - - # mounted hugepages - delimiter ${LOGFILE} "lsof | grep /mnt/huge" - lsof | awk '($3 !~ /^[0-9]+$/ && /\/mnt\/huge/) || NR==1 {print $0;}' >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG} - - # hugepages numa mapping - delimiter ${LOGFILE} "grep huge /proc/*/numa_maps" - grep -e " huge " /proc/*/numa_maps >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG} - - # rootfs and tmpfs usage - delimiter ${LOGFILE} "df -h -H -T --local -t rootfs -t tmpfs" - df -h -H -T --local -t rootfs -t tmpfs >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG} - - LOGFILE="${EXTRA_DIR}/filesystem.info" - echo "${hostname}: Filesystem Info ...: ${LOGFILE}" - - # disk inodes usage - delimiter ${LOGFILE} "df -h -H -T --local -t rootfs -t tmpfs" - df -h -H -T --local -t rootfs -t tmpfs >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG} - - # disk space usage - delimiter ${LOGFILE} "df -h -H -T --local -t ext2 -t ext3 -t ext4 -t xfs --total" - df -h -H -T --local -t ext2 -t ext3 -t ext4 -t xfs --total >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG} - - # disk inodes usage - delimiter ${LOGFILE} "df -h -H -T --local -i -t ext2 -t ext3 -t ext4 -t xfs --total" - df -h -H -T --local -i -t ext2 -t ext3 -t ext4 -t xfs --total >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG} - - # disks by-path values - delimiter ${LOGFILE} "ls -lR /dev/disk" - ls -lR /dev/disk >> ${LOGFILE} - - # disk summary (requires sudo/root) - delimiter ${LOGFILE} "fdisk -l" - fdisk -l >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG} - - delimiter ${LOGFILE} "cat /proc/scsi/scsi" - cat /proc/scsi/scsi >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG} - - # Controller specific stuff - if [ "$nodetype" = "controller" ] ; then - - delimiter ${LOGFILE} "cat /proc/drbd" - cat /proc/drbd >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG} - - delimiter ${LOGFILE} "/sbin/drbdadm dump" - /sbin/drbdadm dump >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG} - - fi - - # LVM summary - delimiter ${LOGFILE} "/usr/sbin/vgs --version ; /usr/sbin/pvs --version ; /usr/sbin/lvs --version" - /usr/sbin/vgs --version >> ${LOGFILE} - /usr/sbin/pvs --version >> ${LOGFILE} - /usr/sbin/lvs --version >> ${LOGFILE} - - delimiter ${LOGFILE} "/usr/sbin/vgs --all --options all" - /usr/sbin/vgs --all --options all >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG} - - delimiter ${LOGFILE} "/usr/sbin/pvs --all --options all" - /usr/sbin/pvs --all --options all >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG} - - delimiter ${LOGFILE} "/usr/sbin/lvs --all --options all" - /usr/sbin/lvs --all --options all >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG} - - # iSCSI Information - LOGFILE="${EXTRA_DIR}/iscsi.info" - echo "${hostname}: iSCSI Information ......: ${LOGFILE}" - - if [ "$nodetype" = "controller" ] ; then - # Controller- LIO exported initiators summary - delimiter ${LOGFILE} "targetcli ls" - targetcli ls >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG} - - # Controller - LIO sessions - delimiter ${LOGFILE} "targetcli sessions detail" - targetcli sessions detail >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG} - - elif [[ "$nodetype" == "worker" 
|| "$subfunction" == *"worker"* ]] ; then - # Worker - iSCSI initiator information - collect_dir=${EXTRA_DIR}/iscsi_initiator_info - mkdir -p ${collect_dir} - cp -rf /run/iscsi-cache/nodes/* ${collect_dir} - find ${collect_dir} -type d -exec chmod 750 {} \; - - # Worker - iSCSI initiator active sessions - delimiter ${LOGFILE} "iscsiadm -m session" - iscsiadm -m session >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG} - - # Worker - iSCSI udev created nodes - delimiter ${LOGFILE} "ls -la /dev/disk/by-path | grep \"iqn\"" - ls -la /dev/disk/by-path | grep "iqn" >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG} - fi - - LOGFILE="${EXTRA_DIR}/history.info" - echo "${hostname}: Bash History ......: ${LOGFILE}" - - # history - delimiter ${LOGFILE} "cat /home/sysadmin/.bash_history" - cat /home/sysadmin/.bash_history >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG} - - LOGFILE="${EXTRA_DIR}/interrupt.info" - echo "${hostname}: Interrupt Info ....: ${LOGFILE}" - - # interrupts - delimiter ${LOGFILE} "cat /proc/interrupts" - cat /proc/interrupts >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG} - - delimiter ${LOGFILE} "cat /proc/softirqs" - cat /proc/softirqs >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG} - - # Controller specific stuff - if [ "$nodetype" = "controller" ] ; then - netstat -pan > ${EXTRA_DIR}/netstat.info - fi - - LOGFILE="${EXTRA_DIR}/blockdev.info" - echo "${hostname}: Block Devices Info : ${LOGFILE}" - - # Collect block devices - show all sda and cinder devices, and size - delimiter ${LOGFILE} "lsblk" - lsblk >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG} - - # Collect block device topology - show devices and which io-scheduler - delimiter ${LOGFILE} "lsblk --topology" - lsblk --topology >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG} - - # Collect SCSI devices - show devices and cinder attaches, etc - delimiter ${LOGFILE} "lsblk --scsi" - lsblk --scsi >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG} -} - -log_space "before collect ......:" - -collect_extra -collect_parts - -# -# handle collect collect-after and collect-range and then -# in elif clause collect-before -# -VAR_LOG="/var/log" -if [ -e /www/var/log ]; then - VAR_LOG="$VAR_LOG /www/var/log" -fi - -rm -f ${VAR_LOG_INCLUDE_LIST} - -if [ "${STARTDATE_RANGE}" == true ] ; then - if [ "${ENDDATE_RANGE}" == false ] ; then - ilog "collecting $VAR_LOG files containing logs after ${STARTDATE}" - ${COLLECT_DATE} ${STARTDATE} ${ENDDATE} ${VAR_LOG_INCLUDE_LIST} ${DEBUG} "" - else - ilog "collecting $VAR_LOG files containing logs between ${STARTDATE} and ${ENDDATE}" - ${COLLECT_DATE} ${STARTDATE} ${ENDDATE} ${VAR_LOG_INCLUDE_LIST} ${DEBUG} "" - fi -elif [ "${ENDDATE_RANGE}" == true ] ; then - STARTDATE="20130101" - ilog "collecting $VAR_LOG files containing logs before ${ENDDATE}" - ${COLLECT_DATE} ${STARTDATE} ${ENDDATE} ${VAR_LOG_INCLUDE_LIST} ${DEBUG} "" -else - ilog "collecting all of $VAR_LOG" - find $VAR_LOG ! 
-empty > ${VAR_LOG_INCLUDE_LIST} -fi - -# Add VM console.log -for i in /var/lib/nova/instances/*/console.log; do - if [ -e "$i" ]; then - tmp=`dirname $i` - mkdir -p ${COLLECT_NAME_DIR}/$tmp - cp $i ${COLLECT_NAME_DIR}/$tmp - fi -done - -log_space "before first tar ....:" - -(cd ${COLLECT_NAME_DIR} ; ${IONICE_CMD} ${NICE_CMD} ${TAR_CMD} ${COLLECT_NAME_DIR}/${COLLECT_NAME}.tar -T ${VAR_LOG_INCLUDE_LIST} -X ${RUN_EXCLUDE} -X ${ETC_EXCLUDE} ${COLLECT_INCLUDE} 2>>${COLLECT_ERROR_LOG} 1>>${COLLECT_ERROR_LOG} ) - -log_space "after first tar .....:" - -(cd ${COLLECT_NAME_DIR} ; ${IONICE_CMD} ${NICE_CMD} ${UNTAR_CMD} ${COLLECT_NAME_DIR}/${COLLECT_NAME}.tar 2>>${COLLECT_ERROR_LOG} 1>>${COLLECT_ERROR_LOG} ) - -log_space "after first untar ...:" - -rm -f ${COLLECT_NAME_DIR}/${COLLECT_NAME}.tar - -log_space "after delete tar ....:" - -if [ "${SKIP_MASK}" != "true" ]; then - # Run password masking before final tar - dlog "running /usr/local/sbin/collect_mask_passwords ${COLLECT_NAME_DIR} ${EXTRA_DIR}" - /usr/local/sbin/collect_mask_passwords ${COLLECT_NAME_DIR} ${EXTRA_DIR} - log_space "after passwd masking :" -fi - -(cd ${COLLECT_BASE_DIR} ; ${IONICE_CMD} ${NICE_CMD} ${TAR_ZIP_CMD} ${COLLECT_NAME_DIR}.tgz ${COLLECT_NAME} 2>/dev/null 1>/dev/null ) - -log_space "after first tarball .:" - -mkdir -p ${COLLECT_NAME_DIR}/${FLIGHT_RECORDER_PATH} - -(cd /${FLIGHT_RECORDER_PATH} ; ${TAR_ZIP_CMD} ${COLLECT_NAME_DIR}/${FLIGHT_RECORDER_PATH}/${FLIGHT_RECORDER_FILE}.tgz ./${FLIGHT_RECORDER_FILE} 2>>${COLLECT_ERROR_LOG} 1>>${COLLECT_ERROR_LOG}) - -# Pull in an updated user.log which contains the most recent collect logs -# ... be sure to exclude any out of space logs -tail -30 /var/log/user.log | grep "COLLECT:" | grep -v "${FAIL_OUT_OF_SPACE_STR}" >> ${COLLECT_ERROR_LOG} -cp -a ${COLLECT_LOG} ${COLLECT_LOG}.last -cp -a ${COLLECT_ERROR_LOG} ${COLLECT_LOG} -cp -a ${COLLECT_LOG} ${COLLECT_NAME_DIR}/var/log - -log_space "with flight data ....:" - -(cd ${COLLECT_BASE_DIR} ; ${IONICE_CMD} ${NICE_CMD} ${TAR_ZIP_CMD} ${COLLECT_NAME_DIR}.tgz ${COLLECT_NAME} 2>>${COLLECT_ERROR_LOG} 1>>${COLLECT_ERROR_LOG} ) - -log_space "after collect .......:" - -rm -rf ${COLLECT_NAME_DIR} -rm -f ${VAR_LOG_INCLUDE_LIST} - -log_space "after cleanup .......:" - -# Check for collect errors -# Only out of space error is enough to fail this hosts's collect -collect_errors ${HOSTNAME} -RC=${?} - -rm -f ${COLLECT_ERROR_LOG} - -if [ ${RC} -ne 0 ] ; then - rm -f ${COLLECT_NAME_DIR}.tgz - ilog "${FAIL_OUT_OF_SPACE_STR} ${COLLECT_BASE_DIR}" -else - ilog "collect of ${COLLECT_NAME_DIR}.tgz succeeded" - echo "${collect_done}" -fi diff --git a/tools/collector/scripts/collect_ima.sh b/tools/collector/scripts/collect_ima.sh deleted file mode 100755 index 14c751e42..000000000 --- a/tools/collector/scripts/collect_ima.sh +++ /dev/null @@ -1,59 +0,0 @@ -#! /bin/bash -# -# Copyright (c) 2017 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - - -# Loads Up Utilities and Commands Variables -source /usr/local/sbin/collect_parms -source /usr/local/sbin/collect_utils - -function is_extended_profile { - if [ ! -n "${security_profile}" ] || [ "${security_profile}" != "extended" ]; then - return 0 - else - return 1 - fi -} - -SERVICE="ima" -LOGFILE="${extradir}/${SERVICE}.info" - -############################################################################### -# All Node Types -############################################################################### - -is_extended_profile -if [ "$?" 
= "0" ] ; then - exit 0 -fi - -echo "${hostname}: IMA Info ..........: ${LOGFILE}" - -delimiter ${LOGFILE} "IMA Kernel Modules" -lsmod | grep ima >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG} - -delimiter ${LOGFILE} "Auditd status" -service auditd status >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG} -ps -aux | grep audit >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG} - -mkdir -p ${extradir}/integrity 2>>${COLLECT_ERROR_LOG} - -delimiter ${LOGFILE} "IMA Runtime Measurement and Violations cache" -if [ -d "/sys/kernel/security/ima" ]; then - ls /sys/kernel/security/ima >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG} - cp -rf /sys/kernel/security/ima ${extradir}/integrity 2>>${COLLECT_ERROR_LOG} -else - echo "ERROR: IMA Securityfs directory does not exist!" >> ${LOGFILE} -fi - -cp -rf /etc/modprobe.d/ima.conf ${extradir}/integrity 2>>${COLLECT_ERROR_LOG} -cp -rf /etc/modprobe.d/integrity.conf ${extradir}/integrity 2>>${COLLECT_ERROR_LOG} -cp -rf /etc/ima.policy ${extradir}/integrity 2>>${COLLECT_ERROR_LOG} - -# make sure all these collected files are world readible -chmod -R 755 ${extradir}/integrity - -exit 0 diff --git a/tools/collector/scripts/collect_mask_passwords b/tools/collector/scripts/collect_mask_passwords deleted file mode 100644 index 5cda34265..000000000 --- a/tools/collector/scripts/collect_mask_passwords +++ /dev/null @@ -1,123 +0,0 @@ -#! /bin/bash -# -# Copyright (c) 2017 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - -COLLECT_NAME_DIR=$1 -EXTRA_DIR=$2 - -# Strip the passwords from assorted config files -for conffile in \ - ${COLLECT_NAME_DIR}/etc/aodh/aodh.conf \ - ${COLLECT_NAME_DIR}/etc/barbican/barbican.conf \ - ${COLLECT_NAME_DIR}/etc/ceilometer/ceilometer.conf \ - ${COLLECT_NAME_DIR}/etc/cinder/cinder.conf \ - ${COLLECT_NAME_DIR}/etc/fm/fm.conf \ - ${COLLECT_NAME_DIR}/etc/glance/glance-api.conf \ - ${COLLECT_NAME_DIR}/etc/glance/glance-registry.conf \ - ${COLLECT_NAME_DIR}/etc/heat/heat.conf \ - ${COLLECT_NAME_DIR}/etc/ironic/ironic.conf \ - ${COLLECT_NAME_DIR}/etc/keystone/keystone.conf \ - ${COLLECT_NAME_DIR}/etc/magnum/magnum.conf \ - ${COLLECT_NAME_DIR}/etc/murano/murano.conf \ - ${COLLECT_NAME_DIR}/etc/neutron/metadata_agent.ini \ - ${COLLECT_NAME_DIR}/etc/neutron/neutron.conf \ - ${COLLECT_NAME_DIR}/etc/nfv/nfv_plugins/nfvi_plugins/config.ini \ - ${COLLECT_NAME_DIR}/etc/nova/nova.conf \ - ${COLLECT_NAME_DIR}/etc/nslcd.conf \ - ${COLLECT_NAME_DIR}/etc/openldap/slapd.conf.backup \ - ${COLLECT_NAME_DIR}/etc/openstack-dashboard/local_settings \ - ${COLLECT_NAME_DIR}/etc/panko/panko.conf \ - ${COLLECT_NAME_DIR}/etc/patching/patching.conf \ - ${COLLECT_NAME_DIR}/etc/proxy/nova-api-proxy.conf \ - ${COLLECT_NAME_DIR}/etc/rabbitmq/murano-rabbitmq.config \ - ${COLLECT_NAME_DIR}/etc/rabbitmq/rabbitmq.config \ - ${COLLECT_NAME_DIR}/etc/sysinv/api-paste.ini \ - ${COLLECT_NAME_DIR}/etc/sysinv/sysinv.conf \ - ${COLLECT_NAME_DIR}/var/extra/platform/sysinv/*/sysinv.conf.default \ - ${COLLECT_NAME_DIR}/etc/mtc.ini - -do - if [ ! 
-f $conffile ]; then - continue - fi - - sed -i -r 's/^(admin_password) *=.*/\1 = xxxxxx/; - s/^(auth_encryption_key) *=.*/\1 = xxxxxx/; - s/^(bindpw) .*/\1 xxxxxx/; - s/^(rootpw) .*/\1 xxxxxx/; - s/^(connection) *=.*/\1 = xxxxxx/; - s/^( *credentials) *=.*/\1 = xxxxxx/; - s/^(metadata_proxy_shared_secret) *=.*/\1 = xxxxxx/; - s/^(password) *=.*/\1 = xxxxxx/; - s/^(rabbit_password) *=.*/\1 = xxxxxx/; - s/^(sql_connection) *=.*/\1 = xxxxxx/; - s/^(stack_domain_admin_password) *=.*/\1 = xxxxxx/; - s/^(transport_url) *=.*/\1 = xxxxxx/; - s/^(SECRET_KEY) *=.*/\1 = xxxxxx/; - s/^(keystone_auth_pw) *=.*/\1 = xxxxxx/; - s/\{default_pass, <<\".*\">>\}/\{default_pass, <<\"xxxxxx\">>\}/' $conffile -done - -find ${COLLECT_NAME_DIR} -name server-cert.pem | xargs --no-run-if-empty rm -f -rm -rf ${COLLECT_NAME_DIR}/var/extra/platform/config/*/ssh_config -rm -f ${COLLECT_NAME_DIR}/var/extra/platform/puppet/*/hieradata/secure*.yaml - -# Mask user passwords in sysinv db dump -if [ -f ${COLLECT_NAME_DIR}/var/extra/database/sysinv.db.sql.txt ]; then - sed -i -r '/COPY i_user/, /^--/ s/^(([^\t]*\t){10})[^\t]*(\t.*)/\1xxxxxx\3/; - /COPY i_community/, /^--/ s/^(([^\t]*\t){5})[^\t]*(\t.*)/\1xxxxxx\3/; - /COPY i_trap_destination/, /^--/ s/^(([^\t]*\t){6})[^\t]*(\t.*)/\1xxxxxx\3/; - s/(identity\t[^\t]*\tpassword\t)[^\t]*/\1xxxxxx/' \ - ${COLLECT_NAME_DIR}/var/extra/database/sysinv.db.sql.txt -fi - -# Mask passwords in host profiles -grep -rl '\"name\": \"password\"' ${COLLECT_NAME_DIR}/var/extra/platform/sysinv/ \ - | xargs --no-run-if-empty perl -i -e ' - $prev=""; - while (<>) - { - if (/\"name\": \"password\"/) - { - $prev =~ s/\"value\": \".*\"/\"value\": \"xxxxxx\"/; - } - print $prev; - $prev=$_; - } - print $prev;' - -# Cleanup snmp -sed -i -r 's/(rocommunity[^ ]*).*/\1 xxxxxx/' ${COLLECT_NAME_DIR}/var/extra/platform/config/*/snmp/* -sed -i -r 's/(trap2sink *[^ ]*).*/\1 xxxxxx/' ${COLLECT_NAME_DIR}/var/extra/platform/config/*/snmp/* - -# Mask passwords in bash.log and history logs -USER_HISTORY_FILES=$(find ${COLLECT_NAME_DIR} -type f -name .bash_history 2>/dev/null) -sed -i -r 's/(snmp-comm-(delete|show)) *((\"[^\"]*\"|'\''[^'"'"']*'"'"'|[^ ]*) *){1,}/\1 xxxxxx/; - s/(snmp.*) *(--community|-c) *(\"[^\"]*\"|'\''[^'"'"']*'"'"'|[^ ]*)/\1 \2 xxxxxx/; - s/(password)=(\"[^\"]*\"|'\''[^'"'"']*'"'"'|[^ ]*)/\1=xxxxxx/; - s/(openstack.*) *(--password) *(\"[^\"]*\"|'\''[^'"'"']*'"'"'|[^ ]*)/\1 \2 xxxxxx/; - s/(ldapmodifyuser.*userPassword *)(\"[^\"]*\"|'\''[^'"'"']*'"'"'|[^ ]*)/\1 xxxxxx/' \ - ${USER_HISTORY_FILES} \ - ${COLLECT_NAME_DIR}/var/extra/history.info \ - ${COLLECT_NAME_DIR}/var/log/bash.log \ - ${COLLECT_NAME_DIR}/var/log/auth.log \ - ${COLLECT_NAME_DIR}/var/log/ldapscripts.log - -for f in ${COLLECT_NAME_DIR}/var/log/bash.log.*.gz \ - ${COLLECT_NAME_DIR}/var/log/auth.log.*.gz \ - ${COLLECT_NAME_DIR}/var/log/ldapscripts.log.*.gz -do - zgrep -q 'snmp|password' $f || continue - gunzip $f - unzipped=${f%%.gz} - sed -i -r 's/(snmp-comm-(delete|show)) *((\"[^\"]*\"|'\''[^'"'"']*'"'"'|[^ ]*) *){1,}/\1 xxxxxx/; - s/(snmp.*) *(--community|-c) *(\"[^\"]*\"|'\''[^'"'"']*'"'"'|[^ ]*)/\1 \2 xxxxxx/; - s/(password)=(\"[^\"]*\"|'\''[^'"'"']*'"'"'|[^ ]*)/\1=xxxxxx/; - s/(openstack.*) *(--password) *(\"[^\"]*\"|'\''[^'"'"']*'"'"'|[^ ]*)/\1 \2 xxxxxx/; - s/(ldapmodifyuser.*userPassword *)(\"[^\"]*\"|'\''[^'"'"']*'"'"'|[^ ]*)/\1 xxxxxx/' $unzipped - gzip $unzipped -done - diff --git a/tools/collector/scripts/collect_networking.sh b/tools/collector/scripts/collect_networking.sh deleted file mode 100755 index 
02a9e6c4d..000000000 --- a/tools/collector/scripts/collect_networking.sh +++ /dev/null @@ -1,61 +0,0 @@ -#! /bin/bash -# -# Copyright (c) 2013-2014 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - - -# Loads Up Utilities and Commands Variables - -source /usr/local/sbin/collect_parms -source /usr/local/sbin/collect_utils - -SERVICE="networking" -LOGFILE="${extradir}/${SERVICE}.info" -echo "${hostname}: Networking Info ...: ${LOGFILE}" - -############################################################################### -# All nodes -############################################################################### -delimiter ${LOGFILE} "ip -s link" -ip -s link >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG} - -delimiter ${LOGFILE} "ip -s addr" -ip -s addr >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG} - -delimiter ${LOGFILE} "ip -s neigh" -ip -s neigh >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG} - -delimiter ${LOGFILE} "ip rule" -ip rule >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG} - -delimiter ${LOGFILE} "ip route" -ip route >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG} - -delimiter ${LOGFILE} "iptables -L -v -x -n" -iptables -L -v -x -n >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG} - -delimiter ${LOGFILE} "iptables -L -v -x -n -t nat" -iptables -L -v -x -n -t nat >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG} - -delimiter ${LOGFILE} "iptables -L -v -x -n -t mangle" -iptables -L -v -x -n -t mangle >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG} - - -############################################################################### -# Only Worker -############################################################################### -if [[ "$nodetype" = "worker" || "$subfunction" == *"worker"* ]] ; then - NAMESPACES=($(ip netns)) - for NS in ${NAMESPACES[@]}; do - delimiter ${LOGFILE} "${NS}" - ip netns exec ${NS} ip -s link - ip netns exec ${NS} ip -s addr - ip netns exec ${NS} ip -s neigh - ip netns exec ${NS} ip route - ip netns exec ${NS} ip rule - done >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG} -fi - -exit 0 diff --git a/tools/collector/scripts/collect_nfv_vim.sh b/tools/collector/scripts/collect_nfv_vim.sh deleted file mode 100644 index c5ccbc7fa..000000000 --- a/tools/collector/scripts/collect_nfv_vim.sh +++ /dev/null @@ -1,44 +0,0 @@ -#! /bin/bash -# -# Copyright (c) 2013-2016 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - -# Loads Up Utilities and Commands Variables -source /usr/local/sbin/collect_parms -source /usr/local/sbin/collect_utils - -LOGFILE="${extradir}/nfv-vim.info" -echo "${hostname}: NFV-Vim Info ......: ${LOGFILE}" - -function is_service_active { - active=`sm-query service vim | grep "enabled-active"` - if [ -z "$active" ] ; then - return 0 - else - return 1 - fi -} - -############################################################################### -# Only Controller -############################################################################### - -if [ "$nodetype" = "controller" ] ; then - is_service_active - if [ "$?" 
= "0" ] ; then - exit 0 - fi - - # Assumes that database_dir is unique in /etc/nfv/vim/config.ini - DATABASE_DIR=$(awk -F "=" '/database_dir/ {print $2}' /etc/nfv/vim/config.ini) - - SQLITE_DUMP="/usr/bin/sqlite3 ${DATABASE_DIR}/vim_db_v1 .dump" - - delimiter ${LOGFILE} "dump database" - timeout 30 ${SQLITE_DUMP} >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG} -fi - -exit 0 - diff --git a/tools/collector/scripts/collect_openstack.sh b/tools/collector/scripts/collect_openstack.sh deleted file mode 100755 index e03eca852..000000000 --- a/tools/collector/scripts/collect_openstack.sh +++ /dev/null @@ -1,68 +0,0 @@ -#! /bin/bash -# -# Copyright (c) 2013-2019 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - - -# Loads Up Utilities and Commands Variables -source /usr/local/sbin/collect_parms -source /usr/local/sbin/collect_utils - -function is_service_active { - active=`sm-query service rabbit-fs | grep "enabled-active"` - if [ -z "$active" ] ; then - return 0 - else - return 1 - fi -} - -SERVICE="openstack" -LOGFILE="${extradir}/${SERVICE}.info" -echo "${hostname}: Openstack Info ....: ${LOGFILE}" - -############################################################################### -# Only Controller -############################################################################### -if [ "$nodetype" = "controller" ] ; then - - is_service_active - if [ "$?" = "0" ] ; then - exit 0 - fi - - delimiter ${LOGFILE} "openstack project list" - openstack project list >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG} - - delimiter ${LOGFILE} "openstack user list" - openstack user list >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG} - - MQ_STATUS="rabbitmqctl status" - delimiter ${LOGFILE} "${MQ_STATUS} | grep -e '{memory' -A30" - ${MQ_STATUS} 2>/dev/null | grep -e '{memory' -A30 >> ${LOGFILE} - - delimiter ${LOGFILE} "RabbitMQ Queue Info" - num_queues=$(rabbitmqctl list_queues | wc -l); ((num_queues-=2)) - num_bindings=$(rabbitmqctl list_bindings | wc -l); ((num_bindings-=2)) - num_exchanges=$(rabbitmqctl list_exchanges | wc -l); ((num_exchanges-=2)) - num_connections=$(rabbitmqctl list_connections | wc -l); ((num_connections-=2)) - num_channels=$(rabbitmqctl list_channels | wc -l); ((num_channels-=2)) - arr=($(rabbitmqctl list_queues messages consumers memory | \ - awk '/^[0-9]/ {a+=$1; b+=$2; c+=$3} END {print a, b, c}')) - messages=${arr[0]}; consumers=${arr[1]}; memory=${arr[2]} - printf "%6s %8s %9s %11s %8s %8s %9s %10s\n" "queues" "bindings" "exchanges" "connections" "channels" "messages" "consumers" "memory" >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG} - printf "%6d %8d %9d %11d %8d %8d %9d %10d\n" $num_queues $num_bindings $num_exchanges $num_connections $num_channels $messages $consumers $memory >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG} -fi - -############################################################################### -# collect does not retrieve /etc/keystone dir -# Additional logic included to copy /etc/keystone directory -############################################################################### - -mkdir -p ${extradir}/../../etc/ -cp -R /etc/keystone/ ${extradir}/../../etc -chmod -R 755 ${extradir}/../../etc/keystone - -exit 0 diff --git a/tools/collector/scripts/collect_ovs.sh b/tools/collector/scripts/collect_ovs.sh deleted file mode 100644 index 94e98e696..000000000 --- a/tools/collector/scripts/collect_ovs.sh +++ /dev/null @@ -1,35 +0,0 @@ -#! /bin/bash -######################################################################## -# -# Copyright (c) 2018 Wind River Systems, Inc. 
-# -# SPDX-License-Identifier: Apache-2.0 -# -######################################################################## - -# Loads Up Utilities and Commands Variables - -source /usr/local/sbin/collect_parms -source /usr/local/sbin/collect_utils - -SERVICE="ovs" -LOGFILE="${extradir}/${SERVICE}.info" - - -############################################################################### -# Only Worker Nodes -############################################################################### -if [[ "$nodetype" == "worker" || "$subfunction" == *"worker"* ]] ; then - - if [[ "$vswitch_type" == *ovs* ]]; then - echo "${hostname}: OVS Info ..........: ${LOGFILE}" - - delimiter ${LOGFILE} "ovsdb-client dump" - ovsdb-client dump >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG} - - delimiter ${LOGFILE} "ovs-vsctl show" - ovs-vsctl --timeout 10 show >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG} - fi -fi - -exit 0 diff --git a/tools/collector/scripts/collect_parms b/tools/collector/scripts/collect_parms deleted file mode 100644 index 66001504f..000000000 --- a/tools/collector/scripts/collect_parms +++ /dev/null @@ -1,29 +0,0 @@ -#! /bin/bash -# -# Copyright (c) 2013-2014 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - - -#echo "defaults: $1-$2-$3-$4" - -if [ -z ${1} ] ; then - basedir=/scratch -else - basedir=$1 -fi - -if [ -z ${2} ] ; then - extradir=$basedir/var/extra -else - extradir=$2 -fi - -if [ -z ${3} ] ; then - hostname=$HOSTNAME -else - hostname=$3 -fi - -mkdir -p ${extradir} diff --git a/tools/collector/scripts/collect_patching.sh b/tools/collector/scripts/collect_patching.sh deleted file mode 100755 index 3d696d259..000000000 --- a/tools/collector/scripts/collect_patching.sh +++ /dev/null @@ -1,45 +0,0 @@ -#! /bin/bash -# -# Copyright (c) 2013-2014 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - - -# Loads Up Utilities and Commands Variables -source /usr/local/sbin/collect_parms -source /usr/local/sbin/collect_utils - -SERVICE="patching" -LOGFILE="${extradir}/${SERVICE}.info" -echo "${hostname}: Patching Info .....: ${LOGFILE}" - -############################################################################### -# All nodes -############################################################################### -delimiter ${LOGFILE} "smart channel --show" -smart channel --show 2>>${COLLECT_ERROR_LOG} >> ${LOGFILE} - -############################################################################### -# Only Controller -############################################################################### -if [ "$nodetype" = "controller" ] ; then - - delimiter ${LOGFILE} "sw-patch query" - sw-patch query 2>>${COLLECT_ERROR_LOG} >> ${LOGFILE} - - delimiter ${LOGFILE} "sw-patch query-hosts" - sw-patch query-hosts 2>>${COLLECT_ERROR_LOG} >> ${LOGFILE} - - delimiter ${LOGFILE} "sw-patch query-hosts --debug" - sw-patch query-hosts --debug 2>>${COLLECT_ERROR_LOG} >> ${LOGFILE} - - delimiter ${LOGFILE} "find /opt/patching" - find /opt/patching 2>>${COLLECT_ERROR_LOG} >> ${LOGFILE} - - delimiter ${LOGFILE} "find /www/pages/updates" - find /www/pages/updates 2>>${COLLECT_ERROR_LOG} >> ${LOGFILE} - -fi - -exit 0 diff --git a/tools/collector/scripts/collect_psqldb.sh b/tools/collector/scripts/collect_psqldb.sh deleted file mode 100755 index d223b1b7b..000000000 --- a/tools/collector/scripts/collect_psqldb.sh +++ /dev/null @@ -1,117 +0,0 @@ -#! /bin/bash -# -# Copyright (c) 2013-2014 Wind River Systems, Inc. 
-# -# SPDX-License-Identifier: Apache-2.0 -# - - -# Loads Up Utilities and Commands Variables -source /usr/local/sbin/collect_parms -source /usr/local/sbin/collect_utils - -# postgres database commands -PSQL_CMD="sudo -u postgres psql --pset pager=off -q" -PG_DUMP_CMD="sudo -u postgres pg_dump" - -SERVICE="database" -DB_DIR="${extradir}/database" -LOGFILE="${extradir}/database.info" -echo "${hostname}: Database Info .....: ${LOGFILE}" - -function is_service_active { - active=`sm-query service postgres | grep "enabled-active"` - if [ -z "$active" ] ; then - return 0 - else - return 1 - fi -} - -############################################################################### -# All node types -############################################################################### -mkdir -p ${DB_DIR} - -function log_database { - db_list=( $(${PSQL_CMD} -t -c "SELECT datname FROM pg_database WHERE datistemplate = false;") ) - for db in "${db_list[@]}"; do - echo "postgres database: ${db}" - ${PSQL_CMD} -d ${db} -c " - SELECT - table_schema, - table_name, - pg_size_pretty(table_size) AS table_size, - pg_size_pretty(indexes_size) AS indexes_size, - pg_size_pretty(total_size) AS total_size, - live_tuples, - dead_tuples - FROM ( - SELECT - table_schema, - table_name, - pg_table_size(table_name) AS table_size, - pg_indexes_size(table_name) AS indexes_size, - pg_total_relation_size(table_name) AS total_size, - pg_stat_get_live_tuples(table_name::regclass) AS live_tuples, - pg_stat_get_dead_tuples(table_name::regclass) AS dead_tuples - FROM ( - SELECT - table_schema, - table_name - FROM information_schema.tables - WHERE table_schema='public' - AND table_type='BASE TABLE' - ) AS all_tables - ORDER BY total_size DESC - ) AS pretty_sizes; - " - done >> ${1} -} - - - -DB_EXT=db.sql.txt -function database_dump { - mkdir -p ${DB_DIR} - db_list=( $(${PSQL_CMD} -t -c "SELECT datname FROM pg_database WHERE datistemplate = false;") ) - for DB in "${db_list[@]}"; do - if [ "$DB" != "keystone" -a "$DB" != "ceilometer" ] ; then - echo "${hostname}: Dumping Database ..: ${DB_DIR}/$DB.$DB_EXT" - (cd ${DB_DIR} ; sudo -u postgres pg_dump $DB > $DB.$DB_EXT) - fi - done -} - -############################################################################### -# Only Controller -############################################################################### - -if [ "$nodetype" = "controller" ] ; then - is_service_active - if [ "$?" = "0" ] ; then - exit 0 - fi - - # postgres DB sizes - delimiter ${LOGFILE} "formatted ${PSQL_CMD} -c" - ${PSQL_CMD} -c " - SELECT - pg_database.datname, - pg_database_size(pg_database.datname), - pg_size_pretty(pg_database_size(pg_database.datname)) - FROM pg_database - ORDER BY pg_database_size DESC; - " >> ${LOGFILE} - - # Number of postgres connections - delimiter ${LOGFILE} "ps -C postgres -o cmd=" - ps -C postgres -o cmd= >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG} - - delimiter ${LOGFILE} "call to log_database" - log_database ${LOGFILE} - - database_dump -fi - -exit 0 diff --git a/tools/collector/scripts/collect_sm.sh b/tools/collector/scripts/collect_sm.sh deleted file mode 100644 index 5f0f3c9ba..000000000 --- a/tools/collector/scripts/collect_sm.sh +++ /dev/null @@ -1,26 +0,0 @@ -#! /bin/bash -# -# Copyright (c) 2013-2014 Wind River Systems, Inc. 
-# -# SPDX-License-Identifier: Apache-2.0 -# - - -# Loads Up Utilities and Commands Variables -source /usr/local/sbin/collect_parms -source /usr/local/sbin/collect_utils - -SERVICE="sm" -LOGFILE="${extradir}/sm.info" -echo "${hostname}: Service Management : ${LOGFILE}" - -############################################################################### -# Only Controller -############################################################################### - -if [ "$nodetype" = "controller" ] ; then - kill -SIGUSR1 $(>${COLLECT_ERROR_LOG} >> ${LOGFILE} -fi - -exit 0 diff --git a/tools/collector/scripts/collect_sysinv.sh b/tools/collector/scripts/collect_sysinv.sh deleted file mode 100755 index 3f27a2886..000000000 --- a/tools/collector/scripts/collect_sysinv.sh +++ /dev/null @@ -1,72 +0,0 @@ -#! /bin/bash -# -# Copyright (c) 2013-2014 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - - -# Loads Up Utilities and Commands Variables -source /usr/local/sbin/collect_parms -source /usr/local/sbin/collect_utils - -SERVICE="inventory" -LOGFILE="${extradir}/${SERVICE}.info" -RPMLOG="${extradir}/rpm.info" - -function is_service_active { - active=`sm-query service management-ip | grep "enabled-active"` - if [ -z "$active" ] ; then - return 0 - else - return 1 - fi -} - -############################################################################### -# Only Controller -############################################################################### -if [ "$nodetype" = "controller" ] ; then - - echo "${hostname}: Software Config ...: ${RPMLOG}" - # These go into the SERVICE.info file - delimiter ${RPMLOG} "rpm -qa" - rpm -qa >> ${RPMLOG} - - is_service_active - if [ "$?" = "0" ] ; then - exit 0 - fi - - echo "${hostname}: System Inventory ..: ${LOGFILE}" - - # These go into the SERVICE.info file - delimiter ${LOGFILE} "system host-list" - system host-list 2>>${COLLECT_ERROR_LOG} >> ${LOGFILE} - - delimiter ${LOGFILE} "system service-list" - system service-list 2>>${COLLECT_ERROR_LOG} >> ${LOGFILE} - - delimiter ${LOGFILE} "nova service-list" - nova service-list 2>>${COLLECT_ERROR_LOG} >> ${LOGFILE} - - delimiter ${LOGFILE} "neutron host-list" - neutron host-list 2>>${COLLECT_ERROR_LOG} >> ${LOGFILE} - - delimiter ${LOGFILE} "system host-port-list controller-0" - system host-port-list controller-0 2>>${COLLECT_ERROR_LOG} >> ${LOGFILE} - - delimiter ${LOGFILE} "system host-port-list controller-1" - system host-port-list controller-1 2>>${COLLECT_ERROR_LOG} >> ${LOGFILE} - - delimiter ${LOGFILE} "Dump all Instances" - nova list --fields name,status,OS-EXT-SRV-ATTR:host --all-tenant 2>>${COLLECT_ERROR_LOG} >> ${LOGFILE} - - delimiter ${LOGFILE} "vm-topology" - timeout 60 vm-topology --show all 2>>${COLLECT_ERROR_LOG} >> ${LOGFILE} - - cp -a /opt/platform ${extradir} -fi - - -exit 0 diff --git a/tools/collector/scripts/collect_tc.sh b/tools/collector/scripts/collect_tc.sh deleted file mode 100755 index 737461a88..000000000 --- a/tools/collector/scripts/collect_tc.sh +++ /dev/null @@ -1,82 +0,0 @@ -#! /bin/bash -# -# Copyright (c) 2013-2014 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - - -# Loads Up Utilities and Commands Variables -source /usr/local/sbin/collect_parms -source /usr/local/sbin/collect_utils - -SERVICE="tc" -LOGFILE="${extradir}/tc.info" -echo "${hostname}: Traffic Controls . 
: ${LOGFILE}" - -############################################################################### -# Interface Info -############################################################################### -delimiter ${LOGFILE} "cat /etc/network/interfaces" -if [ -f /etc/network/interfaces ]; then - cat /etc/network/interfaces >> ${LOGFILE} -else - echo "/etc/network/interfaces NOT FOUND" >> ${LOGFILE} -fi - -delimiter ${LOGFILE} "ip link" -ip link >> ${LOGFILE} - -for i in $(ip link | grep mtu | grep eth |awk '{print $2}' | sed 's#:##g'); do - - delimiter ${LOGFILE} "ethtool ${i}" - ethtool ${i} >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG} - - delimiter ${LOGFILE} "cat /sys/class/net/${i}/speed" - cat /sys/class/net/${i}/speed >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG} - - delimiter ${LOGFILE} "ethtool -S ${i}" - ethtool -S ${i} >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG} -done - -############################################################################### -# TC Configuration Script (/usr/local/bin/cgcs_tc_setup.sh) -############################################################################### -delimiter ${LOGFILE} "cat /usr/local/bin/cgcs_tc_setup.sh" -if [ -f /usr/local/bin/cgcs_tc_setup.sh ]; then - cat /usr/local/bin/cgcs_tc_setup.sh >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG} -else - echo "/usr/local/bin/cgcs_tc_setup.sh NOT FOUND" >> ${LOGFILE} -fi - -############################################################################### -# TC Configuration -############################################################################### -delimiter ${LOGFILE} "tc qdisc show" -tc qdisc show >> ${LOGFILE} - -for i in $(ip link | grep htb | awk '{print $2}' | sed 's#:##g'); do - - delimiter ${LOGFILE} "tc class show dev ${i}" - tc class show dev ${i} >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG} - - delimiter ${LOGFILE} "tc filter show dev ${i}" - tc filter show dev ${i} >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG} -done - -############################################################################### -# TC Statistics -############################################################################### -delimiter ${LOGFILE} "tc -s qdisc show" -tc -s qdisc show >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG} - -for i in $(ip link | grep htb | awk '{print $2}' | sed 's#:##g'); do - - delimiter ${LOGFILE} "tc -s class show dev ${i}" - tc -s class show dev ${i} >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG} - - delimiter ${LOGFILE} "tc -s filter show dev ${i}" - tc -s filter show dev ${i} >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG} -done - -exit 0 diff --git a/tools/collector/scripts/collect_utils b/tools/collector/scripts/collect_utils deleted file mode 100755 index 195b22e7c..000000000 --- a/tools/collector/scripts/collect_utils +++ /dev/null @@ -1,237 +0,0 @@ -#! /bin/bash -# -# Copyright (c) 2013-2019 Wind River Systems, Inc. 
-# -# SPDX-License-Identifier: Apache-2.0 -# - -########################################################################################## - -DEBUG=false - -# Fail Codes -PASS=0 -FAIL=1 -RETRY=2 - -FAIL_NODETYPE=3 - -FAIL_TIMEOUT=10 -FAIL_TIMEOUT1=11 -FAIL_TIMEOUT2=12 -FAIL_TIMEOUT3=13 -FAIL_TIMEOUT4=14 -FAIL_TIMEOUT5=15 -FAIL_TIMEOUT6=16 -FAIL_TIMEOUT7=17 -FAIL_TIMEOUT8=18 -FAIL_TIMEOUT9=19 - -FAIL_PASSWORD=30 -FAIL_PERMISSION=31 -FAIL_CLEANUP=32 -FAIL_UNREACHABLE=33 -FAIL_HOSTNAME=34 -FAIL_INACTIVE=35 -FAIL_PERMISSION_SKIP=36 -FAIL_OUT_OF_SPACE=37 -FAIL_INSUFFICIENT_SPACE=38 -FAIL_OUT_OF_SPACE_LOCAL=39 -FAIL_CREATE=39 - -# Warnings are above 200 -WARN_WARNING=200 -WARN_HOSTNAME=201 - -# Failure Strings -FAIL_OUT_OF_SPACE_STR="No space left on device" -FAIL_TAR_OUT_OF_SPACE_STR="tar: Error is not recoverable" -FAIL_INSUFFICIENT_SPACE_STR="Not enough space on device" - -# The minimum amount of % free space on /scratch to allow collect to proceed -MIN_PERCENT_SPACE_REQUIRED=75 - -# Log file path/names -COLLECT_LOG=/var/log/collect.log -COLLECT_ERROR_LOG=/tmp/collect_error.log - -function source_openrc_if_needed -{ - # get the node and subfunction types - nodetype="" - subfunction="" - PLATFORM_CONF=/etc/platform/platform.conf - if [ -e ${PLATFORM_CONF} ] ; then - source ${PLATFORM_CONF} - fi - - if [ "${nodetype}" != "controller" -a "${nodetype}" != "worker" -a "${nodetype}" != "storage" ] ; then - logger -t ${COLLECT_TAG} "could not identify nodetype ($nodetype)" - exit $FAIL_NODETYPE - fi - - ACTIVE=false - if [ "$nodetype" == "controller" ] ; then - # get local host activity state - OPENRC="/etc/platform/openrc" - if [ -e "${OPENRC}" ] ; then - OS_USERNAME="" - source ${OPENRC} - if [ "${OS_USERNAME}" != "" ] ; then - ACTIVE=true - fi - fi - fi -} - - -# Setup an expect command completion file. 
-# This is used to force serialization of expect -# sequences and highlight command completion -collect_done="collect done" -cmd_done_sig="expect done" -cmd_done_file="/usr/local/sbin/expect_done" - -# Compression Commands -TAR_ZIP_CMD="tar -cvzf" -TAR_UZIP_CMD="tar -xvzf" -TAR_CMD="tar -cvhf" -UNTAR_CMD="tar -xvf" -ZIP_CMD="gzip" -NICE_CMD="/usr/bin/nice -n19" -IONICE_CMD="/usr/bin/ionice -c2 -n7" -COLLECT_TAG="COLLECT" - -STARTDATE_OPTION="--start-date" -ENDDATE_OPTION="--end-date" - - -PROCESS_DETAIL_CMD="ps -e -H -o ruser,tid,pid,ppid,flags,stat,policy,rtprio,nice,priority,rss:10,vsz:10,sz:10,psr,stime,tty,cputime,wchan:14,cmd" -BUILD_INFO_CMD="cat /etc/build.info" - -################################################################################ -# Log Debug, Info or Error log message to syslog -################################################################################ -function log -{ - logger -t ${COLLECT_TAG} $@ -} - -function ilog -{ - echo "$@" - logger -t ${COLLECT_TAG} $@ - #logger -p local3.info -t ${COLLECT_TAG} $@ -} - -function elog -{ - echo "Error: $@" - logger -t ${COLLECT_TAG} $@ -} - -function wlog -{ - echo "Warning: $@" - logger -t ${COLLECT_TAG} $@ -} - -function set_debug_mode() -{ - DEBUG=${1} -} - -function dlog() -{ - if [ "$DEBUG" == true ] ; then - logger -t ${COLLECT_TAG} $@ - echo "Debug: $@" - fi -} - - -function delimiter() -{ - echo "--------------------------------------------------------------------" >> ${1} 2>>${COLLECT_ERROR_LOG} - echo "`date` : ${myhostname} : ${2}" >> ${1} 2>>${COLLECT_ERROR_LOG} - echo "--------------------------------------------------------------------" >> ${1} 2>>${COLLECT_ERROR_LOG} -} - -function log_slabinfo() -{ - PAGE_SIZE=$(getconf PAGE_SIZE) - cat /proc/slabinfo | awk -v page_size_B=${PAGE_SIZE} ' - BEGIN {page_KiB = page_size_B/1024; TOT_KiB = 0;} - (NF == 17) { - gsub(/[<>]/, ""); - printf("%-22s %11s %8s %8s %10s %12s %1s %5s %10s %12s %1s %12s %9s %11s %8s\n", - $2, $3, $4, $5, $6, $7, $8, $10, $11, $12, $13, $15, $16, $17, "KiB"); - } - (NF == 16) { - num_objs=$3; obj_per_slab=$5; pages_per_slab=$6; - KiB = (obj_per_slab > 0) ? page_KiB*num_objs/obj_per_slab*pages_per_slab : 0; - TOT_KiB += KiB; - printf("%-22s %11d %8d %8d %10d %12d %1s %5d %10d %12d %1s %12d %9d %11d %8d\n", - $1, $2, $3, $4, $5, $6, $7, $9, $10, $11, $12, $14, $15, $16, KiB); - } - END { - printf("%-22s %11s %8s %8s %10s %12s %1s %5s %10s %12s %1s %12s %9s %11s %8d\n", - "TOTAL", "-", "-", "-", "-", "-", ":", "-", "-", "-", ":", "-", "-", "-", TOT_KiB); - } - ' >> ${1} 2>>${COLLECT_ERROR_LOG} -} -########################################################################### -# -# Name : collect_errors -# -# Description: search COLLECT_ERROR_LOG for "No space left on device" logs -# Return 0 if no such logs are found. -# Return 1 if such logs are found -# -# Assumptions: Caller should assume a non-zero return as an indication of -# a corrupt or incomplete collect log -# -# Create logs and screen echos that record the error for the user. 
-#
-# May look for other errors in the future
-#
-###########################################################################
-
-listOfOutOfSpaceErrors=(
-"${FAIL_OUT_OF_SPACE_STR}"
-"${FAIL_TAR_OUT_OF_SPACE_STR}"
-"${FAIL_INSUFFICIENT_SPACE_STR}"
-)
-
-function collect_errors()
-{
-    local host=${1}
-    local RC=0
-
-    if [ -e "${COLLECT_ERROR_LOG}" ] ; then
-
-        ## now loop through known space related error strings
-        index=0
-        while [ "x${listOfOutOfSpaceErrors[index]}" != "x" ]
-        do
-            grep -q "${listOfOutOfSpaceErrors[index]}" ${COLLECT_ERROR_LOG}
-            if [ "$?" == "0" ] ; then
-
-                string="failed to collect from ${host} (reason:${FAIL_OUT_OF_SPACE}:${FAIL_OUT_OF_SPACE_STR})"
-
-                # /var/log/user.log it
-                logger -t ${COLLECT_TAG} "${string}"
-
-                # logs that show up in the foreground
-                echo "${string}"
-                echo "Increase available space in ${host}:${COLLECT_BASE_DIR} and retry operation."
-
-                # return error code
-                RC=1
-                break
-            fi
-            index=$(($index+1))
-        done
-    fi
-    return ${RC}
-}
diff --git a/tools/collector/scripts/etc.exclude b/tools/collector/scripts/etc.exclude
deleted file mode 100644
index be9bdc8fc..000000000
--- a/tools/collector/scripts/etc.exclude
+++ /dev/null
@@ -1,40 +0,0 @@
-/etc/postgresql
-/etc/alternatives
-/etc/terminfo
-/etc/tempest
-/etc/security
-/etc/yum
-/etc/collect
-/etc/collect.d
-/etc/logrotate.d
-/etc/logrotate*
-/etc/keystone
-/etc/pam.d
-/etc/environment
-/etc/sudoers.d
-/etc/sudoers
-/etc/passwd
-/etc/passwd-
-/etc/shadow
-/etc/shadow-
-/etc/gshadow
-/etc/gshadow-
-/etc/group
-/etc/group-
-/etc/ssh
-/etc/X11
-/etc/bluetooth
-/etc/chatscripts
-/etc/cron*
-/etc/rc5.d
-/etc/rc4.d
-/etc/rc1.d
-/etc/rc2.d
-/etc/bash_completion.d
-/etc/pm
-/etc/systemd/system/*.mount
-/etc/systemd/system/*.socket
-/etc/systemd/system/lvm2-lvmetad.service
-/etc/systemd/system/ctrl-alt-del.target
-/etc/ssl
-/etc/mtc/tmp
diff --git a/tools/collector/scripts/expect_done b/tools/collector/scripts/expect_done
deleted file mode 100755
index a846adb7e..000000000
--- a/tools/collector/scripts/expect_done
+++ /dev/null
@@ -1 +0,0 @@
-expect done
diff --git a/tools/collector/scripts/run.exclude b/tools/collector/scripts/run.exclude
deleted file mode 100644
index b1c1794cb..000000000
--- a/tools/collector/scripts/run.exclude
+++ /dev/null
@@ -1,12 +0,0 @@
-/var/run/sanlock/sanlock.sock
-/var/run/tgtd.ipc_abstract_namespace.0
-/var/run/wdmd/wdmd.sock
-/var/run/acpid.socket
-/var/run/rpcbind.sock
-/var/run/libvirt/libvirt-sock-ro
-/var/run/libvirt/libvirt-sock
-/var/run/dbus/system_bus_socket
-/var/run/named-chroot
-/var/run/avahi-daemon
-/var/run/neutron/metadata_proxy
-/var/run/.vswitch
diff --git a/tools/engtools/hostdata-collectors/README b/tools/engtools/hostdata-collectors/README
deleted file mode 100644
index b33a91d69..000000000
--- a/tools/engtools/hostdata-collectors/README
+++ /dev/null
@@ -1,12 +0,0 @@
-The engineering tools are meant to be installed as a patch. Therefore, the RPM is generated as part
-of the build but is not included in the image. Assuming your development environment is fully set up,
-simply run patch-engtools.sh to generate the patch:
-
-In this directory ($MY_REPO/stx/middleware/util/recipes-common/engtools/hostdata-collectors),
-enter the command:
->./patch-engtools.sh
-
-This generates ENGTOOLS-X.patch (where X is the TiS release version), which can be applied via sw-patch.
-
-The patch is built with the --all-nodes option by default. This can be changed to a combination of the following:
---controller, --compute, --storage, --controller-compute, and --compute-lowlatency.
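-
-For example, assuming a fully set-up development environment, a session might
-look like the following (sw-patch sub-command names can vary by release; see
-the sw-patch usage output on your system):
-
->cd $MY_REPO/stx/middleware/util/recipes-common/engtools/hostdata-collectors
->./patch-engtools.sh
->sudo sw-patch upload ENGTOOLS-X.patch
-
-To build a node-type-specific patch instead, change the --all-nodes argument
-in the patch_build.sh invocation inside patch-engtools.sh to one of the
-options listed above.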
diff --git a/tools/engtools/hostdata-collectors/centos/build_srpm.data b/tools/engtools/hostdata-collectors/centos/build_srpm.data
deleted file mode 100644
index 5d6264ad1..000000000
--- a/tools/engtools/hostdata-collectors/centos/build_srpm.data
+++ /dev/null
@@ -1,2 +0,0 @@
-SRC_DIR="scripts"
-TIS_PATCH_VER=4
diff --git a/tools/engtools/hostdata-collectors/centos/collect-engtools.spec b/tools/engtools/hostdata-collectors/centos/collect-engtools.spec
deleted file mode 100644
index 288577bce..000000000
--- a/tools/engtools/hostdata-collectors/centos/collect-engtools.spec
+++ /dev/null
@@ -1,101 +0,0 @@
-Summary: Host performance data collection tools package
-Name: engtools
-Version: 1.0
-Release: %{tis_patch_ver}%{?_tis_dist}
-License: Apache-2.0
-Group: Tools
-Packager: Wind River
-URL: http://www.windriver.com/
-BuildArch: noarch
-Source: %{name}-%{version}.tar.gz
-
-BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root
-BuildRequires: systemd
-Requires: iperf3
-
-%description
-This package contains data collection tools to monitor host performance.
-The tools are general-purpose engineering and debugging aids, covering
-overall memory, CPU occupancy, per-task CPU, per-task scheduling, and
-per-task I/O.
-
-# Don't try fancy stuff like debuginfo, which is useless on binary-only
-# packages; don't strip binaries either.
-# Be sure buildpolicy is set to do nothing.
-%define __spec_install_post %{nil}
-%define debug_package %{nil}
-%define __os_install_post %{_dbpath}/brp-compress
-%define _binaries_in_noarch_packages_terminate_build 0
-
-%define local_dir /usr/local
-%define local_bindir %{local_dir}/bin/
-%define local_initdir /etc/init.d/
-%define local_confdir /etc/engtools/
-%define local_systemddir /etc/systemd/system/
-
-%prep
-%setup -q
-
-%build
-# Empty section.
-
-%install
-mkdir -p %{buildroot}
-install -d 755 %{buildroot}%{local_bindir}
-# Install additional tools; memtop, occtop and schedtop are already in the image
-install -m 755 buddyinfo.py %{buildroot}%{local_bindir}
-install -m 755 chewmem %{buildroot}%{local_bindir}
-# Installing data collection scripts
-install -m 755 ceph.sh %{buildroot}%{local_bindir}
-install -m 755 cleanup-engtools.sh %{buildroot}%{local_bindir}
-install -m 755 collect-engtools.sh %{buildroot}%{local_bindir}
-install -m 755 diskstats.sh %{buildroot}%{local_bindir}
-install -m 755 engtools_util.sh %{buildroot}%{local_bindir}
-install -m 755 filestats.sh %{buildroot}%{local_bindir}
-install -m 755 iostat.sh %{buildroot}%{local_bindir}
-install -m 755 linux_benchmark.sh %{buildroot}%{local_bindir}
-install -m 755 memstats.sh %{buildroot}%{local_bindir}
-install -m 755 netstats.sh %{buildroot}%{local_bindir}
-install -m 755 postgres.sh %{buildroot}%{local_bindir}
-install -m 755 rabbitmq.sh %{buildroot}%{local_bindir}
-install -m 755 remote/rbzip2-engtools.sh %{buildroot}%{local_bindir}
-install -m 755 remote/rstart-engtools.sh %{buildroot}%{local_bindir}
-install -m 755 remote/rstop-engtools.sh %{buildroot}%{local_bindir}
-install -m 755 remote/rsync-engtools-data.sh %{buildroot}%{local_bindir}
-install -m 755 slab.sh %{buildroot}%{local_bindir}
-install -m 755 ticker.sh %{buildroot}%{local_bindir}
-install -m 755 top.sh %{buildroot}%{local_bindir}
-install -m 755 vswitch.sh %{buildroot}%{local_bindir}
-install -m 755 live_stream.py %{buildroot}%{local_bindir}
-# Installing conf file
-install -d 755 %{buildroot}%{local_confdir}
-install -m 644 -p -D cfg/engtools.conf %{buildroot}%{local_confdir}
-# Installing init script
-install -d 755 %{buildroot}%{local_initdir}
-install -m 755 init.d/collect-engtools.sh %{buildroot}%{local_initdir}
-# Installing service file
-install -d 755 %{buildroot}%{local_systemddir}
-install -m 644 -p -D collect-engtools.service %{buildroot}%{local_systemddir}
-
-%clean
-rm -rf $RPM_BUILD_ROOT
-
-%files
-%license LICENSE
-%defattr(-,root,root,-)
-%{local_bindir}/*
-%{local_confdir}/*
-%{local_initdir}/*
-%{local_systemddir}/*
-
-%post
-/bin/systemctl enable collect-engtools.service > /dev/null 2>&1
-/bin/systemctl start collect-engtools.service > /dev/null 2>&1
-
-%preun
-#/bin/systemctl --no-reload disable collect-engtools.sh.service > /dev/null 2>&1
-#/bin/systemctl stop collect-engtools.sh.service > /dev/null 2>&1
-%systemd_preun collect-engtools.service
-
-%postun
-%systemd_postun_with_restart collect-engtools.service
diff --git a/tools/engtools/hostdata-collectors/patch-engtools.sh b/tools/engtools/hostdata-collectors/patch-engtools.sh
deleted file mode 100755
index dbb80682e..000000000
--- a/tools/engtools/hostdata-collectors/patch-engtools.sh
+++ /dev/null
@@ -1,33 +0,0 @@
-#!/bin/bash
-# Designer patches:
-# http://twiki.wrs.com/PBUeng/Patching
-
-if [ -z $MY_WORKSPACE ] || [ -z $MY_REPO ]; then
-    echo "Some dev environment variables are not set."
-    echo "Refer to http://wiki.wrs.com/PBUeng/CentOSBuildProcess for instructions."
-    exit 1
-fi
-
-ENGTOOLS=$(ls ${MY_WORKSPACE}/std/rpmbuild/RPMS/engtools*noarch.rpm 2>/dev/null)
-if [ $? -ne 0 ]; then
-    echo "Engtools RPM has not been built. Please run \"build-pkgs engtools\" first."
- exit 1 -fi - -source ${MY_REPO}/stx/middleware/recipes-common/build-info/release-info.inc -#TiS_REL="16.10" -#PATCH_ID="ENGTOOLS-${TiS_REL}" -PATCH_ID="ENGTOOLS-${PLATFORM_RELEASE}" - -PWD=$(pwd) - -# Create CGCS Patch -cd ${MY_WORKSPACE} -PATCH_BUILD=${MY_REPO}/stx/stx-update/extras/scripts/patch_build.sh -${PATCH_BUILD} --id ${PATCH_ID} --reboot-required=N \ - --summary "System engineering data collection and analysis tools." \ - --desc "System engineering data collection and analysis tools." \ - --all-nodes ${ENGTOOLS} \ - --warn "Intended for system engineering use only." -cd ${PWD} -exit 0 diff --git a/tools/engtools/hostdata-collectors/scripts/LICENSE b/tools/engtools/hostdata-collectors/scripts/LICENSE deleted file mode 100644 index d64569567..000000000 --- a/tools/engtools/hostdata-collectors/scripts/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
-
-   END OF TERMS AND CONDITIONS
-
-   APPENDIX: How to apply the Apache License to your work.
-
-      To apply the Apache License to your work, attach the following
-      boilerplate notice, with the fields enclosed by brackets "[]"
-      replaced with your own identifying information. (Don't include
-      the brackets!) The text should be enclosed in the appropriate
-      comment syntax for the file format. We also recommend that a
-      file or class name and description of purpose be included on the
-      same "printed page" as the copyright notice for easier
-      identification within third-party archives.
-
-   Copyright [yyyy] [name of copyright owner]
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
diff --git a/tools/engtools/hostdata-collectors/scripts/buddyinfo.py b/tools/engtools/hostdata-collectors/scripts/buddyinfo.py
deleted file mode 100644
index d23b404bf..000000000
--- a/tools/engtools/hostdata-collectors/scripts/buddyinfo.py
+++ /dev/null
@@ -1,123 +0,0 @@
-#!/usr/bin/env python
-# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 textwidth=79 autoindent
-
-"""
-Python source code
-Last modified: 15 Feb 2014 - 13:38
-Last author: lmwangi at gmail com
-Displays the available memory fragments
-by querying /proc/buddyinfo
-Example:
-# python buddyinfo.py
-"""
-import optparse
-import os
-import re
-from collections import defaultdict
-import logging
-
-
-class Logger:
-    def __init__(self, log_level):
-        self.log_level = log_level
-
-    def get_formatter(self):
-        return logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
-
-    def get_handler(self):
-        return logging.StreamHandler()
-
-    def get_logger(self):
-        """Returns a Logger instance for the specified module_name"""
-        logger = logging.getLogger('main')
-        logger.setLevel(self.log_level)
-        log_handler = self.get_handler()
-        log_handler.setFormatter(self.get_formatter())
-        logger.addHandler(log_handler)
-        return logger
-
-
-class BuddyInfo(object):
-    """BuddyInfo DAO"""
-    def __init__(self, logger):
-        super(BuddyInfo, self).__init__()
-        self.log = logger
-        self.buddyinfo = self.load_buddyinfo()
-
-    def parse_line(self, line):
-        line = line.strip()
-        self.log.debug("Parsing line: %s" % line)
-        parsed_line = re.match("Node\s+(?P<numa_node>\d+).*zone\s+(?P<zone>\w+)\s+(?P<nr_free>.*)", line).groupdict()
-        self.log.debug("Parsed line: %s" % parsed_line)
-        return parsed_line
-
-    def read_buddyinfo(self):
-        buddyhash = defaultdict(list)
-        buddyinfo = open("/proc/buddyinfo").readlines()
-        for line in map(self.parse_line, buddyinfo):
-            numa_node = int(line["numa_node"])
-            zone = line["zone"]
-            free_fragments = [int(nr) for nr in line["nr_free"].split()]
-            max_order = len(free_fragments)
-            fragment_sizes = self.get_order_sizes(max_order)
-            usage_in_bytes = [block[0] * block[1] for block in zip(free_fragments, fragment_sizes)]
-            buddyhash[numa_node].append({
-                "zone": zone,
-                "nr_free": free_fragments,
-                "sz_fragment": fragment_sizes,
-                "usage": usage_in_bytes})
-        return buddyhash
-
-    def load_buddyinfo(self):
-        buddyhash = self.read_buddyinfo()
-        self.log.info(buddyhash)
-        return buddyhash
-
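-    # For example, with a 4 KiB page size (a common default), an order-n
-    # fragment is page_size * 2**n bytes: order 0 is 4 KiB, order 1 is 8 KiB,
-    # and order 10 is 4 MiB; see get_order_sizes() below.
-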
-
-    def page_size(self):
-        return os.sysconf("SC_PAGE_SIZE")
-
-    def get_order_sizes(self, max_order):
-        return [self.page_size() * 2**order for order in range(0, max_order)]
-
-    def __str__(self):
-        ret_string = ""
-        width = 20
-        for node in self.buddyinfo:
-            ret_string += "Node: %s\n" % node
-            for zoneinfo in self.buddyinfo.get(node):
-                ret_string += " Zone: %s\n" % zoneinfo.get("zone")
-                ret_string += " Free KiB in zone: %.2f\n" % (sum(zoneinfo.get("usage")) / (1024.0))
-                ret_string += '\t{0:{align}{width}} {1:{align}{width}} {2:{align}{width}}\n'.format(
-                    "Fragment size", "Free fragments", "Total available KiB",
-                    width=width,
-                    align="<")
-                for idx in range(len(zoneinfo.get("sz_fragment"))):
-                    ret_string += '\t{order:{align}{width}} {nr:{align}{width}} {usage:{align}{width}}\n'.format(
-                        width=width,
-                        align="<",
-                        order=zoneinfo.get("sz_fragment")[idx],
-                        nr=zoneinfo.get("nr_free")[idx],
-                        usage=zoneinfo.get("usage")[idx] / 1024.0)
-
-        return ret_string
-
-
-def main():
-    """Main function. Called when this file is run as a script"""
-    usage = "usage: %prog [options]"
-    parser = optparse.OptionParser(usage)
-    parser.add_option("-s", "--size", dest="size", choices=["B", "K", "M"],
-                      action="store", type="choice", help="Return results in bytes, KiB, MiB")
-
-    (options, args) = parser.parse_args()
-    logger = Logger(logging.DEBUG).get_logger()
-    logger.info("Starting....")
-    logger.info("Parsed options: %s" % options)
-    print(logger)
-    buddy = BuddyInfo(logger)
-    print(buddy)
-
-
-if __name__ == '__main__':
-    main()
-
diff --git a/tools/engtools/hostdata-collectors/scripts/ceph.sh b/tools/engtools/hostdata-collectors/scripts/ceph.sh
deleted file mode 100644
index 61e5540d7..000000000
--- a/tools/engtools/hostdata-collectors/scripts/ceph.sh
+++ /dev/null
@@ -1,58 +0,0 @@
-#!/bin/bash
-# Usage: ceph.sh [-p <period_mins>] [-i <interval_seconds>] [-c <cpulist>] [-h]
-TOOLBIN=$(dirname $0)
-
-# Initialize tools environment variables, and define common utility functions
-. ${TOOLBIN}/engtools_util.sh
-tools_init
-if [ $? -ne 0 ]; then
-    echo "FATAL, tools_init - could not setup environment"
-    exit $?
-fi
-
-# Enable use of INTERVAL_SEC sample interval
-OPT_USE_INTERVALS=1
-
-# Print key ceph statistics
-function print_ceph {
-    print_separator
-    TOOL_HIRES_TIME
-
-    cmd='ceph -s'
-    ${ECHO} "# ${cmd}" ; ${cmd} ; ${ECHO}
-
-    cmd='ceph osd tree'
-    ${ECHO} "# ${cmd}" ; ${cmd} ; ${ECHO}
-
-    cmd='ceph df detail'
-    ${ECHO} "# ${cmd}" ; ${cmd} ; ${ECHO}
-}
-
-#-------------------------------------------------------------------------------
-# MAIN Program:
-#-------------------------------------------------------------------------------
-# Parse input options
-tools_parse_options "${@}"
-
-# Set affinity of current script
-CPULIST=""
-set_affinity ${CPULIST}
-
-LOG "collecting ${TOOLNAME} for ${PERIOD_MIN} minutes, with ${INTERVAL_SEC} second sample intervals."
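-
-# For example, with the engtools_util.sh defaults of PERIOD_MIN=5 and
-# INTERVAL_SEC=60, the sampling loop below computes REPEATS = 5*60/60 = 5,
-# i.e. five one-minute samples, plus one final sample after the loop.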
- -# Print tools generic tools header -tools_header - -# Calculate number of sample repeats based on overall interval and sampling interval -((REPEATS = PERIOD_MIN * 60 / INTERVAL_SEC)) - -for ((rep=1; rep <= REPEATS ; rep++)); do - print_ceph - sleep ${INTERVAL_SEC} -done -print_ceph -LOG "done" - -# normal program exit -tools_cleanup 0 -exit 0 diff --git a/tools/engtools/hostdata-collectors/scripts/cfg/engtools.conf b/tools/engtools/hostdata-collectors/scripts/cfg/engtools.conf deleted file mode 100644 index aefc62ae8..000000000 --- a/tools/engtools/hostdata-collectors/scripts/cfg/engtools.conf +++ /dev/null @@ -1,99 +0,0 @@ -# engtools configuration - -# You may comment out any unwanted fields under the Intervals section, but do not comment out any other configuration options as the python parsing utility will complain. Please follow the comments - -[LabConfiguration] -# Set this option to Y/N depending on the setup of your lab -CPE_LAB=N - -[LiveStream] -# Set this option to Y/N before patch creation to enable/disable live stats collection -ENABLE_LIVE_STREAM=Y - -# Set the duration of the live stream capture utility. Leave blank for continuous collection. Ex: 1s,1m,1h,1d -DURATION= - -[StaticCollection] -# Set this option to Y/N before patch creation to enable/disable static stats collection -ENABLE_STATIC_COLLECTION=N - -[CollectInternal] -# controller external OAM interface used to communicate with remote server. If unset, the first interface from ifconfig will be used -CONTROLLER0_EXTERNAL_INTERFACE= -CONTROLLER1_EXTERNAL_INTERFACE= - -[RemoteServer] -# remote server influx and grafana info -INFLUX_IP=128.224.186.61 -INFLUX_PORT=8086 -INFLUX_DB= -GRAFANA_PORT=3000 - -# This key is created through Grafana. If deleted, a new key (with admin privileges) must be created and copied here -GRAFANA_API_KEY=eyJrIjoiSkR1SXcxbkVVckd1dW9PMHFKS0EzQ2hQWTd1YUhtSkIiLCJuIjoiZGJfY3JlYXRvciIsImlkIjoxfQ== - -[Intervals] -# Set the collection interval (in seconds) to be used in the live_stream.py script. If unset or commented out, that field will not be collected -memtop=10 -memstats=10 -occtop=10 -schedtop=10 -load_avg=3 -cpu_count=60 -diskstats=30 -iostat=10 -filestats=30 -netstats=10 -postgres=30 -rabbitmq=3600 -vswitch=120 -api_requests=5 - -[AdditionalOptions] -# Set this option to Y/N to enable/disable Openstack API GET/POST collection -API_REQUESTS=Y - -# Set this option to Y/N to enable/disable the collection of all services and not just the ones listed below. Note that this hasn't been tested thoroughly -ALL_SERVICES=N - -# Set this option to Y/N to enable/disable fast postgres connections collection. By default, postgres connections use the same collection interval as postgres DB size (set above), this option will set the collection interval to 0 seconds while not affecting the above postgres collection interval -FAST_POSTGRES_CONNECTIONS=N - -# Set this option to Y/N to enable/disable automatic database deletion for InfluxDB and Grafana. 
As of now, this feature does not work with the engtools patch -AUTO_DELETE_DB=N - -[ControllerServices] -CONTROLLER_SERVICE_LIST=aodh-api aodh-listener aodh-notifier aodh-evaluator barbican-api barbican-keystone-listener barbican-worker beam.smp ceilometer-api ceilometer-collector ceilometer-agent-notification ceilometer-mem-db ceph-mon mgr-restful-plugin ceph-alarm-manager cinder-api cinder-volume cinder-scheduler glance-api glance-registry gnocchi-api gnocchi-metricd heat-api heat-engine heat-api-cfn heat-api-cloudwatch hbsAgent ironic-api ironic-conductor magnum-api magnum-conductor neutron-server nova-api nova-api-proxy nova-compute nova-scheduler nova-conductor nova-console-auth nova-novncproxy nova-placement-api panko-api sysinv-api sysinv-conductor postgres fmManager rabbitmq-server gunicorn postgres snmpd patch-alarm-manager lighttpd sw-patch-controller-daemon nfv-vim nfv-vim-api nfv-vim-webserver slapd mtcAgent guestAgent dcmanager-api dcmanager-manager dcorch-engine dcorch-patch-api-proxy dcorch-snmp dcorch-sysinv-api-proxy memcached influxd - -[ComputeServices] -COMPUTE_SERVICE_LIST=nova-compute neutron-dhcp-agent neutron-metadata-agent neutron-sriov-nic-agent kvm libvirtd guestServer host_agent dmeventd virtlockd - -[StorageServices] -STORAGE_SERVICE_LIST=ceph-mon ceph-osd ceph-manager mgr-restful-plugin - -[RabbitmqServices] -RABBITMQ_QUEUE_LIST=notifications.info versioned_notifications.info - -[CommonServices] -COMMON_SERVICE_LIST=dnsmasq ceilometer-polling haproxy hwmond pmond fsmond sw-patch-agent sysinv-agent syslog-ng hostwd iscsid io-monitor-manager acpid hbsClient logmgmt mtcClient mtcalarmd mtclogd sshd ntpd ptp4l phc2sys smartd sm sm-eru sm-watchdog sm-api ceilometer keyring cinder-rtstool tuned polkitd lldpd IPaddr2 dnsmasq systemd-udevd systemd-journald logrotate collectd - -[StaticServices] -STATIC_SERVICE_LIST=occtop memtop schedtop top.sh iostat.sh netstats.sh diskstats.sh memstats.sh filestats.sh ceph.sh postgres.sh rabbitmq.sh vswitch.sh - -[OpenStackServices] -OPEN_STACK_SERVICE_LIST=nova cinder aodh barbican ceilometer heat glance ceph horizon keystone puppet sysinv neutron nova_api postgres panko nova_cell0 magnum ironic murano gnocchi - -[SkipList] -SKIP_LIST=ps top sh curl awk wc sleep lsof cut grep ip tail su - -[ExcludeList] -EXCLUDE_LIST=python python2 bash perl sudo init - -[ApiStatsConstantPorts] -DB_PORT_NUMBER=5432 -RABBIT_PORT_NUMBER=5672 - -# The api stats data structure has three fields: the name displayed in ps -ef, the name displayed in lsof -Pn -i tcp and the specific api port of the service. 
-[ApiStatsServices]
-API_STATS_STRUCTURE=ironic-conductor;ironic-co;|ironic-api;ironic-ap;6485|radosgw-swift;radosgw;8|magnum-conductor;magnum-co;|magnum-api;magnum-ap;9511|murano-api;murano-ap;8082|murano-engine;murano-en;|keystone-public;gunicorn;5000|openstack_dashboard.wsgi;gunicorn;8080|gnocchi-api;gunicorn;8041|aodh-api;gunicorn;8042|panko-api;gunicorn;8977|sysinv-conductor;sysinv-co ;|neutron-server;neutron-s;9696|nova-conductor;nova-cond ;|sysinv-agent;sysinv-ag;|sysinv-api;sysinv-ap;6385|nova-api;nova-api ;18774|cinder-api;cinder-a;8776|glance-api;glance-a;9292|vim;nfv-vim;4545|heat-api;heat-a;8004|heat-engine;heat-e;8004|barbican-api;gunicorn;9311
-
diff --git a/tools/engtools/hostdata-collectors/scripts/chewmem b/tools/engtools/hostdata-collectors/scripts/chewmem
deleted file mode 100644
index 03ed3d8a2..000000000
--- a/tools/engtools/hostdata-collectors/scripts/chewmem
+++ /dev/null
@@ -1,86 +0,0 @@
-#!/usr/bin/perl
-# Usage:
-#  ./chewmem.pl <MiB>
-
-# Description:
-# This will create a character array requiring "MiB" of actual memory.
-# Summarize high-level memory usage.
-
-# Ideally we can demonstrate creating larger and larger
-# successful memory allocations until Avail is near 0.
-# It is very likely to trigger OOM Killer or cause a reset
-# if we run completely out of memory.
-
-use warnings;
-use strict;
-use POSIX qw(strftime);
-
-sub show_memusage() {
-    our $count;
-    $::count++; $::count %= 15;
-
-    my $Ki = 1024.0;
-    my ($MemTotal, $MemFree, $Buffers, $Cached, $CommitLimit, $Committed_AS, $Slab, $SReclaimable);
-    # Process all entries of MEMINFO
-    my $file = '/proc/meminfo';
-    open(FILE, $file) || die "Cannot open file: $file ($!)";
-    MEMINFO_LOOP: while($_ = <FILE>) {
-        s/[\0\e\f\r\a]//g; chomp; # strip control characters if any
-        last MEMINFO_LOOP if (/^\s*$/); # end at blank-line
-        if (/\bMemTotal:\s+(\d+)\s+kB/) {
-            $MemTotal = $1; next MEMINFO_LOOP;
-        }
-        if (/\bMemFree:\s+(\d+)\s+kB/) {
-            $MemFree = $1; next MEMINFO_LOOP;
-        }
-        if (/\bBuffers:\s+(\d+)\s+kB/) {
-            $Buffers = $1; next MEMINFO_LOOP;
-        }
-        if (/\bCached:\s+(\d+)\s+kB/) {
-            $Cached = $1; next MEMINFO_LOOP;
-        }
-        if (/\bCommitLimit:\s+(\d+)\s+kB/) {
-            $CommitLimit = $1; next MEMINFO_LOOP;
-        }
-        if (/\bCommitted_AS:\s+(\d+)\s+kB/) {
-            $Committed_AS = $1; next MEMINFO_LOOP;
-        }
-        if (/\bSlab:\s+(\d+)\s+kB/) {
-            $Slab = $1; next MEMINFO_LOOP;
-        }
-        if (/\bSReclaimable:\s+(\d+)\s+kB/) {
-            $SReclaimable = $1; next MEMINFO_LOOP;
-        }
-    }
-    close(FILE);
-
-    my $Avail_MiB = ($MemFree + $Cached + $Buffers + $SReclaimable)/$Ki;
-    my $Strict_MiB = ($CommitLimit - $Committed_AS)/$Ki;
-    my $now = strftime "%Y-%m-%d %H:%M:%S", localtime();
-    if ($::count == 1) {
-        printf "%19s %6s %6s %6s %6s %6s %6s %6s %6s %6s\n",
-            'yyyy-mm-dd hh:mm:ss', 'Tot', 'Free', 'Ca', 'Buf', 'Slab', 'CAS', 'CLim', 'Avail', 'Strict';
-    }
-    printf "%19s %6.1f %6.1f %6.1f %6.1f %6.1f %6.1f %6.1f %6.1f %6.1f\n",
-        $now, $MemTotal/$Ki, $MemFree/$Ki, $Cached/$Ki, $Buffers/$Ki, $Slab/$Ki,
-        $Committed_AS/$Ki, $CommitLimit/$Ki, $Avail_MiB, $Strict_MiB;
-}
-
-#-------------------------------------------------------------------------------
-# MAIN PROGRAM
-# Autoflush output
-select(STDERR);
-$| = 1;
-select(STDOUT); # default
-$| = 1;
-
-my $MiB = $ARGV[0] ||=0.0;
-my $A = "A" x (1024*1024*$MiB/2);
-print "Allocating $MiB MiB character array.\n";
-while(1) {
-    sleep(1);
-    show_memusage();
-}
-exit 0;
-
-1;
diff --git a/tools/engtools/hostdata-collectors/scripts/cleanup-engtools.sh b/tools/engtools/hostdata-collectors/scripts/cleanup-engtools.sh
deleted file mode 100644
index 5d5701b53..000000000
--- a/tools/engtools/hostdata-collectors/scripts/cleanup-engtools.sh
+++ /dev/null
@@ -1,54 +0,0 @@
-#!/bin/bash
-# Purpose:
-# Some of the engtools scripts are not shutting down gracefully.
-
-# Define common utility functions
-TOOLBIN=$(dirname $0)
-. ${TOOLBIN}/engtools_util.sh
-if [ $UID -ne 0 ]; then
-    ERRLOG "Require sudo/root access."
-    exit 1
-fi
-
-declare -a TOOLS
-TOOLS=()
-TOOLS+=('collect-engtools.sh')
-TOOLS+=('ceph.sh')
-TOOLS+=('diskstats.sh')
-TOOLS+=('iostat.sh')
-TOOLS+=('rabbitmq.sh')
-TOOLS+=('ticker.sh')
-TOOLS+=('top.sh')
-TOOLS+=('memstats.sh')
-TOOLS+=('netstats.sh')
-TOOLS+=('postgres.sh')
-TOOLS+=('vswitch.sh')
-TOOLS+=('filestats.sh')
-TOOLS+=('live_stream.py')
-
-LOG "Cleanup engtools:"
-
-# Brute force methods (assume trouble with: service collect-engtools.sh stop)
-# ( be sure not to clobber /etc/init.d/collect-engtools.sh )
-LOG "kill processes brute force"
-pids=( $(pidof -x /usr/local/bin/collect-engtools.sh) )
-if [ ${#pids[@]} -ne 0 ]; then
-    LOG "killing: ${pids[@]}"
-    for pid in ${pids[@]}; do
-        LOG "kill: [ ${pid} ] "
-        pkill -KILL -P ${pid}
-        kill -9 ${pid}
-    done
-    pkill -KILL iostat
-    pkill -KILL top
-else
-    LOG "no pids found"
-fi
-
-LOG "remove pidfiles"
-for TOOL in "${TOOLS[@]}"; do
-    rm -f -v /var/run/${TOOL}.pid
-done
-LOG "done"
-
-exit 0
diff --git a/tools/engtools/hostdata-collectors/scripts/collect-engtools.service b/tools/engtools/hostdata-collectors/scripts/collect-engtools.service
deleted file mode 100644
index e00e1cd65..000000000
--- a/tools/engtools/hostdata-collectors/scripts/collect-engtools.service
+++ /dev/null
@@ -1,15 +0,0 @@
-[Unit]
-Description=Engineering data collection tools to monitor host performance
-Requires=network.service
-After=network.service getty.target
-
-[Service]
-Type=forking
-ExecStart=/etc/init.d/collect-engtools.sh start
-ExecStop=/etc/init.d/collect-engtools.sh stop
-ExecReload=/etc/init.d/collect-engtools.sh reload
-PIDFile=/var/run/collect-engtools.sh.pid
-Restart=always
-
-[Install]
-WantedBy=multi-user.target
diff --git a/tools/engtools/hostdata-collectors/scripts/collect-engtools.sh b/tools/engtools/hostdata-collectors/scripts/collect-engtools.sh
deleted file mode 100644
index e59f382b4..000000000
--- a/tools/engtools/hostdata-collectors/scripts/collect-engtools.sh
+++ /dev/null
@@ -1,333 +0,0 @@
-#!/bin/bash
-# Usage:
-#  collect-engtools.sh [-f] [-p <period_mins>] [-i <interval_seconds>] [-c <cpulist>] [-h]
-
-# Define common utility functions
-TOOLBIN=$(dirname $0)
-. 
${TOOLBIN}/engtools_util.sh
-
-# ENABLE DEBUG (0=disable, 1=enable)
-OPT_DEBUG=0
-
-# Set options for long soak (vs. shorter collection)
-#OPT_SOAK=0 # long soak
-OPT_SOAK=1 # few hour soak
-#OPT_SOAK=2 # < hour soak
-
-# Define command to set nice + ionice
-CMD_IDLE=$( cmd_idle_priority )
-
-# Purge configuration options
-# - how much data may be created per cycle
-PURGE_HEADROOM_MB=100
-# - how much remaining space to leave
-PURGE_HEADROOM_PERCENT=15
-# - maximum size of data collection
-PURGE_MAXUSAGE_MB=1000
-
-# Affine to pinned cores
-AFFINE_PINNED=1
-
-# Line-buffer stream output (instead of buffered)
-STDBUF="stdbuf -oL"
-
-# Define some common durations
-DUR_60MIN_IN_SEC=$[60*60]
-DUR_30MIN_IN_SEC=$[30*60]
-DUR_15MIN_IN_SEC=$[15*60]
-DUR_10MIN_IN_SEC=$[10*60]
-DUR_5MIN_IN_SEC=$[5*60]
-DUR_1MIN_IN_SEC=$[1*60]
-
-# Global variables
-declare -a parallel_outfiles
-declare df_size_bytes
-declare df_avail_bytes
-declare du_used_bytes
-declare tgt_avail_bytes
-declare tgt_used_bytes
-
-# do_parallel_commands - launch parallel tools with separate output files
-function do_parallel_commands {
-    parallel_outfiles=()
-    for elem in "${tlist[@]}"; do
-        tool=""; period=""; repeat=""; interval=""
-        my_hash="elem[*]"
-        local ${!my_hash}
-        if [ ! -z "${name}" ]; then
-            fname="${TOOL_DEST_DIR}/${HOSTNAME}_${timestamp}_${name}"
-            parallel_outfiles+=( $fname )
-            LOG "collecting ${tool}, ${interval} second intervals, to: ${fname}"
-            if [ ! -z "${period}" ]; then
-                ${STDBUF} ${tool} -p ${period} -i ${interval} > ${fname} 2>/dev/null &
-            elif [ ! -z "${repeat}" ]; then
-                ${STDBUF} ${tool} --repeat=${repeat} --delay=${interval} > ${fname} 2>/dev/null &
-            fi
-        else
-            # run without file output (e.g., ticker)
-            ${STDBUF} ${tool} -p ${period} -i ${interval} 2>/dev/null &
-        fi
-    done
-}
-
-# get_current_avail_usage() - get output destination file-system usage and
-# availability.
-# - updates: df_size_bytes, df_avail_bytes, du_used_bytes
-function get_current_avail_usage {
-    local -a df_arr_bytes=( $(df -P --block-size=1 ${TOOL_DEST_DIR} | awk 'NR==2 {print $2, $4}') )
-    df_size_bytes=${df_arr_bytes[0]}
-    df_avail_bytes=${df_arr_bytes[1]}
-    du_used_bytes=$(du --block-size=1 ${TOOL_DEST_DIR} | awk 'NR==1 {print $1}')
-}
-
-# purge_oldest_files() - remove oldest files based on file-system available space,
-# and maximum collection size
-function purge_oldest_files {
-    # get current file-system usage
-    get_current_avail_usage
-    msg=$(printf "avail %d MB, headroom %d MB; used %d MB, max %d MB" \
-        $[$df_avail_bytes/1024/1024] $[$tgt_avail_bytes/1024/1024] \
-        $[$du_used_bytes/1024/1024] $[$tgt_used_bytes/1024/1024])
-    LOG "usage: ${msg}"
-
-    if [[ $df_avail_bytes -lt $tgt_avail_bytes ]] || \
-        [[ $du_used_bytes -gt $tgt_used_bytes ]]; then
-        # wait for compression to complete
-        wait
-
-        get_current_avail_usage
-        if [[ $df_avail_bytes -lt $tgt_avail_bytes ]]; then
-            msg=$(printf "purge: avail %d MB < target %d MB" \
-                $[$df_avail_bytes/1024/1024] $[$tgt_avail_bytes/1024/1024] )
-            LOG "purge: ${msg}"
-        fi
-        if [[ $du_used_bytes -gt $tgt_used_bytes ]]; then
-            msg=$(printf "purge: used %d MB > target %d MB" \
-                $[$du_used_bytes/1024/1024] $[$tgt_used_bytes/1024/1024] )
-            LOG "purge: ${msg}"
-        fi
-    else
-        return
-    fi
-
-    # remove files in oldest time sorted order until we meet usage targets,
-    # incrementally updating usage as we remove files
-    for file in $( ls -rt ${TOOL_DEST_DIR}/${HOSTNAME}_* 2>/dev/null ); do
-        if [[ $df_avail_bytes -ge $tgt_avail_bytes ]] && \
-            [[ $du_used_bytes -le $tgt_used_bytes ]]; then
-            break
-        fi
-
-        if [ ${OPT_DEBUG} -eq 1 ]; then
-            msg="purge: file=$file"
-            if [[ $df_avail_bytes -lt $tgt_avail_bytes ]]; then
-                msg="${msg}, < AVAIL"
-            fi
-            if [[ $du_used_bytes -gt $tgt_used_bytes ]]; then
-                msg="${msg}, > MAXUSAGE"
-            fi
-            LOG "${msg}"
-        fi
-
-        sz_bytes=$(stat --printf="%s" $file)
-        ((df_avail_bytes += sz_bytes))
-        ((du_used_bytes -= sz_bytes))
-        rm -fv ${file}
-    done
-}
-
-#-------------------------------------------------------------------------------
-# MAIN Program:
-#-------------------------------------------------------------------------------
-# Read configuration variable file if it is present
-NAME=collect-engtools.sh
-[ -r /etc/default/$NAME ] && . /etc/default/$NAME
-
-# Initialize tool
-tools_init
-
-# Parse input options
-tools_parse_options "${@}"
-
-# Set affinity of current script
-CPULIST=""
-
-# Affine tools to NOVA pinned cores (i.e., non-cpu 0)
-# - remove interference with cpu 0
-if [ "${AFFINE_PINNED}" -eq 1 ]; then
-    NOVA_CONF=/etc/nova/compute_extend.conf
-    if [ -f "${NOVA_CONF}" ]; then
-        source "${NOVA_CONF}"
-        CPULIST=${compute_pinned_cpulist}
-    else
-        CPULIST=""
-    fi
-fi
-set_affinity ${CPULIST}
-
-# Define output directory
-if [[ "${HOSTNAME}" =~ "controller-" ]]; then
-    TOOL_DEST_DIR=/scratch/syseng_data/${HOSTNAME}
-elif [[ "${HOSTNAME}" =~ "compute-" ]]; then
-    TOOL_DEST_DIR=/tmp/syseng_data/${HOSTNAME}
-else
-    TOOL_DEST_DIR=/tmp/syseng_data/${HOSTNAME}
-fi
-mkdir -p ${TOOL_DEST_DIR}
-
-# Define daemon log output
-timestamp=$( date +"%Y-%0m-%0e_%H%M" )
-DAEMON_OUT="${TOOL_DEST_DIR}/${HOSTNAME}_${timestamp}_${TOOLNAME}.log"
-
-# Redirect stdout and append to log if not connected to TTY
-if test !
-t 1 ; then - exec 1>> ${DAEMON_OUT} -fi - -# Get current availability and usage -get_current_avail_usage - -# Calculate disk usage and availability purge targets -df_offset_bytes=$[$PURGE_HEADROOM_MB*1024*1024] -tgt_used_bytes=$[$PURGE_MAXUSAGE_MB*1024*1024] -((tgt_avail_bytes = df_size_bytes/100*PURGE_HEADROOM_PERCENT + df_offset_bytes)) - -# Set granularity based on duration -if [ $PERIOD_MIN -le 30 ]; then - GRAN_MIN=5 -else - GRAN_MIN=60 -fi - -# Adjust repeats and intervals based on GRAN_MIN granularity -PERIOD_MIN=$[($PERIOD_MIN+(GRAN_MIN-1))/GRAN_MIN*GRAN_MIN] -((REPEATS = PERIOD_MIN/GRAN_MIN)) -GRAN_MIN_IN_SEC=$[$GRAN_MIN*60] -if [ ${INTERVAL_SEC} -gt ${GRAN_MIN_IN_SEC} ]; then - INTERVAL_SEC=${GRAN_MIN_IN_SEC} -fi - -# Define tools and options -# [ JGAULD - need config file for customization; long soak vs specific tools ] -# [ Ideally sample < 5 second granularity, but files get big, and tool has cpu overhead ] -# [ Need < 5 second granularity to see cache pressure/flush issues ] -# [ Desire 60 sec interval for soak ] -if [ ${OPT_SOAK} -eq 1 ]; then - # Desire 60 second or greater interval for longer term data collections, - # otherwise collection files get too big. - schedtop_interval=20 - occtop_interval=60 - memtop_interval=60 - netstats_interval=60 - # JGAULD: temporarily increase frequency to 1 min - postgres_interval=${DUR_1MIN_IN_SEC} - #postgres_interval=${DUR_15MIN_IN_SEC} - rabbitmq_interval=${DUR_15MIN_IN_SEC} - ceph_interval=${DUR_15MIN_IN_SEC} - diskstats_interval=${DUR_15MIN_IN_SEC} - memstats_interval=${DUR_15MIN_IN_SEC} - filestats_interval=${DUR_15MIN_IN_SEC} -elif [ ${OPT_SOAK} -eq 2 ]; then - # Assume much shorter collection (eg, < hours) - schedtop_interval=2 # i.e., 2 second interval - occtop_interval=2 # i.e., 2 second interval - memtop_interval=1 # i.e., 1 second interval - netstats_interval=30 # i.e., 30 second interval - postgres_interval=${DUR_5MIN_IN_SEC} - rabbitmq_interval=${DUR_5MIN_IN_SEC} - ceph_interval=${DUR_5MIN_IN_SEC} - diskstats_interval=${DUR_5MIN_IN_SEC} - memstats_interval=${DUR_5MIN_IN_SEC} - filestats_interval=${DUR_5MIN_IN_SEC} -else - # Assume shorter collection (eg, < a few hours) - schedtop_interval=5 # i.e., 5 second interval - occtop_interval=5 # i.e., 5 second interval - memtop_interval=5 # i.e., 5 second interval - netstats_interval=30 # i.e., 30 second interval - postgres_interval=${DUR_5MIN_IN_SEC} - rabbitmq_interval=${DUR_5MIN_IN_SEC} - ceph_interval=${DUR_5MIN_IN_SEC} - diskstats_interval=${DUR_5MIN_IN_SEC} - memstats_interval=${DUR_5MIN_IN_SEC} - filestats_interval=${DUR_5MIN_IN_SEC} -fi -schedtop_repeat=$[ $PERIOD_MIN * 60 / $schedtop_interval ] -occtop_repeat=$[ $PERIOD_MIN * 60 / $occtop_interval ] -memtop_repeat=$[ $PERIOD_MIN * 60 / $memtop_interval ] -netstats_repeat=$[ $PERIOD_MIN * 60 / $netstats_interval ] - -# Disable use of INTERVAL_SEC sample interval -OPT_USE_INTERVALS=0 - -# Define parallel engtools configuration -# - tool name, filename, and collection interval attributes -BINDIR=/usr/bin -LBINDIR=/usr/local/bin - -. 
/etc/engtools/engtools.conf - -declare -a tlist -if [[ ${ENABLE_STATIC_COLLECTION} == "Y" ]] || [[ ${ENABLE_STATIC_COLLECTION} == "y" ]]; then - tlist+=( "tool=${LBINDIR}/top.sh name=top period=${PERIOD_MIN} interval=${DUR_1MIN_IN_SEC}" ) - tlist+=( "tool=${LBINDIR}/iostat.sh name=iostat period=${PERIOD_MIN} interval=${DUR_1MIN_IN_SEC}" ) - tlist+=( "tool=${LBINDIR}/netstats.sh name=netstats period=${PERIOD_MIN} interval=${netstats_interval}" ) - tlist+=( "tool=${BINDIR}/occtop name=occtop repeat=${occtop_repeat} interval=${occtop_interval}" ) - tlist+=( "tool=${BINDIR}/memtop name=memtop repeat=${memtop_repeat} interval=${memtop_interval}" ) - tlist+=( "tool=${BINDIR}/schedtop name=schedtop repeat=${schedtop_repeat} interval=${schedtop_interval}" ) - tlist+=( "tool=${LBINDIR}/diskstats.sh name=diskstats period=${PERIOD_MIN} interval=${diskstats_interval}" ) - tlist+=( "tool=${LBINDIR}/memstats.sh name=memstats period=${PERIOD_MIN} interval=${memstats_interval}" ) - tlist+=( "tool=${LBINDIR}/filestats.sh name=filestats period=${PERIOD_MIN} interval=${filestats_interval}" ) - if [[ "${HOSTNAME}" =~ "controller-" ]]; then - tlist+=( "tool=${LBINDIR}/ceph.sh name=ceph period=${PERIOD_MIN} interval=${ceph_interval}" ) - tlist+=( "tool=${LBINDIR}/postgres.sh name=postgres period=${PERIOD_MIN} interval=${postgres_interval}" ) - tlist+=( "tool=${LBINDIR}/rabbitmq.sh name=rabbitmq period=${PERIOD_MIN} interval=${rabbitmq_interval}" ) - elif [[ "${HOSTNAME}" =~ "compute-" ]]; then - tlist+=( "tool=${LBINDIR}/vswitch.sh name=vswitch period=${PERIOD_MIN} interval=${DUR_1MIN_IN_SEC}" ) - fi - - # ticker - shows progress on the screen - tlist+=( "tool=${LBINDIR}/ticker.sh name= period=${PERIOD_MIN} interval=${DUR_1MIN_IN_SEC}" ) -fi - -if [[ ${ENABLE_LIVE_STREAM} == "Y" ]] || [[ ${ENABLE_LIVE_STREAM} == "y" ]]; then - ${TOOLBIN}/live_stream.py & -fi - -#------------------------------------------------------------------------------- -# Main loop -#------------------------------------------------------------------------------- -OPT_DEBUG=0 -REP=0 - -if [ ${#tlist[@]} -ne 0 ]; then - # Static stats collection is turned on - while [[ ${TOOL_USR1_SIGNAL} -eq 0 ]] && [[ ${OPT_FOREVER} -eq 1 || ${REP} -lt ${REPEATS} ]]; do - # increment loop counter - ((REP++)) - - # purge oldest files - purge_oldest_files - - # define filename timestamp - timestamp=$( date +"%Y-%0m-%0e_%H%M" ) - - # collect tools in parallel to separate output files - LOG "collecting ${TOOLNAME} at ${timestamp} for ${PERIOD_MIN} mins, repeat=${REP}" - do_parallel_commands - wait - - # Compress latest increment - LOG "compressing: ${parallel_outfiles[@]}" - ${CMD_IDLE} bzip2 -q -f ${parallel_outfiles[@]} 2>/dev/null & - done - - # Wait for the compression to complete - wait - tools_cleanup 0 -fi - -# Should wait here in case live stats streaming is turned on. -wait - -exit 0 diff --git a/tools/engtools/hostdata-collectors/scripts/diskstats.sh b/tools/engtools/hostdata-collectors/scripts/diskstats.sh deleted file mode 100644 index c419a9a0e..000000000 --- a/tools/engtools/hostdata-collectors/scripts/diskstats.sh +++ /dev/null @@ -1,119 +0,0 @@ -#!/bin/bash -# Usage: diskstats.sh -TOOLBIN=$(dirname $0) - -# Initialize tools environment variables, and define common utility functions -. ${TOOLBIN}/engtools_util.sh -tools_init -if [ $? -ne 0 ]; then - echo "FATAL, tools_init - could not setup environment" - exit $? 
-fi - -# Enable use of INTERVAL_SEC sample interval -OPT_USE_INTERVALS=1 - -# Print disk summary -function print_disk { - print_separator - TOOL_HIRES_TIME - - # NOTES: - # --total (grand-total) is a new option, but don't necessarily want to add tmpfs - # or dummy filesystems. - # - use -H to print in SI (eg, GB, vs GiB) - # - can use -a to print all filesystems including dummy filesystems, but then - # there can be double-counting: - print_separator - cmd='df -h -H -T --local -t ext2 -t ext3 -t ext4 -t xfs --total' - ${ECHO} "Disk space usage ext2,ext3,ext4,xfs,tmpfs (SI):" - ${ECHO} "# ${cmd}" ; ${cmd} ; ${ECHO} - - print_separator - cmd='df -h -H -T --local -i -t ext2 -t ext3 -t ext4 -t xfs --total' - ${ECHO} "Disk inodes usage ext2,ext3,ext4,xfs,tmpfs (SI):" - ${ECHO} "# ${cmd}" ; ${cmd} ; ${ECHO} - - print_separator - cmd='drbd-overview' - ${ECHO} "drbd disk usage and status:" - ${ECHO} "# ${cmd}" ; ${cmd} ; ${ECHO} - - print_separator - cmd='lvs' - ${ECHO} "logical volumes usage and status:" - ${ECHO} "# ${cmd}" ; ${cmd} ; ${ECHO} - - print_separator - cmd='pvs' - ${ECHO} "physical volumes usage and status:" - ${ECHO} "# ${cmd}" ; ${cmd} ; ${ECHO} - - print_separator - cmd='vgs' - ${ECHO} "volume groups usage and status:" - ${ECHO} "# ${cmd}" ; ${cmd} ; ${ECHO} -} - -# Print disk static summary -function print_disk_static { - print_separator - cmd='cat /proc/scsi/scsi' - ${ECHO} "Attached devices: ${cmd}" - ${cmd} - ${ECHO} - - # fdisk - requires sudo/root - print_separator - cmd='fdisk -l' - if [ $UID -eq 0 ]; then - ${ECHO} "List disk devices: ${cmd}" - ${cmd} - else - WARNLOG "Skipping cmd=${cmd}, root/sudo passwd required" - fi - ${ECHO} - - # parted - requires sudo/root - print_separator - cmd='parted -l' - if [ $UID -eq 0 ]; then - ${ECHO} "List disk devices: ${cmd}" - ${cmd} - else - WARNLOG "Skipping cmd=${cmd}, root/sudo passwd required" - fi - ${ECHO} -} - -#------------------------------------------------------------------------------- -# MAIN Program: -#------------------------------------------------------------------------------- -# Parse input options -tools_parse_options "${@}" - -# Set affinity of current script -CPULIST="" -set_affinity ${CPULIST} - -LOG "collecting ${TOOLNAME} for ${PERIOD_MIN} minutes, with ${INTERVAL_SEC} second sample intervals." - -# Print tools generic tools header -tools_header - -# Print static disk information -print_disk_static - -# Calculate number of sample repeats based on overall interval and sampling interval -((REPEATS = PERIOD_MIN * 60 / INTERVAL_SEC)) - -for ((rep=1; rep <= REPEATS ; rep++)); do - print_disk - sleep ${INTERVAL_SEC} -done -print_disk -LOG "done" - -# normal program exit -tools_cleanup 0 -exit 0 diff --git a/tools/engtools/hostdata-collectors/scripts/engtools_util.sh b/tools/engtools/hostdata-collectors/scripts/engtools_util.sh deleted file mode 100644 index af1aaa4e4..000000000 --- a/tools/engtools/hostdata-collectors/scripts/engtools_util.sh +++ /dev/null @@ -1,479 +0,0 @@ -#!/bin/bash -TOOLNAME=$(basename $0) -PIDFILE=/var/run/${TOOLNAME}.pid -TOOL_DEBUG=1 -TOOL_EXIT_SIGNAL=0 -TOOL_USR1_SIGNAL=0 -TOOL_USR2_SIGNAL=0 -TOOL_TTY=0 -if tty 1>/dev/null ; then - TOOL_TTY=1 -fi - -# [ JGAULD : SHOULD RENAME TO TOOL_X ] -OPT_USE_INTERVALS=0 -OPT_FOREVER=0 -PERIOD_MIN=5 -INTERVAL_SEC=60 -CPULIST=0 - -# Include lsb functions -if [ -d /lib/lsb ]; then - . /lib/lsb/init-functions -else - . 
/etc/init.d/functions
-fi
-# Lightweight replacement for pidofproc -p
-function check_pidfile {
-    local pidfile pid
-
-    OPTIND=1
-    while getopts p: opt ; do
-        case "$opt" in
-        p)
-            pidfile="$OPTARG"
-            ;;
-        esac
-    done
-    shift $(($OPTIND - 1))
-
-    read pid < "${pidfile}"
-    if [ -n "${pid:-}" ]; then
-        if $(kill -0 "${pid:-}" 2> /dev/null); then
-            echo "$pid"
-            return 0
-        elif ps "${pid:-}" >/dev/null 2>&1; then
-            echo "$pid"
-            return 0 # program is running, but not owned by this user
-        else
-            return 1 # program is dead and /var/run pid file exists
-        fi
-    fi
-}
-
-# tools_init - initialize tool resources
-function tools_init {
-    local rc=0
-    local error=0
-    TOOLNAME=$(basename $0)
-
-    # Check for sufficient privileges
-    if [ $UID -ne 0 ]; then
-        ERRLOG "${NAME} requires sudo/root access."
-        return 1
-    fi
-
-    # Check for essential binaries
-    ECHO=$(which echo 2>/dev/null)
-    rc=$?
-    if [ $rc -ne 0 ]; then
-        ECHO=echo # use bash built-in echo
-        ${ECHO} "FATAL, 'echo' not found, rc=$rc";
-        error=$rc
-    fi
-    DATE=$(which date 2>/dev/null)
-    rc=$?
-    if [ $rc -ne 0 ]; then
-        ${ECHO} "FATAL, 'date' not found, rc=$rc";
-        error=$rc
-    fi
-
-    # Check for standard linux binaries, at least can use LOG functions now
-    # - these are used in tools_header
-    local tool=''
-    for tool in cat arch sed grep wc uname sort tr awk pkill ls; do
-        local path=''
-        path=$(which ${tool} 2>/dev/null)
-        rc=$?
-        if [ $rc -ne 0 ]; then
-            ERRLOG "'${tool}' not found, rc=$rc";
-            error=$rc
-        fi
-        # assign the upper-case variable (CAT, ARCH, ..., LS) used elsewhere
-        printf -v "${tool^^}" '%s' "${path}"
-    done
-
-    # The following block is needed for LSB systems such as Windriver Linux.
-    # The utility is not available on CentOS so comment it out.
-    # Generic utility, but may not be available
-    # LSB=$(which lsb_release 2>/dev/null)
-    # rc=$?
-    # if [ $rc -ne 0 ]; then
-    #     WARNLOG "'lsb_release' not found, rc=$rc";
-    # fi
-
-    # Let parent program decide what to do with the errors,
-    # give ominous warning
-    if [ $error -eq 1 ]; then
-        WARNLOG "possibly cannot continue, missing linux binaries"
-    fi
-
-    # Check if tool was previously running
-    if [ -e ${PIDFILE} ]; then
-        # [ JGAULD - remove pidofproc() / LSB compatibility issue ]
-        if check_pidfile -p "${PIDFILE}" >/dev/null; then
-            ERRLOG "${PIDFILE} exists and ${TOOLNAME} is running"
-            return 1
-        else
-            # remove pid file
-            WARNLOG "${PIDFILE} exists but ${TOOLNAME} is not running; cleaning up"
-            rm -f ${PIDFILE}
-        fi
-    fi
-
-    # Create pid file
-    echo $$ > ${PIDFILE}
-
-    # Setup trap handler - these signals trigger child shutdown and cleanup
-    trap tools_exit_handler INT HUP TERM EXIT
-    trap tools_usr1_handler USR1
-    trap tools_usr2_handler USR2
-
-    return ${rc}
-}
-
-# tools_cleanup() - terminate child processes
-function tools_cleanup {
-    # restore signal handling to default behaviour
-    trap - INT HUP TERM EXIT
-    trap - USR1 USR2
-
-    local VERBOSE_OPT=''
-    if [ "$1" -ne "0" ]; then
-        LOG "cleanup invoked with code: $1"
-        if [ ${TOOL_DEBUG} -ne 0 ]; then
-            VERBOSE_OPT='-v'
-        fi
-    fi
-
-
-    # stop all processes launched from this process
-    pkill -TERM -P $$
-    if [ "$1" -ne "0" ]; then
-        sleep 1
-    fi
-
-    # OK, if the above didn't work, use force
-    pkill -KILL -P $$
-
-    # remove pid file
-    if [ -e ${PIDFILE} ]; then
-        rm -f ${VERBOSE_OPT} ${PIDFILE}
-    fi
-    exit $1
-}
-
-# tools_exit_handler() - exit handler routine
-function tools_exit_handler {
-    TOOL_EXIT_SIGNAL=1
-    tools_cleanup 128
-}
-# tools_usr1_handler() - USR1 handler routine
-function tools_usr1_handler {
-    TOOL_USR1_SIGNAL=1
-    LOG "caught USR1"
-}
-# tools_usr2_handler() - USR2 handler routine
-function tools_usr2_handler {
-    TOOL_USR2_SIGNAL=1
-    LOG "caught USR2"
-}
-
-# LOG(), WARNLOG(), ERRLOG() - simple print log functions (not logger)
-function LOG {
-    local tstamp_H=$( date +"%Y-%0m-%0e %H:%M:%S" )
-    echo "${tstamp_H} ${HOSTNAME} $0($$): $@";
-}
-
-function LOG_NOCR {
-    local tstamp_H=$( date +"%Y-%0m-%0e %H:%M:%S" )
-    echo -n "${tstamp_H} ${HOSTNAME} $0($$): $@";
-}
-
-function WARNLOG {
-    LOG "WARN $@";
-}
-
-function ERRLOG {
-    LOG "ERROR $@";
-}
-
-# TOOL_HIRES_TIME() - easily parsed date/timestamp and hi-resolution uptime
-function TOOL_HIRES_TIME {
-    echo "time: " $( ${DATE} +"%a %F %H:%M:%S.%N %Z %z" ) "uptime: " $( cat /proc/uptime )
-}
-
-# set_affinity() - set affinity for current script if a CPULIST is defined
-function set_affinity {
-    local CPULIST=$1
-    if [ -z "${CPULIST}" ]; then
-        return
-    fi
-
-    # Set cpu affinity for current program
-    local TASKSET=$(which taskset 2>/dev/null)
-    if [ -x "${TASKSET}" ]; then
-        ${TASKSET} -pc ${CPULIST} $$ 2>/dev/null
-    fi
-}
-
-# cmd_idle_priority() - command to set nice + ionice
-function cmd_idle_priority {
-    local NICE=""
-    local IONICE=""
-
-    NICE=$( which nice 2>/dev/null )
-    if [ $? -eq 0 ]; then
-        NICE="${NICE} -n 19"
-    else
-        NICE=""
-    fi
-    IONICE=$( which ionice 2>/dev/null )
-    if [ $?
-eq 0 ]; then - IONICE="${IONICE} -c 3" - else - IONICE="" - fi - echo "${NICE} ${IONICE}" -} - - -# print_separator() - print a horizontal separation line '\u002d' is '-' -function print_separator { - printf '\u002d%.s' {1..80} - printf '\n' -} - -# tools_header() - print out common GenWare tools header -function tools_header { - local TOOLNAME=$(basename $0) - - # Get timestamp - #local tstamp=$( date +"%Y-%0m-%0e %H:%M:%S" 2>/dev/null ) - local tstamp=$( date --rfc-3339=ns | cut -c1-23 2>/dev/null ) - - # Linux Generic - local UPTIME=/proc/uptime - - # Get number of online cpus - local CPUINFO=/proc/cpuinfo - local online_cpus=$( cat ${CPUINFO} | grep -i ^processor | wc -l 2>/dev/null ) - - # Get load average, run-queue size, and number of threads - local LOADAVG=/proc/loadavg - local LDAVG=( `cat ${LOADAVG} | sed -e 's#[/]# #g' 2>/dev/null` ) - - # Get current architecture - local arch=$( uname -m ) - - # Determine processor name (there are many different formats... *sigh* ) - # - build up info from multiple lines - local processor='unk' - local NAME=$( cat ${CPUINFO} | grep \ - -e '^cpu\W\W:' \ - -e ^'cpu model' \ - -e ^'model name' \ - -e ^'system type' \ - -e ^Processor \ - -e ^[Mm]achine | \ - sort -u | awk 'BEGIN{FS=":";} {print $2;}' | \ - tr '\n' ' ' | tr -s [:blank:] 2>/dev/null ) - if [ ! -z "${NAME}" ]; then - processor=${NAME} - fi - - # Determine processor speed (abort grep after first match) - local speed='unk' - local BOGO=$( cat ${CPUINFO} | grep -m1 -e ^BogoMIPS -e ^bogomips | \ - awk 'BEGIN{FS=":";} {printf "%.1f", $2;}' 2>/dev/null ) - local MHZ=$( cat ${CPUINFO} | grep -m1 -e ^'cpu MHz' -e ^clock | \ - awk 'BEGIN{FS=":";} {printf "%.1f", $2;}' 2>/dev/null ) - local MHZ2=$( cat ${CPUINFO} | grep -m1 -e ^Cpu0ClkTck -e ^'cycle frequency' | \ - awk 'BEGIN{FS=":";} {printf "%.1f", $2/1.0E6;}' 2>/dev/null ) - if [ ! -z "${MHZ}" ]; then - speed=${MHZ} - elif [ ! -z "${MHZ2}" ]; then - speed=${MHZ2} - elif [ ! 
-z "${BOGO}" ]; then
-        speed=${BOGO}
-    fi
-
-    # Determine OS and kernel version
-    local os_name=$( uname -s 2>/dev/null )
-    local os_release=$( uname -r 2>/dev/null )
-
-    declare -a arr
-
-    local dist_id=""
-    # Determine OS distribution ID
-    if [ "${lsb_pres}" == "yes" ]; then
-        arr=( $( lsb_release -i 2>/dev/null ) )
-        dist_id=${arr[2]}
-    else
-        local dist_id=$(cat /etc/centos-release | awk '{print $1}' 2>/dev/null)
-    fi
-
-    local dist_rel=""
-    if [ "${lsb_pres}" == "yes" ]; then
-        # Determine OS distribution release
-        arr=( $( cat /proc/version | awk '{print $3}' 2>/dev/null ) )
-        local dist_rel=${arr[1]}
-    else
-        local dist_rel=$(cat /etc/centos-release | awk '{print $4}' 2>/dev/null)
-    fi
-    # Print generic header
-    echo "${TOOLNAME} -- ${tstamp} load average:${LDAVG[0]}, ${LDAVG[1]}, ${LDAVG[2]} runq:${LDAVG[3]} nproc:${LDAVG[4]}"
-    echo " host:${HOSTNAME} Distribution:${dist_id} ${dist_rel} ${os_name} ${os_release}"
-    echo " arch:${arch} processor:${processor} speed:${speed} MHz CPUs:${online_cpus}"
-}
-
-
-
-
-# tools_usage() - show generic tool usage
-function tools_usage {
-    if [ ${OPT_USE_INTERVALS} -eq 1 ]; then
-        echo "Usage: ${TOOLNAME} [-f] [-p <period_mins>] [-i <interval_seconds>] [-c <cpulist>] [-h]"
-    else
-        echo "Usage: ${TOOLNAME} [-f] [-p <period_mins>] [-c <cpulist>] [-h]"
-    fi
-}
-
-# tools_print_help() - print generic tool help
-function tools_print_help {
-    tools_usage
-    echo
-    echo "Options:";
-    echo " -f                    : collect forever : default: none"
-    echo " -p <period_mins>      : overall collection period (minutes) : default: ${DEFAULT_PERIOD_MIN}"
-    if [ ${OPT_USE_INTERVALS} -eq 1 ]; then
-        echo " -i <interval_seconds> : sample interval (seconds) : default: ${DEFAULT_INTERVAL_SEC}"
-    fi
-    echo " -c <cpulist>          : cpu list where tool runs (e.g., 0-1,8) : default: none"
-    echo
-    if [ ${OPT_USE_INTERVALS} -eq 1 ]; then
-        echo "Example: collect 5 minute period, sampling every 30 seconds"
-        echo "  ${TOOLNAME} -p 5 -i 30"
-    else
-        echo "Example: collect 5 minute period"
-        echo "  ${TOOLNAME} -p 5"
-    fi
-}
-
-# tools_parse_options() -- parse common options for tools scripts
-function tools_parse_options {
-    # check for no arguments, print usage
-    if [ $# -eq 0 ]; then
-        tools_usage
-        tools_cleanup 0
-        exit 0
-    fi
-
-    # parse the input arguments
-    while getopts "fp:i:c:h" Option; do
-        case $Option in
-            f)
-                OPT_FOREVER=1
-                PERIOD_MIN=60
-                ;;
-            p) PERIOD_MIN=$OPTARG ;;
-            i)
-                OPT_USE_INTERVALS=1
-                INTERVAL_SEC=$OPTARG
-                ;;
-            c) CPULIST=$OPTARG ;;
-            h)
-                tools_print_help
-                tools_cleanup 0
-                exit 0
-                ;;
-            *)
-                tools_usage
-                tools_cleanup 0
-                exit 0
-                ;;
-        esac
-    done
-
-    # validate input arguments
-    PERIOD_MAX=$[4*24*60]
-    INTERVAL_MAX=$[60*60]
-
-    error=0
-    if [[ ${PERIOD_MIN} -lt 1 || ${PERIOD_MIN} -gt ${PERIOD_MAX} ]]; then
-        echo "-p <period_mins> must be > 0 and <= ${PERIOD_MAX}."
-        error=1
-    fi
-    if [[ ${INTERVAL_SEC} -lt 1 || ${INTERVAL_SEC} -gt ${INTERVAL_MAX} ]]; then
-        echo "-i <interval_seconds> must be > 0 and <= ${INTERVAL_MAX}."
-        error=1
-    fi
-    if [ ${error} -eq 1 ]; then
-        tools_cleanup 0
-        exit 1
-    fi
-}
diff --git a/tools/engtools/hostdata-collectors/scripts/filestats.sh b/tools/engtools/hostdata-collectors/scripts/filestats.sh
deleted file mode 100644
index 8b38e695b..000000000
--- a/tools/engtools/hostdata-collectors/scripts/filestats.sh
+++ /dev/null
@@ -1,96 +0,0 @@
-#!/bin/bash
-# Usage: filestats.sh [-p <period_mins>] [-i <interval_seconds>] [-c <cpulist>] [-h]
-TOOLBIN=$(dirname $0)
-
-# Initialize tools environment variables, and define common utility functions
-. ${TOOLBIN}/engtools_util.sh
-tools_init
-if [ $? -ne 0 ]; then
-    echo "FATAL, tools_init - could not setup environment"
-    exit 1
-fi
-
-PAGE_SIZE=$(getconf PAGE_SIZE)
-
-# Enable use of INTERVAL_SEC sample interval
-OPT_USE_INTERVALS=1
-
-
-function print_files {
-    print_separator
-    TOOL_HIRES_TIME
-
-    ${ECHO} "# ls -l /proc/*/fd"
-    sudo ls -l /proc/*/fd 2>/dev/null | awk \
-        '$11 ~ /socket/ {a += 1} ; \
-         $11 ~ /null/ {b += 1} ; \
-         {c += 1} \
-         END {\
-         {printf "%-10s %-10s %-10s %-10s\n", "TOTAL", "FILES", "SOCKETS", "NULL PIPES"} \
-         {printf "%-10s %-10s %-10s %-10s\n", c, c-(a+b) , a, b}}'
-
-    ${ECHO}
-
-    ${ECHO} "# lsof"
-    printf "%-7s %-7s %-6s %-6s %-6s %-6s %-6s %-6s %-6s %-6s %-6s %-6s %s\n" "PID" "TOTAL" "FD" "U" "W" "R" "CWD" "RTD" "TXT" "MEM" "DEL" "TCP" "CMD"
-    sudo lsof +c 15 | awk '$3 !~ /^[0-9]+/{ {pids[$2]["COMMAND"]=$1}\
-        {pids[$2]["PID"]=$2}\
-        {pids[$2]["TOTAL"]+=1}\
-        {pids[$2]["TCP"]+=($8=="TCP")? 1 : 0}\
-        {($4 ~ /^[0-9][0-9]*[urw]/ )? \
-            pids[$2][substr($4, length($4),1)]+=1 : pids[$2][$4]+=1} }
-        END {
-            { for (i in pids) \
-                if(pids[i]["PID"]!="PID") {
-                    {printf "%-7s %-7s %-6s %-6s %-6s %-6s %-6s %-6s %-6s %-6s %-6s %-6s %s\n", \
-                        pids[i]["PID"], \
-                        pids[i]["TOTAL"],\
-                        ((pids[i]["u"]!="")? pids[i]["u"] : 0) + ((pids[i]["w"]!="")? pids[i]["w"] : 0 )+ ((pids[i]["r"]!="")? pids[i]["r"] : 0),\
-                        (pids[i]["u"]!="")? pids[i]["u"] : 0,\
-                        (pids[i]["w"]!="")? pids[i]["w"] : 0,\
-                        (pids[i]["r"]!="")? pids[i]["r"] : 0,\
-                        (pids[i]["cwd"]!="")? pids[i]["cwd"] : 0,\
-                        (pids[i]["rtd"]!="")? pids[i]["rtd"] : 0,\
-                        (pids[i]["txt"]!="")? pids[i]["txt"] : 0,\
-                        (pids[i]["mem"]!="")? pids[i]["mem"] : 0,\
-                        (pids[i]["DEL"]!="")? pids[i]["DEL"] : 0,\
-                        (pids[i]["TCP"]!="")? pids[i]["TCP"] : 0,\
-                        pids[i]["COMMAND"]} }}}' | sort -n -r -k3
-
-    ${ECHO}
-
-    ${ECHO} "# lsof -nP +L1"
-    sudo lsof -nP +L1
-    ${ECHO}
-}
-
-
-
-#-------------------------------------------------------------------------------
-# MAIN Program:
-#-------------------------------------------------------------------------------
-# Parse input options
-tools_parse_options "${@}"
-
-# Set affinity of current script
-CPULIST=""
-set_affinity ${CPULIST}
-
-LOG "collecting ${TOOLNAME} for ${PERIOD_MIN} minutes, with ${INTERVAL_SEC} second sample intervals."
-
-# Print generic tools header
-tools_header
-
-# Calculate number of sample repeats based on overall interval and sampling interval
-((REPEATS = PERIOD_MIN * 60 / INTERVAL_SEC))
-
-for ((rep=1; rep <= REPEATS ; rep++)); do
-    print_files
-    sleep ${INTERVAL_SEC}
-done
-print_files
-LOG "done"
-
-# normal program exit
-tools_cleanup 0
-exit 0
diff --git a/tools/engtools/hostdata-collectors/scripts/init.d/collect-engtools.sh b/tools/engtools/hostdata-collectors/scripts/init.d/collect-engtools.sh
deleted file mode 100644
index fa52a65a6..000000000
--- a/tools/engtools/hostdata-collectors/scripts/init.d/collect-engtools.sh
+++ /dev/null
@@ -1,118 +0,0 @@
-#!/bin/bash
-### BEGIN INIT INFO
-# Provides:          collect-engtools
-# Required-Start:    $local_fs $network $syslog postgresql
-# Required-Stop:     $local_fs $network $syslog postgresql
-# Default-Start:     2 3 4 5
-# Default-Stop:      0 1 6
-# Short-Description: initscript to launch engineering tools data collection daemon
-# Description:       initscript to launch engineering tools data collection daemon
-# Blah.
-### END INIT INFO
-
-PATH=/sbin:/usr/sbin:/bin:/usr/bin
-DESC="collect engtools service"
-NAME="collect-engtools.sh"
-DAEMON=/usr/local/bin/${NAME}
-DAEMON_ARGS="-f"
-PIDFILE=/var/run/${NAME}.pid
-SCRIPTNAME=/etc/init.d/${NAME}
-DEFAULTFILE=/etc/default/${NAME}
-
-# Exit if the package is not installed
-[ -x "$DAEMON" ] || exit 0
-. /etc/init.d/functions
-# Read configuration variable file if it is present
-[ -r $DEFAULTFILE ] && . $DEFAULTFILE
-
-# Load the VERBOSE setting and other rcS variables
-#. /lib/init/vars.sh
-
-# Define lsb fallback versions of:
-# log_daemon_msg(), log_end_msg()
-log_daemon_msg() { echo -n "${1:-}: ${2:-}"; }
-log_end_msg() { echo "."; }
-
-# Use lsb functions to perform the operations.
-if [ -f /lib/lsb/init-functions ]; then
-    . /lib/lsb/init-functions
-fi
-
-# Check for sufficient privileges
-# [ JGAULD : possibly provide user = 'operator' option instead... ]
-if [ $UID -ne 0 ]; then
-    log_daemon_msg "Starting ${NAME} requires sudo/root access."
-    exit 1
-fi
-
-case $1 in
-    start)
-        if [ -e ${PIDFILE} ]; then
-            pid=$(pidof -x ${NAME})
-            if test "${pid}" != ""
-            then
-                echo_success "${NAME} already running"
-                exit
-            fi
-        fi
-
-
-        log_daemon_msg "Starting ${NAME}"
-        if start-stop-daemon --start --background --quiet --oknodo --pidfile ${PIDFILE} \
-            --exec ${DAEMON} -- ${DAEMON_ARGS} ; then
-            log_end_msg 0
-        else
-            log_end_msg 1
-        fi
-        ;;
-
-    stop)
-        if [ -e ${PIDFILE} ]; then
-            pids=$(pidof -x ${NAME})
-            if [[ ! -z "${pids}" ]]; then
-                echo_success "Stopping ${NAME} [$pids]"
-                start-stop-daemon --stop --quiet --oknodo --pidfile ${PIDFILE} --retry=TERM/3/KILL/5
-                # [ JGAULD: none of the following should be necessary ]
-                /usr/local/bin/cleanup-engtools.sh
-            else
-                echo_failure "${NAME} is not running"
-            fi
-        else
-            echo_failure "${PIDFILE} does not exist"
-        fi
-        ;;

-    restart)
-        $0 stop && sleep 2 && $0 start
-        ;;
-
-    status)
-        if [ -e ${PIDFILE} ]; then
-            pid=$(pidof -x ${NAME})
-            if test "${pid}" != ""
-            then
-                echo_success "${NAME} is running"
-            else
-                echo_success "${NAME} is not running"
-            fi
-        else
-            echo_success "${NAME} is not running"
-        fi
-        ;;
-
-    reload)
-        if [ -e ${PIDFILE} ]; then
-            start-stop-daemon --stop --signal USR1 --quiet --pidfile ${PIDFILE} --name ${NAME}
-            echo_success "${NAME} reloaded successfully"
-        else
-            echo_failure "${PIDFILE} does not exist"
-        fi
-        ;;
-
-    *)
-        echo "Usage: $0 {start|stop|restart|reload|status}"
-        exit 2
-        ;;
-esac
-
-exit 0
diff --git a/tools/engtools/hostdata-collectors/scripts/iostat.sh b/tools/engtools/hostdata-collectors/scripts/iostat.sh
deleted file mode 100644
index 7ac65aceb..000000000
--- a/tools/engtools/hostdata-collectors/scripts/iostat.sh
+++ /dev/null
@@ -1,49 +0,0 @@
-#!/bin/bash
-# Usage: iostat.sh [-p <period_mins>] [-i <interval_seconds>] [-c <cpulist>] [-h]
-TOOLBIN=$(dirname $0)
-
-# Initialize tools environment variables, and define common utility functions
-. ${TOOLBIN}/engtools_util.sh
-tools_init
-if [ $? -ne 0 ]; then
-    echo "FATAL, tools_init - could not setup environment"
-    exit 1
-fi
-
-# Enable use of INTERVAL_SEC sample interval
-OPT_USE_INTERVALS=1
-
-IOSTAT=$( which iostat 2>/dev/null )
-if [ $? -ne 0 ]; then
-    print_separator
-    WARNLOG "iostat not available"
-    tools_cleanup 0
-fi
-
-# MAIN Program:
-#-------------------------------------------------------------------------------
-# Parse input options
-tools_parse_options "${@}"
-
-# Set affinity of current script
-CPULIST=""
-set_affinity ${CPULIST}
-
-LOG "collecting ${TOOLNAME} for ${PERIOD_MIN} minutes, with ${INTERVAL_SEC} second sample intervals."
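For reference, every collector removed by this patch (filestats.sh and iostat.sh above, and the others below) follows the same engtools_util.sh skeleton. A minimal sketch of that pattern, using a hypothetical sample.sh collector that is not part of the original tree:

    #!/bin/bash
    # sample.sh - hypothetical engtools collector skeleton (illustrative only)
    TOOLBIN=$(dirname $0)

    # engtools_util.sh supplies tools_init, tools_parse_options, LOG, etc.
    . ${TOOLBIN}/engtools_util.sh
    tools_init
    if [ $? -ne 0 ]; then
        echo "FATAL, tools_init - could not setup environment"
        exit 1
    fi

    # Enable use of INTERVAL_SEC sample interval
    OPT_USE_INTERVALS=1
    tools_parse_options "${@}"
    CPULIST=""
    set_affinity ${CPULIST}
    tools_header

    # Sample REPEATS times, INTERVAL_SEC apart; tools_cleanup kills any
    # children and removes the pid file created by tools_init.
    ((REPEATS = PERIOD_MIN * 60 / INTERVAL_SEC))
    for ((rep = 1; rep <= REPEATS; rep++)); do
        print_separator
        TOOL_HIRES_TIME            # parseable timestamp for each sample
        # ... collect one sample here ...
        sleep ${INTERVAL_SEC}
    done
    LOG "done"
    tools_cleanup 0
    exit 0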
-
-# Print generic tools header
-tools_header
-
-# Calculate number of sample repeats based on overall interval and sampling interval
-((REPEATS = PERIOD_MIN * 60 / INTERVAL_SEC))
-((REP = REPEATS + 1))
-
-# Execute tool for specified duration
-CMD="${IOSTAT} -k -x -t ${INTERVAL_SEC} ${REP}"
-#LOG "CMD: ${CMD}"
-${CMD}
-LOG "done"
-
-# normal program exit
-tools_cleanup 0
-exit 0
diff --git a/tools/engtools/hostdata-collectors/scripts/linux_benchmark.sh b/tools/engtools/hostdata-collectors/scripts/linux_benchmark.sh
deleted file mode 100644
index 32d7dd206..000000000
--- a/tools/engtools/hostdata-collectors/scripts/linux_benchmark.sh
+++ /dev/null
@@ -1,533 +0,0 @@
-#!/bin/bash
-
-username="sysadmin"
-password="Li69nux*"
-test_duration="30"
-wait_duration="5"
-udp_find_0_frameloss="1"
-udp_max_iter="20"
-udp_granularity="100000"
-result_dir="/home/${username}/benchmark_results"
-summary_file="${result_dir}/benchmark_summary.xls"
-host=""
-remote=""
-controllers=()
-computes=()
-nodes=()
-max_compute_node="10"
-interfaces=("")
-# udp header total length: Ethernet header ( 14 ) + CRC ( 4 ) + IPv4 header ( 20 ) + UDP header ( 8 )
-udp_header_len="46"
-# icmp header total length: ICMP header ( 8 ) + IPv4 header ( 20 )
-icmp_header_len="28"
-frame_sizes=(64 128 256 512 1024 1280 1518)
-ssh_opt="-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -q"
-# ports used for the different kinds of traffic except hiprio. these are chosen randomly since they are not used
-# 8000 - storage; 8001 - migration; 8002 - default; 8003 - drbd
-controller_ports=(8000 8001 8002 8003)
-compute_ports=(8000 8001 8002)
-traffic_types=(storage migration default drbd)
-flow_ids=(1:20 1:30 1:40 1:50)
-
-function exec_cmd {
-    node="$1"
-    cmd="$2"
-
-    if [[ "${node}" == *"${host}"* ]]; then
-        echo "$(bash -c "${cmd}")"
-    else
-        echo "$(ssh ${ssh_opt} ${username}@${node} "${cmd}")"
-    fi
-}
-
-function iperf3_server_start {
-    local server="$1"
-    local result="$2"
-    local port="$3"
-    local cmd="iperf3 -s"
-
-    if [ "${port}" ]; then
-        cmd="${cmd} -p ${port}"
-    fi
-    cmd="nohup ${cmd} > ${result} 2>&1 &"
-    $(exec_cmd "${server}" "${cmd}")
-}
-
-function iperf3_client_tcp_start {
-    local result="${result_dir}/throughput"
-    local cmd=""
-    local client="$1"
-    local server="$2"
-    local port="$3"
-
-    cmd="iperf3 -t ${test_duration} -c $(get_ip_addr "${server}")"
-    if [ "${port}" ]; then
-        cmd="${cmd} -p ${port} -O ${wait_duration}"
-        result="${result}_parallel_${port}"
-    else
-        result="${result}_tcp"
-        if [[ "${server}" == *"infra"* ]]; then
-            result="${result}_infra"
-        fi
-    fi
-    $(exec_cmd "${client}" "${cmd} > ${result} 2>&1")
-}
-
-function iperf3_client_udp_start {
-    local result="${result_dir}/throughput_udp"
-    local cmd=""
-    local client="$1"
-    local server="$2"
-    local frame_size="$3"
-    local bw="0"
-
-    if [ "${4}" ]; then
-        bw="${4}"
-    fi
-
-    cmd="iperf3 -u -t ${test_duration} -c $(get_ip_addr ${server})"
-    if [ ${frame_size} ]; then
-        cmd="${cmd} -l ${frame_size}"
-        result="${result}_$[${frame_size}+${udp_header_len}]"
-    fi
-
-    if [[ ${server} == *"infra"* ]]; then
-        result="${result}_infra"
-    fi
-
-    $(exec_cmd "${client}" "${cmd} -b ${bw} >> ${result} 2>&1" )
-}
-
-function iperf3_stop {
-    local node="$1"
-    local cmd="pkill iperf3"
-    $(exec_cmd "${node}" "${cmd}")
-}
-
-function get_ip_addr {
-    arp -a | grep -oP "(?<=$1 \()[^)]*" | head -n 1
-}
-
-function throughput_tcp_test {
-    for (( i = 0; i < ${#nodes[@]} ; i+=2 )); do
-        for interface in "${interfaces[@]}"; do
-            local interface_name="management"
-            local
interface_suffix="" - local result_suffix="" - if [ "${interface}" == "infra" ]; then - interface_name="infrastructure" - interface_suffix="-infra" - result_suffix="_infra" - fi - local result_file="${result_dir}/throughput_tcp${result_suffix}" - printf "Running TCP throughput test between ${nodes[${i}]} and ${nodes[$[${i}+1]]}'s ${interface_name} network..." - iperf3_server_start ${nodes[$[${i}+1]]}${interface_suffix} ${result_file} - iperf3_client_tcp_start ${nodes[${i}]}${interface_suffix} ${nodes[$[${i}+1]]}${interface_suffix} - iperf3_stop ${nodes[$[${i}+1]]}${interface_suffix} - result=$(exec_cmd "${nodes[${i}]}" "awk '/sender/ {print \$7 \" \" \$8}' ${result_file}") - printf " Done (${result})\n" - done - done -} - -function throughput_udp_test { - for (( i = 0; i < ${#nodes[@]} ; i+=2 )); do - for interface in "${interfaces[@]}"; do - local interface_name="management" - local interface_suffix="" - local result_suffix="" - if [ "${interface}" == "infra" ]; then - interface_name="infrastructure" - interface_suffix="-infra" - result_suffix="_infra" - fi - echo "Running UDP throughput test between ${nodes[${i}]} and ${nodes[$[${i}+1]]}'s ${interface_name} network" - for frame_size in "${frame_sizes[@]}"; do - local max_bw="0" - local min_bw="0" - local cur_bw="0" - local old_bw="0" - local result="" - local result_unit="" - local frame_loss="" - local max_result="" - local max_result_unit="" - local max_frame_loss="" - local result_file="${result_dir}/throughput_udp_${frame_size}${result_suffix}" - local iter="0" - local diff="" - printf "\tFrame size = ${frame_size}..." - while true; do - iperf3_server_start ${nodes[$[${i}+1]]}${interface_suffix} ${result_file} - iperf3_client_udp_start ${nodes[${i}]}${interface_suffix} ${nodes[$[${i}+1]]}${interface_suffix} $[${frame_size}-${udp_header_len}] ${cur_bw} - iperf3_stop ${nodes[$[${i}+1]]}${interface_suffix} - result=$(exec_cmd "${nodes[${i}]}" "awk '/%/ {print \$7}' ${result_file} | tail -n1") - result_unit=$(exec_cmd "${nodes[${i}]}" "awk '/%/ {print \$8}' ${result_file} | tail -n1") - frame_loss=$(exec_cmd "${nodes[${i}]}" "awk '/%/ {print \$12}' ${result_file} | tail -n1 | tr -d '()%'") - if [ "${udp_find_0_frameloss}" == "1" ]; then - if [ "${iter}" -eq "0" ]; then - max_result="${result}" - max_result_unit="${result_unit}" - max_frame_loss="${frame_loss}" - fi - if [ $(echo ${frame_loss} | grep e) ]; then - frame_loss="$(echo ${frame_loss} | sed 's/e/*10^/g;s/ /*/' )" - fi - if [ "$(echo "${frame_loss} > 0" | bc -l)" -eq "1" ]; then - max_bw="${result}" - if [ "${result_unit}" == "Kbits/sec" ]; then - max_bw="$(echo "(${max_bw} * 1000) / 1" | bc)" - elif [ "${result_unit}" == "Mbits/sec" ]; then - max_bw="$(echo "(${max_bw} * 1000000) / 1" | bc)" - elif [ "${result_unit}" == "Gbits/sec" ]; then - max_bw="$(echo "(${max_bw} * 1000000000) / 1" | bc)" - fi - else - if [ "${iter}" -eq "0" ]; then - break - else - min_bw="${result}" - if [ "${result_unit}" == "Kbits/sec" ]; then - min_bw="$(echo "(${min_bw} * 1000) / 1" | bc)" - elif [ "${result_unit}" == "Mbits/sec" ]; then - min_bw="$(echo "(${min_bw} * 1000000) / 1" | bc)" - elif [ "${result_unit}" == "Gbits/sec" ]; then - min_bw="$(echo "(${min_bw} * 1000000000) / 1" | bc)" - fi - fi - fi - old_bw="${cur_bw}" - cur_bw="$[(${max_bw} + ${min_bw}) / 2]" - diff="$(echo "$[${cur_bw} - ${old_bw}]" | tr -d '-')" - #break - ((iter++)) - if [ "${diff}" -lt "${udp_granularity}" ]; then - break - fi - if [ "${udp_max_iter}" -ne "0" ] && [ "${iter}" -ge "${udp_max_iter}" ]; then - break - fi - 
else - break - fi - done - if [ "${udp_find_0_frameloss}" == "1" ]; then - printf " Done (%s %s @ %s%% & %s %s @ %s%%)\n" "${max_result}" "${max_result_unit}" "${max_frame_loss}" "${result}" "${result_unit}" "${frame_loss}" - else - printf " Done (%s %s @ %s%%)\n" "${result}" "${result_unit}" "${frame_loss}" - fi - done - done - done -} - -function throughput_parallel_test { - local dev="" - local ip_addr="" - local interface_name="" - local interface_suffix="" - local result_file="${result_dir}/throughput_parallel" - # get device name of the interface - if [ "${#interfaces[@]}" -gt "1" ]; then - interface_name="infrastructure" - interface_suffix="-infra" - ip_addr=$(ping -c1 ${host}-infra | awk -F'[()]' '/PING/{print $2}') - else - interface_name="management" - ip_addr=$(ping -c1 ${host} | awk -F'[()]' '/PING/{print $2}') - fi - dev=$(ifconfig | grep -B1 "inet ${ip_addr}" | awk '$1!="inet" && $1!="--" {print $1}') - - - # set all the filters - for node in ${nodes[@]}; do - local ports=("${controller_ports[@]}") - if [[ "${node}" == *"compute"* ]]; then - ports=("${compute_ports[@]}") - fi - for i in $(seq 0 $[${#ports[@]} - 1]); do - if [ ${traffic_types[i]} != "default" ]; then - tc_dport="tc filter add dev ${dev} protocol ip parent 1:0 prio 1 u32 match ip protocol 6 0xff match ip dport ${ports[i]} 0xffff flowid ${flow_ids[i]}" - tc_sport="tc filter add dev ${dev} protocol ip parent 1:0 prio 1 u32 match ip protocol 6 0xff match ip sport ${ports[i]} 0xffff flowid ${flow_ids[i]}" - $(exec_cmd "${node}" "echo ${password} | sudo -S bash -c '${tc_dport}; ${tc_sport}' > /dev/null 2>&1") - fi - done - done - - # run the tests - for (( i = 0; i < ${#nodes[@]} ; i+=2 )); do - local ports=("${controller_ports[@]}") - if [[ "${nodes[${i}]}" == *"compute"* ]]; then - ports=("${compute_ports[@]}") - fi - printf "Running parallel throughput test between ${nodes[${i}]} and ${nodes[$[${i}+1]]}'s ${interface_name} network..." 
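For one concrete instance of the tc_dport/tc_sport template above: with the storage port (8000), flow id 1:20, and a hypothetical device name ens0, the pair of filters expands to:

    tc filter add dev ens0 protocol ip parent 1:0 prio 1 u32 \
        match ip protocol 6 0xff match ip dport 8000 0xffff flowid 1:20
    tc filter add dev ens0 protocol ip parent 1:0 prio 1 u32 \
        match ip protocol 6 0xff match ip sport 8000 0xffff flowid 1:20

That is, TCP traffic to or from port 8000 is steered into class 1:20, so each traffic type competes under its own qdisc class while the parallel iperf3 sessions run; the "default" type deliberately gets no filter and falls through to the default class.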
-
-            # start the servers
-            for port in "${ports[@]}"; do
-                iperf3_server_start "${nodes[$[${i}+1]]}${interface_suffix}" "${result_file}_${port}" "${port}"
-            done
-            # start the clients
-            for port in "${ports[@]}"; do
-                iperf3_client_tcp_start ${nodes[${i}]}${interface_suffix} ${nodes[$[${i}+1]]}${interface_suffix} ${port} &
-            done
-            sleep $[${test_duration} + ${wait_duration} + 1]
-            iperf3_stop ${nodes[$[${i}+1]]}${interface_suffix}
-            printf " Done\n"
-
-            # get results
-            for j in $(seq 0 $[${#ports[@]} - 1]); do
-                result=$(exec_cmd "${nodes[${i}]}" "awk '/sender/ {print \$7 \" \" \$8}' ${result_file}_${ports[${j}]}")
-                printf "\t${traffic_types[$j]} = ${result}\n"
-            done
-        done
-
-        # remove all the filters
-        for node in ${nodes[@]}; do
-            local handles=()
-            local ports=("${controller_ports[@]}")
-            if [[ "${node}" == *"compute"* ]]; then
-                ports=("${compute_ports[@]}")
-            fi
-            handles=($(exec_cmd "${node}" "/usr/sbin/tc filter show dev ${dev} | awk '/filter/ {print \$10}' | tail -n $[(${#ports[@]} - 1) * 2 ]"))
-            for handle in "${handles[@]}"; do
-                $(exec_cmd "${node}" "echo ${password} | sudo -S /usr/sbin/tc filter delete dev ${dev} parent 1: handle ${handle} prio 1 u32 > /dev/null 2>&1")
-            done
-        done
-}
-
-function latency_test {
-    for (( i = 0; i < ${#nodes[@]} ; i+=2 )); do
-        for interface in "${interfaces[@]}"; do
-            local interface_name="management"
-            local interface_suffix=""
-            local result_suffix=""
-            if [ "${interface}" == "infra" ]; then
-                interface_name="infrastructure"
-                interface_suffix="-infra"
-                result_suffix="_infra"
-            fi
-            echo "Running latency test between ${nodes[${i}]} and ${nodes[$[${i}+1]]}'s ${interface_name} network"
-            for frame_size in "${frame_sizes[@]}"; do
-                local result_file="${result_dir}/latency_${frame_size}${result_suffix}"
-                printf "\tFrame size = ${frame_size}..."
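A note on the ping that follows: it sizes its payload as frame_size - 8 so that the quoted frame size counts the 8-byte ICMP header (ping -s 56 yields the classic 64-byte ICMP message); the 20-byte IPv4 header tracked in icmp_header_len above rides on top of that. A quick, purely illustrative check of the arithmetic:

    # hypothetical sanity check of the payload math used by latency_test
    frame_size=64
    payload=$[${frame_size} - 8]    # subtract the 8-byte ICMP header
    echo "ping -s ${payload} sends a ${frame_size}-byte ICMP message (+20 IPv4 on the wire)"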
- $(exec_cmd "${nodes[${i}]}" "ping -s $[${frame_size}-8] -w ${test_duration} -i 0.2 ${nodes[$[${i}+1]]}${interface_suffix} > ${result_file} 2>&1") - result=$(exec_cmd "${nodes[${i}]}" "awk '/rtt/ {print \$2 \" = \" \$4 \" \" \$5}' ${result_file}") - printf " Done (%s)\n" "${result}" - done - done - done -} - -function setup { - for node in ${nodes[@]}; do - iperf3_stop "${node}" - $(exec_cmd "${node}" "rm -rf ${result_dir}; mkdir -p ${result_dir}") - done -} - -function get_remote_results { - for node in ${nodes[@]}; do - if [ "${node}" != "${host}" ]; then - mkdir ${result_dir}/${node} - scp ${ssh_opt} ${username}@${node}:${result_dir}/* ${result_dir}/${node} > /dev/null 2>&1 - fi - done -} - -function get_interface_info { - local dev="" - local ip_addr="" - printf "Network interfaces info\n" >> ${summary_file} - for interface in "${interfaces[@]}"; do - local interface_suffix="" - local interface_name="management" - if [ "${interface}" == "infra" ]; then - interface_name="infrastructure" - interface_suffix="-infra" - fi - ip_addr=$(ping -c1 ${host}${interface_suffix} | awk -F'[()]' '/PING/{print $2}') - dev=$(ifconfig | grep -B1 "inet ${ip_addr}" | awk '$1!="inet" && $1!="--" {print $1}') - printf "%s network interface\n" "${interface_name}" >> ${summary_file} - echo ${password} | sudo -S ethtool ${dev} >> ${summary_file} - done -} - -function generate_summary { - local header="" - local result="" - local result_file="" - - printf "Summary\n\n" > ${summary_file} - printf "Throughput TCP\n" >> ${summary_file} - for (( i = 0; i < ${#nodes[@]} ; i+=2 )); do - for interface in "${interfaces[@]}"; do - local node_type="controller" - local interface_type="mgmt" - local result_suffix="" - if [[ "${nodes[${i}]}" == *"compute"* ]]; then - node_type="compute" - fi - if [ "${interface}" == "infra" ]; then - interface_type="infra" - result_suffix="_infra" - fi - header="${header},${node_type}'s ${interface_type}" - result_file="${result_dir}" - if [ ${node_type} == "compute" ]; then - result_file="${result_file}/${nodes[${i}]}" - fi - result_file="${result_file}/throughput_tcp${result_suffix}" - result="${result},$(awk '/sender/ {print $7 " " $8}' ${result_file})" - done - done - printf "%s\n%s\n\n" "${header}" "${result}" >> ${summary_file} - - printf "Throughput UDP\n" >> ${summary_file} - header=",frame,max throughput,max frameloss" - if [ "${udp_find_0_frameloss}" == "1" ]; then - header="${header},final throughput, final frameloss" - fi - for (( i = 0; i < ${#nodes[@]} ; i+=2 )); do - for interface in "${interfaces[@]}"; do - local node_type="controller" - local interface_type="mgmt" - local result_suffix="" - if [[ "${nodes[${i}]}" == *"compute"* ]]; then - node_type="compute" - fi - if [ "${interface}" == "infra" ]; then - interface_type="infra" - result_suffix="_infra" - fi - printf "%s's %s\n%s\n" "${node_type}" "${interface_type}" "${header}" >> ${summary_file} - result_file=${result_dir} - if [ ${node_type} == "compute" ]; then - result_file="${result_file}/${nodes[${i}]}" - fi - for frame in ${frame_sizes[@]}; do - result="${frame},$(awk '/%/ {print $7 " " $8}' ${result_file}/throughput_udp_${frame}${result_suffix} | head -n1),$(awk '/%/ {print $12}' ${result_file}/throughput_udp_${frame}${result_suffix} | head -n1 | tr -d '()')" - if [ "${udp_find_0_frameloss}" == "1" ]; then - result="${result},$(awk '/%/ {print $7 " " $8}' ${result_file}/throughput_udp_${frame}${result_suffix} | tail -n1),$(awk '/%/ {print $12}' ${result_file}/throughput_udp_${frame}${result_suffix} | tail -n1 | tr 
-d '()')" - fi - printf ",%s\n" "${result}" >> ${summary_file} - done - printf "\n" >> ${summary_file} - done - done - - printf "Parallel throughput result\n" >> ${summary_file} - header=",Node type" - for traffic_type in "${traffic_types[@]}"; do - header="${header},${traffic_type}" - done - printf "%s\n" "${header}" >> ${summary_file} - for (( i = 0; i < ${#nodes[@]} ; i+=2 )); do - local node_type="controller" - local ports=("${controller_ports[@]}") - if [[ "${nodes[${i}]}" == *"compute"* ]]; then - node_type="compute" - fi - result_file=${result_dir} - if [ ${node_type} == "compute" ]; then - ports=("${compute_ports[@]}") - result_file="${result_file}/${nodes[${i}]}" - fi - result=",${node_type}" - for port in "${ports[@]}"; do - result="${result},$(awk '/sender/ {print $7 " " $8}' ${result_file}/throughput_parallel_${port})" - done - printf "%s\n" "${result}" >> ${summary_file} - done - - printf "\nLatency result in ms\n" >> ${summary_file} - for (( i = 0; i < ${#nodes[@]} ; i+=2 )); do - for interface in "${interfaces[@]}"; do - local node_type="controller" - local interface_type="mgmt" - local result_suffix="" - if [[ "${nodes[${i}]}" == *"compute"* ]]; then - node_type="compute" - fi - if [ "${interface}" == "infra" ]; then - interface_type="infra" - result_suffix="_infra" - fi - printf "%s's %s network\n" "${node_type}" "${interface_type}" >> ${summary_file} - result_file=${result_dir} - if [ ${node_type} == "compute" ]; then - result_file="${result_file}/${nodes[${i}]}" - fi - result_file="${result_file}/latency" - printf ",frame size,%s\n" "$(awk '/rtt/ {print $2}' ${result_file}_${frame_sizes}${result_suffix} | tr '/' ',' )" >> ${summary_file} - for frame_size in "${frame_sizes[@]}"; do - printf ",%s,%s\n" "${frame_size}" "$(awk '/rtt/ {print $4}' ${result_file}_${frame_size}${result_suffix} | tr '/' ',' )" >> ${summary_file} - done - - printf "latency distribution\n" >> ${summary_file} - printf ",frame size" >> ${summary_file} - for (( j = 1; j < "20" ; j+=1 )); do - printf ",%s" "$(echo "scale=3;${j}/100" | bc | awk '{printf "%.3f", $0}')" >> ${summary_file} - done - printf "\n" >> ${summary_file} - for frame_size in "${frame_sizes[@]}"; do - printf ",%s" "${frame_size}" >> ${summary_file} - for (( j = 1; j < "20" ; j+=1 )); do - printf ",%s" "$(grep -c "time=$(echo "scale=2;${j}/100" | bc | awk '{printf "%.2f", $0}')" ${result_file}_${frame_size}${result_suffix})" >> ${summary_file} - done - printf "\n" >> ${summary_file} - done - printf "\n" >> ${summary_file} - done - done - - get_interface_info -} - -echo "Starting linux interface benchmark test. ($(date))" - -# find the nodes to test -host=${HOSTNAME} -if [ "${host}" == "controller-1" ]; then - remote="controller-0" -else - remote="controller-1" -fi - -# at least another controller needs to be reachable -ping -c1 ${remote} > /dev/null 2>&1 -if [ $? -eq 0 ]; then - controllers=(${host} ${remote}) - nodes+=("${controllers[@]}") -else - echo "Stopping test as ${remote} is not reachable" - exit 1 -fi - -# check if infrastructure interface is provisioned -ping -c1 "${remote}-infra" > /dev/null 2>&1 -if [ $? -eq 0 ]; then - echo "Infrastructure network is provisioned" - interfaces+=("infra") -fi - -# check if there are any compute nodes -for i in $(seq 0 $[${max_compute_node} - 1]); do - ping -c1 compute-${i} > /dev/null 2>&1 - if [ $? 
-eq 0 ]; then
-        computes+=("compute-${i}")
-        if [ ${#computes[@]} -ge "2" ]; then
-            nodes+=("${computes[@]}")
-            break
-        fi
-    fi
-done
-
-setup
-throughput_tcp_test
-throughput_udp_test
-throughput_parallel_test
-latency_test
-get_remote_results
-generate_summary
-echo "Linux interface benchmark test finished. ($(date))"
-
diff --git a/tools/engtools/hostdata-collectors/scripts/live_stream.py b/tools/engtools/hostdata-collectors/scripts/live_stream.py
deleted file mode 100644
index 3fc10a502..000000000
--- a/tools/engtools/hostdata-collectors/scripts/live_stream.py
+++ /dev/null
@@ -1,1600 +0,0 @@
-#!/usr/bin/python
-
-"""
-Copyright (c) 2017 Wind River Systems, Inc.
-
-SPDX-License-Identifier: Apache-2.0
-"""
-
-import os
-import sys
-import time
-import datetime
-import psutil
-import fcntl
-import logging
-from six.moves import configparser
-import itertools
-import six
-from multiprocessing import Process
-from multiprocessing import cpu_count
-from subprocess import Popen
-from subprocess import PIPE
-from collections import OrderedDict
-from six.moves import input
-
-
-def generateString(meas, tag_n, tag_v, field_n, field_v):
-    """generates the required string for the areas where fields are not static"""
-    base = "{},".format(meas)
-    try:
-        for i in range(len(tag_n)):
-            if i == len(tag_n) - 1:
-                # have space between tags and fields
-                base += "'{}'='{}' ".format(tag_n[i], str(tag_v[i]))
-            else:
-                # separate with commas
-                base += "'{}'='{}',".format(tag_n[i], str(tag_v[i]))
-        for i in range(len(field_v)):
-            if str(field_v[i]).replace(".", "").isdigit():
-                if i == len(field_v) - 1:
-                    base += "'{}'='{}'".format(field_n[i], str(field_v[i]))
-                else:
-                    base += "'{}'='{}',".format(field_n[i], str(field_v[i]))
-        return base
-    except IndexError:
-        return None
-
-
-def collectMemtop(influx_info, node, ci):
-    """collects system memory information"""
-    logging.basicConfig(filename="/tmp/livestream.log", filemode="a", format="%(asctime)s %(levelname)s %(message)s", level=logging.INFO)
-    logging.info("memtop data starting collection with a collection interval of {}s".format(ci["memtop"]))
-    measurement = "memtop"
-    tags = {"node": node}
-    MiB = 1024.0
-    while True:
-        try:
-            fields = OrderedDict([("total", 0), ("used", 0), ("free", 0), ("cached", 0), ("buf", 0), ("slab", 0), ("cas", 0), ("clim", 0), ("dirty", 0), ("wback", 0), ("anon", 0), ("avail", 0)])
-            with open("/proc/meminfo", "r") as f:
-                hps = 0
-                # for each line in /proc/meminfo, match with element in fields
-                for line in f:
-                    line = line.strip("\n").split()
-                    if line[0].strip(":").startswith("MemTotal"):
-                        # convert from kibibytes to mebibytes
-                        fields["total"] = float(line[1]) / MiB
-                    elif line[0].strip(":").startswith("MemFree"):
-                        fields["free"] = int(line[1]) / MiB
-                    elif line[0].strip(":").startswith("MemAvailable"):
-                        fields["avail"] = float(line[1]) / MiB
-                    elif line[0].strip(":").startswith("Buffers"):
-                        fields["buf"] = float(line[1]) / MiB
-                    elif line[0].strip(":").startswith("Cached"):
-                        fields["cached"] = float(line[1]) / MiB
-                    elif line[0].strip(":").startswith("Slab"):
-                        fields["slab"] = float(line[1]) / MiB
-                    elif line[0].strip(":").startswith("CommitLimit"):
-                        fields["clim"] = float(line[1]) / MiB
-                    elif line[0].strip(":").startswith("Committed_AS"):
-                        fields["cas"] = float(line[1]) / MiB
-                    elif line[0].strip(":").startswith("Dirty"):
-                        fields["dirty"] = float(line[1]) / MiB
-                    elif line[0].strip(":").startswith("Writeback"):
-                        fields["wback"] = float(line[1]) / MiB
-                    elif line[0].strip(":").endswith("(anon)"):
-
fields["anon"] += float(line[1]) / MiB - elif line[0].strip(":").endswith("Hugepagesize"): - hps = float(line[1]) / MiB - fields["used"] = fields["total"] - fields["avail"] - f.close() - # get platform specific memory info - fields["platform_avail"] = 0 - fields["platform_hfree"] = 0 - for file in os.listdir("/sys/devices/system/node"): - if file.startswith("node"): - node_num = file.replace("node", "").strip("\n") - avail = hfree = 0 - with open("/sys/devices/system/node/{}/meminfo".format(file)) as f1: - for line in f1: - line = line.strip("\n").split() - if line[2].strip(":").startswith("MemFree") or line[2].strip(":").startswith("FilePages") or line[2].strip(":").startswith("SReclaimable"): - avail += float(line[3]) - elif line[2].strip(":").startswith("HugePages_Free"): - hfree = float(line[3]) * hps - fields["{}:avail".format(node_num)] = avail / MiB - fields["{}:hfree".format(node_num)] = hfree - # get platform sum - fields["platform_avail"] += avail / MiB - fields["platform_hfree"] += hfree - f1.close() - s = generateString(measurement, list(tags.keys()), list(tags.values()), list(fields.keys()), list(fields.values())) - if s is None: - good_string = False - else: - good_string = True - if good_string: - # send data to InfluxDB - p = Popen("curl -s -o /dev/null 'http://'{}':'{}'/write?db='{}'' --data-binary '{}'".format(influx_info[0], influx_info[1], influx_info[2], s), shell=True) - p.communicate() - time.sleep(ci["memtop"]) - except KeyboardInterrupt: - break - except Exception: - logging.error("memtop collection stopped unexpectedly with error: {}. Restarting process...".format(sys.exc_info())) - time.sleep(3) - - -def collectMemstats(influx_info, node, ci, services, syseng_services, openstack_services, exclude_list, skip_list, collect_all): - """collects rss and vsz information""" - logging.basicConfig(filename="/tmp/livestream.log", filemode="a", format="%(asctime)s %(levelname)s %(message)s", level=logging.INFO) - logging.info("memstats data starting collection with a collection interval of {}s".format(ci["memstats"])) - measurement = "memstats" - tags = {"node": node} - ps_output = None - influx_string = "" - while True: - try: - fields = {} - ps_output = Popen("exec ps -e -o rss,vsz,cmd", shell=True, stdout=PIPE) - # create dictionary of dictionaries - if collect_all is False: - for svc in services: - fields[svc] = {"rss": 0, "vsz": 0} - fields["static_syseng"] = {"rss": 0, "vsz": 0} - fields["live_syseng"] = {"rss": 0, "vsz": 0} - fields["total"] = {"rss": 0, "vsz": 0} - ps_output.stdout.readline() - while True: - # for each line in ps output, get rss and vsz info - line = ps_output.stdout.readline().strip("\n").split() - # if at end of output, send data - if not line: - break - else: - rss = float(line[0]) - vsz = float(line[1]) - # go through all command outputs - for i in range(2, len(line)): - # remove unwanted characters and borders from cmd name. 
Ex: /usr/bin/example.py -> example.py - svc = line[i].replace("(", "").replace(")", "").strip(":").split("/")[-1].strip("\n") - if svc == "gunicorn": - gsvc = line[-1].replace("[", "").replace("]", "").strip("\n") - if gsvc == "public:application": - gsvc = "keystone-public" - elif gsvc == "admin:application": - gsvc = "keystone-admin" - gsvc = "gunicorn_{}".format(gsvc) - if gsvc not in fields: - fields[gsvc] = {"rss": rss, "vsz": vsz} - else: - fields[gsvc]["rss"] += rss - fields[gsvc]["vsz"] += vsz - - elif svc == "postgres": - if (len(line) <= i + 2): - # Command line could be "sudo su postgres", skip it - break - - if line[i + 1].startswith("-") is False and line[i + 1].startswith("_") is False and line[i + 1] != "psql": - psvc = "" - if line[i + 2] in openstack_services: - psvc = line[i + 2].strip("\n") - else: - for j in range(i + 1, len(line)): - psvc += "{}_".format(line[j].strip("\n")) - psvc = "postgres_{}".format(psvc).strip("_") - if psvc not in fields: - fields[psvc] = {"rss": rss, "vsz": vsz} - else: - fields[psvc]["rss"] += rss - fields[psvc]["vsz"] += vsz - - if collect_all is False: - if svc in services: - fields[svc]["rss"] += rss - fields[svc]["vsz"] += vsz - fields["total"]["rss"] += rss - fields["total"]["vsz"] += vsz - break - elif svc in syseng_services: - if svc == "live_stream.py": - fields["live_syseng"]["rss"] += rss - fields["live_syseng"]["vsz"] += vsz - else: - fields["static_syseng"]["rss"] += rss - fields["static_syseng"]["vsz"] += vsz - fields["total"]["rss"] += rss - fields["total"]["vsz"] += vsz - break - # Collect all services - else: - if svc in exclude_list or svc.startswith("-") or svc[0].isdigit() or svc.startswith("[") or svc.endswith("]"): - continue - elif svc in skip_list or svc.startswith("IPaddr"): - break - else: - if svc not in fields: - fields[svc] = {"rss": rss, "vsz": vsz} - else: - fields[svc]["rss"] += rss - fields[svc]["vsz"] += vsz - fields["total"]["rss"] += rss - fields["total"]["vsz"] += vsz - break - # send data to InfluxDB - for key in fields: - influx_string += "{},'{}'='{}','{}'='{}' '{}'='{}','{}'='{}'".format(measurement, "node", tags["node"], "service", key, "rss", fields[key]["rss"], "vsz", fields[key]["vsz"]) + "\n" - p = Popen("curl -s -o /dev/null 'http://'{}':'{}'/write?db='{}'' --data-binary '{}'".format(influx_info[0], influx_info[1], influx_info[2], influx_string), shell=True) - p.communicate() - influx_string = "" - ps_output.kill() - time.sleep(ci["memstats"]) - except KeyboardInterrupt: - if ps_output is not None: - ps_output.kill() - break - except Exception: - logging.error("memstats collection stopped unexpectedly with error: {}. 
Restarting process...".format(sys.exc_info())) - time.sleep(3) - - -def collectSchedtop(influx_info, node, ci, services, syseng_services, openstack_services, exclude_list, skip_list, collect_all): - """collects task cpu information""" - logging.basicConfig(filename="/tmp/livestream.log", filemode="a", format="%(asctime)s %(levelname)s %(message)s", level=logging.INFO) - logging.info("schedtop data starting collection with a collection interval of {}s".format(ci["schedtop"])) - measurement = "schedtop" - tags = {"node": node} - influx_string = "" - top_output = Popen("exec top -b -c -w 512 -d{}".format(ci["schedtop"]), shell=True, stdout=PIPE) - while True: - try: - fields = {} - pro = psutil.Process(top_output.pid) - # if process dies, restart it - if pro.status() == "zombie": - top_output.kill() - top_output = Popen("exec top -b -c -w 512 -d{}".format(ci["schedtop"]), shell=True, stdout=PIPE) - if collect_all is False: - for svc in services: - fields[svc] = 0 - fields["static_syseng"] = 0 - fields["live_syseng"] = 0 - fields["total"] = 0 - # check first line - line = top_output.stdout.readline() - if not line: - pass - else: - # skip header completely - for _ in range(6): - top_output.stdout.readline() - while True: - line = top_output.stdout.readline().strip("\n").split() - # if end of top output, leave this while loop - if not line: - break - else: - occ = float(line[8]) - # for each command listed, check if it matches one from the list - for i in range(11, len(line)): - # remove unwanted characters and borders from cmd name. Ex: /usr/bin/example.py -> example.py - svc = line[i].replace("(", "").replace(")", "").strip(":").split("/")[-1] - if svc == "gunicorn": - gsvc = line[-1].replace("[", "").replace("]", "").strip("\n") - if gsvc == "public:application": - gsvc = "keystone-public" - elif gsvc == "admin:application": - gsvc = "keystone-admin" - gsvc = "gunicorn_{}".format(gsvc) - if gsvc not in fields: - fields[gsvc] = occ - else: - fields[gsvc] += occ - - elif svc == "postgres": - if (len(line) <= i + 2): - # Command line could be "sudo su postgres", skip it - break - - if line[i + 1].startswith("-") is False and line[i + 1].startswith("_") is False and line[i + 1] != "psql": - psvc = "" - if line[i + 2] in openstack_services: - psvc = line[i + 2].strip("\n") - else: - for j in range(i + 1, len(line)): - psvc += "{}_".format(line[j].strip("\n")) - psvc = "postgres_{}".format(psvc).strip("_") - if psvc not in fields: - fields[psvc] = occ - else: - fields[psvc] += occ - - if collect_all is False: - if svc in services: - fields[svc] += occ - fields["total"] += occ - break - elif svc in syseng_services: - if svc == "live_stream.py": - fields["live_syseng"] += occ - else: - fields["static_syseng"] += occ - fields["total"] += occ - break - # Collect all services - else: - if svc in exclude_list or svc.startswith("-") or svc[0].isdigit() or svc.startswith("[") or svc.endswith("]"): - continue - elif svc in skip_list or svc.startswith("IPaddr"): - break - else: - if svc not in fields: - fields[svc] = occ - else: - fields[svc] += occ - fields["total"] += occ - break - for key in fields: - influx_string += "{},'{}'='{}','{}'='{}' '{}'='{}'".format(measurement, "node", tags["node"], "service", key, "occ", fields[key]) + "\n" - # send data to InfluxDB - p = Popen("curl -s -o /dev/null 'http://'{}':'{}'/write?db='{}'' --data-binary '{}'".format(influx_info[0], influx_info[1], influx_info[2], influx_string), shell=True) - p.communicate() - influx_string = "" - time.sleep(ci["schedtop"]) - except 
KeyboardInterrupt: - if top_output is not None: - top_output.kill() - break - except Exception: - logging.error("schedtop collection stopped unexpectedly with error: {}. Restarting process...".format(sys.exc_info())) - time.sleep(3) - - -def collectDiskstats(influx_info, node, ci): - """collects disk utilization information""" - logging.basicConfig(filename="/tmp/livestream.log", filemode="a", format="%(asctime)s %(levelname)s %(message)s", level=logging.INFO) - logging.info("diskstats data starting collection with a collection interval of {}s".format(ci["diskstats"])) - measurement = "diskstats" - tags = {"node": node, "file_system": None, "type": None, "mount": None} - fields = {"size": 0, "used": 0, "avail": 0, "usage": 0} - influx_string = "" - while True: - try: - parts = psutil.disk_partitions() - for i in parts: - # gather all partitions - tags["mount"] = str(i[1]).split("/")[-1] - # if mount == '', call it root - if tags["mount"] == "": - tags["mount"] = "root" - # skip boot - elif tags["mount"] == "boot": - continue - tags["file_system"] = str(i[0]).split("/")[-1] - tags["type"] = i[2] - u = psutil.disk_usage(i[1]) - fields["size"] = u[0] - fields["used"] = u[1] - fields["avail"] = u[2] - fields["usage"] = u[3] - influx_string += "{},'{}'='{}','{}'='{}','{}'='{}','{}'='{}' '{}'='{}','{}'='{}','{}'='{}','{}'='{}'".format(measurement, "node", tags["node"], "file_system", tags["file_system"], "type", tags["type"], "mount", tags["mount"], "size", fields["size"], "used", fields["used"], "avail", fields["avail"], "usage", fields["usage"]) + "\n" - p = Popen("curl -s -o /dev/null 'http://'{}':'{}'/write?db='{}'' --data-binary '{}'".format(influx_info[0], influx_info[1], influx_info[2], influx_string), shell=True) - p.communicate() - influx_string = "" - time.sleep(ci["diskstats"]) - except KeyboardInterrupt: - break - except Exception: - logging.error("diskstats collection stopped unexpectedly with error: {}. 
Restarting process...".format(sys.exc_info())) - time.sleep(3) - - -def collectIostat(influx_info, node, ci): - """collect device I/O information""" - logging.basicConfig(filename="/tmp/livestream.log", filemode="a", format="%(asctime)s %(levelname)s %(message)s", level=logging.INFO) - logging.info("iostat data starting collection with a collection interval of {}s".format(ci["iostat"])) - measurement = "iostat" - tags = {"node": node} - sector_size = 512.0 - influx_string = "" - while True: - try: - fields = {} - tmp = {} - tmp1 = {} - start = time.time() - # get initial values - for dev in os.listdir("/sys/block/"): - if dev.startswith("sr"): - continue - else: - fields[dev] = {"r/s": 0, "w/s": 0, "io/s": 0, "rkB/s": 0, "wkB/s": 0, "rrqms/s": 0, "wrqms/s": 0, "util": 0} - tmp[dev] = {"init_reads": 0, "init_reads_merged": 0, "init_read_sectors": 0, "init_read_wait": 0, "init_writes": 0, "init_writes_merged": 0, "init_write_sectors": 0, "init_write_wait": 0, "init_io_progress": 0, "init_io_time": 0, "init_wait_time": 0} - with open("/sys/block/{}/stat".format(dev), "r") as f: - # get initial readings - line = f.readline().strip("\n").split() - tmp[dev]["init_reads"] = int(line[0]) - tmp[dev]["init_reads_merged"] = int(line[1]) - tmp[dev]["init_read_sectors"] = int(line[2]) - tmp[dev]["init_read_wait"] = int(line[3]) - tmp[dev]["init_writes"] = int(line[4]) - tmp[dev]["init_writes_merged"] = int(line[5]) - tmp[dev]["init_write_sectors"] = int(line[6]) - tmp[dev]["init_write_wait"] = int(line[7]) - tmp[dev]["init_io_progress"] = int(line[8]) - tmp[dev]["init_io_time"] = int(line[9]) - tmp[dev]["init_wait_time"] = int(line[10]) - time.sleep(ci["iostat"]) - dt = time.time() - start - # get values again - for dev in os.listdir("/sys/block/"): - if dev.startswith("sr"): - continue - else: - # during a swact, some devices may not have been read in the initial reading. 
If found now, add them to dict - if dev not in fields: - fields[dev] = {"r/s": 0, "w/s": 0, "io/s": 0, "rkB/s": 0, "wkB/s": 0, "rrqms/s": 0, "wrqms/s": 0, "util": 0} - tmp1[dev] = {"reads": 0, "reads_merged": 0, "read_sectors": 0, "read_wait": 0, "writes": 0, "writes_merged": 0, "write_sectors": 0, "write_wait": 0, "io_progress": 0, "io_time": 0, "wait_time": 0} - with open("/sys/block/{}/stat".format(dev), "r") as f: - line = f.readline().strip("\n").split() - tmp1[dev]["reads"] = int(line[0]) - tmp1[dev]["reads_merged"] = int(line[1]) - tmp1[dev]["read_sectors"] = int(line[2]) - tmp1[dev]["read_wait"] = int(line[3]) - tmp1[dev]["writes"] = int(line[4]) - tmp1[dev]["writes_merged"] = int(line[5]) - tmp1[dev]["write_sectors"] = int(line[6]) - tmp1[dev]["write_wait"] = int(line[7]) - tmp1[dev]["io_progress"] = int(line[8]) - tmp1[dev]["io_time"] = int(line[9]) - tmp1[dev]["wait_time"] = int(line[10]) - # take difference and divide by delta t - for key in fields: - # if device was found in initial and second reading, do calculation - if key in tmp and key in tmp1: - fields[key]["r/s"] = abs(tmp1[key]["reads"] - tmp[key]["init_reads"]) / dt - fields[key]["w/s"] = abs(tmp1[key]["writes"] - tmp[key]["init_writes"]) / dt - fields[key]["rkB/s"] = abs(tmp1[key]["read_sectors"] - tmp[key]["init_read_sectors"]) * sector_size / dt / 1000 - fields[key]["wkB/s"] = abs(tmp1[key]["write_sectors"] - tmp[key]["init_write_sectors"]) * sector_size / dt / 1000 - fields[key]["rrqms/s"] = abs(tmp1[key]["reads_merged"] - tmp[key]["init_reads_merged"]) / dt - fields[key]["wrqms/s"] = abs(tmp1[key]["writes_merged"] - tmp[key]["init_writes_merged"]) / dt - fields[key]["io/s"] = fields[key]["r/s"] + fields[key]["w/s"] + fields[key]["rrqms/s"] + fields[key]["wrqms/s"] - fields[key]["util"] = abs(tmp1[key]["io_time"] - tmp[key]["init_io_time"]) / dt / 10 - influx_string += "{},'{}'='{}','{}'='{}' '{}'='{}','{}'='{}','{}'='{}','{}'='{}','{}'='{}','{}'='{}','{}'='{}','{}'='{}'".format(measurement, "node", tags["node"], "device", key, "r/s", fields[key]["r/s"], "w/s", fields[key]["w/s"], "rkB/s", fields[key]["rkB/s"], "wkB/s", fields[key]["wkB/s"], "rrqms/s", fields[key]["rrqms/s"], "wrqms/s", fields[key]["wrqms/s"], "io/s", fields[key]["io/s"], "util", fields[key]["util"]) + "\n" - # send data to InfluxDB - p = Popen("curl -s -o /dev/null 'http://'{}':'{}'/write?db='{}'' --data-binary '{}'".format(influx_info[0], influx_info[1], influx_info[2], influx_string), shell=True) - p.communicate() - influx_string = "" - except KeyboardInterrupt: - break - except Exception: - logging.error("iostat collection stopped unexpectedly with error: {}. 
Restarting process...".format(sys.exc_info())) - time.sleep(3) - - -def collectLoadavg(influx_info, node, ci): - """collects cpu load average information""" - logging.basicConfig(filename="/tmp/livestream.log", filemode="a", format="%(asctime)s %(levelname)s %(message)s", level=logging.INFO) - logging.info("load_avg data starting collection with a collection interval of {}s".format(ci["load_avg"])) - measurement = "load_avg" - tags = {"node": node} - fields = {"load_avg": 0} - while True: - try: - fields["load_avg"] = os.getloadavg()[0] - p = Popen("curl -s -o /dev/null 'http://'{}':'{}'/write?db='{}'' --data-binary '{},'{}'='{}' '{}'='{}''".format(influx_info[0], influx_info[1], influx_info[2], measurement, "node", tags["node"], "load_avg", fields["load_avg"]), shell=True) - p.communicate() - time.sleep(ci["load_avg"]) - except KeyboardInterrupt: - break - except Exception: - logging.error("load_avg collection stopped unexpectedly with error: {}. Restarting process...".format(sys.exc_info())) - time.sleep(3) - - -def collectOcctop(influx_info, node, ci, pc): - """collects cpu utilization information""" - logging.basicConfig(filename="/tmp/livestream.log", filemode="a", format="%(asctime)s %(levelname)s %(message)s", level=logging.INFO) - logging.info("occtop data starting collection with a collection interval of {}s".format(ci["occtop"])) - measurement = "occtop" - tags = {"node": node} - platform_cores = pc - influx_string = "" - while True: - try: - cpu = psutil.cpu_percent(percpu=True) - cpu_times = psutil.cpu_times_percent(percpu=True) - fields = {} - # sum all cpu percents - total = float(sum(cpu)) - sys_total = 0 - fields["platform_total"] = {"usage": 0, "system": 0} - cores = 0 - # for each core, get values and assign a tag - for el in cpu: - fields["usage"] = float(el) - fields["system"] = float(cpu_times[cores][2]) - sys_total += float(cpu_times[cores][2]) - tags["core"] = "core_{}".format(cores) - influx_string += "{},'{}'='{}','{}'='{}' '{}'='{}','{}'='{}'".format(measurement, "node", tags["node"], "core", tags["core"], "usage", fields["usage"], "system", fields["system"]) + "\n" - if len(platform_cores) > 0: - if cores in platform_cores: - fields["platform_total"]["usage"] += float(el) - fields["platform_total"]["system"] += float(cpu_times[cores][2]) - cores += 1 - # add usage and system total to influx string - if len(platform_cores) > 0: - influx_string += "{},'{}'='{}','{}'='{}' '{}'='{}','{}'='{}'".format(measurement, "node", tags["node"], "core", "platform_total", "usage", fields["platform_total"]["usage"], "system", fields["platform_total"]["system"]) + "\n" - influx_string += "{},'{}'='{}','{}'='{}' '{}'='{}','{}'='{}'".format(measurement, "node", tags["node"], "core", "total", "usage", total, "system", sys_total) + "\n" - # send data to Influx - p = Popen("curl -s -o /dev/null 'http://'{}':'{}'/write?db='{}'' --data-binary '{}'".format(influx_info[0], influx_info[1], influx_info[2], influx_string), shell=True) - p.communicate() - influx_string = "" - time.sleep(ci["occtop"]) - except KeyboardInterrupt: - break - except Exception: - logging.error("occtop collection stopped unexpectedly with error: {}. 
Restarting process...".format(sys.exc_info())) - time.sleep(3) - - -def collectNetstats(influx_info, node, ci): - """collects network interface information""" - logging.basicConfig(filename="/tmp/livestream.log", filemode="a", format="%(asctime)s %(levelname)s %(message)s", level=logging.INFO) - logging.info("netstats data starting collection with a collection interval of {}s".format(ci["netstats"])) - measurement = "netstats" - tags = {"node": node} - fields = {} - prev_fields = {} - Mbps = float(1000000 / 8) - influx_string = "" - while True: - try: - net = psutil.net_io_counters(pernic=True) - # get initial data for difference calculation - for key in net: - prev_fields[key] = {"tx_B": net[key][0], "rx_B": net[key][1], "tx_p": net[key][2], "rx_p": net[key][3]} - start = time.time() - time.sleep(ci["netstats"]) - net = psutil.net_io_counters(pernic=True) - # get new data for difference calculation - dt = time.time() - start - for key in net: - tx_B = (float(net[key][0]) - float(prev_fields[key]["tx_B"])) - tx_Mbps = tx_B / Mbps / dt - rx_B = (float(net[key][1]) - float(prev_fields[key]["rx_B"])) - rx_Mbps = rx_B / Mbps / dt - tx_pps = (float(net[key][2]) - float(prev_fields[key]["tx_p"])) / dt - rx_pps = (float(net[key][3]) - float(prev_fields[key]["rx_p"])) / dt - # ensure no division by zero - if rx_B > 0 and rx_pps > 0: - rx_packet_size = rx_B / rx_pps - else: - rx_packet_size = 0 - if tx_B > 0 and tx_pps > 0: - tx_packet_size = tx_B / tx_pps - else: - tx_packet_size = 0 - fields[key] = {"tx_mbps": tx_Mbps, "rx_mbps": rx_Mbps, "tx_pps": tx_pps, "rx_pps": rx_pps, "tx_packet_size": tx_packet_size, "rx_packet_size": rx_packet_size} - for key in fields: - influx_string += "{},'{}'='{}','{}'='{}' '{}'='{}','{}'='{}','{}'='{}','{}'='{}','{}'='{}','{}'='{}'".format(measurement, "node", tags["node"], "interface", key, "rx_mbps", fields[key]["rx_mbps"], "tx_mbps", fields[key]["tx_mbps"], "rx_pps", fields[key]["rx_pps"], "tx_pps", fields[key]["tx_pps"], "rx_packet_size", fields[key]["rx_packet_size"], "tx_packet_size", fields[key]["tx_packet_size"]) + "\n" - # send data to InfluxDB - p = Popen("curl -s -o /dev/null 'http://'{}':'{}'/write?db='{}'' --data-binary '{}'".format(influx_info[0], influx_info[1], influx_info[2], influx_string), shell=True) - p.communicate() - influx_string = "" - except KeyboardInterrupt: - break - except Exception: - logging.error("netstats collection stopped unexpectedly with error: {}. 
Restarting process...".format(sys.exc_info())) - time.sleep(3) - - -def collectPostgres(influx_info, node, ci): - """collects postgres db size and postgres service size information""" - logging.basicConfig(filename="/tmp/livestream.log", filemode="a", format="%(asctime)s %(levelname)s %(message)s", level=logging.INFO) - logging.info("postgres data starting collection with a collection interval of {}s".format(ci["postgres"])) - measurement = "postgres_db_size" - measurement1 = "postgres_svc_stats" - tags = {"node": node, "service": None, "table_schema": 0, "table": None} - fields = {"db_size": 0, "connections": 0} - fields1 = {"table_size": 0, "total_size": 0, "index_size": 0, "live_tuples": 0, "dead_tuples": 0} - postgres_output = postgres_output1 = None - influx_string = influx_string1 = "" - good_string = False - dbcount = 0 - BATCH_SIZE = 10 - - while True: - try: - # make sure this is active controller, otherwise postgres queries wont work - if isActiveController(): - while True: - postgres_output = Popen("sudo -u postgres psql --pset pager=off -q -t -c'SELECT datname, pg_database_size(datname) FROM pg_database WHERE datistemplate = false;'", shell=True, stdout=PIPE) - db_lines = postgres_output.stdout.read().replace(" ", "").strip().split("\n") - if db_lines == "" or db_lines is None: - postgres_output.kill() - break - else: - # for each database from the previous output - for line in db_lines: - if not line: - break - line = line.replace(" ", "").split("|") - tags["service"] = line[0] - fields["db_size"] = line[1] - # send DB size to InfluxDB - influx_string += "{},'{}'='{}','{}'='{}' '{}'='{}'".format(measurement, "node", tags["node"], "service", tags["service"], "db_size", fields["db_size"]) + "\n" - # get tables for each database - sql = "SELECT table_schema,table_name,pg_size_pretty(table_size) AS table_size,pg_size_pretty(indexes_size) AS indexes_size,pg_size_pretty(total_size) AS total_size,live_tuples,dead_tuples FROM (SELECT table_schema,table_name,pg_table_size(table_name) AS table_size,pg_indexes_size(table_name) AS indexes_size,pg_total_relation_size(table_name) AS total_size,pg_stat_get_live_tuples(table_name::regclass) AS live_tuples,pg_stat_get_dead_tuples(table_name::regclass) AS dead_tuples FROM (SELECT table_schema,table_name FROM information_schema.tables WHERE table_schema='public' AND table_type='BASE TABLE') AS all_tables ORDER BY total_size DESC) AS pretty_sizes;" - postgres_output1 = Popen('sudo -u postgres psql --pset pager=off -q -t -d{} -c"{}"'.format(line[0], sql), shell=True, stdout=PIPE) - tbl_lines = postgres_output1.stdout.read().replace(" ", "").strip().split("\n") - for line in tbl_lines: - if line == "": - continue - else: - line = line.replace(" ", "").split("|") - elements = list() - # ensures all data is present - if len(line) != 7: - good_string = False - break - else: - # do some conversions - for el in line: - if el.endswith("bytes"): - el = int(el.replace("bytes", "")) - elif el.endswith("kB"): - el = el.replace("kB", "") - el = int(el) * 1000 - elif el.endswith("MB"): - el = el.replace("MB", "") - el = int(el) * 1000000 - elif el.endswith("GB"): - el = el.replace("GB", "") - el = int(el) * 1000000000 - elements.append(el) - tags["table_schema"] = elements[0] - tags["table"] = elements[1] - fields1["table_size"] = int(elements[2]) - fields1["index_size"] = int(elements[3]) - fields1["total_size"] = int(elements[4]) - fields1["live_tuples"] = int(elements[5]) - fields1["dead_tuples"] = int(elements[6]) - influx_string1 += 
"{},'{}'='{}','{}'='{}','{}'='{}','{}'='{}' '{}'='{}','{}'='{}','{}'='{}','{}'='{}','{}'='{}'".format(measurement1, "node", tags["node"], "service", tags["service"], "table_schema", tags["table_schema"], "table", tags["table"], "table_size", fields1["table_size"], "index_size", fields1["index_size"], "total_size", fields1["total_size"], "live_tuples", fields1["live_tuples"], "dead_tuples", fields1["dead_tuples"]) + "\n" - good_string = True - dbcount += 1 - if dbcount == BATCH_SIZE and good_string: - # Curl will barf if the batch is too large - p = Popen("curl -s -o /dev/null 'http://'{}':'{}'/write?db='{}'' --data-binary '{}'".format(influx_info[0], influx_info[1], influx_info[2], influx_string1), shell=True) - p.communicate() - influx_string1 = "" - dbcount = 0 - if good_string: - # send table data to InfluxDB - p = Popen("curl -s -o /dev/null 'http://'{}':'{}'/write?db='{}'' --data-binary '{}'".format(influx_info[0], influx_info[1], influx_info[2], influx_string), shell=True) - p.communicate() - p = Popen("curl -s -o /dev/null 'http://'{}':'{}'/write?db='{}'' --data-binary '{}'".format(influx_info[0], influx_info[1], influx_info[2], influx_string1), shell=True) - p.communicate() - influx_string = influx_string1 = "" - dbcount = 0 - time.sleep(ci["postgres"]) - postgres_output1.kill() - postgres_output.kill() - else: - time.sleep(20) - except KeyboardInterrupt: - if postgres_output is not None: - postgres_output.kill() - if postgres_output1 is not None: - postgres_output1.kill() - break - except Exception: - logging.error("postgres collection stopped unexpectedly with error: {}. Restarting process...".format(sys.exc_info())) - time.sleep(3) - - -def collectPostgresConnections(influx_info, node, ci, fast): - """collect postgres connections information""" - logging.basicConfig(filename="/tmp/livestream.log", filemode="a", format="%(asctime)s %(levelname)s %(message)s", level=logging.INFO) - if fast: - logging.info("postgres_connections data starting collection with a constant collection interval") - else: - logging.info("postgres_connections data starting collection with a collection interval of {}s".format(ci["postgres"])) - measurement = "postgres_connections" - tags = {"node": node, "service": None, "state": None} - connections_output = None - influx_string = "" - while True: - try: - # make sure this is active controller, otherwise postgres queries wont work - if isActiveController(): - while True: - fields = {} - # outputs a list of postgres dbs and their connections - connections_output = Popen("sudo -u postgres psql --pset pager=off -q -c 'SELECT datname,state,count(*) from pg_stat_activity group by datname,state;'", shell=True, stdout=PIPE) - line = connections_output.stdout.readline() - if line == "" or line is None: - break - # skip header - connections_output.stdout.readline() - while True: - line = connections_output.stdout.readline().strip("\n") - if not line: - break - else: - line = line.replace(" ", "").split("|") - if len(line) != 3: - continue - else: - svc = line[0] - connections = int(line[2]) - tags["service"] = svc - if svc not in fields: - fields[svc] = {"active": 0, "idle": 0, "other": 0} - if line[1] == "active": - fields[svc]["active"] = connections - elif line[1] == "idle": - fields[svc]["idle"] = connections - else: - fields[svc]["other"] = connections - influx_string += "{},'{}'='{}','{}'='{}','{}'='{}' '{}'='{}'".format(measurement, "node", tags["node"], "service", tags["service"], "state", "active", "connections", fields[svc]["active"]) + "\n" - influx_string 
+= "{},'{}'='{}','{}'='{}','{}'='{}' '{}'='{}'".format(measurement, "node", tags["node"], "service", tags["service"], "state", "idle", "connections", fields[svc]["idle"]) + "\n" - influx_string += "{},'{}'='{}','{}'='{}','{}'='{}' '{}'='{}'".format(measurement, "node", tags["node"], "service", tags["service"], "state", "other", "connections", fields[svc]["other"]) + "\n" - - # send data to InfluxDB - p = Popen("curl -s -o /dev/null 'http://'{}':'{}'/write?db='{}'' --data-binary '{}'".format(influx_info[0], influx_info[1], influx_info[2], influx_string), shell=True) - p.communicate() - influx_string = "" - connections_output.kill() - if fast: - pass - else: - time.sleep(ci["postgres"]) - else: - time.sleep(20) - except KeyboardInterrupt: - if connections_output is not None: - connections_output.kill() - break - except Exception: - logging.error("postgres_connections collection stopped unexpectedly with error: {}. Restarting process...".format(sys.exc_info())) - time.sleep(3) - - -def collectRabbitMq(influx_info, node, ci): - """collects rabbitmq information""" - logging.basicConfig(filename="/tmp/livestream.log", filemode="a", format="%(asctime)s %(levelname)s %(message)s", level=logging.INFO) - logging.info("rabbitmq data starting collection with a collection interval of {}s".format(ci["rabbitmq"])) - measurement = "rabbitmq" - tags = OrderedDict([("node", node)]) - rabbitmq_output = None - while True: - try: - # make sure this is active controller, otherwise rabbit queries wont work - if isActiveController(): - while True: - fields = OrderedDict([]) - rabbitmq_output = Popen("sudo rabbitmqctl -n rabbit@localhost status", shell=True, stdout=PIPE) - # needed data starts where output = '{memory,[' - line = rabbitmq_output.stdout.readline() - # if no data is returned, exit - if line == "" or line is None: - rabbitmq_output.kill() - break - else: - line = rabbitmq_output.stdout.read().strip("\n").split("{memory,[") - if len(line) != 2: - rabbitmq_output.kill() - break - else: - # remove brackets from data - info = line[1].replace(" ", "").replace("{", "").replace("}", "").replace("\n", "").replace("[", "").replace("]", "").split(",") - for i in range(len(info) - 3): - if info[i].endswith("total"): - info[i] = info[i].replace("total", "memory_total") - # some data needs string manipulation - if info[i].startswith("clustering") or info[i].startswith("amqp"): - info[i] = "listeners_" + info[i] - if info[i].startswith("total_"): - info[i] = "descriptors_" + info[i] - if info[i].startswith("limit") or info[i].startswith("used"): - info[i] = "processes_" + info[i] - if info[i].replace("_", "").isalpha() and info[i + 1].isdigit(): - fields[info[i]] = info[i + 1] - s = generateString(measurement, list(tags.keys()), list(tags.values()), list(fields.keys()), list(fields.values())) - if s is None: - rabbitmq_output.kill() - else: - # send data to InfluxDB - p = Popen("curl -s -o /dev/null 'http://'{}':'{}'/write?db='{}'' --data-binary '{}'".format(influx_info[0], influx_info[1], influx_info[2], s), shell=True) - p.communicate() - time.sleep(ci["rabbitmq"]) - rabbitmq_output.kill() - else: - time.sleep(20) - except KeyboardInterrupt: - if rabbitmq_output is not None: - rabbitmq_output.kill() - break - except Exception: - logging.error("rabbitmq collection stopped unexpectedly with error: {}. 
Restarting process...".format(sys.exc_info())) - time.sleep(3) - - -def collectRabbitMqSvc(influx_info, node, ci, services): - """collects rabbitmq messaging information""" - logging.basicConfig(filename="/tmp/livestream.log", filemode="a", format="%(asctime)s %(levelname)s %(message)s", level=logging.INFO) - logging.info("rabbitmq_svc data starting collection with a collection interval of {}s".format(ci["rabbitmq"])) - measurement = "rabbitmq_svc" - tags = {"node": node, "service": None} - fields = {"messages": 0, "messages_ready": 0, "messages_unacknowledged": 0, "memory": 0, "consumers": 0} - rabbitmq_svc_output = None - good_string = False - influx_string = "" - while True: - try: - # make sure this is active controller, otherwise rabbit queries wont work - if isActiveController(): - while True: - rabbitmq_svc_output = Popen("sudo rabbitmqctl -n rabbit@localhost list_queues name messages messages_ready messages_unacknowledged memory consumers", shell=True, stdout=PIPE) - # # if no data is returned, exit - if rabbitmq_svc_output.stdout.readline() == "" or rabbitmq_svc_output.stdout.readline() is None: - rabbitmq_svc_output.kill() - break - else: - for line in rabbitmq_svc_output.stdout: - line = line.split() - if not line: - break - else: - if len(line) != 6: - good_string = False - break - else: - # read line and fill fields - if line[0] in services: - tags["service"] = line[0] - fields["messages"] = line[1] - fields["messages_ready"] = line[2] - fields["messages_unacknowledged"] = line[3] - fields["memory"] = line[4] - fields["consumers"] = line[5] - influx_string += "{},'{}'='{}','{}'='{}' '{}'='{}','{}'='{}','{}'='{}','{}'='{}','{}'='{}'".format(measurement, "node", tags["node"], "service", tags["service"], "messages", fields["messages"], "messages_ready", fields["messages_ready"], "messages_unacknowledged", fields["messages_unacknowledged"], "memory", fields["memory"], "consumers", fields["consumers"]) + "\n" - good_string = True - if good_string: - # send data to InfluxDB - p = Popen("curl -s -o /dev/null 'http://'{}':'{}'/write?db='{}'' --data-binary '{}'".format(influx_info[0], influx_info[1], influx_info[2], influx_string), shell=True) - p.communicate() - influx_string = "" - time.sleep(ci["rabbitmq"]) - rabbitmq_svc_output.kill() - else: - time.sleep(20) - except KeyboardInterrupt: - if rabbitmq_svc_output is not None: - rabbitmq_svc_output.kill() - break - except Exception: - logging.error("rabbitmq_svc collection stopped unexpectedly with error: {}. 
Restarting process...".format(sys.exc_info())) - time.sleep(3) - - -def collectFilestats(influx_info, node, ci, services, syseng_services, exclude_list, skip_list, collect_all): - """collects open file information""" - logging.basicConfig(filename="/tmp/livestream.log", filemode="a", format="%(asctime)s %(levelname)s %(message)s", level=logging.INFO) - logging.info("filestats data starting collection with a collection interval of {}s".format(ci["filestats"])) - measurement = "filestats" - tags = {"node": node} - influx_string = "" - while True: - try: - fields = {} - # fill dict with services from engtools.conf - if collect_all is False: - for svc in services: - fields[svc] = {"read/write": 0, "write": 0, "read": 0} - fields["static_syseng"] = {"read/write": 0, "write": 0, "read": 0} - fields["live_syseng"] = {"read/write": 0, "write": 0, "read": 0} - fields["total"] = {"read/write": 0, "write": 0, "read": 0} - for process in os.listdir("/proc/"): - if process.isdigit(): - # sometimes the process dies before reading its info - try: - svc = psutil.Process(int(process)).name() - svc = svc.split()[0].replace("(", "").replace(")", "").strip(":").split("/")[-1] - except Exception: - continue - if collect_all is False: - if svc in services: - try: - p = Popen("ls -l /proc/{}/fd".format(process), shell=True, stdout=PIPE) - p.stdout.readline() - while True: - line = p.stdout.readline().strip("\n").split() - if not line: - break - else: - priv = line[0] - if priv[1] == "r" and priv[2] == "w": - fields[svc]["read/write"] += 1 - fields["total"]["read/write"] += 1 - elif priv[1] == "r" and priv[2] != "w": - fields[svc]["read"] += 1 - fields["total"]["read"] += 1 - elif priv[1] != "r" and priv[2] == "w": - fields[svc]["write"] += 1 - fields["total"]["write"] += 1 - except Exception: - p.kill() - continue - p.kill() - - elif svc in syseng_services: - try: - p = Popen("ls -l /proc/{}/fd".format(process), shell=True, stdout=PIPE) - p.stdout.readline() - while True: - line = p.stdout.readline().strip("\n").split() - if not line: - break - else: - priv = line[0] - if svc == "live_stream.py": - if priv[1] == "r" and priv[2] == "w": - fields["live_syseng"]["read/write"] += 1 - fields["total"]["read/write"] += 1 - elif priv[1] == "r" and priv[2] != "w": - fields["live_syseng"]["read"] += 1 - fields["total"]["read"] += 1 - elif priv[1] != "r" and priv[2] == "w": - fields["live_syseng"]["write"] += 1 - fields["total"]["write"] += 1 - else: - if priv[1] == "r" and priv[2] == "w": - fields["static_syseng"]["read/write"] += 1 - fields["total"]["read/write"] += 1 - elif priv[1] == "r" and priv[2] != "w": - fields["static_syseng"]["read"] += 1 - fields["total"]["read"] += 1 - elif priv[1] != "r" and priv[2] == "w": - fields["static_syseng"]["write"] += 1 - fields["total"]["write"] += 1 - except Exception: - p.kill() - continue - p.kill() - - else: - # remove garbage processes - if svc in exclude_list or svc in skip_list or svc.startswith("-") or svc.endswith("-") or svc[0].isdigit() or svc[-1].isdigit() or svc[0].isupper(): - continue - elif svc not in fields: - fields[svc] = {"read/write": 0, "write": 0, "read": 0} - try: - p = Popen("ls -l /proc/{}/fd".format(process), shell=True, stdout=PIPE) - p.stdout.readline() - while True: - line = p.stdout.readline().strip("\n").split() - if not line: - break - else: - priv = line[0] - if priv[1] == "r" and priv[2] == "w": - fields[svc]["read/write"] += 1 - fields["total"]["read/write"] += 1 - elif priv[1] == "r" and priv[2] != "w": - fields[svc]["read"] += 1 - 
fields["total"]["read"] += 1 - elif priv[1] != "r" and priv[2] == "w": - fields[svc]["write"] += 1 - fields["total"]["write"] += 1 - if fields[svc]["read/write"] == 0 and fields[svc]["read"] == 0 and fields[svc]["write"] == 0: - del fields[svc] - except Exception: - p.kill() - continue - p.kill() - for key in fields: - influx_string += "{},'{}'='{}','{}'='{}' '{}'='{}','{}'='{}','{}'='{}'".format(measurement, "node", tags["node"], "service", key, "read/write", fields[key]["read/write"], "write", fields[key]["write"], "read", fields[key]["read"]) + "\n" - # send data to InfluxDB - p = Popen("curl -s -o /dev/null 'http://'{}':'{}'/write?db='{}'' --data-binary '{}'".format(influx_info[0], influx_info[1], influx_info[2], influx_string), shell=True) - p.communicate() - influx_string = "" - time.sleep(ci["filestats"]) - except KeyboardInterrupt: - break - except Exception: - logging.error("filestats collection stopped unexpectedly with error: {}. Restarting process...".format(sys.exc_info())) - time.sleep(3) - - -def collectVswitch(influx_info, node, ci): - """collects vshell information""" - logging.basicConfig(filename="/tmp/livestream.log", filemode="a", format="%(asctime)s %(levelname)s %(message)s", level=logging.INFO) - logging.info("vswitch data starting collection with a collection interval of {}s".format(ci["vswitch"])) - measurement = "vswitch" - tags = OrderedDict([("node", node), ("engine", 0)]) - tags1 = OrderedDict([("node", node), ("port", 0)]) - tags2 = OrderedDict([("node", node), ("interface", 0)]) - fields = OrderedDict([("cpuid", 0), ("rx_packets", 0), ("tx_packets", 0), ("rx_discard", 0), ("tx_discard", 0), ("tx_disabled", 0), ("tx_overflow", 0), ("tx_timeout", 0), ("usage", 0)]) - fields1 = OrderedDict([("rx_packets", 0), ("tx_packets", 0), ("rx_bytes", 0), ("tx_bytes", 0), ("tx_errors", 0), ("rx_errors", 0), ("rx_nombuf", 0)]) - fields2 = OrderedDict([("rx_packets", 0), ("tx_packets", 0), ("rx_bytes", 0), ("tx_bytes", 0), ("tx_errors", 0), ("rx_errors", 0), ("tx_discards", 0), ("rx_discards", 0), ("rx_floods", 0), ("rx_no_vlan", 0)]) - vshell_engine_stats_output = vshell_port_stats_output = vshell_interface_stats_output = None - influx_string = "" - while True: - try: - vshell_engine_stats_output = Popen("vshell engine-stats-list", shell=True, stdout=PIPE) - # skip first few lines - vshell_engine_stats_output.stdout.readline() - vshell_engine_stats_output.stdout.readline() - vshell_engine_stats_output.stdout.readline() - while True: - line = vshell_engine_stats_output.stdout.readline().replace("|", "").split() - if not line: - break - # skip lines like +++++++++++++++++++++++++++++ - elif line[0].startswith("+"): - continue - else: - # get info from output - i = 2 - tags["engine"] = line[1] - for key in fields: - fields[key] = line[i].strip("%") - i += 1 - influx_string += "{},'{}'='{}','{}'='{}' '{}'='{}','{}'='{}','{}'='{}','{}'='{}','{}'='{}','{}'='{}','{}'='{}','{}'='{}','{}'='{}'".format(measurement, list(tags.keys())[0], list(tags.values())[0], list(tags.keys())[1], list(tags.values())[1], list(fields.keys())[0], list(fields.values())[0], list(fields.keys())[1], list(fields.values())[1], list(fields.keys())[2], list(fields.values())[2], list(fields.keys())[3], list(fields.values())[3], list(fields.keys())[4], list(fields.values())[4], list(fields.keys())[5], list(fields.values())[5], list(fields.keys())[6], list(fields.values())[6], list(fields.keys())[7], list(fields.values())[7], list(fields.keys())[8], list(fields.values())[8]) + "\n" - 
vshell_engine_stats_output.kill() - vshell_port_stats_output = Popen("vshell port-stats-list", shell=True, stdout=PIPE) - vshell_port_stats_output.stdout.readline() - vshell_port_stats_output.stdout.readline() - vshell_port_stats_output.stdout.readline() - while True: - line = vshell_port_stats_output.stdout.readline().replace("|", "").split() - if not line: - break - elif line[0].startswith("+"): - continue - else: - i = 3 - tags1["port"] = line[1] - for key in fields1: - fields1[key] = line[i].strip("%") - i += 1 - influx_string += "{},'{}'='{}','{}'='{}' '{}'='{}','{}'='{}','{}'='{}','{}'='{}','{}'='{}','{}'='{}','{}'='{}'".format(measurement, list(tags1.keys())[0], list(tags1.values())[0], list(tags1.keys())[1], list(tags1.values())[1], list(fields1.keys())[0], list(fields1.values())[0], list(fields1.keys())[1], list(fields1.values())[1], list(fields1.keys())[2], list(fields1.values())[2], list(fields1.keys())[3], list(fields1.values())[3], list(fields1.keys())[4], list(fields1.values())[4], list(fields1.keys())[5], list(fields1.values())[5], list(fields1.keys())[6], list(fields1.values())[6]) + "\n" - vshell_port_stats_output.kill() - vshell_interface_stats_output = Popen("vshell interface-stats-list", shell=True, stdout=PIPE) - vshell_interface_stats_output.stdout.readline() - vshell_interface_stats_output.stdout.readline() - vshell_interface_stats_output.stdout.readline() - while True: - line = vshell_interface_stats_output.stdout.readline().replace("|", "").split() - if not line: - break - elif line[0].startswith("+"): - continue - else: - if line[2] == "ethernet" and line[3].startswith("eth"): - i = 4 - tags2["interface"] = line[3] - for key in fields2: - fields2[key] = line[i].strip("%") - i += 1 - influx_string += "{},'{}'='{}','{}'='{}' '{}'='{}','{}'='{}','{}'='{}','{}'='{}','{}'='{}','{}'='{}','{}'='{}','{}'='{}','{}'='{}','{}'='{}'".format(measurement, list(tags2.keys())[0], list(tags2.values())[0], list(tags2.keys())[1], list(tags2.values())[1], list(fields2.keys())[0], list(fields2.values())[0], list(fields2.keys())[1], list(fields2.values())[1], list(fields2.keys())[2], list(fields2.values())[2], list(fields2.keys())[3], list(fields2.values())[3], list(fields2.keys())[4], list(fields2.values())[4], list(fields2.keys())[5], list(fields2.values())[5], list(fields2.keys())[6], list(fields2.values())[6], list(fields2.keys())[7], list(fields2.values())[7], list(fields2.keys())[8], list(fields2.values())[8], list(fields2.keys())[9], list(fields2.values())[9]) + "\n" - else: - continue - vshell_interface_stats_output.kill() - # send data to InfluxDB - p = Popen("curl -s -o /dev/null 'http://'{}':'{}'/write?db='{}'' --data-binary '{}'".format(influx_info[0], influx_info[1], influx_info[2], influx_string), shell=True) - p.communicate() - influx_string = "" - time.sleep(ci["vswitch"]) - except KeyboardInterrupt: - if vshell_engine_stats_output is not None: - vshell_engine_stats_output.kill() - if vshell_port_stats_output is not None: - vshell_port_stats_output.kill() - if vshell_interface_stats_output is not None: - vshell_interface_stats_output.kill() - break - except Exception: - logging.error("vswitch collection stopped unexpectedly with error: {}. 
Restarting process...".format(sys.exc_info())) - time.sleep(3) - - -def collectCpuCount(influx_info, node, ci): - """collects the number of cores""" - logging.basicConfig(filename="/tmp/livestream.log", filemode="a", format="%(asctime)s %(levelname)s %(message)s", level=logging.INFO) - logging.info("cpu_count data starting collection with a collection interval of {}s".format(ci["cpu_count"])) - measurement = "cpu_count" - tags = {"node": node} - while True: - try: - fields = {"cpu_count": cpu_count()} - p = Popen("curl -s -o /dev/null 'http://'{}':'{}'/write?db='{}'' --data-binary '{},'{}'='{}' '{}'='{}''".format(influx_info[0], influx_info[1], influx_info[2], measurement, "node", tags["node"], "cpu_count", fields["cpu_count"]), shell=True) - p.communicate() - time.sleep(ci["cpu_count"]) - except KeyboardInterrupt: - break - except Exception: - logging.error("cpu_count collection stopped unexpectedly with error: {}. Restarting process...".format(sys.exc_info())) - - -def collectApiStats(influx_info, node, ci, services, db_port, rabbit_port): - logging.basicConfig(filename="/tmp/livestream.log", filemode="a", format="%(asctime)s %(levelname)s %(message)s", level=logging.INFO) - logging.info("api_request data starting collection with a collection interval of {}s".format(ci["cpu_count"])) - measurement = "api_requests" - tags = {"node": node} - influx_string = "" - lsof_args = ['lsof', '-Pn', '-i', 'tcp'] - while True: - try: - fields = {} - lsof_result = Popen(lsof_args, shell=False, stdout=PIPE) - lsof_lines = list() - while True: - line = lsof_result.stdout.readline().strip("\n") - if not line: - break - lsof_lines.append(line) - lsof_result.kill() - for name, service in services.items(): - pid_list = list() - check_pid = False - if name == "keystone-public": - check_pid = True - ps_result = Popen("pgrep -f --delimiter=' ' keystone-public", shell=True, stdout=PIPE) - pid_list = ps_result.stdout.readline().strip().split(' ') - ps_result.kill() - elif name == "gnocchi-api": - check_pid = True - ps_result = Popen("pgrep -f --delimiter=' ' gnocchi-api", shell=True, stdout=PIPE) - pid_list = ps_result.stdout.readline().strip().split(' ') - ps_result.kill() - api_count = 0 - db_count = 0 - rabbit_count = 0 - for line in lsof_lines: - if service['name'] is not None and service['name'] in line and (not check_pid or any(pid in line for pid in pid_list)): - if service['api-port'] is not None and service['api-port'] in line: - api_count += 1 - elif db_port is not None and db_port in line: - db_count += 1 - elif rabbit_port is not None and rabbit_port in line: - rabbit_count += 1 - fields[name] = {"api": api_count, "db": db_count, "rabbit": rabbit_count} - influx_string += "{},'{}'='{}','{}'='{}' '{}'='{}','{}'='{}','{}'='{}'".format(measurement, "node", tags["node"], "service", name, "api", fields[name]["api"], "db", fields[name]["db"], "rabbit", fields[name]["rabbit"]) + "\n" - p = Popen("curl -s -o /dev/null 'http://'{}':'{}'/write?db='{}'' --data-binary '{}'".format(influx_info[0], influx_info[1], influx_info[2], influx_string), shell=True) - p.communicate() - influx_string = "" - except KeyboardInterrupt: - break - except Exception: - logging.error("api_request collection stopped unexpectedly with error: {}. 
Restarting process...".format(sys.exc_info())) - time.sleep(3) - - -def getPlatformCores(node, cpe): - """returns the cores dedicated to platform use""" - if cpe is True or node.startswith("compute"): - logging.basicConfig(filename="/tmp/livestream.log", filemode="a", format="%(asctime)s %(levelname)s %(message)s", level=logging.INFO) - core_list = list() - try: - with open("/etc/platform/worker_reserved.conf", "r") as f: - for line in f: - if line.startswith("PLATFORM_CPU_LIST"): - core_list = line.split("=")[1].replace("\"", "").strip("\n").split(",") - core_list = [int(x) for x in core_list] - return core_list - except Exception: - logging.warning("skipping platform specific collection for {} due to error: {}".format(node, sys.exc_info())) - return core_list - else: - return [] - - -def isActiveController(): - """determine if controller is active/standby""" - logging.basicConfig(filename="/tmp/livestream.log", filemode="a", format="%(asctime)s %(levelname)s %(message)s", level=logging.INFO) - try: - p = Popen("sm-dump", shell=True, stdout=PIPE) - p.stdout.readline() - p.stdout.readline() - # read line for active/standby - line = p.stdout.readline().strip("\n").split() - per = line[1] - p.kill() - if per == "active": - return True - else: - return False - except Exception: - if p is not None: - p.kill() - logging.error("sm-dump command could not be called properly. This is usually caused by a swact. Trying again on next call: {}".format(sys.exc_info())) - return False - - -def checkDuration(duration): - """checks whether the duration param has been set. If set, sleep; then kill processes upon waking up""" - logging.basicConfig(filename="/tmp/livestream.log", filemode="a", format="%(asctime)s %(levelname)s %(message)s", level=logging.INFO) - if duration is None: - return None - else: - time.sleep(duration) - print("Duration interval has ended. Killing processes now") - logging.warning("Duration interval has ended. Killing processes now") - raise KeyboardInterrupt - - -def killProcesses(tasks): - """kill all processes and log each death""" - logging.basicConfig(filename="/tmp/livestream.log", filemode="a", format="%(asctime)s %(levelname)s %(message)s", level=logging.INFO) - for t in tasks: - try: - logging.info("{} data stopped collection".format(str(t.name))) - t.terminate() - except Exception: - continue - - -def createDB(influx_info, grafana_port, grafana_api_key): - """create database in InfluxDB and add it to Grafana""" - logging.basicConfig(filename="/tmp/livestream.log", filemode="a", format="%(asctime)s %(levelname)s %(message)s", level=logging.INFO) - p = None - try: - logging.info("Adding database to InfluxDB and Grafana") - # create database in InfluxDB if not already created. 
Will NOT overwrite previous db - p = Popen("curl -s -XPOST 'http://'{}':'{}'/query' --data-urlencode 'q=CREATE DATABASE {}'".format(influx_info[0], influx_info[1], influx_info[2]), shell=True, stdout=PIPE) - response = p.stdout.read().strip("\n") - if response == "": - raise Exception("An error occurred while creating the database: Please make sure the Grafana and InfluxDB services are running") - else: - logging.info("InfluxDB response: {}".format(response)) - p.kill() - - # add database to Grafana - grafana_db = '{"name":"%s", "type":"influxdb", "url":"http://%s:%s", "access":"proxy", "isDefault":false, "database":"%s"}' % (influx_info[2], influx_info[0], influx_info[1], influx_info[2]) - p = Popen("curl -s 'http://{}:{}/api/datasources' -H 'Accept: application/json' -H 'Content-Type: application/json' -H 'Authorization: Bearer {}' --data-binary '{}'".format(influx_info[0], grafana_port, grafana_api_key, grafana_db), shell=True, stdout=PIPE) - response = p.stdout.read().strip("\n") - if response == "": - raise Exception("An error occurred while creating the database: Please make sure the Grafana and InfluxDB services are running") - else: - logging.info("Grafana response: {}".format(response)) - p.kill() - except KeyboardInterrupt: - if p is not None: - p.kill() - except Exception as e: - print(str(e)) - sys.exit(0) - - -def deleteDB(influx_info, grafana_port, grafana_api_key): - """delete database from InfluxDB and remove it from Grafana""" - logging.basicConfig(filename="/tmp/livestream.log", filemode="a", format="%(asctime)s %(levelname)s %(message)s", level=logging.INFO) - p = None - try: - answer = str(input("\nAre you sure you would like to delete {}? (Y/N): ".format(influx_info[2]))).lower() - except Exception: - answer = None - if answer is None or answer == "" or answer == "y" or answer == "yes": - try: - logging.info("Removing database from InfluxDB and Grafana") - print("Removing database from InfluxDB and Grafana. 
Please wait...") - # delete database from InfluxDB - p = Popen("curl -s -XPOST 'http://'{}':'{}'/query' --data-urlencode 'q=DROP DATABASE {}'".format(influx_info[0], influx_info[1], influx_info[2]), shell=True, stdout=PIPE) - response = p.stdout.read().strip("\n") - if response == "": - raise Exception("An error occurred while removing the database: Please make sure the Grafana and InfluxDB services are running") - else: - logging.info("InfluxDB response: {}".format(response)) - p.kill() - - # get database ID for db removal - p = Popen("curl -s -G 'http://{}:{}/api/datasources/id/{}' -H 'Accept: application/json' -H 'Content-Type: application/json' -H 'Authorization: Bearer {}'".format(influx_info[0], grafana_port, influx_info[2], grafana_api_key), shell=True, stdout=PIPE) - id = p.stdout.read().split(":")[1].strip("}") - if id == "": - raise Exception("An error occurred while removing the database: Could not determine the database ID") - p.kill() - - # remove database from Grafana - p = Popen("curl -s -XDELETE 'http://{}:{}/api/datasources/{}' -H 'Accept: application/json' -H 'Content-Type: application/json' -H 'Authorization: Bearer {}'".format(influx_info[0], grafana_port, id, grafana_api_key), shell=True, stdout=PIPE) - response = p.stdout.read().strip("\n") - if response == "": - raise Exception("An error occurred while removing the database: Please make sure the Grafana and InfluxDB services are running") - else: - logging.info("Grafana response: {}".format(response)) - p.kill() - except KeyboardInterrupt: - if p is not None: - p.kill() - except Exception as e: - print(str(e)) - sys.exit(0) - - -def appendToFile(file, content): - """used for output log""" - with open(file, "a") as f: - fcntl.flock(f, fcntl.LOCK_EX) - f.write(content + '\n') - fcntl.flock(f, fcntl.LOCK_UN) - - -# main program -if __name__ == "__main__": - # make sure user is root - if os.geteuid() != 0: - print("Must be run as root!\n") - sys.exit(0) - - # initialize variables - cpe_lab = False - influx_ip = influx_port = influx_db = "" - external_if = "" - influx_info = list() - grafana_port = "" - grafana_api_key = "" - controller_services = list() - compute_services = list() - storage_services = list() - rabbit_services = list() - common_services = list() - services = {} - live_svc = ("live_stream.py",) - collection_intervals = {"memtop": None, "memstats": None, "occtop": None, "schedtop": None, "load_avg": None, "cpu_count": None, "diskstats": None, "iostat": None, "filestats": None, "netstats": None, "postgres": None, "rabbitmq": None, "vswitch": None} - duration = None - unconverted_duration = "" - collect_api_requests = False - api_requests = "" - auto_delete_db = False - delete_db = "" - collect_all_services = False - all_services = "" - fast_postgres_connections = False - fast_postgres = "" - config = configparser.ConfigParser() - - node = os.popen("hostname").read().strip("\n") - - # get info from engtools.conf - try: - conf_file = "" - if "engtools.conf" in tuple(os.listdir(os.getcwd())): - conf_file = os.getcwd() + "/engtools.conf" - elif "engtools.conf" in tuple(os.listdir("/etc/engtools/")): - conf_file = "/etc/engtools/engtools.conf" - config.read(conf_file) - if config.get("LabConfiguration", "CPE_LAB").lower() == "y" or config.get("LabConfiguration", "CPE_LAB").lower() == "yes": - cpe_lab = True - if node.startswith("controller"): - external_if = config.get("CollectInternal", "{}_EXTERNAL_INTERFACE".format(node.upper().replace("-", ""))) - influx_ip = config.get("RemoteServer", "INFLUX_IP") - 
influx_port = config.get("RemoteServer", "INFLUX_PORT") - influx_db = config.get("RemoteServer", "INFLUX_DB") - grafana_port = config.get("RemoteServer", "GRAFANA_PORT") - grafana_api_key = config.get("RemoteServer", "GRAFANA_API_KEY") - duration = config.get("LiveStream", "DURATION") - unconverted_duration = config.get("LiveStream", "DURATION") - api_requests = config.get("AdditionalOptions", "API_REQUESTS") - delete_db = config.get("AdditionalOptions", "AUTO_DELETE_DB") - all_services = config.get("AdditionalOptions", "ALL_SERVICES") - fast_postgres = config.get("AdditionalOptions", "FAST_POSTGRES_CONNECTIONS") - # additional options - if api_requests.lower() == "y" or api_requests.lower() == "yes": - collect_api_requests = True - if delete_db.lower() == "y" or delete_db.lower() == "yes": - auto_delete_db = True - if all_services.lower() == "y" or all_services.lower() == "yes": - collect_all_services = True - if fast_postgres.lower() == "y" or fast_postgres.lower() == "yes": - fast_postgres_connections = True - # convert duration into seconds - if duration == "": - duration = None - elif duration.endswith("s") or duration.endswith("S"): - duration = duration.strip("s") - duration = duration.strip("S") - duration = int(duration) - elif duration.endswith("m") or duration.endswith("M"): - duration = duration.strip("m") - duration = duration.strip("M") - duration = int(duration) * 60 - elif duration.endswith("h") or duration.endswith("H"): - duration = duration.strip("h") - duration = duration.strip("H") - duration = int(duration) * 3600 - elif duration.endswith("d") or duration.endswith("D"): - duration = duration.strip("d") - duration = duration.strip("D") - duration = int(duration) * 3600 * 24 - controller_services = tuple(config.get("ControllerServices", "CONTROLLER_SERVICE_LIST").split()) - compute_services = tuple(config.get("ComputeServices", "COMPUTE_SERVICE_LIST").split()) - storage_services = tuple(config.get("StorageServices", "STORAGE_SERVICE_LIST").split()) - rabbit_services = tuple(config.get("RabbitmqServices", "RABBITMQ_QUEUE_LIST").split()) - common_services = tuple(config.get("CommonServices", "COMMON_SERVICE_LIST").split()) - static_svcs = tuple(config.get("StaticServices", "STATIC_SERVICE_LIST").split()) - openstack_services = tuple(config.get("OpenStackServices", "OPEN_STACK_SERVICE_LIST").split()) - skip_list = tuple(config.get("SkipList", "SKIP_LIST").split()) - exclude_list = tuple(config.get("ExcludeList", "EXCLUDE_LIST").split()) - # get collection intervals - for i in config.options("Intervals"): - if config.get("Intervals", i) == "" or config.get("Intervals", i) is None: - collection_intervals[i] = None - else: - collection_intervals[i] = int(config.get("Intervals", i)) - # get api-stats services - DB_PORT_NUMBER = config.get("ApiStatsConstantPorts", "DB_PORT_NUMBER") - RABBIT_PORT_NUMBER = config.get("ApiStatsConstantPorts", "RABBIT_PORT_NUMBER") - SERVICES = OrderedDict() - SERVICES_INFO = tuple(config.get("ApiStatsServices", "API_STATS_STRUCTURE").split('|')) - for service_string in SERVICES_INFO: - service_tuple = tuple(service_string.split(';')) - if service_tuple[2] != "" and service_tuple[2] is not None: - SERVICES[service_tuple[0]] = {'name': service_tuple[1], 'api-port': service_tuple[2]} - else: - SERVICES[service_tuple[0]] = {'name': service_tuple[1], 'api-port': None} - except Exception: - print("An error has occurred when parsing the engtools.conf configuration file: {}".format(sys.exc_info())) - sys.exit(0) - - syseng_services = live_svc + 
static_svcs - if cpe_lab is True: - services["controller_services"] = controller_services + compute_services + storage_services + common_services - else: - controller_services += common_services - compute_services += common_services - storage_services += common_services - services["controller_services"] = controller_services - services["compute_services"] = compute_services - services["storage_services"] = storage_services - services["common_services"] = common_services - services["syseng_services"] = syseng_services - services["rabbit_services"] = rabbit_services - - influx_info.append(influx_ip) - influx_info.append(influx_port) - influx_info.append(influx_db) - - # add config options to log - with open("/tmp/livestream.log", "w") as log_file: - log_file.write("Configuration for {}:\n".format(node)) - log_file.write("-InfluxDB address: {}:{}\n".format(influx_ip, influx_port)) - log_file.write("-InfluxDB name: {}\n".format(influx_db)) - log_file.write("-CPE lab: {}\n".format(str(cpe_lab))) - log_file.write(("-Collect API requests: {}\n".format(str(collect_api_requests)))) - log_file.write(("-Collect all services: {}\n".format(str(collect_all_services)))) - log_file.write(("-Fast postgres connections: {}\n".format(str(fast_postgres_connections)))) - log_file.write(("-Automatic database removal: {}\n".format(str(auto_delete_db)))) - if duration is not None: - log_file.write("-Live stream duration: {}\n".format(unconverted_duration)) - log_file.close() - - # add POSTROUTING entry to NAT table - if cpe_lab is False: - # check controller-0 for NAT entry. If not there, add it - if node.startswith("controller"): - # use first interface if not specified in engtools.conf - if external_if == "" or external_if is None: - p = Popen("ifconfig", shell=True, stdout=PIPE) - external_if = p.stdout.readline().split(":")[0] - p.kill() - appendToFile("/tmp/livestream.log", "-External interface for {}: {}".format(node, external_if)) - # enable IP forwarding - p = Popen("sysctl -w net.ipv4.ip_forward=1 > /dev/null", shell=True) - p.communicate() - p = Popen("iptables -t nat -L --line-numbers", shell=True, stdout=PIPE) - tmp = [line.strip("\n") for line in p.stdout.readlines()] - # entries need to be removed in reverse order - for line in reversed(tmp): - formatted_line = " ".join(line.strip("\n").split()[1:]) - # if an entry already exists, remove it - if formatted_line.startswith("MASQUERADE tcp -- anywhere"): - line_number = line.strip("\n").split()[0] - p1 = Popen("iptables -t nat -D POSTROUTING {}".format(line_number), shell=True) - p1.communicate() - p.kill() - appendToFile("/tmp/livestream.log", "-Adding NAT information to allow compute/storage nodes to communicate with remote server\n") - # add new entry for both InfluxDB and Grafana - p = Popen("iptables -t nat -A POSTROUTING -p tcp -o {} -d {} --dport {} -j MASQUERADE".format(external_if, influx_ip, influx_port), shell=True) - p.communicate() - p = Popen("iptables -t nat -A POSTROUTING -p tcp -o {} -d {} --dport {} -j MASQUERADE".format(external_if, influx_ip, grafana_port), shell=True) - p.communicate() - - appendToFile("/tmp/livestream.log", "\nStarting collection at {}\n".format(datetime.datetime.utcnow())) - tasks = [] - - createDB(influx_info, grafana_port, grafana_api_key) - - try: - node_type = str(node.split("-")[0]) - # if not a standard node, run the common functions with collect_all enabled - if node_type != "controller" and node_type != "compute" and node_type != "storage": - node_type = "common" - collect_all_services = True - - if 
collection_intervals["memstats"] is not None: - p = Process(target=collectMemstats, args=(influx_info, node, collection_intervals, services["{}_services".format(node_type)], services["syseng_services"], openstack_services, exclude_list, skip_list, collect_all_services), name="memstats") - tasks.append(p) - p.start() - if collection_intervals["schedtop"] is not None: - p = Process(target=collectSchedtop, args=(influx_info, node, collection_intervals, services["{}_services".format(node_type)], services["syseng_services"], openstack_services, exclude_list, skip_list, collect_all_services), name="schedtop") - tasks.append(p) - p.start() - if collection_intervals["filestats"] is not None: - p = Process(target=collectFilestats, args=(influx_info, node, collection_intervals, services["{}_services".format(node_type)], services["syseng_services"], exclude_list, skip_list, collect_all_services), name="filestats") - tasks.append(p) - p.start() - if collection_intervals["occtop"] is not None: - p = Process(target=collectOcctop, args=(influx_info, node, collection_intervals, getPlatformCores(node, cpe_lab)), name="occtop") - tasks.append(p) - p.start() - if collection_intervals["load_avg"] is not None: - p = Process(target=collectLoadavg, args=(influx_info, node, collection_intervals), name="load_avg") - tasks.append(p) - p.start() - if collection_intervals["cpu_count"] is not None: - p = Process(target=collectCpuCount, args=(influx_info, node, collection_intervals), name="cpu_count") - tasks.append(p) - p.start() - if collection_intervals["memtop"] is not None: - p = Process(target=collectMemtop, args=(influx_info, node, collection_intervals), name="memtop") - tasks.append(p) - p.start() - if collection_intervals["diskstats"] is not None: - p = Process(target=collectDiskstats, args=(influx_info, node, collection_intervals), name="diskstats") - tasks.append(p) - p.start() - if collection_intervals["iostat"] is not None: - p = Process(target=collectIostat, args=(influx_info, node, collection_intervals), name="iostat") - tasks.append(p) - p.start() - if collection_intervals["netstats"] is not None: - p = Process(target=collectNetstats, args=(influx_info, node, collection_intervals), name="netstats") - tasks.append(p) - p.start() - if collect_api_requests is True and node_type == "controller": - p = Process(target=collectApiStats, args=(influx_info, node, collection_intervals, SERVICES, DB_PORT_NUMBER, RABBIT_PORT_NUMBER), name="api_requests") - tasks.append(p) - p.start() - - if node_type == "controller": - if collection_intervals["postgres"] is not None: - p = Process(target=collectPostgres, args=(influx_info, node, collection_intervals), name="postgres") - tasks.append(p) - p.start() - p = Process(target=collectPostgresConnections, args=(influx_info, node, collection_intervals, fast_postgres_connections), name="postgres_connections") - tasks.append(p) - p.start() - if collection_intervals["rabbitmq"] is not None: - p = Process(target=collectRabbitMq, args=(influx_info, node, collection_intervals), name="rabbitmq") - tasks.append(p) - p.start() - p = Process(target=collectRabbitMqSvc, args=(influx_info, node, collection_intervals, services["rabbit_services"]), name="rabbitmq_svc") - tasks.append(p) - p.start() - - if node_type == "compute" or cpe_lab is True: - if collection_intervals["vswitch"] is not None: - p = Process(target=collectVswitch, args=(influx_info, node, collection_intervals), name="vswitch") - tasks.append(p) - p.start() - - print("Sending data to InfluxDB. 
Please tail /tmp/livestream.log") - - checkDuration(duration) - # give a small delay to ensure services have started - time.sleep(3) - for t in tasks: - os.wait() - except KeyboardInterrupt: - pass - finally: - # end here once duration param has ended or ctrl-c is pressed - appendToFile("/tmp/livestream.log", "\nEnding collection at {}\n".format(datetime.datetime.utcnow())) - if tasks is not None and len(tasks) > 0: - killProcesses(tasks) - if auto_delete_db is True: - deleteDB(influx_info, grafana_port, grafana_api_key) - sys.exit(0) diff --git a/tools/engtools/hostdata-collectors/scripts/memstats.sh b/tools/engtools/hostdata-collectors/scripts/memstats.sh deleted file mode 100644 index 5ba340f0a..000000000 --- a/tools/engtools/hostdata-collectors/scripts/memstats.sh +++ /dev/null @@ -1,110 +0,0 @@ -#!/bin/bash -# Usage: memstats.sh [-p ] [-i ] [-c ] [-h] -TOOLBIN=$(dirname $0) - -# Initialize tools environment variables, and define common utility functions -. ${TOOLBIN}/engtools_util.sh -tools_init -if [ $? -ne 0 ]; then - echo "FATAL, tools_init - could not setup environment" - exit $? -fi - -PAGE_SIZE=$(getconf PAGE_SIZE) - -# Enable use of INTERVAL_SEC sample interval -OPT_USE_INTERVALS=1 - -# Print key networking device statistics -function print_memory { - # Configuration for netcmds - MEMINFO=/proc/meminfo - NODEINFO=/sys/devices/system/node/node?/meminfo - BUDDYINFO=/proc/buddyinfo - SLABINFO=/proc/slabinfo - - print_separator - TOOL_HIRES_TIME - - ${ECHO} "# ${MEMINFO}" - ${CAT} ${MEMINFO} - ${ECHO} - - ${ECHO} "# ${NODEINFO}" - ${CAT} ${NODEINFO} - ${ECHO} - - ${ECHO} "# ${BUDDYINFO}" - ${CAT} ${BUDDYINFO} - ${ECHO} - - ${ECHO} "# PSS" - cat /proc/*/smaps 2>/dev/null | \ - awk '/^Pss:/ {a += $2;} END {printf "%d MiB\n", a/1024.0;}' - ${ECHO} - - # use old slabinfo format (i.e. slub not enabled in kernel) - ${ECHO} "# ${SLABINFO}" - ${CAT} ${SLABINFO} | \ - awk -v page_size_B=${PAGE_SIZE} ' -BEGIN {page_KiB = page_size_B/1024; TOT_KiB = 0;} -(NF == 17) { - gsub(/[<>]/, ""); - printf("%-22s %11s %8s %8s %10s %12s %1s %5s %10s %12s %1s %12s %9s %11s %8s\n", - $2, $3, $4, $5, $6, $7, $8, $10, $11, $12, $13, $15, $16, $17, "KiB"); -} -(NF == 16) { - num_objs=$3; obj_per_slab=$5; pages_per_slab=$6; - KiB = (obj_per_slab > 0) ? page_KiB*num_objs/obj_per_slab*pages_per_slab : 0; - TOT_KiB += KiB; - printf("%-22s %11d %8d %8d %10d %12d %1s %5d %10d %12d %1s %12d %9d %11d %8d\n", - $1, $2, $3, $4, $5, $6, $7, $9, $10, $11, $12, $14, $15, $16, KiB); -} -END { - printf("%-22s %11s %8s %8s %10s %12s %1s %5s %10s %12s %1s %12s %9s %11s %8d\n", - "TOTAL", "-", "-", "-", "-", "-", ":", "-", "-", "-", ":", "-", "-", "-", TOT_KiB); -} -' 2>/dev/null - ${ECHO} - - ${ECHO} "# disk usage: rootfs, tmpfs" - cmd='df -h -H -T --local -t rootfs -t tmpfs' - ${ECHO} "Disk space usage rootfs,tmpfs (SI):" - ${ECHO} "${cmd}" - ${cmd} - ${ECHO} - - CMD='ps -e -o ppid,pid,nlwp,rss:10,vsz:10,cmd --sort=-rss' - ${ECHO} "# ${CMD}" - ${CMD} - ${ECHO} -} - -#------------------------------------------------------------------------------- -# MAIN Program: -#------------------------------------------------------------------------------- -# Parse input options -tools_parse_options "${@}" - -# Set affinity of current script -CPULIST="" -set_affinity ${CPULIST} - -LOG "collecting ${TOOLNAME} for ${PERIOD_MIN} minutes, with ${INTERVAL_SEC} second sample intervals." 
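
Every collector in live_stream.py pushes its samples to InfluxDB the same way: it shells out to curl against the /write?db= endpoint with a newline-separated line-protocol payload. A minimal sketch of that write path in plain Python follows; it is an illustration, not part of the relocated code, and the host, port, database, and function name are stand-ins for the engtools.conf values:

    import urllib.request

    def write_influx(host, port, db, lines):
        # POST newline-separated line-protocol points, e.g.
        # "netstats,node=controller-0,interface=eth0 rx_mbps=1.5,tx_mbps=0.2"
        url = "http://{}:{}/write?db={}".format(host, port, db)
        req = urllib.request.Request(url, data="\n".join(lines).encode("utf-8"))
        urllib.request.urlopen(req)  # InfluxDB answers 204 No Content on success

    # illustrative values only:
    write_influx("127.0.0.1", "8086", "livestream",
                 ["cpu_count,node=controller-0 cpu_count=40"])

Batching many points into a single POST, as collectPostgres does with BATCH_SIZE, keeps the per-sample curl/HTTP overhead small.
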
- -# Print tools generic tools header -tools_header - -# Calculate number of sample repeats based on overall interval and sampling interval -((REPEATS = PERIOD_MIN * 60 / INTERVAL_SEC)) - -for ((rep=1; rep <= REPEATS ; rep++)); do - print_memory - sleep ${INTERVAL_SEC} -done -print_memory -LOG "done" - -# normal program exit -tools_cleanup 0 -exit 0 diff --git a/tools/engtools/hostdata-collectors/scripts/netstats.sh b/tools/engtools/hostdata-collectors/scripts/netstats.sh deleted file mode 100644 index c9be0a1e5..000000000 --- a/tools/engtools/hostdata-collectors/scripts/netstats.sh +++ /dev/null @@ -1,63 +0,0 @@ -#!/bin/bash -# Usage: netstats.sh [-p ] [-i ] [-c ] [-h] -TOOLBIN=$(dirname $0) - -# Initialize tools environment variables, and define common utility functions -. ${TOOLBIN}/engtools_util.sh -tools_init -if [ $? -ne 0 ]; then - echo "FATAL, tools_init - could not setup environment" - exit $? -fi - -# Enable use of INTERVAL_SEC sample interval -OPT_USE_INTERVALS=1 - -# Print key networking device statistics -function print_netcmds { - # Configuration for netcmds - DEV=/proc/net/dev - NETSTAT=/proc/net/netstat - - print_separator - TOOL_HIRES_TIME - - for net in \ - ${DEV} ${NETSTAT} - do - if [ -e "${net}" ]; then - ${ECHO} "# ${net}" - ${CAT} ${net} - ${ECHO} - fi - done -} - -#------------------------------------------------------------------------------- -# MAIN Program: -#------------------------------------------------------------------------------- -# Parse input options -tools_parse_options "${@}" - -# Set affinity of current script -CPULIST="" -set_affinity ${CPULIST} - -LOG "collecting ${TOOLNAME} for ${PERIOD_MIN} minutes, with ${INTERVAL_SEC} second sample intervals." - -# Print tools generic tools header -tools_header - -# Calculate number of sample repeats based on overall interval and sampling interval -((REPEATS = PERIOD_MIN * 60 / INTERVAL_SEC)) - -for ((rep=1; rep <= REPEATS ; rep++)); do - print_netcmds - sleep ${INTERVAL_SEC} -done -print_netcmds -LOG "done" - -# normal program exit -tools_cleanup 0 -exit 0 diff --git a/tools/engtools/hostdata-collectors/scripts/postgres.sh b/tools/engtools/hostdata-collectors/scripts/postgres.sh deleted file mode 100644 index 340d0e429..000000000 --- a/tools/engtools/hostdata-collectors/scripts/postgres.sh +++ /dev/null @@ -1,138 +0,0 @@ -#!/bin/bash -# Usage: postgres.sh [-p ] [-i ] [-c ] [-h] -TOOLBIN=$(dirname $0) - -# Initialize tools environment variables, and define common utility functions -. ${TOOLBIN}/engtools_util.sh -tools_init -if [ $? -ne 0 ]; then - echo "FATAL, tools_init - could not setup environment" - exit $? 
-fi
-
-# Enable use of INTERVAL_SEC sample interval
-OPT_USE_INTERVALS=1
-
-# Print key postgres statistics
-function print_postgres {
-    print_separator
-    TOOL_HIRES_TIME
-
-    # postgresql command: set user, disable pagination, and be quiet
-    PSQL="sudo -u postgres psql --pset pager=off -q"
-
-    # List postgres databases
-    db_list=( $(${PSQL} -t -c "SELECT datname FROM pg_database WHERE datistemplate = false;") )
-    ${ECHO} "# postgres databases"
-    echo "db_list = ${db_list[@]}"
-    ${ECHO}
-
-    # List sizes of all postgres databases (similar to "\l+")
-    ${ECHO} "# postgres database sizes"
-    ${PSQL} -c "
-SELECT
-    pg_database.datname,
-    pg_database_size(pg_database.datname),
-    pg_size_pretty(pg_database_size(pg_database.datname))
-FROM pg_database
-ORDER BY pg_database_size DESC;
-"
-
-    # For each database, list tables and their sizes (similar to "\dt+")
-    for db in "${db_list[@]}"; do
-        ${ECHO} "# postgres database: ${db}"
-        ${PSQL} -d ${db} -c "
-SELECT
-    table_schema,
-    table_name,
-    pg_size_pretty(table_size) AS table_size,
-    pg_size_pretty(indexes_size) AS indexes_size,
-    pg_size_pretty(total_size) AS total_size,
-    live_tuples,
-    dead_tuples
-FROM (
-    SELECT
-        table_schema,
-        table_name,
-        pg_table_size(table_name) AS table_size,
-        pg_indexes_size(table_name) AS indexes_size,
-        pg_total_relation_size(table_name) AS total_size,
-        pg_stat_get_live_tuples(table_name::regclass) AS live_tuples,
-        pg_stat_get_dead_tuples(table_name::regclass) AS dead_tuples
-    FROM (
-        SELECT
-            table_schema,
-            table_name
-        FROM information_schema.tables
-        WHERE table_schema='public'
-        AND table_type='BASE TABLE'
-    ) AS all_tables
-    ORDER BY total_size DESC
-) AS pretty_sizes;
-"
-
-        ${ECHO} "# postgres database vacuum: ${db}"
-        ${PSQL} -d ${db} -c "
-SELECT
-    relname,
-    n_live_tup,
-    n_dead_tup,
-    last_vacuum,
-    last_autovacuum,
-    last_analyze,
-    last_autoanalyze
-FROM pg_stat_user_tables;
-"
-    done
-
-    # Specific table counts (This is very SLOW, look at "live tuples" instead)
-    # Number of keystone tokens
-    #${ECHO} "# keystone token count"
-
-    # Number of postgres connections
-    ${ECHO} "# postgres database connections"
-    CONN=$(ps -C postgres -o cmd= | wc -l)
-    CONN_T=$(ps -C postgres -o cmd= | awk '/postgres: / {print $3}' | awk '{for(i=1;i<=NF;i++) a[$i]++} END {for(k in a) print k, a[k]}' | sort -k 2 -nr )
-    ${ECHO} "connections total = ${CONN}"
-    ${ECHO}
-    ${ECHO} "connections breakdown:"
-    ${ECHO} "${CONN_T}"
-    ${ECHO}
-
-    ${ECHO} "connections breakdown (query):"
-    ${PSQL} -c "SELECT datname,state,count(*) from pg_stat_activity group by datname,state;"
-    ${ECHO}
-
-    ${ECHO} "connections idle age:"
-    ${PSQL} -c "SELECT datname,age(now(),state_change) from pg_stat_activity where state='idle';"
-    ${ECHO}
-}
-
-#-------------------------------------------------------------------------------
-# MAIN Program:
-#-------------------------------------------------------------------------------
-# Parse input options
-tools_parse_options "${@}"
-
-# Set affinity of current script
-CPULIST=""
-set_affinity ${CPULIST}
-
-LOG "collecting ${TOOLNAME} for ${PERIOD_MIN} minutes, with ${INTERVAL_SEC} second sample intervals."
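
The per-database connection breakdown that print_postgres (and collectPostgresConnections in live_stream.py) derives from pg_stat_activity can be reproduced in a few lines of Python. The sketch below is illustrative only; like the scripts themselves, it assumes passwordless sudo to the postgres account:

    from subprocess import Popen, PIPE

    def connection_breakdown():
        # same query print_postgres runs: one row per (database, state) pair
        cmd = ("sudo -u postgres psql -t -A -F'|' -c "
               "\"SELECT datname,state,count(*) FROM pg_stat_activity "
               "GROUP BY datname,state;\"")
        out = Popen(cmd, shell=True, stdout=PIPE).communicate()[0].decode()
        counts = {}
        for row in out.splitlines():
            parts = row.split("|")
            if len(parts) == 3:
                db, state, n = parts
                counts.setdefault(db, {})[state or "unknown"] = int(n)
        return counts  # e.g. {"sysinv": {"active": 2, "idle": 7}}
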
-
-# Print tools generic tools header
-tools_header
-
-# Calculate number of sample repeats based on overall interval and sampling interval
-((REPEATS = PERIOD_MIN * 60 / INTERVAL_SEC))
-
-for ((rep=1; rep <= REPEATS ; rep++)); do
-    print_postgres
-    sleep ${INTERVAL_SEC}
-done
-print_postgres
-LOG "done"
-
-# normal program exit
-tools_cleanup 0
-exit 0
diff --git a/tools/engtools/hostdata-collectors/scripts/rabbitmq.sh b/tools/engtools/hostdata-collectors/scripts/rabbitmq.sh
deleted file mode 100644
index 2755024f6..000000000
--- a/tools/engtools/hostdata-collectors/scripts/rabbitmq.sh
+++ /dev/null
@@ -1,83 +0,0 @@
-#!/bin/bash
-# Usage: rabbitmq.sh [-p <period_mins>] [-i <interval_seconds>] [-c <cpulist>] [-h]
-TOOLBIN=$(dirname $0)
-
-# Initialize tools environment variables, and define common utility functions
-. ${TOOLBIN}/engtools_util.sh
-tools_init
-if [ $? -ne 0 ]; then
-    echo "FATAL, tools_init - could not setup environment"
-    exit $?
-fi
-
-# Enable use of INTERVAL_SEC sample interval
-OPT_USE_INTERVALS=1
-# Need this workaround
-MQOPT="-n rabbit@localhost"
-# Print key rabbitmq statistics
-function print_rabbitmq {
-    print_separator
-    TOOL_HIRES_TIME
-
-    # IMPORTANT:
-    # - Difficulty getting rabbitmqctl to work from init.d script;
-    #   apparently it requires a pseudo-TTY, which is something you don't have
-    #   until post-init.
-    # - WORKAROUND: run command using 'sudo', even if you are 'root'
-
-    # Dump various rabbitmq related stats
-    MQ_STATUS="rabbitmqctl ${MQOPT} status"
-    ${ECHO} "# ${MQ_STATUS}"
-    sudo ${MQ_STATUS} | grep -e '{memory' -A30
-    ${ECHO}
-
-    # The following is useful in diagnosing rabbit memory leaks
-    # when end-users do not drain their queues (eg, due to RPC timeout issues, etc)
-    MQ_QUEUES="rabbitmqctl ${MQOPT} list_queues messages name pid messages_ready messages_unacknowledged memory consumers"
-    ${ECHO} "# ${MQ_QUEUES}"
-    sudo ${MQ_QUEUES}
-    ${ECHO}
-
-    num_queues=$(sudo rabbitmqctl ${MQOPT} list_queues | wc -l); ((num_queues-=2))
-    num_bindings=$(sudo rabbitmqctl ${MQOPT} list_bindings | wc -l); ((num_bindings-=2))
-    num_exchanges=$(sudo rabbitmqctl ${MQOPT} list_exchanges | wc -l); ((num_exchanges-=2))
-    num_connections=$(sudo rabbitmqctl ${MQOPT} list_connections | wc -l); ((num_connections-=2))
-    num_channels=$(sudo rabbitmqctl ${MQOPT} list_channels | wc -l); ((num_channels-=2))
-    arr=($(sudo rabbitmqctl ${MQOPT} list_queues messages consumers memory | \
-        awk '/^[0-9]/ {a+=$1; b+=$2; c+=$3} END {print a, b, c}'))
-    messages=${arr[0]}; consumers=${arr[1]}; memory=${arr[2]}
-    printf "%6s %8s %9s %11s %8s %8s %9s %10s\n" \
-        "queues" "bindings" "exchanges" "connections" "channels" "messages" "consumers" "memory"
-    printf "%6d %8d %9d %11d %8d %8d %9d %10d\n" \
-        $num_queues $num_bindings $num_exchanges $num_connections $num_channels $messages $consumers $memory
-    ${ECHO}
-}
-
-#-------------------------------------------------------------------------------
-# MAIN Program:
-#-------------------------------------------------------------------------------
-# Parse input options
-tools_parse_options "${@}"
-
-# Set affinity of current script
-CPULIST=""
-set_affinity ${CPULIST}
-
-LOG "collecting ${TOOLNAME} for ${PERIOD_MIN} minutes, with ${INTERVAL_SEC} second sample intervals."
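
The queue totals that print_rabbitmq computes with awk (summing the messages, consumers, and memory columns of list_queues, after trimming the two non-data banner lines with ((n-=2))) can be expressed equivalently in Python. This is a sketch for illustration, not part of the relocated code:

    from subprocess import Popen, PIPE

    def rabbit_totals():
        # sum the three numeric columns, skipping any non-numeric banner lines
        out = Popen("sudo rabbitmqctl -n rabbit@localhost "
                    "list_queues messages consumers memory",
                    shell=True, stdout=PIPE).communicate()[0].decode()
        messages = consumers = memory = 0
        for line in out.splitlines():
            cols = line.split()
            if len(cols) == 3 and cols[0].isdigit():
                messages += int(cols[0])
                consumers += int(cols[1])
                memory += int(cols[2])
        return messages, consumers, memory
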
- -# Print tools generic tools header -tools_header - -# Calculate number of sample repeats based on overall interval and sampling interval -((REPEATS = PERIOD_MIN * 60 / INTERVAL_SEC)) - -for ((rep=1; rep <= REPEATS ; rep++)); do - print_rabbitmq - sleep ${INTERVAL_SEC} -done -print_rabbitmq -LOG "done" - -# normal program exit -tools_cleanup 0 -exit 0 diff --git a/tools/engtools/hostdata-collectors/scripts/remote/rbzip2-engtools.sh b/tools/engtools/hostdata-collectors/scripts/remote/rbzip2-engtools.sh deleted file mode 100644 index 4323ceee0..000000000 --- a/tools/engtools/hostdata-collectors/scripts/remote/rbzip2-engtools.sh +++ /dev/null @@ -1,46 +0,0 @@ -#!/bin/bash -# Purpose: -# bzip2 compress engtools data on all nodes. - -# Define common utility functions -TOOLBIN=$(dirname $0) -. ${TOOLBIN}/engtools_util.sh -if [ $UID -eq 0 ]; then - ERRLOG "Do not start $0 using sudo/root access." - exit 1 -fi - -# environment for system commands -source /etc/platform/openrc - -declare -a CONTROLLER -declare -a COMPUTE -declare -a STORAGE -CONTROLLER=( $(system host-list | awk '(/controller/) {print $4;}') ) -COMPUTE=( $(system host-list | awk '(/compute/) {print $4;}') ) -STORAGE=( $(system host-list | awk '(/storage/) {print $4;}') ) - -LOG "Remote bzip2 engtools data on all blades:" -for blade in ${CONTROLLER[@]}; do - ping -c1 ${blade} 1>/dev/null 2>/dev/null - if [ $? -eq 0 ]; then - LOG "bzip2 on $blade:" - ssh -q -t -o StrictHostKeyChecking=no \ - ${blade} sudo bzip2 /scratch/syseng_data/${blade}/* - else - WARNLOG "cannot ping: ${blade}" - fi -done -for blade in ${STORAGE[@]} ${COMPUTE[@]} ; do - ping -c1 ${blade} 1>/dev/null 2>/dev/null - if [ $? -eq 0 ]; then - LOG "bzip2 on $blade:" - ssh -q -t -o StrictHostKeyChecking=no \ - ${blade} sudo bzip2 /tmp/syseng_data/${blade}/* - else - WARNLOG "cannot ping: ${blade}" - fi -done -LOG "done" - -exit 0 diff --git a/tools/engtools/hostdata-collectors/scripts/remote/rstart-engtools.sh b/tools/engtools/hostdata-collectors/scripts/remote/rstart-engtools.sh deleted file mode 100644 index 11b2e115c..000000000 --- a/tools/engtools/hostdata-collectors/scripts/remote/rstart-engtools.sh +++ /dev/null @@ -1,37 +0,0 @@ -#!/bin/bash -# Purpose: -# Remote start engtools on all blades. - -# Define common utility functions -TOOLBIN=$(dirname $0) -. ${TOOLBIN}/engtools_util.sh -if [ $UID -eq 0 ]; then - ERRLOG "Do not start $0 using sudo/root access." - exit 1 -fi - -# environment for system commands -source /etc/platform/openrc - -declare -a BLADES -BLADES=( $(system host-list | awk '(/compute|controller|storage/) {print $4;}') ) - -LOG "Remote start engtools on all blades:" -for blade in ${BLADES[@]}; do - if [ "${blade}" == "${HOSTNAME}" ]; then - LOG "start on $blade:" - sudo service collect-engtools.sh start - else - ping -c1 ${blade} 1>/dev/null 2>/dev/null - if [ $? -eq 0 ]; then - LOG "start on $blade:" - ssh -q -t -o StrictHostKeyChecking=no \ - ${blade} sudo service collect-engtools.sh start - else - WARNLOG "cannot ping: ${blade}" - fi - fi -done -LOG "done" - -exit 0 diff --git a/tools/engtools/hostdata-collectors/scripts/remote/rstop-engtools.sh b/tools/engtools/hostdata-collectors/scripts/remote/rstop-engtools.sh deleted file mode 100644 index f8bb9b7b8..000000000 --- a/tools/engtools/hostdata-collectors/scripts/remote/rstop-engtools.sh +++ /dev/null @@ -1,37 +0,0 @@ -#!/bin/bash -# Purpose: -# Remote stop engtools on all blades. - -# Define common utility functions -TOOLBIN=$(dirname $0) -. 
${TOOLBIN}/engtools_util.sh -if [ $UID -eq 0 ]; then - ERRLOG "Do not start $0 using sudo/root access." - exit 1 -fi - -# environment for system commands -source /etc/platform/openrc - -declare -a BLADES -BLADES=( $(system host-list | awk '(/compute|controller|storage/) {print $4;}') ) - -LOG "Remote stop engtools on all blades:" -for blade in ${BLADES[@]}; do - if [ "${blade}" == "${HOSTNAME}" ]; then - LOG "stop on $blade:" - sudo service collect-engtools.sh stop - else - ping -c1 ${blade} 1>/dev/null 2>/dev/null - if [ $? -eq 0 ]; then - LOG "stop on $blade:" - ssh -q -t -o StrictHostKeyChecking=no \ - ${blade} sudo service collect-engtools.sh stop - else - WARNLOG "cannot ping: ${blade}" - fi - fi -done -LOG "done" - -exit 0 diff --git a/tools/engtools/hostdata-collectors/scripts/remote/rsync-engtools-data.sh b/tools/engtools/hostdata-collectors/scripts/remote/rsync-engtools-data.sh deleted file mode 100644 index 08a1623de..000000000 --- a/tools/engtools/hostdata-collectors/scripts/remote/rsync-engtools-data.sh +++ /dev/null @@ -1,68 +0,0 @@ -#!/bin/bash -# Purpose: -# rsync data from all nodes to backup location. - -# Define common utility functions -TOOLBIN=$(dirname $0) -. ${TOOLBIN}/engtools_util.sh -if [ $UID -eq 0 ]; then - ERRLOG "Do not start $0 using sudo/root access." - exit 1 -fi - -# environment for system commands -source /etc/platform/openrc - -declare -a BLADES -declare -a CONTROLLER -declare -a STORAGE -declare -a COMPUTE -BLADES=( $(system host-list | awk '(/compute|controller|storage/) {print $4;}') ) -CONTROLLER=( $(system host-list | awk '(/controller/) {print $4;}') ) -COMPUTE=( $(system host-list | awk '(/compute/) {print $4;}') ) -STORAGE=( $(system host-list | awk '(/storage/) {print $4;}') ) - -DEST=/opt/backups/syseng_data/ -if [[ "${HOSTNAME}" =~ "controller-" ]]; then - LOG "rsync DEST=${DEST}" -else - LOG "*ERROR* only run this on controller" - exit 1 -fi -sudo mkdir -p ${DEST} - -# rsync options -USER=sysadmin -RSYNC_OPT="-r -l --safe-links -h -P --stats --exclude=*.pyc" - -# Rsync data from multiple locations -LOG "rsync engtools data from all blades:" - -# controllers -SRC=/scratch/syseng_data/ -DEST=/opt/backups/syseng_data/ -for HOST in ${CONTROLLER[@]}; do - ping -c1 ${HOST} 1>/dev/null 2>/dev/null - if [ $? -eq 0 ]; then - LOG "rsync ${RSYNC_OPT} ${USER}@${HOST}:${SRC} ${DEST}" - sudo rsync ${RSYNC_OPT} ${USER}@${HOST}:${SRC} ${DEST} - else - WARNLOG "cannot ping: ${HOST}" - fi -done - -# computes & storage -SRC=/tmp/syseng_data/ -DEST=/opt/backups/syseng_data/ -for HOST in ${STORAGE[@]} ${COMPUTE[@]}; do - ping -c1 ${HOST} 1>/dev/null 2>/dev/null - if [ $? -eq 0 ]; then - LOG "rsync ${RSYNC_OPT} ${USER}@${HOST}:${SRC} ${DEST}" - sudo rsync ${RSYNC_OPT} ${USER}@${HOST}:${SRC} ${DEST} - else - WARNLOG "cannot ping: ${HOST}" - fi -done -LOG 'done' - -exit 0 diff --git a/tools/engtools/hostdata-collectors/scripts/slab.sh b/tools/engtools/hostdata-collectors/scripts/slab.sh deleted file mode 100644 index f60e0c095..000000000 --- a/tools/engtools/hostdata-collectors/scripts/slab.sh +++ /dev/null @@ -1,23 +0,0 @@ -#!/bin/bash -PAGE_SIZE=$(getconf PAGE_SIZE) -cat /proc/slabinfo | awk -v page_size_B=${PAGE_SIZE} ' -BEGIN {page_KiB = page_size_B/1024; TOT_KiB = 0;} -(NF == 17) { - gsub(/[<>]/, ""); - printf("%-22s %11s %8s %8s %10s %12s %1s %5s %10s %12s %1s %12s %9s %11s %8s\n", - $2, $3, $4, $5, $6, $7, $8, $10, $11, $12, $13, $15, $16, $17, "KiB"); -} -(NF == 16) { - num_objs=$3; obj_per_slab=$5; pages_per_slab=$6; - KiB = (obj_per_slab > 0) ? 
page_KiB*num_objs/obj_per_slab*pages_per_slab : 0; - TOT_KiB += KiB; - printf("%-22s %11d %8d %8d %10d %12d %1s %5d %10d %12d %1s %12d %9d %11d %8d\n", - $1, $2, $3, $4, $5, $6, $7, $9, $10, $11, $12, $14, $15, $16, KiB); -} -END { - printf("%-22s %11s %8s %8s %10s %12s %1s %5s %10s %12s %1s %12s %9s %11s %8d\n", - "TOTAL", "-", "-", "-", "-", "-", ":", "-", "-", "-", ":", "-", "-", "-", TOT_KiB); -} -' 2>/dev/null - -exit 0 diff --git a/tools/engtools/hostdata-collectors/scripts/ticker.sh b/tools/engtools/hostdata-collectors/scripts/ticker.sh deleted file mode 100644 index 1edf00ed5..000000000 --- a/tools/engtools/hostdata-collectors/scripts/ticker.sh +++ /dev/null @@ -1,49 +0,0 @@ -#!/bin/bash -# Usage: ticker.sh [-p ] [-i ] [-c ] [-h] -TOOLBIN=$(dirname $0) - -# Initialize tools environment variables, and define common utility functions -. ${TOOLBIN}/engtools_util.sh -tools_init -if [ $? -ne 0 ]; then - echo "FATAL, tools_init - could not setup environment" - exit $? -fi - -# Enable use of INTERVAL_SEC sample interval -OPT_USE_INTERVALS=1 - -#------------------------------------------------------------------------------- -# MAIN Program: -#------------------------------------------------------------------------------- -# Parse input options -tools_parse_options "${@}" - -# Set affinity of current script -CPULIST="" -set_affinity ${CPULIST} - -# Calculate number of sample repeats based on overall interval and sampling interval -((REPEATS = PERIOD_MIN * 60 / INTERVAL_SEC)) -((REP_LOG = 10 * 60 / INTERVAL_SEC)) - -LOG_NOCR "collecting " -t=0 -for ((rep=1; rep <= REPEATS ; rep++)); do - ((t++)) - sleep ${INTERVAL_SEC} - if [ ${t} -ge ${REP_LOG} ]; then - t=0 - echo "." - LOG_NOCR "collecting " - else - echo -n "." - fi -done -echo "." - -LOG "done" - -# normal program exit -tools_cleanup 0 -exit 0 diff --git a/tools/engtools/hostdata-collectors/scripts/top.sh b/tools/engtools/hostdata-collectors/scripts/top.sh deleted file mode 100644 index 9a5ebc0cd..000000000 --- a/tools/engtools/hostdata-collectors/scripts/top.sh +++ /dev/null @@ -1,43 +0,0 @@ -#!/bin/bash -# Usage: top.sh [-p ] [-i ] [-c ] [-h] -TOOLBIN=$(dirname $0) - -# Initialize tools environment variables, and define common utility functions -. ${TOOLBIN}/engtools_util.sh -tools_init -if [ $? -ne 0 ]; then - echo "FATAL, tools_init - could not setup environment" - exit $? -fi - -# Enable use of INTERVAL_SEC sample interval -OPT_USE_INTERVALS=1 - -#------------------------------------------------------------------------------- -# MAIN Program: -#------------------------------------------------------------------------------- -# Parse input options -tools_parse_options "${@}" - -# Set affinity of current script -CPULIST="" -set_affinity ${CPULIST} - -LOG "collecting ${TOOLNAME} for ${PERIOD_MIN} minutes, with ${INTERVAL_SEC} second sample intervals." 
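# ---------------------------------------------------------------------------
# slab.sh above estimates each cache's footprint from /proc/slabinfo as
#   KiB = page_KiB * num_objs / objs_per_slab * pages_per_slab
# i.e. the slab count (num_objs / objs_per_slab) times pages per slab times
# the page size in KiB. A compact sketch of the same accounting, assuming the
# usual slabinfo column order (num_objs=$3, objperslab=$5, pagesperslab=$6):
page_KiB=$(( $(getconf PAGE_SIZE) / 1024 ))
awk -v page_KiB="${page_KiB}" '
    NR > 2 && $5 > 0 {                  # skip the two slabinfo header lines
        kib = page_KiB * $3 / $5 * $6
        total += kib
        printf "%-24s %12.0f KiB\n", $1, kib
    }
    END { printf "%-24s %12.0f KiB\n", "TOTAL", total }
' /proc/slabinfo
# ---------------------------------------------------------------------------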
- -# Print tools generic tools header -tools_header - -# Calculate number of sample repeats based on overall interval and sampling interval -((REPEATS = PERIOD_MIN * 60 / INTERVAL_SEC)) -((REP = REPEATS + 1)) - -# Execute tool for specified duration -CMD="top -b -c -H -n ${REP} -d ${INTERVAL_SEC}" -#LOG "CMD: ${CMD}" -${CMD} -LOG "done" - -# normal program exit -tools_cleanup 0 -exit 0 diff --git a/tools/engtools/hostdata-collectors/scripts/vswitch.sh b/tools/engtools/hostdata-collectors/scripts/vswitch.sh deleted file mode 100644 index f0bad0eeb..000000000 --- a/tools/engtools/hostdata-collectors/scripts/vswitch.sh +++ /dev/null @@ -1,66 +0,0 @@ -#!/bin/bash -# Usage: vswitch.sh [-p ] [-i ] [-c ] [-h] -TOOLBIN=$(dirname $0) - -# Initialize tools environment variables, and define common utility functions -. ${TOOLBIN}/engtools_util.sh -tools_init -if [ $? -ne 0 ]; then - echo "FATAL, tools_init - could not setup environment" - exit $? -fi - -# Enable use of INTERVAL_SEC sample interval -OPT_USE_INTERVALS=1 - -# Print key networking device statistics -function print_vswitch { - print_separator - TOOL_HIRES_TIME - - cmd='vshell engine-list' - ${ECHO} "# ${cmd}" ; ${cmd} ; ${ECHO} - cmd='vshell engine-stats-list' - ${ECHO} "# ${cmd}" ; ${cmd} ; ${ECHO} - cmd='vshell port-list' - ${ECHO} "# ${cmd}" ; ${cmd} ; ${ECHO} - cmd='vshell port-stats-list' - ${ECHO} "# ${cmd}" ; ${cmd} ; ${ECHO} - cmd='vshell network-list' - ${ECHO} "# ${cmd}" ; ${cmd} ; ${ECHO} - cmd='vshell network-stats-list' - ${ECHO} "# ${cmd}" ; ${cmd} ; ${ECHO} - cmd='vshell interface-list' - ${ECHO} "# ${cmd}" ; ${cmd} ; ${ECHO} - cmd='vshell interface-stats-list' - ${ECHO} "# ${cmd}" ; ${cmd} ; ${ECHO} -} - -#------------------------------------------------------------------------------- -# MAIN Program: -#------------------------------------------------------------------------------- -# Parse input options -tools_parse_options "${@}" - -# Set affinity of current script -CPULIST="" -set_affinity ${CPULIST} - -LOG "collecting ${TOOLNAME} for ${PERIOD_MIN} minutes, with ${INTERVAL_SEC} second sample intervals." 
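# ---------------------------------------------------------------------------
# Unlike the loop-based collectors, top.sh above hands the sampling loop to
# top itself: in batch mode top prints its first screen immediately and then
# one more every <delay> seconds, so covering REPEATS intervals takes
# REPEATS + 1 iterations (hence the ((REP = REPEATS + 1)) above). A sketch
# with illustrative values:
INTERVAL_SEC=5
REPEATS=3
# -b batch output, -c full command lines, -H per-thread rows
top -b -c -H -n $((REPEATS + 1)) -d "${INTERVAL_SEC}" > top.batch.out
# ---------------------------------------------------------------------------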
- -# Print tools generic tools header -tools_header - -# Calculate number of sample repeats based on overall interval and sampling interval -((REPEATS = PERIOD_MIN * 60 / INTERVAL_SEC)) - -for ((rep=1; rep <= REPEATS ; rep++)); do - print_vswitch - sleep ${INTERVAL_SEC} -done -print_vswitch -LOG "done" - -# normal program exit -tools_cleanup 0 -exit 0 diff --git a/tox.ini b/tox.ini index 59fdb7452..55352075b 100644 --- a/tox.ini +++ b/tox.ini @@ -102,13 +102,10 @@ deps = -r{toxinidir}/test-requirements.txt python-daemon==2.1.2 pylint -# There are currenrly 5 python modules with a setup.py file +# There are currenrly 2 python modules with a setup.py file commands = pylint --rcfile=./pylint.rc \ - ceph/ceph-manager/ceph-manager/ceph_manager \ - logging/logmgmt/logmgmt/logmgmt \ tools/storage-topology/storage-topology/storage_topology \ - tools/vm-topology/vm-topology/vm_topology \ - utilities/platform-util/platform-util/platform_util + tools/vm-topology/vm-topology/vm_topology [testenv:venv] basepython = python3 diff --git a/utilities/build-info/PKG-INFO b/utilities/build-info/PKG-INFO deleted file mode 100644 index 24a19279a..000000000 --- a/utilities/build-info/PKG-INFO +++ /dev/null @@ -1,12 +0,0 @@ -Metadata-Version: 1.1 -Name: build-info -Version: 1.0 -Summary: build-info version 1.0-r3 -Home-page: -Author: Windriver -Author-email: info@windriver.com -License: Apache-2.0 - -Description: CGTS build information package - -Platform: UNKNOWN diff --git a/utilities/build-info/build-info-1.0/LICENSE b/utilities/build-info/build-info-1.0/LICENSE deleted file mode 100644 index d64569567..000000000 --- a/utilities/build-info/build-info-1.0/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). 
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/utilities/build-info/build-info-1.0/collect.sh b/utilities/build-info/build-info-1.0/collect.sh deleted file mode 100755 index 48abc23d0..000000000 --- a/utilities/build-info/build-info-1.0/collect.sh +++ /dev/null @@ -1,104 +0,0 @@ -#!/bin/bash - -# -# Copyright (c) 2013-2016 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - -root="../../../../../.." -centOSBuildRoot=".." -jenkinsBuildFileName="BUILD" -jenkinsBuildFile="$root/$jenkinsBuildFileName" -jenkinsBuildFileCentOS="$centOSBuildRoot/$jenkinsBuildFileName" -releaseInfoFile="../release-info.inc" -destFile="build.info" -destH="build_info.h" - -# If Jenkins build file does not exist in the expected Rel 2 directory, -# check if it was packaged in the source RPM -if [ ! 
-e $jenkinsBuildFile ]; then - if [ -e $jenkinsBuildFileCentOS ]; then - jenkinsBuildFile=$jenkinsBuildFileCentOS - fi -fi - -if [ -e $releaseInfoFile ]; then - source $releaseInfoFile -fi - -if [ "${REPO}x" == "x" ]; then - REPO=`grep CONFIGURE_CMD $root/config.properties | awk ' { print $1 } ' | awk -F '"' ' { print $2 } ' | sed 's|\(.*\)\(\/.*\/.*\)$|\1|g'` -fi - -if [ -e $jenkinsBuildFile ]; then - cp $jenkinsBuildFile $destFile - source $jenkinsBuildFile -else - # PLATFORM_RELEASE should be set in release-info.inc - if [ "x${PLATFORM_RELEASE}" == "x" ]; then - SW_VERSION="Unknown" - else - SW_VERSION="${PLATFORM_RELEASE}" - fi - - BUILD_TARGET="Unknown" - BUILD_TYPE="Informal" - BUILD_ID="n/a" - JOB="n/a" - if [ "${BUILD_BY}x" == "x" ]; then - BUILD_BY="$USER" - fi - BUILD_NUMBER="n/a" - BUILD_HOST="$HOSTNAME" - if [ "${BUILD_DATE}x" == "x" ]; then - BUILD_DATE=`date "%F %T %z"` - if [ $? -ne 0 ]; then - BUILD_DATE=`date "+%F %T %z"` - fi - fi - - echo "SW_VERSION=\"$SW_VERSION\"" > $destFile - echo "BUILD_TARGET=\"$BUILD_TARGET\"" >> $destFile - echo "BUILD_TYPE=\"$BUILD_TYPE\"" >> $destFile - echo "BUILD_ID=\"$BUILD_ID\"" >> $destFile - echo "" >> $destFile - echo "JOB=\"$JOB\"" >> $destFile - echo "BUILD_BY=\"$BUILD_BY\"" >> $destFile - echo "BUILD_NUMBER=\"$BUILD_NUMBER\"" >> $destFile - echo "BUILD_HOST=\"$BUILD_HOST\"" >> $destFile - echo "BUILD_DATE=\"$BUILD_DATE\"" >> $destFile - echo "" >> $destFile - echo "BUILD_DIR=\""`bash -c "cd $root; pwd"`"\"" >> $destFile - echo "WRS_SRC_DIR=\"$REPO\"" >> $destFile - if [ "${WRS_GIT_BRANCH}x" == "x" ]; then - echo "WRS_GIT_BRANCH=\""`cd $REPO; git status -s -b | grep '##' | awk ' { printf $2 } '`"\"" >> $destFile - else - echo "WRS_GIT_BRANCH=\"$WRS_GIT_BRANCH\"" >> $destFile - fi - - echo "CGCS_SRC_DIR=\"$REPO/stx\"" >> $destFile - if [ "${CGCS_GIT_BRANCH}x" == "x" ]; then - echo "CGCS_GIT_BRANCH=\""`cd $REPO/stx/; git status -s -b | grep '##' | awk ' { printf $2 } '`"\"" >> $destFile - else - echo "CGCS_GIT_BRANCH=\"$CGCS_GIT_BRANCH\"" >> $destFile - fi - -fi - -echo "#ifndef _BUILD_INFO_H_" > $destH -echo "#define _BUILD_INFO_H_" >> $destH -echo "" >> $destH -echo "#define RELEASE_NAME \"$RELEASE_NAME\"" >> $destH -echo "#define SW_VERSION \"$SW_VERSION\"" >> $destH -echo "" >> $destH -echo "#define BUILD_TARGET \"$BUILD_TARGET\"" >> $destH -echo "#define BUILD_TYPE \"$BUILD_TYPE\"" >> $destH -echo "#define BUILD_ID \"$BUILD_ID\"" >> $destH -echo "" >> $destH -echo "#define JOB \"$JOB\"" >> $destH -echo "#define BUILD_BY \"$BUILD_BY\"" >> $destH -echo "#define BUILD_NUMBER \"$BUILD_NUMBER\"" >> $destH -echo "#define BUILD_HOST \"$BUILD_HOST\"" >> $destH -echo "#define BUILD_DATE \"$BUILD_DATE\"" >> $destH -echo "#endif /* _BUILD_INFO_H_ */" >> $destH diff --git a/utilities/build-info/centos/build-info.spec b/utilities/build-info/centos/build-info.spec deleted file mode 100644 index 18ca8cde2..000000000 --- a/utilities/build-info/centos/build-info.spec +++ /dev/null @@ -1,50 +0,0 @@ -Summary: build-info version 1.0-r3 -Name: build-info -Version: 1.0 -Release: %{tis_patch_ver}%{?_tis_dist} -License: Apache-2.0 -Group: base -Packager: Wind River -URL: unknown -Source0: %{name}-%{version}.tar.gz -Source1: LICENSE - -%description -Build Info - -%define local_etcdir /etc -%define local_incdir /usr/include - -%define debug_package %{nil} - -%package -n build-info-dev -Summary: build-info version 1.0-r3 - Development files -Group: devel - -%description -n build-info-dev -Build Info This package contains symbolic links, header files, and 
related items necessary for software development. - -%files -%license ../LICENSE -%defattr(-,root,root,-) -%{local_etcdir}/* - -%prep -%setup - -%build -./collect.sh - -%install -install -d -m 755 %{buildroot}%{local_etcdir} -install -m 644 build.info %{buildroot}/%{local_etcdir} -install -d -m 755 %{buildroot}%{local_incdir} -install -m 644 build_info.h %{buildroot}/%{local_incdir} - -%clean -rm -rf $RPM_BUILD_ROOT - -%files -n build-info-dev -%defattr(-,root,root,-) -%{local_incdir}/* - diff --git a/utilities/build-info/centos/build_srpm b/utilities/build-info/centos/build_srpm deleted file mode 100755 index b93c548c8..000000000 --- a/utilities/build-info/centos/build_srpm +++ /dev/null @@ -1,130 +0,0 @@ -# -# Copyright (c) 2016 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - -source "$SRC_BASE/build-tools/spec-utils" - -if [ "x$DATA" == "x" ]; then - echo "ERROR: Environment variable 'DATA' not defined." - exit 1 -fi - -if [ ! -f "$DATA" ]; then - echo "ERROR: Couldn't find '$PWD/$DATA'" - exit 1 -fi - -unset TIS_PATCH_VER # Ensure there's nothing in the env already - -source $DATA - -if [ -z "$TIS_PATCH_VER" ]; then - echo "ERROR: TIS_PATCH_VER must be defined" - exit 1 -fi - -SRC_DIR="/build-info-1.0" -VERSION=$(grep '^Version:' PKG-INFO | awk -F ': ' '{print $2}' | sed -e 's/^[[:space:]]*//') -TAR_NAME=$(grep '^Name:' PKG-INFO | awk -F ': ' '{print $2}' | sed -e 's/^[[:space:]]*//') -CUR_DIR=`pwd` -BUILD_DIR="$RPMBUILD_BASE" - -# Additional files to include in the archive (if they exist). -EXTRA_FILES="./release-info.inc" -if [ -f $MY_WORKSPACE/BUILD ]; then - EXTRA_FILES+=" $MY_WORKSPACE/BUILD" -else - if [ -f $MY_WORKSPACE/../BUILD ]; then - EXTRA_FILES+=" $MY_WORKSPACE/../BUILD" - fi -fi - -mkdir -p $BUILD_DIR/SRPMS - -TAR_UNCOMPRESSED="$TAR_NAME-$VERSION.tar" -TAR="${TAR_UNCOMPRESSED}.gz" -COMPRESS="gzip" -TAR_PATH="$BUILD_DIR/SOURCES" - -# copy the LICENSE for rpm spec %license directive -cp .$SRC_DIR/LICENSE $BUILD_DIR/SOURCES/ - -# Check to see if our tarball needs updating -TAR_NEEDED=0 -if [ -f $TAR_PATH/$TAR ]; then - n=`find . -cnewer $TAR_PATH/$TAR -and ! -path './.git*' \ - -and ! -path './build/*' \ - -and ! -path './.pc/*' \ - -and ! -path './patches/*' \ - -and ! -path "./$DISTRO/*" \ - -and ! -path './pbr-*.egg/*' \ - | wc -l` - if [ $n -gt 0 ]; then - TAR_NEEDED=1 - fi - - # check to see if any of our EXTRA_FILES are newer than the archive - for file in "$EXTRA_FILES"; do - if [ $file -nt $TAR_PATH/$TAR ]; then - TAR_NEEDED=1 - fi - done -else - TAR_NEEDED=1 -fi - -if [ $TAR_NEEDED -gt 0 ]; then - tar cvf $TAR_PATH/$TAR_UNCOMPRESSED .$SRC_DIR \ - --exclude '.git*' --exclude 'build' --exclude='.pc' \ - --exclude='patches' --exclude="$DISTRO" --exclude='pbr-*.egg' \ - --transform "s,^\.$SRC_DIR/LICENSE,LICENSE," \ - --transform "s,^\.$SRC_DIR,$TAR_NAME-$VERSION," -fi - -for file in $EXTRA_FILES; do - if [ -e $file ]; then - tar rf $TAR_PATH/$TAR_UNCOMPRESSED -C $(dirname "${file}") $(basename "${file}") - fi -done - -$COMPRESS $TAR_PATH/$TAR_UNCOMPRESSED - -for SPEC in `ls $BUILD_DIR/SPECS`; do - SPEC_PATH="$BUILD_DIR/SPECS/$SPEC" - RELEASE=`spec_find_tag Release "$SPEC_PATH" 2>> /dev/null` - if [ $? -ne 0 ]; then - echo "ERROR: 'Release' not found in '$SPEC_PATH'" - fi - NAME=`spec_find_tag Name "$SPEC_PATH" 2>> /dev/null` - if [ $? -ne 0 ]; then - echo "ERROR: 'Name' not found in '$SPEC_PATH'" - fi - SRPM="$NAME-$VERSION-$RELEASE.src.rpm" - SRPM_PATH="$BUILD_DIR/SRPMS/$SRPM" - - BUILD_NEEDED=0 - if [ -f $SRPM_PATH ]; then - n=`find . 
-cnewer $SRPM_PATH | wc -l` - if [ $n -gt 0 ]; then - BUILD_NEEDED=1 - fi - else - BUILD_NEEDED=1 - fi - - if [ $BUILD_NEEDED -gt 0 ]; then - echo "SPEC file: $SPEC_PATH" - echo "SRPM build directory: $BUILD_DIR" - echo "TIS_PATCH_VER: $TIS_PATCH_VER" - - sed -i -e "1 i%define tis_patch_ver $TIS_PATCH_VER" $SPEC_PATH - rpmbuild -bs $SPEC_PATH --define="%_topdir $BUILD_DIR" --define="_tis_dist .tis" - fi -done - - - - - diff --git a/utilities/build-info/centos/build_srpm.data b/utilities/build-info/centos/build_srpm.data deleted file mode 100644 index 9ce9e6c0e..000000000 --- a/utilities/build-info/centos/build_srpm.data +++ /dev/null @@ -1,3 +0,0 @@ -TIS_PATCH_VER=4 -COPY_LIST=release-info.inc -OPT_DEP_LIST="$MY_WORKSPACE_TOP/BUILD $MY_PATCH_WORKSPACE_TOP/BUILD" diff --git a/utilities/build-info/release-info.inc b/utilities/build-info/release-info.inc deleted file mode 100644 index 4c7fc863c..000000000 --- a/utilities/build-info/release-info.inc +++ /dev/null @@ -1,12 +0,0 @@ -# -# Copyright (c) 2014-2016 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - -# -# Inclusion file to set release variables -# -# Note: Sourced from scripts, so needs to be bash-able -# -PLATFORM_RELEASE="19.09" diff --git a/utilities/namespace-utils/LICENSE b/utilities/namespace-utils/LICENSE deleted file mode 100644 index d64569567..000000000 --- a/utilities/namespace-utils/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. 
For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
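The namespace-utils package deleted below ships two small tools: bashns.c, which setns(2)-joins the mount namespace of a target PID and execs a shell there, and umount-in-namespace, which walks /proc/*/ns/mnt, picks one PID per distinct mount namespace, and runs a lazy umount inside each. On systems with util-linux, the stock nsenter(1) covers the same ground; the following is a rough equivalent of the umount sweep, assuming bash 4+, root privileges, and the mount point in $1:

declare -A seen
MNT=${1:?usage: $0 <mountpoint>}
SELF_NS=$(readlink /proc/self/ns/mnt)
for link in /proc/[0-9]*/ns/mnt; do
    ns=$(readlink "${link}" 2>/dev/null) || continue   # e.g. mnt:[4026531840]
    [ "${ns}" = "${SELF_NS}" ] && continue             # skip our own namespace
    [ -n "${seen[${ns}]:-}" ] && continue              # visit each namespace once
    seen[${ns}]=1
    pid=${link#/proc/}; pid=${pid%%/*}
    nsenter --mount --target "${pid}" umount -n -l "${MNT}"
done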
diff --git a/utilities/namespace-utils/centos/build_srpm.data b/utilities/namespace-utils/centos/build_srpm.data deleted file mode 100644 index e981e8b5d..000000000 --- a/utilities/namespace-utils/centos/build_srpm.data +++ /dev/null @@ -1,2 +0,0 @@ -SRC_DIR="namespace-utils" -TIS_PATCH_VER=0 diff --git a/utilities/namespace-utils/centos/namespace-utils.spec b/utilities/namespace-utils/centos/namespace-utils.spec deleted file mode 100644 index 601b11ec0..000000000 --- a/utilities/namespace-utils/centos/namespace-utils.spec +++ /dev/null @@ -1,35 +0,0 @@ -%define _CC gcc - -Summary: namespace utils -Name: namespace-utils -Version: 1.0 -Release: %{tis_patch_ver}%{?_tis_dist} -License: Apache-2.0 -Group: base -Packager: Wind River -URL: unknown -Source0: %{name}-%{version}.tar.gz - -%description -Titanium Cloud namespace utilities - -%prep -%setup -q - -%build -%{_CC} -o bashns bashns.c - -%install -rm -rf ${RPM_BUILD_ROOT} -install -d -m 755 ${RPM_BUILD_ROOT}%{_sbindir} -install -m 500 bashns ${RPM_BUILD_ROOT}%{_sbindir} -install -m 500 umount-in-namespace ${RPM_BUILD_ROOT}%{_sbindir} - -%clean -rm -rf ${RPM_BUILD_ROOT} - -%files -%license LICENSE -%defattr(-,root,root,-) -%{_sbindir}/umount-in-namespace -%{_sbindir}/bashns diff --git a/utilities/namespace-utils/namespace-utils/LICENSE b/utilities/namespace-utils/namespace-utils/LICENSE deleted file mode 100644 index d64569567..000000000 --- a/utilities/namespace-utils/namespace-utils/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. 
For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/utilities/namespace-utils/namespace-utils/bashns.c b/utilities/namespace-utils/namespace-utils/bashns.c deleted file mode 100644 index 2a9c15e5f..000000000 --- a/utilities/namespace-utils/namespace-utils/bashns.c +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Copyright (c) 2015 Wind River Systems, Inc. 
-* -* SPDX-License-Identifier: Apache-2.0 -* - */ - -#define _GNU_SOURCE -#include - -#include -#include -#include -#include -#include - -void usage(char *name) -{ - printf("usage: %s \n", name); - -} - -int main(int argc, char **argv) { - if (argc < 2) { - printf("usage: %s \n", argv[0]); - return -1; - } - - int pid = atoi(argv[1]); - printf("trying to open filesystem namespace of pid %d\n", pid); - - char buf[100]; - sprintf(buf, "/proc/%d/ns/mnt", pid); - - printf("trying to open %s\n", buf); - - int fd = open(buf, O_RDWR); - if (fd < 1) { - perror("unable to open file"); - return -1; - } - - printf("got fd, trying to set namespace\n"); - - int rc = setns(fd, 0); - if (rc < 0) { - perror("unable to set namespace"); - return -1; - } - - printf("entered namespace successfully, trying to exec bash\n"); - - rc = execvp("bash", 0); - if (rc < 0) { - perror("unable to exec bash"); - return -1; - } -} - diff --git a/utilities/namespace-utils/namespace-utils/umount-in-namespace b/utilities/namespace-utils/namespace-utils/umount-in-namespace deleted file mode 100644 index 934daeae2..000000000 --- a/utilities/namespace-utils/namespace-utils/umount-in-namespace +++ /dev/null @@ -1,25 +0,0 @@ -#!/bin/bash - -# -# Copyright (c) 2015 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - -MNT=$1 - -SELF_NS=$(ls -l /proc/self/ns/mnt 2>/dev/null| sed -r 's/.*\[(.*)\]/\1/') - -ls -l /proc/*/ns/mnt 2>/dev/null| sed -r 's/.*\[(.*)\]/\1/' | sort -u | while read ns -do - if [ "$ns" = "$SELF_NS" ] - then - continue - fi - - ls -l /proc/*/ns/mnt 2>/dev/null | grep $ns |grep '/proc/[0-9]*/' | sed -r 's#.*/proc/([0-9]*)/ns.*#\1#' | while read pid - do - echo "umount -n -l $MNT" | /usr/sbin/bashns $pid - done -done - diff --git a/utilities/pci-irq-affinity-agent/PKG-INFO b/utilities/pci-irq-affinity-agent/PKG-INFO deleted file mode 100644 index cb14bb3eb..000000000 --- a/utilities/pci-irq-affinity-agent/PKG-INFO +++ /dev/null @@ -1,7 +0,0 @@ -Metadata-Version: 1.2 -Name: PCIInterruptAffinityAgent -Version: 1.0 -Summary: PCI Interrupt Affinity Agent Package -Author: StarlingX -License: Apache-2.0 -Platform: UNKNOWN diff --git a/utilities/pci-irq-affinity-agent/centos/build_srpm.data b/utilities/pci-irq-affinity-agent/centos/build_srpm.data deleted file mode 100644 index 38fcf6e53..000000000 --- a/utilities/pci-irq-affinity-agent/centos/build_srpm.data +++ /dev/null @@ -1,3 +0,0 @@ -SRC_DIR="pci_irq_affinity" -COPY_LIST_TO_TAR="files/*" -TIS_PATCH_VER=1 diff --git a/utilities/pci-irq-affinity-agent/centos/pci-irq-affinity.spec b/utilities/pci-irq-affinity-agent/centos/pci-irq-affinity.spec deleted file mode 100644 index fc8d9853b..000000000 --- a/utilities/pci-irq-affinity-agent/centos/pci-irq-affinity.spec +++ /dev/null @@ -1,70 +0,0 @@ -Summary: StarlingX PCI Interrupt Affinity Agent Package -Name: pci-irq-affinity-agent -Version: 1.0 -Release: %{tis_patch_ver}%{?_tis_dist} -License: Apache-2.0 -Group: base -Packager: StarlingX -URL: unknown - -Source0: %{name}-%{version}.tar.gz - -Requires: python-novaclient -BuildRequires: python-setuptools -BuildRequires: systemd-devel - -%description -StarlingX PCI Interrupt Affinity Agent Package - -%define local_etc_initd /etc/init.d/ -%define local_etc_pmond /etc/pmon.d/ -%define pythonroot /usr/lib64/python2.7/site-packages -%define debug_package %{nil} - -%prep -%setup - -# Remove bundled egg-info -rm -rf *.egg-info - -%build -%{__python} setup.py build - -%install -%{__python} setup.py install --root=%{buildroot} \ - --install-lib=%{pythonroot} \ - 
--prefix=/usr \ - --install-data=/usr/share \ - --single-version-externally-managed - -%{__install} -d -m 755 %{buildroot}%{local_etc_initd} -%{__install} -p -D -m 755 pci-irq-affinity-agent %{buildroot}%{local_etc_initd}/pci-irq-affinity-agent - -%{__install} -d -m 755 %{buildroot}%{local_etc_pmond} -%{__install} -p -D -m 644 pci-irq-affinity-agent.conf %{buildroot}%{local_etc_pmond}/pci-irq-affinity-agent.conf -%{__install} -p -D -m 644 pci-irq-affinity-agent.service %{buildroot}%{_unitdir}/pci-irq-affinity-agent.service - -%{__install} -d %{buildroot}%{_bindir} -%{__install} -p -D -m 755 nova-sriov %{buildroot}%{_bindir}/nova-sriov - -%{__install} -d %{buildroot}%{_sysconfdir}/pci_irq_affinity -%{__install} -p -D -m 600 config.ini %{buildroot}%{_sysconfdir}/pci_irq_affinity/config.ini - -%post -/usr/bin/systemctl enable pci-irq-affinity-agent.service >/dev/null 2>&1 - -%clean -rm -rf $RPM_BUILD_ROOT - -%files -%defattr(-,root,root,-) -%doc LICENSE -%{local_etc_initd}/pci-irq-affinity-agent -%{local_etc_pmond}/pci-irq-affinity-agent.conf -%{_unitdir}/pci-irq-affinity-agent.service -%{pythonroot}/pci_irq_affinity/* -%{pythonroot}/pci_irq_affinity_agent-%{version}*.egg-info - -%{_bindir}/pci-irq-affinity-agent -%{_bindir}/nova-sriov -%config(noreplace) %{_sysconfdir}/pci_irq_affinity/config.ini diff --git a/utilities/pci-irq-affinity-agent/files/LICENSE b/utilities/pci-irq-affinity-agent/files/LICENSE deleted file mode 100644 index d64569567..000000000 --- a/utilities/pci-irq-affinity-agent/files/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. 
For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/utilities/pci-irq-affinity-agent/files/config.ini b/utilities/pci-irq-affinity-agent/files/config.ini deleted file mode 100644 index 50fd8870c..000000000 --- a/utilities/pci-irq-affinity-agent/files/config.ini +++ /dev/null @@ -1,22 +0,0 @@ -# -# Copyright (c) 2019 StarlingX. -# -# SPDX-License-Identifier: Apache-2.0 -# -[openstack] -openstack_enabled=False -username=admin -tenant=admin -authorization_protocol=http -authorization_ip=192.168.204.2 -authorization_port=5000 -user_domain_name=Default -project_domain_name=Default -keyring_service=CGCS - -[amqp] -host=192.168.204.2 -port=5672 -user_id=guest -password=guest -virt_host=/ diff --git a/utilities/pci-irq-affinity-agent/files/nova-sriov b/utilities/pci-irq-affinity-agent/files/nova-sriov deleted file mode 100755 index efdf74efb..000000000 --- a/utilities/pci-irq-affinity-agent/files/nova-sriov +++ /dev/null @@ -1,117 +0,0 @@ -#! /usr/bin/python - -# -# Copyright (c) 2015 Wind River Systems, Inc. 
-# -# SPDX-License-Identifier: Apache-2.0 -# - -import sys -import os -import json -import fnmatch - - -def usage(): - argv0 = os.path.basename(sys.argv[0]) - print """ -Usage: ------- - %(argv0)s pci_pt_whitelist pci_sriov_whitelist - -Where pci_pt_whitelist is a list of passthrough devices and the -pci_sriov_whitelist is a list of SR-IOV interfaces. The format of the lists -are as follows: - - pci_pt_whitelist: - [{"address": "0000:09:00.0"}, ..] - - pci_sriov_whitelist: - [{"sriov_numvfs": 16, "physical_network": "group0-nic0", - "address": "0000:02:00.0"}, ..] - - """ % locals() # replace items from local variables - - -def get_vf_whitelist(sriov_if): - '''For the given PF PCI address and provider network, generate the list of VF - PCI addresses to create a VF based whitelist''' - - pf_addr = sriov_if.get('address') - dirpcidev = '/sys/bus/pci/devices/' + pf_addr - - # Attempt to configure the requested number of VFs if the device supports - # setting the number of VFs via sysfs - # Need to write 0 to sriov_numvfs before writing a new value. - numvfs = sriov_if.get('sriov_numvfs') - if numvfs is not None: - numvfs_path = os.path.join(dirpcidev, 'sriov_numvfs') - if os.path.isfile(numvfs_path): - with open(numvfs_path, 'w') as f: - f.write('0') - f.flush() - f.write(str(numvfs)) - - virtfn_links = len(fnmatch.filter(os.listdir(dirpcidev), 'virtfn*')) - - # Some devices (for e.g. Coleto Creek) don't support configuration of the - # number of VFs. Use all the VFs present in this case. - if numvfs is not None: - if virtfn_links != numvfs: - print 'Configured number of VFs is different than the present ones', \ - '(if:%s conf:%d present:%d)' % (pf_addr, numvfs, virtfn_links) - exit(1) - else: - numvfs = virtfn_links - - pci_sriov_vf_whitelist = [] - i = 0 - while i < int(numvfs): - lvf = dirpcidev + '/virtfn' + str(i) - try: - vf_addr = os.path.basename(os.readlink(lvf)) - except: - print("virtfn link %s non-existent (numvfs=%s)" % (lvf, numvfs)) - sys.exit(1) - - device = {'address': vf_addr} - - # Some devices (for e.g. Coleto Creek) are not associated with a - # physical network. - providernets = sriov_if.get('physical_network') - if providernets: - device.update({'physical_network': providernets}) - - pci_sriov_vf_whitelist.append(device) - i += 1 - - return pci_sriov_vf_whitelist - - -def main(): - ''' The goal of this script is to properly discover SR-IOV VF PCI addresses - for interfaces that were configured for SR-IOV. It is used by the - nova-compute puppet manifest and is run at manifest application time. This - script should be run after the VF driver is loaded and the VF PCI addresses - are visible in the system.''' - - if len(sys.argv) < 3: - usage() - sys.exit(1) - - try: - pci_pt_whitelist = json.loads(sys.argv[1]) - pci_sriov_whitelist = json.loads(sys.argv[2]) - except: - usage() - exit(1) - - for sriov_if in pci_sriov_whitelist: - pci_sriov_vf_whitelist = get_vf_whitelist(sriov_if) - pci_pt_whitelist.extend(pci_sriov_vf_whitelist) - - return pci_pt_whitelist - - -if __name__ == "__main__": - print json.dumps(main()) diff --git a/utilities/pci-irq-affinity-agent/files/pci-irq-affinity-agent b/utilities/pci-irq-affinity-agent/files/pci-irq-affinity-agent deleted file mode 100755 index 71c2db2e5..000000000 --- a/utilities/pci-irq-affinity-agent/files/pci-irq-affinity-agent +++ /dev/null @@ -1,105 +0,0 @@ -#! /bin/sh -# -# Copyright (c) 2019 StarlingX. 
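The nova-sriov script deleted above takes two JSON lists on its command line and prints the merged whitelist (json.dumps of the passthrough list extended with one entry per discovered VF) on stdout. A hypothetical invocation, with illustrative PCI addresses and the output pretty-printed for readability:

    nova-sriov '[{"address": "0000:09:00.0"}]' \
        '[{"sriov_numvfs": 2, "physical_network": "group0-nic0", "address": "0000:02:00.0"}]'

    [{"address": "0000:09:00.0"},
     {"address": "0000:02:10.0", "physical_network": "group0-nic0"},
     {"address": "0000:02:10.2", "physical_network": "group0-nic0"}]

The VF addresses come from the virtfn* symlinks under the PF's sysfs directory, and each VF entry inherits the PF's physical_network when one is configured.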
-# -# SPDX-License-Identifier: Apache-2.0 -# - -# -# chkconfig: 2345 75 25 -# -### BEGIN INIT INFO -# Provides: pci-irq-affinity-agent -### END INIT INFO - -source /etc/init.d/functions - -DAEMON_NAME="pci-irq-affinity-agent" -AFFINITYAGENT="/usr/bin/${DAEMON_NAME}" -daemon_pidfile="/var/run/${DAEMON_NAME}.pid" - -if [ ! -f "${AFFINITYAGENT}" ] ; then - logger "$0: ${AFFINITYAGENT} is missing" - exit 1 -fi - -RETVAL=0 - -PATH=/sbin:/usr/sbin:/bin:/usr/bin:/usr/local/bin -export PATH - -case "$1" in - start) - # Check for installation failure - if [ -f /etc/platform/installation_failed ] ; then - logger "$0: /etc/platform/installation_failed flag is set. Aborting." - exit 1 - fi - - echo -n "Setting up config for pci-irq-affinity-agent: " - - if [ -e ${daemon_pidfile} ] ; then - echo "Killing existing process before starting new" - pid=`cat ${daemon_pidfile}` - kill -TERM $pid - rm -f ${daemon_pidfile} - fi - - echo -n "Starting pci-irq-affinity-agent: " - /bin/sh -c "${AFFINITYAGENT}"' >> /dev/null 2>&1 & echo $!' > ${daemon_pidfile} - RETVAL=$? - if [ $RETVAL -eq 0 ] ; then - echo "OK" - touch /var/lock/subsys/${DAEMON_NAME} - else - echo "FAIL" - fi - ;; - - stop) - echo -n "Stopping pci-irq-affinity-agent: " - - if [ -e ${daemon_pidfile} ] ; then - pid=`cat ${daemon_pidfile}` - kill -TERM $pid - rm -f ${daemon_pidfile} - rm -f /var/lock/subsys/${DAEMON_NAME} - echo "OK" - else - echo "FAIL" - fi - ;; - - restart) - $0 stop - sleep 1 - $0 start - ;; - - status) - if [ -e ${daemon_pidfile} ] ; then - pid=`cat ${daemon_pidfile}` - ps -p $pid | grep -v "PID TTY" >> /dev/null 2>&1 - if [ $? -eq 0 ] ; then - echo "pci-irq-affinity-agent is running" - RETVAL=0 - else - echo "pci-irq-affinity-agent is not running" - RETVAL=1 - fi - else - echo "pci-irq-affinity-agent is not running ; no pidfile" - RETVAL=1 - fi - ;; - - condrestart) - [ -f /var/lock/subsys/$DAEMON_NAME ] && $0 restart - ;; - - *) - echo "usage: $0 { start | stop | status | restart | condrestart | status }" - ;; -esac - -exit $RETVAL diff --git a/utilities/pci-irq-affinity-agent/files/pci-irq-affinity-agent.conf b/utilities/pci-irq-affinity-agent/files/pci-irq-affinity-agent.conf deleted file mode 100644 index ea13f86bc..000000000 --- a/utilities/pci-irq-affinity-agent/files/pci-irq-affinity-agent.conf +++ /dev/null @@ -1,10 +0,0 @@ -[process] -process = pci-irq-affinity-agent -pidfile = /var/run/pci-irq-affinity-agent.pid -script = /etc/init.d/pci-irq-affinity-agent -style = lsb ; ocf or lsb -severity = major ; minor, major, critical -restarts = 3 ; restarts before error assertion -interval = 5 ; number of seconds to wait between restarts -debounce = 20 ; number of seconds to wait before degrade clear -subfunction = last-config ; run it only after last config is run diff --git a/utilities/pci-irq-affinity-agent/files/pci-irq-affinity-agent.service b/utilities/pci-irq-affinity-agent/files/pci-irq-affinity-agent.service deleted file mode 100644 index 737d75a4e..000000000 --- a/utilities/pci-irq-affinity-agent/files/pci-irq-affinity-agent.service +++ /dev/null @@ -1,14 +0,0 @@ -[Unit] -Description=StarlingX PCI Interrupt Affinity Agent -After=sysinv-agent.service -Before=pmon.service - -[Service] -Type=forking -RemainAfterExit=yes -ExecStart=/etc/init.d/pci-irq-affinity-agent start -ExecStop=/etc/init.d/pci-irq-affinity-agent stop -PIDFile=/var/run/pci-irq-affinity-agent.pid - -[Install] -WantedBy=multi-user.target diff --git a/utilities/pci-irq-affinity-agent/pci_irq_affinity/pci_irq_affinity/__init__.py 
b/utilities/pci-irq-affinity-agent/pci_irq_affinity/pci_irq_affinity/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/utilities/pci-irq-affinity-agent/pci_irq_affinity/pci_irq_affinity/affinity.py b/utilities/pci-irq-affinity-agent/pci_irq_affinity/pci_irq_affinity/affinity.py deleted file mode 100644 index 88bb1f923..000000000 --- a/utilities/pci-irq-affinity-agent/pci_irq_affinity/pci_irq_affinity/affinity.py +++ /dev/null @@ -1,92 +0,0 @@ -# -# Copyright (c) 2019 StarlingX. -# -# SPDX-License-Identifier: Apache-2.0 -# - -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# All Rights Reserved. -# - -""" Define pci_irq_affinity_provider class""" - -import utils as pci_utils -from driver import AffinePciIrqDriver -from nova_provider import novaClient -from log import LOG - - -class pci_irq_affinity_provider: - def __init__(self): - self.affinePciIrqDriver = AffinePciIrqDriver() - self.inst_dict = {} - - def reset_irq_affinity(self, uuid, irqs=None, msi_irqs=None): - """Reset irq affinity for instance - - The instance has already been deleted or - related PCI not used by it anymore. - """ - if irqs or msi_irqs: - # reset irq affinity for specified irqs - _irqs = irqs - _msi_irqs = msi_irqs - - elif uuid in self.inst_dict: - # reset all irq affinity for deleted instance - _irqs = self.inst_dict[uuid][0] - _msi_irqs = self.inst_dict[uuid][1] - else: - LOG.debug("No pci affinity need to be reset for instance=%s!" % uuid) - return - - try: - with open('/proc/irq/default_smp_affinity') as f: - cpulist = f.readline().strip() - LOG.debug("default smp affinity bitmap:%s" % cpulist) - - for x in [_irqs, _msi_irqs]: - if len(x) > 0: - pci_utils.set_irq_affinity(True, x, cpulist) - - except Exception as e: - LOG.error("Failed to reset smp affinity! error=%s" % e) - - LOG.info("Reset smp affinity done for instance=%s!" % uuid) - - def instance_irq_pcpulist_update(self, uuid, irqs, msi_irqs, cpulist): - if uuid in self.inst_dict: - _prev = self.inst_dict[uuid] - # get irqs that not appear anymore. - _irqs = _prev[0].difference(irqs) - _msi_irqs = _prev[1].difference(msi_irqs) - - # reset pci affinity for those pcis not used by intance anymore - if (len(_irqs) + len(_msi_irqs)) > 0: - self.reset_irq_affinity(uuid, _irqs, _msi_irqs) - - self.inst_dict[uuid] = [irqs, msi_irqs, cpulist] - LOG.debug(self.inst_dict) - - def affine_pci_dev_instance(self, instance, wait_for_irqs=True): - if instance is not None: - if instance.get_cpu_policy() == 'dedicated' and instance.get_pci_devices(): - LOG.debug("Instance=%s use dedicated cpu policy!!!" 
% instance.uuid) - irqs, msi_irqs, cpulist = \ - self.affinePciIrqDriver.affine_pci_dev_irqs(instance, wait_for_irqs) - # record instance on which pci affinity has been applied - self.instance_irq_pcpulist_update(instance.uuid, irqs, msi_irqs, cpulist) - return - - def audit_pci_irq_affinity(self): - # audit instance PCI devices periodically - filters = {'vm_state': 'active', - 'task_state': None, - 'deleted': False} - instances = novaClient.get_instances(filters) - for inst in instances: - self.affine_pci_dev_instance(inst, wait_for_irqs=False) - - -pciIrqAffinity = pci_irq_affinity_provider() diff --git a/utilities/pci-irq-affinity-agent/pci_irq_affinity/pci_irq_affinity/agent.py b/utilities/pci-irq-affinity-agent/pci_irq_affinity/pci_irq_affinity/agent.py deleted file mode 100644 index c50c7388d..000000000 --- a/utilities/pci-irq-affinity-agent/pci_irq_affinity/pci_irq_affinity/agent.py +++ /dev/null @@ -1,206 +0,0 @@ -# -# Copyright (c) 2019 StarlingX. -# -# SPDX-License-Identifier: Apache-2.0 -# - -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# All Rights Reserved. -# - -""" Pci interrupt affinity agent daemon entry""" - -import six -import json -import sys -import signal -import re -import eventlet -import threading -import time - -from oslo_service import periodic_task -from oslo_service import service -import oslo_messaging - -from config import CONF -from config import sysconfig -from nova_provider import novaClient -from affinity import pciIrqAffinity -from log import LOG - -stay_on = True - - -class EventType: - CREATE = 'compute.instance.create.end' - DELETE = 'compute.instance.delete.end' - RESIZE = 'compute.instance.resize.confirm.end' - - -def process_signal_handler(signum, frame): - """Process Signal Handler""" - global stay_on - - if signum in [signal.SIGTERM, signal.SIGINT, signal.SIGTSTP]: - stay_on = False - else: - LOG.info("Ignoring signal" % signum) - - -def get_inst(instance_uuid, callback): - # get instance info from nova - inst = novaClient.get_instance(instance_uuid) - if inst is not None: - LOG.debug("inst:%s" % inst) - callback(inst) - - -def query_instance_callback(inst): - LOG.debug("query inst:%s" % inst) - pciIrqAffinity.affine_pci_dev_instance(inst) - - -@periodic_task.periodic_task(spacing=CONF.pci_affine_interval) -def audit_affinity(self, context): - pciIrqAffinity.audit_pci_irq_affinity() - - -def audit_work(srv, callback): - srv.tg.add_dynamic_timer(callback, None, None, None) - srv.tg.wait() - - -def audits_initialize(): - """Init periodic audit task for pci interrupt affinity check""" - srv = service.Service() - periodicTasks = periodic_task.PeriodicTasks(CONF) - periodicTasks.add_periodic_task(audit_affinity) - thread = threading.Thread(target=audit_work, args=(srv, periodicTasks.run_periodic_tasks)) - thread.start() - return srv - - -class InstCreateNotificationEp(object): - filter_rule = oslo_messaging.NotificationFilter( - event_type=EventType.CREATE) - - def info(self, ctxt, publisher_id, event_type, payload, metadata): - uuid = payload.get('instance_id', None) - self.instance_create_handler(uuid) - - def instance_create_handler(self, instance_uuid): - if instance_uuid is not None: - LOG.info("instance_created: uuid=%s." 
% instance_uuid) - eventlet.spawn(get_inst, instance_uuid, query_instance_callback).wait() - - -class InstResizeNotificationEp(object): - filter_rule = oslo_messaging.NotificationFilter( - event_type=EventType.RESIZE) - - def info(self, ctxt, publisher_id, event_type, payload, metadata): - uuid = payload.get('instance_id', None) - self.instance_resize_handler(uuid) - - def instance_resize_handler(self, instance_uuid): - if instance_uuid is not None: - LOG.info("instance_resized: uuid=%s." % instance_uuid) - eventlet.spawn(get_inst, instance_uuid, query_instance_callback).wait() - - -class InstDelNotificationEp(object): - filter_rule = oslo_messaging.NotificationFilter( - event_type=EventType.DELETE) - - def info(self, ctxt, publisher_id, event_type, payload, metadata): - uuid = payload.get('instance_id', None) - self.instance_delete_handler(uuid) - - def instance_delete_handler(self, instance_uuid): - if instance_uuid is not None: - LOG.info("instance_deleted: uuid=%s." % instance_uuid) - pciIrqAffinity.reset_irq_affinity(instance_uuid) - - -def get_rabbit_config(): - """Get rabbit config info from specific system config file.""" - - rabbit_cfg = {} - rabbit_session = 'amqp' - options = ['host', 'port', 'user_id', 'password', - 'virt_host'] - try: - for option in options: - rabbit_cfg[option] = sysconfig.get(rabbit_session, option) - - except Exception as e: - LOG.error("Could not read all required rabbitmq configuration! Err=%s" % e) - rabbit_cfg = {} - - return rabbit_cfg - - -def rpc_work(srv): - srv.start() - srv.wait() - - -def start_rabbitmq_client(): - """Start Rabbitmq client to listen instance notifications from Nova""" - cfg = get_rabbit_config() - rabbit_url = "rabbit://%s:%s@%s:%s/%s" % (cfg['user_id'], cfg['password'], - cfg['host'], cfg['port'], cfg['virt_host']) - LOG.info(rabbit_url) - - target = oslo_messaging.Target(exchange="nova", topic="notifications", server="info", - version="2.1", fanout=True) - transport = oslo_messaging.get_notification_transport(CONF, url=rabbit_url) - endpoints = [InstCreateNotificationEp(), - InstResizeNotificationEp(), - InstDelNotificationEp()] - - server = oslo_messaging.get_notification_listener(transport, [target], - endpoints, "threading") - thread = threading.Thread(target=rpc_work, args=(server,)) - thread.start() - LOG.info("Rabbitmq Client Started!") - - return server - - -def process_main(): - """Entry function for PCI Interrupt Affinity Agent""" - - LOG.info("Enter PCIInterruptAffinity Agent") - - try: - signal.signal(signal.SIGTSTP, process_signal_handler) - openstack_enabled = sysconfig.get('openstack', 'openstack_enabled') - if openstack_enabled == 'true': - novaClient.open_libvirt_connect() - audit_srv = audits_initialize() - rabbit_client = start_rabbitmq_client() - - while stay_on: - time.sleep(1) - - except KeyboardInterrupt: - LOG.info("keyboard Interrupt received.") - pass - - except Exception as e: - LOG.info("%s" % e) - sys.exit(200) - - finally: - LOG.error("proces_main finalized!!!") - if openstack_enabled == 'true': - novaClient.close_libvirt_connect() - audit_srv.tg.stop() - rabbit_client.stop() - - -if __name__ == '__main__': - process_main() diff --git a/utilities/pci-irq-affinity-agent/pci_irq_affinity/pci_irq_affinity/config.py b/utilities/pci-irq-affinity-agent/pci_irq_affinity/pci_irq_affinity/config.py deleted file mode 100644 index 327a98522..000000000 --- a/utilities/pci-irq-affinity-agent/pci_irq_affinity/pci_irq_affinity/config.py +++ /dev/null @@ -1,45 +0,0 @@ -# -# Copyright (c) 2019 StarlingX. 
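start_rabbitmq_client above assembles its transport URL from the [amqp] section of config.ini; with the sample values shipped in files/config.ini earlier in this patch (user_id guest, password guest, host 192.168.204.2, port 5672, virt_host /), the listener would connect to:

    rabbit://guest:guest@192.168.204.2:5672/

and subscribe to nova's "notifications" topic, so instance create, resize-confirm, and delete events drive the three notification endpoints registered above.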
-# -# SPDX-License-Identifier: Apache-2.0 -# - -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# All Rights Reserved. -# - -""" Define configuration info for pci-irq-affinity-agent""" - -from six.moves import configparser -from oslo_config import cfg - -pci_irq_affinity_opts = [ - cfg.IntOpt('pci_affine_interval', - default=60, - help='Number of seconds between pci affinity updates'), - cfg.IntOpt('msi_irq_timeout', - default=45, - help='Number of seconds to wait for msi irq configuration'), - cfg.IntOpt('msi_irq_since', - default=6, - help='Number of seconds to wait for msi irqs to stabilize.'), - cfg.IntOpt('msi_irq_check_interval', - default=2, - help='Check interval in seconds for msi irqs to stabilize.'), - cfg.StrOpt('config_file', - default='/etc/pci_irq_affinity/config.ini', - help='Get config info from specific config file.'), -] - -CONF = cfg.CONF - - -def register_opts(conf): - conf.register_opts(pci_irq_affinity_opts) - - -register_opts(CONF) - -sysconfig = configparser.ConfigParser() -sysconfig.read(CONF.config_file) diff --git a/utilities/pci-irq-affinity-agent/pci_irq_affinity/pci_irq_affinity/driver.py b/utilities/pci-irq-affinity-agent/pci_irq_affinity/pci_irq_affinity/driver.py deleted file mode 100644 index 9f9c2ca3d..000000000 --- a/utilities/pci-irq-affinity-agent/pci_irq_affinity/pci_irq_affinity/driver.py +++ /dev/null @@ -1,141 +0,0 @@ -# -# Copyright (c) 2019 StarlingX. -# -# SPDX-License-Identifier: Apache-2.0 -# - -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# All Rights Reserved. -# - -""" Define AffinePciIrqDriver class""" - -from oslo_service import loopingcall -from oslo_concurrency import lockutils -import utils as pci_utils -import instance -from config import CONF -from log import LOG -from nova_provider import novaClient - -synchronized = lockutils.synchronized_with_prefix('pci_irq_affinity-') - - -class AffinePciIrqDriver: - - def __init__(self): - self._msi_irq_count = {} - self._msi_irq_since = {} - self._msi_irq_elapsed = {} - - def affine_pci_dev_irqs(self, inst, wait_for_irqs=True): - """Affine PCI device irqs to VM's pcpus.""" - - def _wait_for_msi_irqs(self, inst): - """Check if each pci device has the expected number of msi irqs.""" - _prev = self._msi_irq_count.copy() - addrs = set() - - for pci_dev in inst.pci_devices: - addr = pci_dev.address - addrs.update([addr]) - try: - irqs, msi_irqs = pci_utils.get_irqs_by_pci_address(addr) - except Exception as e: - msi_irqs = set() - LOG.error('_wait_for_msi_irqs: pci_addr=%(A)s, error=%(E)s' % - {'A': addr, 'E': e}) - self._msi_irq_count[addr] = len(msi_irqs) - self._msi_irq_elapsed[addr] += \ - CONF.msi_irq_check_interval - if _prev[addr] == self._msi_irq_count[addr]: - self._msi_irq_since[addr] += \ - CONF.msi_irq_check_interval - else: - self._msi_irq_since[addr] = 0 - - # Done when msi irq counts have not changed for some time - if all((self._msi_irq_count[k] > 0) and - (self._msi_irq_since[k] >= CONF.msi_irq_since) - for k in addrs): - raise loopingcall.LoopingCallDone() - - # Abort due to timeout - if all(self._msi_irq_elapsed[k] >= CONF.msi_irq_timeout - for k in addrs): - msg = ("reached %(timeout)s seconds timeout, waiting for " - "msi irqs of pci_addrs: %(addrs)s") % { - 'timeout': CONF.msi_irq_timeout, - 'addrs': list(addrs)} - LOG.warning(msg) - raise loopingcall.LoopingCallDone() - - # Determine how many msi irqs we expect to be configured. - if len(inst.get_pci_devices()) == 0: - return - - # Initialize msi irq tracking. 
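With the defaults declared in config.py above (msi_irq_check_interval=2, msi_irq_since=6, msi_irq_timeout=45), _wait_for_msi_irqs polls every 2 seconds and finishes once every PCI address reports a non-zero MSI IRQ count that has been unchanged for at least 6 seconds, i.e. three consecutive identical polls; if counts keep changing it gives up after 45 seconds. An illustrative timeline for one device:

    t=0s   count=0  (initialized)   since=0
    t=2s   count=5  (changed)       since=0
    t=4s   count=5  (unchanged)     since=2
    t=6s   count=5  (unchanged)     since=4
    t=8s   count=5  (unchanged)     since=6  -> LoopingCallDone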
- for pci_dev in inst.pci_devices: - if wait_for_irqs or (pci_dev.address not in self._msi_irq_count): - self._msi_irq_count[pci_dev.address] = 0 - self._msi_irq_since[pci_dev.address] = 0 - self._msi_irq_elapsed[pci_dev.address] = 0 - - # Wait for msi irqs to be configured. - if wait_for_irqs: - timer = loopingcall.FixedIntervalLoopingCall( - _wait_for_msi_irqs, self, inst) - timer.start(interval=CONF.msi_irq_check_interval).wait() - - @synchronized(inst.uuid) - def do_affine_pci_dev_instance(refresh_need): - """Set pci device irq affinity for this instance.""" - - _irqs = set() - _msi_irqs = set() - # refresh instance info. - if refresh_need: - _inst = novaClient.get_instance(inst.uuid) - if _inst is None: - return - - numa_topology = _inst.get_numa_topology() - extra_spec = _inst.get_extra_spec() - for pci_dev in _inst.pci_devices: - try: - irqs, msi_irqs, pci_numa_node, pci_cpulist = \ - pci_utils.set_irqs_affinity_by_pci_address( - pci_dev.address, extra_spec, numa_topology) - except Exception as e: - irqs = set() - msi_irqs = set() - pci_numa_node = None - pci_cpulist = '' - LOG.error("Could not affine irqs for pci_addr:%(A)s, " - "error: %(E)s" % {"A": pci_dev.address, "E": e}) - - # Log irqs affined when there is a change in the counts. - msi_irq_count = len(msi_irqs) - if ((msi_irq_count != self._msi_irq_count[pci_dev.address]) or - wait_for_irqs): - self._msi_irq_count[pci_dev.address] = msi_irq_count - LOG.info(("Instance=%(U)s: IRQs affined for pci_addr=%(A)s, " - "dev_id=%(D)s, dev_type=%(T)s, " - "vendor_id=%(V)s, product_id=%(P)s, " - "irqs=%(I)s, msi_irqs=%(M)s, " - "numa_node=%(N)s, cpulist=%(C)s") - % {'U': inst.uuid, - 'A': pci_dev.address, - 'D': pci_dev.dev_id, - 'T': pci_dev.dev_type, - 'V': pci_dev.vendor_id, - 'P': pci_dev.product_id, - 'I': ', '.join(map(str, irqs)), - 'M': ', '.join(map(str, msi_irqs)), - 'N': pci_numa_node, 'C': pci_cpulist}) - _irqs.update(irqs) - _msi_irqs.update(msi_irqs) - return (_irqs, _msi_irqs, pci_cpulist) - return do_affine_pci_dev_instance(wait_for_irqs) - diff --git a/utilities/pci-irq-affinity-agent/pci_irq_affinity/pci_irq_affinity/guest.py b/utilities/pci-irq-affinity-agent/pci_irq_affinity/pci_irq_affinity/guest.py deleted file mode 100644 index ff8eac0fd..000000000 --- a/utilities/pci-irq-affinity-agent/pci_irq_affinity/pci_irq_affinity/guest.py +++ /dev/null @@ -1,265 +0,0 @@ -# -# Copyright (c) 2019 StarlingX. -# -# SPDX-License-Identifier: Apache-2.0 -# - -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# All Rights Reserved. -# - -""" Encapsulate libvirt related interfaces""" - -import libvirt -import os -import sys -import signal -from xml.dom import minidom -from xml.etree import ElementTree -from log import LOG - -debug = 0 -# libvirt timeout parameters -LIBVIRT_TIMEOUT_SEC = 5.0 -total_cpus = 0 - - -def range_to_list(csv_range=None): - """Convert a string of comma separate ranges into an expanded list of integers. 
- - E.g., '1-3,8-9,15' is converted to [1,2,3,8,9,15] - """ - if not csv_range: - return [] - xranges = [(lambda L: range(L[0], L[-1] + 1))(map(int, r.split('-'))) - for r in csv_range.split(',')] - return [y for x in xranges for y in x] - - -def _translate_virDomainState(state): - """Return human readable virtual domain state string.""" - states = {} - states[0] = 'NOSTATE' - states[1] = 'Running' - states[2] = 'Blocked' - states[3] = 'Paused' - states[4] = 'Shutdown' - states[5] = 'Shutoff' - states[6] = 'Crashed' - states[7] = 'pmSuspended' - states[8] = 'Last' - return states[state] - - -def _mask_to_cpulist(mask=0): - """Create cpulist from mask, list in socket-core-thread enumerated order. - - :param extended: extended info - :param mask: cpuset mask - :returns cpulist: list of cpus in socket-core-thread enumerated order - """ - cpulist = [] - if mask is None or mask <= 0: - return cpulist - - # Assume max number of cpus for now... - max_cpus = 1024 - for cpu in range(max_cpus): - if ((1 << cpu) & mask): - cpulist.append(cpu) - return cpulist - - -class suppress_stdout_stderr(object): - """A context manager for doing a "deep suppression" of stdout and stderr in Python - - i.e. will suppress all print, even if the print originates in a compiled C/Fortran - sub-function. - This will not suppress raised exceptions, since exceptions are printed - to stderr just before a script exits, and after the context manager has - exited (at least, I think that is why it lets exceptions through). - """ - def __init__(self): - # Open a pair of null files - self.null_fds = [os.open(os.devnull, os.O_RDWR) for x in range(2)] - # Save the actual stdout (1) and stderr (2) file descriptors. - self.save_fds = (os.dup(1), os.dup(2)) - - def __enter__(self): - # Assign the null pointers to stdout and stderr. - os.dup2(self.null_fds[0], 1) - os.dup2(self.null_fds[1], 2) - - def __exit__(self, *_): - # Re-assign the real stdout/stderr back to (1) and (2) - os.dup2(self.save_fds[0], 1) - os.dup2(self.save_fds[1], 2) - # Close the null files - os.close(self.null_fds[0]) - os.close(self.null_fds[1]) - - -class TimeoutError(Exception): - pass - - -def timeout_handler(signum, frame): - raise TimeoutError('timeout') - - -def connect_to_libvirt(): - """Connect to local libvirt.""" - duri = "qemu:///system" - try: - signal.signal(signal.SIGALRM, timeout_handler) - signal.setitimer(signal.ITIMER_REAL, LIBVIRT_TIMEOUT_SEC) - with suppress_stdout_stderr(): - conn = libvirt.openReadOnly(duri) - signal.alarm(0) - except TimeoutError: - conn = None - raise - except Exception as e: - conn = None - raise - finally: - signal.alarm(0) - return conn - - -def get_host_cpu_topology(): - """Enumerate logical cpu topology using socket_id, core_id, thread_id. - - This generates the following dictionary: - topology[socket_id][core_id][thread_id] = cpu_id - """ - global total_cpus - - # Connect to local libvirt hypervisor - conn = connect_to_libvirt() - # Get host capabilities - caps_str = conn.getCapabilities() - doc = ElementTree.fromstring(caps_str) - caps = minidom.parseString(caps_str) - caps_host = caps.getElementsByTagName('host')[0] - caps_cells = caps_host.getElementsByTagName('cells')[0] - total_cpus = caps_cells.getElementsByTagName('cpu').length - - Thread_cnt = {} - topology = {} - cells = doc.findall('./host/topology/cells/cell') - for cell in cells: - for cpu in cell.findall('./cpus/cpu'): - # obtain core_id, cpu_id, and socket_id; ignore 'siblings' since - # that can be inferred by enumeration of thread_id. 
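guest.py juggles three encodings of the same CPU set: kernel-style range strings, integer bitmasks, and plain Python lists. Based on the docstrings above, the conversion helpers behave like this (interpreter session; the second mask value is an illustrative addition):

    >>> range_to_list('1-3,8-9,15')
    [1, 2, 3, 8, 9, 15]
    >>> _mask_to_cpulist(mask=0x1a)   # 0b11010: bits 1, 3 and 4 set
    [1, 3, 4]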
- core_id = int(cpu.get('core_id')) - cpu_id = int(cpu.get('id')) - socket_id = int(cpu.get('socket_id')) - - # thread_id's are enumerated assuming cpu_id is already sorted - if socket_id not in Thread_cnt: - Thread_cnt[socket_id] = {} - if core_id not in Thread_cnt[socket_id]: - Thread_cnt[socket_id][core_id] = 0 - else: - Thread_cnt[socket_id][core_id] += 1 - thread_id = Thread_cnt[socket_id][core_id] - - # save topology[socket_id][core_id][thread_id] - if socket_id not in topology: - topology[socket_id] = {} - if core_id not in topology[socket_id]: - topology[socket_id][core_id] = {} - topology[socket_id][core_id][thread_id] = cpu_id - conn.close() - return topology - - -def get_guest_domain_info(dom): - """Obtain cpulist of pcpus in the order of vcpus. - - This applies to either pinned or floating vcpus, Note that the cpuinfo - pcpu value can be stale if we scale down cpus since it reports cpu-last-run. - For this reason use cpumap = d_vcpus[1][vcpu], instead of cpuinfo - (i.e., vcpu, state, cpuTime, pcpu = d_vcpus[0][vcpu]). - """ - uuid = dom.UUIDString() - d_state, d_maxMem_KiB, d_memory_KiB, \ - d_nrVirtCpu, d_cpuTime = dom.info() - try: - with suppress_stdout_stderr(): - d_vcpus = dom.vcpus() - except Exception as e: - d_vcpus = tuple([d_nrVirtCpu * [], - d_nrVirtCpu * [tuple(total_cpus * [False])]]) - - cpulist_p = [] - cpulist_d = {} - cpuset_total = 0 - up_total = 0 - for vcpu in range(d_nrVirtCpu): - cpuset_b = d_vcpus[1][vcpu] - cpuset = 0 - for cpu, up in enumerate(cpuset_b): - if up: - cpulist_d[vcpu] = cpu - aff = 1 << cpu - cpuset |= aff - up_total += 1 - cpuset_total |= cpuset - cpulist_f = _mask_to_cpulist(mask=cpuset_total) - for key in sorted(cpulist_d.keys()): - cpulist_p.append(cpulist_d[key]) - - # Determine if floating or pinned, display appropriate cpulist - if up_total > d_nrVirtCpu: - d_cpulist = cpulist_f - cpu_pinned = False - else: - d_cpulist = cpulist_p - cpu_pinned = True - - # Determine list of numa nodes (the hard way) - dom_xml = ElementTree.fromstring(dom.XMLDesc(0)) - nodeset = set([]) - for elem in dom_xml.findall('./numatune/memnode'): - nodes = range_to_list(elem.get('nodeset')) - nodeset.update(nodes) - d_nodelist = list(sorted(nodeset)) - - # Get pci info. - pci_addrs = set() - for interface in dom_xml.findall('./devices/interface'): - if interface.find('driver').get('name').startswith('vfio'): - addr_tag = interface.find('source/address') - if addr_tag.get('type') == 'pci': - pci_addr = "%04x:%02x:%02x.%01x" % ( - addr_tag.get('domain'), - addr_tag.get('bus'), - addr_tag.get('slot'), - addr_tag.get('function')) - pci_addrs.update([pci_addr]) - - # Update dictionary with per-domain information - domain = { - 'uuid': uuid, - 'state': _translate_virDomainState(d_state), - 'IsCpuPinned': cpu_pinned, - 'nr_vcpus': d_nrVirtCpu, - 'nodelist': d_nodelist, - 'cpulist': d_cpulist, - 'cpu_pinning': cpulist_d, - 'pci_addrs': pci_addrs - } - return domain - - -def get_guest_domain_by_uuid(conn, uuid): - try: - dom = conn.lookupByUUIDString(uuid) - except Exception as e: - LOG.warning("Failed to get domain for uuid=%s! 
error=%s" % (uuid, e)) - return None - domain = get_guest_domain_info(dom) - return domain diff --git a/utilities/pci-irq-affinity-agent/pci_irq_affinity/pci_irq_affinity/instance.py b/utilities/pci-irq-affinity-agent/pci_irq_affinity/pci_irq_affinity/instance.py deleted file mode 100644 index c4a546211..000000000 --- a/utilities/pci-irq-affinity-agent/pci_irq_affinity/pci_irq_affinity/instance.py +++ /dev/null @@ -1,82 +0,0 @@ -# -# Copyright (c) 2019 StarlingX. -# -# SPDX-License-Identifier: Apache-2.0 -# - -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# All Rights Reserved. -# - -""" Define instance related class""" - -from log import LOG - - -class numa_cell: - def __init__(self, id, cpuset, cpu_pinning): - self.id = id - self.cpuset = cpuset - self.cpu_pinning = cpu_pinning - - -class numa_topology: - def __init__(self, uuid, cells): - self.instance_uuid = uuid - self.cells = cells - - def vcpu_to_pcpu(self, vcpu): - for cell in self.cells: - if vcpu in cell.cpu_pinning.keys(): - return cell, cell.cpu_pinning[vcpu] - raise KeyError('Unable to find pCPU for vCPU %d' % vcpu) - - -class pci_device: - def __init__(self, addr): - self.address = addr - self.dev_id = "" - self.dev_type = "" - self.vendor_id = "" - self.product_id = "" - - -class instance: - def __init__(self, uuid, name, extra_spec): - self.uuid = uuid - self.name = name - self.extra_spec = extra_spec - self.pci_devices = set() - self.numa_topology = None - self.cpu_policy = 'shared' - - def update(self, domain): - cells = set() - for node_id in domain['nodelist']: - cell = numa_cell(node_id, range(domain['nr_vcpus']), domain['cpu_pinning']) - LOG.debug("cell_id=%s, vcpuset=%s, cpu_pinning=%s" - % (node_id, range(domain['nr_vcpus']), domain['cpu_pinning'])) - cells.update([cell]) - - self.numa_topology = numa_topology(self.uuid, cells) - if domain['IsCpuPinned']: - self.cpu_policy = 'dedicated' - else: - self.cpu_policy = 'shared' - - for pci_addr in domain['pci_addrs']: - pci_dev = pci_device(pci_addr) - self.pci_devices.update([pci_dev]) - - def get_cpu_policy(self): - return self.cpu_policy - - def get_numa_topology(self): - return self.numa_topology - - def get_extra_spec(self): - return self.extra_spec - - def get_pci_devices(self): - return self.pci_devices diff --git a/utilities/pci-irq-affinity-agent/pci_irq_affinity/pci_irq_affinity/log.py b/utilities/pci-irq-affinity-agent/pci_irq_affinity/pci_irq_affinity/log.py deleted file mode 100644 index e290f12fc..000000000 --- a/utilities/pci-irq-affinity-agent/pci_irq_affinity/pci_irq_affinity/log.py +++ /dev/null @@ -1,28 +0,0 @@ -# -# Copyright (c) 2019 StarlingX. -# -# SPDX-License-Identifier: Apache-2.0 -# - -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# All Rights Reserved. 
-# - -""" Define Logger class for this agent""" - -import logging -import logging.handlers - -_syslog_facility = 'local1' - - -LOG = logging.getLogger("pci-interrupt-affinity") -formatter = logging.Formatter("%(asctime)s %(threadName)s[%(process)d] " - "%(name)s.%(pathname)s.%(lineno)d - %(levelname)s " - "%(message)s") -handler = logging.handlers.SysLogHandler(address='/dev/log', - facility=_syslog_facility) -handler.setFormatter(formatter) -LOG.addHandler(handler) -LOG.setLevel(logging.INFO) diff --git a/utilities/pci-irq-affinity-agent/pci_irq_affinity/pci_irq_affinity/nova_provider.py b/utilities/pci-irq-affinity-agent/pci_irq_affinity/pci_irq_affinity/nova_provider.py deleted file mode 100644 index 51de754df..000000000 --- a/utilities/pci-irq-affinity-agent/pci_irq_affinity/pci_irq_affinity/nova_provider.py +++ /dev/null @@ -1,139 +0,0 @@ -# -# Copyright (c) 2019 StarlingX. -# -# SPDX-License-Identifier: Apache-2.0 -# - -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# All Rights Reserved. -# - -""" Define NovaProvider class -This class wraps novaclient access interface and expose get_instance() and -get_instances() to other agent classes. -""" - -import keyring -from novaclient import client -from keystoneauth1 import loading -from keystoneauth1 import session -import socket -from log import LOG -from config import CONF -from config import sysconfig -import instance -import guest - - -class NovaProvider: - - def __init__(self): - self._creds = self._get_keystone_creds() - self._auth = self._get_auth(self._creds) - self._hostname = self.get_hostname() - self._conn = None - - def get_hostname(self): - return socket.gethostname() - - def _get_keystone_creds(self): - creds = {} - openstackSession = 'openstack' - options = ['username', 'user_domain_name', 'project_name', - 'project_domain_name', 'keyring_service', 'auth_url'] - - try: - for option in options: - creds[option] = sysconfig.get(openstackSession, option) - - creds['password'] = keyring.get_password(creds.pop('keyring_service'), - creds['username']) - - except Exception as e: - LOG.error("Could not get keystone creds configuration! Err=%s" % e) - creds = None - - return creds - - def _get_auth(self, creds): - - if creds is not None: - loader = loading.get_plugin_loader('password') - auth = loader.load_from_options(**creds) - return auth - return None - - def get_nova(self): - try: - sess = session.Session(auth=self._auth) - nova = client.Client('2.1', session=sess) - return nova - except Exception as e: - LOG.warning("Failed to connect to nova!") - raise Exception("could not connect nova!") - - def open_libvirt_connect(self): - self._conn = guest.connect_to_libvirt() - guest.get_host_cpu_topology() - - def close_libvirt_connect(self): - self._conn.close() - - def get_instance(self, uuid): - try: - nova = self.get_nova() - server = nova.servers.get(uuid) - flavor_info = nova.flavors.get(server.flavor["id"]) - hostname = server.__dict__['OS-EXT-SRV-ATTR:host'] - except Exception as e: - LOG.warning("Could not get instance=%s from Nova! error=%s" % (uuid, e)) - return None - - LOG.debug('GET VM:%s in node:%s' % (server.name, hostname)) - - if hostname == self._hostname: - inst = instance.instance(uuid, server.name, flavor_info.get_keys()) - # get numa topology and pci info from libvirt - try: - domain = guest.get_guest_domain_by_uuid(self._conn, uuid) - if domain: - inst.update(domain) - except Exception as e: - LOG.warning("Failed to access libvirt! 
error=%s" % e) - return inst - else: - LOG.debug('The VM is not in current host!') - return None - - def get_instances(self, filters): - instances = set() - try: - nova = self.get_nova() - filters['host'] = self._hostname - servers = nova.servers.list(detailed=True, search_opts=filters) - flavors = nova.flavors.list() - - for server in servers: - for flavor in flavors: - if flavor.id == server.flavor["id"]: - extra_spec = flavor.get_keys() - if 'hw:cpu_policy' in extra_spec \ - and extra_spec['hw:cpu_policy'] == 'dedicated': - inst = instance.instance(server.id, server.name, extra_spec) - instances.update([inst]) - # get numa topology and pci info from libvirt - if len(instances) > 0: - for inst in instances: - domain = guest.get_guest_domain_by_uuid(self._conn, inst.uuid) - inst.update(domain) - except Exception as e: - LOG.warning("Failed to get instances info! error=%s" % e) - - return instances - - -if sysconfig.get('openstack', 'openstack_enabled') == 'true': - novaClient = NovaProvider() -else: - novaClient = None diff --git a/utilities/pci-irq-affinity-agent/pci_irq_affinity/pci_irq_affinity/utils.py b/utilities/pci-irq-affinity-agent/pci_irq_affinity/pci_irq_affinity/utils.py deleted file mode 100644 index 397cb7f75..000000000 --- a/utilities/pci-irq-affinity-agent/pci_irq_affinity/pci_irq_affinity/utils.py +++ /dev/null @@ -1,291 +0,0 @@ -# -# Copyright (c) 2019 StarlingX. -# -# SPDX-License-Identifier: Apache-2.0 -# - -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# All Rights Reserved. -# - -""" Define utility functions for this agent""" - -import os -import errno -from itertools import groupby - -from log import LOG -import instance - - -def list_to_range(input_list=None): - """Convert a list into a string of comma separate ranges. - - E.g., [1,2,3,8,9,15] is converted to '1-3,8-9,15' - """ - if input_list is None: - return '' - if len(input_list) < 3: - return ','.join(str(x) for x in input_list) - else: - G = (list(x) for _, x in groupby(enumerate(input_list), - lambda i, x: i - x)) - return ','.join( - '-'.join(map(str, (g[0][1], g[-1][1])[:len(g)])) for g in G) - - -def parse_cpu_spec(spec): - """Parse a CPU set specification. - - Each element in the list is either a single CPU number, a range of - CPU numbers, or a caret followed by a CPU number to be excluded - from a previous range. 
- - :param spec: cpu set string eg "1-4,^3,6" - - :returns: a set of CPU indexes - """ - cpuset_ids = set() - cpuset_reject_ids = set() - for rule in spec.split(','): - rule = rule.strip() - # Handle multi ',' - if len(rule) < 1: - continue - # Note the count limit in the .split() call - range_parts = rule.split('-', 1) - if len(range_parts) > 1: - reject = False - if range_parts[0] and range_parts[0][0] == '^': - reject = True - range_parts[0] = str(range_parts[0][1:]) - - # So, this was a range; start by converting the parts to ints - try: - start, end = [int(p.strip()) for p in range_parts] - except ValueError: - raise Exception("Invalid range expression %r" % rule) - # Make sure it's a valid range - if start > end: - raise Exception("Invalid range expression %r" % rule) - # Add available CPU ids to set - if not reject: - cpuset_ids |= set(range(start, end + 1)) - else: - cpuset_reject_ids |= set(range(start, end + 1)) - elif rule[0] == '^': - # Not a range, the rule is an exclusion rule; convert to int - try: - cpuset_reject_ids.add(int(rule[1:].strip())) - except ValueError: - raise Exception("Invalid exclusion expression %r" % rule) - else: - # OK, a single CPU to include; convert to int - try: - cpuset_ids.add(int(rule)) - except ValueError: - raise Exception("Invalid inclusion expression %r" % rule) - - # Use sets to handle the exclusion rules for us - cpuset_ids -= cpuset_reject_ids - - return cpuset_ids - - -def _get_pci_irq_affinity_mask(extra_spec): - """Parse pci irq affinity mask based on flavor extra-spec. - - Returns set of vcpu ids with corresponding pci irq affinity mask. - """ - - if 'hw:pci_irq_affinity_mask' in extra_spec: - pci_irq_affinity_mask = extra_spec['hw:pci_irq_affinity_mask'] - LOG.info("pci_irq_affinity_mask: %s" % pci_irq_affinity_mask) - else: - LOG.info('Not set pci_irq_affinity_mask!') - return None - - cpuset_ids = parse_cpu_spec(pci_irq_affinity_mask) - if not cpuset_ids: - raise Exception("No CPUs available after parsing %r" % pci_irq_affinity_mask) - return cpuset_ids - - -def get_irqs_by_pci_address(pci_addr): - """Get list of PCI IRQs based on a VF's pci address - - Raises PciDeviceNotFoundById in case the pci device is not found, - or when there is an underlying problem getting associated irqs. 
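parse_cpu_spec above is the parser behind the hw:pci_irq_affinity_mask flavor extra-spec consumed by _get_pci_irq_affinity_mask: ranges add CPUs and a leading caret excludes them. Its own docstring example works out to:

    >>> sorted(parse_cpu_spec("1-4,^3,6"))
    [1, 2, 4, 6]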
- :param pci_addr: PCI address - :return: irqs, msi_irqs - """ - irqs = set() - msi_irqs = set() - - dev_path = "/sys/bus/pci/devices/%s" % (pci_addr) - if not os.path.isdir(dev_path): - raise Exception("PciDeviceNotFoundById id = %r" % pci_addr) - - _irqs = set() - irq_path = "%s/irq" % (dev_path) - try: - with open(irq_path) as f: - _irqs.update([int(x) for x in f.readline().split() if int(x) > 0]) - except Exception as e: - LOG.error('get_irqs_by_pci_address: ' - 'pci_addr=%(A)s: irq_path=%(P)s; error=%(E)s', - {'A': pci_addr, 'P': irq_path, 'E': e}) - raise Exception("PciDeviceNotFoundById id = %r" % pci_addr) - - _msi_irqs = set() - msi_path = "%s/msi_irqs" % (dev_path) - try: - _msi_irqs.update([int(x) for x in os.listdir(msi_path) if int(x) > 0]) - except OSError as e: - # msi_path disappears during configuration; do not treat - # non-existance as fatal - if e.errno == errno.ENOENT: - return (irqs, msi_irqs) - else: - LOG.error('get_irqs_by_pci_address: ' - 'pci_addr=%(A)s: msi_path=%(P)s; error=%(E)s', - {'A': pci_addr, 'P': msi_path, 'E': e}) - raise Exception("PciDeviceNotFoundById id = %r" % pci_addr) - except Exception as e: - LOG.error('get_irqs_by_pci_address: ' - 'pci_addr=%(A)s: msi_path=%(P)s; error=%(E)s', - {'A': pci_addr, 'P': msi_path, 'E': e}) - raise Exception("PciDeviceNotFoundById id = %r" % pci_addr) - - # Return only configured irqs, ignore any that are missing. - for irq in _irqs: - irq_path = "/proc/irq/%s" % (irq) - if os.path.isdir(irq_path): - irqs.update([irq]) - for irq in _msi_irqs: - irq_path = "/proc/irq/%s" % (irq) - if os.path.isdir(irq_path): - msi_irqs.update([irq]) - return (irqs, msi_irqs) - - -def get_pci_irqs_pinned_cpuset(extra_spec=None, numa_topology=None, - pci_numa_node=None): - """Get pinned cpuset where pci irq are affined. - - :param extra_spec: extra_spec - :param pci_numa_node: numa node of a specific PCI device - :param numa_topology: instance numa topology - :return: cpuset, cpulist - """ - cpuset = set() - cpulist = '' - - LOG.debug("extra_spec:%s, topo:%s, numa_node:%s" % (extra_spec, numa_topology, pci_numa_node)) - if numa_topology is None or pci_numa_node is None or pci_numa_node < 0: - return (cpuset, cpulist) - - # Determine full affinity cpuset, but restrict to pci's numa node - for cell in numa_topology.cells: - if cell.id == pci_numa_node and cell.cpu_pinning is not None: - cpuset.update(set(cell.cpu_pinning.values())) - LOG.info("pinning pcpu list:%s" % cpuset) - - # Use extra-spec hw:pci_irq_affinity_mask only when the instance is pinned. - if cpuset: - pci_cpuset = _get_pci_irq_affinity_mask(extra_spec) - if pci_cpuset: - cpuset = set() - for cell in numa_topology.cells: - if cell.cpu_pinning is not None: - for vcpu in cell.cpuset: - if vcpu in pci_cpuset: - vcpu_cell, pcpu = numa_topology.vcpu_to_pcpu(vcpu) - cpuset.update(set([pcpu])) - - cpulist = list_to_range(input_list=list(cpuset)) - return (cpuset, cpulist) - - -def set_irq_affinity(set_bitmap, irqs, cpulist): - """Set irq affinity to the specified cpulist for list of irqs. 
- - :param set_bitmap: True: set bitmap file, False: set list file - :param irqs: irq list - :param cpulist: cpu list - """ - _irqs = set() - - if set_bitmap: - filename = 'smp_affinity' - else: - filename = 'smp_affinity_list' - - for irq in irqs: - irq_aff_path = "/proc/irq/%s/%s" % (irq, filename) - try: - with open(irq_aff_path, 'w') as f: - f.write(cpulist) - _irqs.update([irq]) - except Exception as e: - LOG.warning("Failed to write pci affine file:%(F)s, irq:%(I)s, " - "error=%(E)s" - % {"F": filename, "I": irq, "E": e}) - return _irqs - - -def set_irqs_affinity_by_pci_address(pci_addr, extra_spec=None, - numa_topology=None): - """Set cpu affinity for list of PCI IRQs with a VF's pci address, - - Restrict cpuset to the numa node of the PCI. - Return list - Raises PciDeviceNotFoundById in case the pci device is not found, - or when there is an underlying problem getting associated irqs. - :param pci_addr: PCI address - :param extra_spec: extra_spec - :param numa_topology: instance numa topology - :return: irqs, msi_irqs, numa_node, cpulist - """ - irqs = set() - msi_irqs = set() - numa_node = None - cpulist = '' - - if numa_topology is None: - return (irqs, msi_irqs, numa_node, cpulist) - - # Get the irqs associated with pci addr - _irqs, _msi_irqs = get_irqs_by_pci_address(pci_addr) - LOG.debug("pci: %s, irqs: %s, msi_irqs: %s" % (pci_addr, _irqs, _msi_irqs)) - - # Obtain physical numa_node for this pci addr - numa_path = "/sys/bus/pci/devices/%s/numa_node" % (pci_addr) - try: - with open(numa_path) as f: - numa_node = [int(x) for x in f.readline().split()][0] - except Exception as e: - LOG.error('set_irqs_affinity_by_pci_address: ' - 'pci_addr=%(A)s: numa_path=%(P)s; error=%(E)s', - {'A': pci_addr, 'P': numa_path, 'E': e}) - raise Exception("PciDeviceNotFoundById id = %r" % pci_addr) - # Skip irq configuration if there is no associated numa node - if numa_node is None or numa_node < 0: - return (irqs, msi_irqs, numa_node, cpulist) - - # Determine the pinned cpuset where irqs are to be affined - cpuset, cpulist = get_pci_irqs_pinned_cpuset(extra_spec, - numa_topology, - numa_node) - - LOG.debug("cpuset where irqs are to be affined:%s or %s" % (cpuset, cpulist)) - - # Skip irq configuration if there are no pinned cpus - if not cpuset: - return (irqs, msi_irqs, numa_node, cpulist) - - # Set IRQ affinity, but do not treat errors as fatal. - irqs = set_irq_affinity(False, _irqs, cpulist) - msi_irqs = set_irq_affinity(False, _msi_irqs, cpulist) - return (irqs, msi_irqs, numa_node, cpulist) diff --git a/utilities/pci-irq-affinity-agent/pci_irq_affinity/setup.py b/utilities/pci-irq-affinity-agent/pci_irq_affinity/setup.py deleted file mode 100644 index 6e6806641..000000000 --- a/utilities/pci-irq-affinity-agent/pci_irq_affinity/setup.py +++ /dev/null @@ -1,35 +0,0 @@ -# -# Copyright (c) 2019 StarlingX. 
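set_irq_affinity above supports both /proc interfaces for IRQ pinning: /proc/irq/<N>/smp_affinity takes a hex CPU bitmap, while smp_affinity_list takes the human-readable range syntax, so pinning to CPUs 2-3 is written as "c" to the former and "2-3" to the latter. The agent uses the bitmap form when restoring defaults from /proc/irq/default_smp_affinity and the list form when affining a guest's devices, e.g. (IRQ numbers illustrative):

    # set_bitmap=False selects smp_affinity_list, as
    # set_irqs_affinity_by_pci_address does for pinned guests; the return
    # value contains only the IRQs whose affinity file was written successfully.
    affined = set_irq_affinity(False, set([44, 45]), '2-3')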
-# -# SPDX-License-Identifier: Apache-2.0 -# -# flake8: noqa -# -from setuptools import setup, find_packages - -setup( - name='pci-irq-affinity-agent', - description='PCI Interrupt Affinity Agent', - version='1.0.0', - classifiers=[ - 'Environment :: OpenStack', - 'Intended Audience :: Information Technology', - 'Intended Audience :: System Administrators', - 'License :: OSI Approved :: Apache Software License', - 'Operating System :: POSIX :: Linux', - 'Programming Language :: Python', - 'Programming Language :: Python :: 2', - 'Programming Language :: Python :: 2.7', - 'Programming Language :: Python :: 2.6', - ], - license='Apache-2.0', - platforms=['any'], - provides='pci_irq_affinity_agent', - packages=find_packages(), - include_package_data=False, - entry_points={ - 'console_scripts': [ - 'pci-irq-affinity-agent = pci_irq_affinity.agent:process_main', - ], - } -) diff --git a/utilities/platform-util/centos/build_srpm.data b/utilities/platform-util/centos/build_srpm.data deleted file mode 100644 index 9d0006ccf..000000000 --- a/utilities/platform-util/centos/build_srpm.data +++ /dev/null @@ -1,4 +0,0 @@ -SRC_DIR="platform-util" -COPY_LIST_TO_TAR="scripts" - -TIS_PATCH_VER=17 diff --git a/utilities/platform-util/centos/platform-util.spec b/utilities/platform-util/centos/platform-util.spec deleted file mode 100644 index b54116b49..000000000 --- a/utilities/platform-util/centos/platform-util.spec +++ /dev/null @@ -1,110 +0,0 @@ -Summary: platform-util -Name: platform-util -Version: 1.0 -Release: %{tis_patch_ver}%{?_tis_dist} -License: Apache-2.0 -Group: base -Packager: Wind River -URL: unknown -BuildArch: noarch -Source: %name-%version.tar.gz - -BuildRequires: python-setuptools -BuildRequires: python2-pip -BuildRequires: python2-wheel - -%global _buildsubdir %{_builddir}/%{name}-%{version} - -%description -Platform utilities - -%package -n platform-util-noncontroller -Summary: non controller platform utilities - -%description -n platform-util-noncontroller -Platform utilities that don't get packaged on controller hosts - -%define local_dir /usr/local -%define local_bindir %{local_dir}/bin -%define local_sbindir %{local_dir}/sbin -%define pythonroot /usr/lib64/python2.7/site-packages -%define local_etc_initd %{_sysconfdir}/init.d - -%prep -%setup - -%build -%{__python} setup.py build -%py2_build_wheel - -%install - - -%{__python} setup.py install --root=$RPM_BUILD_ROOT \ - --install-lib=%{pythonroot} \ - --prefix=/usr \ - --install-data=/usr/share \ - --single-version-externally-managed - -mkdir -p $RPM_BUILD_ROOT/wheels -install -m 644 dist/*.whl $RPM_BUILD_ROOT/wheels/ - -install -d %{buildroot}%{local_bindir} -install %{_buildsubdir}/scripts/cgcs_tc_setup.sh %{buildroot}%{local_bindir} -install %{_buildsubdir}/scripts/remotelogging_tc_setup.sh %{buildroot}%{local_bindir} -install %{_buildsubdir}/scripts/connectivity_test %{buildroot}%{local_bindir} - -install -d %{buildroot}%{local_etc_initd} -install %{_buildsubdir}/scripts/log_functions.sh %{buildroot}%{local_etc_initd} - -install -d %{buildroot}%{local_sbindir} -install -m 700 -P -D %{_buildsubdir}/scripts/patch-restart-mtce %{buildroot}%{local_sbindir} -install -m 700 -p -D %{_buildsubdir}/scripts/patch-restart-processes %{buildroot}%{local_sbindir} -install -m 700 -p -D %{_buildsubdir}/scripts/patch-restart-haproxy %{buildroot}%{local_sbindir} - -install -d %{buildroot}/etc/systemd/system -install -m 644 -p -D %{_buildsubdir}/scripts/opt-platform.mount %{buildroot}/etc/systemd/system -install -m 644 -p -D 
%{_buildsubdir}/scripts/opt-platform.service %{buildroot}/etc/systemd/system - -# Mask the systemd ctrl-alt-delete.target, to disable reboot on ctrl-alt-del -ln -sf /dev/null %{buildroot}/etc/systemd/system/ctrl-alt-del.target - -%clean -rm -rf $RPM_BUILD_ROOT - -%post -n platform-util-noncontroller -mkdir -p /opt/platform -systemctl enable opt-platform.service - -%files -%license LICENSE -%defattr(-,root,root,-) -/usr/bin/verify-license -%{local_bindir}/cgcs_tc_setup.sh -%{local_bindir}/remotelogging_tc_setup.sh -%{local_bindir}/connectivity_test -%{local_sbindir}/patch-restart-mtce -%{local_sbindir}/patch-restart-processes -%{local_sbindir}/patch-restart-haproxy -/etc/systemd/system/ctrl-alt-del.target -%dir %{pythonroot}/platform_util -%{pythonroot}/platform_util/* -%dir %{pythonroot}/platform_util-%{version}.0-py2.7.egg-info -%{pythonroot}/platform_util-%{version}.0-py2.7.egg-info/* -%{local_etc_initd}/log_functions.sh - -%files -n platform-util-noncontroller -%defattr(-,root,root,-) -# This is necessary to mask opt-platform.mount, so that the version generated -# from parsing the fstab is not used by systemd. -/etc/systemd/system/opt-platform.mount -/etc/systemd/system/opt-platform.service - -%package wheels -Summary: %{name} wheels - -%description wheels -Contains python wheels for %{name} - -%files wheels -/wheels/* diff --git a/utilities/platform-util/platform-util/LICENSE b/utilities/platform-util/platform-util/LICENSE deleted file mode 100644 index d64569567..000000000 --- a/utilities/platform-util/platform-util/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. 
For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/utilities/platform-util/platform-util/platform_util/__init__.py b/utilities/platform-util/platform-util/platform_util/__init__.py deleted file mode 100644 index 3c32fa4c5..000000000 --- a/utilities/platform-util/platform-util/platform_util/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -# -# Copyright (c) 2017 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# diff --git a/utilities/platform-util/platform-util/platform_util/i18n.py b/utilities/platform-util/platform-util/platform_util/i18n.py deleted file mode 100644 index a11d35728..000000000 --- a/utilities/platform-util/platform-util/platform_util/i18n.py +++ /dev/null @@ -1,15 +0,0 @@ -# -# Copyright (c) 2017 Wind River Systems, Inc. 
-# -# SPDX-License-Identifier: Apache-2.0 -# -import oslo_i18n - -DOMAIN = 'platform-util' - -_translators = oslo_i18n.TranslatorFactory(domain=DOMAIN) -_ = _translators.primary - -_LI = _translators.log_info -_LW = _translators.log_warning -_LE = _translators.log_error diff --git a/utilities/platform-util/platform-util/platform_util/license/__init__.py b/utilities/platform-util/platform-util/platform_util/license/__init__.py deleted file mode 100644 index 3c32fa4c5..000000000 --- a/utilities/platform-util/platform-util/platform_util/license/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -# -# Copyright (c) 2017 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# diff --git a/utilities/platform-util/platform-util/platform_util/license/constants.py b/utilities/platform-util/platform-util/platform_util/license/constants.py deleted file mode 100644 index 621a0f08a..000000000 --- a/utilities/platform-util/platform-util/platform_util/license/constants.py +++ /dev/null @@ -1,66 +0,0 @@ -# -# Copyright (c) 2017 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# coding=utf-8 -# - -# All available licenses -LICENSE_FEATURE_STD = "WR_TS" -LICENSE_FEATURE_STD_EVAL = "WR_TS_EVAL" -LICENSE_FEATURE_AIO = "WR_TS_CPE" -LICENSE_FEATURE_AIO_EVAL = "WR_TS_CPE_EVAL" -LICENSE_FEATURE_SIMPLEX = "WR_TS_CPE_SX" -LICENSE_FEATURE_SIMPLEX_EVAL = "WR_TS_CPE_SX_EVAL" - -# All supporting license names -STD_PRODUCT_CFG = "Standard_Product_Configuration" -AIO_PRODUCT_CFG = "All-In-One_Product_Configuration" -AIO_SX_PRODUCT_CFG = "All-In-One_Simplex_Product_Configuration" - -# All supporting licenses list -LICENSE_NAMES = [ - STD_PRODUCT_CFG, - AIO_PRODUCT_CFG, - AIO_SX_PRODUCT_CFG -] - -# License mapping -LICENSE_MAP = { - LICENSE_FEATURE_STD: STD_PRODUCT_CFG, - LICENSE_FEATURE_AIO: AIO_PRODUCT_CFG, - LICENSE_FEATURE_SIMPLEX: AIO_SX_PRODUCT_CFG, - LICENSE_FEATURE_STD_EVAL: STD_PRODUCT_CFG, - LICENSE_FEATURE_AIO_EVAL: AIO_PRODUCT_CFG, - LICENSE_FEATURE_SIMPLEX_EVAL: AIO_SX_PRODUCT_CFG, -} - -# Product licenses lists -STD_SYSTEM_LICENSES = [LICENSE_FEATURE_STD, LICENSE_FEATURE_STD_EVAL] -AIO_SYSTEM_LICENSES = [LICENSE_FEATURE_AIO, LICENSE_FEATURE_AIO_EVAL] -AIO_SIMPLEX_SYSTEM_LICENSES = [LICENSE_FEATURE_SIMPLEX, LICENSE_FEATURE_SIMPLEX_EVAL] - -# License check error types -NO_FEATURE_LICENSE_ERR = "No such feature exists" -EXPIRED_LICENSE_ERR = "Feature has expired" -VERSION_LICENSE_ERR = "License file does not support this version" - -# License limits -LICENSE_DATE_TEXT_MAX_CHAR = 32 -LICENSE_ERR_MSG_MAX_CHAR = 512 -LICENSE_VENDOR_MAX_CHAR = 128 - -# Package name prefix -PACKAGE_PREFIX = "NL_TS" -# Feature name prefix -FEATURE_PREFIX = "WR_TS" - -# License status -INSTALLED = "Installed" -NOT_INSTALLED = "Not-installed" -INVALID = "Invalid" - -EXPIRED = "Expired" diff --git a/utilities/platform-util/platform-util/platform_util/license/exception.py b/utilities/platform-util/platform-util/platform_util/license/exception.py deleted file mode 100644 index b940d4c6c..000000000 --- a/utilities/platform-util/platform-util/platform_util/license/exception.py +++ /dev/null @@ -1,40 +0,0 @@ -# -# Copyright (c) 2018 Wind River Systems, Inc. 
-#
-# SPDX-License-Identifier: Apache-2.0
-#
-
-
-class ValidateError(Exception):
-    """Base class for license validation exceptions"""
-
-    def __init__(self, message=None):
-        self.message = message
-
-    def __str__(self):
-        return self.message or ""
-
-
-class InvalidLicense(ValidateError):
-    """Generic invalid license error"""
-    pass
-
-
-class ExpiredLicense(ValidateError):
-    """Expired license error"""
-    pass
-
-
-class InvalidLicenseVersion(ValidateError):
-    """Invalid license version error"""
-    pass
-
-
-class InvalidLicenseType(ValidateError):
-    """Invalid license type error"""
-    pass
-
-
-class LicenseNotFound(ValidateError):
-    """License not found error"""
-    pass
diff --git a/utilities/platform-util/platform-util/platform_util/license/license.py b/utilities/platform-util/platform-util/platform_util/license/license.py
deleted file mode 100644
index 57c47204b..000000000
--- a/utilities/platform-util/platform-util/platform_util/license/license.py
+++ /dev/null
@@ -1,211 +0,0 @@
-#
-# Copyright (c) 2017-2018 Wind River Systems, Inc.
-#
-# SPDX-License-Identifier: Apache-2.0
-#
-from ctypes import cdll
-from ctypes import util
-from ctypes import c_bool
-from ctypes import c_int
-from ctypes import c_char_p
-from ctypes import pointer
-from ctypes import create_string_buffer
-import logging
-import os
-from platform_util.license import constants
-from platform_util.license import exception
-import re
-import sys
-from sysinv.common import constants as sysinv_constants
-from tsconfig.tsconfig import system_type
-from tsconfig.tsconfig import system_mode
-from tsconfig.tsconfig import SW_VERSION
-
-LOG = logging.getLogger(__name__)
-
-sm_common = cdll.LoadLibrary(util.find_library("sm_common"))
-
-
-class c_char_p_sub(c_char_p):
-    pass
-
-
-def get_licenses_info():
-    """Get the license information"""
-
-    feature_list = []
-    sm_common.flex_lm_license_get_feature_list.restype = c_char_p_sub
-    features = sm_common.flex_lm_license_get_feature_list()
-    if features.value:
-        feature_list = [feature for feature in features.value.split(',')
-                        if feature.startswith(constants.FEATURE_PREFIX)]
-    sm_common.flex_lm_license_free(features)
-
-    lc_attrs_list = []
-    licenses = [license for license in constants.LICENSE_NAMES]
-    for feature in feature_list:
-        try:
-            lc_attrs = verify_feature_license(feature)
-        except Exception as e:
-            # Set the license attributes for installed expired licenses
-            if constants.EXPIRED.lower() in str(e):
-                process_license = True
-                status = constants.INSTALLED
-                expiry_date = constants.EXPIRED
-            # Set the license attributes for installed invalid licenses
-            elif (constants.INVALID.lower() in str(e) or
-                  constants.VERSION_LICENSE_ERR in str(e)):
-                process_license = True
-                status = constants.INVALID
-                expiry_date = '-'
-            # Unknown license
-            else:
-                process_license = False
-                LOG.warning("Feature %s is not supported."
-                            % feature)
-
-            # Send supporting licenses only
-            name = constants.LICENSE_MAP.get(feature)
-            if process_license and name:
-                lc_attrs = dict(name=name, status=status,
-                                expiry_date=expiry_date)
-            else:
-                lc_attrs = dict()
-
-        if lc_attrs:
-            license_name = lc_attrs.get('name')
-            if (not any(lc.get('name') == license_name
-                        for lc in lc_attrs_list)):
-                # Get the list of license attributes for all valid
-                # licenses and installed expired/invalid licenses
-                lc_attrs_list.append(lc_attrs)
-                if license_name in licenses:
-                    # Get the list of not-installed license names
-                    licenses.remove(license_name)
-
-    # Set the license attributes for all
-    # not-installed licenses
-    for license_name in licenses:
-        lc_attrs = dict(name=license_name,
-                        status=constants.NOT_INSTALLED,
-                        expiry_date='-')
-        lc_attrs_list.append(lc_attrs)
-
-    # Return the list of license attributes
-    # for all supporting licenses
-    return lc_attrs_list
-
-
-def verify_feature_license(feature_name, feature_version=None):
-    """Verify a license of a feature"""
-
-    valid = pointer(c_bool(0))
-
-    if not feature_version:
-        feature_version = SW_VERSION
-
-    expire_days_left = pointer(c_int(0))
-    expire_date_text = create_string_buffer(
-        constants.LICENSE_DATE_TEXT_MAX_CHAR)
-    vendor = create_string_buffer(
-        constants.LICENSE_VENDOR_MAX_CHAR)
-    err_msg = create_string_buffer(
-        constants.LICENSE_ERR_MSG_MAX_CHAR)
-
-    LOG.info("License check. License feature name=%s version=%s",
-             feature_name, feature_version)
-    feature_check = sm_common.sm_license_check(valid,
-                                               feature_name,
-                                               feature_version,
-                                               expire_days_left,
-                                               expire_date_text,
-                                               vendor, err_msg)
-    sm_common.sm_error_str.restype = c_char_p
-    if (sm_common.sm_error_str(feature_check) != 'OKAY' or
-            (not valid.contents.value)):
-
-        LOG.error("License check error, error = %s", err_msg.value)
-        msg = "ERROR: License check failed; "
-        if constants.NO_FEATURE_LICENSE_ERR in err_msg.value:
-            msg += "the license file does not contain the required license."
-            raise exception.LicenseNotFound(msg)
-        elif constants.EXPIRED_LICENSE_ERR in err_msg.value:
-            msg += "the license file contains a license that is expired."
-            raise exception.ExpiredLicense(msg)
-        elif constants.VERSION_LICENSE_ERR in err_msg.value:
-            msg += "the license file contains a license which is NOT applicable " \
-                   "to the current system software version."
-            raise exception.InvalidLicenseVersion(msg)
-        else:
-            msg += "the license file contains an invalid license."
-            raise exception.InvalidLicense(msg)
-
-    vendor = re.search(r'\<name\>(.*?)\<\/name\>', vendor.value)
-    if vendor:
-        license_name = vendor.group(1)
-    else:
-        license_name = constants.LICENSE_MAP.get(feature_name)
-
-    # Return license attributes of a valid license
-    lc_attrs = dict(name=license_name, status=constants.INSTALLED,
-                    expiry_date=expire_date_text.value)
-
-    return lc_attrs
-
-
-def verify_license(license_file):
-    """Verify all features in a license file"""
-
-    os.environ["LM_LICENSE_FILE"] = license_file
-    os.environ["WIND_LICENSE_PROXY"] = "/usr/bin/wrlmproxy-5.0.2"
-
-    # Get all features in the license file
-    feature_list = []
-    sm_common.flex_lm_license_get_feature_list.restype = c_char_p_sub
-    features = sm_common.flex_lm_license_get_feature_list()
-    if features.value:
-        feature_list = [feature for feature in features.value.split(',')
-                        if feature.startswith(constants.FEATURE_PREFIX)]
-    sm_common.flex_lm_license_free(features)
-
-    # Validate license of each feature in the license file
-    for feature in feature_list:
-        verify_feature_license(feature)
-
-    if system_type == sysinv_constants.TIS_AIO_BUILD:
-        if system_mode == sysinv_constants.SYSTEM_MODE_SIMPLEX:
-            product_license = constants.AIO_SIMPLEX_SYSTEM_LICENSES
-        elif (system_mode == sysinv_constants.SYSTEM_MODE_DUPLEX or
-              system_mode == sysinv_constants.SYSTEM_MODE_DUPLEX_DIRECT):
-            product_license = constants.AIO_SYSTEM_LICENSES
-    elif system_type == sysinv_constants.TIS_STD_BUILD:
-        product_license = constants.STD_SYSTEM_LICENSES
-
-    # Verify the right product license is installed
-    if not any(feature in feature_list for feature in product_license):
-        raise exception.InvalidLicenseType(
-            "ERROR: License check failed; the license file does not contain a "
-            "product license for the current %s/%s." % (system_type, system_mode))
-
-    # Verify the licensed tech-preview technologies (e.g. baremetal, container).
-    # Check if magnum or ironic services are currently running.
-    # If yes, verify the feature licenses for magnum/ironic are licensed in the
-    # license file.
-
-
-def main():
-    if len(sys.argv) == 2:
-        licensefile = sys.argv[1]
-    else:
-        print("Usage: verify-license <license_file>")
-        exit(-1)
-
-    try:
-        verify_license(licensefile)
-    except exception.InvalidLicenseType:
-        exit(1)
-    except exception.LicenseNotFound:
-        exit(2)
-    except exception.ExpiredLicense:
-        exit(3)
-    except exception.InvalidLicenseVersion:
-        exit(4)
-    except exception.InvalidLicense:
-        exit(5)
diff --git a/utilities/platform-util/platform-util/setup.py b/utilities/platform-util/platform-util/setup.py
deleted file mode 100644
index a02cf139a..000000000
--- a/utilities/platform-util/platform-util/setup.py
+++ /dev/null
@@ -1,20 +0,0 @@
-#
-# Copyright (c) 2017-2018 Wind River Systems, Inc.
-#
-# SPDX-License-Identifier: Apache-2.0
-#
-import setuptools
-
-setuptools.setup(
-    name='platform_util',
-    description='Platform Util',
-    version='1.0.0',
-    license='Apache-2.0',
-    platforms=['any'],
-    packages=['platform_util', 'platform_util.license'],
-    entry_points={
-        'console_scripts': [
-            'verify-license = platform_util.license.license:main'
-        ],
-    }
-)
diff --git a/utilities/platform-util/scripts/LICENSE b/utilities/platform-util/scripts/LICENSE
deleted file mode 100644
index d64569567..000000000
--- a/utilities/platform-util/scripts/LICENSE
+++ /dev/null
@@ -1,202 +0,0 @@
-
-                                 Apache License
-                           Version 2.0, January 2004
-                        http://www.apache.org/licenses/
-
-   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-   1. Definitions.
- - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/utilities/platform-util/scripts/cgcs_tc_setup.sh b/utilities/platform-util/scripts/cgcs_tc_setup.sh deleted file mode 100755 index 581300537..000000000 --- a/utilities/platform-util/scripts/cgcs_tc_setup.sh +++ /dev/null @@ -1,518 +0,0 @@ -#!/bin/sh - -# -# Copyright (c) 2017-2018 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - -# $1 - interface -# $2 - interface type [mgmt, infra] -# $3 - link capacity -# $4 - dummy used to determine if we're backgrounded or not - -DEV=$1 -NETWORKTYPE=$2 -NETWORKSPEED=$3 - -if [ ${NETWORKTYPE} != "mgmt" -a ${NETWORKTYPE} != "infra" ]; then - exit 0 -fi - -# We want to be able to wait some time (typically <10 sec) for the -# network link to autonegotiate link speed. Re-run the script in -# the background so the parent can return right away and init can -# continue. -if [ $# -eq 3 ]; then - $0 $DEV $NETWORKTYPE $NETWORKSPEED dummy & - disown - exit 0 -fi - -function test_valid_speed { - # After the link is enabled but before the autonegotiation is complete - # the link speed may be read as either -1 or as 4294967295 (which is - # uint(-1) in twos-complement) depending on the kernel. Neither one is valid. - if (( $1 > 0 )) && (( $1 != 4294967295 )) - then - return 0 - else - return 1 - fi -} - -function log { - # It seems that syslog isn't yet running, so append directly to the syslog file - FILE=/var/log/platform.log - echo `date +%FT%T.%3N` `hostname` CGCS_TC_SETUP: $@ >> $FILE -} - -function infra_exists { - if [ -z "$infrastructure_interface" ]; then - return 1 - else - return 0 - fi -} - -function is_consolidated { - if ! infra_exists - then - return 1 - fi - - local INFRA=$infrastructure_interface - local MGMT=$management_interface - - # determine whether the management interface is a parent of the - # infrastructure interface based on name. - # eg. this matches enp0s8 to enp0s8.10 but not enp0s88 - if [[ $INFRA =~ $MGMT[\.][0-9]+$ ]]; then - return 0 - fi - return 1 -} - -function is_vlan { - if [ -f /proc/net/vlan/$DEV ]; then - return 0 - else - return 1 - fi -} - -function is_loopback { - # (from include/uapi/linux/if.h) - # IFF_LOOPBACK = 1<<3 = 8. Using a left shifted syntax can confuse bashate. - IFF_LOOPBACK=8 - - # get the interface flags - FLAGS=`cat /sys/class/net/$DEV/flags` - - if ((($IFF_LOOPBACK & $FLAGS) == 0)) - then - return 1 - else - return 0 - fi -} - -function get_tc_filter_ethertype { - local ETHERTYPE=$DEFAULT_ETHERTYPE - - if is_consolidated - then - if ! is_vlan - then - # If we have a consolidated VLAN interface, we must set the - # protocol to '802.1q' for the underlying Ethernet interface - # to be able to match on IP packets coming from the VLAN - # interface. 
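-                # For example, with mgmt enp0s8 carrying infra enp0s8.10
-                # (the interface names used in the is_consolidated comment
-                # above), the filters built later are installed on enp0s8 as
-                #     tc filter add dev enp0s8 protocol 802.1q ... match ip ...
-                # so the u32 IP matches still apply to the VLAN-tagged
-                # packets arriving via the infra interface.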
- ETHERTYPE=802.1q - fi - fi - echo $ETHERTYPE - return 0 -} - -function setup_tc_port_filter { - local PORT=$1 - local PORTMASK=$2 - local FLOWID=$3 - local PROTOCOL=$4 - local PRIORITY=$DEFAULT_PRIORITY - local ETHERTYPE=$DEFAULT_ETHERTYPE - - ETHERTYPE=$(get_tc_filter_ethertype) - - if [ -z $PROTOCOL ]; then - # Apply to TCP and UDP - tc filter add dev $DEV protocol $ETHERTYPE parent 1:0 prio $PRIORITY \ - u32 match ip dport $PORT $PORTMASK flowid $FLOWID - tc filter add dev $DEV protocol $ETHERTYPE parent 1:0 prio $PRIORITY \ - u32 match ip sport $PORT $PORTMASK flowid $FLOWID - else - # Apply to specific protocol only - tc filter add dev $DEV protocol $ETHERTYPE parent 1:0 prio $PRIORITY \ - u32 match ip protocol $PROTOCOL 0xff match \ - ip dport $PORT $PORTMASK flowid $FLOWID - tc filter add dev $DEV protocol $ETHERTYPE parent 1:0 prio $PRIORITY \ - u32 match ip protocol $PROTOCOL 0xff match \ - ip sport $PORT $PORTMASK flowid $FLOWID - fi -} - -function setup_tc_tos_filter { - local TOS=$1 - local TOSMASK=$2 - local FLOWID=$3 - local ETHERTYPE=$4 - local PRIORITY=$5 - - if [ -z $ETHERTYPE ]; then - ETHERTYPE=$DEFAULT_ETHERTYPE - fi - - if [ -z $PRIORITY ]; then - PRIORITY=$DEFAULT_PRIORITY - fi - - tc filter add dev $DEV protocol $ETHERTYPE parent 1:0 prio $PRIORITY \ - u32 match ip tos $TOS $TOSMASK flowid $FLOWID -} - -function setup_root_tc { - # create new qdiscs, classes and queues - tc qdisc add dev $DEV root handle 1: htb default 40 - tc class add dev $DEV parent 1: classid 1:1 htb rate ${SPEED}mbit \ - burst 15k quantum 60000 -} - -function setup_default_tc { - local RATE=$1 - local CEIL=$2 - - local FLOWQ=40 - local CLASSID=1:$FLOWQ - local FLOWID=$CLASSID - - # create default qdiscs, classes - $AC $CLASSID htb rate $((${RATE}*${SPEED}/100))mbit burst 15k \ - ceil $((${CEIL}*${SPEED}/100))mbit prio 4 quantum 60000 - tc qdisc add dev $DEV parent $CLASSID handle $FLOWQ: sfq perturb 10 -} - -function setup_hiprio_tc { - local RATE=$1 - local CEIL=$2 - - local FLOWQ=10 - local CLASSID=1:$FLOWQ - local FLOWID=$CLASSID - local ETHERTYPE=$DEFAULT_ETHERTYPE - ETHERTYPE=$(get_tc_filter_ethertype) - - # create high priority qdiscs, classes, and queues - $AC $CLASSID htb rate $((${RATE}*${SPEED}/100))mbit burst 15k \ - ceil $((${CEIL}*${SPEED}/100))mbit prio 3 quantum 60000 - tc qdisc add dev $DEV parent $CLASSID handle $FLOWQ: sfq perturb 10 - - # filter for high priority traffic - setup_tc_tos_filter 0x10 0xf8 $FLOWID $ETHERTYPE - - if [ "$ETHERTYPE" != "$DEFAULT_ETHERTYPE" ]; then - # For the 'hiprio' class, a second filter at a different priority is - # needed in this case to match traffic with the default ethertype. - # (ie. high priority management traffic). 
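-        # In that case the setup_tc_tos_filter call above matched TOS 0x10
-        # inside 802.1q-tagged frames; the call below, at the next filter
-        # priority, catches the same TOS bits in untagged 'ip' frames.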
- local PRIORITY - PRIORITY=$(($DEFAULT_PRIORITY + 1)) - setup_tc_tos_filter 0x10 0xf8 $FLOWID $DEFAULT_ETHERTYPE $PRIORITY - fi -} - -function setup_migration_tc { - local RATE=$1 - local CEIL=$2 - - local FLOWQ=30 - local CLASSID=1:$FLOWQ - local FLOWID=$CLASSID - - # create migration qdiscs, classes, and queues - $AC $CLASSID htb rate $((${RATE}*${SPEED}/100))mbit burst 15k \ - ceil $((${CEIL}*${SPEED}/100))mbit prio 2 quantum 60000 - tc qdisc add dev $DEV parent $CLASSID handle $FLOWQ: sfq perturb 10 - - # Migration (TCP, ports 49152-49215) - setup_tc_port_filter 49152 0xffc0 $FLOWID $TCP - - # Migration via libvirt tunnel (TCP, port 16509) - setup_tc_port_filter 16509 0xffff $FLOWID $TCP -} - -function setup_storage_tc { - local RATE=$1 - local CEIL=$2 - - local FLOWQ=20 - local CLASSID=1:$FLOWQ - local FLOWID=$CLASSID - - # create storage qdiscs, classes, and queues - $AC $CLASSID htb rate $((${RATE}*${SPEED}/100))mbit burst 15k \ - ceil $((${CEIL}*${SPEED}/100))mbit prio 1 quantum 60000 - tc qdisc add dev $DEV parent $CLASSID handle $FLOWQ: sfq perturb 10 - - # Storage, NFS (UDP/TCP, port 2049) - setup_tc_port_filter 2049 0xffff $FLOWID - - # Storage, iSCSI (UDP/TCP, port 3260) - setup_tc_port_filter 3260 0xffff $FLOWID - - # Storage, CEPH (TCP, ports 6789,6800-7100) - PORTS=( 6789 6800 6816 6912 7040 7072 7088 ) - PORTMASKS=( 0xffff 0xfff0 0xffa0 0xff80 0xffa0 0xfff0 0xfffa ) - for idx in "${!PORTS[@]}"; do - PORT=${PORTS[$idx]} - MASK=${PORTMASKS[$idx]} - setup_tc_port_filter $PORT $MASK $FLOWID $TCP - done -} - -function setup_drbd_tc { - local RATE=$1 - local CEIL=$2 - - local FLOWQ=50 - local CLASSID=1:$FLOWQ - local FLOWID=$CLASSID - - # create DRBD qdiscs, classes and queues - $AC $CLASSID htb rate $((${RATE}*${SPEED}/100))mbit burst 15k \ - ceil $((${CEIL}*${SPEED}/100))mbit quantum 60000 - - tc qdisc add dev $DEV parent $CLASSID handle $FLOWQ: sfq perturb 10 - - # DRDB (TCP, ports 7789,7790,7791,7799) - # port 7793 is used with drdb-extension - PORTS=( 7789 7790 7791 7792 7799 7793 ) - PORTMASKS=( 0xffff 0xffff 0xffff 0xffff 0xffff 0xffff) - for idx in "${!PORTS[@]}"; do - PORT=${PORTS[$idx]} - MASK=${PORTMASKS[$idx]} - setup_tc_port_filter $PORT $MASK $FLOWID $TCP - done -} - -function setup_mgmt_tc_individual { - # Configure high priority and default traffic classes. - - setup_root_tc - - # bandwidth percentages - local HIPRIO_BW=10 - local DEFAULT_BW=10 - - # bandwidth ceiling percentages, for borrowing bandwidth. - # the management interface is not consolidated, so set the ceiling to the - # maximum rate. - local HIPRIO_CBW=100 - local DEFAULT_CBW=100 - - setup_hiprio_tc $HIPRIO_BW $HIPRIO_CBW - setup_default_tc $DEFAULT_BW $DEFAULT_CBW -} - - -function setup_mgmt_tc_vlan { - # Configure high priority and default traffic classes. - - setup_root_tc - - # bandwidth percentages - local HIPRIO_BW=10 - local DEFAULT_BW=10 - - # bandwidth ceiling percentages, for borrowing bandwidth. - # The management interface is a vlan, so reserve bandwidth - # for sibling infra vlan interfaces. - local HIPRIO_CBW=20 - local DEFAULT_CBW=20 - - setup_hiprio_tc $HIPRIO_BW $HIPRIO_CBW - setup_default_tc $DEFAULT_BW $DEFAULT_CBW -} - -function setup_mgmt_tc_consolidated { - # Configure management classes. - # All traffic coming from the infra will get treated again by the - # management traffic classes. We need to apply the same TCs as the - # infra to prevent a management application from starving the - # upper interface. 
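-    # For reference, this builds the same HTB tree as the infra path
-    # (class ids per the legend near the bottom of this script):
-    #   1:  root qdisc (htb, default 1:40)
-    #    \_ 1:1  rate ${SPEED}mbit
-    #        \_ 1:10 hiprio, 1:20 storage, 1:30 migration,
-    #           1:40 default, 1:50 drbd (controllers only)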
- setup_root_tc - setup_tc_all -} - -function setup_mgmt_tc_infra_exists { - if is_consolidated - then - # Infra over mgmt. In this case we want to reserve - # a small portion of the link for management. - setup_mgmt_tc_consolidated - else - # Only setup hiprio and default classes. - # The infra will handle storage, migration, DRBD. - if is_vlan - then - setup_mgmt_tc_vlan - else - setup_mgmt_tc_individual - fi - fi -} - -function setup_mgmt_tc_no_infra { - # Configure traffic classes for a management interface when - # no infrastructure interface exists. Configure the full - # set of TCs. - - setup_root_tc - setup_tc_all -} - -function setup_infra_tc_consolidated { - # Configure the full set of traffic classes, but leave a small - # portion of bandwidth for the management interface. - - # reserve 1% BW for management - local RESERVED - RESERVED=$((1*${SPEED}/100)) - SPEED=$((${SPEED}-${RESERVED})) - - setup_root_tc - setup_tc_all -} - -function setup_infra_tc_individual { - # Configure the full set of traffic classes. - - setup_root_tc - if is_vlan - then - # reserve 1% BW for sibling vlan interfaces - local RESERVED - RESERVED=$((1*${SPEED}/100)) - SPEED=$((${SPEED}-${RESERVED})) - fi - setup_tc_all -} - -function setup_tc_all { - # bandwidth percentages, in case of over-percentage, bandwidth is divided based - # on bandwidth ratios - local MIG_BW=30 - local STOR_BW=50 - local DRBD_BW=80 - local HIPRIO_BW=10 - local DEFAULT_BW=10 - - # bandwidth ceiling percentages, for borrowing bandwidth - local MIG_CBW=100 - local STOR_CBW=100 - local DRBD_CBW=100 - local HIPRIO_CBW=20 - local DEFAULT_CBW=20 - - setup_hiprio_tc $HIPRIO_BW $HIPRIO_CBW - setup_storage_tc $STOR_BW $STOR_CBW - setup_migration_tc $MIG_BW $MIG_CBW - setup_default_tc $DEFAULT_BW $DEFAULT_CBW - if [ $nodetype == "controller" ]; then - setup_drbd_tc $DRBD_BW $DRBD_CBW - fi -} - -function get_dev_speed { - # If the link doesn't come up we won't go enabled, so here we can - # afford to wait forever for the link. 
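-    # For a bond, the speed of each lower_* slave is read and the first
-    # sane value (see test_valid_speed) wins; plain interfaces are read
-    # directly from /sys/class/net/<dev>/speed.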
- while true; do - if [ -e /sys/class/net/$1/bonding ]; then - for VAL in `cat /sys/class/net/$1/lower_*/speed`; do - if test_valid_speed $VAL; then - log slave for bond link $1 reported speed $VAL - echo $VAL - return 0 - else - log slave for bond link $1 reported invalid speed $VAL - fi - done - log all slaves for bond link $1 reported invalid speeds, \ - will sleep 30 sec and try again - else - VAL=`cat /sys/class/net/$1/speed` - if test_valid_speed $VAL; then - log link $1 reported speed $VAL - echo $VAL - return 0 - else - log link $1 returned invalid speed $VAL, \ - will sleep 30 sec and try again - fi - fi - sleep 30 - done -} - -function get_speed { - local dev=$1 - local networktype=$2 - local net_speed=$NETWORKSPEED - local dev_speed - dev_speed=$(get_dev_speed $DEV) - local speed=$dev_speed - if [ $net_speed != $dev_speed ]; then - log WARNING: $dev has a different operational speed [$dev_speed] \ - than configured speed [$net_speed] for network type $networktype - if test_valid_speed $net_speed; then - # Use greater of configured net speed / recorded dev speed - if [ $net_speed -gt $dev_speed ]; then - speed=$net_speed - fi - fi - fi - log using speed $speed for tc filtering on $dev - echo $speed -} - - -if is_loopback -then - # mgmt/infra uses the loopback for CPE simplex - exit 0 -fi - -log running tc setup script for $DEV $NETWORKTYPE in background - -if [ -f /etc/platform/platform.conf ]; then - source /etc/platform/platform.conf -fi - -SPEED=$(get_speed $DEV $NETWORKTYPE) - -# 1:10 = high priority class -# 1:20 = storage class -# 1:30 = migration class -# 1:40 = default class -# 1:50 = DRBD class - -# generic class add preamble -AC="tc class add dev $DEV parent 1:1 classid" - -# protocol numbers -TCP=6 -UDP=17 - -# default ethertype for filters -DEFAULT_ETHERTYPE=ip - -# default priority for filters -DEFAULT_PRIORITY=1 - -# delete existing qdiscs -tc qdisc del dev $DEV root > /dev/null 2>&1 - -if [ ${NETWORKTYPE} = "mgmt" ]; then - if infra_exists - then - setup_mgmt_tc_infra_exists - else - setup_mgmt_tc_no_infra - fi -else - if is_consolidated - then - setup_infra_tc_consolidated - else - setup_infra_tc_individual - fi -fi diff --git a/utilities/platform-util/scripts/connectivity_test b/utilities/platform-util/scripts/connectivity_test deleted file mode 100644 index cce3f879f..000000000 --- a/utilities/platform-util/scripts/connectivity_test +++ /dev/null @@ -1,58 +0,0 @@ -#! /bin/bash -# -# Copyright (c) 2016 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - - -usage () -{ - echo "Usage: `basename $0` [-t TIMEOUT] [-i INTERFACE] DEST" - echo "Tests connectivity to DEST (hostname or IP). Test is done with ping/ping6." - echo "" - echo "Options:" - echo " -t TIMEOUT how long to wait before failing - defaults to 70 seconds" - echo " -i INTERFACE interface to use for ping - default is to allow ping command to select interface" - exit 1 -} - -TIMEOUT=70 -IFARG="" - -while getopts t:i: opt; do - case $opt in - t) - TIMEOUT=$OPTARG - ;; - i) - INTERFACE=$OPTARG - ;; - *) - usage - ;; - esac -done -shift $((OPTIND-1)) - -if [ -z $1 ]; then - usage -fi - -if [ ! -z "$INTERFACE" ]; then - IFARG="-I $INTERFACE" - IFMSG="over interface $INTERFACE" -fi - -DEST=$1 -echo "Checking connectivity to $DEST for up to $TIMEOUT seconds $IFMSG" -while [ "$SECONDS" -le "$TIMEOUT" ]; do - ping -c 1 $IFARG $DEST > /dev/null 2>&1 || ping6 -c 1 $IFARG $DEST > /dev/null 2>&1 - if [ $? 
-eq 0 ] - then - exit 0 - fi - sleep 1 -done - -exit 1 diff --git a/utilities/platform-util/scripts/log_functions.sh b/utilities/platform-util/scripts/log_functions.sh deleted file mode 100644 index 95a72ae2d..000000000 --- a/utilities/platform-util/scripts/log_functions.sh +++ /dev/null @@ -1,45 +0,0 @@ -#!/bin/bash -################################################################################ -# Copyright (c) 2013-2015 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# -################################################################################ - -################################################################################ -# Log if debug is enabled via LOG_DEBUG -# -################################################################################ -function log_debug { - if [ ! -z "${LOG_DEBUG}" ]; then - logger -p debug -t "$0[${PPID}]" -s "$@" 2>&1 - fi -} - -################################################################################ -# Log unconditionally to STDERR -# -################################################################################ -function log_error { - logger -p error -t "$0[${PPID}]" -s "$@" -} - -################################################################################ -# Log unconditionally to STDOUT -# -################################################################################ -function log { - logger -p info -t "$0[${PPID}]" -s "$@" 2>&1 -} - -################################################################################ -# Utility function to print the status of a command result -# -################################################################################ -function print_status { - if [ "$1" -eq "0" ]; then - echo "[ OK ]" - else - echo "[FAILED]" - fi -} diff --git a/utilities/platform-util/scripts/opt-platform.mount b/utilities/platform-util/scripts/opt-platform.mount deleted file mode 100644 index e69de29bb..000000000 diff --git a/utilities/platform-util/scripts/opt-platform.service b/utilities/platform-util/scripts/opt-platform.service deleted file mode 100644 index 6e8e06210..000000000 --- a/utilities/platform-util/scripts/opt-platform.service +++ /dev/null @@ -1,15 +0,0 @@ -[Unit] -Description=Titanium Cloud opt-platform mounter -After=sw-patch.service - -[Service] -Type=oneshot -User=root -ExecStart=/usr/bin/nfs-mount controller-platform-nfs:/opt/platform /opt/platform -ExecStop=/usr/bin/umount /opt/platform -RemainAfterExit=yes -StandardOutput=syslog+console -StandardError=syslog+console - -[Install] -WantedBy=multi-user.target diff --git a/utilities/platform-util/scripts/patch-restart-haproxy b/utilities/platform-util/scripts/patch-restart-haproxy deleted file mode 100644 index 51f9c6447..000000000 --- a/utilities/platform-util/scripts/patch-restart-haproxy +++ /dev/null @@ -1,94 +0,0 @@ -#!/bin/bash -# -# Copyright (c) 2016 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - -# -# This script provides in-service patching restart for haproxy -# - -# The patching subsystem provides a patch-functions bash source file -# with useful function and variable definitions. -# -if [ -e "/etc/patching/patch-functions" ] ; then - . 
/etc/patching/patch-functions -fi - -loginfo "----------------------------------------------" -loginfo "Haproxy No-Reboot Patching Restart Request" - -# -# Declare an overall script return code -# -declare -i GLOBAL_RC=$PATCH_STATUS_OK - -SM_RESTART_CMD="sm-restart-safe" -SM_QUERY_CMD="sm-query" -SM_ENABLED_ACTIVE="enabled-active" -DAEMON_NAME=haproxy -NEUTRON_RESTART_CMD="/bin/neutron-restart" - -# check if SM managed haproxy process is running on the controllers -if is_controller -then - if [ ! -f $PATCH_FLAGDIR/$DAEMON_NAME.restarted ] - then - ${SM_QUERY_CMD} service ${DAEMON_NAME} | grep -q ${SM_ENABLED_ACTIVE} - if [ $? -eq 0 ] - then - # The daemon is running, so restart it - loginfo "$0: Restarting ${DAEMON_NAME}" - ${SM_RESTART_CMD} service "${DAEMON_NAME}" - touch $PATCH_FLAGDIR/$DAEMON_NAME.restarted - - # Wait up to 10 seconds for service to recover - let -i UNTIL=$SECONDS+10 - while [ $UNTIL -ge $SECONDS ] - do - # Check to make sure it's running - ${SM_QUERY_CMD} service ${DAEMON_NAME} | grep -q ${SM_ENABLED_ACTIVE} - if [ $? -eq 0 ] - then - break - fi - - # Not running Let's wait a couple of seconds and check again - loginfo "$0: ${DAEMON_NAME} is not running, wait for 2 seconds" - sleep 2 - done - - ${SM_QUERY_CMD} service ${DAEMON_NAME} | grep -q ${SM_ENABLED_ACTIVE} - if [ $? -ne 0 ] - then - # Still not running! Clear the flag and mark the RC as failed - GLOBAL_RC=$PATCH_STATUS_FAILED - loginfo "$0: Failed to restart ${DAEMON_NAME}" - fi - fi - fi -fi - -# check if it is a CPE system or a compute node -if is_cpe || is_compute -then - # restart neutron-agent managed haproxy processes - if [ ! -f $NEUTRON_RESTART_CMD ] - then - loginfo "$0: ${NEUTRON_RESTART_CMD} is not available for restarting ${DAEMON_NAME}" - GLOBAL_RC=$PATCH_STATUS_FAILED - else - loginfo "$0: Restarting neutron agent managed ${DAEMON_NAME}" - ${NEUTRON_RESTART_CMD} --agents-using-haproxy - if [ $? -ne $PATCH_STATUS_OK ] - then - loginfo "$0: Failed to restart neutron agent managed ${DAEMON_NAME}" - GLOBAL_RC=$PATCH_STATUS_FAILED - fi - fi -fi - -# Exit the script with the overall return code -# -exit $GLOBAL_RC diff --git a/utilities/platform-util/scripts/patch-restart-mtce b/utilities/platform-util/scripts/patch-restart-mtce deleted file mode 100755 index 4b313d74a..000000000 --- a/utilities/platform-util/scripts/patch-restart-mtce +++ /dev/null @@ -1,476 +0,0 @@ -#!/bin/bash -# -# Copyright (c) 2016 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# -############################################################################## -# -# This script supports no-reboot patching of any single or -# combination of maintenance processes specified on the command line. -# -# Calling sequence: -# -# rc=mtce-restart process1 process2 process3 ... -# if [ $? != 0 ] ; then -# restart action failed -# -# -############################################################################### -# -# The patching subsystem provides a patch-functions bash source file -# with useful function and variable definitions. -# -if [ -e "/etc/patching/patch-functions" ] ; then - . /etc/patching/patch-functions -fi - -loginfo "----------------------------------------------" -loginfo "Maintenance No-Reboot Patching Restart Request" - -# -# Declare an overall script return code -# -declare -i GLOBAL_RC=$PATCH_STATUS_FAILED - -#if [ ! -e $PATCH_FLAGDIR ] ; then -# mkdir -p $PATCH_FLAGDIR -#fi - -# if set with -c or --clean options then the flag files for -# each process are removed at the start. 
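-# e.g. an in-service patch script might invoke (illustrative):
-#     patch-restart-mtce -c mtcAgent hbsAgent
-# which clears both flag files first, then restarts mtcAgent via SM
-# and hbsAgent via pmon-restart.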
-CLEAN=false - -# -# Completion status ; stored in PID index -# -DISABLED="disabled" -NOPID="not-running" -SKIPPED="skipped" -RESTARTED="restarted" - -# -# process query and restart executables -# -SM_RESTART_EXEC="sm-restart-safe" -SM_QUERY_EXEC="sm-query" -PMON_RESTART_EXEC="pmon-restart" - -# -# Struct indexes -# -PROCESS_INDEX=0 -PID_INDEX=1 -ALIAS_INDEX=2 - - -# -# Process Struct and List [ name ] [ alias ] [ pid | status ] -# -# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -# NOTE TO PATCH WRITERS: Simply Un-Comment processes you want no-reboot patch restarted. -# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -# - -# The process restart control structure -declare sm_managed_processes="" -declare pmon_managed_processes="" - -# Build the process list. -# All arguements should be a valid maintenance process name. -# The name of the binary, not the SM alias. -# See the list below for supported process names. -while [[ ${#} > 0 ]] -do - process="${1}" - case $process in - - -c|--clean) - CLEAN=true - ;; - - # Maintenance Processes - SM managed - "mtcAgent") - sm_managed_processes=( ${sm_managed_processes[@]} "mtcAgent:0:mtc-agent") - ;; - "guestAgent") - sm_managed_processes=( ${sm_managed_processes[@]} "guestAgent:0:guest-agent") - ;; - "hwmond") - sm_managed_processes=( ${sm_managed_processes[@]} "hwmond:0:hw-mon") - ;; - - # Maintenance Processes - PMON managed - "pmond") - pmon_managed_processes=(${pmon_managed_processes[@]} "pmond:0") - ;; - "guestServer") - pmon_managed_processes=(${pmon_managed_processes[@]} "guestServer:0") - ;; - "hbsAgent") - pmon_managed_processes=(${pmon_managed_processes[@]} "hbsAgent:0") - ;; - "mtcClient") - pmon_managed_processes=(${pmon_managed_processes[@]} "mtcClient:0") - ;; - "hbsClient") - pmon_managed_processes=(${pmon_managed_processes[@]} "hbsClient:0") - ;; - "hostwd") - pmon_managed_processes=(${pmon_managed_processes[@]} "hostwd:0") - ;; - "fsmond") - pmon_managed_processes=(${pmon_managed_processes[@]} "fsmond:0") - ;; - "mtclogd") - pmon_managed_processes=(${pmon_managed_processes[@]} "mtclogd:0") - ;; - "mtcalarmd") - pmon_managed_processes=(${pmon_managed_processes[@]} "mtcalarmd:0") - ;; - "lmond") - pmon_managed_processes=(${pmon_managed_processes[@]} "lmond:0") - ;; - - *) - loginfo "Unknown process:${process}" - ;; - esac - shift -done - -# Assume both groupings are done until we know there are not -sm_done=true -pmon_done=true - -#if [ ${#sm_managed_processes[@]} -ne 0 -a is_controller ] ; then -if [ -n "${sm_managed_processes}" -a is_controller ] ; then - - # Record current process IDs - index=0 - for DAEMON in "${sm_managed_processes[@]}" - do - info=(${DAEMON//:/ }) - - if [ "${CLEAN}" = true ] ; then - rm -f $PATCH_FLAGDIR/${info[${PROCESS_INDEX}]}.restarted - fi - - info[${PID_INDEX}]=`pidof ${info[${PROCESS_INDEX}]}` - if [ -z "${info[${PID_INDEX}]}" ] ; then - loginfo "${info[${PROCESS_INDEX}]} is not running" - info[${PID_INDEX}]="${NOPID}" - fi - - # Save the PID or NOPID status to the process line - sm_managed_processes[${index}]="${info[${PROCESS_INDEX}]}:${info[${PID_INDEX}]}:${info[${ALIAS_INDEX}]}" - - ((index++)) - done - - # Restart the processes - index=0 - for DAEMON in "${sm_managed_processes[@]}" - do - info=(${DAEMON//:/ }) - - if [ -e $PATCH_FLAGDIR/${info[${PROCESS_INDEX}]}.restarted ] ; then - info[${PID_INDEX}]="${SKIPPED}" - - # Add the PID to the process line - 
sm_managed_processes[${index}]="${info[${PROCESS_INDEX}]}:${info[${PID_INDEX}]}:${info[${ALIAS_INDEX}]}" - ((index++)) - - continue - fi - sm_query_result=`${SM_QUERY_EXEC} service ${info[${ALIAS_INDEX}]}` - if [[ "${sm_query_result}" == *"enabled-active"* ]] ; then - # Save the original PID - info[${PID_INDEX}]=`pidof ${info[${PROCESS_INDEX}]}` - - if [ -n "${info[${PID_INDEX}]}" ] ; then - - loginfo "sm-restart of ${info[${PROCESS_INDEX}]} [pid:${info[${PID_INDEX}]}]" - touch $PATCH_FLAGDIR/${info[${PROCESS_INDEX}]}.restarted - ${SM_RESTART_EXEC} service "${info[${ALIAS_INDEX}]}" - sm_done=false - sleep 5 - - else - loginfo "${info[${PROCESS_INDEX}]} is not running ; must be on inactive controller" - info[${PID_INDEX}]="${NOPID}" - fi - elif [[ ${sm_query_result} == *"is enabling"* ]] ; then - info[${PID_INDEX}]="${NOPID}" - loginfo "sm-restart ${info[${PROCESS_INDEX}]} ; [in progress] ; [pid:${info[${PID_INDEX}]}]" - else - info[${PID_INDEX}]="${DISABLED}" - loginfo "${info[${PROCESS_INDEX}]} is not active" - fi - - # Add the PID to the process line - sm_managed_processes[${index}]="${info[${PROCESS_INDEX}]}:${info[${PID_INDEX}]}:${info[${ALIAS_INDEX}]}" - - ((index++)) - done -fi - -if [ -n "${pmon_managed_processes}" ] ; then - - echo "DEBUG: pmon_managed_processes:${pmon_managed_processes}" - - # Restart the pmond processes - index=0 - for DAEMON in "${pmon_managed_processes[@]}" - do - info=(${DAEMON//:/ }) - - if [ "${CLEAN}" = true ] ; then - rm -f $PATCH_FLAGDIR/${info[${PROCESS_INDEX}]}.restarted - fi - - if [ -e $PATCH_FLAGDIR/${info[${PROCESS_INDEX}]}.restarted ] ; then - info[${PID_INDEX}]="${SKIPPED}" - pmon_managed_processes[${index}]="${info[${PROCESS_INDEX}]}:${info[${PID_INDEX}]}" - ((index++)) - continue - fi - - # Save the original PID - info[${PID_INDEX}]=`pidof ${info[${PROCESS_INDEX}]}` - - if [ -n "${info[${PID_INDEX}]}" ] ; then - loginfo "pmon-restart of ${info[${PROCESS_INDEX}]} [pid:${info[${PID_INDEX}]}]" - touch $PATCH_FLAGDIR/${info[${PROCESS_INDEX}]}.restarted - ${PMON_RESTART_EXEC} ${info[${PROCESS_INDEX}]} - pmon_done=false - sleep 2 - - #################################################################### - # Special Handling Section - # - # - pmond needs 30 seconds to restart before it will start - # monitoring processes.We can maybe remove that in the daemon - # config file but for now its there and we have to wait. - #################################################################### - if [ "${info[${PROCESS_INDEX}]}" == "pmond" ] ; then - sleep 30 - fi - - else - info[${PID_INDEX}]="${DISABLED}" - loginfo "${info[${PROCESS_INDEX}]} is not active" - fi - - # Save the updated PID or other status to the process line - pmon_managed_processes[${index}]="${info[${PROCESS_INDEX}]}:${info[${PID_INDEX}]}" - - ((index++)) - done -fi - -# check for done. If this is not met in timeout then fail is returned -if [ "$sm_done" = true -a "$pmon_done" = true ] ; then - GLOBAL_RC=$PATCH_STATUS_OK - loginfo " SM Processes: ${sm_managed_processes[@]}" - loginfo "PMON Processes: ${pmon_managed_processes[@]}" - loginfo "Maintenance No-Reboot Patching Status: ${GLOBAL_RC} - nothing to do." - exit ${GLOBAL_RC} -fi - -# Monitor the restart of SM processes -# -# Don't want to start from the beginning of the shell -# Want time zero now plus 20 seconds. 
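-# (Note: SECONDS is bash's builtin timer ; assigning to it below resets the -# count, so UNTIL marks "now plus TIMEOUT seconds" rather than time since -# the shell started.)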
-# -SECONDS=0 -TIMEOUT=120 -let UNTIL=${SECONDS}+${TIMEOUT} -loginfo "restart timeout is ${TIMEOUT}" - -while [ ${UNTIL} -ge ${SECONDS} ] -do - if [ "$sm_done" = false ] ; then - if [ is_controller -o is_cpe ] ; then - sm_not_done=false - index=0 - for DAEMON in "${sm_managed_processes[@]}" - do - info=(${DAEMON//:/ }) - - # Don't wast time on processes that are being skipped due to past restart - if [ "${info[${PID_INDEX}]}" == "${SKIPPED}" ] ; then - ((index++)) - continue - - # Don't wast time on processes that have already restarted - elif [ "${info[${PID_INDEX}]}" == "${RESTARTED}" ] ; then - ((index++)) - continue - - # Don't look for disabled processes - elif [ "${info[${PID_INDEX}]}" == "${DISABLED}" ] ; then - ((index++)) - continue - - # Don't look at not running processes - elif [ "${info[${PID_INDEX}]}" == "${NOPID}" ] ; then - ((index++)) - continue - - elif [[ `sm-query service ${info[${ALIAS_INDEX}]}` == *"enabled-active"* ]] ; then - - # Save the original PID - new_pid=`pidof ${info[${PROCESS_INDEX}]}` - if [ $? -eq 0 -a -n ${new_pid} ] ; then - - if [ "${info[${PID_INDEX}]}" != "${new_pid}" ] ; then - loginfo "${info[${PROCESS_INDEX}]} ${RESTARTED} ok [pid:${info[${PID_INDEX}]} -> ${new_pid}]" - info[${PID_INDEX}]="${RESTARTED}" - fi - fi - fi - - if [ "${info[${PID_INDEX}]}" != "${RESTARTED}" ] ; then - sm_not_done=true - fi - - # Add the PID to the process line - sm_managed_processes[${index}]="${info[${PROCESS_INDEX}]}:${info[${PID_INDEX}]}:${info[${ALIAS_INDEX}]}" - - ((index++)) - done - fi - - # log when SM restarts are done print a summary only once - if [ "${sm_not_done}" = false -a "${sm_done}" = false ] ; then - sm_done=true - logged=false - for DAEMON in "${sm_managed_processes[@]}" - do - info=(${DAEMON//:/ }) - if [ "${info[${PID_INDEX}]}" == "${RESTARTED}" ] ; then - if [ "${logged}" = false ] ; then - loginfo "The following 'sm managed' processes have been 'restarted'" - logged=true - fi - loginfo "... process: ${info[${PROCESS_INDEX}]}" - fi - done - logged=false - for DAEMON in "${sm_managed_processes[@]}" - do - info=(${DAEMON//:/ }) - if [ "${info[${PID_INDEX}]}" == "${SKIPPED}" ] ; then - if [ "${logged}" = false ] ; then - loginfo "The following 'sm managed' processes have been 'skipped' ; due to previous restart" - logged=true - fi - loginfo "... process: ${info[${PROCESS_INDEX}]}" - fi - done - fi - fi - - ######################################################################### - # For all nodes .... - ######################################################################### - - # Loop over all PMON proceses looking for complete restarts. - # Update process struct PID field as status is learned. - - if [ "$pmon_done" = false ] ; then - # Start assuming we are not done - pmon_not_done=false - index=0 - for DAEMON in "${pmon_managed_processes[@]}" - do - info=(${DAEMON//:/ }) - - # Don't wast time on processes that are being skipped due to past restart - if [ "${info[${PID_INDEX}]}" == "${SKIPPED}" ] ; then - ((index++)) - continue - - # Don't wast time on processes that have already restarted - elif [ "${info[${PID_INDEX}]}" == "${RESTARTED}" ] ; then - ((index++)) - continue - - # Don't look for disabled processes - elif [ "${info[${PID_INDEX}]}" == "${DISABLED}" ] ; then - ((index++)) - continue - - # Don't look at not running processes - elif [ "${info[${PID_INDEX}]}" == "${NOPID}" ] ; then - ((index++)) - continue - fi - - # Save the original PID - new_pid=`pidof ${info[${PROCESS_INDEX}]}` - if [ $? 
-eq 0 -a "${new_pid}" != "" ] ; then - # set the process as restarted as soon as we have a new pid - if [ "${info[${PID_INDEX}]}" != "${RESTARTED}" ] ; then - loginfo "${info[${PROCESS_INDEX}]} ${RESTARTED} ok [PID: ${info[${PID_INDEX}]} -> ${new_pid}]" - info[${PID_INDEX}]=${RESTARTED} - fi - fi - - # Set not done as long as there is one process not restarted - if [ "${info[${PID_INDEX}]}" != "${RESTARTED}" ] ; then - pmon_not_done=true - fi - - # Add the PID to the process line - pmon_managed_processes[${index}]="${info[${PROCESS_INDEX}]}:${info[${PID_INDEX}]}" - - ((index++)) - done - fi - - # log when all pmond restarts are done - if [ "${pmon_not_done}" = false -a "${pmon_done}" = false ] ; then - pmon_done=true - logged=false - for DAEMON in "${pmon_managed_processes[@]}" - do - info=(${DAEMON//:/ }) - - if [ "${info[${PID_INDEX}]}" == "${RESTARTED}" ] ; then - if [ "${logged}" = false ] ; then - loginfo "The following 'pmon managed' processes have been 'restarted'" - logged=true - fi - loginfo "... process: ${info[${PROCESS_INDEX}]}" - fi - done - - logged=false - for DAEMON in "${pmon_managed_processes[@]}" - do - info=(${DAEMON//:/ }) - - if [ "${info[${PID_INDEX}]}" == "${SKIPPED}" ] ; then - if [ "${logged}" = false ] ; then - loginfo "The following 'pmon managed' processes have been 'skipped' ; due to previous restart" - logged=true - fi - loginfo "... process: ${info[${PROCESS_INDEX}]}" - fi - done - fi - - # check for done. If this is not met in timeout then fail is returned - if [ "$sm_done" = true -a "$pmon_done" = true ] ; then - GLOBAL_RC=$PATCH_STATUS_OK - break - fi - - sleep 1 -done - -loginfo "Maintenance No-Reboot Patching Status: ${GLOBAL_RC}" - -exit ${GLOBAL_RC} diff --git a/utilities/platform-util/scripts/patch-restart-processes b/utilities/platform-util/scripts/patch-restart-processes deleted file mode 100755 index 5d7b9db8a..000000000 --- a/utilities/platform-util/scripts/patch-restart-processes +++ /dev/null @@ -1,555 +0,0 @@ -#!/bin/bash -# -# Copyright (c) 2016-18 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# -############################################################################## - -script=`basename "$0"` - -############################################################################## -# -# For Patch Writers -# ----------------- -# -# This script supports no-reboot process restart from command line list. -# Must be run as root or y sudo -# Calling sequence: -# -# /usr/sbin/process-restart process1 process2 ... processN -# if [ $? != 0 ] ; then -# restart action failed -# -############################################################################### -# -# For Developers -# -------------- -# -# Process restart support can be added to this script by adding your process to the -# command line parser as a new case based on the process's name in the following form. 
-# -# "[process-name]") -# process_list =(${process_list[@]} "[monitor] [process-name] [process-alias] [hosttype] [pidfile] [status] -# ;; -# -# Field Descriptions: all fields are manditory -# -# monitor : sm or pmon -# process-name : the name of the process -# - for sm monitored processes, this must be unique, but does -# not need to match the actual binary name -# - for pmon monitored processes, this must be unique and -# must match the actual binary name -# process-alias: the alias name that SM uses instead of the actual process name -# - valid for sm only -# - if its the same as the process name then make it the same -# hosttype : the supported hosttypes are ; stack with commas -# - all ...... all nodetypes -# - controller . controllers -# - storage .... storage nodes -# - compute .... compute nodes -# pidfile : the path to and name of the process's pidfile -# status : set as 0 -# -# Example: based on sysinv-api which is monitored by sm, only runs on the controller -# and has an sm alias. -# -# "sysinv-api") -# process_list =(${process_list[@] } "sm:sysinv-api:sysinv-inv:controller:/var/run/sysinv-api.pid:0") -# ;; -# -# start with empty process restart control structure -declare process_list="" -declare pids="" - -# pull in loginfo and nodetype -. /etc/patching/patch-functions -. /etc/platform/platform.conf - -# -# Declare an overall script return code -# -declare -i GLOBAL_RC=$PATCH_STATUS_FAILED - -# if set with -c or --clean options then the flag files for -# each process are removed at the start. -CLEAN=false - -# -# if set with -p or --parallel options then restart each process in parallel -PARALLEL=false - -# -# Completion status ; stored in PID index -# -DISABLED="disabled" -NOPID="not-running" -SKIPPED="skipped" -RESTARTED="restarted" - -# -# process query and restart executables -# -SM_RESTART_EXEC="sm-restart-safe" -SM_QUERY_EXEC="sm-query" -PMON_RESTART_EXEC="pmon-restart" - -# -# sleep delays (seconds) -# -SM_SLEEP=5 -PMON_SLEEP=2 -MONITOR_SLEEP=2 - -# -# Struct indexes -# -MONITOR_INDEX=0 -PROCESS_INDEX=1 -ALIAS_INDEX=2 -HOSTTYPE_INDEX=3 -PIDFILE_INDEX=4 -STATUS_INDEX=5 - - -# -# update_status: update the specified process index's status field -# -# ${1} = process list index -# ${2} = status -# -function update_status { - DAEMON=${process_list[${1}]} - info=(${DAEMON//:/ }) - process_list[${1}]="${info[${MONITOR_INDEX}]}:${info[${PROCESS_INDEX}]}:${info[${ALIAS_INDEX}]}:${info[${HOSTTYPE_INDEX}]}:${info[${PIDFILE_INDEX}]}:${2}" -} - - -# -# print the list of processes that this script supports restart of -# -function print_list { - printf "\nThis restart script supports post patching restart the following processes ...\n\n" - list=$(fgrep "process_list=(${process_list[@]}" ${0} | grep -v grep | cut -f 2 -d ':') - printf "${list}\n\n" -} - - -# -# print the command and option syntax as well as the list of processes supported by this script -# -function print_help { - printf "\nTiS patching process restart script.\n" - printf "\n%s {-options} [processes ...]\n" "${script}" - printf "\noptions: -l or --list prints a list of supported processes\n" - print_list -} - -# -# patching.log banner for this script -# -loginfo "------------------------------------------" -loginfo "No-Reboot Patching Process Restart Request" - -# -# Option and process list parser -# Build the process list. -# All arguements should be a valid process name, not the SM alias. -# See the list below for supported process names. 
-# -while [[ ${#} > 0 ]] -do - process="${1}" - case $process in - -h|--help) - print_help - exit 0 - ;; - -l|--list) - print_list - exit 0 - ;; - - -c|--clean) - CLEAN=true - ;; - - -p|--parallel) - PARALLEL=true - ;; - - # Sysinv processes - "sysinv-conductor") - process_list=(${process_list[@]} "sm:sysinv-conductor:sysinv-conductor:controller:/var/run/sysinv-conductor.pid:0") - ;; - "sysinv-api") - process_list=(${process_list[@]} "sm:sysinv-api:sysinv-inv:controller:/var/run/sysinv-api.pid:0") - ;; - "sysinv-agent") - process_list=(${process_list[@]} "pmon:sysinv-agent:sysinv-agent:all:/var/run/sysinv-agent.pid:0") - ;; - # Keystone processes - "keystone") - process_list=(${process_list[@]} "sm:keystone:keystone:controller:/var/run/openstack-keystone.pid:0") - ;; - # Barbican processes - "barbican-api") - process_list=(${process_list[@]} "sm:barbican-api:barbican-api:controller:/var/run/barbican/pid:0") - ;; - "barbican-keystone-listener") - process_list=(${process_list[@]} "sm:barbican-keystone-listener:barbican-keystone-listener:controller:/var/run/resource-agents/barbican-keystone-listener.pid:0") - ;; - "barbican-worker") - process_list=(${process_list[@]} "sm:barbican-worker:barbican-worker:controller:/var/run/resource-agents/barbican-worker.pid:0") - ;; - # IO-Monitor process - "io-monitor-manager") - process_list=(${process_list[@]} "pmon:io-monitor-manager:io-monitor-manager:controller:/var/run/io-monitor/io-monitor-manager.pid:0") - ;; - # Vim processes - "nfv-vim") - process_list=(${process_list[@]} "sm:nfv-vim:vim:controller:/var/run/nfv-vim.pid:0") - ;; - "nfv-vim-api") - process_list=(${process_list[@]} "sm:nfv-vim-api:vim-api:controller:/var/run/nfv-vim-api.pid:0") - ;; - "nfv-vim-webserver") - process_list=(${process_list[@]} "sm:nfv-vim-webserver:vim-webserver:controller:/var/run/nfv-vim-webserver.pid:0") - ;; - # Distributed Cloud processes - "dcmanager-manager") - process_list=(${process_list[@]} "sm:dcmanager-manager:dcmanager-manager:controller:/var/run/resource-agents/dcmanager-manager.pid:0") - ;; - "dcmanager-api") - process_list=(${process_list[@]} "sm:dcmanager-api:dcmanager-api:controller:/var/run/resource-agents/dcmanager-api.pid:0") - ;; - "dcorch-engine") - process_list=(${process_list[@]} "sm:dcorch-engine:dcorch-engine:controller:/var/run/resource-agents/dcorch-engine.pid:0") - ;; - "dcorch-snmp") - process_list=(${process_list[@]} "sm:dcorch-snmp:dcorch-snmp:controller:/var/run/resource-agents/dcorch-snmp.pid:0") - ;; - "dcorch-sysinv-api-proxy") - process_list=(${process_list[@]} "sm:dcorch-sysinv-api-proxy:dcorch-sysinv-api-proxy:controller:/var/run/resource-agents/dcorch-sysinv-api-proxy.pid:0") - ;; - "dcorch-patch-api-proxy") - process_list=(${process_list[@]} "sm:dcorch-patch-api-proxy:dcorch-patch-api-proxy:controller:/var/run/resource-agents/dcorch-patch-api-proxy.pid:0") - ;; - "collectd") - process_list=(${process_list[@]} "pmon:collectd:collectd:all:/var/run/collectd.pid:0") - ;; - "influxdb") - process_list=(${process_list[@]} "pmon:influxdb:influxdb:all:/var/run/influxdb/influxdb.pid:0") - ;; - - *) - echo "Unknown process:${process}" - loginfo "Unknown process:${process}" - ;; - esac - shift -done - -# Assume we are done until we know we are not -__done=true - -if [ -n "${process_list}" ] ; then - - # Record current process IDs - index=0 - for DAEMON in "${process_list[@]}" - do - info=(${DAEMON//:/ }) - - monitor="${info[${MONITOR_INDEX}]}" - pidfile="${info[${PIDFILE_INDEX}]}" - hosttype="${info[${HOSTTYPE_INDEX}]}" - 
process="${info[${PROCESS_INDEX}]}" - alias="${info[${ALIAS_INDEX}]}" - stat="${info[${STATUS_INDEX}]}" - - if [ "${CLEAN}" = true ] ; then - rm -f $PATCH_FLAGDIR/${process}.restarted - fi - - # default to not skipping this process - skip=true - - # filter out based on current nodetype and specified hosttype - if [ "${hosttype}" == "all" ] ; then - skip=false - else - - # check for controller function - if [[ ${hosttype} == *"controller"* ]] ; then - if [[ ${nodetype} == *"controller"* ]] ; then - skip=false - fi - fi - - # Check for compute as subfunction - if [[ "${subfunction}" == *"compute"* ]] ; then - if [[ $hosttype} == *"compute"* ]] ; then - skip=false - fi - fi - - # check for compute as main function - if [[ ${hosttype} == *"compute"* ]] ; then - if [[ ${nodetype} == *"compute"* ]] ; then - skip=false - fi - fi - - # check for storage type - if [[ ${hosttype} == *"storage"* ]] ; then - if [[ "${nodetype}" == *"storage"* ]] ; then - skip=false - fi - fi - fi - - if [ "${skip}" = true ] ; then - loginfo "${process} skipped for '${nodetype}' nodetype" - stat="${SKIPPED}" - update_status $index "$stat" - ((index++)) - continue - fi - - if [ -e ${PATCH_FLAGDIR}/${process}.restarted ] ; then - - loginfo "${process} restart skipped - already done" - stat="${SKIPPED}" - update_status ${index} "${stat}" - ((index++)) - continue - - else - - # record the existing PID for log purposes - if [ -e ${pidfile} ] ; then - - stat=$(head -1 ${pidfile} 2>/dev/null) - # check if the pid is running - kill -0 ${stat} 2>/dev/null - rc=$? - if [ ${rc} -ne 0 ] ; then - loginfo "${process} is not running" - stat="${NOPID}" - update_status ${index} "${stat}" - ((index++)) - continue - fi - - else - loginfo "${process} is not running ; missing pidfile" - stat="${NOPID}" - update_status ${index} "${stat}" - ((index++)) - continue - fi - - # - # If we get here then we want to restart this process - # for this node type and the process is running - # - # - # Now manage restart of that process based on what its monitor method is - # - if [ "${monitor}" == "sm" ] ; then - - # Managed/Monitored by SM - sm_query_result=$(${SM_QUERY_EXEC} service ${alias}) - echo "sm_query_result:${sm_query_result} - alias:${alias}" - if [[ "${sm_query_result}" == *"enabled-active"* ]] ; then - - echo "${SM_RESTART_EXEC} of ${process} [pid:${stat}]" - loginfo "${SM_RESTART_EXEC} of ${process} [pid:${stat}]" - touch $PATCH_FLAGDIR/${process}.restarted 2>/dev/null - ${SM_RESTART_EXEC} service "${alias}" - __done=false - if [ "${PARALLEL}" = true ] ; then - sleep ${SM_SLEEP} & - pids="$pids $!" - else - sleep ${SM_SLEEP} - fi - - elif [[ ${sm_query_result} == *"is enabling"* ]] ; then - loginfo "sm-restart ${process} ; [in progress] ; [pid:${info[${STATUS_INDEX}]}]" - stat="${NOPID}" - else - loginfo "${process} is not active" - stat="${DISABLED}" - fi - - else - - # Managed/Monitored by PMON - echo "${PMON_RESTART_EXEC} of ${process} [pid:${stat}]" - loginfo "${PMON_RESTART_EXEC} of ${process} [pid:${stat}]" - touch $PATCH_FLAGDIR/${process}.restarted 2>/dev/null - ${PMON_RESTART_EXEC} ${process} - __done=false - if [ "${PARALLEL}" = true ] ; then - sleep ${PMON_SLEEP} & - pids="$pids $!" 
- else - sleep ${PMON_SLEEP} - fi - - fi - fi - - # echo "Monitor:${monitor} Process:${process} Alias:${alias} Node:${hosttype} Pidfile:${pidfile} Status:${stat}" - - # Save the PID or NOPID status to the process line - update_status ${index} "${stat}" - - ((index++)) - done - - # wait for background sleeps - wait ${pids} -fi - -# -# Now Loop over the process list waiting for all the processes to restart. -# There is an overall timout of 20 seconds for all the processes to be restarted -# -if [ "${__done}" = true ] ; then - - GLOBAL_RC=$PATCH_STATUS_OK - loginfo "No-Reboot Patching Process Restart Status: ${GLOBAL_RC} - nothing to do." - exit ${GLOBAL_RC} -fi - -# Monitor the restart of processes -# -# Don't want to start from the beginning of the shell -# Want time zero now plus 30 seconds. -# -SECONDS=0 -TIMEOUT=120 -let UNTIL=${SECONDS}+${TIMEOUT} -loginfo "restart timeout is ${TIMEOUT}" - -while [ ${UNTIL} -ge ${SECONDS} ] -do - if [ "${__done}" = false ] ; then - index=0 - for DAEMON in "${process_list[@]}" - do - info=(${DAEMON//:/ }) - pidfile="${info[${PIDFILE_INDEX}]}" - process="${info[${PROCESS_INDEX}]}" - alias="${info[${ALIAS_INDEX}]}" - stat="${info[${STATUS_INDEX}]}" - - if [ "${stat}" != "${SKIPPED}" -a "${stat}" != "${RESTARTED}" -a "${stat}" != "${DISABLED}" -a "${stat}" != "${NOPID}" ] ; then - if [ -e ${pidfile} ] ; then - - # Get the new PID - new_pid=$(head -1 ${pidfile} 2>/dev/null) - - # check if the pid is running - kill -0 ${new_pid} 2>/dev/null - if [ $? -eq 0 -a -n ${new_pid} ] ; then - - # verify the pid is different - if [ "${stat}" != "${new_pid}" ] ; then - loginfo "${process} ${RESTARTED} ok [pid:${stat} -> ${new_pid}]" - stat="${RESTARTED}" - update_status ${index} "${stat}" - fi - fi - fi - fi - ((index++)) - done - - sleep ${MONITOR_SLEEP} - - # Loop over all proceses looking for complete restarts. - # Update process struct PID field as status is learned. - - index=0 - __not_done=false - for DAEMON in "${process_list[@]}" - do - info=(${DAEMON//:/ }) - stat="${info[${STATUS_INDEX}]}" - if [ "${stat}" != "${SKIPPED}" -a "${stat}" != "${RESTARTED}" -a "${stat}" != "${DISABLED}" -a "${stat}" != "${NOPID}" ] ; then - __not_done=true - fi - ((index++)) - done - - # Exit if done - if [ "${__not_done}" = false ] ; then - - __done=true - GLOBAL_RC=${PATCH_STATUS_OK} - break - - fi - else - # should not get here but handle anyway - GLOBAL_RC=${PATCH_STATUS_OK} - break - fi -done - -logged=false -for DAEMON in "${process_list[@]}" -do - info=(${DAEMON//:/ }) - if [ "${info[${STATUS_INDEX}]}" == "${RESTARTED}" ] ; then - if [ "${logged}" = false ] ; then - loginfo "The following processes have been 'restarted'" - logged=true - fi - loginfo "... process: ${info[${PROCESS_INDEX}]}" - fi -done - -logged=false -for DAEMON in "${process_list[@]}" -do - info=(${DAEMON//:/ }) - if [ "${info[${STATUS_INDEX}]}" == "${SKIPPED}" ] ; then - if [ "${logged}" = false ] ; then - loginfo "The following processes have been 'skipped'" - logged=true - fi - loginfo "... process: ${info[${PROCESS_INDEX}]}" - fi -done - -if [ "${__done}" = false ] ; then - loginfo "Process Restart Timeout ; waiting on " - for DAEMON in "${process_list[@]}" - do - info=(${DAEMON//:/ }) - stat="${info[${STATUS_INDEX}]}" - - if [ "${stat}" == "${SKIPPED}" ] ; then - ((index++)) - elif [ "${stat}" == "${RESTARTED}" ] ; then - ((index++)) - elif [ "${stat}" == "${DISABLED}" ] ; then - ((index++)) - elif [ "${stat}" == "${NOPID}" ] ; then - ((index++)) - else - loginfo "... 
process: ${stat}" - fi - ((index++)) - done -fi - -loginfo "No-Reboot Patching Process Restart Status: ${GLOBAL_RC}" - -exit ${GLOBAL_RC} diff --git a/utilities/platform-util/scripts/remotelogging_tc_setup.sh b/utilities/platform-util/scripts/remotelogging_tc_setup.sh deleted file mode 100755 index 7b7cf903a..000000000 --- a/utilities/platform-util/scripts/remotelogging_tc_setup.sh +++ /dev/null @@ -1,200 +0,0 @@ -#!/bin/sh - -# -# Copyright (c) 2017-2018 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - -# $1 - listening port of remote log server -PORT=$1 - -DEFAULT_PRIORITY=1 - -function is_loopback { - # (from include/uapi/linux/if.h) - # IFF_LOOPBACK = 1<<3 = 8. Using a left shifted syntax can confuse bashate. - IFF_LOOPBACK=8 - - # get the interface flags - FLAGS=`cat /sys/class/net/$DEV/flags` - - if ((($IFF_LOOPBACK & $FLAGS) == 0)) - then - return 1 - else - return 0 - fi -} - -function log { - # It seems that syslog isn't yet running, so append directly to the syslog file - local FILE=/var/log/platform.log - echo `date +%FT%T.%3N` `hostname` CGCS_TC_SETUP: $@ >> $FILE -} - -function test_valid_speed { - # After the link is enabled but before the autonegotiation is complete - # the link speed may be read as either -1 or as 4294967295 (which is - # uint(-1) in twos-complement) depending on the kernel. Neither one is valid. - if (( $1 > 0 )) && (( $1 != 4294967295 )) - then - return 0 - else - return 1 - fi -} - -function get_dev_speed { - # If the link doesn't come up we won't go enabled, so here we can - # afford to wait forever for the link. - while true; do - if [ -e /sys/class/net/$1/bonding ]; then - for VAL in `cat /sys/class/net/$1/lower_*/speed`; do - if test_valid_speed $VAL; then - log slave for bond link $1 reported speed $VAL - echo $VAL - return 0 - else - log slave for bond link $1 reported invalid speed $VAL - fi - done - log all slaves for bond link $1 reported invalid speeds, \ - will sleep 30 sec and try again - else - VAL=`cat /sys/class/net/$1/speed` - if test_valid_speed $VAL; then - log link $1 reported speed $VAL - echo $VAL - return 0 - else - log link $1 returned invalid speed $VAL, \ - will sleep 30 sec and try again - fi - fi - sleep 30 - done -} - -if [ -f /etc/platform/platform.conf ]; then - source /etc/platform/platform.conf -else - exit 0 -fi - -# bandwitdh percentages, in case of over-percentage, bandwidth is divided based -# on bandwidth ratios -DEFAULT_BW=10 -LOG_BW=9 - -# bandwitdh ceiling percentages, for borrowing bandwidth -DEFAULT_CBW=20 -LOG_CBW=20 - -# 1:40 = default class from cgcs_tc_setup.sh -# 1:60 = LOG class - -if [ $nodetype == "controller" ]; then - # Add class and filters to the oam interface - DEV=$oam_interface - SPEED=$(get_dev_speed $DEV) - - # delete existing qdiscs - tc qdisc del dev $DEV root > /dev/null 2>&1 - - # create new qdiscs, classes and LOG filters - tc qdisc add dev $DEV root handle 1: htb default 40 - tc class add dev $DEV parent 1: classid 1:1 htb rate ${SPEED}mbit \ - burst 15k quantum 60000 - - AC="tc class add dev $DEV parent 1:1 classid" - $AC 1:40 htb rate $((${DEFAULT_BW}*${SPEED}/100))mbit burst 15k \ - ceil $((${DEFAULT_CBW}*${SPEED}/100))mbit quantum 60000 - $AC 1:60 htb rate $((${LOG_BW}*${SPEED}/100))mbit burst 15k \ - ceil $((${LOG_CBW}*${SPEED}/100))mbit quantum 60000 - - tc qdisc add dev $DEV parent 1:40 handle 40: sfq perturb 10 - tc qdisc add dev $DEV parent 1:60 handle 60: sfq perturb 10 - - tc filter add dev $DEV protocol ip parent 1:0 prio $DEFAULT_PRIORITY \ - u32 match ip 
dport ${PORT} 0xffff flowid 1:60 - tc filter add dev $DEV protocol ip parent 1:0 prio $DEFAULT_PRIORITY \ - u32 match ip sport ${PORT} 0xffff flowid 1:60 - -fi - -# On all node types, add LOG class 1:60 and filters to the mgmt interface -DEV=$management_interface - -if is_loopback -then - # mgmt/infra uses the loopback for CPE simplex - exit 0 -fi - -function infra_exists { - if [ -z "$infrastructure_interface" ]; then - return 1 - else - return 0 - fi -} - -function is_consolidated { - if ! infra_exists; then - return 1 - else - - local INFRA=$infrastructure_interface - local MGMT=$management_interface - - # determine whether the management interface is a parent of the - # infrastructure interface based on name. - # eg. this matches enp0s8 to enp0s8.10 but not enp0s88 - if [[ $INFRA =~ $MGMT[\.][0-9]+$ ]]; then - return 0 - fi - return 1 - fi -} - -function is_vlan { - if [ -f /proc/net/vlan/$DEV ]; then - return 0 - else - return 1 - fi -} - -function get_mgmt_tc_filter_priority { - local PRIORITY=$DEFAULT_PRIORITY - - if is_consolidated - then - if ! is_vlan - then - # If we have a consolidated VLAN interface, we must set the - # priority to $DEFAULT_PRIORITY + 1 for the underlying - # ethernet interface, as it will already have - # $DEFAULT_PRIORITY filters to catch high priority - # infra traffic - PRIORITY=$(($DEFAULT_PRIORITY + 1)) - fi - fi - echo $PRIORITY - return 0 -} - -SPEED=$(get_dev_speed $DEV) -PRIORITY=$(get_mgmt_tc_filter_priority) - -AC="tc class add dev $DEV parent 1:1 classid" -$AC 1:60 htb rate $((${LOG_BW}*${SPEED}/100))mbit burst 15k \ - ceil $((${LOG_CBW}*${SPEED}/100))mbit quantum 60000 - -tc qdisc add dev $DEV parent 1:60 handle 60: sfq perturb 10 - -tc filter add dev $DEV protocol ip parent 1:0 prio $PRIORITY \ - u32 match ip dport ${PORT} 0xffff flowid 1:60 -tc filter add dev $DEV protocol ip parent 1:0 prio $PRIORITY \ - u32 match ip sport ${PORT} 0xffff flowid 1:60 diff --git a/utilities/tis-extensions/PKG-INFO b/utilities/tis-extensions/PKG-INFO deleted file mode 100644 index 80d6c5154..000000000 --- a/utilities/tis-extensions/PKG-INFO +++ /dev/null @@ -1,13 +0,0 @@ -Metadata-Version: 1.1 -Name: tis-extensions -Version: 1.0 -Summary: TIS Extensions to thirdparty pkgs -Home-page: -Author: Windriver -Author-email: info@windriver.com -License: Apache-2.0 - -Description: TIS Extensions to thirdparty pkgs - - -Platform: UNKNOWN diff --git a/utilities/tis-extensions/centos/build_srpm.data b/utilities/tis-extensions/centos/build_srpm.data deleted file mode 100644 index 619d51041..000000000 --- a/utilities/tis-extensions/centos/build_srpm.data +++ /dev/null @@ -1,2 +0,0 @@ -SRC_DIR="files" -TIS_PATCH_VER=3 diff --git a/utilities/tis-extensions/centos/tis-extensions.spec b/utilities/tis-extensions/centos/tis-extensions.spec deleted file mode 100644 index 5b332b8ff..000000000 --- a/utilities/tis-extensions/centos/tis-extensions.spec +++ /dev/null @@ -1,60 +0,0 @@ -# -# The tis-extensions group of packages is intended to allow us to -# add files to "extend" thirdparty packages, such as by packaging -# custom systemd files into /etc/systemd to override the originals -# without modifying or rebuilding the thirdparty package. 
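-# For example (illustrative only, not a file shipped by this package): a unit -# file placed at /etc/systemd/system/collectd.service takes precedence over a -# thirdparty copy installed under /usr/lib/systemd/system/.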
-# - -Name: tis-extensions -Version: 1.0 -Summary: TIS Extensions to thirdparty pkgs -Release: %{tis_patch_ver}%{?_tis_dist} -License: Apache-2.0 -Group: base -Packager: Wind River -URL: unknown -Source0: %{name}-%{version}.tar.gz - -%define debug_package %{nil} - -Requires: systemd - -%description -TIS Extensions to thirdparty pkgs - -%package -n %{name}-controller -Summary: TIS Extensions to thirdparty pkg on controller -Group: base - -%description -n %{name}-controller -TIS Extensions to thirdparty pkgs on controller - -%define local_etc_systemd %{_sysconfdir}/systemd/system/ -%define local_etc_coredump %{_sysconfdir}/systemd/coredump.conf.d -%define local_etc_initd %{_sysconfdir}/init.d -%define local_etc_sysctl %{_sysconfdir}/sysctl.d -%define local_etc_modload %{_sysconfdir}/modules-load.d - -%prep -%setup - -%build - -%install - -install -d -m 755 %{buildroot}%{local_etc_sysctl} -install -p -D -m 644 coredump-sysctl.conf %{buildroot}%{local_etc_sysctl}/50-coredump.conf - -install -d -m 755 %{buildroot}%{local_etc_coredump} -install -p -D -m 644 coredump.conf %{buildroot}%{local_etc_coredump}/coredump.conf - -install -d -m 755 %{buildroot}%{local_etc_modload} -install -p -D -m 644 modules-load-vfio.conf %{buildroot}%{local_etc_modload}/vfio.conf - -%files -%defattr(-,root,root,-) -%{local_etc_sysctl}/50-coredump.conf -%{local_etc_coredump}/coredump.conf -%{local_etc_modload}/vfio.conf -%doc LICENSE - diff --git a/utilities/tis-extensions/files/LICENSE b/utilities/tis-extensions/files/LICENSE deleted file mode 100644 index d64569567..000000000 --- a/utilities/tis-extensions/files/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). 
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/utilities/tis-extensions/files/coredump-sysctl.conf b/utilities/tis-extensions/files/coredump-sysctl.conf deleted file mode 100644 index 5a776f7a5..000000000 --- a/utilities/tis-extensions/files/coredump-sysctl.conf +++ /dev/null @@ -1,4 +0,0 @@ -# send coredumps to the systemd coredump utility. 
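-# (The leading '|' below makes the kernel pipe each core dump to the named -# helper ; %p %u %g %s %t %e expand to the crashing process's pid, uid, gid, -# signal number, dump time and executable name.)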
-kernel.core_pattern=|/usr/lib/systemd/systemd-coredump %p %u %g %s %t %e -kernel.core_pipe_limit = 4 -kernel.core_uses_pid = 1 diff --git a/utilities/tis-extensions/files/coredump.conf b/utilities/tis-extensions/files/coredump.conf deleted file mode 100644 index 3b966b28e..000000000 --- a/utilities/tis-extensions/files/coredump.conf +++ /dev/null @@ -1,8 +0,0 @@ -[Coredump] -Storage=external -Compress=yes -#ProcessSizeMax=2G -#ExternalSizeMax=2G -#JournalSizeMax=767M -#MaxUse= -#KeepFree= diff --git a/utilities/tis-extensions/files/modules-load-vfio.conf b/utilities/tis-extensions/files/modules-load-vfio.conf deleted file mode 100644 index 7a497c56a..000000000 --- a/utilities/tis-extensions/files/modules-load-vfio.conf +++ /dev/null @@ -1 +0,0 @@ -vfio diff --git a/utilities/update-motd/LICENSE b/utilities/update-motd/LICENSE deleted file mode 100644 index d64569567..000000000 --- a/utilities/update-motd/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
[The remaining definitions and sections 2 through 9 continue ; the text is identical to the Apache License, Version 2.0 copies in the LICENSE files removed earlier in this patch.]
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/utilities/update-motd/PKG-INFO b/utilities/update-motd/PKG-INFO deleted file mode 100644 index b11affb60..000000000 --- a/utilities/update-motd/PKG-INFO +++ /dev/null @@ -1,14 +0,0 @@ -Metadata-Version: 1.1 -Name: update-motd -Version: 1.0 -Summary: dynamic MOTD generation -Home-page: -Author: -Author-email: -License: Apache-2.0 - -Description: -dynamic MOTD generation - - -Platform: UNKNOWN diff --git a/utilities/update-motd/centos/build_srpm.data b/utilities/update-motd/centos/build_srpm.data deleted file mode 100644 index 112ca54f4..000000000 --- a/utilities/update-motd/centos/build_srpm.data +++ /dev/null @@ -1,2 +0,0 @@ -COPY_LIST="files/*" -TIS_PATCH_VER=2 diff --git a/utilities/update-motd/centos/update-motd.spec b/utilities/update-motd/centos/update-motd.spec deleted file mode 100644 index 11084ac00..000000000 --- a/utilities/update-motd/centos/update-motd.spec +++ /dev/null @@ -1,62 +0,0 @@ -Name: update-motd -Version: 1.0 -Release: %{tis_patch_ver}%{?_tis_dist} -Summary: dynamic MOTD generation - -Group: base -License: Apache-2.0 -URL: unknown -Source0: motd-footer -Source1: motd-header -Source2: motd-update -Source3: motd-update.cron -Source4: customize-banner -Source5: apply_banner_customization -Source6: install_banner_customization -Source7: LICENSE -Source8: motd.head - -Requires: crontabs - -%description -dynamic MOTD generation - -%prep - - -%build - - -%install -install -d %{buildroot}%{_sbindir} -install -m 700 %{SOURCE2} %{buildroot}%{_sbindir}/motd-update - -install -d %{buildroot}%{_sysconfdir} - -install -d %{buildroot}%{_sysconfdir}/motd.d -install -m 755 %{SOURCE1} %{buildroot}%{_sysconfdir}/motd.d/00-header -install -m 755 %{SOURCE0} %{buildroot}%{_sysconfdir}/motd.d/99-footer -install -m 644 %{SOURCE8} %{buildroot}%{_sysconfdir}/motd.head - -install -d %{buildroot}%{_sysconfdir}/cron.d -install -m 600 %{SOURCE3} %{buildroot}%{_sysconfdir}/cron.d/motd-update -install -m 700 %{SOURCE4} %{buildroot}%{_sbindir}/customize-banner -install -m 700 %{SOURCE5} %{buildroot}%{_sbindir}/apply_banner_customization -install -m 700 %{SOURCE6} %{buildroot}%{_sbindir}/install_banner_customization - - -%files -%license ../SOURCES/LICENSE -%dir %{_sysconfdir}/motd.d/ -%{_sysconfdir}/motd.d/* -/usr/sbin/* -/etc/motd.d/* -%{_sysconfdir}/motd.head -/etc/cron.d/* -%{_sbindir}/motd-update -%{_sbindir}/customize-banner 
-%{_sbindir}/apply_banner_customization -%{_sbindir}/install_banner_customization - -%changelog - diff --git a/utilities/update-motd/files/LICENSE b/utilities/update-motd/files/LICENSE deleted file mode 100644 index d64569567..000000000 --- a/utilities/update-motd/files/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." 
- - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. 
We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/utilities/update-motd/files/apply_banner_customization b/utilities/update-motd/files/apply_banner_customization deleted file mode 100644 index 3bf999e7b..000000000 --- a/utilities/update-motd/files/apply_banner_customization +++ /dev/null @@ -1,25 +0,0 @@ -#!/bin/bash - -# -# Copyright (c) 2016 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - -. /usr/sbin/customize-banner - -banner_path="$1" -if [ -z "${banner_path}" ]; then - banner_path=$(pwd) -fi - -if ! customize_banner "${banner_path}"; then - exit 1 -fi - -install_banner_files - -# recreate /etc/motd -/usr/sbin/motd-update - -exit 0 diff --git a/utilities/update-motd/files/customize-banner b/utilities/update-motd/files/customize-banner deleted file mode 100644 index 0d798fba5..000000000 --- a/utilities/update-motd/files/customize-banner +++ /dev/null @@ -1,286 +0,0 @@ -#!/bin/bash - -# -# Copyright (c) 2016 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - -. /usr/bin/tsconfig - -OPT_BANNER_BACKUP="/opt/banner.bk" -PLATFORM_BANNER_DIR="${CONFIG_PATH}/banner" - -# -# set BANNER_VERBOSE to enable info logs -# -export BANNER_VERBOSE - -info_log () -{ - if [ -n "${BANNER_VERBOSE}" ]; then - echo "INFO: customize-banner: $@" >&2 - fi -} - -warn_log () -{ - echo "WARNING: customize-banner: $@" >&2 -} - -error_log () -{ - echo "ERROR: customize-banner: $@" >&2 -} - -# some checks to see if the user does something odd -# Return: 1 if the file is not sane -# 0 if the file is normal looking -# -# Symbolic links are followed to their ultimate destination before -# further checks are made -file_is_sane () -{ - local fname=$1 - - if [ -z "${fname}" ]; then - error_log "no file specified" - return 1 - fi - - # resolve symlinks - if [ -h "${fname}" ]; then - local resolved=$(readlink -f "${fname}") - if [ -z "${resolved}" ]; then - error_log "file does not exist; did not resolve symbolic link" - return 1 - fi - - file_is_sane_no_link "${resolved}" - return $? - fi - - file_is_sane_no_link "${fname}" - return $? -} - -file_is_sane_no_link () -{ - local fname=$1 - # a sanity check, the banner file has to be a regular file - # symbolic links are handled in file_is_sane() - if [ ! -f "${fname}" -o -h "${fname}" ]; then - error_log "file is not a regular file: ${fname}" - return 1 - fi - - # root usually has access to write readonly files... - if [ ! 
-w "${fname}" ]; then - error_log "file is readonly: ${fname}" - return 1 - fi - - # Warn only - if [ -x "${fname}" ]; then - warn_log "file is executable: ${fname}" - fi -} - -# -# Prints list of files to stdout -# -get_banner_files () -{ - local flist=" \ - /etc/issue \ - /etc/issue.net \ - /etc/motd.head \ - /etc/motd.tail \ - " - - echo ${flist} -} - -# -# This is to be executed either by config_controller or by the user -# through apply_banner_customization -# -# For each customizable file, if a customization is present do a bit of -# sanity and copy the file to the platform config for banner -# customization. -# -# Return 0 on success -# Return 1 for error conditions, if any customization file failed sanity -# -customize_banner () -{ - if [ ! -d "${CONFIG_PATH}" ]; then - error_log "config path does not exist" - return 1 - fi - - banner_dir="$1" - if [ -z "${banner_dir}" ]; then - error_log "path containing customization files is required" - return 1 - fi - - if [ ! -d "${banner_dir}" ]; then - warn_log "directory does not exist: ${banner_dir}" - return 0 - fi - - local inputok=0 - local count=0 - local fname="" - for fname in $(get_banner_files); do { - local bname="$(basename $fname)" - # confname includes 'etc'; newfname does not. This seems easier - # for the user; at least until a file with a duplicate name is - # customizable - local confname="${PLATFORM_BANNER_DIR}/${fname}" - local newfname="${banner_dir}/${bname}" - - if [ -e "${newfname}" ]; then - count=$(expr $count + 1) - if ! file_is_sane ${newfname}; then - inputok=1 - continue - fi - - if [ -f "${confname}" ]; then - info_log "updating customization of ${fname}" - rm ${confname} - else - info_log "adding customization of ${fname}" - fi - - mkdir -p $(dirname ${confname}) - cp ${newfname} ${confname} - elif [ -f "${confname}" ]; then - info_log "maintaining previous customization of ${fname}" - fi - }; done - - if [ "$count" -eq 0 ]; then - warn_log "no customization files were found in $banner_dir" - fi - - return $inputok -} - -# -# Compare two banner files -# -# Return 0 if they are the same -# 1 if they are different -# -compare_file () -{ - diff -q "$1" "$2" 2>&1 > /dev/null - return $? -} - -# -# copy the file with a bkx extension, but only if it is not identical to -# another; look for an unused filename -# -# put a hard limit on the number of iterations -# -backup_file () -{ - local fname=$1 - local dname=$(dirname $fname) - local bname=$(basename $fname) - local bkdir=${OPT_BANNER_BACKUP}/${dname} - local count=0 - local newname="" - - if [ ! -f "${fname}" ]; then - warn_log "file does not exist: $fname" - return 0 - fi - - for count in $(seq 0 9); do { - newname=${bkdir}/${bname}.bk${count} - if [ -e "${newname}" ]; then - if compare_file "${newname}" "${fname}"; then - info_log "file is previously backed up ${fname} as ${newname}" - touch ${newname} - return 0 - fi - - info_log "skipping name ${newname}" - else - if [ "$count" -gt 7 ]; then - warn_log "consider cleaning up $(dirname ${newname})" - fi - - info_log "backing up ${fname} as ${newname}" - mkdir -p ${bkdir} - cp ${fname} ${newname} - return 0 - fi - }; done - - # find the oldest file and delete it. - newname=$(find ${bkdir} -maxdepth 1 -type f -name "${bname}.bk[0-9]" \ - | xargs -r ls -1t | tail -n 1) - if [ -z "${newname}" -o ! -f "${newname}" ]; then - error_log "did not find backup files for ${fname}" - return 1 - fi - - warn_log "deleting oldest backed up file ${newname}" - rm ${newname} - cp ${fname} ${newname} - - return $? 
-} - -# -# For each customizable file, if the file exists under -# PLATFORM_BANNER_DIR, and it passes some sanity checks, then install -# the file onto the root fs -# -install_banner_files () -{ - # quietly stop if the PLATFORM_BANNER_DIR is not setup - if [ ! -d ${PLATFORM_BANNER_DIR} ]; then - return 0 - fi - - local banner_files=$(get_banner_files) - local bfile="" - for bfile in ${banner_files}; do { - # do not attempt to install files if the directory on the root - # fs is absent; this is an unexpected condition - if [ ! -d "$(dirname ${bfile})" ]; then - error_log "directory does not exist for ${bfile}" - continue - fi - - # proceed only if the user provided a customization for the file - if [ ! -f "${PLATFORM_BANNER_DIR}/${bfile}" ]; then - info_log "file is not customized: ${bfile}" - continue - fi - - if [ -e "${bfile}" ]; then - if ! file_is_sane ${bfile}; then - continue - fi - if compare_file "${PLATFORM_BANNER_DIR}/${bfile}" "${bfile}"; then - info_log "file already installed: ${bfile}" - continue - fi - info_log "installing over existing file: ${bfile}" - backup_file ${bfile} \ - || continue - else - info_log "installing: ${bfile}" - fi - - cp ${PLATFORM_BANNER_DIR}/${bfile} ${bfile} - }; done; -} diff --git a/utilities/update-motd/files/install_banner_customization b/utilities/update-motd/files/install_banner_customization deleted file mode 100644 index f7850b92b..000000000 --- a/utilities/update-motd/files/install_banner_customization +++ /dev/null @@ -1,16 +0,0 @@ -#!/bin/bash - -# -# Copyright (c) 2016 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - -. /usr/sbin/customize-banner - -install_banner_files - -# recreate /etc/motd -/usr/sbin/motd-update - -exit 0 diff --git a/utilities/update-motd/files/motd-footer b/utilities/update-motd/files/motd-footer deleted file mode 100644 index 02d6beb59..000000000 --- a/utilities/update-motd/files/motd-footer +++ /dev/null @@ -1,16 +0,0 @@ -#!/bin/sh - -# -# Copyright (c) 2014-2016 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - -# motd.tail is reserved for the admin to append static -# trailing information to a dynamically generated -# /etc/motd. -# -# To add dynamic information, add a numbered -# script to /etc/motd.d/ - -[ -f /etc/motd.tail ] && cat /etc/motd.tail || true diff --git a/utilities/update-motd/files/motd-header b/utilities/update-motd/files/motd-header deleted file mode 100644 index f0edde828..000000000 --- a/utilities/update-motd/files/motd-header +++ /dev/null @@ -1,16 +0,0 @@ -#!/bin/sh - -# -# Copyright (c) 2014-2016 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - -# motd.head is reserved for the admin to prepend -# static information to a dynamically generated -# /etc/motd. -# -# To add dynamic information, add a numbered -# script to /etc/motd.d/ - -[ -f /etc/motd.head ] && cat /etc/motd.head || true diff --git a/utilities/update-motd/files/motd-update b/utilities/update-motd/files/motd-update deleted file mode 100644 index e339e448e..000000000 --- a/utilities/update-motd/files/motd-update +++ /dev/null @@ -1,15 +0,0 @@ -#!/bin/bash - -# -# Copyright (c) 2014, 2016 Wind River Systems, Inc. 
-# -# SPDX-License-Identifier: Apache-2.0 -# - -MOTD_FILE=${MOTD_FILE:-"/etc/motd"} -MOTD_PATH=${MOTD_PATH:-"/etc/motd.d"} -MOTD_TAG=${MOTD_TAG:-"motd-update"} - -if [ -d ${MOTD_PATH} ]; then - run-parts ${MOTD_PATH} 1>${MOTD_FILE} -fi diff --git a/utilities/update-motd/files/motd-update.cron b/utilities/update-motd/files/motd-update.cron deleted file mode 100644 index 19fa6681e..000000000 --- a/utilities/update-motd/files/motd-update.cron +++ /dev/null @@ -1,3 +0,0 @@ -# m h dom mon dow user command -0 * * * * root /usr/sbin/motd-update - diff --git a/utilities/update-motd/files/motd.head b/utilities/update-motd/files/motd.head deleted file mode 100644 index e2c1470e6..000000000 --- a/utilities/update-motd/files/motd.head +++ /dev/null @@ -1,5 +0,0 @@ - -WARNING: Unauthorized access to this system is forbidden and will be -prosecuted by law. By accessing this system, you agree that your -actions may be monitored if unauthorized usage is suspected. -
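
Taken together, the deleted motd files implement a small drop-in scheme: the cron entry runs /usr/sbin/motd-update hourly, motd-update rebuilds /etc/motd by handing /etc/motd.d to run-parts, and the packaged 00-header and 99-footer fragments bracket the output with the static /etc/motd.head and /etc/motd.tail when those exist. A minimal sketch of the extension point the motd-footer and motd-header comments describe, i.e. a numbered script dropped into /etc/motd.d; the 50-load name and its output are illustrative only:

    #!/bin/sh
    # hypothetical /etc/motd.d/50-load: run-parts executes fragments in
    # lexical order, so 50- lands between the shipped 00-header and 99-footer
    echo "load average: $(cut -d' ' -f1-3 /proc/loadavg)"

After chmod 755 /etc/motd.d/50-load, running /usr/sbin/motd-update regenerates /etc/motd immediately instead of waiting for the next cron pass.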
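The banner scripts split their work the same way: customize_banner stages operator-supplied copies of the files named by get_banner_files (/etc/issue, /etc/issue.net, /etc/motd.head, /etc/motd.tail) into ${PLATFORM_BANNER_DIR} after the file_is_sane checks, and install_banner_files later copies them onto the root filesystem, backing up whatever they replace. A sketch of the operator-side flow, assuming a deployed node and root privileges; the /tmp staging path and banner wording are illustrative, and staged files must carry exactly the basenames above or customize_banner skips them:

    mkdir -p /tmp/banner-staging
    printf 'Authorized users only.\n' > /tmp/banner-staging/issue
    printf 'Contact ops before rebooting this node.\n' > /tmp/banner-staging/motd.tail
    # BANNER_VERBOSE: any non-empty value enables the scripts' info logs
    BANNER_VERBOSE=1 /usr/sbin/apply_banner_customization /tmp/banner-staging

apply_banner_customization falls back to $(pwd) when invoked without an argument, so running it from inside the staging directory is equivalent.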
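backup_file's rotation is easy to misread, so the behavior under repeated installs deserves a trace: backups land under /opt/banner.bk mirroring the target's directory, in slots <name>.bk0 through <name>.bk9; a slot whose content already matches the file is only touch'ed to refresh its mtime, and once all ten slots hold non-matching content the oldest by mtime is deleted and its name reused. A rough illustration, run as root and assuming the library can be sourced (customize-banner itself sources /usr/bin/tsconfig, so it only loads on a provisioned host):

    . /usr/sbin/customize-banner
    for rev in $(seq 1 12); do
        printf 'issue revision %s\n' "${rev}" > /etc/issue
        backup_file /etc/issue
    done
    # still at most ten slots; revisions 1 and 2 were evicted as the oldest
    ls -1 /opt/banner.bk/etc/issue.bk[0-9]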