diff --git a/base/lighttpd-config/centos/build_srpm.data b/base/lighttpd-config/centos/build_srpm.data index da1e20bd8..2c3b2cb8b 100644 --- a/base/lighttpd-config/centos/build_srpm.data +++ b/base/lighttpd-config/centos/build_srpm.data @@ -1,2 +1,2 @@ SRC_DIR="files" -TIS_PATCH_VER=0 +TIS_PATCH_VER=1 diff --git a/base/lighttpd-config/files/lighttpd.conf b/base/lighttpd-config/files/lighttpd.conf index 31b294800..af4d0394b 100755 --- a/base/lighttpd-config/files/lighttpd.conf +++ b/base/lighttpd-config/files/lighttpd.conf @@ -140,8 +140,8 @@ static-file.exclude-extensions = ( ".php", ".pl", ".fcgi" ) ######### Options that are good to be but not neccesary to be changed ####### -## bind to port (default: 80) -#server.port = 81 +## bind to port 8080 +server.port = 8080 ## bind to localhost (default: all interfaces) #server.bind = "grisu.home.kneschke.de" @@ -220,7 +220,7 @@ $HTTP["url"] !~ "^/(rel-[^/]*|feed|updates|static)/" { ( "localhost" => ( "host" => "127.0.0.1", - "port" => 8080 + "port" => 8008 ) ) ) @@ -244,7 +244,7 @@ $HTTP["url"] !~ "^/(rel-[^/]*|feed|updates|static)/" { # #### Listen to IPv6 -$SERVER["socket"] == "[::]:80" { } +$SERVER["socket"] == "[::]:8080" { } #### status module #status.status-url = "/server-status" diff --git a/centos_master_docker_images.inc b/centos_master_docker_images.inc new file mode 100644 index 000000000..1f8a814a3 --- /dev/null +++ b/centos_master_docker_images.inc @@ -0,0 +1 @@ +database/mariadb diff --git a/centos_pike_docker_images.inc b/centos_pike_docker_images.inc index aae003f96..f0d9ca2ae 100644 --- a/centos_pike_docker_images.inc +++ b/centos_pike_docker_images.inc @@ -1 +1,2 @@ virt/libvirt +database/mariadb diff --git a/ceph/ceph/centos/build_srpm.data b/ceph/ceph/centos/build_srpm.data index ca131ddd5..378e695ce 100644 --- a/ceph/ceph/centos/build_srpm.data +++ b/ceph/ceph/centos/build_srpm.data @@ -1,5 +1,6 @@ SRC_DIR="$CGCS_BASE/git/ceph" +COPY_LIST="files/* $DISTRO/patches/*" TIS_BASE_SRCREV=3f07f7ff1a5c7bfa8d0de12c966594d5fb7cf4ec -TIS_PATCH_VER=GITREVCOUNT +TIS_PATCH_VER=GITREVCOUNT+1 BUILD_IS_BIG=40 BUILD_IS_SLOW=26 diff --git a/ceph/ceph/centos/ceph.spec b/ceph/ceph/centos/ceph.spec deleted file mode 120000 index 5502d2f3f..000000000 --- a/ceph/ceph/centos/ceph.spec +++ /dev/null @@ -1 +0,0 @@ -../../../../git/ceph/ceph.spec \ No newline at end of file diff --git a/ceph/ceph/centos/ceph.spec b/ceph/ceph/centos/ceph.spec new file mode 100644 index 000000000..1efc87231 --- /dev/null +++ b/ceph/ceph/centos/ceph.spec @@ -0,0 +1,1893 @@ +# vim: set noexpandtab ts=8 sw=8 : +# +# spec file for package ceph +# +# Copyright (C) 2004-2019 The Ceph Project Developers. See COPYING file +# at the top-level directory of this distribution and at +# https://github.com/ceph/ceph/blob/master/COPYING +# +# All modifications and additions to the file contributed by third parties +# remain the property of their copyright owners, unless otherwise agreed +# upon. +# +# This file is under the GNU Lesser General Public License, version 2.1 +# +# Please submit bugfixes or comments via http://tracker.ceph.com/ +# + +###################################### +# BEGIN StarlingX specific changes # +###################################### +# StarlingX config overrides +# NOTE: +# - bcond_without tells RPM to define with_ unless +# --without- is explicitly present in the command line. +# A regular build does not use these arguments so bcond_without is +# effectively enabling +# - the same reversed logic applies to bcond_with. 
Its corresponding +# with_ is undefined unless --with- is explicitly +# present in the command line. +# +%define stx_rpmbuild_defaults \ + %{expand: \ + %%bcond_without client \ + %%bcond_without server \ + %%bcond_without gitversion \ + %%bcond_with subman \ + %%bcond_with coverage \ + %%bcond_with pgrefdebugging \ + %%bcond_with cephfs_java \ + %%bcond_with xio \ + %%bcond_with valgrind \ + %%bcond_with lttng \ + %%bcond_with valgrind \ + %%bcond_with selinux \ + %%bcond_with profiler \ + %%bcond_with man_pages \ + %%bcond_without rados \ + %%bcond_without rbd \ + %%bcond_without cython \ + %%bcond_without cephfs \ + %%bcond_without radosgw \ + %%bcond_with selinux \ + %%bcond_without radosstriper \ + %%bcond_without mon \ + %%bcond_without osd \ + %%bcond_without mds \ + %%bcond_with cryptopp \ + %%bcond_without nss \ + %%bcond_with profiler \ + %%bcond_with debug \ + %%bcond_without fuse \ + %%bcond_with jemalloc \ + %%bcond_without tcmalloc \ + %%bcond_with spdk \ + %%bcond_without libatomic_ops \ + %%bcond_with ocf \ + %%bcond_with kinetic \ + %%bcond_with librocksdb \ + %%bcond_without libaio \ + %%bcond_without libxfs \ + %%bcond_with libzfs \ + %%bcond_with lttng \ + %%bcond_with babeltrace \ + %%bcond_without eventfd \ + %%bcond_without openldap } + +%define stx_assert_without() \ + %{expand:%%{?with_%1: \ + %%{error:"%1" is enabled} \ + %%global stx_abort_build 1}} + +%define stx_assert_with() \ + %{expand:%%{!?with_%1: \ + %%{error:"%1" is disabled} \ + %%global stx_abort_build 1}} + +%define stx_assert_package_yes() \ + %{expand:%%stx_assert_with %1} + +%define stx_assert_package_no() \ + %{expand:%%stx_assert_without %1} + +%define stx_assert_package() \ + %{expand:%%stx_assert_package_%2 %1} + +%define stx_assert_feature_yes() \ + %{expand:%%stx_assert_with %1} + +%define stx_assert_feature_no() \ + %{expand:%%stx_assert_without %1} + +%define stx_assert_feature() \ + %{expand:%%stx_assert_feature_%2 %1} + +# StarlingX "configure" safeguards +# +%define stx_check_config \ + %undefine stx_abort_build \ + \ + %stx_assert_feature client yes \ + %stx_assert_feature server yes \ + %stx_assert_feature subman no \ + %stx_assert_feature gitversion yes \ + %stx_assert_feature coverage no \ + %stx_assert_feature pgrefdebugging no \ + %stx_assert_feature cephfs_java no \ + %stx_assert_feature xio no \ + %stx_assert_feature valgrind no \ + \ + %stx_assert_package man_pages no \ + %stx_assert_package rados yes \ + %stx_assert_package rbd yes \ + %stx_assert_package cython yes \ + %stx_assert_package cephfs yes \ + %stx_assert_package radosgw yes \ + %stx_assert_package selinux no \ + %stx_assert_package radosstriper yes \ + %stx_assert_package mon yes \ + %stx_assert_package osd yes \ + %stx_assert_package mds yes \ + %stx_assert_package cryptopp no \ + %stx_assert_package nss yes \ + %stx_assert_package profiler no \ + %stx_assert_package debug no \ + %stx_assert_package fuse yes \ + %stx_assert_package jemalloc no \ + %stx_assert_package tcmalloc yes \ + %stx_assert_package spdk no \ + %stx_assert_package libatomic_ops yes \ + %stx_assert_package ocf no \ + %stx_assert_package kinetic no \ + %stx_assert_package librocksdb no \ + %stx_assert_package libaio yes \ + %stx_assert_package libxfs yes \ + %stx_assert_package libzfs no \ + %stx_assert_package lttng no \ + %stx_assert_package babeltrace no \ + %stx_assert_package eventfd yes \ + %stx_assert_package openldap yes \ + \ + %{?stx_abort_build:exit 1} + +# StarlingX configure utils +# +%define configure_feature() 
%{expand:%%{?with_%{1}:--enable-%{lua: print(rpm.expand("%{1}"):gsub("_","-"):match("^%s*(.*%S)"))}}%%{!?with_%{1}:--disable-%{lua: print(rpm.expand("%{1}"):gsub("_","-"):match("^%s*(.*%S)"))}}} + +%define configure_package() %{expand:%%{?with_%{1}:--with-%{lua: print(rpm.expand("%{1}"):gsub("_","-"):match("^%s*(.*%S)"))}}%%{!?with_%{1}:--without-%{lua: print(rpm.expand("%{1}"):gsub("_","-"):match("^%s*(.*%S)"))}}} + +# special case for tcmalloc: it's actually called tc +# +%define configure_package_tc %{expand:%%{?with_tcmalloc:--with-tc}%%{!?with_tcmalloc:--without-tc}} + +###################################### +# END StarlingX specific changes # +###################################### + +%define _unpackaged_files_terminate_build 0 +%stx_rpmbuild_defaults +%bcond_without stx + +# STX: Ceph takes long time to generate debuginfo package which is not used +# so disable it here. +%define debug_package %{nil} +%define optflags -O2 + +%bcond_with ocf +%if %{without stx} +%bcond_without cephfs_java +%endif +%bcond_with tests +%bcond_with xio +%ifnarch s390 s390x +%bcond_without tcmalloc +%else +# no gperftools/tcmalloc on s390(x) +%bcond_with tcmalloc +%endif +%bcond_without libs_compat +%bcond_with lowmem_builder +%if ( 0%{?fedora} || 0%{?rhel} ) && %{without stx} +%bcond_without selinux +%endif +%if 0%{?suse_version} +%bcond_with selinux +%endif + +# LTTng-UST enabled on Fedora, RHEL 6+, and SLE (not openSUSE) +%if 0%{?fedora} || 0%{?rhel} >= 6 || 0%{?suse_version} +%if ! 0%{?is_opensuse} && %{without stx} +%bcond_without lttng +%endif +%endif + +%if %{with selinux} +# get selinux policy version +%{!?_selinux_policy_version: %global _selinux_policy_version %(sed -e 's,.*selinux-policy-\\([^/]*\\)/.*,\\1,' /usr/share/selinux/devel/policyhelp 2>/dev/null || echo 0.0.0)} +%endif + +%{!?_udevrulesdir: %global _udevrulesdir /lib/udev/rules.d} +%{!?tmpfiles_create: %global tmpfiles_create systemd-tmpfiles --create} + +# unify libexec for all targets +#%global _libexecdir %{_exec_prefix}/lib +%global _libexecdir %{_libdir} + + +################################################################################# +# common +################################################################################# +Name: ceph +Version: 10.2.6 +Release: 0.el7%{?_tis_dist}.%{tis_patch_ver} +Epoch: 1 +Summary: User space components of the Ceph file system +License: LGPL-2.1 and CC-BY-SA-1.0 and GPL-2.0 and BSL-1.0 and GPL-2.0-with-autoconf-exception and BSD-3-Clause and MIT +%if 0%{?suse_version} +Group: System/Filesystems +%endif +URL: http://ceph.com/ +Source0: http://ceph.com/download/%{name}-%{version}.tar.gz + +Source1: ceph.sh +Source2: ceph-rest-api +Source3: ceph.conf.pmon +Source4: ceph-init-wrapper.sh +Source5: ceph.conf +Source6: ceph-manage-journal.py +Source7: osd-wait-status.py +Source8: ceph.service +Source9: ceph-rest-api.service +Source10: ceph-radosgw.service + +Source11: stx_git_version +Source12: ceph-preshutdown.sh +Source13: starlingx-docker-override.conf + +Patch0001: 0001-Add-hooks-for-orderly-shutdown-on-controller.patch + +%if 0%{?suse_version} +%if 0%{?is_opensuse} +ExclusiveArch: x86_64 aarch64 ppc64 ppc64le +%else +ExclusiveArch: x86_64 aarch64 +%endif +%endif +################################################################################# +# dependencies that apply across all distro families +################################################################################# +Requires: ceph-osd = %{epoch}:%{version}-%{release} +Requires: ceph-mds = %{epoch}:%{version}-%{release} 
+Requires: ceph-mon = %{epoch}:%{version}-%{release} +Requires(post): binutils +%if 0%{with cephfs_java} +BuildRequires: java-devel +BuildRequires: sharutils +%endif +%if 0%{with selinux} +BuildRequires: checkpolicy +BuildRequires: selinux-policy-devel +BuildRequires: /usr/share/selinux/devel/policyhelp +%endif +BuildRequires: boost-devel +BuildRequires: cmake +BuildRequires: cryptsetup +BuildRequires: fuse-devel +BuildRequires: gcc-c++ +BuildRequires: gdbm +%if 0%{with tcmalloc} +BuildRequires: gperftools-devel +%endif +BuildRequires: hdparm +BuildRequires: leveldb-devel > 1.2 +BuildRequires: libaio-devel +BuildRequires: libatomic_ops-devel +BuildRequires: libblkid-devel >= 2.17 +BuildRequires: libcurl-devel +BuildRequires: libudev-devel +BuildRequires: libtool +BuildRequires: libxml2-devel +BuildRequires: make +BuildRequires: parted +BuildRequires: perl +BuildRequires: pkgconfig +BuildRequires: python +BuildRequires: python-devel +BuildRequires: python-nose +BuildRequires: python-requests +BuildRequires: python-sphinx +BuildRequires: python-virtualenv +BuildRequires: snappy-devel +BuildRequires: udev +BuildRequires: util-linux +BuildRequires: valgrind-devel +BuildRequires: xfsprogs +BuildRequires: xfsprogs-devel +BuildRequires: xmlstarlet +BuildRequires: yasm + +################################################################################# +# distro-conditional dependencies +################################################################################# +%if 0%{?suse_version} +BuildRequires: pkgconfig(systemd) +BuildRequires: systemd-rpm-macros +BuildRequires: systemd +%{?systemd_requires} +PreReq: %fillup_prereq +BuildRequires: net-tools +BuildRequires: libbz2-devel +BuildRequires: btrfsprogs +BuildRequires: mozilla-nss-devel +BuildRequires: keyutils-devel +BuildRequires: libopenssl-devel +BuildRequires: lsb-release +BuildRequires: openldap2-devel +BuildRequires: python-Cython +%endif +%if 0%{?fedora} || 0%{?rhel} +Requires: systemd +BuildRequires: boost-random +BuildRequires: btrfs-progs +BuildRequires: nss-devel +BuildRequires: keyutils-libs-devel +BuildRequires: openldap-devel +BuildRequires: openssl-devel +BuildRequires: redhat-lsb-core +BuildRequires: Cython +%endif +# lttng and babeltrace for rbd-replay-prep +%if %{with lttng} +%if 0%{?fedora} || 0%{?rhel} +BuildRequires: lttng-ust-devel +BuildRequires: libbabeltrace-devel +%endif +%if 0%{?suse_version} +BuildRequires: lttng-ust-devel +BuildRequires: babeltrace-devel +%endif +%endif +# expat and fastcgi for RGW +%if 0%{?suse_version} +BuildRequires: libexpat-devel +BuildRequires: FastCGI-devel +%endif +%if 0%{?rhel} || 0%{?fedora} +BuildRequires: expat-devel +BuildRequires: fcgi-devel +%endif +#hardened-cc1 +%if 0%{?fedora} || 0%{?rhel} +BuildRequires: redhat-rpm-config +%endif +# Accelio IB/RDMA +%if 0%{with xio} +BuildRequires: libxio-devel +%endif + +%description +Ceph is a massively scalable, open-source, distributed storage system that runs +on commodity hardware and delivers object, block and file system storage. 
+ + +################################################################################# +# packages +################################################################################# +%package base +Summary: Ceph Base Package +Group: System Environment/Base +Requires: ceph-common = %{epoch}:%{version}-%{release} +Requires: librbd1 = %{epoch}:%{version}-%{release} +Requires: librados2 = %{epoch}:%{version}-%{release} +Requires: libcephfs1 = %{epoch}:%{version}-%{release} +Requires: librgw2 = %{epoch}:%{version}-%{release} +%if 0%{with selinux} +Requires: ceph-selinux = %{epoch}:%{version}-%{release} +%endif +Requires: python +Requires: python-requests +Requires: python-setuptools +Requires: grep +Requires: xfsprogs +Requires: logrotate +Requires: util-linux +Requires: hdparm +Requires: cryptsetup +Requires: findutils +Requires: which +%if 0%{?suse_version} +Recommends: ntp-daemon +%endif +%if 0%{with xio} +Requires: libxio +%endif +%description base +Base is the package that includes all the files shared amongst ceph servers + +%package -n ceph-common +Summary: Ceph Common +Group: System Environment/Base +Requires: librbd1 = %{epoch}:%{version}-%{release} +Requires: librados2 = %{epoch}:%{version}-%{release} +Requires: libcephfs1 = %{epoch}:%{version}-%{release} +Requires: python-rados = %{epoch}:%{version}-%{release} +Requires: python-rbd = %{epoch}:%{version}-%{release} +Requires: python-cephfs = %{epoch}:%{version}-%{release} +Requires: python-requests +%{?systemd_requires} +%if 0%{?suse_version} +Requires(pre): pwdutils +%endif +%if 0%{with xio} +Requires: libxio +%endif +%description -n ceph-common +Common utilities to mount and interact with a ceph storage cluster. +Comprised of files that are common to Ceph clients and servers. + +%package mds +Summary: Ceph Metadata Server Daemon +Group: System Environment/Base +Requires: ceph-base = %{epoch}:%{version}-%{release} +%description mds +ceph-mds is the metadata server daemon for the Ceph distributed file system. +One or more instances of ceph-mds collectively manage the file system +namespace, coordinating access to the shared OSD cluster. + +%package mon +Summary: Ceph Monitor Daemon +Group: System Environment/Base +Requires: ceph-base = %{epoch}:%{version}-%{release} +# For ceph-rest-api +%if 0%{?fedora} || 0%{?rhel} +Requires: python-flask +%endif +%if 0%{?suse_version} +Requires: python-Flask +%endif +%description mon +ceph-mon is the cluster monitor daemon for the Ceph distributed file +system. One or more instances of ceph-mon form a Paxos part-time +parliament cluster that provides extremely reliable and durable storage +of cluster membership, configuration, and state. + +%package fuse +Summary: Ceph fuse-based client +Group: System Environment/Base +%description fuse +FUSE based client for Ceph distributed network file system + +%package -n rbd-fuse +Summary: Ceph fuse-based client +Group: System Environment/Base +Requires: librados2 = %{epoch}:%{version}-%{release} +Requires: librbd1 = %{epoch}:%{version}-%{release} +%description -n rbd-fuse +FUSE based client to map Ceph rbd images to files + +%package -n rbd-mirror +Summary: Ceph daemon for mirroring RBD images +Group: System Environment/Base +Requires: ceph-common = %{epoch}:%{version}-%{release} +Requires: librados2 = %{epoch}:%{version}-%{release} +%description -n rbd-mirror +Daemon for mirroring RBD images between Ceph clusters, streaming +changes asynchronously. 
+ +%package -n rbd-nbd +Summary: Ceph RBD client base on NBD +Group: System Environment/Base +Requires: librados2 = %{epoch}:%{version}-%{release} +Requires: librbd1 = %{epoch}:%{version}-%{release} +%description -n rbd-nbd +NBD based client to map Ceph rbd images to local device + +%package radosgw +Summary: Rados REST gateway +Group: Development/Libraries +Requires: ceph-common = %{epoch}:%{version}-%{release} +%if 0%{with selinux} +Requires: ceph-selinux = %{epoch}:%{version}-%{release} +%endif +Requires: librados2 = %{epoch}:%{version}-%{release} +Requires: librgw2 = %{epoch}:%{version}-%{release} +%if 0%{?rhel} || 0%{?fedora} +Requires: mailcap +# python-flask for powerdns +Requires: python-flask +%endif +%if 0%{?suse_version} +# python-Flask for powerdns +Requires: python-Flask +%endif +%description radosgw +RADOS is a distributed object store used by the Ceph distributed +storage system. This package provides a REST gateway to the +object store that aims to implement a superset of Amazon's S3 +service as well as the OpenStack Object Storage ("Swift") API. + +%if %{with ocf} +%package resource-agents +Summary: OCF-compliant resource agents for Ceph daemons +Group: System Environment/Base +License: LGPL-2.0 +Requires: ceph-base = %{epoch}:%{version} +Requires: resource-agents +%description resource-agents +Resource agents for monitoring and managing Ceph daemons +under Open Cluster Framework (OCF) compliant resource +managers such as Pacemaker. +%endif + +%package osd +Summary: Ceph Object Storage Daemon +Group: System Environment/Base +Requires: ceph-base = %{epoch}:%{version}-%{release} +# for sgdisk, used by ceph-disk +%if 0%{?fedora} || 0%{?rhel} +Requires: gdisk +%endif +%if 0%{?suse_version} +Requires: gptfdisk +%endif +Requires: parted +%description osd +ceph-osd is the object storage daemon for the Ceph distributed file +system. It is responsible for storing objects on a local file system +and providing access to them over the network. + +%package -n librados2 +Summary: RADOS distributed object store client library +Group: System Environment/Libraries +License: LGPL-2.0 +%if 0%{?rhel} || 0%{?fedora} +Obsoletes: ceph-libs < %{epoch}:%{version}-%{release} +%endif +%description -n librados2 +RADOS is a reliable, autonomic distributed object storage cluster +developed as part of the Ceph distributed storage system. This is a +shared library allowing applications to access the distributed object +store using a simple file-like interface. + +%package -n librados2-devel +Summary: RADOS headers +Group: Development/Libraries +License: LGPL-2.0 +Requires: librados2 = %{epoch}:%{version}-%{release} +Obsoletes: ceph-devel < %{epoch}:%{version}-%{release} +%description -n librados2-devel +This package contains libraries and headers needed to develop programs +that use RADOS object store. + +%package -n librgw2 +Summary: RADOS gateway client library +Group: System Environment/Libraries +License: LGPL-2.0 +Requires: librados2 = %{epoch}:%{version}-%{release} +%description -n librgw2 +This package provides a library implementation of the RADOS gateway +(distributed object store with S3 and Swift personalities). + +%package -n librgw2-devel +Summary: RADOS gateway client library +Group: Development/Libraries +License: LGPL-2.0 +Requires: librados2 = %{epoch}:%{version}-%{release} +%description -n librgw2-devel +This package contains libraries and headers needed to develop programs +that use RADOS gateway client library. 
+ +%package -n python-rados +Summary: Python libraries for the RADOS object store +Group: System Environment/Libraries +License: LGPL-2.0 +Requires: librados2 = %{epoch}:%{version}-%{release} +Obsoletes: python-ceph < %{epoch}:%{version}-%{release} +%description -n python-rados +This package contains Python libraries for interacting with Cephs RADOS +object store. + +%package -n libradosstriper1 +Summary: RADOS striping interface +Group: System Environment/Libraries +License: LGPL-2.0 +Requires: librados2 = %{epoch}:%{version}-%{release} +%description -n libradosstriper1 +Striping interface built on top of the rados library, allowing +to stripe bigger objects onto several standard rados objects using +an interface very similar to the rados one. + +%package -n libradosstriper1-devel +Summary: RADOS striping interface headers +Group: Development/Libraries +License: LGPL-2.0 +Requires: libradosstriper1 = %{epoch}:%{version}-%{release} +Requires: librados2-devel = %{epoch}:%{version}-%{release} +Obsoletes: ceph-devel < %{epoch}:%{version}-%{release} +%description -n libradosstriper1-devel +This package contains libraries and headers needed to develop programs +that use RADOS striping interface. + +%package -n librbd1 +Summary: RADOS block device client library +Group: System Environment/Libraries +License: LGPL-2.0 +Requires: librados2 = %{epoch}:%{version}-%{release} +%if 0%{?rhel} || 0%{?fedora} +Obsoletes: ceph-libs < %{epoch}:%{version}-%{release} +%endif +%description -n librbd1 +RBD is a block device striped across multiple distributed objects in +RADOS, a reliable, autonomic distributed object storage cluster +developed as part of the Ceph distributed storage system. This is a +shared library allowing applications to manage these block devices. + +%package -n librbd1-devel +Summary: RADOS block device headers +Group: Development/Libraries +License: LGPL-2.0 +Requires: librbd1 = %{epoch}:%{version}-%{release} +Requires: librados2-devel = %{epoch}:%{version}-%{release} +Obsoletes: ceph-devel < %{epoch}:%{version}-%{release} +%description -n librbd1-devel +This package contains libraries and headers needed to develop programs +that use RADOS block device. + +%package -n python-rbd +Summary: Python libraries for the RADOS block device +Group: System Environment/Libraries +License: LGPL-2.0 +Requires: librbd1 = %{epoch}:%{version}-%{release} +Requires: python-rados = %{epoch}:%{version}-%{release} +Obsoletes: python-ceph < %{epoch}:%{version}-%{release} +%description -n python-rbd +This package contains Python libraries for interacting with Cephs RADOS +block device. + +%package -n libcephfs1 +Summary: Ceph distributed file system client library +Group: System Environment/Libraries +License: LGPL-2.0 +%if 0%{?rhel} || 0%{?fedora} +Obsoletes: ceph-libs < %{epoch}:%{version}-%{release} +Obsoletes: ceph-libcephfs +%endif +%description -n libcephfs1 +Ceph is a distributed network file system designed to provide excellent +performance, reliability, and scalability. This is a shared library +allowing applications to access a Ceph distributed file system via a +POSIX-like interface. 
+ +%package -n libcephfs1-devel +Summary: Ceph distributed file system headers +Group: Development/Libraries +License: LGPL-2.0 +Requires: libcephfs1 = %{epoch}:%{version}-%{release} +Requires: librados2-devel = %{epoch}:%{version}-%{release} +Obsoletes: ceph-devel < %{epoch}:%{version}-%{release} +%description -n libcephfs1-devel +This package contains libraries and headers needed to develop programs +that use Cephs distributed file system. + +%package -n python-cephfs +Summary: Python libraries for Ceph distributed file system +Group: System Environment/Libraries +License: LGPL-2.0 +Requires: libcephfs1 = %{epoch}:%{version}-%{release} +Requires: python-rados = %{epoch}:%{version}-%{release} +Obsoletes: python-ceph < %{epoch}:%{version}-%{release} +%description -n python-cephfs +This package contains Python libraries for interacting with Cephs distributed +file system. + +%package -n ceph-test +Summary: Ceph benchmarks and test tools +Group: System Environment/Libraries +License: LGPL-2.0 +Requires: ceph-common +Requires: xmlstarlet +%description -n ceph-test +This package contains Ceph benchmarks and test tools. + +%if 0%{with cephfs_java} + +%package -n libcephfs_jni1 +Summary: Java Native Interface library for CephFS Java bindings +Group: System Environment/Libraries +License: LGPL-2.0 +Requires: java +Requires: libcephfs1 = %{epoch}:%{version}-%{release} +%description -n libcephfs_jni1 +This package contains the Java Native Interface library for CephFS Java +bindings. + +%package -n libcephfs_jni1-devel +Summary: Development files for CephFS Java Native Interface library +Group: System Environment/Libraries +License: LGPL-2.0 +Requires: java +Requires: libcephfs_jni1 = %{epoch}:%{version}-%{release} +Obsoletes: ceph-devel < %{epoch}:%{version}-%{release} +%description -n libcephfs_jni1-devel +This package contains the development files for CephFS Java Native Interface +library. + +%package -n cephfs-java +Summary: Java libraries for the Ceph File System +Group: System Environment/Libraries +License: LGPL-2.0 +Requires: java +Requires: libcephfs_jni1 = %{epoch}:%{version}-%{release} +Requires: junit +BuildRequires: junit +%description -n cephfs-java +This package contains the Java libraries for the Ceph File System. + +%endif + +%if 0%{with selinux} + +%package selinux +Summary: SELinux support for Ceph MON, OSD and MDS +Group: System Environment/Base +Requires: ceph-base = %{epoch}:%{version}-%{release} +Requires: policycoreutils, libselinux-utils +Requires(post): selinux-policy-base >= %{_selinux_policy_version}, policycoreutils, gawk +Requires(postun): policycoreutils +%description selinux +This package contains SELinux support for Ceph MON, OSD and MDS. The package +also performs file-system relabelling which can take a long time on heavily +populated file-systems. + +%endif + +%if 0%{with libs_compat} + +%package libs-compat +Summary: Meta package to include ceph libraries +Group: System Environment/Libraries +License: LGPL-2.0 +Obsoletes: ceph-libs +Requires: librados2 = %{epoch}:%{version}-%{release} +Requires: librbd1 = %{epoch}:%{version}-%{release} +Requires: libcephfs1 = %{epoch}:%{version}-%{release} +Provides: ceph-libs + +%description libs-compat +This is a meta package, that pulls in librados2, librbd1 and libcephfs1. It +is included for backwards compatibility with distributions that depend on the +former ceph-libs package, which is now split up into these three subpackages. 
+Packages still depending on ceph-libs should be fixed to depend on librados2, +librbd1 or libcephfs1 instead. + +%endif + +%package devel-compat +Summary: Compatibility package for Ceph headers +Group: Development/Libraries +License: LGPL-2.0 +Obsoletes: ceph-devel +Requires: librados2-devel = %{epoch}:%{version}-%{release} +Requires: libradosstriper1-devel = %{epoch}:%{version}-%{release} +Requires: librbd1-devel = %{epoch}:%{version}-%{release} +Requires: libcephfs1-devel = %{epoch}:%{version}-%{release} +%if 0%{with cephfs_java} +Requires: libcephfs_jni1-devel = %{epoch}:%{version}-%{release} +%endif +Provides: ceph-devel +%description devel-compat +This is a compatibility package to accommodate ceph-devel split into +librados2-devel, librbd1-devel and libcephfs1-devel. Packages still depending +on ceph-devel should be fixed to depend on librados2-devel, librbd1-devel, +libcephfs1-devel or libradosstriper1-devel instead. + +%package -n python-ceph-compat +Summary: Compatibility package for Cephs python libraries +Group: System Environment/Libraries +License: LGPL-2.0 +Obsoletes: python-ceph +Requires: python-rados = %{epoch}:%{version}-%{release} +Requires: python-rbd = %{epoch}:%{version}-%{release} +Requires: python-cephfs = %{epoch}:%{version}-%{release} +Provides: python-ceph +%description -n python-ceph-compat +This is a compatibility package to accommodate python-ceph split into +python-rados, python-rbd and python-cephfs. Packages still depending on +python-ceph should be fixed to depend on python-rados, python-rbd or +python-cephfs instead. + +################################################################################# +# common +################################################################################# +%prep +%setup -q +%patch0001 -p1 +# StarlingX: Copy the .git_version file needed by the build +# This commit SHA is from the upstream src rpm which is the base of this repo branch +# TODO: Add a commit hook to update to our latest commit SHA +cp %{SOURCE11} %{_builddir}/%{name}-%{version}/src/.git_version + +%build +%if 0%{with cephfs_java} +# Find jni.h +for i in /usr/{lib64,lib}/jvm/java/include{,/linux}; do + [ -d $i ] && java_inc="$java_inc -I$i" +done +%endif + +./autogen.sh + +%if %{with lowmem_builder} +RPM_OPT_FLAGS="$RPM_OPT_FLAGS --param ggc-min-expand=20 --param ggc-min-heapsize=32768" +%endif +export RPM_OPT_FLAGS=`echo $RPM_OPT_FLAGS | sed -e 's/i386/i486/'` + +%if 0%{?rhel} && ! 
0%{?centos} +%bcond_without subman +%endif +%bcond_without nss +%bcond_with cryptopp +%if %{without stx} +%bcond_without debug +%bcond_without man_pages +%endif +%bcond_without radosgw +%if %{without lttng} +%bcond_with lttng +%bcond_with babeltrace +%endif + +%stx_check_config +%{configure} CPPFLAGS="$java_inc" \ + --prefix=/usr \ + --libexecdir=%{_libexecdir} \ + --localstatedir=%{_localstatedir} \ + --sysconfdir=%{_sysconfdir} \ + %configure_feature client \ + %configure_feature server \ + %configure_feature subman \ + %configure_feature gitversion \ + %configure_feature coverage \ + %configure_feature pgrefdebugging \ + %configure_feature cephfs_java \ + %configure_feature xio \ + %configure_feature valgrind \ + --with-systemdsystemunitdir=%_unitdir \ + --docdir=%{_docdir}/ceph \ + %configure_package man_pages \ + --mandir="%_mandir" \ + %configure_package rados \ + %configure_package rbd \ + %configure_package cython \ + %configure_package cephfs \ + %configure_package radosgw \ + %configure_package selinux \ + %configure_package radosstriper \ + %configure_package mon \ + %configure_package osd \ + %configure_package mds \ + %configure_package cryptopp \ + %configure_package nss \ + %configure_package profiler \ + %configure_package debug \ + %configure_package fuse \ + %configure_package jemalloc \ + %configure_package tc \ + %configure_package spdk \ + %configure_package libatomic_ops \ + %configure_package ocf \ + %configure_package kinetic \ + %configure_package librocksdb \ + --with-librocksdb-static=check \ + %configure_package libaio \ + %configure_package libxfs \ + %configure_package libzfs \ + %configure_package lttng \ + %configure_package babeltrace \ + %configure_package eventfd \ + %configure_package openldap \ + $CEPH_EXTRA_CONFIGURE_ARGS \ + %{?_with_ocf} \ +%if %{without tcmalloc} + --without-tcmalloc \ +%endif + CFLAGS="$RPM_OPT_FLAGS" CXXFLAGS="$RPM_OPT_FLAGS" + +%if %{with lowmem_builder} +%if 0%{?jobs} > 8 +%define _smp_mflags -j8 +%endif +%endif + +make %{?_smp_mflags} + + +%if 0%{with tests} +%check +# run in-tree unittests +make %{?_smp_mflags} check + +%endif + + + +%install +make DESTDIR=%{buildroot} install +find %{buildroot} -type f -name "*.la" -exec rm -f {} ';' +find %{buildroot} -type f -name "*.a" -exec rm -f {} ';' +install -m 0644 -D src/etc-rbdmap %{buildroot}%{_sysconfdir}/ceph/rbdmap +%if 0%{?fedora} || 0%{?rhel} +install -m 0644 -D etc/sysconfig/ceph %{buildroot}%{_sysconfdir}/sysconfig/ceph +%endif +%if 0%{?suse_version} +install -m 0644 -D etc/sysconfig/ceph %{buildroot}%{_localstatedir}/adm/fillup-templates/sysconfig.%{name} +%endif +%if %{without stx} +install -m 0644 -D systemd/ceph.tmpfiles.d %{buildroot}%{_tmpfilesdir}/ceph-common.conf +%endif +mkdir -p %{buildroot}%{_sbindir} +install -m 0755 -D systemd/ceph %{buildroot}%{_sbindir}/rcceph +%if %{without stx} +install -m 0644 -D systemd/50-ceph.preset %{buildroot}%{_libexecdir}/systemd/system-preset/50-ceph.preset +%endif + +install -m 0644 -D src/logrotate.conf %{buildroot}%{_sysconfdir}/logrotate.d/ceph +chmod 0644 %{buildroot}%{_docdir}/ceph/sample.ceph.conf +chmod 0644 %{buildroot}%{_docdir}/ceph/sample.fetch_config + +# firewall templates and /sbin/mount.ceph symlink +%if 0%{?suse_version} +install -m 0644 -D etc/sysconfig/SuSEfirewall2.d/services/ceph-mon %{buildroot}%{_sysconfdir}/sysconfig/SuSEfirewall2.d/services/ceph-mon +install -m 0644 -D etc/sysconfig/SuSEfirewall2.d/services/ceph-osd-mds %{buildroot}%{_sysconfdir}/sysconfig/SuSEfirewall2.d/services/ceph-osd-mds +mkdir -p 
%{buildroot}/sbin +ln -sf %{_sbindir}/mount.ceph %{buildroot}/sbin/mount.ceph +%endif + +# udev rules +install -m 0644 -D udev/50-rbd.rules %{buildroot}%{_udevrulesdir}/50-rbd.rules +install -m 0640 -D udev/60-ceph-by-parttypeuuid.rules %{buildroot}%{_udevrulesdir}/60-ceph-by-parttypeuuid.rules +%if %{without stx} +install -m 0644 -D udev/95-ceph-osd.rules %{buildroot}%{_udevrulesdir}/95-ceph-osd.rules +%endif + +#set up placeholder directories +mkdir -p %{buildroot}%{_sysconfdir}/ceph +mkdir -p %{buildroot}%{_localstatedir}/run/ceph +mkdir -p %{buildroot}%{_localstatedir}/log/ceph +mkdir -p %{buildroot}%{_localstatedir}/lib/ceph/tmp +mkdir -p %{buildroot}%{_localstatedir}/lib/ceph/mon +mkdir -p %{buildroot}%{_localstatedir}/lib/ceph/osd +mkdir -p %{buildroot}%{_localstatedir}/lib/ceph/mds +mkdir -p %{buildroot}%{_localstatedir}/lib/ceph/radosgw +mkdir -p %{buildroot}%{_localstatedir}/lib/ceph/bootstrap-osd +mkdir -p %{buildroot}%{_localstatedir}/lib/ceph/bootstrap-mds +mkdir -p %{buildroot}%{_localstatedir}/lib/ceph/bootstrap-rgw + +%if %{with stx} +install -d -m 750 %{buildroot}%{_sysconfdir}/services.d/controller +install -d -m 750 %{buildroot}%{_sysconfdir}/services.d/storage +mkdir -p %{buildroot}%{_initrddir} +mkdir -p %{buildroot}%{_sysconfdir}/ceph +mkdir -p %{buildroot}%{_unitdir} + +install -m 750 %{SOURCE1} %{buildroot}%{_sysconfdir}/services.d/controller/ +install -m 750 %{SOURCE1} %{buildroot}%{_sysconfdir}/services.d/storage/ +install -m 750 %{SOURCE2} %{buildroot}%{_initrddir}/ +install -m 750 %{SOURCE3} %{buildroot}%{_sysconfdir}/ceph/ +install -m 750 %{SOURCE4} %{buildroot}/%{_initrddir}/ceph-init-wrapper +install -m 640 %{SOURCE5} %{buildroot}%{_sysconfdir}/ceph/ +install -m 700 %{SOURCE6} %{buildroot}/usr/sbin/ceph-manage-journal +install -m 700 %{SOURCE7} %{buildroot}/usr/sbin/osd-wait-status +install -m 644 %{SOURCE8} $RPM_BUILD_ROOT/%{_unitdir}/ceph.service +install -m 644 %{SOURCE9} $RPM_BUILD_ROOT/%{_unitdir}/ceph-rest-api.service +install -m 644 %{SOURCE10} $RPM_BUILD_ROOT/%{_unitdir}/ceph-radosgw.service +install -m 700 %{SOURCE12} %{buildroot}%{_sbindir}/ceph-preshutdown.sh +install -D -m 644 %{SOURCE13} $RPM_BUILD_ROOT/%{_sysconfdir}/systemd/system/docker.service.d/starlingx-docker-override.conf + +install -m 750 src/init-ceph %{buildroot}/%{_initrddir}/ceph +install -m 750 src/init-radosgw %{buildroot}/%{_initrddir}/ceph-radosgw +install -m 750 src/init-rbdmap %{buildroot}/%{_initrddir}/rbdmap +install -d -m 750 %{buildroot}/var/log/radosgw +%endif + +%clean +rm -rf %{buildroot} + +################################################################################# +# files and systemd scriptlets +################################################################################# +%files + +%files base +%defattr(-,root,root,-) +%docdir %{_docdir} +%dir %{_docdir}/ceph +%{_docdir}/ceph/sample.ceph.conf +%{_docdir}/ceph/sample.fetch_config +%{_bindir}/crushtool +%{_bindir}/monmaptool +%{_bindir}/osdmaptool +%{_bindir}/ceph-run +%{_bindir}/ceph-detect-init +%if %{with debug} +%{_bindir}/ceph-client-debug +%endif +%if %{with cephfs} +%{_bindir}/cephfs +%endif +%if %{with stx} +%{_initrddir}/ceph +%{_initrddir}/ceph-rest-api +%{_initrddir}/ceph-init-wrapper +%{_sysconfdir}/ceph/ceph.conf.pmon +%config(noreplace) %{_sysconfdir}/ceph/ceph.conf +%{_sysconfdir}/services.d/* +%{_sbindir}/ceph-manage-journal +%{_sbindir}/ceph-preshutdown.sh +%{_sysconfdir}/systemd/system/docker.service.d/starlingx-docker-override.conf +%endif +%if %{without stx} 
+%{_unitdir}/ceph-create-keys@.service +%{_libexecdir}/systemd/system-preset/50-ceph.preset +%endif +%{_sbindir}/ceph-create-keys +%{_sbindir}/rcceph +%dir %{_libexecdir}/ceph +%{_libexecdir}/ceph/ceph_common.sh +%dir %{_libdir}/rados-classes +%{_libdir}/rados-classes/* +%dir %{_libdir}/ceph +%dir %{_libdir}/ceph/erasure-code +%{_libdir}/ceph/erasure-code/libec_*.so* +%dir %{_libdir}/ceph/compressor +%{_libdir}/ceph/compressor/libceph_*.so* +%if %{with lttng} +%{_libdir}/libos_tp.so* +%{_libdir}/libosd_tp.so* +%endif +%config %{_sysconfdir}/bash_completion.d/ceph +%config(noreplace) %{_sysconfdir}/logrotate.d/ceph +%if 0%{?fedora} || 0%{?rhel} +%config(noreplace) %{_sysconfdir}/sysconfig/ceph +%endif +%if 0%{?suse_version} +%{_localstatedir}/adm/fillup-templates/sysconfig.* +%config %{_sysconfdir}/sysconfig/SuSEfirewall2.d/services/ceph-mon +%config %{_sysconfdir}/sysconfig/SuSEfirewall2.d/services/ceph-osd-mds +%endif +%{_unitdir}/ceph.target +%if %{with stx} +%{_unitdir}/ceph.service +%{_unitdir}/ceph-rest-api.service +%{_unitdir}/ceph-radosgw.service +%endif +%{python_sitelib}/ceph_detect_init* +%{python_sitelib}/ceph_disk* +%if %{with man_pages} +%{_mandir}/man8/ceph-deploy.8* +%{_mandir}/man8/ceph-detect-init.8* +%{_mandir}/man8/ceph-create-keys.8* +%{_mandir}/man8/ceph-run.8* +%{_mandir}/man8/crushtool.8* +%{_mandir}/man8/osdmaptool.8* +%{_mandir}/man8/monmaptool.8* +%{_mandir}/man8/cephfs.8* +%endif +#set up placeholder directories +%attr(750,ceph,ceph) %dir %{_localstatedir}/lib/ceph/tmp +%attr(750,ceph,ceph) %dir %{_localstatedir}/lib/ceph/bootstrap-osd +%attr(750,ceph,ceph) %dir %{_localstatedir}/lib/ceph/bootstrap-mds +%attr(750,ceph,ceph) %dir %{_localstatedir}/lib/ceph/bootstrap-rgw + +%if %{without stx} +%post base +/sbin/ldconfig +%if 0%{?suse_version} +%fillup_only +if [ $1 -ge 1 ] ; then + /usr/bin/systemctl preset ceph.target >/dev/null 2>&1 || : +fi +%endif +%if 0%{?fedora} || 0%{?rhel} +%systemd_post ceph.target +%endif +/usr/bin/systemctl start ceph.target >/dev/null 2>&1 || : + +%preun base +%if 0%{?suse_version} +%service_del_preun ceph.target +%endif +%if 0%{?fedora} || 0%{?rhel} +%systemd_preun ceph.target +%endif + +%postun base +/sbin/ldconfig +%if 0%{?suse_version} +DISABLE_RESTART_ON_UPDATE="yes" +%service_del_postun ceph.target +%endif +%if 0%{?fedora} || 0%{?rhel} +%systemd_postun ceph.target +%endif +%endif + +################################################################################# +%files common +%defattr(-,root,root,-) +%{_bindir}/ceph +%{_bindir}/ceph-authtool +%{_bindir}/ceph-conf +%{_bindir}/ceph-dencoder +%{_bindir}/ceph-rbdnamer +%{_bindir}/ceph-syn +%{_bindir}/ceph-crush-location +%{_bindir}/cephfs-data-scan +%{_bindir}/cephfs-journal-tool +%{_bindir}/cephfs-table-tool +%{_bindir}/rados +%{_bindir}/rbd +%{_bindir}/rbd-replay +%{_bindir}/rbd-replay-many +%{_bindir}/rbdmap +%if %{with cephfs} +%{_sbindir}/mount.ceph +%endif +%if 0%{?suse_version} +/sbin/mount.ceph +%endif +%if %{with lttng} +%{_bindir}/rbd-replay-prep +%endif +%{_bindir}/ceph-post-file +%{_bindir}/ceph-brag +%if %{without stx} +%{_tmpfilesdir}/ceph-common.conf +%endif +%if %{with man_pages} +%{_mandir}/man8/ceph-authtool.8* +%{_mandir}/man8/ceph-conf.8* +%{_mandir}/man8/ceph-dencoder.8* +%{_mandir}/man8/ceph-rbdnamer.8* +%{_mandir}/man8/ceph-syn.8* +%{_mandir}/man8/ceph-post-file.8* +%{_mandir}/man8/ceph.8* +%{_mandir}/man8/mount.ceph.8* +%{_mandir}/man8/rados.8* +%{_mandir}/man8/rbd.8* +%{_mandir}/man8/rbdmap.8* +%{_mandir}/man8/rbd-replay.8* 
+%{_mandir}/man8/rbd-replay-many.8* +%{_mandir}/man8/rbd-replay-prep.8* +%endif +%dir %{_datadir}/ceph/ +%{_datadir}/ceph/known_hosts_drop.ceph.com +%{_datadir}/ceph/id_rsa_drop.ceph.com +%{_datadir}/ceph/id_rsa_drop.ceph.com.pub +%dir %{_sysconfdir}/ceph/ +%config %{_sysconfdir}/bash_completion.d/rados +%config %{_sysconfdir}/bash_completion.d/rbd +%attr(640,root,root) %config(noreplace) %{_sysconfdir}/ceph/rbdmap +%if %{with stx} +%{_initrddir}/rbdmap +%else +%{_unitdir}/rbdmap.service +%endif +%{python_sitelib}/ceph_argparse.py* +%{python_sitelib}/ceph_daemon.py* +%dir %{_udevrulesdir} +%{_udevrulesdir}/50-rbd.rules +%attr(3770,ceph,ceph) %dir %{_localstatedir}/log/ceph/ +%attr(750,ceph,ceph) %dir %{_localstatedir}/lib/ceph/ + +%pre common +%if %{without stx} +CEPH_GROUP_ID=167 +CEPH_USER_ID=167 +%if 0%{?rhel} || 0%{?fedora} +/usr/sbin/groupadd ceph -g $CEPH_GROUP_ID -o -r 2>/dev/null || : +/usr/sbin/useradd ceph -u $CEPH_USER_ID -o -r -g ceph -s /sbin/nologin -c "Ceph daemons" -d %{_localstatedir}/lib/ceph 2>/dev/null || : +%endif +%if 0%{?suse_version} +if ! getent group ceph >/dev/null ; then + CEPH_GROUP_ID_OPTION="" + getent group $CEPH_GROUP_ID >/dev/null || CEPH_GROUP_ID_OPTION="-g $CEPH_GROUP_ID" + groupadd ceph $CEPH_GROUP_ID_OPTION -r 2>/dev/null || : +fi +if ! getent passwd ceph >/dev/null ; then + CEPH_USER_ID_OPTION="" + getent passwd $CEPH_USER_ID >/dev/null || CEPH_USER_ID_OPTION="-u $CEPH_USER_ID" + useradd ceph $CEPH_USER_ID_OPTION -r -g ceph -s /sbin/nologin 2>/dev/null || : +fi +usermod -c "Ceph storage service" \ + -d %{_localstatedir}/lib/ceph \ + -g ceph \ + -s /sbin/nologin \ + ceph +%endif +exit 0 +%endif + +%post common +%if %{without stx} +%tmpfiles_create %{_tmpfilesdir}/ceph-common.conf +%endif + +%postun common +# Package removal cleanup +if [ "$1" -eq "0" ] ; then + rm -rf %{_localstatedir}/log/ceph + rm -rf %{_sysconfdir}/ceph +fi + +################################################################################# +%files mds +%{_bindir}/ceph-mds +%if %{with man_pages} +%{_mandir}/man8/ceph-mds.8* +%endif +%{_unitdir}/ceph-mds@.service +%{_unitdir}/ceph-mds.target +%attr(750,ceph,ceph) %dir %{_localstatedir}/lib/ceph/mds + +%post mds +%if 0%{?suse_version} +if [ $1 -ge 1 ] ; then + /usr/bin/systemctl preset ceph-mds@\*.service ceph-mds.target >/dev/null 2>&1 || : +fi +%endif +%if 0%{?fedora} || 0%{?rhel} +%systemd_post ceph-mds@\*.service ceph-mds.target +%endif +/usr/bin/systemctl start ceph-mds.target >/dev/null 2>&1 || : + +%preun mds +%if 0%{?suse_version} +%service_del_preun ceph-mds@\*.service ceph-mds.target +%endif +%if 0%{?fedora} || 0%{?rhel} +%systemd_preun ceph-mds@\*.service ceph-mds.target +%endif + +%postun mds +test -n "$FIRST_ARG" || FIRST_ARG=$1 +%if 0%{?suse_version} +DISABLE_RESTART_ON_UPDATE="yes" +%service_del_postun ceph-mds@\*.service ceph-mds.target +%endif +%if 0%{?fedora} || 0%{?rhel} +%systemd_postun ceph-mds@\*.service ceph-mds.target +%endif +if [ $FIRST_ARG -ge 1 ] ; then + # Restart on upgrade, but only if "CEPH_AUTO_RESTART_ON_UPGRADE" is set to + # "yes". In any case: if units are not running, do not touch them. 
+ SYSCONF_CEPH=%{_sysconfdir}/sysconfig/ceph + if [ -f $SYSCONF_CEPH -a -r $SYSCONF_CEPH ] ; then + source $SYSCONF_CEPH + fi + if [ "X$CEPH_AUTO_RESTART_ON_UPGRADE" = "Xyes" ] ; then + /usr/bin/systemctl try-restart ceph-mds@\*.service > /dev/null 2>&1 || : + fi +fi + +################################################################################# +%files mon +%{_bindir}/ceph-mon +%{_bindir}/ceph-rest-api +%if %{with man_pages} +%{_mandir}/man8/ceph-mon.8* +%{_mandir}/man8/ceph-rest-api.8* +%endif +%{python_sitelib}/ceph_rest_api.py* +%if %{without stx} +%{_unitdir}/ceph-mon@.service +%{_unitdir}/ceph-mon.target +%else +%exclude %{_unitdir}/ceph-mon@.service +%exclude %{_unitdir}/ceph-mon.target +%endif +%attr(750,ceph,ceph) %dir %{_localstatedir}/lib/ceph/mon + +%if %{without stx} +%post mon +%if 0%{?suse_version} +if [ $1 -ge 1 ] ; then + /usr/bin/systemctl preset ceph-create-keys@\*.service ceph-mon@\*.service ceph-mon.target >/dev/null 2>&1 || : +fi +%endif +%if 0%{?fedora} || 0%{?rhel} +%systemd_post ceph-create-keys@\*.service ceph-mon@\*.service ceph-mon.target +/usr/bin/systemctl start ceph-mon.target >/dev/null 2>&1 || : +%endif + +%preun mon +%if 0%{?suse_version} +%service_del_preun ceph-create-keys@\*.service ceph-mon@\*.service ceph-mon.target +%endif +%if 0%{?fedora} || 0%{?rhel} +%systemd_preun ceph-create-keys@\*.service ceph-mon@\*.service ceph-mon.target +%endif + +%postun mon +test -n "$FIRST_ARG" || FIRST_ARG=$1 +%if 0%{?suse_version} +DISABLE_RESTART_ON_UPDATE="yes" +%service_del_postun ceph-create-keys@\*.service ceph-mon@\*.service ceph-mon.target +%endif +%if 0%{?fedora} || 0%{?rhel} +%systemd_postun ceph-create-keys@\*.service ceph-mon@\*.service ceph-mon.target +%endif +if [ $FIRST_ARG -ge 1 ] ; then + # Restart on upgrade, but only if "CEPH_AUTO_RESTART_ON_UPGRADE" is set to + # "yes". In any case: if units are not running, do not touch them. 
+ SYSCONF_CEPH=%{_sysconfdir}/sysconfig/ceph + if [ -f $SYSCONF_CEPH -a -r $SYSCONF_CEPH ] ; then + source $SYSCONF_CEPH + fi + if [ "X$CEPH_AUTO_RESTART_ON_UPGRADE" = "Xyes" ] ; then + /usr/bin/systemctl try-restart ceph-create-keys@\*.service ceph-mon@\*.service > /dev/null 2>&1 || : + fi +fi +%endif + +################################################################################# +%if %{with fuse} +%files fuse +%defattr(-,root,root,-) +%{_bindir}/ceph-fuse +%if %{with man_pages} +%{_mandir}/man8/ceph-fuse.8* +%endif +%{_sbindir}/mount.fuse.ceph +%endif + +################################################################################# +%if %{with fuse} +%files -n rbd-fuse +%defattr(-,root,root,-) +%{_bindir}/rbd-fuse +%if %{with man_pages} +%{_mandir}/man8/rbd-fuse.8* +%endif +%endif + +################################################################################# +%files -n rbd-mirror +%defattr(-,root,root,-) +%{_bindir}/rbd-mirror +%if %{with man_pages} +%{_mandir}/man8/rbd-mirror.8* +%endif +%if %{without stx} +%{_unitdir}/ceph-rbd-mirror@.service +%{_unitdir}/ceph-rbd-mirror.target +%endif + +%if %{without stx} +%post -n rbd-mirror +%if 0%{?suse_version} +if [ $1 -ge 1 ] ; then + /usr/bin/systemctl preset ceph-rbd-mirror@\*.service ceph-rbd-mirror.target >/dev/null 2>&1 || : +fi +%endif +%if 0%{?fedora} || 0%{?rhel} +%systemd_post ceph-rbd-mirror@\*.service ceph-rbd-mirror.target +%endif +/usr/bin/systemctl start ceph-rbd-mirror.target >/dev/null 2>&1 || : + +%preun -n rbd-mirror +%if 0%{?suse_version} +%service_del_preun ceph-rbd-mirror@\*.service ceph-rbd-mirror.target +%endif +%if 0%{?fedora} || 0%{?rhel} +%systemd_preun ceph-rbd-mirror@\*.service ceph-rbd-mirror.target +%endif + +%postun -n rbd-mirror +test -n "$FIRST_ARG" || FIRST_ARG=$1 +%if 0%{?suse_version} +DISABLE_RESTART_ON_UPDATE="yes" +%service_del_postun ceph-rbd-mirror@\*.service ceph-rbd-mirror.target +%endif +%if 0%{?fedora} || 0%{?rhel} +%systemd_postun ceph-rbd-mirror@\*.service ceph-rbd-mirror.target +%endif +if [ $FIRST_ARG -ge 1 ] ; then + # Restart on upgrade, but only if "CEPH_AUTO_RESTART_ON_UPGRADE" is set to + # "yes". In any case: if units are not running, do not touch them. 
+ SYSCONF_CEPH=%{_sysconfdir}/sysconfig/ceph + if [ -f $SYSCONF_CEPH -a -r $SYSCONF_CEPH ] ; then + source $SYSCONF_CEPH + fi + if [ "X$CEPH_AUTO_RESTART_ON_UPGRADE" = "Xyes" ] ; then + /usr/bin/systemctl try-restart ceph-rbd-mirror@\*.service > /dev/null 2>&1 || : + fi +fi +%endif + +################################################################################# +%files -n rbd-nbd +%defattr(-,root,root,-) +%{_bindir}/rbd-nbd +%if %{with man_pages} +%{_mandir}/man8/rbd-nbd.8* +%endif + +################################################################################# +%files radosgw +%defattr(-,root,root,-) +%{_bindir}/radosgw +%{_bindir}/radosgw-admin +%{_bindir}/radosgw-token +%{_bindir}/radosgw-object-expirer +%if %{with man_pages} +%{_mandir}/man8/radosgw.8* +%{_mandir}/man8/radosgw-admin.8* +%endif +%config %{_sysconfdir}/bash_completion.d/radosgw-admin +%dir %{_localstatedir}/lib/ceph/radosgw +%if %{with stx} +%{_initrddir}/ceph-radosgw +%dir /var/log/radosgw +%else +%{_unitdir}/ceph-radosgw@.service +%{_unitdir}/ceph-radosgw.target +%endif + +%if %{without stx} +%post radosgw +%if 0%{?suse_version} +if [ $1 -ge 1 ] ; then + /usr/bin/systemctl preset ceph-radosgw@\*.service ceph-radosgw.target >/dev/null 2>&1 || : +fi +%endif +%if 0%{?fedora} || 0%{?rhel} +%systemd_post ceph-radosgw@\*.service ceph-radosgw.target +%endif +/usr/bin/systemctl start ceph-radosgw.target >/dev/null 2>&1 || : + +%preun radosgw +%if 0%{?suse_version} +%service_del_preun ceph-radosgw@\*.service ceph-radosgw.target +%endif +%if 0%{?fedora} || 0%{?rhel} +%systemd_preun ceph-radosgw@\*.service ceph-radosgw.target +%endif + +%postun radosgw +test -n "$FIRST_ARG" || FIRST_ARG=$1 +%if 0%{?suse_version} +DISABLE_RESTART_ON_UPDATE="yes" +%service_del_postun ceph-radosgw@\*.service ceph-radosgw.target +%endif +%if 0%{?fedora} || 0%{?rhel} +%systemd_postun ceph-radosgw@\*.service ceph-radosgw.target +%endif +if [ $FIRST_ARG -ge 1 ] ; then + # Restart on upgrade, but only if "CEPH_AUTO_RESTART_ON_UPGRADE" is set to + # "yes". In any case: if units are not running, do not touch them. + SYSCONF_CEPH=%{_sysconfdir}/sysconfig/ceph + if [ -f $SYSCONF_CEPH -a -r $SYSCONF_CEPH ] ; then + source $SYSCONF_CEPH + fi + if [ "X$CEPH_AUTO_RESTART_ON_UPGRADE" = "Xyes" ] ; then + /usr/bin/systemctl try-restart ceph-radosgw@\*.service > /dev/null 2>&1 || : + fi +fi +%endif + +################################################################################# +%files osd +%{_bindir}/ceph-clsinfo +%{_bindir}/ceph-bluefs-tool +%{_bindir}/ceph-objectstore-tool +%{_bindir}/ceph-osd +%{_sbindir}/ceph-disk +%{_sbindir}/ceph-disk-udev +%if %{with stx} +%{_sbindir}/ceph-manage-journal +%endif +%{_libexecdir}/ceph/ceph-osd-prestart.sh +%dir %{_udevrulesdir} +%{_udevrulesdir}/60-ceph-by-parttypeuuid.rules +%if %{without stx} +%{_udevrulesdir}/95-ceph-osd.rules +%endif +%if %{with man_pages} +%{_mandir}/man8/ceph-clsinfo.8* +%{_mandir}/man8/ceph-disk.8* +%{_mandir}/man8/ceph-osd.8* +%endif +%if 0%{?rhel} && ! 
0%{?centos} +%{_sysconfdir}/cron.hourly/subman +%endif +%if %{without stx} +%{_unitdir}/ceph-osd@.service +%{_unitdir}/ceph-osd.target +%{_unitdir}/ceph-disk@.service +%endif +%attr(750,ceph,ceph) %dir %{_localstatedir}/lib/ceph/osd + +%if %{without stx} +%post osd +%if 0%{?suse_version} +if [ $1 -ge 1 ] ; then + /usr/bin/systemctl preset ceph-disk@\*.service ceph-osd@\*.service ceph-osd.target >/dev/null 2>&1 || : +fi +%endif +%if 0%{?fedora} || 0%{?rhel} +%systemd_post ceph-disk@\*.service ceph-osd@\*.service ceph-osd.target +%endif +/usr/bin/systemctl start ceph-osd.target >/dev/null 2>&1 || : + +%preun osd +%if 0%{?suse_version} +%service_del_preun ceph-disk@\*.service ceph-osd@\*.service ceph-osd.target +%endif +%if 0%{?fedora} || 0%{?rhel} +%systemd_preun ceph-disk@\*.service ceph-osd@\*.service ceph-osd.target +%endif + +%postun osd +test -n "$FIRST_ARG" || FIRST_ARG=$1 +%if 0%{?suse_version} +DISABLE_RESTART_ON_UPDATE="yes" +%service_del_postun ceph-disk@\*.service ceph-osd@\*.service ceph-osd.target +%endif +%if 0%{?fedora} || 0%{?rhel} +%systemd_postun ceph-disk@\*.service ceph-osd@\*.service ceph-osd.target +%endif +if [ $FIRST_ARG -ge 1 ] ; then + # Restart on upgrade, but only if "CEPH_AUTO_RESTART_ON_UPGRADE" is set to + # "yes". In any case: if units are not running, do not touch them. + SYSCONF_CEPH=%{_sysconfdir}/sysconfig/ceph + if [ -f $SYSCONF_CEPH -a -r $SYSCONF_CEPH ] ; then + source $SYSCONF_CEPH + fi + if [ "X$CEPH_AUTO_RESTART_ON_UPGRADE" = "Xyes" ] ; then + /usr/bin/systemctl try-restart ceph-disk@\*.service ceph-osd@\*.service > /dev/null 2>&1 || : + fi +fi +%endif + +################################################################################# +%if %{with ocf} + +%files resource-agents +%defattr(0755,root,root,-) +# N.B. 
src/ocf/Makefile.am uses $(prefix)/lib +%dir %{_prefix}/lib/ocf +%dir %{_prefix}/lib/ocf/resource.d +%dir %{_prefix}/lib/ocf/resource.d/ceph +%{_prefix}/lib/ocf/resource.d/ceph/rbd + +%endif + +################################################################################# +%files -n librados2 +%defattr(-,root,root,-) +%{_libdir}/librados.so.* +%if %{with lttng} +%{_libdir}/librados_tp.so.* +%endif + +%post -n librados2 +/sbin/ldconfig + +%postun -n librados2 +/sbin/ldconfig + +################################################################################# +%files -n librados2-devel +%defattr(-,root,root,-) +%dir %{_includedir}/rados +%{_includedir}/rados/librados.h +%{_includedir}/rados/librados.hpp +%{_includedir}/rados/buffer.h +%{_includedir}/rados/buffer_fwd.h +%{_includedir}/rados/page.h +%{_includedir}/rados/crc32c.h +%{_includedir}/rados/rados_types.h +%{_includedir}/rados/rados_types.hpp +%{_includedir}/rados/memory.h +%{_libdir}/librados.so +%if %{with lttng} +%{_libdir}/librados_tp.so +%endif +%{_bindir}/librados-config +%if %{with man_pages} +%{_mandir}/man8/librados-config.8* +%endif + +################################################################################# +%files -n python-rados +%defattr(-,root,root,-) +%{python_sitearch}/rados.so +%{python_sitearch}/rados-*.egg-info + +################################################################################# +%files -n libradosstriper1 +%defattr(-,root,root,-) +%{_libdir}/libradosstriper.so.* + +%post -n libradosstriper1 +/sbin/ldconfig + +%postun -n libradosstriper1 +/sbin/ldconfig + +################################################################################# +%files -n libradosstriper1-devel +%defattr(-,root,root,-) +%dir %{_includedir}/radosstriper +%{_includedir}/radosstriper/libradosstriper.h +%{_includedir}/radosstriper/libradosstriper.hpp +%{_libdir}/libradosstriper.so + +################################################################################# +%files -n librbd1 +%defattr(-,root,root,-) +%{_libdir}/librbd.so.* +%if %{with lttng} +%{_libdir}/librbd_tp.so.* +%endif + +%post -n librbd1 +/sbin/ldconfig +mkdir -p /usr/lib64/qemu/ +ln -sf %{_libdir}/librbd.so.1 /usr/lib64/qemu/librbd.so.1 + +%postun -n librbd1 +/sbin/ldconfig + +################################################################################# +%files -n librbd1-devel +%defattr(-,root,root,-) +%dir %{_includedir}/rbd +%{_includedir}/rbd/librbd.h +%{_includedir}/rbd/librbd.hpp +%{_includedir}/rbd/features.h +%{_libdir}/librbd.so +%if %{with lttng} +%{_libdir}/librbd_tp.so +%endif + +################################################################################# +%files -n librgw2 +%defattr(-,root,root,-) +%{_libdir}/librgw.so.* + +%post -n librgw2 +/sbin/ldconfig + +%postun -n librgw2 +/sbin/ldconfig + +################################################################################# +%files -n librgw2-devel +%defattr(-,root,root,-) +%dir %{_includedir}/rados +%{_includedir}/rados/librgw.h +%{_includedir}/rados/rgw_file.h +%{_libdir}/librgw.so + +################################################################################# +%files -n python-rbd +%defattr(-,root,root,-) +%{python_sitearch}/rbd.so +%{python_sitearch}/rbd-*.egg-info + +################################################################################# +%if %{with cephfs} +%files -n libcephfs1 +%defattr(-,root,root,-) +%{_libdir}/libcephfs.so.* + +%post -n libcephfs1 +/sbin/ldconfig + +%postun -n libcephfs1 +/sbin/ldconfig +%endif + 
+################################################################################# +%if %{with cephfs} +%files -n libcephfs1-devel +%defattr(-,root,root,-) +%dir %{_includedir}/cephfs +%{_includedir}/cephfs/libcephfs.h +%{_libdir}/libcephfs.so +%endif + +################################################################################# +%if %{with cephfs} +%files -n python-cephfs +%defattr(-,root,root,-) +%{python_sitearch}/cephfs.so +%{python_sitearch}/cephfs-*.egg-info +%{python_sitelib}/ceph_volume_client.py* +%endif + +################################################################################# +%if %{with debug} +%files -n ceph-test +%defattr(-,root,root,-) +%{_bindir}/ceph_bench_log +%{_bindir}/ceph_kvstorebench +%{_bindir}/ceph_multi_stress_watch +%{_bindir}/ceph_erasure_code +%{_bindir}/ceph_erasure_code_benchmark +%{_bindir}/ceph_omapbench +%{_bindir}/ceph_objectstore_bench +%{_bindir}/ceph_perf_objectstore +%{_bindir}/ceph_perf_local +%{_bindir}/ceph_perf_msgr_client +%{_bindir}/ceph_perf_msgr_server +%{_bindir}/ceph_psim +%{_bindir}/ceph_radosacl +%{_bindir}/ceph_rgw_jsonparser +%{_bindir}/ceph_rgw_multiparser +%{_bindir}/ceph_scratchtool +%{_bindir}/ceph_scratchtoolpp +%{_bindir}/ceph_smalliobench +%{_bindir}/ceph_smalliobenchdumb +%{_bindir}/ceph_smalliobenchfs +%{_bindir}/ceph_smalliobenchrbd +%{_bindir}/ceph_test_* +%{_bindir}/librgw_file* +%{_bindir}/ceph_tpbench +%{_bindir}/ceph_xattr_bench +%{_bindir}/ceph-coverage +%{_bindir}/ceph-monstore-tool +%{_bindir}/ceph-osdomap-tool +%{_bindir}/ceph-kvstore-tool +%{_bindir}/ceph-debugpack +%if %{with man_pages} +%{_mandir}/man8/ceph-debugpack.8* +%endif +%dir %{_libdir}/ceph +%{_libdir}/ceph/ceph-monstore-update-crush.sh +%else +# instead of fixing installed but unpackaged files issue we're +# packaging them even if debug build is not enabled +%files -n ceph-test +%defattr(-,root,root,-) +%{_bindir}/ceph-coverage +%{_bindir}/ceph-debugpack +%{_libdir}/ceph/ceph-monstore-update-crush.sh +%endif + +################################################################################# +%if 0%{with cephfs_java} +%files -n libcephfs_jni1 +%defattr(-,root,root,-) +%{_libdir}/libcephfs_jni.so.* + +%post -n libcephfs_jni1 +/sbin/ldconfig + +%postun -n libcephfs_jni1 +/sbin/ldconfig + +################################################################################# +%files -n libcephfs_jni1-devel +%defattr(-,root,root,-) +%{_libdir}/libcephfs_jni.so + +################################################################################# +%files -n cephfs-java +%defattr(-,root,root,-) +%{_javadir}/libcephfs.jar +%{_javadir}/libcephfs-test.jar +%endif + +################################################################################# +%if 0%{with selinux} +%files selinux +%defattr(-,root,root,-) +%attr(0600,root,root) %{_datadir}/selinux/packages/ceph.pp +%{_datadir}/selinux/devel/include/contrib/ceph.if +%if %{with man_pages} +%{_mandir}/man8/ceph_selinux.8* +%endif + +%if %{without stx} +%post selinux +# backup file_contexts before update +. /etc/selinux/config +FILE_CONTEXT=/etc/selinux/${SELINUXTYPE}/contexts/files/file_contexts +cp ${FILE_CONTEXT} ${FILE_CONTEXT}.pre + +# Install the policy +/usr/sbin/semodule -i %{_datadir}/selinux/packages/ceph.pp + +# Load the policy if SELinux is enabled +if ! 
/usr/sbin/selinuxenabled; then + # Do not relabel if selinux is not enabled + exit 0 +fi + +if diff ${FILE_CONTEXT} ${FILE_CONTEXT}.pre > /dev/null 2>&1; then + # Do not relabel if file contexts did not change + exit 0 +fi + +# Check whether the daemons are running +/usr/bin/systemctl status ceph.target > /dev/null 2>&1 +STATUS=$? + +# Stop the daemons if they were running +if test $STATUS -eq 0; then + /usr/bin/systemctl stop ceph.target > /dev/null 2>&1 +fi + +# Now, relabel the files +/usr/sbin/fixfiles -C ${FILE_CONTEXT}.pre restore 2> /dev/null +rm -f ${FILE_CONTEXT}.pre +# The fixfiles command won't fix label for /var/run/ceph +/usr/sbin/restorecon -R /var/run/ceph > /dev/null 2>&1 + +# Start the daemons iff they were running before +if test $STATUS -eq 0; then + /usr/bin/systemctl start ceph.target > /dev/null 2>&1 || : +fi +exit 0 + +%postun selinux +if [ $1 -eq 0 ]; then + # backup file_contexts before update + . /etc/selinux/config + FILE_CONTEXT=/etc/selinux/${SELINUXTYPE}/contexts/files/file_contexts + cp ${FILE_CONTEXT} ${FILE_CONTEXT}.pre + + # Remove the module + /usr/sbin/semodule -n -r ceph > /dev/null 2>&1 + + # Reload the policy if SELinux is enabled + if ! /usr/sbin/selinuxenabled ; then + # Do not relabel if SELinux is not enabled + exit 0 + fi + + # Check whether the daemons are running + /usr/bin/systemctl status ceph.target > /dev/null 2>&1 + STATUS=$? + + # Stop the daemons if they were running + if test $STATUS -eq 0; then + /usr/bin/systemctl stop ceph.target > /dev/null 2>&1 + fi + + /usr/sbin/fixfiles -C ${FILE_CONTEXT}.pre restore 2> /dev/null + rm -f ${FILE_CONTEXT}.pre + # The fixfiles command won't fix label for /var/run/ceph + /usr/sbin/restorecon -R /var/run/ceph > /dev/null 2>&1 + + # Start the daemons if they were running before + if test $STATUS -eq 0; then + /usr/bin/systemctl start ceph.target > /dev/null 2>&1 || : + fi +fi +exit 0 +%endif +%endif # with selinux + +################################################################################# +%if 0%{with libs_compat} +%files libs-compat +# We need an empty %%files list for ceph-libs-compat, to tell rpmbuild to actually +# build this meta package. +%endif + +################################################################################# +%files devel-compat +# We need an empty %%files list for ceph-devel-compat, to tell rpmbuild to +# actually build this meta package. + +################################################################################# +%files -n python-ceph-compat +# We need an empty %%files list for python-ceph-compat, to tell rpmbuild to +# actually build this meta package. + + +%changelog diff --git a/ceph/ceph/centos/patches/0001-Add-hooks-for-orderly-shutdown-on-controller.patch b/ceph/ceph/centos/patches/0001-Add-hooks-for-orderly-shutdown-on-controller.patch new file mode 100644 index 000000000..15bb7c3e5 --- /dev/null +++ b/ceph/ceph/centos/patches/0001-Add-hooks-for-orderly-shutdown-on-controller.patch @@ -0,0 +1,59 @@ +From 03340eaf0004e3cc8e3f8991ea96a46757d92830 Mon Sep 17 00:00:00 2001 +From: Don Penney +Date: Sat, 26 Jan 2019 13:34:55 -0500 +Subject: [PATCH] Add hooks for orderly shutdown on controller + +Hook the ceph init script to add systemd overrides to define +an orderly shutdown for StarlingX controllers. 
+
+Signed-off-by: Don Penney
+---
+ src/init-ceph.in | 32 ++++++++++++++++++++++++++++++++
+ 1 file changed, 32 insertions(+)
+
+diff --git a/src/init-ceph.in b/src/init-ceph.in
+index 1fdb4b3..515d818 100644
+--- a/src/init-ceph.in
++++ b/src/init-ceph.in
+@@ -861,6 +861,38 @@ for name in $what; do
+             fi
+         fi
+
++        . /etc/platform/platform.conf
++        if [ "${nodetype}" = "controller" ]; then
++            # StarlingX: Hook the transient services launched by systemd-run
++            # to allow for proper cleanup and orderly shutdown
++
++            # Set nullglob so wildcards will return empty string if no match
++            shopt -s nullglob
++
++            OSD_SERVICES=$(for svc in /run/systemd/system/ceph-osd*.service; do basename $svc; done | xargs echo)
++            for d in /run/systemd/system/ceph-osd*.d; do
++                cat <<EOF > $d/starlingx-overrides.conf
++[Unit]
++Before=docker.service
++After=sm-shutdown.service
++
++EOF
++            done
++
++            for d in /run/systemd/system/ceph-mon*.d; do
++                cat <<EOF > $d/starlingx-overrides.conf
++[Unit]
++Before=docker.service
++After=sm-shutdown.service ${OSD_SERVICES}
++
++EOF
++            done
++
++            shopt -u nullglob
++
++            systemctl daemon-reload
++        fi
++
+     [ -n "$post_start" ] && do_cmd "$post_start"
+     [ -n "$lockfile" ] && [ "$?" -eq 0 ] && touch $lockfile
+     ;;
+--
+1.8.3.1
+
diff --git a/ceph/ceph/files/ceph-init-wrapper.sh b/ceph/ceph/files/ceph-init-wrapper.sh
new file mode 100755
index 000000000..0a5cd53dd
--- /dev/null
+++ b/ceph/ceph/files/ceph-init-wrapper.sh
@@ -0,0 +1,282 @@
+#!/bin/bash
+#
+# Copyright (c) 2019 Wind River Systems, Inc.
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+# This script is a helper wrapper for pmon monitoring of ceph
+# processes. The "/etc/init.d/ceph" script does not know if ceph is
+# running on the node. For example when the node is locked, ceph
+# processes are not running. In that case we do not want pmond to
+# monitor these processes.
+#
+# The script "/etc/services.d//ceph.sh" will create the file
+# "/var/run/.ceph_started" when ceph is running and remove it when it
+# is not.
+#
+# The script also extracts one or more ceph process names that are
+# reported as 'not running', 'dead' or 'failed' by '/etc/init.d/ceph status'
+# and writes the names to a text file, /tmp/ceph_status_failure.txt, for
+# pmond to access. pmond adds the text to its logs and alarms. Examples of
+# text written to the file by this script are:
+# 'osd.1'
+# 'osd.1, osd.2'
+# 'mon.storage-0'
+# 'mon.storage-0, osd.2'
+#
+# Moreover, for processes that are reported as 'hung' by '/etc/init.d/ceph status'
+# the script will try to increase their logging to 'debug' for a configurable
+# interval. With logging increased it collects a few stack traces; then, at the
+# end of this interval, it triggers a core dump and kills the process.
+# +# Return values; +# zero - /etc/init.d/ceph returned success or ceph is not running on the node +# non-zero /etc/init.d/ceph returned a failure or invalid syntax +# + +source /usr/bin/tsconfig +source /etc/platform/platform.conf + +CEPH_SCRIPT="/etc/init.d/ceph" +CEPH_FILE="$VOLATILE_PATH/.ceph_started" +CEPH_RESTARTING_FILE="$VOLATILE_PATH/.ceph_restarting" +CEPH_GET_STATUS_FILE="$VOLATILE_PATH/.ceph_getting_status" +CEPH_STATUS_FAILURE_TEXT_FILE="/tmp/ceph_status_failure.txt" + +BINDIR=/usr/bin +SBINDIR=/usr/sbin +LIBDIR=/usr/lib64/ceph +ETCDIR=/etc/ceph +source $LIBDIR/ceph_common.sh + +LOG_PATH=/var/log/ceph +LOG_FILE=$LOG_PATH/ceph-process-states.log +LOG_LEVEL=NORMAL # DEBUG +verbose=0 + +DATA_PATH=$VOLATILE_PATH/ceph_hang # folder where we keep state information +mkdir -p $DATA_PATH # make sure folder exists + +MONITORING_INTERVAL=15 +TRACE_LOOP_INTERVAL=5 +GET_STATUS_TIMEOUT=120 +CEPH_STATUS_TIMEOUT=20 + +WAIT_FOR_CMD=1 + +RC=0 + +args=("$@") + +if [ ! -z $ARGS ]; then + IFS=";" read -r -a new_args <<< "$ARGS" + args+=("${new_args[@]}") +fi + +wait_for_status () +{ + timeout=$GET_STATUS_TIMEOUT # wait for status no more than $timeout seconds + while [ -f ${CEPH_GET_STATUS_FILE} ] && [ $timeout -gt 0 ]; do + sleep 1 + let timeout-=1 + done + if [ $timeout -eq 0 ]; then + wlog "-" "WARN" "Getting status takes more than ${GET_STATUS_TIMEOUT}s, continuing" + rm -f $CEPH_GET_STATUS_FILE + fi +} + +start () +{ + if [ -f ${CEPH_FILE} ]; then + wait_for_status + ${CEPH_SCRIPT} start $1 + RC=$? + else + # Ceph is not running on this node, return success + exit 0 + fi +} + +stop () +{ + wait_for_status + ${CEPH_SCRIPT} stop $1 +} + +restart () +{ + if [ -f ${CEPH_FILE} ]; then + wait_for_status + touch $CEPH_RESTARTING_FILE + ${CEPH_SCRIPT} restart $1 + rm -f $CEPH_RESTARTING_FILE + else + # Ceph is not running on this node, return success + exit 0 + fi + +} + +log_and_restart_blocked_osds () +{ + # Log info about the blocked osd daemons and then restart it + local names=$1 + for name in $names; do + wlog $name "INFO" "Restarting OSD with blocked operations" + ${CEPH_SCRIPT} restart $name + done +} + +log_and_kill_hung_procs () +{ + # Log info about the hung processes and then kill them; later on pmon will restart them + local names=$1 + for name in $names; do + type=`echo $name | cut -c 1-3` # e.g. 
'mon', if $item is 'mon1' + id=`echo $name | cut -c 4- | sed 's/^\\.//'` + get_conf run_dir "/var/run/ceph" "run dir" + get_conf pid_file "$run_dir/$type.$id.pid" "pid file" + pid=$(cat $pid_file) + wlog $name "INFO" "Dealing with hung process (pid:$pid)" + + # monitoring interval + wlog $name "INFO" "Increasing log level" + execute_ceph_cmd ret $name "ceph daemon $name config set debug_$type 20/20" + monitoring=$MONITORING_INTERVAL + while [ $monitoring -gt 0 ]; do + if [ $(($monitoring % $TRACE_LOOP_INTERVAL)) -eq 0 ]; then + date=$(date "+%Y-%m-%d_%H-%M-%S") + log_file="$LOG_PATH/hang_trace_${name}_${pid}_${date}.log" + wlog $name "INFO" "Dumping stack trace to: $log_file" + $(pstack $pid >$log_file) & + fi + let monitoring-=1 + sleep 1 + done + wlog $name "INFO" "Trigger core dump" + kill -ABRT $pid &>/dev/null + rm -f $pid_file # process is dead, core dump is archiving, preparing for restart + # Wait for pending systemd core dumps + sleep 2 # hope systemd_coredump has started meanwhile + deadline=$(( $(date '+%s') + 300 )) + while [[ $(date '+%s') -lt "${deadline}" ]]; do + systemd_coredump_pid=$(pgrep -f "systemd-coredump.*${pid}.*ceph-${type}") + [[ -z "${systemd_coredump_pid}" ]] && break + wlog $name "INFO" "systemd-coredump ceph-${type} in progress: pid ${systemd_coredump_pid}" + sleep 2 + done + kill -KILL $pid &>/dev/null + done +} + + +status () +{ + if [[ "$system_type" == "All-in-one" ]] && [[ "$system_mode" != "simplex" ]] && [[ "$1" == "osd" ]]; then + timeout $CEPH_STATUS_TIMEOUT ceph -s + if [ "$?" -ne 0 ]; then + # Ceph cluster is not accessible. Don't panic, controller swact + # may be in progress. + wlog "-" INFO "Ceph is down, ignoring OSD status." + exit 0 + fi + fi + + if [ -f ${CEPH_RESTARTING_FILE} ]; then + # Ceph is restarting, we don't report state changes on the first pass + rm -f ${CEPH_RESTARTING_FILE} + exit 0 + fi + if [ -f ${CEPH_FILE} ]; then + # Make sure the script does not 'exit' between here and the 'rm -f' below + # or the checkpoint file will be left behind + touch -f ${CEPH_GET_STATUS_FILE} + result=`${CEPH_SCRIPT} status $1` + RC=$? 
+ if [ "$RC" -ne 0 ]; then + erred_procs=`echo "$result" | sort | uniq | awk ' /not running|dead|failed/ {printf "%s ", $1}' | sed 's/://g' | sed 's/, $//g'` + hung_procs=`echo "$result" | sort | uniq | awk ' /hung/ {printf "%s ", $1}' | sed 's/://g' | sed 's/, $//g'` + blocked_ops_procs=`echo "$result" | sort | uniq | awk ' /blocked ops/ {printf "%s ", $1}' | sed 's/://g' | sed 's/, $//g'` + invalid=0 + host=`hostname` + if [[ "$system_type" == "All-in-one" ]] && [[ "$system_mode" != "simplex" ]]; then + # On 2 node configuration we have a floating monitor + host="controller" + fi + for i in $(echo $erred_procs $hung_procs); do + if [[ "$i" =~ osd.?[0-9]?[0-9]|mon.$host ]]; then + continue + else + invalid=1 + fi + done + + log_and_restart_blocked_osds $blocked_ops_procs + log_and_kill_hung_procs $hung_procs + + hung_procs_text="" + for i in $(echo $hung_procs); do + hung_procs_text+="$i(process hung) " + done + + rm -f $CEPH_STATUS_FAILURE_TEXT_FILE + if [ $invalid -eq 0 ]; then + text="" + for i in $erred_procs; do + text+="$i, " + done + for i in $hung_procs; do + text+="$i (process hang), " + done + echo "$text" | tr -d '\n' > $CEPH_STATUS_FAILURE_TEXT_FILE + else + echo "$host: '${CEPH_SCRIPT} status $1' result contains invalid process names: $erred_procs" + echo "Undetermined osd or monitor id" > $CEPH_STATUS_FAILURE_TEXT_FILE + fi + fi + + rm -f ${CEPH_GET_STATUS_FILE} + + if [[ $RC == 0 ]] && [[ "$1" == "mon" ]] && [[ "$system_type" == "All-in-one" ]] && [[ "$system_mode" != "simplex" ]]; then + # SM needs exit code != 0 from 'status mon' argument of the init script on + # standby controller otherwise it thinks that the monitor is running and + # tries to stop it. + # '/etc/init.d/ceph status mon' checks the status of monitors configured in + # /etc/ceph/ceph.conf and if it should be running on current host. + # If it should not be running it just exits with code 0. This is what + # happens on the standby controller. + # When floating monitor is running on active controller /var/lib/ceph/mon of + # standby is not mounted (Ceph monitor partition is DRBD synced). + test -e "/var/lib/ceph/mon/ceph-controller" + if [ "$?" -ne 0 ]; then + exit 3 + fi + fi + else + # Ceph is not running on this node, return success + exit 0 + fi +} + + +case "${args[0]}" in + start) + start ${args[1]} + ;; + stop) + stop ${args[1]} + ;; + restart) + restart ${args[1]} + ;; + status) + status ${args[1]} + ;; + *) + echo "Usage: $0 {start|stop|restart|status} [{mon|osd|osd.|mon.}]" + exit 1 + ;; +esac + +exit $RC diff --git a/ceph/ceph/files/ceph-manage-journal.py b/ceph/ceph/files/ceph-manage-journal.py index 78cc6936c..5ce1dfa87 100644 --- a/ceph/ceph/files/ceph-manage-journal.py +++ b/ceph/ceph/files/ceph-manage-journal.py @@ -1,6 +1,6 @@ #!/usr/bin/python # -# Copyright (c) 2016 Wind River Systems, Inc. +# Copyright (c) 2019 Wind River Systems, Inc. 
# # SPDX-License-Identifier: Apache-2.0 # @@ -12,6 +12,7 @@ import re import subprocess import sys +DEVICE_NAME_NVME = "nvme" ######### # Utils # @@ -85,7 +86,11 @@ def is_partitioning_correct(disk_path, partition_sizes): partition_index = 1 for size in partition_sizes: # Check that each partition size matches the one in input - partition_node = disk_node + str(partition_index) + if DEVICE_NAME_NVME in disk_node: + partition_node = '{}p{}'.format(disk_node, str(partition_index)) + else: + partition_node = '{}{}'.format(disk_node, str(partition_index)) + output, _, _ = command(["udevadm", "settle", "-E", partition_node]) cmd = ["parted", "-s", partition_node, "unit", "MiB", "print"] output, _, _ = command(cmd) @@ -118,7 +123,7 @@ def create_partitions(disk_path, partition_sizes): # GPT partitions on the storage node so nothing to remove in this case links = [] if os.path.isdir(DISK_BY_PARTUUID): - links = [ os.path.join(DISK_BY_PARTUUID,l) for l in os.listdir(DISK_BY_PARTUUID) + links = [ os.path.join(DISK_BY_PARTUUID,l) for l in os.listdir(DISK_BY_PARTUUID) if os.path.islink(os.path.join(DISK_BY_PARTUUID, l)) ] # Erase all partitions on current node by creating a new GPT table diff --git a/ceph/ceph/files/ceph-preshutdown.sh b/ceph/ceph/files/ceph-preshutdown.sh new file mode 100644 index 000000000..5f59bd1f8 --- /dev/null +++ b/ceph/ceph/files/ceph-preshutdown.sh @@ -0,0 +1,30 @@ +#!/bin/bash +# +# Copyright (c) 2019 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + +script=$(basename $0) + +# Set nullglob so wildcards will return empty string if no match +shopt -s nullglob + +for dev in /dev/rbd[0-9]*; do + for mnt in $(mount | awk -v dev=$dev '($1 == dev) {print $3}'); do + logger -t ${script} "Unmounting $mnt" + /usr/bin/umount $mnt + done + logger -t ${script} "Unmounted $dev" +done + +for dev in /dev/rbd[0-9]*; do + /usr/bin/rbd unmap -o force $dev + logger -t ${script} "Unmapped $dev" +done + +lsmod | grep -q '^rbd\>' && /usr/sbin/modprobe -r rbd +lsmod | grep -q '^libceph\>' && /usr/sbin/modprobe -r libceph + +exit 0 + diff --git a/ceph/ceph/files/ceph-radosgw.service b/ceph/ceph/files/ceph-radosgw.service new file mode 100644 index 000000000..391ecf631 --- /dev/null +++ b/ceph/ceph/files/ceph-radosgw.service @@ -0,0 +1,18 @@ +[Unit] +Description=radosgw RESTful rados gateway +After=network.target +#After=remote-fs.target nss-lookup.target network-online.target time-sync.target +#Wants=network-online.target + +[Service] +Type=forking +Restart=no +KillMode=process +RemainAfterExit=yes +ExecStart=/etc/rc.d/init.d/ceph-radosgw start +ExecStop=/etc/rc.d/init.d/ceph-radosgw stop +ExecReload=/etc/rc.d/init.d/ceph-radosgw reload + +[Install] +WantedBy=multi-user.target + diff --git a/ceph/ceph/files/ceph-rest-api b/ceph/ceph/files/ceph-rest-api new file mode 100644 index 000000000..a89fe62bc --- /dev/null +++ b/ceph/ceph/files/ceph-rest-api @@ -0,0 +1,92 @@ +#!/bin/sh + +### BEGIN INIT INFO +# Provides: ceph-rest-api +# Required-Start: $ceph +# Required-Stop: $ceph +# Default-Start: 2 3 4 5 +# Default-Stop: 0 1 6 +# Short-Description: Ceph REST API daemon +# Description: Ceph REST API daemon +### END INIT INFO + +DESC="ceph-rest-api" +DAEMON="/usr/bin/ceph-rest-api" +RUNDIR="/var/run/ceph" +PIDFILE="${RUNDIR}/ceph-rest-api.pid" + +start() +{ + if [ -e $PIDFILE ]; then + PIDDIR=/proc/$(cat $PIDFILE) + if [ -d ${PIDDIR} ]; then + echo "$DESC already running." 
+ exit 0 + else + echo "Removing stale PID file $PIDFILE" + rm -f $PIDFILE + fi + fi + + echo -n "Starting $DESC..." + mkdir -p $RUNDIR + start-stop-daemon --start --quiet --background \ + --pidfile ${PIDFILE} --make-pidfile --exec ${DAEMON} + + if [ $? -eq 0 ]; then + echo "done." + else + echo "failed." + exit 1 + fi +} + +stop() +{ + echo -n "Stopping $DESC..." + start-stop-daemon --stop --quiet --pidfile $PIDFILE + if [ $? -eq 0 ]; then + echo "done." + else + echo "failed." + fi + rm -f $PIDFILE +} + +status() +{ + pid=`cat $PIDFILE 2>/dev/null` + if [ -n "$pid" ]; then + if ps -p $pid &>/dev/null ; then + echo "$DESC is running" + exit 0 + else + echo "$DESC is not running but has pid file" + exit 1 + fi + fi + echo "$DESC is not running" + exit 3 +} + +case "$1" in + start) + start + ;; + stop) + stop + ;; + restart|force-reload|reload) + stop + start + ;; + status) + status + ;; + *) + echo "Usage: $0 {start|stop|force-reload|restart|reload|status}" + exit 1 + ;; +esac + +exit 0 diff --git a/ceph/ceph/files/ceph-rest-api.service b/ceph/ceph/files/ceph-rest-api.service new file mode 100644 index 000000000..491ffb7e3 --- /dev/null +++ b/ceph/ceph/files/ceph-rest-api.service @@ -0,0 +1,16 @@ +[Unit] +Description=Ceph REST API +After=network.target ceph.target + +[Service] +Type=forking +Restart=no +KillMode=process +RemainAfterExit=yes +ExecStart=/etc/rc.d/init.d/ceph-rest-api start +ExecStop=/etc/rc.d/init.d/ceph-rest-api stop +ExecReload=/etc/rc.d/init.d/ceph-rest-api reload + +[Install] +WantedBy=multi-user.target + diff --git a/ceph/ceph/files/ceph.conf b/ceph/ceph/files/ceph.conf new file mode 100644 index 000000000..863f72e5a --- /dev/null +++ b/ceph/ceph/files/ceph.conf @@ -0,0 +1,50 @@ +[global] + # Unique ID for the cluster. + fsid = %CLUSTER_UUID% + # Public network where the monitor is connected to, i.e, 128.224.0.0/16 + #public network = 127.0.0.1/24 + # For version 0.55 and beyond, you must explicitly enable + # or disable authentication with "auth" entries in [global]. + auth_cluster_required = cephx + auth_service_required = cephx + auth_client_required = cephx + osd_journal_size = 1024 + + # Uncomment the following line if you are mounting with ext4 + # filestore xattr use omap = true + + # Number of replicas of objects. Write an object 2 times. + # Cluster cannot reach an active + clean state until there's enough OSDs + # to handle the number of copies of an object. In this case, it requires + # at least 2 OSDs + osd_pool_default_size = 2 + + # Allow writing one copy in a degraded state. + osd_pool_default_min_size = 1 + + # Ensure you have a realistic number of placement groups. We recommend + # approximately 100 per OSD. E.g., total number of OSDs multiplied by 100 + # divided by the number of replicas (i.e., osd pool default size). So for + # 2 OSDs and osd pool default size = 2, we'd recommend approximately + # (100 * 2) / 2 = 100. + osd_pool_default_pg_num = 64 + osd_pool_default_pgp_num = 64 + osd_crush_chooseleaf_type = 1 + setuser match path = /var/lib/ceph/$type/$cluster-$id + + # Override Jewel default of 2 reporters. 
StarlingX has replication factor 2 + mon_osd_min_down_reporters = 1 + + # Use Hammer's report interval default value + osd_mon_report_interval_max = 120 + +[osd] + osd_mkfs_type = xfs + osd_mkfs_options_xfs = "-f" + osd_mount_options_xfs = "rw,noatime,inode64,logbufs=8,logbsize=256k" + +[mon] + mon warn on legacy crush tunables = false + # Quiet new warnings on move to Hammer + mon pg warn max per osd = 2048 + mon pg warn max object skew = 0 diff --git a/ceph/ceph/files/ceph.conf.pmon b/ceph/ceph/files/ceph.conf.pmon new file mode 100644 index 000000000..00418b2e9 --- /dev/null +++ b/ceph/ceph/files/ceph.conf.pmon @@ -0,0 +1,26 @@ +[process] +process = ceph +script = /etc/init.d/ceph-init-wrapper + +style = lsb +severity = major ; minor, major, critical +restarts = 3 ; restart retries before error assertion +interval = 30 ; number of seconds to wait between restarts + +mode = status ; Monitoring mode: passive (default) or active + ; passive: process death monitoring (default: always) + ; active : heartbeat monitoring, i.e. request / response messaging + ; status : determine process health with executing "status" command + ; "start" is used to start the process(es) again + ; ignore : do not monitor or stop monitoring + +; Status and Active Monitoring Options + +period = 30 ; monitor period in seconds +timeout = 120 ; for active mode, messaging timeout period in seconds, must be shorter than period + ; for status mode, max amount of time for a command to execute + +; Status Monitoring Options +start_arg = start ; start argument for the script +status_arg = status ; status argument for the script +status_failure_text = /tmp/ceph_status_failure.txt ; text to be added to alarms or logs, this is optional diff --git a/ceph/ceph/files/ceph.service b/ceph/ceph/files/ceph.service new file mode 100644 index 000000000..d3c2accfc --- /dev/null +++ b/ceph/ceph/files/ceph.service @@ -0,0 +1,16 @@ +[Unit] +Description=StarlingX Ceph Startup +After=network.target + +[Service] +Type=forking +Restart=no +KillMode=process +RemainAfterExit=yes +ExecStart=/etc/rc.d/init.d/ceph start +ExecStop=/etc/rc.d/init.d/ceph stop +PIDFile=/var/run/ceph/ceph.pid + +[Install] +WantedBy=multi-user.target + diff --git a/ceph/ceph/files/ceph.sh b/ceph/ceph/files/ceph.sh new file mode 100755 index 000000000..926eb0559 --- /dev/null +++ b/ceph/ceph/files/ceph.sh @@ -0,0 +1,77 @@ +#!/bin/bash + +INITDIR=/etc/init.d +LOGFILE=/var/log/ceph/ceph-init.log +CEPH_FILE=/var/run/.ceph_started + +# Get our nodetype +. /etc/platform/platform.conf + +# Exit immediately if ceph not configured (i.e. no mon in the config file) +if ! grep -q "mon\." /etc/ceph/ceph.conf +then + exit 0 +fi + +logecho () +{ + echo $1 + date >> ${LOGFILE} + echo $1 >> ${LOGFILE} +} + +start () +{ + if [[ "$nodetype" == "controller" ]] || [[ "$nodetype" == "storage" ]]; then + logecho "Starting ceph services..." + ${INITDIR}/ceph start >> ${LOGFILE} 2>&1 + RC=$? + + if [ ! -f ${CEPH_FILE} ]; then + touch ${CEPH_FILE} + fi + else + logecho "No ceph services on ${nodetype} node" + exit 0 + fi +} + +stop () +{ + if [[ "$nodetype" == "controller" ]] || [[ "$nodetype" == "storage" ]]; then + if [[ "$system_type" == "All-in-one" ]] && [[ "$system_mode" == "simplex" ]]; then + logecho "Ceph services will continue to run on node" + exit 0 + fi + + logecho "Stopping ceph services..." + + if [ -f ${CEPH_FILE} ]; then + rm -f ${CEPH_FILE} + fi + + ${INITDIR}/ceph stop >> ${LOGFILE} 2>&1 + RC=$? 
+    else
+        logecho "No ceph services on ${nodetype} node"
+        exit 0
+    fi
+}
+
+RC=0
+
+case "$1" in
+    start)
+        start
+        ;;
+    stop)
+        stop
+        ;;
+    *)
+        echo "Usage: $0 {start|stop}"
+        exit 1
+        ;;
+esac
+
+logecho "RC was: $RC"
+exit $RC
diff --git a/ceph/ceph/files/osd-wait-status.py b/ceph/ceph/files/osd-wait-status.py
new file mode 100644
index 000000000..0c954f8b7
--- /dev/null
+++ b/ceph/ceph/files/osd-wait-status.py
@@ -0,0 +1,246 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2019 Wind River Systems, Inc.
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+#
+# Wait for one or a group of OSDs to match one or a group of statuses
+# as reported by "ceph osd tree".
+#
+# Examples:
+# - wait for osd 0 to be up:
+#     osd-wait-status -o 0 -s up
+#
+# - wait for osd 0 and osd 1 to be up:
+#     osd-wait-status -o 0 1 -s up
+#
+# The amount of time spent waiting for OSDs to match a status can
+# be limited by specifying:
+#
+# - the maximum retry count; the script gives up if the status doesn't
+#   match the desired one after more than retry-count attempts.
+#   The interval between attempts is controlled by the "-i" flag.
+#   Example:
+#     osd-wait-status -o 0 -s up -c 2 -i 3
+#   will call "ceph osd tree" once to get the status of osd 0 and, if
+#   it's not "up", will try one more time after 3 seconds.
+#
+# - a deadline: the maximum interval of time the script keeps looping
+#   while waiting for OSDs to match status. The interval between
+#   attempts is controlled by the "-i" flag.
+#   Example:
+#     osd-wait-status -o 0 -s up -d 10 -i 3
+#   will call "ceph osd tree" until either osd 0 status is "up" or the
+#   10-second deadline expires; that's 3-4 attempts depending on how
+#   long "ceph osd tree" takes to run.
+#
+# Status match can be reversed by using the "-n" flag.
+# Example:
+#     osd-wait-status -o 0 -n -s up
+# waits until osd 0 status is NOT up.
+#
+# osd-wait-status does not allow matching arbitrary combinations of
+# OSDs and statuses. For example: "osd 0 up and osd 1 down" is not
+# supported.
+#
+# Return code is 0 if the OSDs match the expected status before the
+# retry count * interval / deadline limits are reached.
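Editor's note: the -c / -i / -d options described above map directly onto the kwargs the script builds for the "retrying" package it imports below. A minimal sketch of that mapping, assuming a hypothetical boolean probe() status check (not part of the script):

    import retrying

    def wait_for(probe, retry_count=0, retry_interval=1, deadline=0):
        # retry while probe() returns False; bound by attempts and/or elapsed time
        kwargs = {'retry_on_result': lambda ok: ok is False}
        if retry_count:          # -c: maximum number of attempts
            kwargs['stop_max_attempt_number'] = retry_count
        if deadline:             # -d: overall time budget (seconds -> ms)
            kwargs['stop_max_delay'] = deadline * 1000
        if retry_interval:       # -i: pause between attempts (seconds -> ms)
            kwargs['wait_fixed'] = retry_interval * 1000
        return retrying.Retrying(**kwargs).call(probe)

    # e.g. wait_for(lambda: is_osd_up('osd.0'), retry_count=2, retry_interval=3)
    # where is_osd_up() is a hypothetical check returning True/False
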
+
+import argparse
+import json
+import logging
+import retrying
+import subprocess
+import sys
+import time
+
+logging.basicConfig(level=logging.DEBUG)
+LOG = logging.getLogger('osd-wait-status')
+
+CEPH_BINARY_PATH = '/usr/bin/ceph'
+RETRY_INTERVAL_SEC = 1
+RETRY_FOREVER = 0
+NO_DEADLINE = 0
+
+
+class OsdException(Exception):
+    def __init__(self, message, restartable=False):
+        super(OsdException, self).__init__(message)
+        self.restartable = restartable
+
+
+def get_osd_tree():
+    command = [CEPH_BINARY_PATH,
+               'osd', 'tree', '--format', 'json']
+    try:
+        p = subprocess.Popen(command,
+                             stdout = subprocess.PIPE,
+                             stderr = subprocess.PIPE)
+        output, error = p.communicate()
+        if p.returncode != 0:
+            raise OsdException(
+                ('Command failed: command="{}", '
+                 'returncode={}, output="{}"').format(
+                     ' '.join(command),
+                     p.returncode,
+                     output, error),
+                restartable=True)
+    except OSError as e:
+        raise OsdException(
+            ('Command failed: command="{}", '
+             'reason="{}"').format(command, str(e)))
+    try:
+        return json.loads(output)
+    except ValueError as e:
+        raise OsdException(
+            ('JSON decode failed: '
+             'data="{}", error="{}"').format(
+                 output, e))
+
+
+def osd_match_status(target_osd, target_status,
+                     reverse_logic):
+    LOG.info(('Match status: '
+              'target_osd={}, '
+              'target status={}, '
+              'reverse_logic={}').format(
+                  target_osd, target_status, reverse_logic))
+    tree = get_osd_tree()
+    osd_status = {}
+    for node in tree.get('nodes'):
+        name = node.get('name')
+        if name in target_osd:
+            osd_status[name] = node.get('status')
+            if len(osd_status) == len(target_osd):
+                break
+    LOG.info('Current OSD(s) status: {}'.format(osd_status))
+    for name in target_osd:
+        if name not in osd_status:
+            raise OsdException(
+                ('Unable to retrieve status '
+                 'for "{}"').format(
+                     name))
+        if reverse_logic:
+            if osd_status[name] not in target_status:
+                del osd_status[name]
+        else:
+            if osd_status[name] in target_status:
+                del osd_status[name]
+    if len(osd_status) == 0:
+        LOG.info('OSD(s) status target reached.')
+        return True
+    else:
+        LOG.info('OSD(s) {}matching status {}: {}'.format(
+            '' if reverse_logic else 'not ',
+            target_status,
+            osd_status.keys()))
+        return False
+
+
+def osd_wait_status(target_osd, target_status,
+                    reverse_logic,
+                    retry_count, retry_interval,
+                    deadline):
+
+    def retry_if_false(result):
+        return (result is False)
+
+    def retry_if_restartable(exception):
+        return (isinstance(exception, OsdException)
+                and exception.restartable)
+
+    LOG.info(('Wait options: '
+              'target_osd={}, '
+              'target_status={}, '
+              'reverse_logic={}, '
+              'retry_count={}, '
+              'retry_interval={}, '
+              'deadline={}').format(
+                  target_osd, target_status, reverse_logic,
+                  retry_count, retry_interval, deadline))
+    kwargs = {
+        'retry_on_result': retry_if_false,
+        'retry_on_exception': retry_if_restartable}
+    if retry_count != RETRY_FOREVER:
+        kwargs['stop_max_attempt_number'] = retry_count
+    if deadline != NO_DEADLINE:
+        kwargs['stop_max_delay'] = deadline * 1000
+    if retry_interval != 0:
+        kwargs['wait_fixed'] = retry_interval * 1000
+    if not len(target_osd):
+        return
+    retrying.Retrying(**kwargs).call(
+        osd_match_status,
+        target_osd, target_status,
+        reverse_logic)
+
+
+def non_negative_interger(value):
+    value = int(value)
+    if value < 0:
+        raise argparse.ArgumentTypeError(
+            '{} is a negative integer value'.format(value))
+    return value
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser(
+        description='Wait for OSD status match')
+    parser.add_argument(
+        '-o', '--osd',
+        nargs='*',
+        help='osd id',
+
type=non_negative_interger, + required=True) + parser.add_argument( + '-n', '--not', + dest='reverse_logic', + help='reverse logic: wait for status NOT to match', + action='store_true', + default=False) + parser.add_argument( + '-s', '--status', + nargs='+', + help='status', + type=str, + required=True) + parser.add_argument( + '-c', '--retry-count', + help='retry count', + type=non_negative_interger, + default=RETRY_FOREVER) + parser.add_argument( + '-i', '--retry-interval', + help='retry interval (seconds)', + type=non_negative_interger, + default=RETRY_INTERVAL_SEC) + parser.add_argument( + '-d', '--deadline', + help='deadline (seconds)', + type=non_negative_interger, + default=NO_DEADLINE) + args = parser.parse_args() + start = time.time() + try: + osd_wait_status( + ['osd.{}'.format(o) for o in args.osd], + args.status, + args.reverse_logic, + args.retry_count, + args.retry_interval, + args.deadline) + LOG.info('Elapsed time: {:.02f} seconds'.format( + time.time() - start)) + sys.exit(0) + except retrying.RetryError as e: + LOG.warn( + ('Retry error: {}. ' + 'Elapsed time: {:.02f} seconds'.format( + e, time.time() - start))) + except OsdException as e: + LOG.warn( + ('OSD wait error: {}. ' + 'Elapsed time: {:.02f} seconds').format( + e, time.time() - start)) + sys.exit(1) diff --git a/ceph/ceph/files/starlingx-docker-override.conf b/ceph/ceph/files/starlingx-docker-override.conf new file mode 100644 index 000000000..5ffd85907 --- /dev/null +++ b/ceph/ceph/files/starlingx-docker-override.conf @@ -0,0 +1,3 @@ +[Service] +ExecStopPost=/usr/sbin/ceph-preshutdown.sh + diff --git a/ceph/ceph/files/stx_git_version b/ceph/ceph/files/stx_git_version new file mode 100644 index 000000000..dd8d1a2a1 --- /dev/null +++ b/ceph/ceph/files/stx_git_version @@ -0,0 +1,2 @@ +656b5b63ed7c43bd014bcafd81b001959d5f089f +v10.2.6 diff --git a/config-files/syslog-ng-config/centos/build_srpm.data b/config-files/syslog-ng-config/centos/build_srpm.data index da1e20bd8..2c3b2cb8b 100644 --- a/config-files/syslog-ng-config/centos/build_srpm.data +++ b/config-files/syslog-ng-config/centos/build_srpm.data @@ -1,2 +1,2 @@ SRC_DIR="files" -TIS_PATCH_VER=0 +TIS_PATCH_VER=1 diff --git a/config-files/syslog-ng-config/files/syslog-ng.conf b/config-files/syslog-ng-config/files/syslog-ng.conf index 32c7a3f6c..e4ebcdb5e 100644 --- a/config-files/syslog-ng-config/files/syslog-ng.conf +++ b/config-files/syslog-ng-config/files/syslog-ng.conf @@ -107,6 +107,7 @@ destination d_sm { file("/var/log/sm.log"); }; destination d_rmon { file("/var/log/rmond.log" template(t_mtc)); }; destination d_rmon_notify { file("/var/log/rmond_notify.log" template(t_mtc)); }; destination d_pmon { file("/var/log/pmond.log" template(t_mtc)); }; +destination d_lmon { file("/var/log/lmond.log" template(t_mtc)); }; destination d_hostwd { file("/var/log/hostwd.log" template(t_mtc)); }; destination d_fsmon { file("/var/log/fsmond.log" template(t_mtc)); }; destination d_hwmon { file("/var/log/hwmond.log" template(t_mtc)); }; @@ -352,6 +353,7 @@ filter f_local7 { facility(local7); }; filter f_rmon { facility(local5) and program(rmond); }; filter f_rmon_notify { facility(local5) and program(rmon_resource_notify); }; filter f_pmon { facility(local5) and program(pmond); }; +filter f_lmon { facility(local5) and program(lmond); }; filter f_hostw { facility(local5) and program(hostwd); }; filter f_fsmon { facility(local5) and program(fsmond); }; filter f_hwmon { facility(local5) and program(hwmond); }; @@ -472,6 +474,7 @@ log { source(s_src); filter(f_local3); 
destination(d_sm); }; log { source(s_src); filter(f_rmon); destination(d_rmon); }; log { source(s_src); filter(f_rmon_notify); destination(d_rmon_notify); }; log { source(s_src); filter(f_pmon); destination(d_pmon); }; +log { source(s_src); filter(f_lmon); destination(d_lmon); }; log { source(s_src); filter(f_hostw); destination(d_hostwd); }; log { source(s_src); filter(f_fsmon); destination(d_fsmon); }; log { source(s_src); filter(f_hwmon); destination(d_hwmon); }; diff --git a/database/mariadb/centos/docker/Dockerfile b/database/mariadb/centos/docker/Dockerfile new file mode 100644 index 000000000..1735f8669 --- /dev/null +++ b/database/mariadb/centos/docker/Dockerfile @@ -0,0 +1,6 @@ +FROM openstackhelm/mariadb:10.2.18 + +RUN apt-get update && apt-get install -y galera-arbitrator-3 + +CMD ["/usr/bin/garbd"] + diff --git a/database/mariadb/centos/stx-mariadb.master_docker_image b/database/mariadb/centos/stx-mariadb.master_docker_image new file mode 100644 index 000000000..5c5134b03 --- /dev/null +++ b/database/mariadb/centos/stx-mariadb.master_docker_image @@ -0,0 +1,3 @@ +BUILDER=docker +LABEL=stx-mariadb + diff --git a/database/mariadb/centos/stx-mariadb.pike_docker_image b/database/mariadb/centos/stx-mariadb.pike_docker_image new file mode 100644 index 000000000..5c5134b03 --- /dev/null +++ b/database/mariadb/centos/stx-mariadb.pike_docker_image @@ -0,0 +1,3 @@ +BUILDER=docker +LABEL=stx-mariadb + diff --git a/filesystem/drbd/drbd-tools/centos/build_srpm.data b/filesystem/drbd/drbd-tools/centos/build_srpm.data index 1e6684949..1873fdbff 100644 --- a/filesystem/drbd/drbd-tools/centos/build_srpm.data +++ b/filesystem/drbd/drbd-tools/centos/build_srpm.data @@ -1,4 +1,4 @@ COPY_LIST="$FILES_BASE/* \ $DISTRO/patches/* \ $CGCS_BASE/downloads/drbd-8.4.3.tar.gz" -TIS_PATCH_VER=6 +TIS_PATCH_VER=7 diff --git a/filesystem/drbd/drbd-tools/centos/drbd.spec b/filesystem/drbd/drbd-tools/centos/drbd.spec index b537a6b31..31543d91c 100644 --- a/filesystem/drbd/drbd-tools/centos/drbd.spec +++ b/filesystem/drbd/drbd-tools/centos/drbd.spec @@ -34,7 +34,7 @@ Source: http://oss.linbit.com/%{name}/8.3/%{name}-%{version}.tar.gz Source1: drbd.service -# WRS +# StarlingX Patch0001: 0001-skip_wait_con_int_on_simplex.patch Patch0002: 0002-drbd-conditional-crm-dependency.patch Patch0003: 0003-drbd_report_condition.patch @@ -43,6 +43,7 @@ Patch0005: 0005-drbd_reconnect_standby_standalone.patch Patch0006: 0006-avoid-kernel-userspace-version-check.patch Patch0007: 0007-Update-OCF-to-attempt-connect-in-certain-states.patch Patch0008: 0008-Increase-short-cmd-timeout-to-15-secs.patch +Patch0009: 0009-Check-for-mounted-device-before-demoting-Primary-DRB.patch License: GPLv2+ ExclusiveOS: linux @@ -271,6 +272,7 @@ management utility. 
%patch0006 -p1 %patch0007 -p1 %patch0008 -p1 +%patch0009 -p1 %build %configure \ diff --git a/filesystem/drbd/drbd-tools/centos/patches/0009-Check-for-mounted-device-before-demoting-Primary-DRB.patch b/filesystem/drbd/drbd-tools/centos/patches/0009-Check-for-mounted-device-before-demoting-Primary-DRB.patch new file mode 100644 index 000000000..ac8f41417 --- /dev/null +++ b/filesystem/drbd/drbd-tools/centos/patches/0009-Check-for-mounted-device-before-demoting-Primary-DRB.patch @@ -0,0 +1,45 @@ +From 017157d21a56410811384a43d0b0cbba6444baeb Mon Sep 17 00:00:00 2001 +From: Don Penney +Date: Wed, 6 Feb 2019 01:19:59 -0500 +Subject: [PATCH] Check for mounted device before demoting Primary DRBD + resource + +Update the OCF script to check for a mounted device when demoting +a resource that's in the Primary state. The state change will fail +if it is still in use, otherwise. + +Signed-off-by: Don Penney +--- + scripts/drbd.ocf | 16 +++++++++++++++- + 1 file changed, 15 insertions(+), 1 deletion(-) + +diff --git a/scripts/drbd.ocf b/scripts/drbd.ocf +index e03bf6d..95da11a 100644 +--- a/scripts/drbd.ocf ++++ b/scripts/drbd.ocf +@@ -720,7 +720,21 @@ drbd_stop() { + ;; + $OCF_RUNNING_MASTER) + ocf_log warn "$DRBD_RESOURCE still Primary, demoting." +- do_drbdadm secondary $DRBD_RESOURCE ++ found=no ++ for dev in ${DRBD_DEVICES[@]} ""; do ++ cat /proc/mounts | grep -q "^${dev} " ++ if [ $? -eq 0 ]; then ++ ocf_log warn "${DRBD_RESOURCE} is still mounted via $dev" ++ found=yes ++ break ++ fi ++ done ++ if [ "${found}" = "yes" ]; then ++ ocf_log warn "Waiting to drop $DRBD_RESOURCE" ++ else ++ ocf_log warn "Dropping $DRBD_RESOURCE to Secondary" ++ do_drbdadm secondary $DRBD_RESOURCE ++ fi + esac + $first_try || sleep 1 + first_try=false +-- +1.8.3.1 + diff --git a/kubernetes/kubernetes/centos/Readme.rst b/kubernetes/kubernetes/centos/Readme.rst index 5f84208de..350d6a8f8 100644 --- a/kubernetes/kubernetes/centos/Readme.rst +++ b/kubernetes/kubernetes/centos/Readme.rst @@ -1,4 +1,7 @@ -The spec file used here was from the kubernetes 1.10 src rpm. -The spec_diff shows the modifications made to that spec file. -to help understand which changes were needed, to assist with -future upversioning. +The spec file used here was from the kubernetes 1.10.0 src rpm. +The orig file is included to help show modifications made to that +spec file, to help understand which changes were needed and to +assist with future upversioning. + +The contrib tarball does not have the same versioning as kubernetes and +there is little activity in that repo. 
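Editor's note: the guard that 0009-Check-for-mounted-device-before-demoting-Primary-DRB.patch (above) adds to scripts/drbd.ocf only demotes the resource once none of its devices appear as the source field of a /proc/mounts entry. A rough, hypothetical Python rendering of that check, for illustration only (not part of the patch; log() and demote() are stand-ins):

    def mounted_devices(devices, mounts_path='/proc/mounts'):
        # Return the subset of `devices` still mounted, by matching the first
        # (source) field of each mounts entry, as the OCF script does with
        # grep -q "^${dev} " /proc/mounts
        with open(mounts_path) as mounts:
            in_use = {line.split()[0] for line in mounts if line.strip()}
        return [dev for dev in devices if dev in in_use]

    # busy = mounted_devices(['/dev/drbd0', '/dev/drbd1'])
    # if busy:
    #     log('still mounted, waiting to demote: %s' % busy)  # hypothetical log()
    # else:
    #     demote(resource)  # i.e. drbdadm secondary; hypothetical demote()
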
diff --git a/kubernetes/kubernetes/centos/build_srpm.data b/kubernetes/kubernetes/centos/build_srpm.data index e45cbbeba..cd5899b88 100644 --- a/kubernetes/kubernetes/centos/build_srpm.data +++ b/kubernetes/kubernetes/centos/build_srpm.data @@ -1,7 +1,8 @@ -VERSION=1.12.1 +VERSION=1.12.3 +CON_VERSION=1.12.1 TAR_NAME=kubernetes TAR="$TAR_NAME-v$VERSION.tar.gz" -CONTRIB="$TAR_NAME-contrib-v$VERSION.tar.gz" +CONTRIB="$TAR_NAME-contrib-v$CON_VERSION.tar.gz" COPY_LIST="${CGCS_BASE}/downloads/$TAR ${CGCS_BASE}/downloads/$CONTRIB $FILES_BASE/*" diff --git a/kubernetes/kubernetes/centos/kubernetes.spec b/kubernetes/kubernetes/centos/kubernetes.spec index 239b0ffdf..619ced102 100644 --- a/kubernetes/kubernetes/centos/kubernetes.spec +++ b/kubernetes/kubernetes/centos/kubernetes.spec @@ -23,7 +23,7 @@ %global provider_prefix %{provider}.%{provider_tld}/%{project}/%{repo} %global import_path k8s.io/kubernetes -%global commit 1.12.1 +%global commit 1.12.3 %global con_provider github %global con_provider_tld com @@ -32,7 +32,7 @@ # https://github.com/kubernetes/contrib %global con_commit 1.12.1 -%global kube_version 1.12.1 +%global kube_version 1.12.3 %global kube_git_version v%{kube_version} # Needed otherwise "version_ldflags=$(kube::version_ldflags)" doesn't work diff --git a/kubernetes/kubernetes/centos/kubernetes.spec.orig b/kubernetes/kubernetes/centos/kubernetes.spec.orig new file mode 100644 index 000000000..fd43ef4cd --- /dev/null +++ b/kubernetes/kubernetes/centos/kubernetes.spec.orig @@ -0,0 +1,2282 @@ +%if 0%{?fedora} +%global with_devel 1 +%global with_bundled 0 +%global with_debug 1 +%else +%global with_devel 0 +%global with_bundled 1 +%global with_debug 0 +%endif + +%if 0%{?with_debug} +# https://bugzilla.redhat.com/show_bug.cgi?id=995136#c12 +%global _dwz_low_mem_die_limit 0 +%else +%global debug_package %{nil} +%endif + +%global provider github +%global provider_tld com +%global project kubernetes +%global repo kubernetes +# https://github.com/kubernetes/kubernetes + +%global provider_prefix %{provider}.%{provider_tld}/%{project}/%{repo} +%global import_path k8s.io/kubernetes +%global commit fc32d2f3698e36b93322a3465f63a14e9f0eaead +%global shortcommit %(c=%{commit}; echo ${c:0:7}) + +%global con_provider github +%global con_provider_tld com +%global con_project kubernetes +%global con_repo contrib +# https://github.com/kubernetes/contrib +%global con_provider_prefix %{con_provider}.%{con_provider_tld}/%{con_project}/%{con_repo} +%global con_commit 5b445f1c53aa8d6457523526340077935f62e691 +%global con_shortcommit %(c=%{con_commit}; echo ${c:0:7}) + +%global kube_version 1.10.0 +%global kube_git_version v%{kube_version} + +# Needed otherwise "version_ldflags=$(kube::version_ldflags)" doesn't work +%global _buildshell /bin/bash +%global _checkshell /bin/bash + +############################################## +Name: kubernetes +Version: %{kube_version} +Release: 1%{?dist} +Summary: Container cluster management +License: ASL 2.0 +URL: https://%{import_path} +ExclusiveArch: x86_64 aarch64 ppc64le s390x +Source0: https://%{provider_prefix}/archive/%{commit}/%{repo}-%{shortcommit}.tar.gz +Source1: https://%{con_provider_prefix}/archive/%{con_commit}/%{con_repo}-%{con_shortcommit}.tar.gz +Source3: kubernetes-accounting.conf +Source4: kubeadm.conf + +Source33: genmanpages.sh + +Patch3: build-with-debug-info.patch +#Patch4: make-test-cmd-run-over-hyperkube-based-kubectl.patch +#Patch5: make-e2e_node-run-over-distro-bins.patch + +# ppc64le +Patch16: fix-support-for-ppc64le.patch + +Patch20: 
use_go_build-is-not-fully-propagated-so-make-it-fixe.patch + +# It obsoletes cadvisor but needs its source code (literally integrated) +Obsoletes: cadvisor + +# kubernetes is decomposed into master and node subpackages +# require both of them for updates +Requires: kubernetes-master = %{version}-%{release} +Requires: kubernetes-node = %{version}-%{release} + +%description +%{summary} + +%if 0%{?with_devel} +%package devel +Summary: %{summary} +BuildArch: noarch + +Provides: golang(%{import_path}/cmd/genutils) = %{version}-%{release} +Provides: golang(%{import_path}/cmd/kube-apiserver/app) = %{version}-%{release} +Provides: golang(%{import_path}/cmd/kube-apiserver/app/options) = %{version}-%{release} +Provides: golang(%{import_path}/cmd/kube-controller-manager/app) = %{version}-%{release} +Provides: golang(%{import_path}/cmd/kube-controller-manager/app/options) = %{version}-%{release} +Provides: golang(%{import_path}/cmd/kube-proxy/app) = %{version}-%{release} +Provides: golang(%{import_path}/cmd/kube-proxy/app/options) = %{version}-%{release} +Provides: golang(%{import_path}/cmd/kubectl/app) = %{version}-%{release} +Provides: golang(%{import_path}/cmd/kubelet/app) = %{version}-%{release} +Provides: golang(%{import_path}/cmd/kubelet/app/options) = %{version}-%{release} +Provides: golang(%{import_path}/cmd/libs/go2idl/args) = %{version}-%{release} +Provides: golang(%{import_path}/cmd/libs/go2idl/client-gen/args) = %{version}-%{release} +Provides: golang(%{import_path}/cmd/libs/go2idl/client-gen/generators) = %{version}-%{release} +Provides: golang(%{import_path}/cmd/libs/go2idl/client-gen/generators/fake) = %{version}-%{release} +Provides: golang(%{import_path}/cmd/libs/go2idl/client-gen/generators/normalization) = %{version}-%{release} +Provides: golang(%{import_path}/cmd/libs/go2idl/client-gen/test_apis/testgroup.k8s.io) = %{version}-%{release} +Provides: golang(%{import_path}/cmd/libs/go2idl/client-gen/test_apis/testgroup.k8s.io/install) = %{version}-%{release} +Provides: golang(%{import_path}/cmd/libs/go2idl/client-gen/test_apis/testgroup.k8s.io/v1) = %{version}-%{release} +Provides: golang(%{import_path}/cmd/libs/go2idl/client-gen/testoutput/clientset_generated/test_internalclientset) = %{version}-%{release} +Provides: golang(%{import_path}/cmd/libs/go2idl/client-gen/testoutput/clientset_generated/test_internalclientset/fake) = %{version}-%{release} +Provides: golang(%{import_path}/cmd/libs/go2idl/client-gen/testoutput/clientset_generated/test_internalclientset/typed/testgroup.k8s.io/unversioned) = %{version}-%{release} +Provides: golang(%{import_path}/cmd/libs/go2idl/client-gen/testoutput/clientset_generated/test_internalclientset/typed/testgroup.k8s.io/unversioned/fake) = %{version}-%{release} +Provides: golang(%{import_path}/cmd/libs/go2idl/conversion-gen/generators) = %{version}-%{release} +Provides: golang(%{import_path}/cmd/libs/go2idl/deepcopy-gen/generators) = %{version}-%{release} +Provides: golang(%{import_path}/cmd/libs/go2idl/generator) = %{version}-%{release} +Provides: golang(%{import_path}/cmd/libs/go2idl/go-to-protobuf/protobuf) = %{version}-%{release} +Provides: golang(%{import_path}/cmd/libs/go2idl/import-boss/generators) = %{version}-%{release} +Provides: golang(%{import_path}/cmd/libs/go2idl/namer) = %{version}-%{release} +Provides: golang(%{import_path}/cmd/libs/go2idl/parser) = %{version}-%{release} +Provides: golang(%{import_path}/cmd/libs/go2idl/set-gen/generators) = %{version}-%{release} +Provides: golang(%{import_path}/cmd/libs/go2idl/types) = 
%{version}-%{release} +Provides: golang(%{import_path}/federation/apis/federation) = %{version}-%{release} +Provides: golang(%{import_path}/federation/apis/federation/install) = %{version}-%{release} +Provides: golang(%{import_path}/federation/apis/federation/v1beta1) = %{version}-%{release} +Provides: golang(%{import_path}/federation/client/clientset_generated/federation_internalclientset) = %{version}-%{release} +Provides: golang(%{import_path}/federation/client/clientset_generated/federation_internalclientset/fake) = %{version}-%{release} +Provides: golang(%{import_path}/federation/client/clientset_generated/federation_internalclientset/typed/core/unversioned) = %{version}-%{release} +Provides: golang(%{import_path}/federation/client/clientset_generated/federation_internalclientset/typed/core/unversioned/fake) = %{version}-%{release} +Provides: golang(%{import_path}/federation/client/clientset_generated/federation_internalclientset/typed/extensions/unversioned) = %{version}-%{release} +Provides: golang(%{import_path}/federation/client/clientset_generated/federation_internalclientset/typed/extensions/unversioned/fake) = %{version}-%{release} +Provides: golang(%{import_path}/federation/client/clientset_generated/federation_internalclientset/typed/federation/unversioned) = %{version}-%{release} +Provides: golang(%{import_path}/federation/client/clientset_generated/federation_internalclientset/typed/federation/unversioned/fake) = %{version}-%{release} +Provides: golang(%{import_path}/federation/client/clientset_generated/federation_release_1_3) = %{version}-%{release} +Provides: golang(%{import_path}/federation/client/clientset_generated/federation_release_1_3/fake) = %{version}-%{release} +Provides: golang(%{import_path}/federation/client/clientset_generated/federation_release_1_3/typed/core/v1) = %{version}-%{release} +Provides: golang(%{import_path}/federation/client/clientset_generated/federation_release_1_3/typed/core/v1/fake) = %{version}-%{release} +Provides: golang(%{import_path}/federation/client/clientset_generated/federation_release_1_3/typed/federation/v1beta1) = %{version}-%{release} +Provides: golang(%{import_path}/federation/client/clientset_generated/federation_release_1_3/typed/federation/v1beta1/fake) = %{version}-%{release} +Provides: golang(%{import_path}/federation/client/clientset_generated/federation_release_1_4) = %{version}-%{release} +Provides: golang(%{import_path}/federation/client/clientset_generated/federation_release_1_4/fake) = %{version}-%{release} +Provides: golang(%{import_path}/federation/client/clientset_generated/federation_release_1_4/typed/core/v1) = %{version}-%{release} +Provides: golang(%{import_path}/federation/client/clientset_generated/federation_release_1_4/typed/core/v1/fake) = %{version}-%{release} +Provides: golang(%{import_path}/federation/client/clientset_generated/federation_release_1_4/typed/extensions/v1beta1) = %{version}-%{release} +Provides: golang(%{import_path}/federation/client/clientset_generated/federation_release_1_4/typed/extensions/v1beta1/fake) = %{version}-%{release} +Provides: golang(%{import_path}/federation/client/clientset_generated/federation_release_1_4/typed/federation/v1beta1) = %{version}-%{release} +Provides: golang(%{import_path}/federation/client/clientset_generated/federation_release_1_4/typed/federation/v1beta1/fake) = %{version}-%{release} +Provides: golang(%{import_path}/federation/pkg/federation-controller/util) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/admission) = %{version}-%{release} 
+Provides: golang(%{import_path}/pkg/api) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/api/annotations) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/api/endpoints) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/api/errors) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/api/errors/storage) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/api/install) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/api/meta) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/api/meta/metatypes) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/api/pod) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/api/resource) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/api/rest) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/api/rest/resttest) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/api/service) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/api/testapi) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/api/testing) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/api/testing/compat) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/api/unversioned) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/api/unversioned/validation) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/api/util) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/api/v1) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/api/validation) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/apimachinery) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/apimachinery/registered) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/apis/abac) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/apis/abac/latest) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/apis/abac/v0) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/apis/abac/v1beta1) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/apis/apps) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/apis/apps/install) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/apis/apps/v1alpha1) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/apis/apps/validation) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/apis/authentication) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/apis/authentication/install) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/apis/authentication/v1beta1) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/apis/authorization) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/apis/authorization/install) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/apis/authorization/v1beta1) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/apis/authorization/validation) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/apis/autoscaling) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/apis/autoscaling/install) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/apis/autoscaling/v1) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/apis/autoscaling/validation) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/apis/batch) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/apis/batch/install) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/apis/batch/v1) = %{version}-%{release} 
+Provides: golang(%{import_path}/pkg/apis/batch/v2alpha1) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/apis/batch/validation) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/apis/certificates) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/apis/certificates/install) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/apis/certificates/v1alpha1) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/apis/certificates/validation) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/apis/componentconfig) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/apis/componentconfig/install) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/apis/componentconfig/v1alpha1) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/apis/extensions) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/apis/extensions/install) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/apis/extensions/v1beta1) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/apis/extensions/validation) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/apis/imagepolicy) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/apis/imagepolicy/install) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/apis/imagepolicy/v1alpha1) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/apis/policy) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/apis/policy/install) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/apis/policy/v1alpha1) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/apis/policy/validation) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/apis/rbac) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/apis/rbac/install) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/apis/rbac/v1alpha1) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/apis/rbac/validation) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/apis/storage) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/apis/storage/install) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/apis/storage/v1beta1) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/apis/storage/validation) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/apiserver) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/apiserver/audit) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/apiserver/authenticator) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/apiserver/metrics) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/apiserver/testing) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/auth/authenticator) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/auth/authenticator/bearertoken) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/auth/authorizer) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/auth/authorizer/abac) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/auth/authorizer/union) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/auth/handlers) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/auth/user) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/capabilities) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/client/cache) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/client/chaosclient) = %{version}-%{release} +Provides: 
golang(%{import_path}/pkg/client/clientset_generated/internalclientset) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/client/clientset_generated/internalclientset/fake) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/client/clientset_generated/internalclientset/typed/authentication/unversioned) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/client/clientset_generated/internalclientset/typed/authentication/unversioned/fake) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/client/clientset_generated/internalclientset/typed/authorization/unversioned) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/client/clientset_generated/internalclientset/typed/authorization/unversioned/fake) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/client/clientset_generated/internalclientset/typed/autoscaling/unversioned) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/client/clientset_generated/internalclientset/typed/autoscaling/unversioned/fake) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/client/clientset_generated/internalclientset/typed/batch/unversioned) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/client/clientset_generated/internalclientset/typed/batch/unversioned/fake) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/client/clientset_generated/internalclientset/typed/certificates/unversioned) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/client/clientset_generated/internalclientset/typed/certificates/unversioned/fake) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/client/clientset_generated/internalclientset/typed/core/unversioned) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/fake) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/fake) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/client/clientset_generated/internalclientset/typed/rbac/unversioned) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/client/clientset_generated/internalclientset/typed/rbac/unversioned/fake) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/client/clientset_generated/internalclientset/typed/storage/unversioned) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/client/clientset_generated/internalclientset/typed/storage/unversioned/fake) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/client/clientset_generated/release_1_2) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/client/clientset_generated/release_1_2/fake) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/client/clientset_generated/release_1_2/typed/core/v1) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/client/clientset_generated/release_1_2/typed/core/v1/fake) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/client/clientset_generated/release_1_2/typed/extensions/v1beta1) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/client/clientset_generated/release_1_2/typed/extensions/v1beta1/fake) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/client/clientset_generated/release_1_3) = %{version}-%{release} +Provides: 
golang(%{import_path}/pkg/client/clientset_generated/release_1_3/fake) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/client/clientset_generated/release_1_3/typed/autoscaling/v1) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/client/clientset_generated/release_1_3/typed/autoscaling/v1/fake) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/client/clientset_generated/release_1_3/typed/batch/v1) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/client/clientset_generated/release_1_3/typed/batch/v1/fake) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/client/clientset_generated/release_1_3/typed/core/v1) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/client/clientset_generated/release_1_3/typed/core/v1/fake) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/client/clientset_generated/release_1_3/typed/extensions/v1beta1) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/client/clientset_generated/release_1_3/typed/extensions/v1beta1/fake) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/client/clientset_generated/release_1_4) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/client/clientset_generated/release_1_4/fake) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/client/clientset_generated/release_1_4/typed/authorization/v1beta1) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/client/clientset_generated/release_1_4/typed/authorization/v1beta1/fake) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/client/clientset_generated/release_1_4/typed/autoscaling/v1) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/client/clientset_generated/release_1_4/typed/autoscaling/v1/fake) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/client/clientset_generated/release_1_4/typed/batch/v1) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/client/clientset_generated/release_1_4/typed/batch/v1/fake) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/client/clientset_generated/release_1_4/typed/core/v1) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/client/clientset_generated/release_1_4/typed/core/v1/fake) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/client/clientset_generated/release_1_4/typed/extensions/v1beta1) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/client/clientset_generated/release_1_4/typed/extensions/v1beta1/fake) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/client/clientset_generated/release_1_4/typed/policy/v1alpha1) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/client/clientset_generated/release_1_4/typed/policy/v1alpha1/fake) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/client/leaderelection) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/client/metrics) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/client/metrics/prometheus) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/client/record) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/client/restclient) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/client/testing/core) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/client/transport) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/client/typed/discovery) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/client/typed/discovery/fake) = %{version}-%{release} +Provides: 
golang(%{import_path}/pkg/client/typed/dynamic) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/client/unversioned) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/client/unversioned/adapters/internalclientset) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/client/unversioned/auth) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/client/unversioned/clientcmd) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/client/unversioned/clientcmd/api) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/client/unversioned/clientcmd/api/latest) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/client/unversioned/clientcmd/api/v1) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/client/unversioned/fake) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/client/unversioned/portforward) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/client/unversioned/remotecommand) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/client/unversioned/testclient) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/client/unversioned/testclient/simple) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/cloudprovider) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/cloudprovider/providers) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/cloudprovider/providers/aws) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/cloudprovider/providers/azure) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/cloudprovider/providers/cloudstack) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/cloudprovider/providers/fake) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/cloudprovider/providers/gce) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/cloudprovider/providers/mesos) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/cloudprovider/providers/openstack) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/cloudprovider/providers/ovirt) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/cloudprovider/providers/rackspace) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/cloudprovider/providers/vsphere) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/controller) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/controller/certificates) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/controller/daemon) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/controller/deployment) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/controller/deployment/util) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/controller/disruption) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/controller/endpoint) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/controller/framework) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/controller/framework/informers) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/controller/garbagecollector) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/controller/garbagecollector/metaonly) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/controller/job) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/controller/namespace) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/controller/node) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/controller/petset) = %{version}-%{release} 
+Provides: golang(%{import_path}/pkg/controller/podautoscaler) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/controller/podautoscaler/metrics) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/controller/podgc) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/controller/replicaset) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/controller/replicaset/options) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/controller/replication) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/controller/resourcequota) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/controller/route) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/controller/scheduledjob) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/controller/service) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/controller/serviceaccount) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/controller/volume/attachdetach) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/controller/volume/attachdetach/cache) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/controller/volume/attachdetach/populator) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/controller/volume/attachdetach/reconciler) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/controller/volume/attachdetach/statusupdater) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/controller/volume/attachdetach/testing) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/controller/volume/persistentvolume) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/controller/volume/persistentvolume/options) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/conversion) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/conversion/queryparams) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/credentialprovider) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/credentialprovider/aws) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/credentialprovider/gcp) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/dns) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/fieldpath) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/fields) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/genericapiserver) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/genericapiserver/authorizer) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/genericapiserver/openapi) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/genericapiserver/options) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/genericapiserver/validation) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/healthz) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/httplog) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/hyperkube) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/kubectl) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/kubectl/cmd) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/kubectl/cmd/config) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/kubectl/cmd/rollout) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/kubectl/cmd/set) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/kubectl/cmd/templates) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/kubectl/cmd/util) = 
%{version}-%{release} +Provides: golang(%{import_path}/pkg/kubectl/cmd/util/editor) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/kubectl/cmd/util/jsonmerge) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/kubectl/metricsutil) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/kubectl/resource) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/kubectl/testing) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/kubelet) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/kubelet/api) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/kubelet/api/testing) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/kubelet/api/v1alpha1/runtime) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/kubelet/api/v1alpha1/stats) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/kubelet/cadvisor) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/kubelet/cadvisor/testing) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/kubelet/client) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/kubelet/cm) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/kubelet/config) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/kubelet/container) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/kubelet/container/testing) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/kubelet/custommetrics) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/kubelet/dockershim) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/kubelet/dockertools) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/kubelet/envvars) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/kubelet/events) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/kubelet/eviction) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/kubelet/images) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/kubelet/kuberuntime) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/kubelet/leaky) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/kubelet/lifecycle) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/kubelet/metrics) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/kubelet/network) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/kubelet/network/cni) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/kubelet/network/cni/testing) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/kubelet/network/exec) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/kubelet/network/hairpin) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/kubelet/network/hostport) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/kubelet/network/hostport/testing) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/kubelet/network/kubenet) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/kubelet/network/mock_network) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/kubelet/network/testing) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/kubelet/pleg) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/kubelet/pod) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/kubelet/pod/testing) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/kubelet/prober) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/kubelet/prober/results) = %{version}-%{release} +Provides: 
golang(%{import_path}/pkg/kubelet/prober/testing) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/kubelet/qos) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/kubelet/remote) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/kubelet/rkt) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/kubelet/rkt/mock_os) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/kubelet/rktshim) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/kubelet/server) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/kubelet/server/portforward) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/kubelet/server/remotecommand) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/kubelet/server/stats) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/kubelet/status) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/kubelet/sysctl) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/kubelet/types) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/kubelet/util) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/kubelet/util/cache) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/kubelet/util/format) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/kubelet/util/ioutils) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/kubelet/util/queue) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/kubelet/util/sliceutils) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/kubelet/volumemanager) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/kubelet/volumemanager/cache) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/kubelet/volumemanager/populator) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/kubelet/volumemanager/reconciler) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/kubemark) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/labels) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/master) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/master/ports) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/metrics) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/probe) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/probe/exec) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/probe/http) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/probe/tcp) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/proxy) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/proxy/config) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/proxy/healthcheck) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/proxy/iptables) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/proxy/userspace) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/quota) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/quota/evaluator/core) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/quota/generic) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/quota/install) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/registry) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/registry/authorization/subjectaccessreview) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/registry/authorization/util) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/registry/cachesize) = %{version}-%{release} 
+Provides: golang(%{import_path}/pkg/registry/certificates) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/registry/certificates/etcd) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/registry/clusterrole) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/registry/clusterrole/etcd) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/registry/clusterrole/policybased) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/registry/clusterrolebinding) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/registry/clusterrolebinding/etcd) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/registry/clusterrolebinding/policybased) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/registry/componentstatus) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/registry/configmap) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/registry/configmap/etcd) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/registry/controller) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/registry/controller/etcd) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/registry/daemonset) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/registry/daemonset/etcd) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/registry/deployment) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/registry/deployment/etcd) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/registry/endpoint) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/registry/endpoint/etcd) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/registry/event) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/registry/event/etcd) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/registry/experimental/controller/etcd) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/registry/generic) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/registry/generic/registry) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/registry/generic/rest) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/registry/horizontalpodautoscaler) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/registry/horizontalpodautoscaler/etcd) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/registry/ingress) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/registry/ingress/etcd) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/registry/job) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/registry/job/etcd) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/registry/limitrange) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/registry/limitrange/etcd) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/registry/namespace) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/registry/namespace/etcd) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/registry/networkpolicy) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/registry/networkpolicy/etcd) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/registry/node) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/registry/node/etcd) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/registry/node/rest) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/registry/persistentvolume) = %{version}-%{release} +Provides: 
golang(%{import_path}/pkg/registry/persistentvolume/etcd) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/registry/persistentvolumeclaim) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/registry/persistentvolumeclaim/etcd) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/registry/petset) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/registry/petset/etcd) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/registry/pod) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/registry/pod/etcd) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/registry/pod/rest) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/registry/poddisruptionbudget) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/registry/poddisruptionbudget/etcd) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/registry/podsecuritypolicy) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/registry/podsecuritypolicy/etcd) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/registry/podtemplate) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/registry/podtemplate/etcd) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/registry/rangeallocation) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/registry/registrytest) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/registry/replicaset) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/registry/replicaset/etcd) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/registry/resourcequota) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/registry/resourcequota/etcd) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/registry/role) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/registry/role/etcd) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/registry/role/policybased) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/registry/rolebinding) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/registry/rolebinding/etcd) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/registry/rolebinding/policybased) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/registry/scheduledjob) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/registry/scheduledjob/etcd) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/registry/secret) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/registry/secret/etcd) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/registry/securitycontextconstraints) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/registry/securitycontextconstraints/etcd) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/registry/service) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/registry/service/allocator) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/registry/service/allocator/etcd) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/registry/service/etcd) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/registry/service/ipallocator) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/registry/service/ipallocator/controller) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/registry/service/ipallocator/etcd) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/registry/service/portallocator) = %{version}-%{release} +Provides: 
golang(%{import_path}/pkg/registry/service/portallocator/controller) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/registry/serviceaccount) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/registry/serviceaccount/etcd) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/registry/storageclass) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/registry/storageclass/etcd) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/registry/thirdpartyresource) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/registry/thirdpartyresource/etcd) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/registry/thirdpartyresourcedata) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/registry/thirdpartyresourcedata/etcd) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/registry/tokenreview) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/runtime) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/runtime/serializer) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/runtime/serializer/json) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/runtime/serializer/protobuf) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/runtime/serializer/recognizer) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/runtime/serializer/streaming) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/runtime/serializer/versioning) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/runtime/serializer/yaml) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/security) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/security/apparmor) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/security/podsecuritypolicy) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/security/podsecuritypolicy/apparmor) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/security/podsecuritypolicy/capabilities) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/security/podsecuritypolicy/group) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/security/podsecuritypolicy/selinux) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/security/podsecuritypolicy/sysctl) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/security/podsecuritypolicy/user) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/security/podsecuritypolicy/util) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/securitycontext) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/securitycontextconstraints) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/securitycontextconstraints/capabilities) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/securitycontextconstraints/group) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/securitycontextconstraints/seccomp) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/securitycontextconstraints/selinux) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/securitycontextconstraints/user) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/securitycontextconstraints/util) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/selection) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/serviceaccount) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/ssh) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/storage) = %{version}-%{release} +Provides: 
golang(%{import_path}/pkg/storage/etcd) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/storage/etcd/etcdtest) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/storage/etcd/metrics) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/storage/etcd/testing) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/storage/etcd/testing/testingcert) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/storage/etcd/util) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/storage/etcd3) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/storage/storagebackend) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/storage/storagebackend/factory) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/storage/testing) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/types) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/ui) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/util) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/util/async) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/util/bandwidth) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/util/cache) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/util/certificates) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/util/chmod) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/util/chown) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/util/clock) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/util/codeinspector) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/util/config) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/util/configz) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/util/crlf) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/util/crypto) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/util/dbus) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/util/diff) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/util/ebtables) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/util/env) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/util/errors) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/util/exec) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/util/flag) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/util/flock) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/util/flowcontrol) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/util/flushwriter) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/util/framer) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/util/goroutinemap) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/util/goroutinemap/exponentialbackoff) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/util/hash) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/util/homedir) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/util/httpstream) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/util/httpstream/spdy) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/util/integer) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/util/interrupt) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/util/intstr) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/util/io) = %{version}-%{release} +Provides: 
golang(%{import_path}/pkg/util/iptables) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/util/iptables/testing) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/util/json) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/util/jsonpath) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/util/keymutex) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/util/labels) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/util/limitwriter) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/util/logs) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/util/maps) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/util/metrics) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/util/mount) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/util/net) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/util/net/sets) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/util/node) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/util/oom) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/util/parsers) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/util/pod) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/util/procfs) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/util/proxy) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/util/rand) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/util/replicaset) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/util/resourcecontainer) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/util/rlimit) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/util/runtime) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/util/selinux) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/util/sets) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/util/sets/types) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/util/slice) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/util/strategicpatch) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/util/strings) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/util/sysctl) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/util/sysctl/testing) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/util/system) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/util/term) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/util/testing) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/util/threading) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/util/uuid) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/util/validation) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/util/validation/field) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/util/wait) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/util/workqueue) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/util/wsstream) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/util/yaml) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/version) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/version/prometheus) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/version/verflag) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/volume) = %{version}-%{release} +Provides: 
golang(%{import_path}/pkg/volume/aws_ebs) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/volume/azure_dd) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/volume/azure_file) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/volume/cephfs) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/volume/cinder) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/volume/configmap) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/volume/downwardapi) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/volume/empty_dir) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/volume/fc) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/volume/flexvolume) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/volume/flocker) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/volume/gce_pd) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/volume/git_repo) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/volume/glusterfs) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/volume/host_path) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/volume/iscsi) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/volume/nfs) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/volume/quobyte) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/volume/rbd) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/volume/secret) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/volume/testing) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/volume/util) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/volume/util/nestedpendingoperations) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/volume/util/operationexecutor) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/volume/util/types) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/volume/util/volumehelper) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/volume/vsphere_volume) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/watch) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/watch/json) = %{version}-%{release} +Provides: golang(%{import_path}/pkg/watch/versioned) = %{version}-%{release} +Provides: golang(%{import_path}/plugin/cmd/kube-scheduler/app) = %{version}-%{release} +Provides: golang(%{import_path}/plugin/cmd/kube-scheduler/app/options) = %{version}-%{release} +Provides: golang(%{import_path}/plugin/pkg/admission/admit) = %{version}-%{release} +Provides: golang(%{import_path}/plugin/pkg/admission/alwayspullimages) = %{version}-%{release} +Provides: golang(%{import_path}/plugin/pkg/admission/antiaffinity) = %{version}-%{release} +Provides: golang(%{import_path}/plugin/pkg/admission/deny) = %{version}-%{release} +Provides: golang(%{import_path}/plugin/pkg/admission/exec) = %{version}-%{release} +Provides: golang(%{import_path}/plugin/pkg/admission/imagepolicy) = %{version}-%{release} +Provides: golang(%{import_path}/plugin/pkg/admission/initialresources) = %{version}-%{release} +Provides: golang(%{import_path}/plugin/pkg/admission/limitranger) = %{version}-%{release} +Provides: golang(%{import_path}/plugin/pkg/admission/namespace/autoprovision) = %{version}-%{release} +Provides: golang(%{import_path}/plugin/pkg/admission/namespace/exists) = %{version}-%{release} +Provides: golang(%{import_path}/plugin/pkg/admission/namespace/lifecycle) = %{version}-%{release} +Provides: 
golang(%{import_path}/plugin/pkg/admission/persistentvolume/label) = %{version}-%{release} +Provides: golang(%{import_path}/plugin/pkg/admission/resourcequota) = %{version}-%{release} +Provides: golang(%{import_path}/plugin/pkg/admission/security) = %{version}-%{release} +Provides: golang(%{import_path}/plugin/pkg/admission/security/podsecuritypolicy) = %{version}-%{release} +Provides: golang(%{import_path}/plugin/pkg/admission/securitycontext/scdeny) = %{version}-%{release} +Provides: golang(%{import_path}/plugin/pkg/admission/serviceaccount) = %{version}-%{release} +Provides: golang(%{import_path}/plugin/pkg/admission/storageclass/default) = %{version}-%{release} +Provides: golang(%{import_path}/plugin/pkg/auth) = %{version}-%{release} +Provides: golang(%{import_path}/plugin/pkg/auth/authenticator) = %{version}-%{release} +Provides: golang(%{import_path}/plugin/pkg/auth/authenticator/password) = %{version}-%{release} +Provides: golang(%{import_path}/plugin/pkg/auth/authenticator/password/allow) = %{version}-%{release} +Provides: golang(%{import_path}/plugin/pkg/auth/authenticator/password/keystone) = %{version}-%{release} +Provides: golang(%{import_path}/plugin/pkg/auth/authenticator/password/passwordfile) = %{version}-%{release} +Provides: golang(%{import_path}/plugin/pkg/auth/authenticator/request/basicauth) = %{version}-%{release} +Provides: golang(%{import_path}/plugin/pkg/auth/authenticator/request/union) = %{version}-%{release} +Provides: golang(%{import_path}/plugin/pkg/auth/authenticator/request/x509) = %{version}-%{release} +Provides: golang(%{import_path}/plugin/pkg/auth/authenticator/token/oidc) = %{version}-%{release} +Provides: golang(%{import_path}/plugin/pkg/auth/authenticator/token/oidc/testing) = %{version}-%{release} +Provides: golang(%{import_path}/plugin/pkg/auth/authenticator/token/tokenfile) = %{version}-%{release} +Provides: golang(%{import_path}/plugin/pkg/auth/authenticator/token/tokentest) = %{version}-%{release} +Provides: golang(%{import_path}/plugin/pkg/auth/authenticator/token/webhook) = %{version}-%{release} +Provides: golang(%{import_path}/plugin/pkg/auth/authorizer) = %{version}-%{release} +Provides: golang(%{import_path}/plugin/pkg/auth/authorizer/rbac) = %{version}-%{release} +Provides: golang(%{import_path}/plugin/pkg/auth/authorizer/webhook) = %{version}-%{release} +Provides: golang(%{import_path}/plugin/pkg/client/auth) = %{version}-%{release} +Provides: golang(%{import_path}/plugin/pkg/client/auth/gcp) = %{version}-%{release} +Provides: golang(%{import_path}/plugin/pkg/client/auth/oidc) = %{version}-%{release} +Provides: golang(%{import_path}/plugin/pkg/scheduler) = %{version}-%{release} +Provides: golang(%{import_path}/plugin/pkg/scheduler/algorithm) = %{version}-%{release} +Provides: golang(%{import_path}/plugin/pkg/scheduler/algorithm/predicates) = %{version}-%{release} +Provides: golang(%{import_path}/plugin/pkg/scheduler/algorithm/priorities) = %{version}-%{release} +Provides: golang(%{import_path}/plugin/pkg/scheduler/algorithm/priorities/util) = %{version}-%{release} +Provides: golang(%{import_path}/plugin/pkg/scheduler/algorithmprovider) = %{version}-%{release} +Provides: golang(%{import_path}/plugin/pkg/scheduler/algorithmprovider/defaults) = %{version}-%{release} +Provides: golang(%{import_path}/plugin/pkg/scheduler/api) = %{version}-%{release} +Provides: golang(%{import_path}/plugin/pkg/scheduler/api/latest) = %{version}-%{release} +Provides: golang(%{import_path}/plugin/pkg/scheduler/api/v1) = %{version}-%{release} +Provides: 
golang(%{import_path}/plugin/pkg/scheduler/api/validation) = %{version}-%{release}
+Provides: golang(%{import_path}/plugin/pkg/scheduler/factory) = %{version}-%{release}
+Provides: golang(%{import_path}/plugin/pkg/scheduler/metrics) = %{version}-%{release}
+Provides: golang(%{import_path}/plugin/pkg/scheduler/schedulercache) = %{version}-%{release}
+Provides: golang(%{import_path}/plugin/pkg/scheduler/testing) = %{version}-%{release}
+Provides: golang(%{import_path}/plugin/pkg/webhook) = %{version}-%{release}
+
+%description devel
+Libraries for building packages importing k8s.io/kubernetes.
+Currently, the devel package is not suitable for development.
+It is meant only as a buildtime dependency for other projects.
+
+This package contains library source intended for
+building other packages which use %{project}/%{repo}.
+%endif
+
+##############################################
+%package unit-test
+Summary: %{summary} - for running unit tests
+
+# the Requires below are used for testing
+Requires: golang >= 1.2-7
+Requires: etcd >= 2.0.9
+Requires: hostname
+Requires: rsync
+Requires: NetworkManager
+
+%description unit-test
+%{summary} - for running unit tests
+
+##############################################
+%package master
+Summary: Kubernetes services for master host
+
+BuildRequires: golang >= 1.2-7
+BuildRequires: systemd
+BuildRequires: rsync
+BuildRequires: go-md2man
+BuildRequires: go-bindata
+
+Requires(pre): shadow-utils
+Requires: kubernetes-client = %{version}-%{release}
+
+# if node is installed with master, version and release must be the same
+Conflicts: kubernetes-node < %{version}-%{release}
+Conflicts: kubernetes-node > %{version}-%{release}
+
+%description master
+Kubernetes services for master host
+
+##############################################
+%package node
+Summary: Kubernetes services for node host
+
+%if 0%{?fedora} >= 27
+Requires: (docker or docker-ce)
+Suggests: docker
+%else
+Requires: docker
+%endif
+Requires: conntrack-tools
+
+BuildRequires: golang >= 1.2-7
+BuildRequires: systemd
+BuildRequires: rsync
+BuildRequires: go-md2man
+BuildRequires: go-bindata
+
+Requires(pre): shadow-utils
+Requires: socat
+Requires: kubernetes-client = %{version}-%{release}
+
+# if master is installed with node, version and release must be the same
+Conflicts: kubernetes-master < %{version}-%{release}
+Conflicts: kubernetes-master > %{version}-%{release}
+
+%description node
+Kubernetes services for node host
+
+##############################################
+%package kubeadm
+Summary: Kubernetes tool for standing up clusters
+Requires: kubernetes-node = %{version}-%{release}
+Requires: containernetworking-cni
+
+%description kubeadm
+Kubernetes tool for standing up clusters
+
+##############################################
+%package client
+Summary: Kubernetes client tools
+
+BuildRequires: golang >= 1.2-7
+BuildRequires: go-bindata
+
+%description client
+Kubernetes client tools like kubectl
+
+##############################################
+
+%prep
+%setup -q -n %{con_repo}-%{con_commit} -T -b 1
+%setup -q -n %{repo}-%{commit}
+
+%if 0%{?with_debug}
+%patch3 -p1
+%endif
+
+%patch20 -p1
+
+# copy contrib folder
+mkdir contrib
+cp -r ../%{con_repo}-%{con_commit}/init contrib/.
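+# Note on the source layout: the first setup invocation above ("-T -b 1") unpacks
+# only the contrib tarball (source 1), and the second one unpacks the kubernetes
+# source tarball (source 0). The init/ tree copied into contrib/ here supplies the
+# systemd unit files, environment files and tmpfiles.d configuration that the
+# install section later copies into the systemd unit directory, the /etc/kubernetes
+# config directory and the tmpfiles.d directory.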
+
+#src/k8s.io/kubernetes/pkg/util/certificates
+# Patch the code to remove elliptic.P224 support
+for dir in vendor/github.com/google/certificate-transparency/go/x509 pkg/util/certificates; do
+ if [ -d "${dir}" ]; then
+ pushd ${dir}
+ sed -i "/^[^=]*$/ s/oidNamedCurveP224/oidNamedCurveP256/g" *.go
+ sed -i "/^[^=]*$/ s/elliptic\.P224/elliptic.P256/g" *.go
+ popd
+ fi
+done
+
+# Move all the code under the src/k8s.io/kubernetes directory
+mkdir -p src/k8s.io/kubernetes
+mv $(ls | grep -v "^src$") src/k8s.io/kubernetes/.
+
+# Patch tests to be run over distro bins
+#patch4 -p1
+#patch5 -p1
+
+%ifarch ppc64le
+%patch16 -p1
+%endif
+
+###############
+
+%build
+pushd src/k8s.io/kubernetes/
+export KUBE_GIT_TREE_STATE="clean"
+export KUBE_GIT_COMMIT=%{commit}
+export KUBE_GIT_VERSION=%{kube_git_version}
+export KUBE_EXTRA_GOPATH=$(pwd)/Godeps/_workspace
+
+# https://bugzilla.redhat.com/show_bug.cgi?id=1392922#c1
+%ifarch ppc64le
+export GOLDFLAGS='-linkmode=external'
+%endif
+make WHAT="--use_go_build cmd/hyperkube cmd/kube-apiserver cmd/kubeadm"
+
+# convert md to man
+./hack/generate-docs.sh || true
+pushd docs
+pushd admin
+cp kube-apiserver.md kube-controller-manager.md kube-proxy.md kube-scheduler.md kubelet.md ..
+popd
+cp %{SOURCE33} genmanpages.sh
+bash genmanpages.sh
+popd
+popd
+
+%install
+pushd src/k8s.io/kubernetes/
+. hack/lib/init.sh
+kube::golang::setup_env
+
+%ifarch ppc64le
+output_path="_output/local/go/bin"
+%else
+output_path="${KUBE_OUTPUT_BINPATH}/$(kube::golang::host_platform)"
+%endif
+
+install -m 755 -d %{buildroot}%{_bindir}
+
+echo "+++ INSTALLING hyperkube"
+install -p -m 755 -t %{buildroot}%{_bindir} ${output_path}/hyperkube
+
+echo "+++ INSTALLING kube-apiserver"
+install -p -m 754 -t %{buildroot}%{_bindir} ${output_path}/kube-apiserver
+
+echo "+++ INSTALLING kubeadm"
+install -p -m 755 -t %{buildroot}%{_bindir} ${output_path}/kubeadm
+install -d -m 0755 %{buildroot}/%{_sysconfdir}/systemd/system/kubelet.service.d
+install -p -m 0644 -t %{buildroot}/%{_sysconfdir}/systemd/system/kubelet.service.d %{SOURCE4}
+
+binaries=(kube-controller-manager kube-scheduler kube-proxy kubelet kubectl)
+for bin in "${binaries[@]}"; do
+ echo "+++ HARDLINKING ${bin} to hyperkube"
+ ln %{buildroot}%{_bindir}/hyperkube %{buildroot}%{_bindir}/${bin}
+done
+
+# install the bash completion
+install -d -m 0755 %{buildroot}%{_datadir}/bash-completion/completions/
+%{buildroot}%{_bindir}/kubectl completion bash > %{buildroot}%{_datadir}/bash-completion/completions/kubectl
+
+# install config files
+install -d -m 0755 %{buildroot}%{_sysconfdir}/%{name}
+install -m 644 -t %{buildroot}%{_sysconfdir}/%{name} contrib/init/systemd/environ/*
+
+# install service files
+install -d -m 0755 %{buildroot}%{_unitdir}
+install -m 0644 -t %{buildroot}%{_unitdir} contrib/init/systemd/*.service
+
+# install manpages
+install -d %{buildroot}%{_mandir}/man1
+install -p -m 644 docs/man/man1/* %{buildroot}%{_mandir}/man1
+rm %{buildroot}%{_mandir}/man1/cloud-controller-manager.*
+# docs/man/man1/*.1 copied from the k8s tarball
+
+# create the directory where the kubelet puts volumes by default
+install -d %{buildroot}%{_sharedstatedir}/kubelet
+
+# place contrib/init/systemd/tmpfiles.d/kubernetes.conf as /usr/lib/tmpfiles.d/kubernetes.conf
+install -d -m 0755 %{buildroot}%{_tmpfilesdir}
+install -p -m 0644 -t %{buildroot}/%{_tmpfilesdir} contrib/init/systemd/tmpfiles.d/kubernetes.conf
+mkdir -p %{buildroot}/run
+install -d -m 0755 %{buildroot}/run/%{name}/
+
+# enable CPU and Memory accounting
+install -d -m 0755 %{buildroot}/%{_sysconfdir}/systemd/system.conf.d
+install -p -m 0644 -t %{buildroot}/%{_sysconfdir}/systemd/system.conf.d %{SOURCE3}
+
+# source code for building projects
+%if 0%{?with_devel}
+install -d -p %{buildroot}/%{gopath}/src/%{import_path}/
+echo "%%dir %%{gopath}/src/%%{import_path}/." >> devel.file-list
+# find all *.go files except *_test.go and generate devel.file-list
+for file in $(find . -iname "*.go" \! -iname "*_test.go") ; do
+ echo "%%dir %%{gopath}/src/%%{import_path}/$(dirname $file)" >> devel.file-list
+ install -d -p %{buildroot}/%{gopath}/src/%{import_path}/$(dirname $file)
+ cp -pav $file %{buildroot}/%{gopath}/src/%{import_path}/$file
+ echo "%%{gopath}/src/%%{import_path}/$file" >> devel.file-list
+done
+%endif
+
+%if 0%{?with_devel}
+sort -u -o devel.file-list devel.file-list
+%endif
+
+popd
+
+%if 0%{?with_devel}
+mv src/k8s.io/kubernetes/devel.file-list .
+%endif
+
+mv src/k8s.io/kubernetes/*.md .
+mv src/k8s.io/kubernetes/LICENSE .
+
+
+# place files for the unit-test rpm
+install -d -m 0755 %{buildroot}%{_sharedstatedir}/kubernetes-unit-test/
+# basically, everything from the root directory is needed
+# unit tests need the source code
+# integration tests need docs and other files
+# test-cmd.sh currently needs cluster, examples and other directories
+cp -a src %{buildroot}%{_sharedstatedir}/kubernetes-unit-test/
+rm -rf %{buildroot}%{_sharedstatedir}/kubernetes-unit-test/src/k8s.io/kubernetes/_output
+cp -a *.md %{buildroot}%{_sharedstatedir}/kubernetes-unit-test/src/k8s.io/kubernetes/
+
+%check
+# Fedora, RHEL7 and CentOS are tested via the unit-test subpackage; the checks below are intentionally disabled
+if [ 1 != 1 ]; then
+echo "******Testing the commands*****"
+hack/test-cmd.sh
+echo "******Benchmarking kube********"
+hack/benchmark-go.sh
+
+# In Fedora 20 and RHEL7 the go cover tool isn't available correctly
+%if 0%{?fedora} >= 21
+echo "******Testing the go code******"
+hack/test-go.sh
+echo "******Testing integration******"
+hack/test-integration.sh --use_go_build
+%endif
+fi
+
+##############################################
+%files
+# empty as it depends on master and node
+
+##############################################
+%files master
+%license LICENSE
+%doc *.md
+%{_mandir}/man1/kube-apiserver.1*
+%{_mandir}/man1/kube-controller-manager.1*
+%{_mandir}/man1/kube-scheduler.1*
+%attr(754, -, kube) %caps(cap_net_bind_service=ep) %{_bindir}/kube-apiserver
+%{_bindir}/kube-controller-manager
+%{_bindir}/kube-scheduler
+%{_bindir}/hyperkube
+%{_unitdir}/kube-apiserver.service
+%{_unitdir}/kube-controller-manager.service
+%{_unitdir}/kube-scheduler.service
+%dir %{_sysconfdir}/%{name}
+%config(noreplace) %{_sysconfdir}/%{name}/apiserver
+%config(noreplace) %{_sysconfdir}/%{name}/scheduler
+%config(noreplace) %{_sysconfdir}/%{name}/config
+%config(noreplace) %{_sysconfdir}/%{name}/controller-manager
+%{_tmpfilesdir}/kubernetes.conf
+%verify(not size mtime md5) %attr(755, kube,kube) %dir /run/%{name}
+
+##############################################
+%files node
+%license LICENSE
+%doc *.md
+%{_mandir}/man1/kubelet.1*
+%{_mandir}/man1/kube-proxy.1*
+%{_bindir}/kubelet
+%{_bindir}/kube-proxy
+%{_bindir}/hyperkube
+%{_unitdir}/kube-proxy.service
+%{_unitdir}/kubelet.service
+%dir %{_sharedstatedir}/kubelet
+%dir %{_sysconfdir}/%{name}
+%config(noreplace) %{_sysconfdir}/%{name}/config
+%config(noreplace) %{_sysconfdir}/%{name}/kubelet
+%config(noreplace) %{_sysconfdir}/%{name}/proxy
+%config(noreplace) %{_sysconfdir}/systemd/system.conf.d/kubernetes-accounting.conf
+%{_tmpfilesdir}/kubernetes.conf
+%verify(not size mtime
md5) %attr(755, kube,kube) %dir /run/%{name} + +############################################## +%files kubeadm +%license LICENSE +%doc *.md +%{_mandir}/man1/kubeadm.1* +%{_mandir}/man1/kubeadm-* +%{_bindir}/kubeadm +%dir %{_sysconfdir}/systemd/system/kubelet.service.d +%config(noreplace) %{_sysconfdir}/systemd/system/kubelet.service.d/kubeadm.conf + +############################################## +%files client +%license LICENSE +%doc *.md +%{_mandir}/man1/kubectl.1* +%{_mandir}/man1/kubectl-* +%{_bindir}/kubectl +%{_bindir}/hyperkube +%{_datadir}/bash-completion/completions/kubectl + +############################################## +%files unit-test +%{_sharedstatedir}/kubernetes-unit-test/ + +%if 0%{?with_devel} +%files devel -f devel.file-list +%doc *.md +%dir %{gopath}/src/k8s.io +%endif + +############################################## + +%pre master +getent group kube >/dev/null || groupadd -r kube +getent passwd kube >/dev/null || useradd -r -g kube -d / -s /sbin/nologin \ + -c "Kubernetes user" kube + +%post master +%systemd_post kube-apiserver kube-scheduler kube-controller-manager + +%preun master +%systemd_preun kube-apiserver kube-scheduler kube-controller-manager + +%postun master +%systemd_postun + + +%pre node +getent group kube >/dev/null || groupadd -r kube +getent passwd kube >/dev/null || useradd -r -g kube -d / -s /sbin/nologin \ + -c "Kubernetes user" kube + +%post node +%systemd_post kubelet kube-proxy +# If accounting is not currently enabled systemd reexec +if [[ `systemctl show docker kubelet | grep -q -e CPUAccounting=no -e MemoryAccounting=no; echo $?` -eq 0 ]]; then + systemctl daemon-reexec +fi + +%preun node +%systemd_preun kubelet kube-proxy + +%postun node +%systemd_postun + +############################################ +%changelog +* Tue Mar 27 2018 Spyros Trigazis - 1.10.0-1 +- Bump to upstream v1.10.0 + +* Thu Mar 22 2018 Spyros Trigazis - 1.9.6-1 +- Bump to upstream v1.9.6 + +* Tue Mar 20 2018 Jan Chaloupka - 1.9.5-1 +- Bump to upstream v1.9.5 + resolves: #1554420 + +* Sun Feb 11 2018 Spyros Trigazis - 1.9.3-1 +- Bump to upstream v1.9.3 + +* Fri Feb 09 2018 Jan Chaloupka - 1.9.1-5 +- Add --fail-swap-on=false flag to the /etc/kubernetes/kubelet + resolves: #1542476 + +* Thu Feb 08 2018 Igor Gnatenko - 1.9.1-4 +- Escape macro in %%changelog + +* Wed Feb 07 2018 Fedora Release Engineering - 1.9.1-3 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_28_Mass_Rebuild + +* Mon Jan 15 2018 Jan Chaloupka - 1.9.1-2 +- If docker is not available, try docker-ce instead (use boolean dependencies) + resolves: #1534508 + +* Fri Jan 12 2018 Spyros Trigazis - 1.9.1-1 +- Update to upstream v1.9.1 + resolves #1533794 + +* Tue Oct 24 2017 Jan Chaloupka - 1.8.1-1 +- Update to upstream v1.8.1 + resolves: #1497135 + +* Mon Oct 02 2017 Jan Chaloupka - 1.8.0-1 +- Update to upstream v1.8.0 + related: #1497625 + +* Mon Oct 02 2017 Jan Chaloupka - 1.7.7-1 +- Update to upstream v1.7.7 + resolves: #1497625 + +* Mon Sep 18 2017 Jan Chaloupka - 1.7.6-1 +- Update to upstream v1.7.6 + resolves: #1492551 + +* Mon Sep 11 2017 Jan Chaloupka - 1.7.5-1 +- Update to upstream v1.7.5 + resolves: #1490316 + +* Fri Aug 18 2017 Jan Chaloupka - 1.7.4-1 +- Fix the version + related: #1482874 + +* Fri Aug 18 2017 Jan Chaloupka - 1.7.3-2 +- Update to upstream v1.7.4 + resolves: #1482874 + +* Tue Aug 08 2017 Jan Chaloupka - 1.7.3-1 +- Update to upstream v1.7.3 + resolves: #1479685 + +* Thu Aug 03 2017 Fedora Release Engineering - 1.7.2-4 +- Rebuilt for 
https://fedoraproject.org/wiki/Fedora_27_Binutils_Mass_Rebuild + +* Sun Jul 30 2017 Florian Weimer - 1.7.2-3 +- Rebuild with binutils fix for ppc64le (#1475636) + +* Wed Jul 26 2017 Fedora Release Engineering - 1.7.2-2 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_27_Mass_Rebuild + +* Mon Jul 24 2017 Jan Chaloupka - 1.7.2-1 +- Update to upstream v1.7.2 + +* Mon Jul 24 2017 Jan Chaloupka - 1.7.1-2 +- Sync kubeadm.conf with upstream service configuration (set Restart,StartLimitInterval,RestartSec) + +* Fri Jul 14 2017 Jan Chaloupka - 1.7.1-1 +- Update to upstream v1.7.1 + resolves: #1471767 + +* Sat Jul 08 2017 Jan Chaloupka - 1.6.7-1 +- Update to upstream v1.6.7 + resolves: #1468823 + resolves: #1468752 + +* Fri May 19 2017 Timothy St. Clair - 1.6.4-1 +- Add kubeadm subpackage to enable upstream deployments + +* Thu May 18 2017 Jan Chaloupka - 1.6.3-1 +- Update to upstream v1.6.3 + resolves: #1452101 + +* Fri May 12 2017 Jan Chaloupka - 1.6.2-2 +- Extend archs with s390x + resolves: #1400000 + +* Tue May 02 2017 Jan Chaloupka - 1.6.2-1 +- Update to upstream v1.6.2 + resolves: #1447338 + +* Tue Apr 11 2017 Jan Chaloupka - 1.6.1-1 +- Update to upstream v1.6.1 + related: #1422889 + +* Fri Mar 31 2017 Jan Chaloupka - 1.5.6-1 +- Update to upstream v1.5.6 + related: #1422889 + +* Mon Mar 27 2017 Jan Chaloupka - 1.5.5-4 +- Update to upstream v1.5.5 + related: #1422889 + +* Mon Mar 27 2017 Jan Chaloupka - 1.5.4-3 +- re-enable debug-info + related: #1422889 + +* Thu Mar 09 2017 Jan Chaloupka - 1.5.4-2 +- Bump to upstream 7243c69eb523aa4377bce883e7c0dd76b84709a1 + related: #1422889 + +* Thu Feb 16 2017 Jan Chaloupka - 1.5.3-1 +- Update to upstream v1.5.3 + resolves: #1422889 + +* Fri Feb 10 2017 Fedora Release Engineering - 1.5.2-3 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_26_Mass_Rebuild + +* Wed Jan 18 2017 Jan Chaloupka - 1.5.2-2 +- fix rootScopeNaming generate selfLink + resolves: #1413997 + +* Fri Jan 13 2017 Jan Chaloupka - 1.5.2-1 +- Bump version as well + related: #1412996 + +* Fri Jan 13 2017 Jan Chaloupka - 1.5.1-2 +- Bump to upstream 1.5.2 + resolves: #1412996 + +* Thu Jan 05 2017 Jan Chaloupka - 1.5.1-1 +- Bump to upstream 1.5.1 + resolves: #1410186 + +* Wed Jan 04 2017 Jan Chaloupka - 1.4.7-2 +- Generate the md files before they are converted to man pages + resolves: #1409943 + +* Mon Dec 12 2016 Jan Chaloupka - 1.4.7-1 +- Bump to upstream v1.4.7 + resolves: #1403823 + New conntrack-tools dependency of kube-proxy + Build kubernetes on ppc64le with linkmode=external + resolves: #1392922 + +* Mon Nov 14 2016 jchaloup - 1.4.5-3 +- Patch unit-test subpackage to run tests over k8s distro binaries + +* Wed Nov 09 2016 jchaloup - 1.4.5-2 +- Add missing if devel around generated devel.file-list + related: #1390074 + +* Tue Nov 08 2016 jchaloup - 1.4.5-1 +- Bump to upstream v1.4.5 (flip back to upstream based Kubernetes) + related: #1390074 + +* Mon Oct 31 2016 jchaloup - 1.4.0-0.1.beta3.git52492b4 +- Update to origin v1.4.0-alpha.0 (ppc64le and arm unbuildable with the current golang version) + resolves: #1390074 + +* Mon Oct 24 2016 jchaloup - 1.3.0-0.4.git52492b4 +- Update to origin v1.3.1 + resolves: #1388092 + +* Thu Sep 08 2016 jchaloup - 1.3.0-0.3.rc1.git507d3a7 +- Update to origin v1.3.0-rc1 + resolves: #1374361 + +* Thu Aug 11 2016 Dennis Gilmore -1.3.0-0.2.git4a3f9c5 +- enable armv7hl and aarch64 + +* Tue Aug 09 2016 jchaloup - 1.3.0-0.1.git4a3f9c5 +- Update to origin v1.3.0-alpha.3 + resolves: #1365601 + +* Thu Jul 21 2016 Fedora Release Engineering - 
1.2.0-0.27.git4a3f9c5 +- https://fedoraproject.org/wiki/Changes/golang1.7 + +* Sun Jul 17 2016 jchaloup - 1.2.0-0.26.git4a3f9c5 +- Update to origin v1.2.1 + resolves: #1357261 + +* Wed Jul 13 2016 jchaloup - 1.2.0-0.25.git4a3f9c5 +- Enable CPU and Memory accounting on a node + +* Wed Jun 29 2016 jchaloup - 1.2.0-0.24.git4a3f9c5 +- Be more verbose about devel subpackage + resolves: #1269449 + +* Tue Jun 28 2016 jchaloup - 1.2.0-0.23.git4a3f9c5 +- Own /run/kubernetes directory + resolves: #1264699 + +* Sat May 28 2016 jchaloup - 1.2.0-0.22.git4a3f9c5 +- Bump to origin v1.2.0 + resolves: #1340643 + +* Wed May 04 2016 jchaloup - 1.2.0-0.21.git4a3f9c5 +- Extend uni-test subpackage to run other tests + +* Mon Apr 25 2016 jchaloup - 1.2.0-0.20.git4a3f9c5 +- Update support for ppc64le to use go compiler + related: #1306214 + +* Thu Apr 21 2016 jchaloup - 1.2.0-0.19.git4a3f9c5 +- Fix support for ppc64le + related: #1306214 + +* Tue Apr 19 2016 jchaloup - 1.2.0-0.18.git4a3f9c5 +- Bump to origin v1.1.6 + resolves: #1328357 + +* Mon Apr 11 2016 jchaloup - 1.2.0-0.17.alpha6.git4a3f9c5 +- Don't disable extensions/v1beta1 by default to conform with upstream documentation + +* Wed Mar 30 2016 jchaloup - 1.2.0-0.16.alpha6.git4a3f9c5 + Update to origin's v1.1.5 + Build on ppc64le as well + resolves: #1306214 + +* Tue Mar 08 2016 jchaloup - 1.2.0-0.15.alpha6.gitf0cd09a +- hyperkube.server: don't parse args for any command + +* Fri Mar 04 2016 jchaloup - 1.2.0-0.14.alpha6.gitf0cd09a +- Disable extensions/v1beta1 implicitly + +* Tue Mar 01 2016 jchaloup - 1.2.0-0.13.alpha6.gitf0cd09a +- Hyperkube checks flags of individual commands/servers even if it does not define their flags. + Thus resulting in 'uknown shorthand flag' error + +* Mon Feb 29 2016 jchaloup - 1.2.0-0.12.alpha6.gitf0cd09a +- Disable v1beta3 +- hyperkube-kubectl-dont shift os.Args + +* Fri Feb 26 2016 jchaloup - 1.2.0-0.11.alpha6.gitf0cd09a +- add kube- prefix to controller-manager, proxy and scheduler + +* Fri Feb 26 2016 jchaloup - 1.2.0-0.10.alpha6.gitf0cd09a +- Hardlink kube-controller-manager, kuber-scheduler, kubectl, kubelet and kube-proxy into hyperkube +- Keep kube-apiserver binary as it is (it has different permission and capabilities) + +* Thu Feb 25 2016 jchaloup - 1.2.0-0.9.alpha6.gitf0cd09a +- Fix Content-Type of docker client response + resolves: #1311861 + +* Mon Feb 22 2016 Fedora Release Engineering - 1.2.0-0.8.alpha6.gitf0cd09a +- https://fedoraproject.org/wiki/Changes/golang1.6 + +* Mon Feb 22 2016 jchaloup - 1.2.0-0.7.alpha6.git4c8e6f4 +- Bump to origin 1.1.3 + kube-version-change command replaced with kubectl convert (check out docs/admin/cluster-management.md) + related: 1295066 + +* Thu Feb 04 2016 Fedora Release Engineering - 1.2.0-0.6.alpha1.git4c8e6f4 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_24_Mass_Rebuild + +* Thu Jan 21 2016 jchaloup - 1.2.0-0.5.alpha1.git4c8e6f4 +- Bump to upstream e1d9873c1d5711b83fd3dd7eefe83a88ceb92c08 + related: #1291860 + +* Thu Jan 07 2016 jchaloup - 1.2.0-0.4.alpha1.git4c8e6f4 +- Move definition of all version, git and commit macros at one place + resolves: #1291860 + +* Fri Jan 01 2016 jchaloup - 1.2.0-0.3.alpha1.git4c8e6f4 +- Bump to upstream bf56e235826baded1772fb340266b8419c3e8f30 + Rebase to origin's "v1.1.0.1 - Security Update to v1.1" release + resolves: #1295066 + +* Thu Nov 26 2015 jchaloup - 1.2.0-0.2.alpha1.git4c8e6f4 +- Bump to origin upstream a41c9ff38d52fd508481c3c2bac13d52871fde02 +- Build kubernetes from origin's Godeps using hack/build-go.sh + origin's Godeps = 
kubernetes upstream + additional patches + +* Tue Oct 20 2015 jchaloup - 1.2.0-0.1.alpha1.git4c8e6f4 +- Bump to upstream 403de3874fba420fd096f2329b45fe2f5ae97e46 + related: #1211266 + +* Wed Oct 14 2015 jchaloup - 1.1.0-0.41.alpha1.gite9a6ef1 +- Bump to origin upstream e9a6ef1cd4c29d45730289a497d18b19d7ba450d + related: #1211266 + +* Fri Oct 09 2015 jchaloup - 1.1.0-0.40.alpha1.git5f38cb0 +- Add missing short option for --server of kubectl +- Update unit-test-subpackage (only test-cmd.sh atm) + related: #1211266 + +* Fri Oct 09 2015 jchaloup - 1.1.0-0.39.alpha1.git5f38cb0 +- Add normalization of flags + related: #1211266 + +* Fri Oct 02 2015 jchaloup - 1.1.0-0.38.alpha1.git5f38cb0 +- Restore unit-test subpackage (not yet tested) + related: #1211266 + +* Wed Sep 30 2015 jchaloup - 1.1.0-0.37.alpha1.git5f38cb0 +- Do not unset default cluster, otherwise k8s ends with error when no cluster set + related: #1211266 + +* Wed Sep 30 2015 jchaloup - 1.1.0-0.36.alpha0.git5f38cb0 +- Bump to o4n 5f38cb0e98c9e854cafba9c7f98dafd51e955ad8 + related: #1211266 + +* Tue Sep 29 2015 jchaloup - 1.1.0-0.35.alpha1.git2695cdc +- Update git version of k8s and o4n, add macros + related: #1211266 + +* Tue Sep 29 2015 jchaloup - 1.1.0-0.34.alpha1.git2695cdc +- Built k8s from o4n tarball +- Bump to upstream 2695cdcd29a8f11ef60278758e11f4817daf3c7c + related: #1211266 + +* Tue Sep 22 2015 jchaloup - 1.1.0-0.33.alpha1.git09cf38e +- Bump to upstream 09cf38e9a80327e2d41654db277d00f19e2c84d0 + related: #1211266 + +* Thu Sep 17 2015 jchaloup - 1.1.0-0.32.alpha1.git400e685 +- Bump to upstream 400e6856b082ecf4b295568acda68d630fc000f1 + related: #1211266 + +* Wed Sep 16 2015 jchaloup - 1.1.0-0.31.gitd549fc4 +- Bump to upstream d549fc400ac3e5901bd089b40168e1e6fb17341d + related: #1211266 + +* Tue Sep 15 2015 jchaloup - 1.1.0-0.30.gitc9570e3 +- Bump to upstream c9570e34d03c6700d83f796c0125d17c5064e57d + related: #1211266 + +* Mon Sep 14 2015 jchaloup - 1.1.0-0.29.git86b4e77 +- Bump to upstream 86b4e777e1947c1bc00e422306a3ca74cbd54dbe + related: #1211266 + +* Thu Sep 10 2015 jchaloup - 1.1.0-0.28.gitf867ba3 +- Bump to upstream f867ba3ba13e3dad422efd21c74f52b9762de37e + related: #1211266 + +* Wed Sep 09 2015 jchaloup - 1.1.0-0.27.git0f4fa4e +- Bump to upstream 0f4fa4ed25ae9a9d1824fe55aeefb4d4ebfecdfd + related: #1211266 + +* Tue Sep 08 2015 jchaloup - 1.1.0-0.26.git196f58b +- Bump to upstream 196f58b9cb25a2222c7f9aacd624737910b03acb + related: #1211266 + +* Mon Sep 07 2015 jchaloup - 1.1.0-0.25.git96e0ed5 +- Bump to upstream 96e0ed5749608d4cc32f61b3674deb04c8fa90ad + related: #1211266 + +* Sat Sep 05 2015 jchaloup - 1.1.0-0.24.git2e2def3 +- Bump to upstream 2e2def36a904fe9a197da5fc70e433e2e884442f + related: #1211266 + +* Fri Sep 04 2015 jchaloup - 1.1.0-0.23.gite724a52 +- Bump to upstream e724a5210adf717f62a72162621ace1e08730c75 + related: #1211266 + +* Thu Sep 03 2015 jchaloup - 1.1.0-0.22.gitb6f2f39 +- Bump to upstream b6f2f396baec5105ff928cf61903c2c368259b21 + related: #1211266 + +* Wed Sep 02 2015 jchaloup - 1.1.0-0.21.gitb4a3698 +- Bump to upstream b4a3698faed81410468eccf9f328ca6df3d0cca3 + related: #1211266 + +* Tue Sep 01 2015 jchaloup - 1.1.0-0.20.git2f9652c +- Bump to upstream 2f9652c7f1d4b8f333c0b5c8c1270db83b913436 + related: #1211266 + +* Mon Aug 31 2015 jchaloup - 1.1.0-0.19.git66a644b +- Bump to upstream 66a644b275ede9ddb98eb3f76e8d1840cafc2147 + related: #1211266 + +* Thu Aug 27 2015 jchaloup - 1.1.0-0.18.gitab73849 +- Bump to upstream ab7384943748312f5e9294f42d42ed3983c7c96c + related: #1211266 + +* Wed Aug 26 
2015 jchaloup - 1.1.0-0.17.git00e3442 +- Bump to upstream 00e34429e0242323ed34347cf0ab65b3d62b21f7 + related: #1211266 + +* Tue Aug 25 2015 jchaloup - 1.1.0-0.16.gita945785 +- Bump to upstream a945785409d5b68f3a2721d2209300edb5abf1ce + related: #1211266 + +* Mon Aug 24 2015 jchaloup - 1.1.0-0.15.git5fe7029 +- Bump to upstream 5fe7029e688e1e5873a0b95a622edda5b5156d2b + related: #1211266 + +* Fri Aug 21 2015 jchaloup - 1.1.0-0.14.gitb6f18c7 +- Bump to upstream b6f18c7ce08714c8d4f6019463879a164a41750e + related: #1211266 + +* Thu Aug 20 2015 jchaloup - 1.1.0-0.13.git44fa48e +- Bump to upstream 44fa48e5af44d3e988fa943d96a2de732d8cc666 + related: #1211266 + +* Wed Aug 19 2015 jchaloup - 1.1.0-0.12.gitb5a4a54 +- Bump to upstream b5a4a548df0cffb99bdcc3b9b9e48d4025d0541c + related: #1211266 + +* Tue Aug 18 2015 jchaloup - 1.1.0-0.11.git919c7e9 +- Bump to upstream 919c7e94e23d2dcd5bdd96896e0a7990f9ae3338 + related: #1211266 + +* Tue Aug 18 2015 jchaloup - 1.1.0-0.10.git280b66c +- Bump to upstream 280b66c9012c21e253acd4e730f8684c39ca08ec + related: #1211266 + +* Mon Aug 17 2015 jchaloup - 1.1.0-0.9.git081d9c6 +- Bump to upstream 081d9c64d25c20ec16035036536511811118173d + related: #1211266 + +* Fri Aug 14 2015 jchaloup - 1.1.0-0.8.git8dcbeba +- Bump to upstream 8dcbebae5ef6a7191d9dfb65c68833c6852a21ad + related: #1211266 + +* Thu Aug 13 2015 jchaloup - 1.1.0-0.7.git968cbbe +- Bump to upstream 968cbbee5d4964bd916ba379904c469abb53d623 + related: #1211266 + +* Wed Aug 12 2015 jchaloup - 1.1.0-0.6.gitc91950f +- Bump to upstream c91950f01cb14ad47486dfcd2fdfb4be3ee7f36b + related: #1211266 + +* Tue Aug 11 2015 jchaloup - 1.1.0-0.5.gite44c8e6 +- Bump to upstream e44c8e6661c931f7fd434911b0d3bca140e1df3a + related: #1211266 + +* Mon Aug 10 2015 jchaloup - 1.1.0-0.4.git2bfa9a1 +- Bump to upstream 2bfa9a1f98147cfdc2e9f4cf50e2c430518d91eb + related: #1243827 + +* Thu Aug 06 2015 jchaloup - 1.1.0-0.3.git4c42e13 +- Bump to upstream 4c42e1302d3b351f3cb6074d32aa420bbd45e07d +- Change import path prefix to k8s.io/kubernetes + related: #1243827 + +* Wed Aug 05 2015 jchaloup - 1.1.0-0.2.git159ba48 +- Bump to upstream 159ba489329e9f6ce422541e13f97e1166090ec8 + related: #1243827 + +* Sat Aug 01 2015 jchaloup - 1.1.0-0.1.git6129d3d +- Bump to upstream 6129d3d4eb80714286650818081a64ce2699afed + related: #1243827 + +* Fri Jul 31 2015 jchaloup - 1.0.0-0.18.gitff058a1 +- Bump to upstream ff058a1afeb63474f7a35805941f3b07c27aae0f + related: #1243827 + +* Thu Jul 30 2015 jchaloup - 1.0.0-0.17.git769230e +- Bump to upstream 769230e735993bb0bf924279a40593c147c9a6ab + related: #1243827 + +* Wed Jul 29 2015 jchaloup - 1.0.0-0.16.gitdde7222 +- Bump to upstream dde72229dc9cbbdacfb2e44b22d9d5b357027020 + related: #1243827 + +* Tue Jul 28 2015 jchaloup - 1.0.0-0.15.gitc5bffaa +- Bump to upstream c5bffaaf3166513da6259c44a5d1ba8e86bea5ce + related: #1243827 + +* Sat Jul 25 2015 jchaloup - 1.0.0-0.14.git5bd82ff +- Bump to upstream 5bd82ffe6da8f4e72e71b362635e558bfc412106 + related: #1243827 + +* Fri Jul 24 2015 jchaloup - 1.0.0-0.13.git291acd1 +- Bump to upstream 291acd1a09ac836ec7524b060a19a6498d9878dd + related: #1243827 + +* Thu Jul 23 2015 jchaloup - 1.0.0-0.12.gitfbed349 +- Bump to upstream fbed3492bfa09e59b1c423fdd7c1ecad333a06ef + related: #1243827 + +* Tue Jul 21 2015 jchaloup - 1.0.0-0.11.gitfbc85e9 +- Add runtime dependency of kubernetes-node on socat (so kubectl port-forward works on AH) + +* Tue Jul 21 2015 jchaloup - 1.0.0-0.10.gitfbc85e9 +- Update the build script for go1.5 as well +- Bump to upstream 
fbc85e9838f25547be94fbffeeb92a756d908ca0 + related: #1243827 + +* Mon Jul 20 2015 jchaloup - 1.0.0-0.9.git2d88675 +- Bump to upstream 2d88675f2203d316d4bac312c7ccad12991b56c2 +- Change KUBE_ETCD_SERVERS to listen on 2379 ports instead of 4001 + resolves: #1243827 +- Add kubernetes-client to provide kubectl command + resolves: #1241469 + +* Mon Jul 20 2015 jchaloup - 1.0.0-0.8.gitb2dafda +- Fix dependency and tests for go-1.5 +- with_debug off as the builds ends with error "ELFRESERVE too small: ..." + +* Sat Jul 18 2015 Eric Paris - 1.0.0-0.7.gitb2dafda +- Update apiserver binary gid + +* Fri Jul 17 2015 jchaloup - 1.0.0-0.6.gitb2dafda +- Bump to upstream b2dafdaef5aceafad503ab56254b60f80da9e980 + related: #1211266 + +* Thu Jul 16 2015 jchaloup - 1.0.0-0.5.git596a8a4 +- Bump to upstream 596a8a40d12498b5335140f50753980bfaea4f6b + related: #1211266 + +* Wed Jul 15 2015 jchaloup - 1.0.0-0.4.git6ba532b +- Bump to upstream 6ba532b218cb5f5ea3f0e8dce5395182f388536c + related: #1211266 + +* Tue Jul 14 2015 jchaloup - 1.0.0-0.3.gitc616182 +- Bump to upstream c6161824db3784e6156131307a5e94647e5557fd + related: #1211266 + +* Mon Jul 13 2015 jchaloup - 1.0.0-0.2.git2c27b1f +- Bump to upstream 2c27b1fa64f4e70f04575d1b217494f49332390e + related: #1211266 + +* Sat Jul 11 2015 jchaloup - 1.0.0-0.1.git1b37059 +- Bump to upstream 1b370599ccf271741e657335c4943cb8c7dba28b + related: #1211266 + +* Fri Jul 10 2015 jchaloup - 0.21.1-0.2.gitccc4cfc +- Bump to upstream ccc4cfc7e11e0f127ac1cea045017dd799be3c63 + related: #1211266 + +* Thu Jul 09 2015 jchaloup - 0.21.1-0.1.git41f8907 +- Update generating of man pages from md (add genmanpages.sh) +- Bump to upstream 41f89075396329cd46c58495c7d3f7e13adcaa96 + related: #1211266 + +* Wed Jul 08 2015 jchaloup - 0.20.2-0.5.git77be29e +- Bump to upstream 77be29e3da71f0a136b6aa4048b2f0575c2598e4 + related: #1211266 + +* Tue Jul 07 2015 jchaloup - 0.20.2-0.4.git639a7da +- Bump to upstream 639a7dac50a331414cc6c47083323388da0d8756 + related: #1211266 + +* Mon Jul 06 2015 jchaloup - 0.20.2-0.3.gitbb6f2f7 +- Bump to upstream bb6f2f7ad90596d624d84cc691eec0f518e90cc8 + related: #1211266 + +* Fri Jul 03 2015 jchaloup - 0.20.2-0.2.git974377b +- Bump to upstream 974377b3064ac59b6e5694bfa568d67128026171 + related: #1211266 + +* Thu Jul 02 2015 jchaloup - 0.20.2-0.1.gitef41ceb +- Bump to upstream ef41ceb3e477ceada84c5522f429f02ab0f5948e + related: #1211266 + +* Tue Jun 30 2015 jchaloup - 0.20.0-0.3.git835eded +- Bump to upstream 835eded2943dfcf13a89518715e4be842a6a3ac0 +- Generate missing man pages + related: #1211266 + +* Mon Jun 29 2015 jchaloup - 0.20.0-0.2.git1c0b765 +- Bump to upstream 1c0b765df6dabfe9bd0e20489ed3bd18e6b3bda8 + Comment out missing man pages + related: #1211266 + +* Fri Jun 26 2015 jchaloup - 0.20.0-0.1.git8ebd896 +- Bump to upstream 8ebd896351513d446d56bc5785c070d2909226a3 + related: #1211266 + +* Fri Jun 26 2015 jchaloup - 0.19.3-0.6.git712f303 +- Bump to upstream 712f303350b35e70a573f3cb19193c8ec7ee7544 + related: #1211266 + +* Thu Jun 25 2015 jchaloup - 0.19.3-0.5.git2803b86 +- Bump to upstream 2803b86a42bf187afa816a7ce14fec754cc2af51 + related: #1211266 + +* Wed Jun 24 2015 Eric Paris - 0.19.3-0.4.git5b4dc4e +- Set CAP_NET_BIND_SERVICE on the kube-apiserver so it can use 443 + +* Wed Jun 24 2015 jchaloup - 0.19.3-0.3.git5b4dc4e +- Bump to upstream 5b4dc4edaa14e1ab4e3baa19df0388fa54dab344 + pkg/cloudprovider/* packages does not conform to golang language specification + related: #1211266 + +* Tue Jun 23 2015 jchaloup - 0.19.3-0.2.gita2ce3ea +- Bump to upstream 
a2ce3ea5293553b1fe0db3cbc6d53bdafe061d79 + related: #1211266 + +* Mon Jun 22 2015 jchaloup - 0.19.1-0.1.gitff0546d +- Bump to upstream ff0546da4fc23598de59db9f747c535545036463 + related: #1211266 + +* Fri Jun 19 2015 jchaloup - 0.19.0-0.7.gitb2e9fed +- Bump to upstream b2e9fed3490274509506285bdba309c50afb5c39 + related: #1211266 + +* Thu Jun 18 2015 jchaloup - 0.19.0-0.6.gitf660940 +- Bump to upstream f660940dceb3fe6ffb1b14ba495a47d91b5cd910 + related: #1211266 + +* Wed Jun 17 2015 jchaloup - 0.19.0-0.5.git43889c6 +- Bump to upstream 43889c612c4d396dcd8fbf3fbd217e106eaf5bce + related: #1211266 + +* Tue Jun 16 2015 jchaloup - 0.19.0-0.4.gita8269e3 +- Bump to upstream a8269e38c9e2bf81ba18cd6420e2309745d5b0b9 + related: #1211266 + +* Sun Jun 14 2015 jchaloup - 0.19.0-0.3.git5e5c1d1 +- Bump to upstream 5e5c1d10976f2f26d356ca60ef7d0d715c9f00a2 + related: #1211266 + +* Fri Jun 12 2015 jchaloup - 0.19.0-0.2.git0ca96c3 +- Bump to upstream 0ca96c3ac8b47114169f3b716ae4521ed8c7657c + related: #1211266 + +* Thu Jun 11 2015 jchaloup - 0.19.0-0.1.git5a02fc0 +- Bump to upstream 5a02fc07d8a943132b9e68fe7169778253318487 + related: #1211266 + +* Wed Jun 10 2015 jchaloup - 0.18.2-0.3.git0dfb681 +- Bump to upstream 0dfb681ba5d5dba535895ace9d650667904b5df7 + related: #1211266 + +* Tue Jun 09 2015 jchaloup - 0.18.2-0.2.gitb68e08f +- golang-cover is not needed + +* Tue Jun 09 2015 jchaloup - 0.18.2-0.1.gitb68e08f +- Bump to upstream b68e08f55f5ae566c4ea3905d0993a8735d6d34f + related: #1211266 + +* Sat Jun 06 2015 jchaloup - 0.18.1-0.3.git0f1c4c2 +- Bump to upstream 0f1c4c25c344f70c3592040b2ef092ccdce0244f + related: #1211266 + +* Fri Jun 05 2015 jchaloup - 0.18.1-0.2.git7309e1f +- Bump to upstream 7309e1f707ea5dd08c51f803037d7d22c20e2b92 + related: #1211266 + +* Thu Jun 04 2015 jchaloup - 0.18.1-0.1.gita161edb +- Bump to upstream a161edb3960c01ff6e14813858c2eeb85910009b + related: #1211266 + +* Wed Jun 03 2015 jchaloup - 0.18.0-0.3.gitb5a91bd +- Bump to upstream b5a91bda103ed2459f933959241a2b57331747ba +- Don't run %%check section (kept only for local run). Tests are now handled via CI. + related: #1211266 + +* Tue Jun 02 2015 jchaloup - 0.18.0-0.2.git5520386 +- Bump to upstream 5520386b180d3ddc4fa7b7dfe6f52642cc0c25f3 + related: #1211266 + +* Mon Jun 01 2015 jchaloup - 0.18.0-0.1.git0bb78fe +- Bump to upstream 0bb78fe6c53ce38198cc3805c78308cdd4805ac8 + related: #1211266 + +* Fri May 29 2015 jchaloup - 0.17.1-6 +- Bump to upstream ed4898d98c46869e9cbdb44186dfdeda9ff80cc2 + related: #1211266 + +* Thu May 28 2015 jchaloup - 0.17.1-5 +- Bump to upstream 6fa2777e26559fc008eacac83eb165d25bd9a7de + related: #1211266 + +* Tue May 26 2015 jchaloup - 0.17.1-4 +- Bump to upstream 01fcb58673001e56c69e128ab57e0c3f701aeea5 + related: #1211266 + +* Mon May 25 2015 jchaloup - 0.17.1-3 +- Decompose package into master and node subpackage. + Thanks to Avesh for testing and patience. 
+ related: #1211266 + +* Mon May 25 2015 jchaloup - 0.17.1-2 +- Bump to upstream cf7b0bdc2a41d38613ac7f8eeea91cae23553fa2 + related: #1211266 + +* Fri May 22 2015 jchaloup - 0.17.1-1 +- Bump to upstream d9d12fd3f7036c92606fc3ba9046b365212fcd70 + related: #1211266 + +* Wed May 20 2015 jchaloup - 0.17.0-12 +- Bump to upstream a76bdd97100c66a46e2b49288540dcec58a954c4 + related: #1211266 + +* Tue May 19 2015 jchaloup - 0.17.0-11 +- Bump to upstream 10339d72b66a31592f73797a9983e7c207481b22 + related: #1211266 + +* Mon May 18 2015 jchaloup - 0.17.0-10 +- Bump to upstream efb42b302d871f7217394205d84e5ae82335d786 + related: #1211266 + +* Sat May 16 2015 jchaloup - 0.17.0-9 +- Bump to upstream d51e131726b925e7088b90915e99042459b628e0 + related: #1211266 + +* Fri May 15 2015 jchaloup - 0.17.0-8 +- Bump to upstream 1ee33ac481a14db7b90e3bbac8cec4ceea822bfb + related: #1211266 + +* Fri May 15 2015 jchaloup - 0.17.0-7 +- Bump to upstream d3c6fb0d6a13c0177dcd67556d72963c959234ea + related: #1211266 + +* Fri May 15 2015 jchaloup - 0.17.0-6 +- Bump to upstream f57f31783089f41c0bdca8cb87a1001ca94e1a45 + related: #1211266 + +* Thu May 14 2015 jchaloup - 0.17.0-5 +- Bump to upstream c90d381d0d5cf8ab7b8412106f5a6991d7e13c7d + related: #1211266 + +* Thu May 14 2015 jchaloup - 0.17.0-4 +- Bump to upstream 5010b2dde0f9b9eb820fe047e3b34bc9fa6324de +- Add debug info + related: #1211266 + +* Wed May 13 2015 jchaloup - 0.17.0-3 +- Bump to upstream ec19d41b63f5fe7b2c939e7738a41c0fbe65d796 + related: #1211266 + +* Tue May 12 2015 jchaloup - 0.17.0-2 +- Provide /usr/bin/kube-version-change binary + related: #1211266 + +* Tue May 12 2015 jchaloup - 0.17.0-1 +- Bump to upstream 962f10ee580eea30e5f4ea725c4e9e3743408a58 + related: #1211266 + +* Mon May 11 2015 jchaloup - 0.16.2-7 +- Bump to upstream 63182318c5876b94ac9b264d1224813b2b2ab541 + related: #1211266 + +* Fri May 08 2015 jchaloup - 0.16.2-6 +- Bump to upstream d136728df7e2694df9e082902f6239c11b0f2b00 +- Add NetworkManager as dependency for /etc/resolv.conf + related: #1211266 + +* Thu May 07 2015 jchaloup - 0.16.2-5 +- Bump to upstream ca0f678b9a0a6dc795ac7a595350d0dbe9d0ac3b + related: #1211266 + +* Wed May 06 2015 jchaloup - 0.16.2-4 +- Add docs to kubernetes-unit-test + related: #1211266 + +* Wed May 06 2015 jchaloup - 0.16.2-3 +- Bump to upstream 3a24c0e898cb3060d7905af6df275a3be562451d + related: #1211266 + +* Tue May 05 2015 jchaloup - 0.16.2-2 +- Add api and README.md to kubernetes-unit-test + related: #1211266 + +* Tue May 05 2015 jchaloup - 0.16.2-1 +- Bump to upstream 72048a824ca16c3921354197953fabecede5af47 + related: #1211266 + +* Mon May 04 2015 jchaloup - 0.16.1-2 +- Bump to upstream 1dcd80cdf3f00409d55cea1ef0e7faef0ae1d656 + related: #1211266 + +* Sun May 03 2015 jchaloup - 0.16.1-1 +- Bump to upstream 86751e8c90a3c0e852afb78d26cb6ba8cdbc37ba + related: #1211266 + +* Fri May 01 2015 jchaloup - 0.16.0-2 +- Bump to upstream 72708d74b9801989ddbdc8403fc5ba4aafb7c1ef + related: #1211266 + +* Wed Apr 29 2015 jchaloup - 0.16.0-1 +- Bump to upstream 7dcce2eeb7f28643d599c8b6a244523670d17c93 + related: #1211266 + +* Tue Apr 28 2015 jchaloup - 0.15.0-10 +- Add unit-test subpackage + related: #1211266 + +* Tue Apr 28 2015 jchaloup - 0.15.0-9 +- Bump to upstream 99fc906f78cd2bcb08536c262867fa6803f816d5 + related: #1211266 + +* Mon Apr 27 2015 jchaloup - 0.15.0-8 +- Bump to upstream 051dd96c542799dfab39184d2a7c8bacf9e88d85 + related: #1211266 + +* Fri Apr 24 2015 jchaloup - 0.15.0-7 +- Bump to upstream 9f753c2592481a226d72cea91648db8fb97f0da8 + related: #1211266 + 
+* Thu Apr 23 2015 jchaloup - 0.15.0-6 +- Bump to upstream cf824ae5e07965ba0b4b15ee88e08e2679f36978 + related: #1211266 + +* Tue Apr 21 2015 jchaloup - 0.15.0-5 +- Bump to upstream 21788d8e6606038a0a465c97f5240b4e66970fbb + related: #1211266 + +* Mon Apr 20 2015 jchaloup - 0.15.0-4 +- Bump to upstream eb1ea269954da2ce557f3305fa88d42e3ade7975 + related: #1211266 + +* Fri Apr 17 2015 jchaloup - 0.15.0-3 +- Obsolete cadvisor as it is integrated in kubelet + related: #1211266 + +* Wed Apr 15 2015 jchaloup - 0.15.0-0.2.git0ea87e4 +- Bump to upstream 0ea87e486407298dc1e3126c47f4076b9022fb09 + related: #1211266 + +* Tue Apr 14 2015 jchaloup - 0.15.0-0.1.gitd02139d +- Bump to upstream d02139d2b454ecc5730cc535d415c1963a7fb2aa + related: #1211266 + +* Sun Apr 12 2015 jchaloup - 0.14.2-0.2.gitd577db9 +- Bump to upstream d577db99873cbf04b8e17b78f17ec8f3a27eca30 + +* Wed Apr 08 2015 jchaloup - 0.14.2-0.1.git2719194 +- Bump to upstream 2719194154ffd38fd1613699a9dd10a00909957e + Use etcd-2.0.8 and higher + +* Tue Apr 07 2015 jchaloup - 0.14.1-0.2.gitd2f4734 +- Bump to upstream d2f473465738e6b6f7935aa704319577f5e890ba + +* Thu Apr 02 2015 jchaloup - 0.14.1-0.1.gita94ffc8 +- Bump to upstream a94ffc8625beb5e2a39edb01edc839cb8e59c444 + +* Wed Apr 01 2015 jchaloup - 0.14.0-0.2.git8168344 +- Bump to upstream 81683441b96537d4b51d146e39929b7003401cd5 + +* Tue Mar 31 2015 jchaloup - 0.14.0-0.1.git9ed8761 +- Bump to upstream 9ed87612d07f75143ac96ad90ff1ff68f13a2c67 +- Remove [B]R from devel branch until the package has stable API + +* Mon Mar 30 2015 jchaloup - 0.13.2-0.6.git8a7a127 +- Bump to upstream 8a7a127352263439e22253a58628d37a93fdaeb2 + +* Fri Mar 27 2015 jchaloup - 0.13.2-0.5.git8d94c43 +- Bump to upstream 8d94c43e705824f23791b66ad5de4ea095d5bb32 + resolves: #1205362 + +* Wed Mar 25 2015 jchaloup - 0.13.2-0.4.git455fe82 +- Bump to upstream 455fe8235be8fd9ba0ce21bf4f50a69d42e18693 + +* Mon Mar 23 2015 jchaloup - 0.13.2-0.3.gitef75888 +- Remove runtime dependency on etcd + resolves: #1202923 + +* Sun Mar 22 2015 jchaloup - 0.13.2-0.2.gitef75888 +- Bump to upstream ef758881d108bb53a128126c503689104d17f477 + +* Fri Mar 20 2015 jchaloup - 0.13.2-0.1.gita8f2cee +- Bump to upstream a8f2cee8c5418676ee33a311fad57d6821d3d29a + +* Fri Mar 13 2015 jchaloup - 0.12.0-0.9.git53b25a7 +- Bump to upstream 53b25a7890e31bdec6f2a95b32200d6cc27ae2ca + fix kube-proxy.service and kubelet + resolves: #1200919 #1200924 + +* Fri Mar 13 2015 jchaloup - 0.12.0-0.8.git39dceb1 +- Bump to upstream 39dceb13a511a83963a766a439cb386d10764310 + +* Thu Mar 12 2015 Eric Paris - 0.12.0-0.7.gita3fd0a9 +- Move from /etc/tmpfiles.d to %%{_tmpfilesdir} + resolves: #1200969 + +* Thu Mar 12 2015 jchaloup - 0.12.0-0.6.gita3fd0a9 +- Place contrib/init/systemd/tmpfiles.d/kubernetes.conf to /etc/tmpfiles.d/kubernetes.conf + +* Thu Mar 12 2015 jchaloup - 0.12.0-0.5.gita3fd0a9 +- Bump to upstream a3fd0a9fd516bb6033f32196ae97aaecf8c096b1 + +* Tue Mar 10 2015 jchaloup - 0.12.0-0.4.gita4d871a +- Bump to upstream a4d871a10086436557f804930812f2566c9d4d39 + +* Fri Mar 06 2015 jchaloup - 0.12.0-0.3.git2700871 +- Bump to upstream 2700871b049d5498167671cea6de8317099ad406 + +* Thu Mar 05 2015 jchaloup - 0.12.0-0.2.git8b627f5 +- Bump to upstream 8b627f516fd3e4f62da90d401ceb3d38de6f8077 + +* Tue Mar 03 2015 jchaloup - 0.12.0-0.1.gitecca426 +- Bump to upstream ecca42643b91a7117de8cd385b64e6bafecefd65 + +* Mon Mar 02 2015 jchaloup - 0.11.0-0.5.git6c5b390 +- Bump to upstream 6c5b390160856cd8334043344ef6e08568b0a5c9 + +* Sat Feb 28 2015 jchaloup - 0.11.0-0.4.git0fec31a +- 
Bump to upstream 0fec31a11edff14715a1efb27f77262a7c3770f4 + +* Fri Feb 27 2015 jchaloup - 0.11.0-0.3.git08402d7 +- Bump to upstream 08402d798c8f207a2e093de5a670c5e8e673e2de + +* Wed Feb 25 2015 jchaloup - 0.11.0-0.2.git86434b4 +- Bump to upstream 86434b4038ab87ac40219562ad420c3cc58c7c6b + +* Tue Feb 24 2015 jchaloup - 0.11.0-0.1.git754a2a8 +- Bump to upstream 754a2a8305c812121c3845d8293efdd819b6a704 + turn off integration tests until "FAILED: unexpected endpoints: + timed out waiting for the condition" problem is resolved + Adding back devel subpackage ([B]R list outdated) + +* Fri Feb 20 2015 jchaloup - 0.10.1-0.3.git4c87805 +- Bump to upstream 4c87805870b1b22e463c4bd711238ef68c77f0af + +* Tue Feb 17 2015 jchaloup - 0.10.1-0.2.git6f84bda +- Bump to upstream 6f84bdaba853872dbac69c84d3ab4b6964e85d8c + +* Tue Feb 17 2015 jchaloup - 0.10.1-0.1.git7d6130e +- Bump to upstream 7d6130edcdfabd7dd2e6a06fdc8fe5e333f07f5c + +* Sat Feb 07 2015 jchaloup - 0.9.1-0.7.gitc9c98ab +- Bump to upstream c9c98ab19eaa6f0b2ea17152c9a455338853f4d0 + Since some dependencies are broken, we can not build Kubernetes from Fedora deps. + Switching to vendored source codes until Go draft is resolved + +* Wed Feb 04 2015 jchaloup - 0.9.1-0.6.git7f5ed54 +- Bump to upstream 7f5ed541f794348ae6279414cf70523a4d5133cc + +* Tue Feb 03 2015 jchaloup - 0.9.1-0.5.git2ac6bbb +- Bump to upstream 2ac6bbb7eba7e69eac71bd9acd192cda97e67641 + +* Mon Feb 02 2015 jchaloup - 0.9.1-0.4.gite335e2d +- Bump to upstream e335e2d3e26a9a58d3b189ccf41ceb3770d1bfa9 + +* Fri Jan 30 2015 jchaloup - 0.9.1-0.3.git55793ac +- Bump to upstream 55793ac2066745f7243c666316499e1a8cf074f0 + +* Thu Jan 29 2015 jchaloup - 0.9.1-0.2.gitca6de16 +- Bump to upstream ca6de16df7762d4fc9b4ad44baa78d22e3f30742 + +* Tue Jan 27 2015 jchaloup - 0.9.1-0.1.git3623a01 +- Bump to upstream 3623a01bf0e90de6345147eef62894057fe04b29 +- update tests for etcd-2.0 + +* Thu Jan 22 2015 jchaloup - 0.8.2-571.gitb2f287c ++- Bump to upstream b2f287c259d856f4c08052a51cd7772c563aff77 + +* Thu Jan 22 2015 Eric Paris - 0.8.2-570.gitb2f287c +- patch kubelet service file to use docker.service not docker.socket + +* Wed Jan 21 2015 jchaloup - 0.8.2-0.1.git5b04640 +- Bump to upstream 5b046406a957a1e7eda7c0c86dd7a89e9c94fc5f + +* Sun Jan 18 2015 jchaloup - 0.8.0-126.0.git68298f0 +- Add some missing dependencies +- Add devel subpackage + +* Fri Jan 09 2015 Eric Paris - 0.8.0-125.0.git68298f0 +- Bump to upstream 68298f08a4980f95dfbf7b9f58bfec1808fb2670 + +* Tue Dec 16 2014 Eric Paris - 0.7.0-18.0.git52e165a +- Bump to upstream 52e165a4fd720d1703ebc31bd6660e01334227b8 + +* Mon Dec 15 2014 Eric Paris - 0.6-297.0.git5ef34bf +- Bump to upstream 5ef34bf52311901b997119cc49eff944c610081b + +* Wed Dec 03 2014 Eric Paris +- Replace patch to use old googlecode/go.net/ with BuildRequires on golang.org/x/net/ + +* Tue Dec 02 2014 Eric Paris - 0.6-4.0.git993ef88 +- Bump to upstream 993ef88eec9012b221f79abe8f2932ee97997d28 + +* Mon Dec 01 2014 Eric Paris - 0.5-235.0.git6aabd98 +- Bump to upstream 6aabd9804fb75764b70e9172774002d4febcae34 + +* Wed Nov 26 2014 Eric Paris - 0.5-210.0.gitff1e9f4 +- Bump to upstream ff1e9f4c191342c24974c030e82aceaff8ea9c24 + +* Tue Nov 25 2014 Eric Paris - 0.5-174.0.git64e07f7 +- Bump to upstream 64e07f7fe03d8692c685b09770c45f364967a119 + +* Mon Nov 24 2014 Eric Paris - 0.5-125.0.git162e498 +- Bump to upstream 162e4983b947d2f6f858ca7607869d70627f5dff + +* Fri Nov 21 2014 Eric Paris - 0.5-105.0.git3f74a1e +- Bump to upstream 3f74a1e9f56b3c3502762930c0c551ccab0557ea + +* Thu Nov 20 2014 
Eric Paris - 0.5-65.0.gitc6158b8 +- Bump to upstream c6158b8aa9c40fbf1732650a8611429536466b21 +- include go-restful build requirement + +* Tue Nov 18 2014 Eric Paris - 0.5-14.0.gitdf0981b +- Bump to upstream df0981bc01c5782ad30fc45cb6f510f365737fc1 + +* Tue Nov 11 2014 Eric Paris - 0.4-680.0.git30fcf24 +- Bump to upstream 30fcf241312f6d0767c7d9305b4c462f1655f790 + +* Mon Nov 10 2014 Eric Paris - 0.4-633.0.git6c70227 +- Bump to upstream 6c70227a2eccc23966d32ea6d558ee05df46e400 + +* Fri Nov 07 2014 Eric Paris - 0.4-595.0.gitb695650 +- Bump to upstream b6956506fa2682afa93770a58ea8c7ba4b4caec1 + +* Thu Nov 06 2014 Eric Paris - 0.4-567.0.git3b1ef73 +- Bump to upstream 3b1ef739d1fb32a822a22216fb965e22cdd28e7f + +* Thu Nov 06 2014 Eric Paris - 0.4-561.0.git06633bf +- Bump to upstream 06633bf4cdc1ebd4fc848f85025e14a794b017b4 +- Make spec file more RHEL/CentOS friendly + +* Tue Nov 04 2014 Eric Paris - 0.4-0.0.git4452163 +- rebase to v0.4 +- include man pages + +* Tue Oct 14 2014 jchaloup - 0.3-0.3.git98ac8e1 +- create /var/lib/kubelet +- Use bash completions from upstream +- Bump to upstream 98ac8e178fcf1627399d659889bcb5fe25abdca4 +- all by Eric Paris + +* Mon Sep 29 2014 Jan Chaloupka - 0.3-0.2.git88fdb65 +- replace * with coresponding files +- remove dependency on gcc + +* Wed Sep 24 2014 Eric Paris - 0.1-0.4.git6ebe69a +- prefer autosetup instead of setup (revert setup change in 0-0.3.git) +https://fedoraproject.org/wiki/Autosetup_packaging_draft +- revert version number to 0.1 + +* Mon Sep 08 2014 Lokesh Mandvekar - 0-0.3.git6ebe69a +- gopath defined in golang package already +- package owns /etc/kubernetes +- bash dependency implicit +- keep buildroot/$RPM_BUILD_ROOT macros consistent +- replace with macros wherever possible +- set version, release and source tarball prep as per +https://fedoraproject.org/wiki/Packaging:SourceURL#Github + +* Mon Sep 08 2014 Eric Paris +- make services restart automatically on error + +* Sat Sep 06 2014 Eric Paris +- update to upstream +- redo build to use project scripts +- use project scripts in %%check +- rework deletion of third_party packages to easily detect changes +- run apiserver and controller-manager as non-root + +* Mon Aug 11 2014 Adam Miller +- update to upstream +- decouple the rest of third_party + +* Thu Aug 7 2014 Eric Paris +- update to head +- update package to include config files + +* Wed Jul 16 2014 Colin Walters +- Initial package diff --git a/kubernetes/kubernetes/centos/spec_diff b/kubernetes/kubernetes/centos/spec_diff deleted file mode 100644 index 0599517b7..000000000 --- a/kubernetes/kubernetes/centos/spec_diff +++ /dev/null @@ -1,77 +0,0 @@ -26,27c26 -< %global commit fc32d2f3698e36b93322a3465f63a14e9f0eaead -< %global shortcommit %(c=%{commit}; echo ${c:0:7}) ---- -> %global commit 1.12.1 -32c31 -< %global con_repo contrib ---- -> %global con_repo kubernetes-contrib -34,36c33 -< %global con_provider_prefix %{con_provider}.%{con_provider_tld}/%{con_project}/%{con_repo} -< %global con_commit 5b445f1c53aa8d6457523526340077935f62e691 -< %global con_shortcommit %(c=%{con_commit}; echo ${c:0:7}) ---- -> %global con_commit 1.12.1 -38c35 -< %global kube_version 1.10.0 ---- -> %global kube_version 1.12.1 -48c45 -< Release: 1%{?dist} ---- -> Release: 1%{?_tis_dist}.%{tis_patch_ver} -53,54c50,51 -< Source0: https://%{provider_prefix}/archive/%{commit}/%{repo}-%{shortcommit}.tar.gz -< Source1: https://%{con_provider_prefix}/archive/%{con_commit}/%{con_repo}-%{con_shortcommit}.tar.gz ---- -> Source0: %{project}-v%{kube_version}.tar.gz -> 
Source1: %{con_repo}-v%{con_commit}.tar.gz -60,68d56 -< Patch3: build-with-debug-info.patch -< #Patch4: make-test-cmd-run-over-hyperkube-based-kubectl.patch -< #Patch5: make-e2e_node-run-over-distro-bins.patch -< -< # ppc64le -< Patch16: fix-support-for-ppc64le.patch -< -< Patch20: use_go_build-is-not-fully-propagated-so-make-it-fixe.patch -< -810c798 -< Suggests: docker ---- -> Suggests: docker-ce -812c800 -< Requires: docker ---- -> Requires: docker-ce -816c804 -< BuildRequires: golang >= 1.2-7 ---- -> BuildRequires: golang >= 1.10.2 -858,863d845 -< %if 0%{?with_debug} -< %patch3 -p1 -< %endif -< -< %patch20 -p1 -< -883,890d864 -< # Patch tests to be run over distro bins -< #patch4 -p1 -< #patch5 -p1 -< -< %ifarch ppc64le -< %patch16 -p1 -< %endif -< -893a868 -> export PBR_VERSION=%{version} -904c879 -< make WHAT="--use_go_build cmd/hyperkube cmd/kube-apiserver cmd/kubeadm" ---- -> make WHAT="cmd/hyperkube cmd/kube-apiserver cmd/kubeadm" -917a893 -> export PBR_VERSION=%{version} -1072a1049 -> %config(noreplace) %{_sysconfdir}/%{name}/kubelet.kubeconfig diff --git a/monitoring/collectd-extensions/centos/build_srpm.data b/monitoring/collectd-extensions/centos/build_srpm.data index 82cafe8bb..e7f74e208 100644 --- a/monitoring/collectd-extensions/centos/build_srpm.data +++ b/monitoring/collectd-extensions/centos/build_srpm.data @@ -5,6 +5,7 @@ COPY_LIST="$PKG_BASE/src/LICENSE \ $PKG_BASE/src/collectd.service \ $PKG_BASE/src/fm_notifier.py \ $PKG_BASE/src/mtce_notifier.py \ + $PKG_BASE/src/plugin_common.py \ $PKG_BASE/src/python_plugins.conf \ $PKG_BASE/src/cpu.py \ $PKG_BASE/src/cpu.conf \ @@ -13,7 +14,9 @@ COPY_LIST="$PKG_BASE/src/LICENSE \ $PKG_BASE/src/df.conf \ $PKG_BASE/src/ntpq.py \ $PKG_BASE/src/ntpq.conf \ + $PKG_BASE/src/interface.py \ + $PKG_BASE/src/interface.conf \ $PKG_BASE/src/example.py \ $PKG_BASE/src/example.conf" -TIS_PATCH_VER=6 +TIS_PATCH_VER=7 diff --git a/monitoring/collectd-extensions/centos/collectd-extensions.spec b/monitoring/collectd-extensions/centos/collectd-extensions.spec index 532c06720..0665fb650 100644 --- a/monitoring/collectd-extensions/centos/collectd-extensions.spec +++ b/monitoring/collectd-extensions/centos/collectd-extensions.spec @@ -15,12 +15,14 @@ Source2: collectd.conf.pmon # collectd python plugin files - notifiers Source3: fm_notifier.py Source4: mtce_notifier.py +Source5: plugin_common.py # collectd python plugin files - resource plugins Source11: cpu.py Source12: memory.py Source14: example.py Source15: ntpq.py +Source16: interface.py # collectd plugin conf files into /etc/collectd.d Source100: python_plugins.conf @@ -29,6 +31,7 @@ Source102: memory.conf Source103: df.conf Source104: example.conf Source105: ntpq.conf +Source106: interface.conf BuildRequires: systemd-devel @@ -64,12 +67,15 @@ install -m 600 %{SOURCE2} %{buildroot}%{local_config_extensions_dir} # collectd python plugin files - notifiers install -m 700 %{SOURCE3} %{buildroot}%{local_python_extensions_dir} install -m 700 %{SOURCE4} %{buildroot}%{local_python_extensions_dir} +install -m 700 %{SOURCE5} %{buildroot}%{local_python_extensions_dir} # collectd python plugin files - resource plugins install -m 700 %{SOURCE11} %{buildroot}%{local_python_extensions_dir} install -m 700 %{SOURCE12} %{buildroot}%{local_python_extensions_dir} install -m 700 %{SOURCE14} %{buildroot}%{local_python_extensions_dir} install -m 700 %{SOURCE15} %{buildroot}%{local_python_extensions_dir} +install -m 700 %{SOURCE16} %{buildroot}%{local_python_extensions_dir} + # collectd plugin conf files into 
/etc/collectd.d install -m 600 %{SOURCE100} %{buildroot}%{local_plugin_dir} @@ -78,6 +84,7 @@ install -m 600 %{SOURCE102} %{buildroot}%{local_plugin_dir} install -m 600 %{SOURCE103} %{buildroot}%{local_plugin_dir} install -m 600 %{SOURCE104} %{buildroot}%{local_plugin_dir} install -m 600 %{SOURCE105} %{buildroot}%{local_plugin_dir} +install -m 600 %{SOURCE106} %{buildroot}%{local_plugin_dir} %clean rm -rf $RPM_BUILD_ROOT diff --git a/monitoring/collectd-extensions/src/cpu.conf b/monitoring/collectd-extensions/src/cpu.conf index 75394cdb2..b1d862f18 100644 --- a/monitoring/collectd-extensions/src/cpu.conf +++ b/monitoring/collectd-extensions/src/cpu.conf @@ -13,8 +13,8 @@ Instance "used" Persist true PersistOK true - WarningMax 90.00 - FailureMax 95.00 + WarningMax 89.00 + FailureMax 94.00 Hits 2 Invert false diff --git a/monitoring/collectd-extensions/src/df.conf b/monitoring/collectd-extensions/src/df.conf index 5df943b8b..19eb764c7 100644 --- a/monitoring/collectd-extensions/src/df.conf +++ b/monitoring/collectd-extensions/src/df.conf @@ -13,6 +13,7 @@ MountPoint "/var/lock" MountPoint "/boot" MountPoint "/scratch" + MountPoint "/opt/etcd" MountPoint "/opt/cgcs" MountPoint "/opt/platform" MountPoint "/opt/extension" @@ -27,8 +28,8 @@ Instance "used" - WarningMax 80.00 - FailureMax 90.00 + WarningMax 79.00 + FailureMax 89.00 Persist true PersistOK true Hits 2 diff --git a/monitoring/collectd-extensions/src/example.conf b/monitoring/collectd-extensions/src/example.conf index fbcf5d4f9..574306027 100644 --- a/monitoring/collectd-extensions/src/example.conf +++ b/monitoring/collectd-extensions/src/example.conf @@ -4,8 +4,8 @@ Instance "used" Persist true PersistOK true - WarningMax 51.00 - FailureMax 75.00 + WarningMax 49.00 + FailureMax 74.00 Hits 1 Invert false diff --git a/monitoring/collectd-extensions/src/fm_notifier.py b/monitoring/collectd-extensions/src/fm_notifier.py index 73b7916bc..815fb07ac 100755 --- a/monitoring/collectd-extensions/src/fm_notifier.py +++ b/monitoring/collectd-extensions/src/fm_notifier.py @@ -23,17 +23,17 @@ # Collects provides information about each event as an object passed to the # notification handler ; the notification object. # -# object.host - the hostname +# object.host - the hostname. # -# object.plugin - the name of the plugin aka resource +# object.plugin - the name of the plugin aka resource. # object.plugin_instance - plugin instance string i.e. say mountpoint -# for df plugin -# object.type, - the unit i.e. percent or absolute -# object.type_instance - the attribute i.e. free, used, etc +# for df plugin or numa? node for memory. +# object.type, - the unit i.e. percent or absolute. +# object.type_instance - the attribute i.e. free, used, etc. # -# object.severity - a integer value 0=OK , 1=warning, 2=failure +# object.severity - a integer value 0=OK , 1=warning, 2=failure. # object.message - a log-able message containing the above along -# with the value +# with the value. # # This notifier uses the notification object to manage plugin/instance alarms. 
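The threshold blocks being tuned above (WarningMax/FailureMax, Hits, Invert) are what make collectd emit the events this notifier consumes: each crossing is delivered to the Python plugin as a collectd.Notification carrying the fields listed in the header comment. A minimal sketch of such a handler, using only the stock collectd Python plugin API (the handler name and log text here are illustrative, not part of this patch):

    import collectd

    def notification_sketch(nObject):
        # nObject is a collectd.Notification; these attributes match the
        # fields documented in the fm_notifier.py header comment above.
        collectd.info("event host=%s plugin=%s:%s type=%s:%s severity=%d : %s" %
                      (nObject.host,
                       nObject.plugin, nObject.plugin_instance,
                       nObject.type, nObject.type_instance,
                       nObject.severity, nObject.message))
        return 0

    collectd.register_notification(notification_sketch)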
# @@ -86,9 +86,11 @@ import os import re import uuid import collectd +from threading import RLock as Lock from fm_api import constants as fm_constants from fm_api import fm_api import tsconfig.tsconfig as tsc +import plugin_common as pc # only load influxdb on the controller if tsc.nodetype == 'controller': @@ -116,6 +118,12 @@ PLUGIN = 'alarm notifier' # Path to the plugin's drop dir PLUGIN_PATH = '/etc/collectd.d/' +# the name of the collectd samples database +DATABASE_NAME = 'collectd samples' + +READING_TYPE__PERCENT_USAGE = '% usage' + + # collectd severity definitions ; # Note: can't seem to pull then in symbolically with a header NOTIF_FAILURE = 1 @@ -145,6 +153,7 @@ mangled_list = {"dev-shm", "etc-nova-instances", "opt-platform", "opt-cgcs", + "opt-etcd", "opt-extension", "opt-backups"} @@ -154,10 +163,20 @@ ALARM_ID__MEM = "100.103" ALARM_ID__DF = "100.104" ALARM_ID__EXAMPLE = "100.113" +ALARM_ID__VSWITCH_CPU = "100.102" +ALARM_ID__VSWITCH_MEM = "100.115" +ALARM_ID__VSWITCH_PORT = "300.001" +ALARM_ID__VSWITCH_IFACE = "300.002" + + # ADD_NEW_PLUGIN: add new alarm id to the list ALARM_ID_LIST = [ALARM_ID__CPU, ALARM_ID__MEM, ALARM_ID__DF, + ALARM_ID__VSWITCH_CPU, + ALARM_ID__VSWITCH_MEM, + ALARM_ID__VSWITCH_PORT, + ALARM_ID__VSWITCH_IFACE, ALARM_ID__EXAMPLE] # ADD_NEW_PLUGIN: add plugin name definition @@ -168,38 +187,29 @@ PLUGIN__CPU = "cpu" PLUGIN__MEM = "memory" PLUGIN__INTERFACE = "interface" PLUGIN__NTP_QUERY = "ntpq" -PLUGIN__VSWITCH_PORT = "vswitch-port" -PLUGIN__VSWITCH_CPU = "vswitch-cpu" -PLUGIN__VSWITCH_MEM = "vswitch-memory" -PLUGIN__VSWITCH_OVSDB = "vswitch-ovsdb" -PLUGIN__VSWITCH_OPENFLOW = "vswitch-openflow" -PLUGIN__VSWITCH_LACP_IFACE = "vswitch-lacp-iface" -PLUGIN__VSWITCH_IFACE = "vswitch-iface" -PLUGIN__NOVA_THINPOOL_LVM = "nova-thinpool-lvm" -PLUGIN__CINDER_THINPOOL_LVM = "cinder-thinpool-lvm" -PLUGIN__CINDER_THINPOOL_LVM_META = "cinder-thinpool-lvm-meta" +PLUGIN__VSWITCH_PORT = "vswitch_port" +PLUGIN__VSWITCH_CPU = "vswitch_cpu" +PLUGIN__VSWITCH_MEM = "vswitch_mem" +PLUGIN__VSWITCH_IFACE = "vswitch_iface" PLUGIN__EXAMPLE = "example" # ADD_NEW_PLUGIN: add plugin name to list PLUGIN_NAME_LIST = [PLUGIN__CPU, PLUGIN__MEM, PLUGIN__DF, + PLUGIN__VSWITCH_CPU, + PLUGIN__VSWITCH_MEM, + PLUGIN__VSWITCH_PORT, + PLUGIN__VSWITCH_IFACE, PLUGIN__EXAMPLE] -# ADD_NEW_PLUGIN: add alarm id and plugin to dictionary -# ALARM_ID_TO_PLUGIN_DICT = {} -# ALARM_ID_TO_PLUGIN_DICT[ALARM_ID__CPU] = PLUGIN__CPU -# ALARM_ID_TO_PLUGIN_DICT[ALARM_ID__MEM] = PLUGIN__MEM -# ALARM_ID_TO_PLUGIN_DICT[ALARM_ID__DF] = PLUGIN__DF -# ALARM_ID_TO_PLUGIN_DICT[ALARM_ID__EXAMPLE] = PLUGIN__EXAMPLE - - # PluginObject Class class PluginObject: dbObj = None # shared database connection obj host = None # saved hostname + lock = None # global lock for mread_func mutex database_setup = False # state of database setup database_setup_in_progress = False # connection mutex @@ -213,7 +223,7 @@ class PluginObject: self.plugin = plugin # name of the plugin ; df, cpu, memory ... self.plugin_instance = "" # the instance name for the plugin self.resource_name = "" # The top level name of the resource - self.instance_name = "" # The instanhce name + self.instance_name = "" # The instance name # Instance specific learned static class members. self.entity_id = "" # fm entity id host=. @@ -225,12 +235,17 @@ class PluginObject: self.value = float(0) # float value of reading # Common static class members. 
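The ADD_NEW_PLUGIN markers above spell out how a plugin/alarm pair gets registered. In the file itself the entries are added to the literal lists; expressed as runtime statements the same steps look roughly like this, where the id "100.999" and name "my_plugin" are hypothetical placeholders used only to illustrate the convention:

    # 1. define and register the alarm id (hypothetical id)
    ALARM_ID__MY_PLUGIN = "100.999"
    ALARM_ID_LIST.append(ALARM_ID__MY_PLUGIN)

    # 2. define and register the plugin name (hypothetical name)
    PLUGIN__MY_PLUGIN = "my_plugin"
    PLUGIN_NAME_LIST.append(PLUGIN__MY_PLUGIN)

    # 3. instantiate its PluginObject in the PLUGINS table defined further below
    PLUGINS[PLUGIN__MY_PLUGIN] = PluginObject(ALARM_ID__MY_PLUGIN, PLUGIN__MY_PLUGIN)

    # 4. give it its constant settings (resource_name, repair text, reading_type)
    #    in init_func(), alongside the existing plugin objects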
+ self.reason_warning = "" + self.reason_failure = "" self.repair = "" - self.alarm_type = fm_constants.FM_ALARM_TYPE_7 - self.cause = fm_constants.ALARM_PROBABLE_CAUSE_50 + self.alarm_type = fm_constants.FM_ALARM_TYPE_7 # OPERATIONAL + self.cause = fm_constants.ALARM_PROBABLE_CAUSE_50 # THRESHOLD CROSS self.suppression = True self.service_affecting = False + # default most reading types are usage + self.reading_type = READING_TYPE__PERCENT_USAGE + # Severity tracking lists. # Maintains severity state between notifications. # Each is a list of entity ids for severity asserted alarms. @@ -329,7 +344,11 @@ class PluginObject: # filter out messages to ignore ; notifications that have no value if "has not been updated for" in nObject.message: - collectd.debug("%s NOT UPDATED: %s" % (PLUGIN, self.entity_id)) + collectd.info("%s %s %s (%s)" % + (PLUGIN, + self.entity_id, + nObject.message, + nObject.severity)) return "done" # Get the value from the notification message. @@ -363,8 +382,8 @@ class PluginObject: # validate the reading try: self.value = float(self.values[0]) - # get the threshold if its there - if len(self.values) == 2: + # get the threshold if its there. + if len(self.values) > 1: self.threshold = float(self.values[1]) except ValueError as ex: @@ -390,6 +409,9 @@ class PluginObject: logit = False if self.count == 0 or LOG_STEP == 0: logit = True + elif self.reading_type == "connections": + if self.value != last: + logit = True elif self.value > last: if (last + LOG_STEP) < self.value: logit = True @@ -401,18 +423,40 @@ class PluginObject: # # Note: only usage type so far if logit: - reading_type = "% usage" - tmp = str(self.value).split('.') - if len(tmp[0]) == 1: - pre = ': ' - else: - pre = ': ' - collectd.info("%s reading%s%2.2f %s - %s" % - (PLUGIN, - pre, - self.value, - reading_type, - self.instance_name)) + resource = self.resource_name + + # setup resource name for filesystem instance usage log + if self.plugin == PLUGIN__DF: + resource = self.instance + + # setup resource name for vswitch process instance name + elif self.plugin == PLUGIN__VSWITCH_MEM: + resource += ' Processor ' + resource += self.instance_name + + if self.reading_type == READING_TYPE__PERCENT_USAGE: + tmp = str(self.value).split('.') + if len(tmp[0]) == 1: + pre = ': ' + else: + pre = ': ' + collectd.info("%s reading%s%2.2f %s - %s" % + (PLUGIN, + pre, + self.value, + self.reading_type, + resource)) + + elif self.reading_type == "connections" and \ + self.instance_objects and \ + self.value != self.last_value: + if self.instance_objects: + collectd.info("%s monitor: %2d %s - %s" % + (PLUGIN, + self.value, + self.reading_type, + resource)) + self.last_value = float(self.value) ########################################################################## @@ -599,12 +643,139 @@ class PluginObject: collectd.info("%s %s no failures" % (PLUGIN, self.plugin)) + ########################################################################## + # + # Name : _get_instance_object + # + # Purpose : Safely get an object from the self instance object list + # indexed by eid. + # + ########################################################################## + def _get_instance_object(self, eid): + """ + Safely get an object from the self instance object list indexed + by eid while locked. + :param eid: + :return: object or None + """ + + try: + collectd.debug("%s %s Get Lock ..." 
% (PLUGIN, self.plugin)) + PluginObject.lock.acquire() + + obj = self.instance_objects[eid] + return obj + except: + collectd.error("%s failed to get instance from %s object list" % + (PLUGIN, self.plugin)) + return None + + finally: + collectd.debug("%s %s Get UnLock ..." % (PLUGIN, self.plugin)) + PluginObject.lock.release() + + ########################################################################## + # + # Name : _add_instance_object + # + # Purpose : Safely add an object to the self instance object list + # indexed by eid while locked. if found locked the instance + # add will be re-attempted on next sample. + # + ########################################################################## + def _add_instance_object(self, obj, eid): + """ + Update self instance_objects list while locked + :param obj: the object to add + :param eid: indexed by this eid + :return: nothing + """ + try: + collectd.debug("%s %s Add Lock ..." % (PLUGIN, self.plugin)) + PluginObject.lock.acquire() + + self.instance_objects[eid] = obj + except: + collectd.error("%s failed to add instance to %s object list" % + (PLUGIN, self.plugin)) + + finally: + collectd.debug("%s %s Add UnLock ..." % (PLUGIN, self.plugin)) + PluginObject.lock.release() + + ########################################################################## + # + # Name : _copy_instance_object + # + # Purpose : Copy select members of self object to target object. + # + ########################################################################## + def _copy_instance_object(self, object): + """ + Copy select members of self object to target object + """ + + object.resource_name = self.resource_name + object.instance_name = self.instance_name + object.reading_type = self.reading_type + + object.reason_warning = self.reason_warning + object.reason_failure = self.reason_failure + object.repair = self.repair + + object.alarm_type = self.alarm_type + object.cause = self.cause + object.suppression = self.suppression + object.service_affecting = self.service_affecting + + ########################################################################## + # + # Name : _create_instance_object + # + # Purpose : Create a new instance object and tack it on the supplied base + # object's instance object dictionary. + # + ########################################################################## + def _create_instance_object(self, instance): + + try: + # create a new plugin object + inst_obj = PluginObject(self.id, self.plugin) + self._copy_instance_object(inst_obj) + + # initialize the object with instance specific data + inst_obj.instance_name = instance + inst_obj.entity_id = _build_entity_id(self.plugin, + instance) + + self._add_instance_object(inst_obj, inst_obj.entity_id) + + collectd.debug("%s created %s instance (%s) object %s" % + (PLUGIN, inst_obj.resource_name, + inst_obj.entity_id, inst_obj)) + + collectd.debug("%s monitoring %s %s %s" % + (PLUGIN, + inst_obj.resource_name, + inst_obj.instance_name, + inst_obj.reading_type)) + + return inst_obj + + except: + collectd.error("%s %s:%s inst object create failed" % + (PLUGIN, inst_obj.resource_name, instance)) + return None + ########################################################################## # # Name : _create_instance_objects # # Purpose : Create a list of instance objects for 'self' type plugin and - # add those objects to the parnet's instance_objects dictionary. + # add those objects to the parent's instance_objects dictionary. + # + # Note : This is currently only used for the DF (filesystem) plugin. 
+ # All other instance creations/allocations are done on-demand. # ########################################################################## def _create_instance_objects(self): @@ -612,11 +783,7 @@ class PluginObject: Create, initialize and add an instance object to this/self plugin """ - # ADD_NEW_PLUGIN: for plugins that have instances you need to - # add support for creating those instances and adding - # those instances to the parent instance_objects list. - - # Currently only the DF plugin has subordinate instance objects. + # Create the File System subordinate instance objects. if self.id == ALARM_ID__DF: # read the df.conf file and return/get a list of mount points @@ -651,6 +818,7 @@ class PluginObject: # initialize the object with instance specific data inst_obj.resource_name = self.resource_name inst_obj.instance_name = mp + inst_obj.instance = mp # build the plugin instance name from the mount point if mp == '/': inst_obj.plugin_instance = 'root' @@ -662,21 +830,30 @@ class PluginObject: # add this subordinate object to the parent's # instance object list - self.instance_objects[inst_obj.entity_id] = inst_obj + self._add_instance_object(inst_obj, inst_obj.entity_id) collectd.info("%s monitoring %s usage" % - (PLUGIN, mp)) + (PLUGIN, inst_obj.instance)) PluginObject.host = os.uname()[1] # ADD_NEW_PLUGIN: add plugin to this table -# This instanciates the plugin objects -PLUGINS = {PLUGIN__CPU: PluginObject(ALARM_ID__CPU, PLUGIN__CPU), - PLUGIN__MEM: PluginObject(ALARM_ID__MEM, PLUGIN__MEM), - PLUGIN__DF: PluginObject(ALARM_ID__DF, PLUGIN__DF), - PLUGIN__EXAMPLE: PluginObject(ALARM_ID__EXAMPLE, PLUGIN__EXAMPLE)} +# This instantiates the plugin objects +PLUGINS = { + PLUGIN__CPU: PluginObject(ALARM_ID__CPU, PLUGIN__CPU), + PLUGIN__MEM: PluginObject(ALARM_ID__MEM, PLUGIN__MEM), + PLUGIN__DF: PluginObject(ALARM_ID__DF, PLUGIN__DF), + PLUGIN__VSWITCH_CPU: PluginObject(ALARM_ID__VSWITCH_CPU, + PLUGIN__VSWITCH_CPU), + PLUGIN__VSWITCH_MEM: PluginObject(ALARM_ID__VSWITCH_MEM, + PLUGIN__VSWITCH_MEM), + PLUGIN__VSWITCH_PORT: PluginObject(ALARM_ID__VSWITCH_PORT, + PLUGIN__VSWITCH_PORT), + PLUGIN__VSWITCH_IFACE: PluginObject(ALARM_ID__VSWITCH_IFACE, + PLUGIN__VSWITCH_IFACE), + PLUGIN__EXAMPLE: PluginObject(ALARM_ID__EXAMPLE, PLUGIN__EXAMPLE)} def _get_base_object(alarm_id): @@ -704,27 +881,43 @@ def _get_object(alarm_id, eid): return base_obj -def is_uuid_like(val): - """Returns validation of a value as a UUID. - - For our purposes, a UUID is a canonical form string: - aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa - """ - try: - return str(uuid.UUID(val)) == val - except (TypeError, ValueError, AttributeError): - return False - - def _build_entity_id(plugin, plugin_instance): """ Builds an entity id string based on the collectd notification object. 
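For orientation, the entity ids _build_entity_id() returns for the new vswitch plugins follow the patterns noted in the comments below; with a hypothetical hostname of controller-0 and example instance names (neither taken from this patch) the results would be:

    # _build_entity_id(PLUGIN__VSWITCH_MEM,   "0")       -> "host=controller-0.processor=0"
    # _build_entity_id(PLUGIN__VSWITCH_IFACE, "data0")   -> "host=controller-0.interface=data0"
    # _build_entity_id(PLUGIN__VSWITCH_PORT,  "eth1000") -> "host=controller-0.port=eth1000"
    # DF ids keep their existing host=<hostname>.filesystem=<mount point> form.
    # A missing plugin_instance for any vswitch case is flagged as an error
    # and the function returns None.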
""" + inst_error = False + entity_id = 'host=' entity_id += PluginObject.host - if plugin == PLUGIN__DF: + if plugin == PLUGIN__VSWITCH_MEM: + + # host=.processor= + if plugin_instance: + entity_id += '.processor=' + plugin_instance + else: + inst_error = True + + elif plugin == PLUGIN__VSWITCH_IFACE: + + # host=.interface= + if plugin_instance: + entity_id += '.interface=' + plugin_instance + else: + inst_error = True + + elif plugin == PLUGIN__VSWITCH_PORT: + + # host=.port= + if plugin_instance: + entity_id += '.port=' + plugin_instance + else: + inst_error = True + + elif plugin == PLUGIN__DF: + + # host=.filesystem= if plugin_instance: instance = plugin_instance @@ -740,7 +933,18 @@ def _build_entity_id(plugin, plugin_instance): instance = instance.replace('-', '/') entity_id += instance - # collectd.info("%s entity_id : %s" % (PLUGIN, entity_id)) + # Will be uncommented when the numa memory monitor is added + # to the platform memory plugin. + # + #elif plugin == PLUGIN__MEM: + # if plugin_instance is not 'platform': + # # host=controller-0.numa=node0 + # entity_id += '.numa=' + # entity_id += plugin_instance + + if inst_error is True: + collectd.error("%s eid build failed ; missing instance" % plugin) + return None return entity_id @@ -773,37 +977,77 @@ def _get_df_mountpoints(): return(mountpoints) +def _print_obj(obj): + """ + Print a single object + """ + base_object = False + for plugin in PLUGIN_NAME_LIST: + if PLUGINS[plugin] == obj: + base_object = True + break + + num = len(obj.instance_objects) + if num > 0 or base_object is True: + prefix = "PLUGIN " + if num: + prefix += str(num) + else: + prefix += " " + else: + prefix = "INSTANCE" + + if obj.plugin_instance: + resource = obj.plugin + ":" + obj.plugin_instance + else: + resource = obj.plugin + + collectd.info("%s %s res: %s name: %s\n" % + (PLUGIN, prefix, resource, obj.resource_name)) + collectd.info("%s eid : %s\n" % (PLUGIN, obj.entity_id)) + collectd.info("%s inst: %s name: %s\n" % + (PLUGIN, obj.instance, obj.instance_name)) + collectd.info("%s value:%2.1f thld:%2.1f cause:%s (%d) type:%s" % + (PLUGIN, + obj.value, + obj.threshold, + obj.cause, + obj.count, + obj.reading_type)) + collectd.info("%s warn:%s fail:%s" % + (PLUGIN, obj.warnings, obj.failures)) + collectd.info("%s repair:t: %s" % + (PLUGIN, obj.repair)) + if obj.cause != fm_constants.ALARM_PROBABLE_CAUSE_50: + collectd.info("%s reason:w: %s\n" + "%s reason:f: %s\n" % + (PLUGIN, obj.reason_warning, + PLUGIN, obj.reason_failure)) + # collectd.info(" ") + + def _print_state(obj=None): """ Print the current object state """ - objs = [] - if obj is None: - objs.append(_get_base_object(ALARM_ID__CPU)) - objs.append(_get_base_object(ALARM_ID__MEM)) - objs.append(_get_base_object(ALARM_ID__DF)) - else: - objs.append(obj) - for o in objs: - collectd.info("%s PLUGIN %2d [%6s:%2.2f:%s] [w:%s f:%s] %d" % - (PLUGIN, - len(o.instance_objects), - o.plugin, - o.value, - o.entity_id, - o.warnings, - o.failures, - o.count)) - if len(o.instance_objects): - for inst_obj in o.instance_objects: - collectd.info("%s INSTANCE [%6s:%2.2f:%s] [w:%s f:%s] %d" % - (PLUGIN, - inst_obj.plugin, - inst_obj.value, - inst_obj.entity_id, - inst_obj.warnings, - inst_obj.failures, - inst_obj.count)) + try: + objs = [] + if obj is None: + for plugin in PLUGIN_NAME_LIST: + objs.append(PLUGINS[plugin]) + else: + objs.append(obj) + + collectd.debug("%s _print_state Lock ..." 
% PLUGIN) + PluginObject.lock.acquire() + for o in objs: + _print_obj(o) + if len(o.instance_objects): + for inst_obj in o.instance_objects: + _print_obj(o.instance_objects[inst_obj]) + finally: + collectd.debug("%s _print_state UnLock ..." % PLUGIN) + PluginObject.lock.release() def _database_setup(database): @@ -843,14 +1087,14 @@ def _database_setup(database): ############################################################ PluginObject.dbObj.create_retention_policy( - 'collectd samples', '4w', 1, database, True) + DATABASE_NAME, '4w', 1, database, True) except Exception as ex: if str(ex) == 'database already exists': try: collectd.info("%s influxdb:collectd %s" % (PLUGIN, str(ex))) PluginObject.dbObj.create_retention_policy( - 'collectd samples', '4w', 1, database, True) + DATABASE_NAME, '4w', 1, database, True) except Exception as ex: if str(ex) == 'retention policy already exists': collectd.info("%s influxdb:collectd %s" % @@ -864,15 +1108,21 @@ def _database_setup(database): error_str = "failed to connect to influxdb:" + database if not error_str: + found = False retention = \ PluginObject.dbObj.get_list_retention_policies(database) - collectd.info("%s influxdb:%s samples retention policy: %s" % - (PLUGIN, database, retention)) - collectd.info("%s influxdb:%s is setup" % (PLUGIN, database)) - PluginObject.database_setup = True - else: - collectd.error("%s influxdb:%s setup %s" % - (PLUGIN, database, error_str)) + for r in range(len(retention)): + if retention[r]["name"] == DATABASE_NAME: + collectd.info("%s influxdb:%s samples retention " + "policy: %s" % + (PLUGIN, database, retention[r])) + found = True + if found is True: + collectd.info("%s influxdb:%s is setup" % (PLUGIN, database)) + PluginObject.database_setup = True + else: + collectd.error("%s influxdb:%s retention policy NOT setup" % + (PLUGIN, database)) def _clear_alarm_for_missing_filesystems(): @@ -892,10 +1142,11 @@ def _clear_alarm_for_missing_filesystems(): if len(alarm_list): for eid in alarm_list: # search for any of them that might be alarmed. - obj = df_base_obj.instance_objects[eid] + obj = df_base_obj._get_instance_object(eid) # only care about df (file system plugins) - if obj.plugin == PLUGIN__DF and \ + if obj is not None and \ + obj.plugin == PLUGIN__DF and \ obj.entity_id == eid and \ obj.plugin_instance != 'root': @@ -912,7 +1163,6 @@ def _clear_alarm_for_missing_filesystems(): else: collectd.debug("%s maintaining alarm for %s" % (PLUGIN, path)) - return 0 # Collectd calls this function on startup. @@ -921,6 +1171,8 @@ def _clear_alarm_for_missing_filesystems(): def init_func(): """ Collectd FM Notifier Initialization Function """ + PluginObject.lock = Lock() + PluginObject.host = os.uname()[1] collectd.info("%s %s:%s init function" % (PLUGIN, tsc.nodetype, PluginObject.host)) @@ -933,15 +1185,19 @@ def init_func(): obj.repair += "contact next level of support." collectd.info("%s monitoring %s usage" % (PLUGIN, obj.resource_name)) + ########################################################################### + # Constant Memory Plugin Object settings obj = PLUGINS[PLUGIN__MEM] - obj.resource_name = "Memory" + obj.resource_name = "Platform Memory" obj.instance_name = PLUGIN__MEM obj.repair = "Monitor and if condition persists, " obj.repair += "contact next level of support; " obj.repair += "may require additional memory on Host." 
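The _database_setup() changes above stop assuming the retention policy was created and instead confirm it by name in the list returned by the client. Reduced to its core, the pattern is roughly the following, using only InfluxDBClient calls already present in this file (the helper name is illustrative):

    def _retention_policy_ok(dbObj, database):
        # tolerate "already exists" style failures on create, then confirm
        # the policy is really present by listing and matching on name.
        try:
            dbObj.create_retention_policy(DATABASE_NAME, '4w', 1, database, True)
        except Exception as ex:
            collectd.info("%s influxdb:%s %s" % (PLUGIN, database, str(ex)))
        return any(r["name"] == DATABASE_NAME
                   for r in dbObj.get_list_retention_policies(database))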
collectd.info("%s monitoring %s usage" % (PLUGIN, obj.resource_name)) + ########################################################################### + # Constant FileSystem Plugin Object settings obj = PLUGINS[PLUGIN__DF] obj.resource_name = "File System" @@ -954,6 +1210,63 @@ def init_func(): # Create one DF instance object per mount point obj._create_instance_objects() + # ntp query is for controllers only + if tsc.nodetype == 'worker' or 'worker' in tsc.subfunctions: + + ####################################################################### + + # Constant vSwitch CPU Usage Plugin Object settings + obj = PLUGINS[PLUGIN__VSWITCH_CPU] + obj.resource_name = "vSwitch CPU" + obj.instance_name = PLUGIN__VSWITCH_CPU + obj.repair = "Monitor and if condition persists, " + obj.repair += "contact next level of support." + collectd.info("%s monitoring %s usage" % (PLUGIN, obj.resource_name)) + + ####################################################################### + + # Constant vSwitch Memory Usage Plugin Object settings + obj = PLUGINS[PLUGIN__VSWITCH_MEM] + obj.resource_name = "vSwitch Memory" + obj.instance_name = PLUGIN__VSWITCH_MEM + obj.repair = "Monitor and if condition persists, " + obj.repair += "contact next level of support." + collectd.info("%s monitoring %s usage" % (PLUGIN, obj.resource_name)) + + ####################################################################### + + # Constant vSwitch Port State Monitor Plugin Object settings + obj = PLUGINS[PLUGIN__VSWITCH_PORT] + obj.resource_name = "vSwitch Port" + obj.instance_name = PLUGIN__VSWITCH_PORT + obj.reading_type = "state" + obj.reason_failure = "'Data' Port failed." + obj.reason_warning = "'Data' Port failed." + obj.repair = "Check cabling and far-end port configuration and " + obj.repair += "status on adjacent equipment." + obj.alarm_type = fm_constants.FM_ALARM_TYPE_4 # EQUIPMENT + obj.cause = fm_constants.ALARM_PROBABLE_CAUSE_29 # LOSS_OF_SIGNAL + obj.service_affecting = True + collectd.info("%s monitoring %s state" % (PLUGIN, obj.resource_name)) + + ####################################################################### + + # Constant vSwitch Interface State Monitor Plugin Object settings + obj = PLUGINS[PLUGIN__VSWITCH_IFACE] + obj.resource_name = "vSwitch Interface" + obj.instance_name = PLUGIN__VSWITCH_IFACE + obj.reading_type = "state" + obj.reason_failure = "'Data' Interface failed." + obj.reason_warning = "'Data' Interface degraded." + obj.repair = "Check cabling and far-end port configuration and " + obj.repair += "status on adjacent equipment." + obj.alarm_type = fm_constants.FM_ALARM_TYPE_4 # EQUIPMENT + obj.cause = fm_constants.ALARM_PROBABLE_CAUSE_29 # LOSS_OF_SIGNAL + obj.service_affecting = True + collectd.info("%s monitoring %s state" % (PLUGIN, obj.resource_name)) + + ########################################################################### + obj = PLUGINS[PLUGIN__EXAMPLE] obj.resource_name = "Example" obj.instance_name = PLUGIN__EXAMPLE @@ -981,6 +1294,7 @@ def init_func(): alarms = api.get_faults_by_id(alarm_id) if alarms: for alarm in alarms: + want_alarm_clear = False eid = alarm.entity_instance_id # ignore alarms not for this host if PluginObject.host not in eid: @@ -988,28 +1302,31 @@ def init_func(): base_obj = _get_base_object(alarm_id) if base_obj is None: - # Handle unrecognized alarm by clearing it ; - # should never happen since we are iterating - # over an internal alarm_id list. 
+ + # might be a plugin instance - clear it + want_alarm_clear = True + + collectd.info('%s found %s %s alarm [%s]' % + (PLUGIN, + alarm.severity, + alarm_id, + eid)) + + if want_alarm_clear is True: + if api.clear_fault(alarm_id, eid) is False: - collectd.error("%s %s:%s not found ; clear failed" % + collectd.error("%s %s:%s clear failed" % (PLUGIN, alarm_id, eid)) else: - collectd.error("%s %s:%s not found ; cleared" % - (PLUGIN, - alarm_id, - eid)) + collectd.info("%s clear %s %s alarm %s" % + (PLUGIN, + alarm.severity, + alarm_id, + eid)) continue - collectd.info('%s found %s alarm with %s severity [%s:%s:%s]' % - (PLUGIN, - base_obj.id, - alarm.severity, - base_obj.plugin, - alarm_id, - eid)) if alarm.severity == "critical": sev = "failure" elif alarm.severity == "major": @@ -1019,7 +1336,8 @@ def init_func(): continue # Load the alarm severity by doing a plugin/instance lookup. - base_obj._manage_alarm(eid, sev) + if base_obj is not None: + base_obj._manage_alarm(eid, sev) # The notifier function inspects the collectd notification and determines if @@ -1067,27 +1385,68 @@ def notifier_func(nObject): base_obj = obj = PLUGINS[nObject.plugin] # if this notification is for a plugin instance then get that - # instances's object instead. if that object does not yet exists - # then create it + # instances's object instead. + # If that object does not yet exists then create it. eid = '' - if nObject.plugin_instance: + + # DF instances are statically allocated + if nObject.plugin == PLUGIN__DF: + eid = _build_entity_id(nObject.plugin, nObject.plugin_instance) + + # get this instances object + obj = base_obj._get_instance_object(eid) + if obj is None: + # path should never be hit since all DF instances + # are statically allocated. + return 0 + + elif nObject.plugin_instance: + need_instance_object_create = False + # Build the entity_id from the parent object if needed # Build the entity_id from the parent object if needed eid = _build_entity_id(nObject.plugin, nObject.plugin_instance) try: + # Need lock when reading/writing any obj.instance_objects list + collectd.debug("%s %s lock" % (PLUGIN, nObject.plugin)) + PluginObject.lock.acquire() + + #collectd.info("%s Object Search eid: %s" % + # (nObject.plugin, eid)) + + #for o in base_obj.instance_objects: + # collectd.error("%s %s inst object dict item %s : %s" % + # (PLUGIN, nObject.plugin, o, + # base_obj.instance_objects[o])) + + # we will take an exception if this object is not in the list. + # the exception handling code below will create and add this + # object for success path the next time around. inst_obj = base_obj.instance_objects[eid] - if inst_obj is None: - collectd.error("%s %s:%s instance object is None" % - (PLUGIN, - nObject.plugin, - nObject.plugin_instance)) - return 0 + + collectd.debug("%s %s instance %s already exists %s" % + (PLUGIN, nObject.plugin, eid, inst_obj)) + # _print_state(inst_obj) + except: - # o.k. 
, not in the list yet, lets create one - collectd.error("%s %s:%s instance object not found" % - (PLUGIN, - nObject.plugin, - nObject.plugin_instance)) - return 0 + need_instance_object_create = True + finally: + collectd.debug("%s %s unlock" % (PLUGIN, nObject.plugin)) + PluginObject.lock.release() + + if need_instance_object_create is True: + base_obj._create_instance_object(nObject.plugin_instance) + inst_obj = base_obj._get_instance_object(eid) + if inst_obj: + collectd.debug("%s %s:%s inst object created" % + (PLUGIN, + inst_obj.plugin, + inst_obj.instance)) + else: + collectd.error("%s %s:%s inst object create failed" % + (PLUGIN, + nObject.plugin, + nObject.plugin_instance)) + return 0 # re-assign the object obj = inst_obj @@ -1096,13 +1455,6 @@ def notifier_func(nObject): # Build the entity_id from the parent object if needed eid = _build_entity_id(nObject.plugin, nObject.plugin_instance) - # TODO: Needed ? - if not len(obj.instance): - obj.instance = nObject.plugin - if nObject.plugin_instance: - obj.instance += '_' + nObject.plugin_instance - - # TODO: Needed ? # update the object with the eid if its not already set. if not len(obj.entity_id): obj.entity_id = eid @@ -1112,7 +1464,8 @@ def notifier_func(nObject): (PLUGIN, nObject.plugin, nObject.plugin_instance)) return 0 - # _print_state(obj) + # if obj.warnings or obj.failures: + # _print_state(obj) # If want_state_audit is True then run the audit. # Primarily used for debug @@ -1143,21 +1496,32 @@ def notifier_func(nObject): return 0 if _alarm_state == fm_constants.FM_ALARM_STATE_CLEAR: - if api.clear_fault(base_obj.id, obj.entity_id) is False: + if api.clear_fault(obj.id, obj.entity_id) is False: collectd.error("%s %s:%s clear_fault failed" % (PLUGIN, base_obj.id, obj.entity_id)) return 0 else: - reason = obj.resource_name - reason += " threshold exceeded" - if obj.threshold: - reason += "; {:2.0f}".format(obj.threshold) + "%" - # reason += "; {:2.2f}".format(obj.threshold) + "%" - if obj.value: - reason += ", actual " + "{:2.0f}".format(obj.value) + "%" + # manage addition of the failure reason text + if obj.cause == fm_constants.ALARM_PROBABLE_CAUSE_50: + # if this is a threshold alarm then build the reason text that + # includes the threahold and the reading that caused the assertion. + reason = obj.resource_name + reason += " threshold exceeded" + if obj.threshold: + reason += "; threshold {:2.0f} ".format(obj.threshold) + "%, " + if obj.value: + reason += "actual {:2.0f}".format(obj.value) + "%" + + elif _severity_num == fm_constants.FM_ALARM_SEVERITY_CRITICAL: + reason = obj.reason_failure + + else: + reason = obj.reason_warning + + # build the alarm object fault = fm_api.Fault( - alarm_id=base_obj.id, + alarm_id=obj.id, alarm_state=_alarm_state, entity_type_id=fm_constants.FM_ENTITY_TYPE_HOST, entity_instance_id=obj.entity_id, @@ -1170,7 +1534,7 @@ def notifier_func(nObject): suppression=base_obj.suppression) alarm_uuid = api.set_fault(fault) - if is_uuid_like(alarm_uuid) is False: + if pc.is_uuid_like(alarm_uuid) is False: collectd.error("%s %s:%s set_fault failed:%s" % (PLUGIN, base_obj.id, obj.entity_id, alarm_uuid)) return 0 @@ -1191,5 +1555,8 @@ def notifier_func(nObject): # Debug only: comment out for production code. 
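For threshold style alarms (probable cause ALARM_PROBABLE_CAUSE_50) the notifier now builds the reason text from the resource name, the configured threshold and the reading that crossed it, while state style alarms reuse the per-object reason_failure / reason_warning strings. A small sketch of the resulting threshold wording, illustration only, with hypothetical numbers:

def threshold_reason_sketch(resource_name, threshold, value):
    # mirrors the reason construction in notifier_func above
    reason = resource_name + " threshold exceeded"
    if threshold:
        reason += "; threshold {:2.0f} ".format(threshold) + "%, "
    if value:
        reason += "actual {:2.0f}".format(value) + "%"
    return reason

# threshold_reason_sketch('Platform Memory', 79.0, 83.4)
#   -> 'Platform Memory threshold exceeded; threshold 79 %, actual 83%'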
# obj._state_audit("change") + return 0 + + collectd.register_init(init_func) collectd.register_notification(notifier_func) diff --git a/monitoring/collectd-extensions/src/interface.conf b/monitoring/collectd-extensions/src/interface.conf index c7ef627f6..de3afaf23 100644 --- a/monitoring/collectd-extensions/src/interface.conf +++ b/monitoring/collectd-extensions/src/interface.conf @@ -1,11 +1,11 @@ - - Instance "state" + + Instance "used" Persist true PersistOK true - WarningMin 50 - FailureMin 0 + WarningMin 51 + FailureMin 1 # Hits 2 Invert false diff --git a/monitoring/collectd-extensions/src/interface.py b/monitoring/collectd-extensions/src/interface.py index ae42a47d6..7b44de8e8 100755 --- a/monitoring/collectd-extensions/src/interface.py +++ b/monitoring/collectd-extensions/src/interface.py @@ -1,129 +1,934 @@ # -# Copyright (c) 2018 Wind River Systems, Inc. +# Copyright (c) 2019 Wind River Systems, Inc. # # SPDX-License-Identifier: Apache-2.0 # ############################################################################ # -# This is the Host Interface Monitor plugin for Collectd. +# This is the Host Interface Monitor plugin for collectd. # -# Only mgmnt , infra and oam interfaces are supported with the following +# Only mgmt, infra and oam interfaces are supported with the following # mapping specified in /etc/platform/platform.conf # -# mgmnt - management_interface | all hosts | manditory +# oam - oam_interface | controller | mandatory +# mgmnt - management_interface | all hosts | mandatory # infa - infrastructure_interface | any host | optional -# oam - oam_interface | controller | manditory # -# This plugin reports link state inb the following way. +# This plugin queries the maintenance Link Monitor daemon 'lmon' +# for a link status summary of that hosts configured networks. # -# The plugin init function learns interface names from platform.conf +# This plugin's read_func issues an http GET request to the Link Monitor +# which responds with a json string that represents a complete summary +# of the monitored links, state and the time of the last event or when +# initial status was learned. An example of the Link Monitor response is # +# { +# "status" : "pass" +# "link_info": [ +# { "network":"mgmt", +# "type":"vlan", +# "links": [ +# { "name":"enp0s8.1", "state":"Up", "time":"5674323454567" }, +# { "name":"enp0s8.2", "state":"Up", "time":"5674323454567" }] +# }, +# { "network":"infra", +# "type":"bond", +# "bond":"bond0", +# "links": [ +# { "name":"enp0s9f1", "state":"Down", "time":"5674323454567" }, +# { "name":"enp0s9f0", "state":"Up" , "time":"5674323454567" }] +# }, +# { "network":"oam", +# "type":"single", +# "links": [ +# { "name":"enp0s3", "state":"Up", "time":"5674323454567" }] +# }] +# } +# +# On failure +# +# { +# "status" : "fail ; bad request " +# } +# +# This plugin then uses this information to manage interface alarm +# assertion and clear with appropriate severity. +# +# Severity: Interface and Port levels +# +# Alarm Level Minor Major Critical +# ----------- ----- --------------------- ---------------------------- +# Interface N/A One of lag pair is Up All Interface ports are Down +# Port N/A Physical Link is Down N/A +# +# Sample Data: represented as % of total links Up for that network interface +# +# 100 or 100% percent used - all links of interface are up. 
+# 50 or 50% percent used - one of lag pair is Up and the other is Down +# 0 or 0% percent used - all ports for that network are Down # ############################################################################ -import os -import random -import collectd -import tsconfig.tsconfig as tsc +import os +import time +import datetime +import collectd +import plugin_common as pc +from fm_api import constants as fm_constants +from fm_api import fm_api + +# Fault manager API Object +api = fm_api.FaultAPIs() + +# name of the plugin - all logs produced by this plugin are prefixed with this PLUGIN = 'interface plugin' -# static variables +# Interface Monitoring Interval in seconds +PLUGIN_AUDIT_INTERVAL = 10 -PLATFORM_CONF_MGMNT_LABEL = "management_interface=" -PLATFORM_CONF_INFRA_LABEL = "infrastructure_interface=" -PLATFORM_CONF_OAM_LABEL = "oam_interface=" +# Sample Data 'type' and 'instance' database field values. +PLUGIN_TYPE = 'percent' +PLUGIN_TYPE_INSTANCE = 'usage' -NETWORK_MGMNT = 'mgmnt' +# The Link Status Query URL +PLUGIN_HTTP_URL_PREFIX = 'http://localhost:' + +# This plugin's timeout +PLUGIN_HTTP_TIMEOUT = 5 + +# Specify the link monitor as the maintenance destination service +# full path should look like ; http://localhost:2122/mtce/lmon +PLUGIN_HTTP_URL_PATH = '/mtce/lmon' + +# Port and Interface Alarm Identifiers +PLUGIN_OAM_PORT_ALARMID = '100.106' # OAM Network Port +PLUGIN_OAM_IFACE_ALARMID = '100.107' # OAM Network Interface + +PLUGIN_MGMT_PORT_ALARMID = '100.108' # Management Network Port +PLUGIN_MGMT_IFACE_ALARMID = '100.109' # Management Network Interface + +PLUGIN_INFRA_PORT_ALARMID = '100.110' # Infrastructure Network Port +PLUGIN_INFRA_IFACE_ALARMID = '100.111' # Infrastructure Nwk Interface + +# List of all alarm identifiers. +ALARM_ID_LIST = [PLUGIN_OAM_PORT_ALARMID, + PLUGIN_OAM_IFACE_ALARMID, + PLUGIN_MGMT_PORT_ALARMID, + PLUGIN_MGMT_IFACE_ALARMID, + PLUGIN_INFRA_PORT_ALARMID, + PLUGIN_INFRA_IFACE_ALARMID] + +# Monitored Network Name Strings +NETWORK_MGMT = 'mgmt' NETWORK_INFRA = 'infra' NETWORK_OAM = 'oam' +# Port / Interface State strings +LINK_UP = 'Up' +LINK_DOWN = 'Down' -class iface: - def __init__(self, n, m, s): - self.master = {'network': n, 'name': m, 'state': 'down', 'slaves': s} - self.slave1 = {} - self.slave2 = {} - self.state = int(100) +# Alarm control actions +ALARM_ACTION_RAISE = 'raise' +ALARM_ACTION_CLEAR = 'clear' + +# Alarm level. +# Ports are the lowest level and represent a physical link +# Interfaces are port groupings in terms of LAG +LEVEL_PORT = 'port' +LEVEL_IFACE = 'interface' -class object: - hostname = '' +# Link Object (aka Port or Physical interface) Structure +# and member functions. +class LinkObject: - def __init__(self): - self.NETWORKS = {} - self.NETWORKS[NETWORK_MGMNT] = None - self.NETWORKS[NETWORK_INFRA] = None - self.NETWORKS[NETWORK_OAM] = None + def __init__(self, alarm_id): -obj = object() + self.name = None + self.state = LINK_UP + self.timestamp = float(0) + self.severity = fm_constants.FM_ALARM_SEVERITY_CLEAR + self.alarm_id = alarm_id + self.state_change = True + + collectd.debug("%s LinkObject constructor: %s" % + (PLUGIN, alarm_id)) + + ################################################################## + # + # Name : raise_port_alarm + # + # Purpose : This link object member function is used to + # raise link/port alarms. + # + # Parameters : Network the link is part of. + # + # Returns : True on failure and False on success. 
+ # + ################################################################## + def raise_port_alarm(self, network): + """ Raise a port alarm """ + + if self.severity != fm_constants.FM_ALARM_SEVERITY_MAJOR: + + if manage_alarm(self.name, + network, + LEVEL_PORT, + ALARM_ACTION_RAISE, + fm_constants.FM_ALARM_SEVERITY_MAJOR, + self.alarm_id, + self.timestamp) is False: + + self.severity = fm_constants.FM_ALARM_SEVERITY_MAJOR + collectd.info("%s %s %s port alarm raised" % + (PLUGIN, self.name, self.alarm_id)) + return False + else: + return True + else: + return False + + ################################################################## + # + # Name : clear_port_alarm + # + # Purpose : This link object member function is used to + # clear link/port alarms. + # + # Parameters : Network the link is part of. + # + # Returns : True on failure and False on success. + # + ################################################################## + def clear_port_alarm(self, network): + """ Clear a port alarm """ + + if self.severity != fm_constants.FM_ALARM_SEVERITY_CLEAR: + if manage_alarm(self.name, + network, + LEVEL_PORT, + ALARM_ACTION_CLEAR, + fm_constants.FM_ALARM_SEVERITY_CLEAR, + self.alarm_id, + self.timestamp) is False: + + collectd.info("%s %s %s port alarm cleared" % + (PLUGIN, self.name, self.alarm_id)) + self.severity = fm_constants.FM_ALARM_SEVERITY_CLEAR + return False + else: + return True + else: + return False + + +# Interface (aka Network) Level Object Structure and member functions +class NetworkObject: + + def __init__(self, name): + + self.name = name + self.sample = 0 + self.sample_last = 0 + self.severity = fm_constants.FM_ALARM_SEVERITY_CLEAR + self.degraded = False + self.timestamp = float(0) + + # add the respective alarm IDs to each object + alarm_id = None + if name == NETWORK_OAM: + alarm_id = PLUGIN_OAM_PORT_ALARMID + self.alarm_id = PLUGIN_OAM_IFACE_ALARMID + elif name == NETWORK_MGMT: + alarm_id = PLUGIN_MGMT_PORT_ALARMID + self.alarm_id = PLUGIN_MGMT_IFACE_ALARMID + elif name == NETWORK_INFRA: + alarm_id = PLUGIN_INFRA_PORT_ALARMID + self.alarm_id = PLUGIN_INFRA_IFACE_ALARMID + else: + self.alarm_id = "" + collectd.error("%s unexpected network (%s)" % (PLUGIN, name)) + + collectd.debug("%s %s NetworkObject constructor: %s" % + (PLUGIN, name, self.alarm_id)) + + if alarm_id: + self.link_one = LinkObject(alarm_id) + self.link_two = LinkObject(alarm_id) + + ################################################################## + # + # Name : raise_iface_alarm + # + # Purpose : This network object member function used to + # raise interface alarms. + # + # Parameters : None + # + # Returns : True on failure and False on success. 
+ # + ################################################################## + def raise_iface_alarm(self, severity): + """ Raise an interface alarm """ + + if severity == fm_constants.FM_ALARM_SEVERITY_CLEAR: + collectd.error("%s %s raise alarm called with clear severity" % + (PLUGIN, self.name)) + return True + + if self.severity != severity: + if manage_alarm(self.name, + self.name, + LEVEL_IFACE, + ALARM_ACTION_RAISE, + severity, + self.alarm_id, + self.timestamp) is False: + + self.severity = severity + collectd.info("%s %s %s %s interface alarm raised" % + (PLUGIN, + self.name, + self.alarm_id, + pc.get_severity_str(severity))) + return False + else: + return True + else: + return False + + ################################################################## + # + # Name : clear_iface_alarm + # + # Purpose : This network object member function used to + # clear interface alarms. + # + # Parameters : None + # + # Returns : True on failure and False on success. + # + ################################################################## + def clear_iface_alarm(self): + """ Clear an interface alarm """ + + if self.severity != fm_constants.FM_ALARM_SEVERITY_CLEAR: + if manage_alarm(self.name, + self.name, + LEVEL_IFACE, + ALARM_ACTION_CLEAR, + fm_constants.FM_ALARM_SEVERITY_CLEAR, + self.alarm_id, + self.timestamp) is False: + + collectd.info("%s %s %s %s interface alarm cleared" % + (PLUGIN, + self.name, + self.alarm_id, + pc.get_severity_str(self.severity))) + self.severity = fm_constants.FM_ALARM_SEVERITY_CLEAR + return False + else: + return True + else: + return False + + ###################################################################### + # + # Name : manage_iface_alarm + # + # Purpose : clear or raise appropriate severity level interface alarm + # + # Returns : None + # + ###################################################################### + def manage_iface_alarm(self): + """ """ + # Single Link Config + if self.link_two.name is None: + if self.link_one.state == LINK_DOWN: + if self.severity != fm_constants.FM_ALARM_SEVERITY_CRITICAL: + self.timestamp = self.link_one.timestamp + self.raise_iface_alarm( + fm_constants.FM_ALARM_SEVERITY_CRITICAL) + elif self.link_one.state == LINK_UP: + if self.severity != fm_constants.FM_ALARM_SEVERITY_CLEAR: + self.clear_iface_alarm() + + # Lagged Link Config + # + # The interface level timestamp is updated based on the failed + # link timestamps + elif self.link_one.state == LINK_UP and \ + self.link_two.state == LINK_DOWN: + if self.severity != fm_constants.FM_ALARM_SEVERITY_MAJOR: + self.timestamp = self.link_two.timestamp + self.raise_iface_alarm(fm_constants.FM_ALARM_SEVERITY_MAJOR) + + elif self.link_one.state == LINK_DOWN and \ + self.link_two.state == LINK_UP: + if self.severity != fm_constants.FM_ALARM_SEVERITY_MAJOR: + self.timestamp = self.link_one.timestamp + self.raise_iface_alarm(fm_constants.FM_ALARM_SEVERITY_MAJOR) + + elif self.link_one.state == LINK_UP and self.link_two.state == LINK_UP: + if self.severity != fm_constants.FM_ALARM_SEVERITY_CLEAR: + self.clear_iface_alarm() + + elif self.link_one.state == LINK_DOWN and \ + self.link_two.state == LINK_DOWN: + if self.severity != fm_constants.FM_ALARM_SEVERITY_CRITICAL: + if self.link_one.timestamp > self.link_two.timestamp: + self.timestamp = self.link_one.timestamp + else: + self.timestamp = self.link_two.timestamp + self.raise_iface_alarm(fm_constants.FM_ALARM_SEVERITY_CRITICAL) + + +# Plugin Control Object +obj = pc.PluginObject(PLUGIN, PLUGIN_HTTP_URL_PREFIX) + + +# Network Object 
List - Primary Network/Link Control Object +NETWORKS = [NetworkObject(NETWORK_MGMT), + NetworkObject(NETWORK_OAM), + NetworkObject(NETWORK_INFRA)] + + +########################################################################## +# +# Name : get_timestamp +# +# Purpose : Convert the long long int microsecond time as string +# that accompany link info from the Link Monitor (lmond) +# and catch exceptions in doing so. +# +# Parameters: lmon_time - long long int as string +# +# Returns : float time that can be consumed by datetime.fromtimestamp +# +# Returns same unit of now time if provided lmon_time is +# invalid. +# +########################################################################## +def get_timestamp(lmon_time): + """ Convert lmon time to fm timestamp time """ + + if lmon_time: + try: + return(float(float(lmon_time)/1000000)) + except: + collectd.error("%s failed to parse timestamp ;" + " using current time" % PLUGIN) + else: + collectd.error("%s no timestamp ;" + " using current time" % PLUGIN) + + return(float(time.time())) + + +def dump_network_info(network): + """ Log the specified network info """ + + link_one_event_time = datetime.datetime.fromtimestamp( + float(network.link_one.timestamp)).strftime('%Y-%m-%d %H:%M:%S') + + link_two_info = '' + if network.link_two.name is not None: + link_two_event_time = datetime.datetime.fromtimestamp( + float(network.link_two.timestamp)).strftime('%Y-%m-%d %H:%M:%S') + + link_two_info += "; link two '" + link_two_info += network.link_two.name + link_two_info += "' went " + network.link_two.state + link_two_info += " at " + link_two_event_time + + pcnt = '%' + + collectd.info("%s %5s %3d%c ; " + "link one '%s' went %s at %s %s" % + (PLUGIN, + network.name, + network.sample, + pcnt, + network.link_one.name, + network.link_one.state, + link_one_event_time, + link_two_info)) + + +######################################################################### +# +# Name : this_hosts_alarm +# +# Purpose : Determine if the supplied eid is for this host. +# +# Description: The eid formats for the alarms managed by this plugin are +# +# host=.port= +# host=.interface= +# +# Assumptions: There is no restriction preventing the system +# administrator from creating hostnames with period's ('.') +# in them. Because so the eid cannot simply be split +# around '='s and '.'s. Instead its split around this +# plugins level type '.port' or '.interface'. +# +# Returns : True if hostname is a match +# False otherwise +# +########################################################################## +def this_hosts_alarm(hostname, eid): + """ Check if the specified eid is for this host """ + + if hostname: + if eid: + # 'host=controller-0.interface=mgmt' + try: + eid_host = None + eid_disected = eid.split('=') + if len(eid_disected) == 3: + # ['host', 'controller-0.interface', 'mgmt'] + if len(eid_disected[1].split('.port')) == 2: + eid_host = eid_disected[1].split('.port')[0] + if eid_host and eid_host == hostname: + return True + elif len(eid_disected[1].split('.interface')) == 2: + eid_host = eid_disected[1].split('.interface')[0] + if eid_host and eid_host == hostname: + return True + except Exception as ex: + collectd.error("%s failed to parse alarm eid (%s)" + " [eid:%s]" % (PLUGIN, str(ex), eid)) + + return False + + +########################################################################## +# +# Name : clear_alarms +# +# Purpose : Clear all interface alarms on process startup. +# +# Description: Called after first successful Link Status query. 
+# +# Loops over the provided alarm id list querying all alarms +# for each. Any that are raised are precisely cleared. +# +# Prevents stuck alarms over port and interface reconfig. +# +# If the original alarm case still exists the alarm will +# be re-raised with the original link event timestamp that +# is part of the Link Status query response. +# +# Parameters : A list of this plugin's alarm ids +# +# Returns : True on failure and False on success +# +########################################################################## +def clear_alarms(alarm_id_list): + """ Clear alarm state of all plugin alarms. """ + found = False + for alarm_id in alarm_id_list: + alarms = api.get_faults_by_id(alarm_id) + if alarms: + for alarm in alarms: + eid = alarm.entity_instance_id + if this_hosts_alarm(obj.hostname, eid) is False: + # ignore other host alarms + continue + + if alarm_id == PLUGIN_OAM_PORT_ALARMID or \ + alarm_id == PLUGIN_OAM_IFACE_ALARMID or \ + alarm_id == PLUGIN_MGMT_PORT_ALARMID or \ + alarm_id == PLUGIN_MGMT_IFACE_ALARMID or \ + alarm_id == PLUGIN_INFRA_PORT_ALARMID or \ + alarm_id == PLUGIN_INFRA_IFACE_ALARMID: + eid = alarm.entity_instance_id + if api.clear_fault(alarm_id, eid) is False: + collectd.error("%s %s:%s clear_fault failed" % + (PLUGIN, alarm_id, eid)) + return True + else: + found = True + collectd.info("%s %s clearing %s alarm %s:%s" % + (PLUGIN, + NETWORK_INFRA, + alarm.severity, + alarm_id, + alarm.entity_instance_id)) + + if found is False: + collectd.info("%s found no startup alarms" % PLUGIN) + + return False + + +########################################################################## +# +# Name : manage_alarm +# +# Purpose : Raises or clears port and interface alarms based on +# calling parameters. +# +# Returns : True on failure and False on success +# +########################################################################## +def manage_alarm(name, network, level, action, severity, alarm_id, timestamp): + """ Manage raise and clear of port and interface alarms """ + + ts = datetime.datetime.fromtimestamp( + float(timestamp)).strftime('%Y-%m-%d %H:%M:%S') + collectd.debug("%s %s %s %s alarm for %s:%s [%s] %s" % (PLUGIN, + severity, level, alarm_id, network, name, action, ts)) + + if action == ALARM_ACTION_CLEAR: + alarm_state = fm_constants.FM_ALARM_STATE_CLEAR + reason = '' + repair = '' + else: + # reason ad repair strings are only needed on alarm assertion + alarm_state = fm_constants.FM_ALARM_STATE_SET + reason = "'" + network.upper() + "' " + level + repair = 'Check cabling and far-end port configuration ' \ + 'and status on adjacent equipment.' + + # build the alarm eid and name string + if level == LEVEL_PORT: + eid = 'host=' + obj.hostname + "." + level + '=' + name + reason += " failed" + else: + eid = 'host=' + obj.hostname + "." 
+ level + '=' + network + if severity == fm_constants.FM_ALARM_SEVERITY_MAJOR: + reason += " degraded" + else: + reason += " failed" + + if alarm_state == fm_constants.FM_ALARM_STATE_CLEAR: + if api.clear_fault(alarm_id, eid) is False: + collectd.error("%s %s:%s clear_fault failed" % + (PLUGIN, alarm_id, eid)) + return True + else: + return False + else: + fault = fm_api.Fault( + uuid="", + alarm_id=alarm_id, + alarm_state=alarm_state, + entity_type_id=fm_constants.FM_ENTITY_TYPE_HOST, + entity_instance_id=eid, + severity=severity, + reason_text=reason, + alarm_type=fm_constants.FM_ALARM_TYPE_7, + probable_cause=fm_constants.ALARM_PROBABLE_CAUSE_UNKNOWN, + proposed_repair_action=repair, + service_affecting=True, + timestamp=ts, + suppression=True) + + alarm_uuid = api.set_fault(fault) + if pc.is_uuid_like(alarm_uuid) is False: + collectd.error("%s %s:%s set_fault failed:%s" % + (PLUGIN, alarm_id, eid, alarm_uuid)) + return True + else: + return False # The config function - called once on collectd process startup def config_func(config): - """ - Configure the plugin - """ + """ Configure the plugin """ - collectd.debug('%s config function' % PLUGIN) + # Need to update the Link Status Query URL with the port number. + url_updated = False + + # The Link Monitor port number is first searched for in + # the /etc/mtc/lmond.conf file. + # If its not there then its taken from the plugin config. + + # /etc/mtc/lmond.conf + fn = '/etc/mtc/lmond.conf' + if (os.path.exists(fn)): + try: + with open(fn, 'r') as infile: + for line in infile: + if 'lmon_query_port' in line: + if isinstance(int(line.split()[2]), int): + + # add the port + obj.url += line.split()[2] + + # add the path /mtce/lmon + obj.url += PLUGIN_HTTP_URL_PATH + + url_updated = "config file" + break + except EnvironmentError as e: + collectd.error(str(e), UserWarning) + + if url_updated is False: + # Try the config as this might be updated by manifest + for node in config.children: + key = node.key.lower() + val = int(node.values[0]) + if key == 'port': + if isinstance(int(val), int): + + # add the port + obj.url += str(val) + + # add the path /mtce/lmon + obj.url += PLUGIN_HTTP_URL_PATH + + url_updated = "manifest" + break + + if url_updated: + collectd.info("%s configured by %s [%s]" % + (PLUGIN, url_updated, obj.url)) + obj.config_done = True + else: + collectd.error("%s config failure ; cannot monitor" % + (PLUGIN)) return 0 # The init function - called once on collectd process startup def init_func(): + """ Init the plugin """ - # get current hostname - obj.hostname = os.uname()[1] + if obj.config_done is False: + collectd.info("%s configuration failed" % PLUGIN) + time.sleep(300) + return False - # get the master interface names from /etc/platform/platform.conf - with open(tsc.PLATFORM_CONF_FILE, 'r') as infile: - for line in infile: + if obj.init_done is False: + if obj.init_ready() is False: + return False - # Management Interface - if PLATFORM_CONF_MGMNT_LABEL in line: - name = line.split('=')[1].replace('\n', '') - obj.NETWORKS[NETWORK_MGMNT] = iface(NETWORK_MGMNT, name, 0) - collectd.info("%s monitoring mgmnt interface : %s" % - (PLUGIN, - obj.NETWORKS[NETWORK_MGMNT].master['name'])) + obj.hostname = obj.gethostname() + obj.init_done = True + collectd.info("%s initialization complete" % PLUGIN) - # Infrastructure Interface - elif PLATFORM_CONF_INFRA_LABEL in line: - name = line.split('=')[1].replace('\n', '') - obj.NETWORKS[NETWORK_INFRA] = iface(NETWORK_INFRA, name, 0) - collectd.info("%s monitoring infra interface : %s" % - 
(PLUGIN, - obj.NETWORKS[NETWORK_INFRA].master['name'])) - - # OAM Interface - elif PLATFORM_CONF_OAM_LABEL in line: - name = line.split('=')[1].replace('\n', '') - obj.NETWORKS[NETWORK_OAM] = iface(NETWORK_OAM, name, 0) - collectd.info("%s monitoring oam interface: %s" % - (PLUGIN, - obj.NETWORKS[NETWORK_OAM].master['name'])) - - return 0 + return True # The sample read function - called on every audit interval def read_func(): + """ collectd interface monitor plugin read function """ - if obj.NETWORKS[NETWORK_MGMNT].state == 0: - obj.NETWORKS[NETWORK_MGMNT].state = 100 - else: - obj.NETWORKS[NETWORK_MGMNT].state -= 25 + if obj.init_done is False: + init_func() + return 0 + + if obj.audits == 0: + + # clear all alarms on first audit + + # block on fm availability + + # If existing raised the alarms are still valid then + # they will be re-raised with the same timestamp the + # original event occurred at once auditing resumes. + if clear_alarms(ALARM_ID_LIST) is True: + collectd.error("%s failed to clear existing alarms ; " + "retry next audit" % PLUGIN) + + # Don't proceed till we can communicate with FM and + # clear all existing interface and port alarms. + return 0 + + try: + # Issue query and construct the monitoring object + error = obj.make_http_request(to=PLUGIN_HTTP_TIMEOUT) + + if len(obj.jresp) == 0: + collectd.error("%s no json response from http request" % PLUGIN) + return 1 + + if error: + return 1 + + # Check query status + try: + if obj.jresp['status'] != 'pass': + collectd.error("%s link monitor query %s" % + (PLUGIN, obj.jresp['status'])) + return 0 + + except Exception as ex: + collectd.error("%s http request get reason failed ; %s" % + (PLUGIN, str(ex))) + collectd.info("%s resp:%d:%s" % + (PLUGIN, len(obj.jresp), obj.jresp)) + return 1 + + # log the first query response + if obj.audits == 0: + collectd.info("%s Link Status Query Response:%d:\n%s" % + (PLUGIN, len(obj.jresp), obj.jresp)) + + # uncomment below for debug purposes + # + # for network in NETWORKS: + # dump_network_info(network) + + try: + link_info = obj.jresp['link_info'] + for network_link_info in link_info: + collectd.debug("%s parse link info:%s" % + (PLUGIN, network_link_info)) + for network in NETWORKS: + if network.name == network_link_info['network']: + links = network_link_info['links'] + nname = network.name + if len(links) > 0: + link_one = links[0] + + # get initial link one name + if network.link_one.name is None: + network.link_one.name = link_one['name'] + + network.link_one.timestamp =\ + float(get_timestamp(link_one['time'])) + + # load link one state + if link_one['state'] == LINK_UP: + collectd.debug("%s %s IS Up [%s]" % + (PLUGIN, network.link_one.name, + network.link_one.state)) + if network.link_one.state != LINK_UP: + network.link_one.state_change = True + network.link_one.clear_port_alarm(nname) + network.link_one.state = LINK_UP + else: + collectd.debug("%s %s IS Down [%s]" % + (PLUGIN, network.link_one.name, + network.link_one.state)) + if network.link_one.state == LINK_UP: + network.link_one.state_change = True + network.link_one.raise_port_alarm(nname) + network.link_one.state = LINK_DOWN + + if len(links) > 1: + link_two = links[1] + + # get initial link two name + if network.link_two.name is None: + network.link_two.name = link_two['name'] + + network.link_two.timestamp =\ + float(get_timestamp(link_two['time'])) + + # load link two state + if link_two['state'] == LINK_UP: + collectd.debug("%s %s IS Up [%s]" % + (PLUGIN, network.link_two.name, + network.link_two.state)) + if 
network.link_two.state != LINK_UP: + network.link_two.state_change = True + network.link_two.clear_port_alarm(nname) + network.link_two.state = LINK_UP + else: + collectd.debug("%s %s IS Down [%s]" % + (PLUGIN, network.link_two.name, + network.link_two.state)) + if network.link_two.state == LINK_UP: + network.link_two.state_change = True + network.link_two.raise_port_alarm(nname) + network.link_two.state = LINK_DOWN + + # manage interface alarms + network.manage_iface_alarm() + + except Exception as ex: + collectd.error("%s link monitor query parse error: %s " % + (PLUGIN, obj.resp)) + + # handle state changes + for network in NETWORKS: + if network.link_two.name is not None and \ + network.link_one.state_change is True: + + if network.link_one.state == LINK_UP: + collectd.info("%s %s link one '%s' is Up" % + (PLUGIN, + network.name, + network.link_one.name)) + else: + collectd.info("%s %s link one '%s' is Down" % + (PLUGIN, + network.name, + network.link_one.name)) + + if network.link_two.name is not None and \ + network.link_two.state_change is True: + + if network.link_two.state == LINK_UP: + collectd.info("%s %s link two '%s' is Up" % + (PLUGIN, + network.name, + network.link_two.name)) + else: + collectd.info("%s %s link two %s 'is' Down" % + (PLUGIN, + network.name, + network.link_two.name)) + + # Dispatch usage value to collectd + val = collectd.Values(host=obj.hostname) + val.plugin = 'interface' + val.type = 'percent' + val.type_instance = 'used' + + # For each interface [ mgmt, oam, infra ] + # calculate the percentage used sample + # sample = 100 % when all its links are up + # sample = 0 % when all its links are down + # sample = 50 % when one of a lagged group is down + for network in NETWORKS: + + if network.link_one.name is not None: + + val.plugin_instance = network.name + + network.sample = 0 + + if network.link_two.name is not None: + # lagged + + if network.link_one.state == LINK_UP: + network.sample = 50 + if network.link_two.state == LINK_UP: + network.sample += 50 + else: + if network.link_one.state == LINK_UP: + network.sample = 100 + val.dispatch(values=[network.sample]) + + if network.link_one.state_change is True or \ + network.link_two.state_change is True: + + dump_network_info(network) + + network.link_one.state_change = False + network.link_two.state_change = False + + network.sample_last = network.sample + + else: + collectd.debug("%s %s network not provisioned" % + (PLUGIN, network.name)) + obj.audits += 1 + + except Exception as ex: + collectd.info("%s http request failed: %s" % (PLUGIN, str(ex))) - # Dispatch usage value to collectd - val = collectd.Values(host=obj.hostname) - val.plugin = 'interface' - val.plugin_instance = 'mgmnt' - val.type = 'absolute' - val.type_instance = 'used' - val.dispatch(values=[obj.NETWORKS[NETWORK_MGMNT].state]) return 0 # register the config, init and read functions collectd.register_config(config_func) collectd.register_init(init_func) -collectd.register_read(read_func) +collectd.register_read(read_func, interval=PLUGIN_AUDIT_INTERVAL) diff --git a/monitoring/collectd-extensions/src/memory.conf b/monitoring/collectd-extensions/src/memory.conf index 5e5195f09..997bf2d48 100644 --- a/monitoring/collectd-extensions/src/memory.conf +++ b/monitoring/collectd-extensions/src/memory.conf @@ -12,8 +12,8 @@ Instance "used" Persist true PersistOK true - WarningMax 80.00 - FailureMax 90.00 + WarningMax 79.00 + FailureMax 89.00 Hits 2 Invert false diff --git a/monitoring/collectd-extensions/src/mtce_notifier.py 
b/monitoring/collectd-extensions/src/mtce_notifier.py index 1ffa88a2a..1f645e0d8 100755 --- a/monitoring/collectd-extensions/src/mtce_notifier.py +++ b/monitoring/collectd-extensions/src/mtce_notifier.py @@ -39,6 +39,7 @@ import os import socket import collectd +import tsconfig.tsconfig as tsc # This plugin name PLUGIN = 'degrade notifier' @@ -65,6 +66,13 @@ ONE_EVERY = 10 PLUGIN__DF = 'df' PLUGIN__MEM = 'memory' PLUGIN__CPU = 'cpu' + +PLUGIN__VSWITCH_MEM = 'vswitch_mem' +PLUGIN__VSWITCH_CPU = 'vswitch_cpu' +PLUGIN__VSWITCH_PORT = "vswitch_port" +PLUGIN__VSWITCH_IFACE = "vswitch_iface" + + PLUGIN_INTERFACE = 'interface' PLUGIN__EXAMPLE = 'example' @@ -89,9 +97,13 @@ class collectdMtceNotifierObject: self.degrade_list__failure = [PLUGIN__DF, PLUGIN__MEM, PLUGIN__CPU, + PLUGIN__VSWITCH_MEM, + PLUGIN__VSWITCH_CPU, + PLUGIN__VSWITCH_PORT, + PLUGIN__VSWITCH_IFACE, PLUGIN_INTERFACE, PLUGIN__EXAMPLE] - self.degrade_list__warning = [] + self.degrade_list__warning = [PLUGIN_INTERFACE] # the running list of resources that require degrade. # a degrade clear message is sent whenever this list is empty. @@ -172,7 +184,7 @@ def config_func(config): Configure the maintenance degrade notifier plugin. """ - collectd.info('%s config function' % PLUGIN) + collectd.debug('%s config function' % PLUGIN) for node in config.children: key = node.key.lower() val = node.values[0] @@ -194,6 +206,10 @@ def init_func(): Collectd Mtce Notifier Initialization Function """ + obj.host = os.uname()[1] + collectd.info("%s %s:%s sending to mtce port %d" % + (PLUGIN, tsc.nodetype, obj.host, obj.port)) + collectd.debug("%s init function" % PLUGIN) @@ -241,8 +257,8 @@ def notifier_func(nObject): path = _df_instance_to_path(resource) add = os.path.ismount(path) if add is True: - collectd.debug("%s %s added to degrade list" % - (PLUGIN, resource)) + collectd.info("%s %s added to degrade list" % + (PLUGIN, resource)) obj.degrade_list.append(resource) else: # If severity is failure and no failures cause degrade @@ -264,8 +280,8 @@ def notifier_func(nObject): path = _df_instance_to_path(resource) add = os.path.ismount(path) if add is True: - collectd.debug("%s %s added to degrade list" % - (PLUGIN, resource)) + collectd.info("%s %s added to degrade list" % + (PLUGIN, resource)) obj.degrade_list.append(resource) else: # If severity is warning and no warnings cause degrade diff --git a/monitoring/collectd-extensions/src/plugin_common.py b/monitoring/collectd-extensions/src/plugin_common.py new file mode 100644 index 000000000..d6ba89894 --- /dev/null +++ b/monitoring/collectd-extensions/src/plugin_common.py @@ -0,0 +1,255 @@ +# +# Copyright (c) 2019 Wind River Systems, Inc. 
+# +# SPDX-License-Identifier: Apache-2.0 +# +############################################################################ +# +# This file contains common collectd plugin constructs and utilities +# +############################################################################ + +import collectd +import json +import uuid +import httplib2 +import socket +import os +from fm_api import constants as fm_constants +import tsconfig.tsconfig as tsc + +# http request constants +PLUGIN_TIMEOUT = 10 +PLUGIN_HTTP_HEADERS = {'Accept': 'application/json', 'Connection': 'close'} + +MIN_AUDITS_B4_FIRST_QUERY = 2 + + +class PluginObject(object): + + def __init__(self, plugin, url): + + # static variables set in init_func + self.plugin = plugin # the name of this plugin + self.hostname = '' # the name of this host + self.port = 0 # the port number for this plugin + + # dynamic gate variables + self.config_complete = False # set to True once config is complete + self.config_done = False # set true if config_func completed ok + self.init_done = False # set true if init_func completed ok + + # dynamic variables set in read_func + self.usage = float(0) # last usage value recorded as float + self.audits = 0 # number of audit since init + + # http and json specific variables + self.url = url # target url + self.jresp = None # used to store the json response + self.resp = '' + + # Log controls + self.config_logged = False # used to log once the plugin config + self.error_logged = False # used to prevent log flooding + self.log_throttle_count = 0 # used to count throttle logs + self.INIT_LOG_THROTTLE = 10 # the init log throttle threshold + + collectd.debug("%s Common PluginObject constructor [%s]" % + (plugin, url)) + + ########################################################################### + # + # Name : init_ready + # + # Description: Test for init ready condition + # + # Parameters : plugin name + # + # Returns : False if initial config complete is not done + # True if initial config complete is done + # + ########################################################################### + + def init_ready(self): + """ Test for system init ready state """ + + if os.path.exists(tsc.INITIAL_CONFIG_COMPLETE_FLAG) is False: + self.log_throttle_count += 1 + if self.log_throttle_count > self.INIT_LOG_THROTTLE: + collectd.info("%s initialization needs retry" % self.plugin) + self.log_throttle_count = 0 + return False + else: + self.log_throttle_count = 0 + + return True + + ########################################################################### + # + # Name : gethostname + # + # Description: load the hostname + # + # Parameters : plugin name + # + # Returns : Success - hostname + # Failure - None + # + # Updates : obj.hostname + # + ########################################################################### + def gethostname(self): + """ Fetch the hostname """ + + # get current hostname + try: + hostname = socket.gethostname() + if hostname: + return hostname + except: + collectd.error("%s failed to get hostname" % self.plugin) + + return None + + ########################################################################### + # + # Name : check_for_fit + # + # Description: load FIT data if it is present + # + # Fit Format : unit data -> 0 89 + # - instance 0 value 89 + # + # Parameters : plugin name + # object to update with fit + # name in fit file + # unit + # + # Returns : Did a failure occur ? 
+ # False = no + # True = yes + # + # Updates : self.usage with FIT value if FIT conditions are present + # and apply + # + ########################################################################### + def check_for_fit(self, name, unit): + """ Load FIT data into usage if it exists """ + + fit_file = '/var/run/fit/' + name + '_data' + + if os.path.exists(fit_file): + valid = False + with open(fit_file, 'r') as infile: + for line in infile: + try: + inst, val = line.split(' ') + if int(unit) == int(inst): + self.usage = float(val) + valid = True + + except: + try: + val = float(line) + self.usage = float(val) + valid = True + + except: + collectd.error("%s bad FIT data; ignoring" % + self.plugin) + + if valid is True: + collectd.info("%s %.2f usage (unit %d) (FIT)" % + (self.plugin, unit, self.usage)) + return False + + return True + + ########################################################################### + # + # Name : make_http_request + # + # Description: Issue an http request to the specified URL. + # Load and return the response + # Handling execution errors + # + # Parameters : self as current context. + # + # Optional: + # + # url - override the default self url with http address to + # issue the get request to. + # to - timeout override + # hdrs - override use of the default header list + # + # Updates : self.jresp with the json string response from the request. + # + # Returns : Error indication (True/False) + # True on error + # False on success + # + ########################################################################### + def make_http_request(self, url=None, to=None, hdrs=None): + """ Make a blocking HTTP Request and return result """ + + try: + + # handle timeout override + if to is None: + to = PLUGIN_TIMEOUT + + # handle url override + if url is None: + url = self.url + + # handle header override + if hdrs is None: + hdrs = PLUGIN_HTTP_HEADERS + + http = httplib2.Http(timeout=to) + resp = http.request(url, headers=hdrs) + + except Exception as ex: + collectd.info("%s http request failure (%s)" % + (self.plugin, str(ex))) + return True + + try: + collectd.debug("%s Resp: %s" % + (self.plugin, resp[1])) + + self.resp = resp[1] + self.jresp = json.loads(resp[1]) + + except Exception as ex: + collectd.info("%s http request parse failure (%s) (%s)" % + (self.plugin, str(ex), resp)) + return True + return False + + +def is_uuid_like(val): + """Returns validation of a value as a UUID. 
+ + For our purposes, a UUID is a canonical form string: + aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa + """ + try: + return str(uuid.UUID(val)) == val + except (TypeError, ValueError, AttributeError): + return False + + +def get_severity_str(severity): + """ get string that represents the specified severity """ + + if severity == fm_constants.FM_ALARM_SEVERITY_CLEAR: + return "clear" + elif severity == fm_constants.FM_ALARM_SEVERITY_CRITICAL: + return "critical" + elif severity == fm_constants.FM_ALARM_SEVERITY_MAJOR: + return "major" + elif severity == fm_constants.FM_ALARM_SEVERITY_MINOR: + return "minor" + else: + return "unknown" diff --git a/monitoring/collectd-extensions/src/python_plugins.conf b/monitoring/collectd-extensions/src/python_plugins.conf index 52aa763d0..85ba02377 100644 --- a/monitoring/collectd-extensions/src/python_plugins.conf +++ b/monitoring/collectd-extensions/src/python_plugins.conf @@ -10,6 +10,10 @@ LoadPlugin python Path "/proc/meminfo" Import "ntpq" + Import "interface" + + Port 2122 + LogTraces = true Encoding "utf-8" diff --git a/releasenotes/source/conf.py b/releasenotes/source/conf.py index 55a1d27c8..ee30d0c17 100644 --- a/releasenotes/source/conf.py +++ b/releasenotes/source/conf.py @@ -46,6 +46,8 @@ source_suffix = '.rst' # The master toctree document. master_doc = 'index' +project = u'stx-integ' + # Release notes are version independent, no need to set version and release release = '' version = '' diff --git a/security/swtpm/centos/build_srpm.data b/security/swtpm/centos/build_srpm.data index ca0fb04bd..c528a599d 100644 --- a/security/swtpm/centos/build_srpm.data +++ b/security/swtpm/centos/build_srpm.data @@ -1,2 +1,2 @@ COPY_LIST="$PKG_BASE/files/* $CGCS_BASE/downloads/swtpm-0.1.0-253eac5.tar.gz" -TIS_PATCH_VER=0 +TIS_PATCH_VER=1 diff --git a/security/swtpm/centos/swtpm.spec b/security/swtpm/centos/swtpm.spec index 9c44d0962..4ba9ce200 100644 --- a/security/swtpm/centos/swtpm.spec +++ b/security/swtpm/centos/swtpm.spec @@ -2,12 +2,12 @@ %define name swtpm %define version 0.1.0 -#WRS +#STX #%define release 1 %define release 2%{?_tis_dist}.%{tis_patch_ver} # Valid crypto subsystems are 'freebl' and 'openssl' -#WRS +#STX #%if "%{crypto_subsystem}" == "" %define crypto_subsystem openssl #%endif @@ -15,7 +15,7 @@ Summary: TPM Emulator Name: %{name} Version: %{version} -#WRS +#STX #Release: %{release}.dev2%{?dist} Release: %{release} License: BSD @@ -23,9 +23,8 @@ Group: Applications/Emulators Source: %{name}-%{version}-253eac5.tar.gz BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root -#WRS -Source1: qemu -Source2: setup_vtpm +#STX +Source1: setup_vtpm # due to gnutls backlevel API: @@ -49,11 +48,11 @@ BuildRequires: libtasn1-tools BuildRequires: kernel-modules-extra %endif -#WRS +#STX BuildRequires: openssl-devel Requires: openssl -#WRS +#STX Requires: seabios-bin >= 1.10.2-3 Requires: fuse expect libtpms >= 0.6.0 @@ -94,7 +93,7 @@ Summary: Tools for the TPM emulator License: BSD Group: Applications/Emulators Requires: swtpm fuse -#WRS +#STX #Requires: trousers >= 0.3.9 tpm-tools >= 1.3.8-6 expect bash net-tools gnutls-utils Requires: trousers >= 0.3.9 expect bash net-tools gnutls-utils @@ -106,9 +105,8 @@ Tools for the TPM emulator from the swtpm package %attr( 755, root, root) %{_bindir}/swtpm %{_mandir}/man8/swtpm.8* -#WRS +#STX /etc/libvirt/setup_vtpm -/etc/libvirt/hooks/qemu %files cuse @@ -158,7 +156,7 @@ Tools for the TPM emulator from the swtpm package %build -#WRS +#STX ./bootstrap.sh %configure \ --prefix=/usr \ @@ -180,13 +178,12 @@ make 
%{?_smp_mflags} check make %{?_smp_mflags} install DESTDIR=${RPM_BUILD_ROOT} rm -f ${RPM_BUILD_ROOT}%{_libdir}/*.a ${RPM_BUILD_ROOT}%{_libdir}/*.la -#WRS -mkdir -p $RPM_BUILD_ROOT/etc/libvirt/hooks +#STX +mkdir -p $RPM_BUILD_ROOT/etc/libvirt -install -m 0500 %{SOURCE1} $RPM_BUILD_ROOT/etc/libvirt/hooks/qemu -install -m 0500 %{SOURCE2} $RPM_BUILD_ROOT/etc/libvirt/setup_vtpm +install -m 0500 %{SOURCE1} $RPM_BUILD_ROOT/etc/libvirt/setup_vtpm -# WRS: Don't set (or remove on uninstall): SELINUX Policy and contexts +# STX: Don't set (or remove on uninstall): SELINUX Policy and contexts #%post cuse #if [ -n "$(type -p semodule)" ]; then # for pp in /usr/share/swtpm/*.pp ; do diff --git a/utilities/platform-util/centos/build_srpm.data b/utilities/platform-util/centos/build_srpm.data index 880171162..260eb2124 100644 --- a/utilities/platform-util/centos/build_srpm.data +++ b/utilities/platform-util/centos/build_srpm.data @@ -1,4 +1,4 @@ SRC_DIR="platform-util" COPY_LIST_TO_TAR="scripts" -TIS_PATCH_VER=15 +TIS_PATCH_VER=16 diff --git a/utilities/platform-util/scripts/patch-restart-mtce b/utilities/platform-util/scripts/patch-restart-mtce index 357369288..0f888374b 100755 --- a/utilities/platform-util/scripts/patch-restart-mtce +++ b/utilities/platform-util/scripts/patch-restart-mtce @@ -131,6 +131,9 @@ do "mtcalarmd") pmon_managed_processes=(${pmon_managed_processes[@]} "mtcalarmd:0") ;; + "lmond") + pmon_managed_processes=(${pmon_managed_processes[@]} "lmond:0") + ;; *) loginfo "Unknown process:${process}" diff --git a/virt/libvirt/centos/build_srpm.data b/virt/libvirt/centos/build_srpm.data index 29f83aaf0..ff5d0fb22 100644 --- a/virt/libvirt/centos/build_srpm.data +++ b/virt/libvirt/centos/build_srpm.data @@ -1,6 +1,7 @@ SRC_DIR="$CGCS_BASE/git/libvirt" COPY_LIST="\ libvirt/* \ + libvirt/hooks/* \ $CGCS_BASE/downloads/gnulib-ffc927e.tar.gz \ $CGCS_BASE/downloads/keycodemapdb-16e5b07.tar.gz" TIS_BASE_SRCREV=ab58260efaa712650c63bb1917122f270070fa4b diff --git a/virt/libvirt/centos/libvirt.spec b/virt/libvirt/centos/libvirt.spec index f7efcb991..e7ea8634c 100644 --- a/virt/libvirt/centos/libvirt.spec +++ b/virt/libvirt/centos/libvirt.spec @@ -16,7 +16,7 @@ # Always run autoreconf %{!?enable_autotools:%global enable_autotools 1} -# WRS: Custom build config. Based on the R2/bitbake configure line. +# STX: Custom build config. Based on the R2/bitbake configure line. %define _without_esx 1 %define _without_hyperv 1 %define _without_libxl 1 @@ -258,13 +258,14 @@ URL: https://libvirt.org/ Source0: http://libvirt.org/sources/%{?mainturl}libvirt-%{version}.tar.gz #Source1: symlinks -# WRS +# STX Source2: libvirt.logrotate Source3: libvirt.lxc Source4: libvirt.qemu Source5: libvirt.uml Source6: gnulib-ffc927e.tar.gz Source7: keycodemapdb-16e5b07.tar.gz +Source8: qemu Requires: libvirt-daemon = %{version}-%{release} Requires: libvirt-daemon-config-network = %{version}-%{release} @@ -461,9 +462,9 @@ BuildRequires: wireshark-devel >= 1.12.1 BuildRequires: libssh-devel >= 0.7.0 %endif -# WRS: For generating configure +# STX: For generating configure BuildRequires: gnulib -# WRS: Needed by bootstrap +# STX: Needed by bootstrap BuildRequires: perl-XML-XPath Provides: bundled(gnulib) @@ -1304,7 +1305,7 @@ rm -rf .git # place macros above and build commands below this comment -# WRS: Generate configure script. Default is to do a "git clone" of gnulib. +# STX: Generate configure script. Default is to do a "git clone" of gnulib. # Use the tar ball gnulib tarball instead. 
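Stepping back to the collectd changes earlier in this patch: the new plugin_common.PluginObject gives plugins a shared HTTP/JSON query path, and interface.py uses it to poll the maintenance Link Monitor. A rough usage sketch, illustration only, assuming it runs inside collectd's Python plugin environment and that the lmon query port is 2122 (the value wired into python_plugins.conf):

import plugin_common as pc

obj = pc.PluginObject('example plugin', 'http://localhost:')
obj.url += '2122' + '/mtce/lmon'             # port plus the /mtce/lmon path
if obj.make_http_request(to=5) is False:     # False means request and JSON parse succeeded
    if obj.jresp.get('status') == 'pass':
        for net in obj.jresp.get('link_info', []):
            states = [link['state'] for link in net['links']]
            print(net['network'], states)    # e.g. 'mgmt' ['Up', 'Up']

interface.py then folds those per-link states into the 'percent used' sample described in its header comment: 100 when every link of a network is Up, 50 when one half of a lagged pair is Down, and 0 when all links are Down.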
tar zxf %{SOURCE6} ./bootstrap --no-git --gnulib-srcdir=gnulib-ffc927e --copy @@ -1379,7 +1380,7 @@ rm -f po/stamp-po --without-dtrace \ %{arg_init_script} -#WRS: Avoid doing a 'config.status --recheck' (./configure executed twice). +#STX: Avoid doing a 'config.status --recheck' (./configure executed twice). touch -r config.status configure make %{?_smp_mflags} @@ -1470,7 +1471,7 @@ rm -rf $RPM_BUILD_ROOT%{_sysconfdir}/logrotate.d/libvirtd.uml # Copied into libvirt-docs subpackage eventually mv $RPM_BUILD_ROOT%{_datadir}/doc/libvirt-%{version} libvirt-docs -# WRS: Disable dtrace +# STX: Disable dtrace # %ifarch %{power64} s390x x86_64 ia64 alpha sparc64 # mv $RPM_BUILD_ROOT%{_datadir}/systemtap/tapset/libvirt_probes.stp \ # $RPM_BUILD_ROOT%{_datadir}/systemtap/tapset/libvirt_probes-64.stp @@ -1478,7 +1479,7 @@ mv $RPM_BUILD_ROOT%{_datadir}/doc/libvirt-%{version} libvirt-docs # $RPM_BUILD_ROOT%{_datadir}/systemtap/tapset/libvirt_qemu_probes-64.stp # %endif -# WRS: Begin custom install +# STX: Begin custom install ## Enable syslog for libvirtd ( /var/log/libvirtd.log ) echo "log_outputs=\"3:syslog:libvirtd\"" >> %{buildroot}/etc/libvirt/libvirtd.conf @@ -1493,12 +1494,15 @@ install -p -D -m 644 %{SOURCE2} %{buildroot}/etc/logrotate.d/libvirtd install -p -D -m 644 %{SOURCE3} %{buildroot}/etc/logrotate.d/libvirtd.lxc install -p -D -m 644 %{SOURCE4} %{buildroot}/etc/logrotate.d/libvirtd.qemu install -p -D -m 644 %{SOURCE5} %{buildroot}/etc/logrotate.d/libvirtd.uml -# WRS: End custom install +## Install hooks +mkdir -p $RPM_BUILD_ROOT/etc/libvirt/hooks +install -m 0500 %{SOURCE8} $RPM_BUILD_ROOT/etc/libvirt/hooks/qemu +# STX: End custom install %clean rm -fr %{buildroot} -# WRS: We are not maintaining the unit tests. +# STX: We are not maintaining the unit tests. # %check # cd tests # # These tests don't current work in a mock build root @@ -1631,7 +1635,7 @@ if [ $1 -ge 1 ] ; then fi %post daemon-config-network -# WRS: The 'with_network' flag doesn't work properly. There are some packaging +# STX: The 'with_network' flag doesn't work properly. There are some packaging # errors when using it. Disable default.xml manually ... # We don't want 'virbr0' and 'virbr0-nic' interfaces created. @@ -1777,11 +1781,11 @@ exit 0 %files -# WRS: Customization +# STX: Customization %dir /data/images/ %files docs -# TODO(WRS): NEWS is not present in git source repo. +# TODO(STX): NEWS is not present in git source repo. 
%doc AUTHORS ChangeLog.gz README %doc libvirt-docs/* @@ -1874,8 +1878,9 @@ exit 0 %doc examples/polkit/*.rules -# WRS: Customization +# STX: Customization /etc/logrotate.d/* +/etc/libvirt/hooks/qemu %files daemon-config-network %dir %{_datadir}/libvirt/networks/ @@ -2061,7 +2066,7 @@ exit 0 %{_bindir}/virt-pki-validate %{_bindir}/virt-host-validate -# WRS: Disable dtrace +# STX: Disable dtrace # %{_datadir}/systemtap/tapset/libvirt_probes*.stp # %{_datadir}/systemtap/tapset/libvirt_qemu_probes*.stp # %{_datadir}/systemtap/tapset/libvirt_functions.stp diff --git a/security/swtpm/files/qemu b/virt/libvirt/libvirt/hooks/qemu similarity index 60% rename from security/swtpm/files/qemu rename to virt/libvirt/libvirt/hooks/qemu index 654485453..469105bd9 100755 --- a/security/swtpm/files/qemu +++ b/virt/libvirt/libvirt/hooks/qemu @@ -34,6 +34,51 @@ OPERATION=$* logger -p info -t $0 "hook qemu file guest $GUEST_NAME with operation $OPERATION" +# CPU Low latency setup: +# +# A cpu is set to low latency when: +# 1) host is set to subfunction=lowlatency in platform.conf and +# 2) domain has dedicated pinning +# +# example of section when domain has dedicated pinning: +# +# +# +# +# +# +# +# example of section when domain has shared pinning: +# +# 4096 +# +# +# +# +# +# + +if [ "${OPERATION}" == "prepare begin -" ] || [ "${OPERATION}" == "stopped end -" ]; then + # verify this host is set as lowlatency + lowlat=$(cat /etc/platform/platform.conf 2>/dev/null | grep -E 'subfunction.*lowlatency') + if [ -n "${lowlat}" ]; then + # grab the settings and remove single quotes + CPUTUNE=$(echo ${XML_DATA} | grep -oP '(?<=)' | sed "s/'//g") + + # grab all cpuset pinned to a unique CPU. Treat them as dedicated + CPUSET=($(echo ${CPUTUNE} | grep -oP '(?<=cpuset=)[^/]+(?=.+emulator)' | grep -vP '[^0-9]')) + if [ ${#CPUSET[@]} -ne 0 ]; then + # convert to a comma separated list + CPUS=$(IFS=, ; echo "${CPUSET[*]}") + if [ "${OPERATION}" == "prepare begin -" ]; then + /usr/bin/set-cpu-wakeup-latency.sh "low" "${CPUS}" + else + /usr/bin/set-cpu-wakeup-latency.sh "high" "${CPUS}" + fi + fi + fi +fi + VTPM_OPER="" if [ "$OPERATION" == "prepare begin -" ]; then