Relocated some packages to repo 'platform-armada-app'

List of relocated subdirectories:

kubernetes/platform/stx-platform/stx-platform-helm
kubernetes/helm-charts/ceph-pools-audit
kubernetes/helm-charts/rbd-provisioner

Story: 2006166
Task: 35687
Depends-On: I665dc7fabbfffc798ad57843eb74dca16e7647a3
Change-Id: I9fff4d9f973fea151812f516859b0c9ce190afb8
Signed-off-by: Scott Little <scott.little@windriver.com>
Depends-On: Ibca91cc733e27cd9fb4926b7151cfa8a7976a59d
Signed-off-by: Scott Little <scott.little@windriver.com>
Scott Little 2019-09-04 10:14:29 -04:00
parent 1d0befb47a
commit f7a59a8dd3
31 changed files with 3 additions and 1275 deletions


@ -1,39 +0,0 @@
The expected layout for this subdirectory is as follows:
kubernetes
|-- applications
|   `-- <application>
|       `-- <application>-helm RPM
|           `-- centos
|               `-- build_srpm.data
|               `-- <application>-helm.spec
|           `-- <application>-helm
|               `-- manifests
|                   `-- main-manifest.yaml
|                   `-- alt-manifest-1.yaml
|                   `-- ...
|                   `-- alt-manifest-N.yaml
|               `-- custom chart 1
|                   `-- Chart.yaml
|                   `-- ...
|               `-- ...
|               `-- custom chart N
|                   `-- Chart.yaml
|                   `-- ...
|-- helm-charts
|   `-- chart
|   `-- chart
`-- README
The idea is that all our custom helm charts that are common across applications
would go under "helm-charts". Each chart would get its own subdirectory.
Custom applications would generally consist of one or more armada manifests
referencing multiple helm charts (both ours and upstream ones). Each application
is packaged as an RPM. These application RPMs are used to produce the build
artifacts (helm tarballs + armada manifests) but are not installed on the
system. These artifacts are extracted later for proper application packaging
with additional required metadata (TBD).
These applications would each get their own subdirectory under
"applications".


@ -1,10 +0,0 @@
#
# Copyright (c) 2019 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
apiVersion: v1
appVersion: "1.0"
description: Ceph RBD pool replication monitor chart
name: ceph-pools-audit
version: 0.1.0


@ -1,9 +0,0 @@
#
# Copyright (c) 2019 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
dependencies:
- name: helm-toolkit
repository: http://localhost:8879/charts
version: 0.1.0


@ -1,55 +0,0 @@
#!/bin/bash
{{/*
#
# Copyright (c) 2019 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
*/}}
ceph -s
if [ $? -ne 0 ]; then
echo "Error: Ceph cluster is not accessible, check Pod logs for details."
exit 1
fi
touch /etc/ceph/ceph.client.admin.keyring
echo "RBD_POOL_CRUSH_RULE_NAME: ${RBD_POOL_CRUSH_RULE_NAME}"
if [ -z "${RBD_POOL_CRUSH_RULE_NAME}" ]; then
echo "Error: No Ceph crush rule name specified"
exit 1
fi
ceph osd crush rule ls | grep -q "${RBD_POOL_CRUSH_RULE_NAME}"
if [ $? -ne 0 ]; then
echo "Error: Ceph crush rule ${RBD_POOL_CRUSH_RULE_NAME} not found, exit"
exit 1
fi
POOLS=( $(ceph osd pool ls) )
for pool in "${POOLS[@]}"; do
echo "Check for pool name: $pool"
pool_rule=$(ceph osd pool get $pool crush_rule | awk '{print $2}')
echo "Pool crush rule name: ${pool_rule}"
if [ "${pool_rule}" != "${RBD_POOL_CRUSH_RULE_NAME}" ]; then
continue
fi
pool_size=$(ceph osd pool get $pool size | awk '{print $2}')
pool_min_size=$(ceph osd pool get $pool min_size | awk '{print $2}')
echo "===> pool_size: ${pool_size} pool_min_size: ${pool_min_size}"
if [ "${pool_size}" != "${RBD_POOL_REPLICATION}" ]; then
echo "Set size for $pool to ${RBD_POOL_REPLICATION}"
ceph osd pool set $pool size "${RBD_POOL_REPLICATION}"
fi
if [ "${pool_min_size}" != "${RBD_POOL_MIN_REPLICATION}" ]; then
echo "Set min_size for $pool to ${RBD_POOL_MIN_REPLICATION}"
ceph osd pool set $pool min_size "${RBD_POOL_MIN_REPLICATION}"
fi
done
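As a rough sketch (not part of the chart), the audit above can be exercised
manually against a reachable Ceph cluster by exporting the same environment
variables the CronJob template sets, here with the defaults from values.yaml:

RBD_POOL_CRUSH_RULE_NAME=storage_tier_ruleset \
RBD_POOL_REPLICATION=2 \
RBD_POOL_MIN_REPLICATION=1 \
bash ceph-pools-audit.sh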


@ -1,19 +0,0 @@
{{/*
#
# Copyright (c) 2019 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
*/}}
{{- if .Values.manifests.configmap_bin }}
{{- $envAll := . }}
---
apiVersion: v1
kind: ConfigMap
metadata:
name: ceph-pools-bin
data:
ceph-pools-audit.sh: |
{{ tuple "bin/_ceph-pools-audit.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
{{- end }}


@ -1,86 +0,0 @@
{{/*
#
# Copyright (c) 2019 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
*/}}
{{- if .Values.manifests.job_ceph_pools_audit }}
{{- $envAll := . }}
{{- $serviceAccountName := "ceph-pools-audit" }}
{{ tuple $envAll "job_ceph_pools_audit" $serviceAccountName | include "helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount" }}
---
#
# The CronJob makes sure all the Ceph pools have the right replication,
# as present in the attributes of the Ceph backends.
# This is needed for:
# - charts that don't manage pool configuration
# - pools created dynamically by services that may not have the current
# pool configuration uploaded (ex: swift)
# - when replication is changed and we don't want to reinstall all the
# charts that created Ceph pools
#
apiVersion: batch/v1beta1
kind: CronJob
metadata:
name: ceph-pools-audit
spec:
schedule: {{ .Values.jobs.job_ceph_pools_audit.cron | quote }}
successfulJobsHistoryLimit: {{ .Values.jobs.job_ceph_pools_audit.history.success }}
failedJobsHistoryLimit: {{ .Values.jobs.job_ceph_pools_audit.history.failed }}
concurrencyPolicy: Forbid
jobTemplate:
metadata:
name: "{{$envAll.Release.Name}}"
namespace: {{ $envAll.Release.Namespace }}
labels:
app: ceph-pools-audit
spec:
template:
metadata:
labels:
app: ceph-pools-audit
spec:
serviceAccountName: {{ $serviceAccountName }}
restartPolicy: OnFailure
nodeSelector:
{{ .Values.labels.job.node_selector_key }}: {{ .Values.labels.job.node_selector_value }}
volumes:
- name: ceph-pools-bin
configMap:
name: ceph-pools-bin
defaultMode: 0555
- name: etcceph
emptyDir: {}
- name: ceph-etc
configMap:
name: {{ $envAll.Values.ceph_client.configmap }}
defaultMode: 0444
containers:
{{- range $tierConfig := $envAll.Values.conf.ceph.storage_tiers }}
- name: ceph-pools-audit-{{- $tierConfig.name }}
image: {{ $envAll.Values.images.tags.ceph_config_helper | quote }}
env:
- name: RBD_POOL_REPLICATION
value: {{ $tierConfig.replication | quote }}
- name: RBD_POOL_MIN_REPLICATION
value: {{ $tierConfig.min_replication | quote }}
- name: RBD_POOL_CRUSH_RULE_NAME
value: {{ $tierConfig.crush_rule_name | quote }}
command:
- /tmp/ceph-pools-audit.sh
volumeMounts:
- name: ceph-pools-bin
mountPath: /tmp/ceph-pools-audit.sh
subPath: ceph-pools-audit.sh
readOnly: true
- name: etcceph
mountPath: /etc/ceph
- name: ceph-etc
mountPath: /etc/ceph/ceph.conf
subPath: ceph.conf
readOnly: true
{{- end }}
{{- end }}


@ -1,69 +0,0 @@
#
# Copyright (c) 2019 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
replicaCount: 1
labels:
job:
node_selector_key: node-role.kubernetes.io/master
node_selector_value: ""
name: ceph-pools-audit
ceph_client:
configmap: ceph-etc
conf:
ceph:
storage_tiers:
- name: ceph-store
replication: 2
min_replication: 1
crush_rule_name: storage_tier_ruleset
monitors: []
images:
tags:
ceph_config_helper: docker.io/starlingx/ceph-config-helper:v1.15.0
pullPolicy: "IfNotPresent"
local_registry:
active: false
exclude:
- dep_check
- image_repo_sync
dependencies:
dynamic:
common:
local_image_registry:
jobs:
- image-repo-sync
services:
- endpoint: node
service: local_image_registry
static:
image_repo_sync:
services:
- endpoint: internal
service: local_image_registry
jobs:
job_ceph_pools_audit:
cron: "*/5 * * * *"
history:
success: 3
failed: 1
resources: {}
nodeSelector: { node-role.kubernetes.io/master: "" }
tolerations: []
affinity: {}
manifests:
job_ceph_pools_audit: true
configmap_bin: true
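As a usage sketch (assuming the packaged chart is reachable by Helm, e.g.
through the local chart repository set up by the build), the audit schedule
can be overridden at install time:

helm upgrade --install ceph-pools-audit local/ceph-pools-audit \
  --namespace kube-system \
  --set jobs.job_ceph_pools_audit.cron="*/10 * * * *"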


@ -1,10 +0,0 @@
#
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
apiVersion: v1
description: rbd provisioner chart
name: rbd-provisioner
version: 0.1.0


@ -1,5 +0,0 @@
RBD Provisioner Chart
-------------------------------------------------------------------------------
This chart was last validated with:
* Repo: https://github.com/kubernetes-incubator/external-storage.git
* Commit: (6776bba1) Merge pull request #1048 from AdamDang/patch-3


@ -1,22 +0,0 @@
#
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
classdefaults:
adminId: admin
adminSecretName: ceph-admin
monitors:
- 192.168.204.4:6789
- 192.168.204.3:6789
- 192.168.204.22:6789
classes:
- name: rbd
pool: kube-rbd
userId: ceph-pool-kube-rbd
userSecretName: ceph-pool-kube-rbd
- name: gold-rbd
pool: kube-rbd-gold
userId: ceph-pool-gold-kube-rbd-gold
userSecretName: ceph-pool-gold-kube-rbd-gold


@ -1,17 +0,0 @@
#
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
classes:
- name: slow-rbd
monitors:
- 192.168.204.3:6789
- 192.168.204.150:6789
- 192.168.204.4:6789
adminId: admin
adminSecretName: ceph-secret
pool: kube
userId: kube
userSecretName: ceph-secret-kube


@ -1,9 +0,0 @@
#
# Copyright (c) 2019 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
dependencies:
- name: helm-toolkit
repository: http://localhost:8879/charts
version: 0.1.0


@ -1,40 +0,0 @@
{{/*
#
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
*/}}
{{- if .Values.global.rbac }}
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ .Values.rbac.clusterRole }}
rules:
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "create", "delete"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch", "patch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "update", "patch"]
- apiGroups: [""]
resources: ["endpoints"]
verbs: ["get", "list", "watch", "create", "update", "patch"]
- apiGroups: [""]
resources: ["services"]
resourceNames: ["kube-dns"]
verbs: ["list", "get"]
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "create", "list", "update"]
- apiGroups: [""]
resources: ["namespaces"]
verbs: ["get", "create", "list", "update"]
{{- end}}


@ -1,22 +0,0 @@
{{/*
#
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
*/}}
{{- if .Values.global.rbac }}
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ .Values.rbac.clusterRoleBinding }}
subjects:
- kind: ServiceAccount
name: {{ .Values.rbac.serviceAccount }}
namespace: {{ .Release.Namespace }}
roleRef:
kind: ClusterRole
name: {{ .Values.rbac.clusterRole }}
apiGroup: rbac.authorization.k8s.io
{{- end}}


@ -1,47 +0,0 @@
{{/*
#
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
*/}}
{{- if eq .Values.global.deployAs "DaemonSet" }}
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
name: {{ .Values.global.name }}
namespace: {{ .Release.Namespace }}
labels:
app: {{ .Values.global.name }}
spec:
selector:
matchLabels:
app: {{ .Values.global.name }}
template:
metadata:
labels:
app: {{ .Values.global.name }}
spec:
{{- if (.Values.global.rbac) or (.Values.global.reuseRbac)}}
serviceAccountName: {{.Values.rbac.serviceAccount}}
{{- end}}
{{- if .Values.global.tolerations }}
tolerations:
{{ .Values.global.tolerations | toYaml | trim | indent 8 }}
{{- end }}
{{- if .Values.global.nodeSelector }}
nodeSelector:
{{ .Values.global.nodeSelector | toYaml | trim | indent 8 }}
{{- end }}
containers:
- image: {{ .Values.global.image | quote }}
name: {{ .Values.global.name }}
{{- if .Values.global.resources }}
resources:
{{ .Values.global.resources | toYaml | trim | indent 12 }}
{{- end }}
env:
- name: PROVISIONER_NAME
value: {{ .Values.global.provisioner_name }}
{{- end}}


@ -1,55 +0,0 @@
{{/*
#
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
*/}}
{{- if eq .Values.global.deployAs "Deployment" }}
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: {{ .Values.global.name }}
namespace: {{ .Release.Namespace }}
spec:
replicas: {{ .Values.global.replicas }}
strategy:
type: Recreate
template:
metadata:
labels:
app: {{ .Values.global.name }}
spec:
{{- if (.Values.global.rbac) or (.Values.global.reuseRbac)}}
serviceAccount: {{ .Values.rbac.serviceAccount }}
{{- end }}
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: app
operator: In
values:
- {{ .Values.global.name }}
topologyKey: kubernetes.io/hostname
containers:
- name: {{ .Values.global.name }}
image: {{ .Values.images.tags.rbd_provisioner | quote }}
env:
- name: PROVISIONER_NAME
value: {{ .Values.global.provisioner_name }}
{{- if .Values.global.nodeSelector }}
nodeSelector:
{{ .Values.global.nodeSelector | toYaml | trim | indent 8 }}
{{- end }}
{{- if .Values.global.tolerations }}
tolerations:
{{ .Values.global.tolerations | toYaml | trim | indent 8 }}
{{- end}}
{{- if .Values.global.resources }}
resources:
{{ .Values.global.resources | toYaml | trim | indent 8 }}
{{- end }}
{{- end }}


@ -1,213 +0,0 @@
{{/*
#
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
*/}}
{{- if .Values.global.job_storage_init }}
{{ $root := . }}
{{ $defaults := .Values.classdefaults}}
{{ $mount := "/tmp/mount" }}
---
apiVersion: v1
kind: ConfigMap
metadata:
creationTimestamp: 2016-02-18T19:14:38Z
name: config-{{- $root.Values.global.name }}
namespace: {{ $root.Release.Namespace }}
data:
ceph.conf: |
{{ $monitors := $defaults.monitors }}{{ range $index, $element := $monitors}}
[mon.{{- $index }}]
mon_addr = {{ $element }}
{{- end }}
check_ceph.sh: |-
#!/bin/bash
# Copy from read only mount to Ceph config folder
cp {{ $mount -}}/ceph.conf /etc/ceph/
if [ -n "${CEPH_ADMIN_SECRET}" ]; then
kubectl get secret -n ${NAMESPACE} | grep ${CEPH_ADMIN_SECRET}
if [ $? -ne 0 ]; then
echo "Create ${CEPH_ADMIN_SECRET} secret"
kubectl create secret generic ${CEPH_ADMIN_SECRET} --type="kubernetes.io/rbd" --from-literal=key= --namespace=${NAMESPACE}
if [ $? -ne 0 ]; then
echo "Error creating secret ${CEPH_ADMIN_SECRET}, exit"
exit 1
fi
fi
fi
touch /etc/ceph/ceph.client.admin.keyring
# Check if ceph is accessible
echo "===================================="
ceph -s
if [ $? -ne 0 ]; then
echo "Error: Ceph cluster is not accessible, check Pod logs for details."
exit 1
fi
set -ex
# Make sure the pool exists.
ceph osd pool stats ${POOL_NAME} || ceph osd pool create ${POOL_NAME} ${POOL_CHUNK_SIZE}
# Set pool configuration.
ceph osd pool application enable $POOL_NAME rbd
ceph osd pool set ${POOL_NAME} size ${POOL_REPLICATION}
ceph osd pool set ${POOL_NAME} crush_rule ${POOL_CRUSH_RULE_NAME}
set +ex
if [[ -z "${USER_ID}" && -z "${CEPH_USER_SECRET}" ]]; then
echo "No need to create secrets for pool ${POOL_NAME}"
exit 0
fi
set -ex
KEYRING=$(ceph auth get-or-create client.${USER_ID} mon "allow r" osd "allow rwx pool=${POOL_NAME}" | sed -n 's/^[[:blank:]]*key[[:blank:]]\+=[[:blank:]]\(.*\)/\1/p')
# Set up pool key in Ceph format
CEPH_USER_KEYRING=/etc/ceph/ceph.client.${USER_ID}.keyring
echo $KEYRING > $CEPH_USER_KEYRING
set +ex
if [ -n "${CEPH_USER_SECRET}" ]; then
kubectl get secret -n ${NAMESPACE} ${CEPH_USER_SECRET} 2>/dev/null
if [ $? -ne 0 ]; then
echo "Create ${CEPH_USER_SECRET} secret"
kubectl create secret generic -n ${NAMESPACE} ${CEPH_USER_SECRET} --type="kubernetes.io/rbd" --from-literal=key=$KEYRING
if [ $? -ne 0 ]; then
echo"Error creating secret ${CEPH_USER_SECRET} in ${NAMESPACE}, exit"
exit 1
fi
else
echo "Secret ${CEPH_USER_SECRET} already exists"
fi
# Support creating namespaces and Ceph user secrets for additional
# namespaces other than the one in which the provisioner is installed. This
# allows the provisioner to set up and provide PVs for multiple
# applications across many namespaces.
if [ -n "${ADDITIONAL_NAMESPACES}" ]; then
for ns in $(IFS=,; echo ${ADDITIONAL_NAMESPACES}); do
kubectl get namespace $ns 2>/dev/null
if [ $? -ne 0 ]; then
kubectl create namespace $ns
if [ $? -ne 0 ]; then
echo "Error creating namespace $ns, exit"
continue
fi
fi
kubectl get secret -n $ns ${CEPH_USER_SECRET} 2>/dev/null
if [ $? -ne 0 ]; then
echo "Creating secret ${CEPH_USER_SECRET} for namespace $ns"
kubectl create secret generic -n $ns ${CEPH_USER_SECRET} --type="kubernetes.io/rbd" --from-literal=key=$KEYRING
if [ $? -ne 0 ]; then
echo "Error creating secret ${CEPH_USER_SECRET} in $ns, exit"
fi
else
echo "Secret ${CEPH_USER_SECRET} for namespace $ns already exists"
fi
done
fi
fi
# Check if pool is accessible using provided credentials
echo "====================================="
timeout --preserve-status 10 rbd -p ${POOL_NAME} --user ${USER_ID} ls -K $CEPH_USER_KEYRING
RET=$?
if [ $RET -ne 143 ]; then
if [ $RET -ne 0 ]; then
echo "Error: Ceph pool ${POOL_NAME} is not accessible using credentials for user ${USER_ID}, check Pod logs for details."
exit 1
else
echo "Pool ${POOL_NAME} accessible"
fi
else
echo "rbd command timed out and was sent a SIGTERM. Make sure OSDs have been provisioned."
fi
ceph -s
---
apiVersion: batch/v1
kind: Job
metadata:
name: storage-init-{{- $root.Values.global.name }}
namespace: {{ $root.Release.Namespace }}
labels:
heritage: {{$root.Release.Service | quote }}
release: {{$root.Release.Name | quote }}
chart: "{{$root.Chart.Name}}-{{$root.Chart.Version}}"
annotations:
"helm.sh/hook": "post-install, pre-upgrade, pre-rollback"
"helm.sh/hook-delete-policy": "before-hook-creation"
spec:
backoffLimit: 5 # Limit the number of job restarts in case of failure: ~5 minutes.
activeDeadlineSeconds: 360
template:
metadata:
name: "{{$root.Release.Name}}"
namespace: {{ $root.Release.Namespace }}
labels:
heritage: {{$root.Release.Service | quote }}
release: {{$root.Release.Name | quote }}
chart: "{{$root.Chart.Name}}-{{$root.Chart.Version}}"
spec:
serviceAccountName: {{ $root.Values.rbac.serviceAccount }}
restartPolicy: OnFailure
volumes:
- name: config-volume-{{- $root.Values.global.name }}
configMap:
name: config-{{- $root.Values.global.name }}
containers:
{{- range $classConfig := $root.Values.classes }}
- name: storage-init-{{- $classConfig.name }}
image: {{ $root.Values.images.tags.rbd_provisioner_storage_init | quote }}
command: [ "/bin/bash", "{{ $mount }}/check_ceph.sh" ]
env:
- name: NAMESPACE
value: {{ $root.Release.Namespace }}
- name: ADDITIONAL_NAMESPACES
value: {{ include "helm-toolkit.utils.joinListWithComma" $classConfig.additionalNamespaces | quote }}
- name: CEPH_ADMIN_SECRET
value: {{ $defaults.adminSecretName }}
- name: CEPH_USER_SECRET
value: {{ $classConfig.userSecretName }}
- name: USER_ID
value: {{ $classConfig.userId }}
- name: POOL_NAME
value: {{ $classConfig.pool_name }}
- name: POOL_REPLICATION
value: {{ $classConfig.replication | quote }}
- name: POOL_CRUSH_RULE_NAME
value: {{ $classConfig.crush_rule_name | quote }}
- name: POOL_CHUNK_SIZE
value: {{ $classConfig.chunk_size | quote }}
volumeMounts:
- name: config-volume-{{- $root.Values.global.name }}
mountPath: {{ $mount }}
{{- end }}
{{- if .Values.global.nodeSelector }}
nodeSelector:
{{ .Values.global.nodeSelector | toYaml | trim | indent 8 }}
{{- end }}
---
# This ConfigMap is needed because we're not using ceph's helm chart
apiVersion: v1
kind: ConfigMap
metadata:
name: ceph-etc
namespace: {{ $root.Release.Namespace }}
data:
ceph.conf: |
[global]
auth_supported = none
{{ $monitors := $defaults.monitors }}{{ range $index, $element := $monitors}}
[mon.{{- $index }}]
mon_addr = {{ $element }}
{{- end }}
{{- end }}


@ -1,22 +0,0 @@
{{/*
#
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
*/}}
{{- if .Values.global.rbac }}
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: {{ .Values.rbac.role }}
namespace: {{ .Release.Namespace }}
rules:
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "create", "list", "update"]
- apiGroups: [""]
resources: ["namespaces"]
verbs: ["get", "create", "list", "update"]
{{- end}}


@ -1,23 +0,0 @@
{{/*
#
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
*/}}
{{- if .Values.global.rbac }}
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: {{ .Values.rbac.roleBinding }}
namespace: {{ .Release.Namespace }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: {{ .Values.rbac.role }}
subjects:
- kind: ServiceAccount
name: {{ .Values.rbac.serviceAccount }}
namespace: {{ .Release.Namespace }}
{{- end}}


@ -1,17 +0,0 @@
{{/*
#
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
*/}}
{{- if .Values.global.rbac }}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ .Values.rbac.serviceAccount }}
namespace: {{ .Release.Namespace }}
imagePullSecrets:
- name: default-registry-key
{{- end }}


@ -1,36 +0,0 @@
{{/*
#
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
*/}}
{{- if .Values.global.provisionStorageClass }}
{{ $namespace := .Release.Namespace }}
{{ $defaults := .Values.classdefaults}}
{{ $provisioner := .Values.global.provisioner_name }}
{{ $defaultSC := .Values.global.defaultStorageClass }}
{{- range $classConfig := .Values.classes }}
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
{{- if eq $defaultSC $classConfig.name}}
annotations:
"storageclass.kubernetes.io/is-default-class": "true"
{{- end }}
name: {{ $classConfig.name }}
provisioner: {{ $provisioner }}
parameters:
monitors: "{{ $monitors := or $classConfig.monitors $defaults.monitors }}{{ join "," $monitors}}"
adminId: {{ or $classConfig.adminId $defaults.adminId}}
adminSecretName: {{ or $classConfig.adminSecretName $defaults.adminSecretName }}
adminSecretNamespace: {{ $namespace }}
pool: {{ or $classConfig.pool_name $defaults.pool_name }}
userId: {{ or $classConfig.userId $defaults.userId }}
userSecretName: {{ $classConfig.userSecretName }}
imageFormat: {{ or $classConfig.imageFormat $defaults.imageFormat | quote }}
imageFeatures: {{ or $classConfig.imageFeatures $defaults.imageFeatures}}
---
{{- end }}
{{- end }}
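With the defaults from the chart's values.yaml (shown next: class "fast-rbd",
pool "kube"), this template renders roughly the StorageClass below; the admin
secret namespace follows the release namespace, assumed here to be kube-system:

apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  annotations:
    "storageclass.kubernetes.io/is-default-class": "true"
  name: fast-rbd
provisioner: ceph.com/rbd
parameters:
  monitors: "192.168.204.3:6789,192.168.204.150:6789,192.168.204.4:6789"
  adminId: admin
  adminSecretName: ceph-secret
  adminSecretNamespace: kube-system
  pool: kube
  userId: kube
  userSecretName: ceph-secret-kube
  imageFormat: "2"
  imageFeatures: layering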


@ -1,174 +0,0 @@
#
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
#
# Global options.
# Defaults should be fine in most cases.
global:
#
# Defines the application name of the provisioner.
#
name: "rbd-provisioner"
#
# Defines the name of the provisioner associated with a set of storage classes
#
provisioner_name: "ceph.com/rbd"
#
# Execute initialization job to verify external Ceph cluster access
# and set up additional dependencies assumed by dependent helm charts
# (i.e. configmap and secrets).
# Skipping is not recommended.
#
job_storage_init: true
#
# Defines whether to reuse an already defined RBAC policy.
# Make sure that the serviceAccount defined in the RBAC section matches the one
# in the policy you reuse.
#
reuseRbac: false
#
# Defines whether to generate service account and role bindings.
#
rbac: true
#
# Provision storage class. If false you have to provision storage classes by hand.
#
provisionStorageClass: true
#
# Enable this storage class as the system default storage class
#
defaultStorageClass: fast-rbd
#
# Choose whether the rbd-provisioner pod should be deployed as a Deployment or a DaemonSet
# Values: none, Deployment, DaemonSet
#
deployAs: Deployment
#
# If configured, tolerations will add a toleration field to the Pod.
#
# Node tolerations for rbd-volume-provisioner scheduling to nodes with taints.
# Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
# Example:
# [
# {
# "key": "node-role.kubernetes.io/master",
# "operator": "Exists"
# }
# ]
#
tolerations: []
# If configured, resources will set the requests/limits field to the Pod.
# Ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
# Example:
# {
# "limits": {
# "memory": "200Mi"
# },
# "requests": {
# "cpu": "100m",
# "memory": "200Mi"
# }
# }
resources: {}
#
# Number of replicas to start when configured as deployment
#
replicas: 1
#
# Node Selector
#
nodeSelector: { node-role.kubernetes.io/master: "" }
#
# RBAC options.
# Defaults should be fine in most cases.
rbac:
#
# Cluster Role name
#
clusterRole: rbd-provisioner
#
# Cluster Role Binding name
#
clusterRoleBinding: rbd-provisioner
#
# Role name
#
role: rbd-provisioner
#
# Role Binding name
#
roleBinding: rbd-provisioner
#
# Defines the name of the service account that the provisioner uses to communicate with the API server.
#
serviceAccount: rbd-provisioner
#
# Configure storage classes.
# Defaults for storage classes. Update this if you have a single Ceph storage cluster.
# No need to add them to each class.
#
classdefaults:
# Define ip addresses of Ceph Monitors
monitors:
- 192.168.204.3:6789
- 192.168.204.150:6789
- 192.168.204.4:6789
# Ceph admin account
adminId: admin
# K8 secret name for the admin context
adminSecretName: ceph-secret
# Ceph RBD image format version
imageFormat: 2
# Ceph RBD image features.
imageFeatures: layering
#
# Configure storage classes.
# This section should be tailored to your setup. It allows you to define multiple storage
# classes for the same cluster (e.g. if you have tiers of drives with different speeds).
# If you have multiple Ceph clusters take attributes from classdefaults and add them here.
classes:
- name: fast-rbd # Name of storage class.
# Ceph pool name
pool_name: kube
# Ceph user name to access this pool
userId: kube
# K8 secret name with key for accessing the Ceph pool
userSecretName: ceph-secret-kube
# Pool replication
replication: 1
# Pool crush rule name
crush_rule_name: storage_tier_ruleset
# Pool chunk size / PG_NUM
chunk_size: 8
# Additional namespace to allow storage class access (other than where
# installed)
additionalNamespaces:
- default
- kube-public
# Configuration data for the ephemeral pool(s)
ephemeral_pools:
- chunk_size: 8
crush_rule_name: storage_tier_ruleset
pool_name: ephemeral
replication: 1
#
# Defines:
# - Provisioner's image name including container registry.
# - CEPH helper image
#
images:
tags:
rbd_provisioner: quay.io/external_storage/rbd-provisioner:v2.1.1-k8s1.11
rbd_provisioner_storage_init: docker.io/starlingx/ceph-config-helper:v1.15.0
pull_policy: "IfNotPresent"
local_registry:
active: false
exclude:
- dep_check
- image_repo_sync
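As a usage sketch, once the provisioner is running and the "fast-rbd" class
above exists, a volume can be requested with an ordinary PVC (the claim name
and size are arbitrary examples):

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: example-claim
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: fast-rbd
  resources:
    requests:
      storage: 1Gi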


@ -1,8 +0,0 @@
SRC_DIR="stx-platform-helm"
COPY_LIST_TO_TAR="\
$PKG_BASE/../../../helm-charts/node-feature-discovery \
$PKG_BASE/../../../helm-charts/rbd-provisioner \
$PKG_BASE/../../../helm-charts/ceph-pools-audit"
TIS_PATCH_VER=7


@ -1,103 +0,0 @@
# Application tunables (maps to metadata)
%global app_name platform-integ-apps
%global helm_repo stx-platform
# Install location
%global app_folder /usr/local/share/applications/helm
# Build variables
%global helm_folder /usr/lib/helm
%global toolkit_version 0.1.0
Summary: StarlingX Platform Helm charts
Name: stx-platform-helm
Version: 1.0
Release: %{tis_patch_ver}%{?_tis_dist}
License: Apache-2.0
Group: base
Packager: Wind River <info@windriver.com>
URL: unknown
Source0: %{name}-%{version}.tar.gz
BuildArch: noarch
BuildRequires: helm
BuildRequires: openstack-helm-infra
%description
StarlingX Platform Helm charts
%prep
%setup
%build
# initialize helm and build the toolkit
# helm init --client-only does not work if there is no networking
# The following commands do essentially the same as: helm init
%define helm_home %{getenv:HOME}/.helm
mkdir %{helm_home}
mkdir %{helm_home}/repository
mkdir %{helm_home}/repository/cache
mkdir %{helm_home}/repository/local
mkdir %{helm_home}/plugins
mkdir %{helm_home}/starters
mkdir %{helm_home}/cache
mkdir %{helm_home}/cache/archive
# Stage a repository file that only has a local repo
cp files/repositories.yaml %{helm_home}/repository/repositories.yaml
# Stage a local repo index that can be updated by the build
cp files/index.yaml %{helm_home}/repository/local/index.yaml
# Stage helm-toolkit in the local repo
cp %{helm_folder}/helm-toolkit-%{toolkit_version}.tgz .
# Host a server for the charts
helm serve --repo-path . &
helm repo rm local
helm repo add local http://localhost:8879/charts
# Make the charts. These produce a tgz file
make rbd-provisioner
make ceph-pools-audit
make node-feature-discovery
# Terminate helm server (the last backgrounded task)
kill %1
# Create a chart tarball compliant with sysinv kube-app.py
%define app_staging %{_builddir}/staging
%define app_tarball %{app_name}-%{version}-%{tis_patch_ver}.tgz
# Setup staging
mkdir -p %{app_staging}
cp files/metadata.yaml %{app_staging}
cp manifests/manifest.yaml %{app_staging}
mkdir -p %{app_staging}/charts
cp *.tgz %{app_staging}/charts
cd %{app_staging}
# Populate metadata
sed -i 's/@APP_NAME@/%{app_name}/g' %{app_staging}/metadata.yaml
sed -i 's/@APP_VERSION@/%{version}-%{tis_patch_ver}/g' %{app_staging}/metadata.yaml
sed -i 's/@HELM_REPO@/%{helm_repo}/g' %{app_staging}/metadata.yaml
# package it up
find . -type f ! -name '*.md5' -print0 | xargs -0 md5sum > checksum.md5
tar -zcf %{_builddir}/%{app_tarball} -C %{app_staging}/ .
# Cleanup staging
rm -fr %{app_staging}
%install
install -d -m 755 %{buildroot}/%{app_folder}
install -p -D -m 755 %{_builddir}/%{app_tarball} %{buildroot}/%{app_folder}
install -d -m 755 ${RPM_BUILD_ROOT}/opt/extracharts
install -p -D -m 755 node-feature-discovery-*.tgz ${RPM_BUILD_ROOT}/opt/extracharts
%files
%defattr(-,root,root,-)
%{app_folder}/*
/opt/extracharts/*
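For reference, with TIS_PATCH_VER=7 from build_srpm.data, the %build section
above yields an application tarball roughly like the following (chart versions
other than 0.1.0 are placeholders; exact entry ordering may differ):

$ tar -tzf platform-integ-apps-1.0-7.tgz
./metadata.yaml
./manifest.yaml
./checksum.md5
./charts/helm-toolkit-0.1.0.tgz
./charts/rbd-provisioner-0.1.0.tgz
./charts/ceph-pools-audit-0.1.0.tgz
./charts/node-feature-discovery-<version>.tgz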


@ -1,43 +0,0 @@
#
# Copyright 2017 The Openstack-Helm Authors.
#
# Copyright (c) 2019 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# It's necessary to set this because some environments don't link sh -> bash.
SHELL := /bin/bash
TASK := build
EXCLUDES := helm-toolkit doc tests tools logs tmp
CHARTS := helm-toolkit $(filter-out $(EXCLUDES), $(patsubst %/.,%,$(wildcard */.)))
.PHONY: $(EXCLUDES) $(CHARTS)
all: $(CHARTS)
$(CHARTS):
@if [ -d $@ ]; then \
echo; \
echo "===== Processing [$@] chart ====="; \
make $(TASK)-$@; \
fi
init-%:
if [ -f $*/Makefile ]; then make -C $*; fi
if [ -f $*/requirements.yaml ]; then helm dep up $*; fi
lint-%: init-%
if [ -d $* ]; then helm lint $*; fi
build-%: lint-%
if [ -d $* ]; then helm package $*; fi
clean:
@echo "Clean all build artifacts"
rm -f */templates/_partials.tpl */templates/_globals.tpl
rm -f *tgz */charts/*tgz */requirements.lock
rm -rf */charts */tmpcharts
%:
@:
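As a usage sketch, building a single chart with this Makefile (assuming helm is
installed and the helm-toolkit dependency is being served, as arranged by the
spec file earlier in this commit):

make rbd-provisioner   # helm dep up + helm lint + helm package -> rbd-provisioner-0.1.0.tgz
make clean             # remove packaged charts, fetched dependencies and lock files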


@ -1,6 +0,0 @@
This directory contains all StarlingX charts that need to be built to support
platform integration immediately after installation. Some charts are common
across applications. These common charts reside in the
stx-config/kubernetes/helm-charts directory. To include them in this
application, update the build_srpm.data file and use the COPY_LIST_TO_TAR
mechanism to populate these common charts.


@ -1,3 +0,0 @@
apiVersion: v1
entries: {}
generated: 2019-01-07T12:33:46.098166523-06:00


@ -1,3 +0,0 @@
app_name: @APP_NAME@
app_version: @APP_VERSION@
helm_repo: @HELM_REPO@


@ -1,12 +0,0 @@
apiVersion: v1
generated: 2019-01-02T15:19:36.215111369-06:00
repositories:
- caFile: ""
cache: /builddir/.helm/repository/cache/local-index.yaml
certFile: ""
keyFile: ""
name: local
password: ""
url: http://127.0.0.1:8879/charts
username: ""


@ -1,94 +0,0 @@
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: helm-toolkit
data:
chart_name: helm-toolkit
release: helm-toolkit
namespace: helm-toolkit
values: {}
source:
type: tar
location: http://172.17.0.1:8080/helm_charts/stx-platform/helm-toolkit-0.1.0.tgz
subpath: helm-toolkit
reference: master
dependencies: []
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: kube-system-rbd-provisioner
data:
chart_name: rbd-provisioner
release: rbd-provisioner
namespace: kube-system
wait:
timeout: 1800
labels:
app: rbd-provisioner
install:
no_hooks: false
upgrade:
no_hooks: false
pre:
delete:
- type: job
labels:
app: rbd-provisioner
source:
type: tar
location: http://172.17.0.1:8080/helm_charts/stx-platform/rbd-provisioner-0.1.0.tgz
subpath: rbd-provisioner
reference: master
dependencies:
- helm-toolkit
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: kube-system-ceph-pools-audit
data:
chart_name: ceph-pools-audit
release: ceph-pools-audit
namespace: kube-system
wait:
timeout: 1800
labels:
app: ceph-pools-audit
install:
no_hooks: false
upgrade:
no_hooks: false
pre:
delete:
- type: job
labels:
app: ceph-pools-audit
source:
type: tar
location: http://172.17.0.1:8080/helm_charts/stx-platform/ceph-pools-audit-0.1.0.tgz
subpath: ceph-pools-audit
reference: master
dependencies:
- helm-toolkit
---
schema: armada/ChartGroup/v1
metadata:
schema: metadata/Document/v1
name: starlingx-ceph-charts
data:
description: StarlingX Ceph Charts
sequenced: true
chart_group:
- kube-system-rbd-provisioner
- kube-system-ceph-pools-audit
---
schema: armada/Manifest/v1
metadata:
schema: metadata/Document/v1
name: platform-integration-manifest
data:
release_prefix: stx
chart_groups:
- starlingx-ceph-charts
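The manifest above is consumed by the StarlingX application framework rather
than applied directly with Armada. As a rough sketch (the CLI commands are an
assumption about the platform tooling, not part of this change):

system application-upload /usr/local/share/applications/helm/platform-integ-apps-1.0-7.tgz
system application-apply platform-integ-apps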


@ -27,10 +27,9 @@ commands =
-print0 | xargs -0 bashate -v -e E* \
-i E006,E010"
bash -c "find {toxinidir} \
\( -name .tox \
-o -path {toxinidir}/kubernetes \) -prune \
-o -type f -name '*.yaml' \
bash -c "find {toxinidir} \
-name .tox -prune \
-o -type f -name '*.yaml' \
-print0 | xargs -0 yamllint -f parsable \
-c {toxinidir}/.yamllint"