{{/*
#
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
*/}}
{{- if .Values.global.job_storage_init }}
|
|
{{ $root := . }}
|
|
{{ $defaults := .Values.classdefaults}}
|
|
{{ $mount := "/tmp/mount" }}
|
|
---
apiVersion: v1
kind: ConfigMap
metadata:
  # NOTE(review): a hard-coded creationTimestamp (leftover from a
  # "kubectl get -o yaml" export) was removed here; it is a server-managed
  # field and must not be set in a chart.
  name: config-{{- $root.Values.global.name }}
  namespace: {{ $root.Values.global.namespace }}
data:
  # Minimal ceph.conf listing the configured monitor addresses.
  ceph.conf: |
    {{ $monitors := $defaults.monitors }}{{ range $index, $element := $monitors}}
    [mon.{{- $index }}]
    mon_addr = {{ $element }}
    {{- end }}

  # Storage init script: verify cluster access, ensure the pool exists and
  # is configured, and create the Kubernetes secrets used by the provisioner.
  check_ceph.sh: |-
    #!/bin/bash

    # Copy from read only mount to Ceph config folder
    cp {{ $mount -}}/ceph.conf /etc/ceph/

    # Ensure the admin secret exists; create an empty placeholder if missing.
    # ('-n' with quoting replaces the original unquoted '! -z' test, which
    # misbehaves on values containing whitespace.)
    if [ -n "$CEPH_ADMIN_SECRET" ]; then
      kubectl get secret -n kube-system | grep "$CEPH_ADMIN_SECRET"
      ret=$?
      if [ $ret -ne 0 ]; then
        msg="Create $CEPH_ADMIN_SECRET secret"
        echo "$msg"
        kubectl create secret generic "$CEPH_ADMIN_SECRET" --type="kubernetes.io/rbd" --from-literal=key= --namespace="$NAMESPACE"
        ret=$?
        if [ $ret -ne 0 ]; then
          msg="Error creating secret $CEPH_ADMIN_SECRET, exit"
          echo "$msg"
          exit $ret
        fi
      fi
    fi

    touch /etc/ceph/ceph.client.admin.keyring

    # Check if ceph is accessible
    echo "===================================="
    ceph -s
    ret=$?
    if [ $ret -ne 0 ]; then
      msg="Error: Ceph cluster is not accessible, check Pod logs for details."
      echo "$msg"
      exit $ret
    fi

    set -ex

    # Get the ruleset from the rule name.
    ruleset=$(ceph osd crush rule dump "$POOL_CRUSH_RULE_NAME" | grep "\"ruleset\":" | grep -Eo '[0-9]*')

    # Make sure the pool exists.
    ceph osd pool stats "$POOL_NAME" || ceph osd pool create "$POOL_NAME" "$POOL_CHUNK_SIZE"

    # Set pool configuration.
    ceph osd pool set "$POOL_NAME" size "$POOL_REPLICATION"
    ceph osd pool set "$POOL_NAME" crush_rule "$ruleset"

    # Nothing else to do when no user/secret was requested for this pool.
    if [[ -z $USER_ID && -z $CEPH_USER_SECRET ]]; then
      msg="No need to create secrets for pool $POOL_NAME"
      echo "$msg"
      exit 0
    fi

    # Extract the bare key value from the keyring output.
    KEYRING=$(ceph auth get-or-create client.$USER_ID mon "allow r" osd "allow rwx pool=${POOL_NAME}" | sed -n 's/^[[:blank:]]*key[[:blank:]]\+=[[:blank:]]\(.*\)/\1/p')
    # Set up pool key in Ceph format
    CEPH_USER_KEYRING=/etc/ceph/ceph.client.$USER_ID.keyring
    echo "$KEYRING" > "$CEPH_USER_KEYRING"

    # Create the user secret in every requested namespace (comma-separated).
    IFS=',' read -a POOL_SECRET_NAMESPACES_ARR <<< "${POOL_SECRET_NAMESPACES}"
    for pool_secret_namespace in "${POOL_SECRET_NAMESPACES_ARR[@]}"
    do
      kubectl create secret generic "$CEPH_USER_SECRET" --type="kubernetes.io/rbd" --from-literal=key="$KEYRING" --namespace="$pool_secret_namespace"
    done

    set +ex

    # Check if pool is accessible using provided credentials
    echo "====================================="
    rbd -p "$POOL_NAME" --user "$USER_ID" ls -K "$CEPH_USER_KEYRING"
    ret=$?
    if [ $ret -ne 0 ]; then
      msg="Error: Ceph pool $POOL_NAME is not accessible using \
    credentials for user $USER_ID, check Pod logs for details."
      echo "$msg"
      exit $ret
    else
      msg="Pool $POOL_NAME accessible"
      echo "$msg"
    fi

    ceph -s
---
apiVersion: batch/v1
kind: Job
metadata:
  name: rbd-provisioner-storage-init
  namespace: {{ $root.Values.global.namespace }}
  labels:
    heritage: {{ $root.Release.Service | quote }}
    release: {{ $root.Release.Name | quote }}
    chart: "{{ $root.Chart.Name }}-{{ $root.Chart.Version }}"
spec:
  # Limit the number of job restarts in case of failure.
  backoffLimit: 3
  activeDeadlineSeconds: 180
  template:
    metadata:
      name: "{{ $root.Release.Name }}"
      namespace: {{ $root.Values.global.namespace }}
      labels:
        heritage: {{ $root.Release.Service | quote }}
        release: {{ $root.Release.Name | quote }}
        chart: "{{ $root.Chart.Name }}-{{ $root.Chart.Version }}"
    spec:
      serviceAccountName: {{ $root.Values.rbac.serviceAccount }}
      restartPolicy: OnFailure
      volumes:
        - name: config-volume-{{- $root.Values.global.name }}
          configMap:
            name: config-{{- $root.Values.global.name }}
      containers:
        # One init container per provisioned storage class.
        # All templated env values are piped through "quote" so that empty,
        # numeric or boolean-looking values still render as valid YAML
        # strings (Kubernetes env var values must be strings).
        {{- range $classConfig := $root.Values.classes }}
        - name: storage-init-{{- $classConfig.name }}
          image: {{ $root.Values.images.tags.rbd_provisioner_storage_init | quote }}
          command: [ "/bin/bash", "{{ $mount }}/check_ceph.sh" ]
          env:
            - name: NAMESPACE
              value: {{ $root.Values.global.namespace | quote }}
            - name: POOL_SECRET_NAMESPACES
              value: {{ $classConfig.pool_secrets_namespaces | quote }}
            - name: CEPH_ADMIN_SECRET
              value: {{ $defaults.adminSecretName | quote }}
            - name: CEPH_USER_SECRET
              value: {{ $classConfig.userSecretName | quote }}
            - name: USER_ID
              value: {{ $classConfig.userId | quote }}
            - name: POOL_NAME
              value: {{ $classConfig.pool_name | quote }}
            - name: POOL_REPLICATION
              value: {{ $classConfig.replication | quote }}
            - name: POOL_CRUSH_RULE_NAME
              value: {{ $classConfig.crush_rule_name | quote }}
            - name: POOL_CHUNK_SIZE
              value: {{ $classConfig.chunk_size | quote }}
          volumeMounts:
            - name: config-volume-{{- $root.Values.global.name }}
              mountPath: {{ $mount }}
        {{- end }}
        # One init container per ephemeral pool (no user secret involved).
        {{- range $ephemeralPool := $root.Values.ephemeral_pools }}
        - name: storage-init-{{- $ephemeralPool.pool_name }}
          image: {{ $root.Values.images.tags.rbd_provisioner_storage_init | quote }}
          command: [ "/bin/bash", "{{ $mount }}/check_ceph.sh" ]
          env:
            - name: NAMESPACE
              value: {{ $root.Values.global.namespace | quote }}
            - name: POOL_NAME
              value: {{ $ephemeralPool.pool_name | quote }}
            - name: POOL_REPLICATION
              value: {{ $ephemeralPool.replication | quote }}
            - name: POOL_CRUSH_RULE_NAME
              value: {{ $ephemeralPool.crush_rule_name | quote }}
            - name: POOL_CHUNK_SIZE
              value: {{ $ephemeralPool.chunk_size | quote }}
          volumeMounts:
            - name: config-volume-{{- $root.Values.global.name }}
              mountPath: {{ $mount }}
        {{- end }}
{{- end }}