Enhancements for the rbd-provisioner helm chart

- remove the need to pre-create the openstack namespace: Helm
  creates the namespace automatically when the first chart is
  installed into it (see the sketch after the commit metadata below)
- the rbd-provisioner chart is now installed in the openstack
  namespace and no longer creates resources across multiple
  namespaces

Change-Id: I875acd5959ceca033d9233ddf70ac951db7ca6a8
Story: 2004520
Task: 28395
Signed-off-by: Irina Mihai <irina.mihai@windriver.com>
Irina Mihai 2018-12-10 23:19:54 +00:00
parent e307108c46
commit 6b2be98f0d
12 changed files with 48 additions and 76 deletions
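
For context, the namespace change relies on Helm v2 behaviour: `helm install`/`helm upgrade --install` creates the target namespace on first use if it does not already exist. A minimal sketch (the release name and local chart path are illustrative):

    # Helm v2 creates the namespace passed via --namespace if it is missing,
    # so no explicit "kubectl create namespace openstack" step is required.
    helm upgrade --install openstack-rbd-provisioner \
        ./helm-charts/rbd-provisioner \
        --namespace openstack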

View File

@@ -100,11 +100,11 @@ data:
 schema: armada/Chart/v1
 metadata:
   schema: metadata/Document/v1
-  name: kube-system-rbd-provisioner
+  name: openstack-rbd-provisioner
 data:
   chart_name: rbd-provisioner
-  release: kube-system-rbd-provisioner
-  namespace: kube-system
+  release: openstack-rbd-provisioner
+  namespace: openstack
   wait:
     timeout: 1800
     labels:
@@ -2409,7 +2409,7 @@ data:
   description: "Provisioner"
   sequenced: false
   chart_group:
-    - kube-system-rbd-provisioner
+    - openstack-rbd-provisioner
 ---
 schema: armada/ChartGroup/v1
 metadata:
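
Re-applying the application manifest is what actually moves the release; a sketch, assuming the manifest above is saved locally as manifest.yaml:

    # Armada installs the renamed openstack-rbd-provisioner release into
    # the openstack namespace declared in the chart document above.
    armada apply manifest.yaml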

View File

@@ -100,11 +100,11 @@ data:
 schema: armada/Chart/v1
 metadata:
   schema: metadata/Document/v1
-  name: kube-system-rbd-provisioner
+  name: openstack-rbd-provisioner
 data:
   chart_name: rbd-provisioner
-  release: kube-system-rbd-provisioner
-  namespace: kube-system
+  release: openstack-rbd-provisioner
+  namespace: openstack
   wait:
     timeout: 1800
     labels:
@@ -2407,7 +2407,7 @@ data:
   description: "Provisioner"
   sequenced: false
   chart_group:
-    - kube-system-rbd-provisioner
+    - openstack-rbd-provisioner
 ---
 schema: armada/ChartGroup/v1
 metadata:

View File

@@ -14,9 +14,9 @@ metadata:
 subjects:
 - kind: ServiceAccount
   name: {{ .Values.rbac.serviceAccount }}
-  namespace: {{ .Values.global.namespace }}
+  namespace: {{ .Release.Namespace }}
 roleRef:
   kind: ClusterRole
   name: {{ .Values.rbac.clusterRole }}
   apiGroup: rbac.authorization.k8s.io
 {{- end}}
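
Switching from .Values.global.namespace to the built-in .Release.Namespace ties every namespaced resource to the release itself. This is easy to sanity-check offline with helm template; a sketch, assuming a local chart checkout:

    # Render the chart without installing it; each namespaced resource
    # should now carry "namespace: openstack" with no values override set.
    helm template ./helm-charts/rbd-provisioner \
        --namespace openstack | grep "namespace:"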

View File

@@ -11,7 +11,7 @@ apiVersion: extensions/v1beta1
 kind: DaemonSet
 metadata:
   name: {{ .Values.global.name }}
-  namespace: {{ .Values.global.namespace }}
+  namespace: {{ .Release.Namespace }}
   labels:
     app: {{ .Values.global.name }}
 spec:

View File

@@ -11,7 +11,7 @@ apiVersion: extensions/v1beta1
 kind: Deployment
 metadata:
   name: {{ .Values.global.name }}
-  namespace: {{ .Values.global.namespace }}
+  namespace: {{ .Release.Namespace }}
 spec:
   replicas: {{ .Values.global.replicas }}
   strategy:

View File

@@ -16,7 +16,7 @@ kind: ConfigMap
 metadata:
   creationTimestamp: 2016-02-18T19:14:38Z
   name: config-{{- $root.Values.global.name }}
-  namespace: {{ $root.Values.global.namespace }}
+  namespace: {{ $root.Release.Namespace }}
 data:
   ceph.conf: |
 {{ $monitors := $defaults.monitors }}{{ range $index, $element := $monitors}}
@@ -31,7 +31,7 @@ data:
     cp {{ $mount -}}/ceph.conf /etc/ceph/
     if [ ! -z $CEPH_ADMIN_SECRET ]; then
-      kubectl get secret -n kube-system | grep $CEPH_ADMIN_SECRET
+      kubectl get secret -n $NAMESPACE | grep $CEPH_ADMIN_SECRET
       ret=$?
       if [ $ret -ne 0 ]; then
         msg="Create $CEPH_ADMIN_SECRET secret"
@@ -78,12 +78,8 @@ data:
       # Set up pool key in Ceph format
       CEPH_USER_KEYRING=/etc/ceph/ceph.client.$USER_ID.keyring
       echo $KEYRING > $CEPH_USER_KEYRING
-      IFS=',' read -a POOL_SECRET_NAMESPACES_ARR <<< "${POOL_SECRET_NAMESPACES}"
-      for pool_secret_namespace in "${POOL_SECRET_NAMESPACES_ARR[@]}"
-      do
-        kubectl create secret generic $CEPH_USER_SECRET --type="kubernetes.io/rbd" --from-literal=key=$KEYRING --namespace=$pool_secret_namespace
-      done
+      kubectl create secret generic $CEPH_USER_SECRET --type="kubernetes.io/rbd" --from-literal=key=$KEYRING --namespace=$NAMESPACE
       set +ex
@@ -108,7 +104,7 @@ apiVersion: batch/v1
 kind: Job
 metadata:
   name: rbd-provisioner-storage-init
-  namespace: {{ $root.Values.global.namespace }}
+  namespace: {{ $root.Release.Namespace }}
   labels:
     heritage: {{$root.Release.Service | quote }}
     release: {{$root.Release.Name | quote }}
@@ -119,7 +115,7 @@ spec:
   template:
     metadata:
       name: "{{$root.Release.Name}}"
-      namespace: {{ $root.Values.global.namespace }}
+      namespace: {{ $root.Release.Namespace }}
       labels:
         heritage: {{$root.Release.Service | quote }}
         release: {{$root.Release.Name | quote }}
@@ -138,9 +134,7 @@ spec:
           command: [ "/bin/bash", "{{ $mount }}/check_ceph.sh" ]
           env:
           - name: NAMESPACE
-            value: {{ $root.Values.global.namespace }}
-          - name: POOL_SECRET_NAMESPACES
-            value: {{ $classConfig.pool_secrets_namespaces }}
+            value: {{ $root.Release.Namespace }}
           - name: CEPH_ADMIN_SECRET
             value: {{ $defaults.adminSecretName }}
           - name: CEPH_USER_SECRET
@@ -165,7 +159,7 @@ spec:
          command: [ "/bin/bash", "{{ $mount }}/check_ceph.sh" ]
           env:
           - name: NAMESPACE
-            value: {{ $root.Values.global.namespace }}
+            value: {{ $root.Release.Namespace }}
           - name: POOL_NAME
             value: {{ $ephemeralPool.pool_name }}
           - name: POOL_REPLICATION
@@ -185,7 +179,7 @@ kind: ConfigMap
 metadata:
   name: ceph-etc
-  # This is the name of the openstack application's namespace
-  namespace: openstack
+  namespace: {{ $root.Release.Namespace }}
 data:
   ceph.conf: |
     [global]
@@ -194,4 +188,13 @@ data:
     [mon.{{- $index }}]
     mon_addr = {{ $element }}
 {{- end }}
+---
+# Create the pvc-ceph-client-key. We need this here as we're not launching
+# Ceph using the Helm chart.
+apiVersion: v1
+kind: Secret
+type: kubernetes.io/rbd
+metadata:
+  name: pvc-ceph-client-key
+  namespace: {{ $root.Release.Namespace }}
 {{- end }}
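
The storage-init script now creates the pool secret exactly once, in the namespace the job runs in, instead of iterating over POOL_SECRET_NAMESPACES. A standalone sketch of the equivalent operation (the Ceph user and secret name follow the chart defaults; running this by hand assumes access to both ceph and kubectl):

    # Publish the Ceph client key as a single kubernetes.io/rbd secret in
    # the release namespace, mirroring check_ceph.sh after this change.
    NAMESPACE=openstack
    CEPH_USER_SECRET=ceph-secret-kube
    KEYRING=$(ceph auth get-key client.kube)
    kubectl create secret generic "$CEPH_USER_SECRET" \
        --type="kubernetes.io/rbd" \
        --from-literal=key="$KEYRING" \
        --namespace="$NAMESPACE"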

View File

@@ -11,9 +11,9 @@ apiVersion: rbac.authorization.k8s.io/v1
 kind: Role
 metadata:
   name: {{ .Values.rbac.role }}
-  namespace: {{ .Values.global.namespace }}
+  namespace: {{ .Release.Namespace }}
 rules:
 - apiGroups: [""]
   resources: ["secrets"]
   verbs: ["get", "create", "list", "update"]
 {{- end}}

View File

@@ -11,7 +11,7 @@ apiVersion: rbac.authorization.k8s.io/v1
 kind: RoleBinding
 metadata:
   name: {{ .Values.rbac.roleBinding }}
-  namespace: {{ .Values.global.namespace }}
+  namespace: {{ .Release.Namespace }}
 roleRef:
   apiGroup: rbac.authorization.k8s.io
   kind: Role
@@ -19,5 +19,5 @@ roleRef:
 subjects:
 - kind: ServiceAccount
   name: {{ .Values.rbac.serviceAccount }}
-  namespace: {{ .Values.global.namespace }}
+  namespace: {{ .Release.Namespace }}
 {{- end}}

View File

@@ -11,5 +11,5 @@ apiVersion: v1
 kind: ServiceAccount
 metadata:
   name: {{ .Values.rbac.serviceAccount }}
-  namespace: {{ .Values.global.namespace }}
+  namespace: {{ .Release.Namespace }}
 {{- end }}

View File

@@ -7,7 +7,7 @@
 */}}
 {{- if .Values.global.provisionStorageClass }}
-{{ $namespace := .Values.global.namespace }}
+{{ $namespace := .Release.Namespace }}
 {{ $defaults := .Values.classdefaults}}
 {{- range $classConfig := .Values.classes }}
 apiVersion: storage.k8s.io/v1

View File

@@ -13,10 +13,6 @@ global:
   #
   name: "rbd-provisioner"
   #
-  # Defines the namespace where provisioner runs.
-  #
-  namespace: kube-system
-  #
   # Execute initialization job to verify external Ceph cluster access
   # and setup additional dependencies assumed by dependent helm charts
   # (i.e. configmap and secrets).
@@ -135,10 +131,6 @@ classes:
   userId: kube
   # K8 secret name with key for accessing the Ceph pool
   userSecretName: ceph-secret-kube
-  # Namespaces for creating the k8s secrets for accessing the Ceph pools
-  pool_secrets_namespaces: kube-system
-  # Name of pool to configure
-  pool_name: kube-rbd
   # Pool replication
   replication: 1
   # Pool crush rule name
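
With the namespace and pool_secrets_namespaces keys gone from values.yaml, the install namespace is controlled solely by the Helm release. A quick post-install check (secret name taken from the userSecretName default above):

    # The pool access secret should now exist only in the release namespace.
    kubectl get secret ceph-secret-kube -n openstack
    kubectl get secret ceph-secret-kube -n kube-system || echo "absent, as expected"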

View File

@@ -20,7 +20,7 @@ class RbdProvisionerHelm(base.BaseHelm):
     CHART = constants.HELM_CHART_RBD_PROVISIONER
     SUPPORTED_NAMESPACES = [
-        common.HELM_NS_KUBE_SYSTEM
+        common.HELM_NS_OPENSTACK
     ]
     SERVICE_NAME = 'rbd-provisioner'
@@ -39,20 +39,10 @@ class RbdProvisionerHelm(base.BaseHelm):
     def get_overrides(self, namespace=None):
-        def is_rbd_provisioner_bk(bk):
-            if bk.services is None:
-                return False
-            # Note: No support yet for external ceph. For it to work we need to
-            # get the ip addresses of the monitors from external ceph conf file
-            # or add them as overrides.
-            return (bk.backend == constants.CINDER_BACKEND_CEPH and
-                    constants.SB_SVC_RBD_PROVISIONER in bk.services)
         backends = self.dbapi.storage_backend_get_list()
-        rbd_provisioner_bks = [bk for bk in backends if is_rbd_provisioner_bk(bk)]
-        if not rbd_provisioner_bks:
+        ceph_bks = [bk for bk in backends if bk.backend == constants.SB_TYPE_CEPH]
+        if not ceph_bks:
             return {}  # ceph is not configured
         classdefaults = {
@@ -67,7 +57,7 @@ class RbdProvisionerHelm(base.BaseHelm):
             constants.SB_TIER_DEFAULT_NAMES[constants.SB_TIER_TYPE_CEPH]
         classes = []
-        for bk in rbd_provisioner_bks:
+        for bk in ceph_bks:
             # Get the ruleset for the new kube-rbd pool.
             tier = next((t for t in tiers if t.forbackendid == bk.id), None)
             if not tier:
@@ -78,19 +68,9 @@ class RbdProvisionerHelm(base.BaseHelm):
                 constants.CEPH_CRUSH_TIER_SUFFIX,
                 "-ruleset").replace('-', '_')
-            # Check namespaces. We need to know on what namespaces to create
-            # the secrets for the kube-rbd pools.
-            pool_secrets_namespaces = bk.capabilities.get(
-                constants.K8S_RBD_PROV_NAMESPACES)
-            if not pool_secrets_namespaces:
-                raise Exception("Please specify the rbd_provisioner_namespaces"
-                                " for the %s backend." % bk.name)
             cls = {
                 "name": K8RbdProvisioner.get_storage_class_name(bk),
                 "pool_name": K8RbdProvisioner.get_pool(bk),
-                "pool_secrets_namespaces": pool_secrets_namespaces.encode(
-                    'utf8', 'strict'),
                 "replication": int(bk.capabilities.get("replication")),
                 "crush_rule_name": rule_name,
                 "chunk_size": 64,
@@ -109,8 +89,6 @@ class RbdProvisionerHelm(base.BaseHelm):
         sb_list_ext = self.dbapi.storage_backend_get_list_by_type(
             backend_type=constants.SB_TYPE_CEPH_EXTERNAL)
-        sb_list = self.dbapi.storage_backend_get_list_by_type(
-            backend_type=constants.SB_TYPE_CEPH)
         if sb_list_ext:
             for sb in sb_list_ext:
@@ -124,17 +102,16 @@ class RbdProvisionerHelm(base.BaseHelm):
                 }
                 ephemeral_pools.append(ephemeral_pool)
         # Treat internal CEPH.
-        if sb_list:
-            ephemeral_pool = {
-                "pool_name": constants.CEPH_POOL_EPHEMERAL_NAME,
-                "replication": int(sb_list[0].capabilities.get("replication")),
-                "crush_rule_name": rule_name,
-                "chunk_size": 64,
-            }
-            ephemeral_pools.append(ephemeral_pool)
+        ephemeral_pool = {
+            "pool_name": constants.CEPH_POOL_EPHEMERAL_NAME,
+            "replication": int(ceph_bks[0].capabilities.get("replication")),
+            "crush_rule_name": rule_name,
+            "chunk_size": 64,
+        }
+        ephemeral_pools.append(ephemeral_pool)
         overrides = {
-            common.HELM_NS_KUBE_SYSTEM: {
+            common.HELM_NS_OPENSTACK: {
                 "classdefaults": classdefaults,
                 "classes": classes,
                 "ephemeral_pools": ephemeral_pools,