Enhancements for the rbd-provisioner helm chart

- remove the need to pre-create the openstack namespace: helm
  creates the namespace automatically when the first chart is
  installed into it
- the rbd-provisioner chart is now installed in the openstack
  namespace and no longer creates resources across multiple
  namespaces
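
A minimal sketch of the resulting install flow, assuming Helm v2
semantics (tiller creates the release namespace if it does not already
exist) and a hypothetical local chart path; the release name is taken
from the armada manifests below:

    helm upgrade --install openstack-rbd-provisioner ./rbd-provisioner \
        --namespace openstack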

Change-Id: I875acd5959ceca033d9233ddf70ac951db7ca6a8
Story: 2004520
Task: 28395
Signed-off-by: Irina Mihai <irina.mihai@windriver.com>
Irina Mihai 2018-12-10 23:19:54 +00:00
parent e307108c46
commit 6b2be98f0d
12 changed files with 48 additions and 76 deletions

View File

@@ -100,11 +100,11 @@ data:
 schema: armada/Chart/v1
 metadata:
   schema: metadata/Document/v1
-  name: kube-system-rbd-provisioner
+  name: openstack-rbd-provisioner
 data:
   chart_name: rbd-provisioner
-  release: kube-system-rbd-provisioner
-  namespace: kube-system
+  release: openstack-rbd-provisioner
+  namespace: openstack
   wait:
     timeout: 1800
     labels:
@@ -2409,7 +2409,7 @@ data:
   description: "Provisioner"
   sequenced: false
   chart_group:
-    - kube-system-rbd-provisioner
+    - openstack-rbd-provisioner
 ---
 schema: armada/ChartGroup/v1
 metadata:

View File

@@ -100,11 +100,11 @@ data:
 schema: armada/Chart/v1
 metadata:
   schema: metadata/Document/v1
-  name: kube-system-rbd-provisioner
+  name: openstack-rbd-provisioner
 data:
   chart_name: rbd-provisioner
-  release: kube-system-rbd-provisioner
-  namespace: kube-system
+  release: openstack-rbd-provisioner
+  namespace: openstack
   wait:
     timeout: 1800
     labels:
@@ -2407,7 +2407,7 @@ data:
   description: "Provisioner"
   sequenced: false
   chart_group:
-    - kube-system-rbd-provisioner
+    - openstack-rbd-provisioner
 ---
 schema: armada/ChartGroup/v1
 metadata:

View File

@@ -14,9 +14,9 @@ metadata:
 subjects:
 - kind: ServiceAccount
   name: {{ .Values.rbac.serviceAccount }}
-  namespace: {{ .Values.global.namespace }}
+  namespace: {{ .Release.Namespace }}
 roleRef:
   kind: ClusterRole
   name: {{ .Values.rbac.clusterRole }}
   apiGroup: rbac.authorization.k8s.io
 {{- end}}
 {{- end}}

View File

@@ -11,7 +11,7 @@ apiVersion: extensions/v1beta1
 kind: DaemonSet
 metadata:
   name: {{ .Values.global.name }}
-  namespace: {{ .Values.global.namespace }}
+  namespace: {{ .Release.Namespace }}
   labels:
     app: {{ .Values.global.name }}
 spec:

View File

@@ -11,7 +11,7 @@ apiVersion: extensions/v1beta1
 kind: Deployment
 metadata:
   name: {{ .Values.global.name }}
-  namespace: {{ .Values.global.namespace }}
+  namespace: {{ .Release.Namespace }}
 spec:
   replicas: {{ .Values.global.replicas }}
   strategy:

View File

@@ -16,7 +16,7 @@ kind: ConfigMap
 metadata:
   creationTimestamp: 2016-02-18T19:14:38Z
   name: config-{{- $root.Values.global.name }}
-  namespace: {{ $root.Values.global.namespace }}
+  namespace: {{ $root.Release.Namespace }}
 data:
   ceph.conf: |
 {{ $monitors := $defaults.monitors }}{{ range $index, $element := $monitors}}
@@ -31,7 +31,7 @@ data:
     cp {{ $mount -}}/ceph.conf /etc/ceph/
     if [ ! -z $CEPH_ADMIN_SECRET ]; then
-      kubectl get secret -n kube-system | grep $CEPH_ADMIN_SECRET
+      kubectl get secret -n $NAMESPACE | grep $CEPH_ADMIN_SECRET
       ret=$?
       if [ $ret -ne 0 ]; then
         msg="Create $CEPH_ADMIN_SECRET secret"
@@ -78,12 +78,8 @@ data:
     # Set up pool key in Ceph format
     CEPH_USER_KEYRING=/etc/ceph/ceph.client.$USER_ID.keyring
     echo $KEYRING > $CEPH_USER_KEYRING
-    IFS=',' read -a POOL_SECRET_NAMESPACES_ARR <<< "${POOL_SECRET_NAMESPACES}"
-    for pool_secret_namespace in "${POOL_SECRET_NAMESPACES_ARR[@]}"
-    do
-      kubectl create secret generic $CEPH_USER_SECRET --type="kubernetes.io/rbd" --from-literal=key=$KEYRING --namespace=$pool_secret_namespace
-    done
+    kubectl create secret generic $CEPH_USER_SECRET --type="kubernetes.io/rbd" --from-literal=key=$KEYRING --namespace=$NAMESPACE

     set +ex
@@ -108,7 +104,7 @@ apiVersion: batch/v1
 kind: Job
 metadata:
   name: rbd-provisioner-storage-init
-  namespace: {{ $root.Values.global.namespace }}
+  namespace: {{ $root.Release.Namespace }}
   labels:
     heritage: {{$root.Release.Service | quote }}
     release: {{$root.Release.Name | quote }}
@@ -119,7 +115,7 @@ spec:
   template:
     metadata:
       name: "{{$root.Release.Name}}"
-      namespace: {{ $root.Values.global.namespace }}
+      namespace: {{ $root.Release.Namespace }}
       labels:
         heritage: {{$root.Release.Service | quote }}
         release: {{$root.Release.Name | quote }}
@@ -138,9 +134,7 @@ spec:
         command: [ "/bin/bash", "{{ $mount }}/check_ceph.sh" ]
         env:
           - name: NAMESPACE
-            value: {{ $root.Values.global.namespace }}
-          - name: POOL_SECRET_NAMESPACES
-            value: {{ $classConfig.pool_secrets_namespaces }}
+            value: {{ $root.Release.Namespace }}
           - name: CEPH_ADMIN_SECRET
             value: {{ $defaults.adminSecretName }}
           - name: CEPH_USER_SECRET
@@ -165,7 +159,7 @@ spec:
         command: [ "/bin/bash", "{{ $mount }}/check_ceph.sh" ]
         env:
           - name: NAMESPACE
-            value: {{ $root.Values.global.namespace }}
+            value: {{ $root.Release.Namespace }}
           - name: POOL_NAME
             value: {{ $ephemeralPool.pool_name }}
           - name: POOL_REPLICATION
@@ -185,7 +179,7 @@ kind: ConfigMap
 metadata:
   name: ceph-etc
   # This is the name of the openstack application's namespace
-  namespace: openstack
+  namespace: {{ $root.Release.Namespace }}
 data:
   ceph.conf: |
     [global]
@@ -194,4 +188,13 @@ data:
     [mon.{{- $index }}]
     mon_addr = {{ $element }}
 {{- end }}
+---
+# Create the pvc-ceph-client-key. We need this here as we're not launching
+# Ceph using the Helm chart.
+apiVersion: v1
+kind: Secret
+type: kubernetes.io/rbd
+metadata:
+  name: pvc-ceph-client-key
+  namespace: {{ $root.Release.Namespace }}
 {{- end }}
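
A quick post-install sanity check for the single-namespace layout is
sketched below; the object names come from this chart (ceph-etc,
pvc-ceph-client-key) and its defaults (ceph-secret-kube), and the
openstack target namespace is assumed:

    kubectl get configmap ceph-etc -n openstack
    kubectl get secret pvc-ceph-client-key -n openstack
    kubectl get secret ceph-secret-kube -n openstack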

View File

@@ -11,9 +11,9 @@ apiVersion: rbac.authorization.k8s.io/v1
 kind: Role
 metadata:
   name: {{ .Values.rbac.role }}
-  namespace: {{ .Values.global.namespace }}
+  namespace: {{ .Release.Namespace }}
 rules:
   - apiGroups: [""]
     resources: ["secrets"]
     verbs: ["get", "create", "list", "update"]
 {{- end}}
 {{- end}}

View File

@@ -11,7 +11,7 @@ apiVersion: rbac.authorization.k8s.io/v1
 kind: RoleBinding
 metadata:
   name: {{ .Values.rbac.roleBinding }}
-  namespace: {{ .Values.global.namespace }}
+  namespace: {{ .Release.Namespace }}
 roleRef:
   apiGroup: rbac.authorization.k8s.io
   kind: Role
@@ -19,5 +19,5 @@ roleRef:
 subjects:
 - kind: ServiceAccount
   name: {{ .Values.rbac.serviceAccount }}
-  namespace: {{ .Values.global.namespace }}
-{{- end}}
+  namespace: {{ .Release.Namespace }}
+{{- end}}

View File

@@ -11,5 +11,5 @@ apiVersion: v1
 kind: ServiceAccount
 metadata:
   name: {{ .Values.rbac.serviceAccount }}
-  namespace: {{ .Values.global.namespace }}
+  namespace: {{ .Release.Namespace }}
 {{- end }}

View File

@@ -7,7 +7,7 @@
 */}}
 {{- if .Values.global.provisionStorageClass }}
-{{ $namespace := .Values.global.namespace }}
+{{ $namespace := .Release.Namespace }}
 {{ $defaults := .Values.classdefaults}}
 {{- range $classConfig := .Values.classes }}
 apiVersion: storage.k8s.io/v1

View File

@@ -13,10 +13,6 @@ global:
   #
   name: "rbd-provisioner"
   #
-  # Defines the namespace where provisioner runs.
-  #
-  namespace: kube-system
-  #
   # Execute initialization job to verify external Ceph cluster access
   # and setup additional dependencies assumed by dependent helm charts
   # (i.e. configmap and secrets).
@@ -135,10 +131,6 @@ classes:
     userId: kube
     # K8 secret name with key for accessing the Ceph pool
     userSecretName: ceph-secret-kube
-    # Namespaces for creating the k8s secrets for accessing the Ceph pools
-    pool_secrets_namespaces: kube-system
     # Name of pool to configure
     pool_name: kube-rbd
     # Pool replication
     replication: 1
     # Pool crush rule name
View File

@@ -20,7 +20,7 @@ class RbdProvisionerHelm(base.BaseHelm):
     CHART = constants.HELM_CHART_RBD_PROVISIONER
     SUPPORTED_NAMESPACES = [
-        common.HELM_NS_KUBE_SYSTEM
+        common.HELM_NS_OPENSTACK
     ]

     SERVICE_NAME = 'rbd-provisioner'
@@ -39,20 +39,10 @@ class RbdProvisionerHelm(base.BaseHelm):
     def get_overrides(self, namespace=None):
-        def is_rbd_provisioner_bk(bk):
-            if bk.services is None:
-                return False
-
-            # Note: No support yet for external ceph. For it to work we need to
-            # get the ip addresses of the monitors from external ceph conf file
-            # or add them as overrides.
-            return (bk.backend == constants.CINDER_BACKEND_CEPH and
-                    constants.SB_SVC_RBD_PROVISIONER in bk.services)
-
         backends = self.dbapi.storage_backend_get_list()
-        rbd_provisioner_bks = [bk for bk in backends if is_rbd_provisioner_bk(bk)]
+        ceph_bks = [bk for bk in backends if bk.backend == constants.SB_TYPE_CEPH]

-        if not rbd_provisioner_bks:
+        if not ceph_bks:
             return {}  # ceph is not configured

         classdefaults = {
@@ -67,7 +57,7 @@ class RbdProvisionerHelm(base.BaseHelm):
             constants.SB_TIER_DEFAULT_NAMES[constants.SB_TIER_TYPE_CEPH]

         classes = []
-        for bk in rbd_provisioner_bks:
+        for bk in ceph_bks:
             # Get the ruleset for the new kube-rbd pool.
             tier = next((t for t in tiers if t.forbackendid == bk.id), None)
             if not tier:
@@ -78,19 +68,9 @@ class RbdProvisionerHelm(base.BaseHelm):
                 constants.CEPH_CRUSH_TIER_SUFFIX,
                 "-ruleset").replace('-', '_')

-            # Check namespaces. We need to know on what namespaces to create
-            # the secrets for the kube-rbd pools.
-            pool_secrets_namespaces = bk.capabilities.get(
-                constants.K8S_RBD_PROV_NAMESPACES)
-            if not pool_secrets_namespaces:
-                raise Exception("Please specify the rbd_provisioner_namespaces"
-                                " for the %s backend." % bk.name)
-
             cls = {
                 "name": K8RbdProvisioner.get_storage_class_name(bk),
                 "pool_name": K8RbdProvisioner.get_pool(bk),
-                "pool_secrets_namespaces": pool_secrets_namespaces.encode(
-                    'utf8', 'strict'),
                 "replication": int(bk.capabilities.get("replication")),
                 "crush_rule_name": rule_name,
                 "chunk_size": 64,
@@ -109,8 +89,6 @@ class RbdProvisionerHelm(base.BaseHelm):
         sb_list_ext = self.dbapi.storage_backend_get_list_by_type(
             backend_type=constants.SB_TYPE_CEPH_EXTERNAL)
-        sb_list = self.dbapi.storage_backend_get_list_by_type(
-            backend_type=constants.SB_TYPE_CEPH)

         if sb_list_ext:
             for sb in sb_list_ext:
@@ -124,17 +102,16 @@ class RbdProvisionerHelm(base.BaseHelm):
                 }
                 ephemeral_pools.append(ephemeral_pool)

         # Treat internal CEPH.
-        if sb_list:
-            ephemeral_pool = {
-                "pool_name": constants.CEPH_POOL_EPHEMERAL_NAME,
-                "replication": int(sb_list[0].capabilities.get("replication")),
-                "crush_rule_name": rule_name,
-                "chunk_size": 64,
-            }
-            ephemeral_pools.append(ephemeral_pool)
+        ephemeral_pool = {
+            "pool_name": constants.CEPH_POOL_EPHEMERAL_NAME,
+            "replication": int(ceph_bks[0].capabilities.get("replication")),
+            "crush_rule_name": rule_name,
+            "chunk_size": 64,
+        }
+        ephemeral_pools.append(ephemeral_pool)

         overrides = {
-            common.HELM_NS_KUBE_SYSTEM: {
+            common.HELM_NS_OPENSTACK: {
                 "classdefaults": classdefaults,
                 "classes": classes,
                 "ephemeral_pools": ephemeral_pools,