Split of values into separate files for the remaining infra (#12)

2025-04-22 11:07:53 +02:00
parent 9068fc25ae
commit f62e9b5c40
22 changed files with 251 additions and 173 deletions

helm-release.yaml

@@ -13,52 +13,6 @@ spec:
      version: 1.16.3
  interval: 15m
  timeout: 5m
  values:
    toolbox:
      enabled: true
    # TODO: Not sure we really need this if we have prometheus + grafana set up
    ingress:
      dashboard:
        annotations:
          traefik.ingress.kubernetes.io/router.entryPoints: "websecure"
          traefik.ingress.kubernetes.io/router.middlewares: "authelia-forwardauth-authelia@kubernetescrd"
          traefik.ingress.kubernetes.io/router.tls: "true"
        host:
          name: ceph.${domain}
        tls:
          - hosts:
              - ceph.${domain}
            secretName: ${domain//./-}-tls
    # Uncomment once prometheus stack has been added
    # monitoring:
    #   enabled: true
    #   createPrometheusRules: true
    cephBlockPoolsVolumeSnapshotClass:
      enabled: true
    cephFileSystemVolumeSnapshotClass:
      enabled: true
    cephClusterSpec:
      dashboard:
        ssl: false
      storage:
        useAllDevices: false
        deviceFilter: "^nvme."
      resources:
        mgr:
          limits:
            memory: "1Gi"
          requests:
            cpu: "50m"
            memory: "512Mi"
        mon:
          limits:
            memory: "2Gi"
          requests:
            cpu: "100m"
            memory: "1Gi"
        osd:
          limits:
            memory: "2Gi"
          requests:
            cpu: "100m"
            memory: "1Gi"
  valuesFrom:
    - kind: ConfigMap
      name: rook-ceph-cluster-values
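For context, the valuesFrom entry above points at the ConfigMap that the configMapGenerator in kustomization.yaml (next file) builds from values.yaml. A rough sketch of the generated object, assuming kustomize's default behaviour of keying the data by file name and appending a content-hash suffix (the suffix shown is made up):

apiVersion: v1
kind: ConfigMap
metadata:
  # kustomize appends a hash of the contents by default; the value here is illustrative
  name: rook-ceph-cluster-values-abc123def4
  namespace: rook-ceph
data:
  values.yaml: |
    toolbox:
      enabled: true
    # ... remainder of values.yaml ...

Flux reads the values.yaml key by default when no valuesKey is set, and anything still defined inline under spec.values is merged on top of the values pulled in via valuesFrom.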

kustomization.yaml

@@ -4,3 +4,11 @@ namespace: rook-ceph
resources:
  - ./helm-release.yaml
  - ./access-control-rule.yaml
configurations:
  - ../../common/name-reference/helm-release.yaml
configMapGenerator:
  - name: rook-ceph-cluster-values
    files:
      - ./values.yaml
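The configurations entry matters because the generated ConfigMap gets a content-hash suffix, so kustomize has to be told where HelmRelease objects reference ConfigMaps by name in order to rewrite spec.valuesFrom to the suffixed name. The referenced ../../common/name-reference/helm-release.yaml is not part of this diff; a minimal sketch of what such a name-reference configuration typically contains (field path assumed):

# assumed contents; the real file lives outside this commit
nameReference:
  - kind: ConfigMap
    version: v1
    fieldSpecs:
      - kind: HelmRelease
        group: helm.toolkit.fluxcd.io
        path: spec/valuesFrom/name

With that in place, each change to values.yaml produces a new ConfigMap name, the HelmRelease spec changes with it, and Flux rolls out the updated values.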

values.yaml

@@ -0,0 +1,48 @@
toolbox:
  enabled: true
# TODO: Not sure we really need this if we have prometheus + grafana set up
ingress:
  dashboard:
    annotations:
      traefik.ingress.kubernetes.io/router.entryPoints: "websecure"
      traefik.ingress.kubernetes.io/router.middlewares: "authelia-forwardauth-authelia@kubernetescrd"
      traefik.ingress.kubernetes.io/router.tls: "true"
    host:
      name: ceph.${domain}
    tls:
      - hosts:
          - ceph.${domain}
        secretName: ${domain//./-}-tls
# Uncomment once prometheus stack has been added
# monitoring:
#   enabled: true
#   createPrometheusRules: true
cephBlockPoolsVolumeSnapshotClass:
  enabled: true
cephFileSystemVolumeSnapshotClass:
  enabled: true
cephClusterSpec:
  dashboard:
    ssl: false
  storage:
    useAllDevices: false
    deviceFilter: "^nvme."
  resources:
    mgr:
      limits:
        memory: "1Gi"
      requests:
        cpu: "50m"
        memory: "512Mi"
    mon:
      limits:
        memory: "2Gi"
      requests:
        cpu: "100m"
        memory: "1Gi"
    osd:
      limits:
        memory: "2Gi"
      requests:
        cpu: "100m"
        memory: "1Gi"