65 lines
1.6 KiB
YAML
65 lines
1.6 KiB
YAML
---
# Flux HelmRelease: deploys the rook-ceph-cluster chart (CephCluster CR + CSI
# storage/snapshot classes) from the rook-release HelmRepository.
apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
  name: rook-ceph-cluster
spec:
  chart:
    spec:
      chart: rook-ceph-cluster
      reconcileStrategy: ChartVersion
      sourceRef:
        kind: HelmRepository
        name: rook-release
      # Quoted so the chart version is always parsed as a string.
      version: "1.16.3"
  interval: 15m
  timeout: 5m
  values:
    # rook-ceph toolbox pod for running `ceph` CLI commands in-cluster.
    toolbox:
      enabled: true
    # TODO: Not sure we really need this if we have prometheus + grafana set up
    ingress:
      dashboard:
        annotations:
          traefik.ingress.kubernetes.io/router.entryPoints: "websecure"
          traefik.ingress.kubernetes.io/router.middlewares: "authelia-forwardauth-authelia@kubernetescrd"
          traefik.ingress.kubernetes.io/router.tls: "true"
        host:
          # ${domain} is substituted by Flux postBuild variable substitution.
          name: ceph.${domain}
        tls:
          - hosts:
              - ceph.${domain}
            # ${domain//./-} — Flux substitution with dots rewritten to dashes
            # so the secret name is a valid DNS-1123 label.
            secretName: ${domain//./-}-tls
    # Uncomment once prometheus stack has been added
    # monitoring:
    #   enabled: true
    #   createPrometheusRules: true
    cephBlockPoolsVolumeSnapshotClass:
      enabled: true
    cephFileSystemVolumeSnapshotClass:
      enabled: true
    cephClusterSpec:
      dashboard:
        # TLS is terminated at the Traefik ingress; dashboard serves plain HTTP.
        ssl: false
      storage:
        useAllDevices: false
        # Only consume NVMe devices as OSDs.
        deviceFilter: "^nvme."
      resources:
        mgr:
          limits:
            memory: "1Gi"
          requests:
            cpu: "50m"
            memory: "512Mi"
        mon:
          limits:
            memory: "2Gi"
          requests:
            cpu: "100m"
            memory: "1Gi"
        osd:
          limits:
            memory: "2Gi"
          requests:
            cpu: "100m"
            memory: "1Gi"