feat: Added kube-prometheus-stack
Some checks failed
kustomization/flux-system/5c1f7af0 reconciliation succeeded
kustomization/dragonfly-operator/5c1f7af0 reconciliation succeeded
kustomization/kube-prometheus-stack/5c1f7af0 health check failed
kustomization/reflector/5c1f7af0 reconciliation succeeded
kustomization/cilium/5c1f7af0 reconciliation succeeded
kustomization/spegel/5c1f7af0 reconciliation succeeded
kustomization/longhorn-jobs/5c1f7af0 reconciliation succeeded
kustomization/local-path-provisioner/5c1f7af0 reconciliation succeeded
kustomization/alerts/5c1f7af0 reconciliation succeeded
kustomization/longhorn/5c1f7af0 reconciliation succeeded
kustomization/cilium-config/5c1f7af0 reconciliation succeeded
kustomization/authelia/5c1f7af0 reconciliation succeeded

2025-12-24 03:52:43 +01:00
parent 5c56e25fd8
commit be28c37eeb
16 changed files with 313 additions and 0 deletions

View File

@@ -49,3 +49,8 @@ spec:
      copy:
        - from: "@foundation/controllers/dragonfly-operator/**"
          to: "@artifact/"
    - name: kube-prometheus-stack
      originRevision: "@foundation"
      copy:
        - from: "@foundation/controllers/kube-prometheus-stack/**"
          to: "@artifact/"

View File

@@ -0,0 +1,16 @@
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  name: db
spec:
  bootstrap:
    recovery:
      source: source
  externalClusters:
    - name: source
      plugin:
        name: barman-cloud.cloudnative-pg.io
        parameters:
          barmanObjectName: garage-store
          serverName: db
  plugins: []

View File

@@ -0,0 +1,15 @@
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  name: db
  namespace: monitoring
  # TODO: Add labels?
spec:
  storage:
    size: 8Gi
    storageClass: local-path
  plugins:
    - name: barman-cloud.cloudnative-pg.io
      isWALArchiver: true
      parameters:
        barmanObjectName: garage-store

View File

@@ -0,0 +1,7 @@
apiVersion: dragonflydb.io/v1alpha1
kind: Dragonfly
metadata:
  name: dragonfly
  namespace: monitoring
spec:
  replicas: 1

View File

@@ -0,0 +1,53 @@
# This is only the ldap config, you also need to enable ldap support in the main config file
# of Grafana. See https://grafana.com/docs/grafana/latest/auth/ldap/#enable-ldap
# You can test that it is working correctly by trying usernames at: https://<your grafana instance>/admin/ldap
[[servers]]
# Ldap server host (specify multiple hosts space separated)
host = "lldap.lldap.svc.cluster.local"
# Default port is 389 or 636 if use_ssl = true
port = 3890
# Set to true if LDAP server should use an encrypted TLS connection (either with STARTTLS or LDAPS)
use_ssl = false
# If set to true, use LDAP with STARTTLS instead of LDAPS
start_tls = false
# set to true if you want to skip SSL cert validation
ssl_skip_verify = false
# set to the path to your root CA certificate or leave unset to use system defaults
# root_ca_cert = "/path/to/certificate.crt"
# Authentication against LDAP servers requiring client certificates
# client_cert = "/path/to/client.crt"
# client_key = "/path/to/client.key"
# Search user bind dn
bind_dn = "${BIND_DN}"
# Search user bind password
# If the password contains # or ; you have to wrap it with triple quotes. Ex """#password;"""
bind_password = "${LDAP_PASSWORD}"
# User search filter
search_filter = "(&(|(uid=%s)(mail=%s))(objectClass=person))"
# If you want to limit to only users of a specific group use this instead:
# search_filter = "(&(uid=%s)(memberOf=cn=<your group>,ou=groups,dc=huizinga,dc=dev))"
# An array of base dns to search through
search_base_dns = ["dc=huizinga,dc=dev"]
# Specify names of the LDAP attributes your LDAP uses
[servers.attributes]
member_of = "memberOf"
email = "mail"
name = "givenName"
surname = "sn"
username = "uid"
# If you want to map your ldap groups to grafana's groups, see: https://grafana.com/docs/grafana/latest/auth/ldap/#group-mappings
# As a quick example, here is how you would map lldap's admin group to grafana's admin
[[servers.group_mappings]]
group_dn = "cn=lldap_admin,ou=groups,dc=huizinga,dc=dev"
org_role = "Admin"
grafana_admin = true
[[servers.group_mappings]]
group_dn = "*"
org_role = "Viewer"

View File

@@ -0,0 +1,26 @@
apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
  name: kube-prometheus-stack
  namespace: monitoring
spec:
  interval: 12h
  install:
    strategy:
      name: RetryOnFailure
      retryInterval: 2m
  upgrade:
    strategy:
      name: RetryOnFailure
      retryInterval: 3m
  chart:
    spec:
      chart: kube-prometheus-stack
      version: "80.6.x"
      sourceRef:
        kind: HelmRepository
        name: prometheus-community
      interval: 24h
  valuesFrom:
    - kind: ConfigMap
      name: values-base

View File

@@ -0,0 +1,9 @@
apiVersion: source.toolkit.fluxcd.io/v1
kind: HelmRepository
metadata:
  name: prometheus-community
  namespace: monitoring
spec:
  type: "oci"
  interval: 24h
  url: oci://ghcr.io/prometheus-community/charts/

View File

@@ -0,0 +1,18 @@
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
  name: kube-prometheus-stack
  namespace: flux-system
spec:
  interval: 1h
  retryInterval: 2m
  timeout: 5m
  dependsOn:
    - name: dragonfly-operator
    - name: lldap-controller
  sourceRef:
    kind: ExternalArtifact
    name: kube-prometheus-stack
  path: ./
  prune: true
  wait: true

View File

@@ -0,0 +1,31 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  - namespace.yaml
  - helm-repository.yaml
  - helm-release.yaml
  - dragonfly.yaml
  - cluster.yaml
  - secret-s3-garage.yaml
  - object-store.yaml
configurations:
  - name-reference.yaml
configMapGenerator:
  - name: values-base
    namespace: monitoring
    files:
      - values.yaml
secretGenerator:
  - name: grafana-ldap-toml
    namespace: monitoring
    files:
      - ldap-toml=grafana-ldap.toml
# Uncomment to restore database from backup
# patches:
#   - path: cluster-restore.yaml
#     target:
#       kind: Cluster

View File

@@ -0,0 +1,6 @@
nameReference:
  - kind: ConfigMap
    version: v1
    fieldSpecs:
      - path: spec/valuesFrom/name
        kind: HelmRelease
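
This nameReference configuration tells kustomize that `spec/valuesFrom/name` in a HelmRelease refers to a ConfigMap, so the hash suffix added by the configMapGenerator is propagated into the HelmRelease. As a rough sketch of the rendered result (the hash suffix below is purely illustrative):

apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
  name: kube-prometheus-stack
  namespace: monitoring
spec:
  # ...unchanged fields omitted...
  valuesFrom:
    - kind: ConfigMap
      name: values-base-9g7k2f8c4h  # suffix is an example, generated by kustomize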

View File

@@ -0,0 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
  name: monitoring

View File

@@ -0,0 +1,20 @@
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
  name: garage-store
  namespace: monitoring
spec:
  configuration:
    destinationPath: s3://cnpg-backup/authelia
    s3Credentials:
      accessKeyId:
        name: s3-garage
        key: ACCESS_KEY_ID
      secretAccessKey:
        name: s3-garage
        key: ACCESS_SECRET_KEY
      region:
        name: s3-garage
        key: REGION
    wal:
      compression: gzip

View File

@@ -0,0 +1,9 @@
apiVersion: v1
kind: Secret
metadata:
  name: s3-garage
  namespace: monitoring
  annotations:
    reflector.v1.k8s.emberstack.com/reflects: "cnpg-system/s3-garage"
type: Opaque
data: {}
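
This Secret is only the mirror; Reflector fills in `data` from the source Secret in `cnpg-system`. For that to work, the source Secret is assumed to allow reflection into the `monitoring` namespace, roughly like the sketch below (the source itself is managed elsewhere; key names are taken from the ObjectStore references above):

apiVersion: v1
kind: Secret
metadata:
  name: s3-garage
  namespace: cnpg-system
  annotations:
    reflector.v1.k8s.emberstack.com/reflection-allowed: "true"
    reflector.v1.k8s.emberstack.com/reflection-allowed-namespaces: "monitoring"  # or a broader pattern
type: Opaque
data:
  ACCESS_KEY_ID: <base64>
  ACCESS_SECRET_KEY: <base64>
  REGION: <base64>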

View File

@@ -0,0 +1,6 @@
apiVersion: lldap.huizinga.dev/v1
kind: ServiceUser
metadata:
  name: grafana
  namespace: monitoring
spec: {}
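
The Grafana values below reference a `grafana-lldap-credentials` Secret with `bind_dn` and `password` keys, which the lldap-controller is presumably expected to materialise for this ServiceUser. A sketch of the assumed shape, derived only from those references (the DN and value are illustrative):

apiVersion: v1
kind: Secret
metadata:
  name: grafana-lldap-credentials  # name assumed from envValueFrom in values.yaml
  namespace: monitoring
type: Opaque
stringData:
  bind_dn: uid=grafana,ou=people,dc=huizinga,dc=dev  # illustrative DN
  password: <generated-by-controller>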

View File

@@ -0,0 +1,87 @@
alertmanager:
  alertmanagerSpec:
    replicas: 1
  route:
    main:
      enabled: true
      hostnames:
        - "alerts.staging.huizinga.dev"
      parentRefs:
        - name: gateway
          namespace: default
prometheus:
  prometheusSpec:
    replicas: 1
  route:
    main:
      enabled: true
      hostnames:
        - "prometheus.staging.huizinga.dev"
      parentRefs:
        - name: gateway
          namespace: default
grafana:
  replicas: 1
  # ingress:
  #   enabled: true
  #   hosts:
  #     - grafana.${domain}
  #   tls:
  #     - secretName: ${domain//./-}-tls
  #   annotations:
  #     traefik.ingress.kubernetes.io/router.entryPoints: "websecure"
  #     traefik.ingress.kubernetes.io/router.middlewares: "authelia-forwardauth-authelia@kubernetescrd"
  #     traefik.ingress.kubernetes.io/router.tls: "true"
  envValueFrom:
    BIND_DN:
      secretKeyRef:
        name: grafana-lldap-credentials
        key: bind_dn
    LDAP_PASSWORD:
      secretKeyRef:
        name: grafana-lldap-credentials
        key: password
  grafana.ini:
    auth.ldap:
      enabled: true
    # auth.proxy:
    #   enabled: true
    #   header_name: Remote-User
    #   header_property: username
    #   auto_sign_up: true
    #   headers: Groups:Remote-Group
    #   enable_login_token: false
    #   sync_ttl: 0
    #   signout_redirect_url: https://login.${domain}/logout?rd=https://grafana.${domain}
    database:
      type: postgres
      host: $__file{/etc/secrets/db/host}
      name: $__file{/etc/secrets/db/dbname}
      user: $__file{/etc/secrets/db/user}
      password: $__file{/etc/secrets/db/password}
    remote_cache:
      type: redis
      connstr: addr=dragonfly.monitoring:6379
  ldap:
    enabled: true
    existingSecret: grafana-ldap-toml
  extraSecretMounts:
    - name: db-app-mount
      secretName: db-app
      defaultMode: 0440
      mountPath: /etc/secrets/db
      readOnly: true
# We are not running kube-proxy
kubeProxy:
  enabled: false