Compare commits

..

6 Commits

Author SHA1 Message Date
2e5d187f01 feat: Added kube-prometheus-stack
Some checks failed
kustomization/dragonfly-operator/5c1f7af0 reconciliation succeeded
kustomization/flux-system/5c1f7af0 reconciliation succeeded
kustomization/certificates/5c1f7af0 reconciliation succeeded
kustomization/kube-prometheus-stack/5c1f7af0 health check failed
kustomization/lldap/5c1f7af0 reconciliation succeeded
kustomization/authelia/5c1f7af0 reconciliation succeeded
2025-12-24 05:44:09 +01:00
5c56e25fd8 chore: No ./ in kustomization.yaml
All checks were successful
kustomization/flux-system/5c1f7af0 reconciliation succeeded
kustomization/cert-manager/5c1f7af0 reconciliation succeeded
kustomization/cnpg/5c1f7af0 reconciliation succeeded
kustomization/lldap/5c1f7af0 reconciliation succeeded
kustomization/certificates/5c1f7af0 reconciliation succeeded
kustomization/authelia/5c1f7af0 reconciliation succeeded
2025-12-24 03:29:44 +01:00
94af8edee7 chore: Consistent use of oci helm release
All checks were successful
kustomization/flux-system/5c1f7af0 reconciliation succeeded
kustomization/certificates/5c1f7af0 dependency not ready
kustomization/lldap/5c1f7af0 dependency not ready
kustomization/authelia/5c1f7af0 dependency not ready
kustomization/cnpg/5c1f7af0 dependency not ready
2025-12-24 03:28:01 +01:00
a9fcc5e07c feat: Added authelia 2025-12-23 06:04:27 +01:00
2e918de78a feat: Added dragonfly operator 2025-12-23 06:04:27 +01:00
cc76529d5f feat: Added authelia-controller 2025-12-23 06:04:26 +01:00
27 changed files with 376 additions and 25 deletions

View File

@@ -11,6 +11,7 @@ spec:
- name: cnpg
- name: lldap-controller
- name: dragonfly-operator
- name: authelia-controller
sourceRef:
kind: ExternalArtifact
name: authelia

View File

@@ -56,6 +56,6 @@ configMap:
filesystem:
enabled: true
# access_control:
# secret:
# existingSecret: authelia-acl
access_control:
secret:
existingSecret: authelia-acl

View File

@@ -2,5 +2,6 @@ apiVersion: dragonflydb.io/v1alpha1
kind: Dragonfly
metadata:
name: dragonfly
namespace: authelia
spec:
replicas: 2

View File

@@ -2,5 +2,6 @@ apiVersion: dragonflydb.io/v1alpha1
kind: Dragonfly
metadata:
name: dragonfly
namespace: authelia
spec:
replicas: 1

View File

@@ -11,7 +11,9 @@ resources:
- ../../controllers/cnpg/cnpg.yaml
- ../../controllers/reflector/reflector.yaml
- ../../controllers/lldap-controller/lldap-controller.yaml
- ../../controllers/authelia-controller/authelia-controller.yaml
- ../../controllers/dragonfly-operator/dragonfly-operator.yaml
- ../../controllers/kube-prometheus-stack/kube-prometheus-stack.yaml
- ../../configs/artifacts.yaml
- ../../configs/cilium-config/cilium-config.yaml

View File

@@ -49,3 +49,8 @@ spec:
copy:
- from: "@foundation/controllers/dragonfly-operator/**"
to: "@artifact/"
- name: kube-prometheus-stack
originRevision: "@foundation"
copy:
- from: "@foundation/controllers/kube-prometheus-stack/**"
to: "@artifact/"

View File

@@ -0,0 +1,26 @@
# Flux source + Kustomization deploying the authelia-controller manifests
# from the project's OCI registry (edge tag, polled every minute).
apiVersion: source.toolkit.fluxcd.io/v1  # was v1beta2; rest of repo uses the GA v1 source API
kind: OCIRepository
metadata:
  name: authelia-controller
  namespace: flux-system
spec:
  interval: 1m0s
  url: oci://git.huizinga.dev/infra/authelia-controller/manifests
  ref:
    tag: edge
---
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
  name: authelia-controller
  namespace: flux-system
spec:
  interval: 1h
  retryInterval: 2m
  timeout: 5m
  sourceRef:
    kind: OCIRepository
    name: authelia-controller
  path: ./
  prune: true
  wait: true

View File

@@ -13,9 +13,14 @@ spec:
strategy:
name: RetryOnFailure
retryInterval: 3m
chartRef:
kind: OCIRepository
chart:
spec:
chart: cert-manager
version: "1.19.x"
sourceRef:
kind: HelmRepository
name: cert-manager
interval: 24h
values:
crds:
enabled: true

View File

@@ -0,0 +1,9 @@
# OCI-type HelmRepository for the jetstack charts (replaces the previous
# chart-specific OCIRepository; the chart itself is selected in the HelmRelease).
apiVersion: source.toolkit.fluxcd.io/v1
kind: HelmRepository
metadata:
  name: cert-manager
  namespace: cert-manager
spec:
  type: "oci"
  interval: 24h
  url: oci://quay.io/jetstack/charts/

View File

@@ -1,6 +1,6 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- ./namespace.yaml
- ./oci-repository.yaml
- ./helm-release.yaml
- namespace.yaml
- helm-repository.yaml
- helm-release.yaml

View File

@@ -1,13 +0,0 @@
apiVersion: source.toolkit.fluxcd.io/v1
kind: OCIRepository
metadata:
name: cert-manager
namespace: cert-manager
spec:
interval: 24h
url: oci://quay.io/jetstack/charts/cert-manager
layerSelector:
mediaType: "application/vnd.cncf.helm.chart.content.v1.tar+gzip"
operation: copy
ref:
semver: "1.19.x"

View File

@@ -29,5 +29,3 @@ gatewayAPI:
enabled: true
enableAlpn: true
enableAppProtocol: true
loadBalancer:
mode: dsr

View File

@@ -2,7 +2,7 @@ apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
name: dragonfly-operator
namespace: dragonfly-operator
namespace: flux-system
spec:
interval: 1h
retryInterval: 2m

View File

@@ -0,0 +1,16 @@
# Restore variant of the CNPG Cluster: bootstraps `db` via barman-cloud
# recovery from the `garage-store` object store. Applied as a patch only
# when the commented-out `patches:` block in kustomization.yaml is enabled.
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  name: db
spec:
  bootstrap:
    recovery:
      source: source
  externalClusters:
    - name: source
      plugin:
        name: barman-cloud.cloudnative-pg.io
        parameters:
          barmanObjectName: garage-store
          serverName: db
  # NOTE(review): empty plugins list while restoring — presumably WAL
  # archiving is re-enabled after recovery; confirm intended.
  plugins: []

View File

@@ -0,0 +1,15 @@
# CNPG Postgres cluster in the monitoring namespace (presumably backing
# Grafana — values.yaml mounts the `db-app` secret for its database config).
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  name: db
  namespace: monitoring
  # TODO: Add labels?
spec:
  storage:
    size: 8Gi
    storageClass: local-path
  plugins:
    - name: barman-cloud.cloudnative-pg.io
      isWALArchiver: true
      parameters:
        barmanObjectName: garage-store

View File

@@ -0,0 +1,7 @@
# Dragonfly (Redis-compatible) instance; referenced as Grafana's
# remote_cache in values.yaml.
apiVersion: dragonflydb.io/v1alpha1
kind: Dragonfly
metadata:
  name: dragonfly
  namespace: monitoring
spec:
  replicas: 1

View File

@@ -0,0 +1,53 @@
# This file is packaged into the `grafana-ldap-toml` secret (key `ldap-toml`)
# by the kustomization's secretGenerator and wired into the chart via
# grafana.ldap.existingSecret in the HelmRelease values.
# This is only the ldap config, you also need to enable ldap support in the main config file
# of Grafana. See https://grafana.com/docs/grafana/latest/auth/ldap/#enable-ldap
# You can test that it is working correctly by trying usernames at: https://<your grafana instance>/admin/ldap
[[servers]]
# Ldap server host (specify multiple hosts space separated)
host = "lldap.lldap.svc.cluster.local"
# Default port is 389 or 636 if use_ssl = true
port = 3890
# Set to true if LDAP server should use an encrypted TLS connection (either with STARTTLS or LDAPS)
use_ssl = false
# If set to true, use LDAP with STARTTLS instead of LDAPS
start_tls = false
# set to true if you want to skip SSL cert validation
ssl_skip_verify = false
# set to the path to your root CA certificate or leave unset to use system defaults
# root_ca_cert = "/path/to/certificate.crt"
# Authentication against LDAP servers requiring client certificates
# client_cert = "/path/to/client.crt"
# client_key = "/path/to/client.key"
# ${BIND_DN} / ${LDAP_PASSWORD} are expanded by Grafana from environment
# variables of the same name, set via grafana.envValueFrom in values.yaml.
# Search user bind dn
bind_dn = "${BIND_DN}"
# Search user bind password
# If the password contains # or ; you have to wrap it with triple quotes. Ex """#password;"""
bind_password = "${LDAP_PASSWORD}"
# User search filter
search_filter = "(&(|(uid=%s)(mail=%s))(objectClass=person))"
# If you want to limit to only users of a specific group use this instead:
# search_filter = "(&(uid=%s)(memberOf=cn=<your group>,ou=groups,dc=huizinga,dc=dev))"
# An array of base dns to search through
search_base_dns = ["dc=huizinga,dc=dev"]
# Specify names of the LDAP attributes your LDAP uses
[servers.attributes]
member_of = "memberOf"
email = "mail"
name = "givenName"
surname = "sn"
username = "uid"
# If you want to map your ldap groups to grafana's groups, see: https://grafana.com/docs/grafana/latest/auth/ldap/#group-mappings
# As a quick example, here is how you would map lldap's admin group to grafana's admin
[[servers.group_mappings]]
group_dn = "cn=lldap_admin,ou=groups,dc=huizinga,dc=dev"
org_role = "Admin"
grafana_admin = true
[[servers.group_mappings]]
group_dn = "*"
org_role = "Viewer"

View File

@@ -0,0 +1,30 @@
# kube-prometheus-stack HelmRelease. Base values come from the generated
# `values-base` ConfigMap; name-reference.yaml rewrites the generated
# (hash-suffixed) ConfigMap/Secret names into the fields below.
apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
  name: kube-prometheus-stack
  namespace: monitoring
spec:
  interval: 12h
  install:
    strategy:
      name: RetryOnFailure
      retryInterval: 2m
  upgrade:
    strategy:
      name: RetryOnFailure
      retryInterval: 3m
  chart:
    spec:
      chart: kube-prometheus-stack
      version: "80.6.x"
      sourceRef:
        kind: HelmRepository
        name: prometheus-community
      interval: 24h
  values:
    grafana:
      ldap:
        # Secret holding grafana-ldap.toml (built by the secretGenerator)
        existingSecret: grafana-ldap-toml
  valuesFrom:
    - kind: ConfigMap
      name: values-base

View File

@@ -0,0 +1,9 @@
# OCI-type HelmRepository for the prometheus-community charts.
apiVersion: source.toolkit.fluxcd.io/v1
kind: HelmRepository
metadata:
  name: prometheus-community
  namespace: monitoring
spec:
  type: "oci"
  interval: 24h
  url: oci://ghcr.io/prometheus-community/charts/

View File

@@ -0,0 +1,18 @@
# Flux Kustomization applying the kube-prometheus-stack artifact.
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
  name: kube-prometheus-stack
  namespace: flux-system
spec:
  interval: 1h
  retryInterval: 2m
  timeout: 5m
  dependsOn:
    - name: dragonfly-operator
    - name: lldap-controller
    # The artifact also creates a postgresql.cnpg.io Cluster, so wait for
    # the CNPG operator too (mirrors the authelia Kustomization's dependsOn).
    - name: cnpg
  sourceRef:
    kind: ExternalArtifact
    name: kube-prometheus-stack
  path: ./
  prune: true
  wait: true

View File

@@ -0,0 +1,32 @@
# Kustomize entry point for the monitoring stack. Generated ConfigMap/Secret
# names get hash suffixes; name-reference.yaml propagates them into the
# HelmRelease.
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  - namespace.yaml
  - helm-repository.yaml
  - helm-release.yaml
  - dragonfly.yaml
  - cluster.yaml
  - secret-s3-garage.yaml
  - object-store.yaml
  - service-user.yaml
configurations:
  - name-reference.yaml
configMapGenerator:
  - name: values-base
    namespace: monitoring
    files:
      - values.yaml
secretGenerator:
  - name: grafana-ldap-toml
    namespace: monitoring
    files:
      - ldap-toml=grafana-ldap.toml
# Uncomment to restore database from backup
# patches:
#   - path: cluster-restore.yaml
#     target:
#       kind: Cluster

View File

@@ -0,0 +1,11 @@
# Kustomize name-reference configuration: teaches kustomize where the
# generated (hash-suffixed) ConfigMap/Secret names are referenced inside the
# HelmRelease, so the suffixes are rewritten there as well.
nameReference:
  - kind: ConfigMap
    version: v1
    fieldSpecs:
      - path: spec/valuesFrom/name
        kind: HelmRelease
  - kind: Secret
    version: v1
    fieldSpecs:
      - path: spec/values/grafana/ldap/existingSecret
        kind: HelmRelease

View File

@@ -0,0 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
  name: monitoring

View File

@@ -0,0 +1,20 @@
# Barman-cloud ObjectStore used by the CNPG cluster for WAL archiving/backup.
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
  name: garage-store
  namespace: monitoring
spec:
  configuration:
    # NOTE(review): path ends in "authelia" although this store lives in the
    # monitoring namespace; if authelia archives to the same path with the
    # same serverName ("db"), the WAL archives would collide — confirm this
    # is intended (s3://cnpg-backup/monitoring seems more likely).
    destinationPath: s3://cnpg-backup/authelia
    s3Credentials:
      accessKeyId:
        name: s3-garage
        key: ACCESS_KEY_ID
      secretAccessKey:
        name: s3-garage
        key: ACCESS_SECRET_KEY
      region:
        name: s3-garage
        key: REGION
    wal:
      compression: gzip

View File

@@ -0,0 +1,9 @@
# Placeholder Secret kept in sync by Reflector: mirrors cnpg-system/s3-garage
# into the monitoring namespace so the ObjectStore can read the S3 credentials.
apiVersion: v1
kind: Secret
metadata:
  name: s3-garage
  namespace: monitoring
  annotations:
    reflector.v1.k8s.emberstack.com/reflects: "cnpg-system/s3-garage"
type: Opaque
data: {}

View File

@@ -0,0 +1,6 @@
# LLDAP service account for Grafana — presumably provides the
# grafana-lldap-credentials secret consumed in values.yaml; confirm against
# the lldap-controller's behavior.
apiVersion: lldap.huizinga.dev/v1
kind: ServiceUser
metadata:
  name: grafana
  namespace: monitoring
spec: {}

View File

@@ -0,0 +1,86 @@
alertmanager:
  # was `alertsmanagerSpec` — the kube-prometheus-stack values key is
  # `alertmanagerSpec`; the misspelled key is silently ignored by the chart.
  alertmanagerSpec:
    replicas: 1
  route:
    main:
      enabled: true
      hostnames:
        - "alerts.staging.huizinga.dev"
      parentRefs:
        - name: gateway
          namespace: default
prometheus:
  prometheusSpec:
    replicas: 1
  route:
    main:
      enabled: true
      hostnames:
        - "prometheus.staging.huizinga.dev"
      parentRefs:
        - name: gateway
          namespace: default
grafana:
  replicas: 1
  # ingress:
  #   enabled: true
  #   hosts:
  #     - grafana.${domain}
  #   tls:
  #     - secretName: ${domain//./-}-tls
  #   annotations:
  #     traefik.ingress.kubernetes.io/router.entryPoints: "websecure"
  #     traefik.ingress.kubernetes.io/router.middlewares: "authelia-forwardauth-authelia@kubernetescrd"
  #     traefik.ingress.kubernetes.io/router.tls: "true"
  # Env vars consumed by the ${...} placeholders in grafana-ldap.toml.
  envValueFrom:
    BIND_DN:
      secretKeyRef:
        name: grafana-lldap-credentials
        key: bind_dn
    LDAP_PASSWORD:
      secretKeyRef:
        name: grafana-lldap-credentials
        key: password
  grafana.ini:
    auth.ldap:
      enabled: true
    # auth.proxy:
    #   enabled: true
    #   header_name: Remote-User
    #   header_property: username
    #   auto_sign_up: true
    #   headers: Groups:Remote-Group
    #   enable_login_token: false
    #   sync_ttl: 0
    #   signout_redirect_url: https://login.${domain}/logout?rd=https://grafana.${domain}
    database:
      type: postgres
      host: $__file{/etc/secrets/db/host}
      name: $__file{/etc/secrets/db/dbname}
      user: $__file{/etc/secrets/db/user}
      password: $__file{/etc/secrets/db/password}
    remote_cache:
      type: redis
      # NOTE(review): the Dragonfly resource in this directory is named
      # "dragonfly", so its Service is expected at dragonfly.monitoring —
      # "dragonflydb" looks stale; confirm the operator's service name.
      connstr: addr=dragonflydb.monitoring:6379
  ldap:
    enabled: true
  extraSecretMounts:
    - name: db-app-mount
      secretName: db-app
      defaultMode: 0440
      mountPath: /etc/secrets/db
      readOnly: true
# We are not running kube-proxy
kubeProxy:
  enabled: false