Compare commits


1 Commit

Author    SHA1          Message                                      Date
          d5e336c80b    feat: Added patch for restoring database     2025-12-19 04:05:10 +01:00

All checks were successful:
  kustomization/flux-system/7f3678ff reconciliation succeeded
  kustomization/lldap/7f3678ff reconciliation succeeded
  kustomization/spegel/7f3678ff reconciliation succeeded
  kustomization/longhorn-jobs/7f3678ff reconciliation succeeded
  kustomization/cilium-config/7f3678ff reconciliation succeeded
  kustomization/reflector/7f3678ff reconciliation succeeded
  kustomization/alerts/7f3678ff reconciliation succeeded
  kustomization/cilium/7f3678ff reconciliation succeeded
  kustomization/cert-manager/7f3678ff reconciliation succeeded
  kustomization/local-path-provisioner/7f3678ff reconciliation succeeded
  kustomization/longhorn/7f3678ff reconciliation succeeded
  kustomization/cnpg/7f3678ff reconciliation succeeded
  kustomization/certificates/7f3678ff reconciliation succeeded

47 changed files with 21 additions and 739 deletions
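
How the restore patch is meant to be used (a minimal sketch, assuming the file names that appear in the diffs below): uncommenting the patches block in a kustomization.yaml overlays cluster-restore.yaml onto the CNPG Cluster, which then bootstraps via recovery from the barman-cloud ObjectStore (garage-store) instead of initializing an empty database.

  # Sketch only: the commented-out block from kustomization.yaml, enabled
  patches:
    - path: cluster-restore.yaml  # sets spec.bootstrap.recovery against the garage-store ObjectStore
      target:
        kind: Cluster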

View File

@@ -19,8 +19,3 @@ spec:
      copy:
        - from: "@foundation/apps/lldap/**"
          to: "@artifact/"
    - name: authelia
      originRevision: "@foundation"
      copy:
        - from: "@foundation/apps/authelia/**"
          to: "@artifact/"

View File

@@ -1,20 +0,0 @@
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
  name: authelia
  namespace: flux-system
spec:
  interval: 1h
  retryInterval: 2m
  timeout: 5m
  dependsOn:
    - name: cnpg
    - name: lldap-controller
    - name: dragonfly-operator
    - name: authelia-controller
  sourceRef:
    kind: ExternalArtifact
    name: authelia
  path: ./${cluster_env}
  prune: true
  wait: true

View File

@@ -1,16 +0,0 @@
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  name: db
spec:
  bootstrap:
    recovery:
      source: source
  externalClusters:
    - name: source
      plugin:
        name: barman-cloud.cloudnative-pg.io
        parameters:
          barmanObjectName: garage-store
          serverName: db
  plugins: []

View File

@@ -1,15 +0,0 @@
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  name: db
  namespace: authelia
  # TODO: Add labels?
spec:
  storage:
    size: 8Gi
    storageClass: local-path
  plugins:
    - name: barman-cloud.cloudnative-pg.io
      isWALArchiver: true
      parameters:
        barmanObjectName: garage-store

View File

@@ -1,29 +0,0 @@
apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
  name: authelia
  namespace: authelia
spec:
  interval: 12h
  install:
    strategy:
      name: RetryOnFailure
      retryInterval: 2m
  upgrade:
    strategy:
      name: RetryOnFailure
      retryInterval: 3m
  chart:
    spec:
      chart: authelia
      version: "0.10.x"
      sourceRef:
        kind: HelmRepository
        name: authelia
      interval: 24h
  valuesFrom:
    - kind: ConfigMap
      name: values-base
    - kind: ConfigMap
      name: values-overlay
      optional: true

View File

@@ -1,8 +0,0 @@
apiVersion: source.toolkit.fluxcd.io/v1
kind: HelmRepository
metadata:
  name: authelia
  namespace: authelia
spec:
  interval: 24h
  url: https://charts.authelia.com

View File

@@ -1,25 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  - namespace.yaml
  - cluster.yaml
  - service-user.yaml
  - helm-repository.yaml
  - helm-release.yaml
  - secret-s3-garage.yaml
  - object-store.yaml
configurations:
  - name-reference.yaml
configMapGenerator:
  - name: values-base
    namespace: authelia
    files:
      - values.yaml
# Uncomment to restore database from backup
# patches:
#   - path: cluster-restore.yaml
#     target:
#       kind: Cluster

View File

@@ -1,6 +0,0 @@
nameReference:
  - kind: ConfigMap
    version: v1
    fieldSpecs:
      - path: spec/valuesFrom/name
        kind: HelmRelease

View File

@@ -1,5 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
  name: authelia
  namespace: authelia

View File

@@ -1,20 +0,0 @@
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
  name: garage-store
  namespace: authelia
spec:
  configuration:
    destinationPath: s3://cnpg-backup/authelia
    s3Credentials:
      accessKeyId:
        name: s3-garage
        key: ACCESS_KEY_ID
      secretAccessKey:
        name: s3-garage
        key: ACCESS_SECRET_KEY
      region:
        name: s3-garage
        key: REGION
    wal:
      compression: gzip

View File

@@ -1,9 +0,0 @@
apiVersion: v1
kind: Secret
metadata:
  name: s3-garage
  namespace: authelia
  annotations:
    reflector.v1.k8s.emberstack.com/reflects: "cnpg-system/s3-garage"
type: Opaque
data: {}

View File

@@ -1,6 +0,0 @@
apiVersion: lldap.huizinga.dev/v1
kind: ServiceUser
metadata:
  name: authelia
  namespace: authelia
spec: {}

View File

@@ -1,61 +0,0 @@
pod:
  kind: Deployment
  replicas: 2
ingress:
  enabled: true
  gatewayAPI:
    enabled: true
    parentRefs:
      - name: gateway
        namespace: default
secret:
  additionalSecrets:
    db-app:
      key: db-app
    authelia-lldap-credentials:
      key: authelia-lldap-credentials
configMap:
  authentication_backend:
    ldap:
      enabled: true
      implementation: lldap
      address: ldap://lldap.lldap.svc.cluster.local:3890
      base_dn: dc=huizinga,dc=dev
      additional_users_dn: ou=people
      users_filter: "(&(|({username_attribute}={input})({mail_attribute}={input}))(objectClass=person))"
      additional_groups_dn: ou=groups
      groups_filter: "(member={dn})"
      attributes:
        display_name: displayName
        username: uid
        group_name: cn
        mail: mail
      user: uid=authelia.authelia,ou=people,dc=huizinga,dc=dev
      password:
        secret_name: authelia-lldap-credentials
        path: password
  session:
    redis:
      enabled: true
      host: dragonfly.authelia
  storage:
    postgres:
      enabled: true
      address: tcp://db-rw.authelia:5432
      database: app
      username: app
      password:
        secret_name: db-app
        path: password
  notifier:
    filesystem:
      enabled: true
  access_control:
    secret:
      existingSecret: authelia-acl

View File

@@ -1,6 +0,0 @@
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  name: db
spec:
  instances: 2

View File

@@ -1,7 +0,0 @@
apiVersion: dragonflydb.io/v1alpha1
kind: Dragonfly
metadata:
  name: dragonfly
  namespace: authelia
spec:
  replicas: 2

View File

@@ -1,16 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  - ../base
  - dragonfly.yaml
patches:
  - path: cluster.yaml
    target:
      kind: Cluster
configMapGenerator:
  - name: values-overlay
    namespace: authelia
    files:
      - values.yaml

View File

@@ -1,8 +0,0 @@
pod:
  replicas: 2
configMap:
  session:
    cookies:
      - subdomain: auth
        domain: huizinga.dev

View File

@@ -1,6 +0,0 @@
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  name: db
spec:
  instances: 1

View File

@@ -1,7 +0,0 @@
apiVersion: dragonflydb.io/v1alpha1
kind: Dragonfly
metadata:
  name: dragonfly
  namespace: authelia
spec:
  replicas: 1

View File

@@ -1,19 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  - ../base
  - dragonfly.yaml
patches:
  - path: cluster.yaml
    target:
      kind: Cluster
  - path: object-store.yaml
    target:
      kind: ObjectStore
configMapGenerator:
  - name: values-overlay
    namespace: authelia
    files:
      - values.yaml

View File

@@ -1,8 +0,0 @@
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
  name: garage-store
  namespace: lldap
spec:
  configuration:
    endpointURL: http://192.168.1.1:3900

View File

@@ -1,11 +0,0 @@
pod:
  replicas: 1
configMap:
  log:
    level: debug
  session:
    cookies:
      - subdomain: auth
        domain: staging.huizinga.dev

View File

@@ -1,7 +1,7 @@
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  name: db
  name: lldap-db
spec:
  bootstrap:
    recovery:
@@ -12,5 +12,5 @@ spec:
        name: barman-cloud.cloudnative-pg.io
        parameters:
          barmanObjectName: garage-store
          serverName: db
          serverName: lldap-db
  plugins: []

View File

@@ -10,10 +10,6 @@ resources:
  - ../../controllers/local-path-provisioner/local-path-provisioner.yaml
  - ../../controllers/cnpg/cnpg.yaml
  - ../../controllers/reflector/reflector.yaml
  - ../../controllers/lldap-controller/lldap-controller.yaml
  - ../../controllers/authelia-controller/authelia-controller.yaml
  - ../../controllers/dragonfly-operator/dragonfly-operator.yaml
  - ../../controllers/kube-prometheus-stack/kube-prometheus-stack.yaml
  - ../../configs/artifacts.yaml
  - ../../configs/cilium-config/cilium-config.yaml
@@ -24,4 +20,3 @@ resources:
  - ../../apps/artifacts.yaml
  - ../../apps/spegel/spegel.yaml
  - ../../apps/lldap/lldap.yaml
  - ../../apps/authelia/authelia.yaml

View File

@@ -44,13 +44,3 @@ spec:
      copy:
        - from: "@foundation/controllers/reflector/**"
          to: "@artifact/"
    - name: dragonfly-operator
      originRevision: "@foundation"
      copy:
        - from: "@foundation/controllers/dragonfly-operator/**"
          to: "@artifact/"
    - name: kube-prometheus-stack
      originRevision: "@foundation"
      copy:
        - from: "@foundation/controllers/kube-prometheus-stack/**"
          to: "@artifact/"

View File

@@ -1,26 +0,0 @@
apiVersion: source.toolkit.fluxcd.io/v1beta2
kind: OCIRepository
metadata:
  name: authelia-controller
  namespace: flux-system
spec:
  interval: 1m0s
  url: oci://git.huizinga.dev/infra/authelia-controller/manifests
  ref:
    tag: edge
---
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
  name: authelia-controller
  namespace: flux-system
spec:
  interval: 1h
  retryInterval: 2m
  timeout: 5m
  sourceRef:
    kind: OCIRepository
    name: authelia-controller
  path: ./
  prune: true
  wait: true

View File

@@ -13,14 +13,9 @@ spec:
    strategy:
      name: RetryOnFailure
      retryInterval: 3m
  chart:
    spec:
      chart: cert-manager
      version: "1.19.x"
      sourceRef:
        kind: HelmRepository
  chartRef:
    kind: OCIRepository
    name: cert-manager
      interval: 24h
  values:
    crds:
      enabled: true

View File

@@ -1,9 +0,0 @@
apiVersion: source.toolkit.fluxcd.io/v1
kind: HelmRepository
metadata:
  name: cert-manager
  namespace: cert-manager
spec:
  type: "oci"
  interval: 24h
  url: oci://quay.io/jetstack/charts/

View File

@@ -1,6 +1,6 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  - namespace.yaml
  - helm-repository.yaml
  - helm-release.yaml
  - ./namespace.yaml
  - ./oci-repository.yaml
  - ./helm-release.yaml

View File

@@ -0,0 +1,13 @@
apiVersion: source.toolkit.fluxcd.io/v1
kind: OCIRepository
metadata:
  name: cert-manager
  namespace: cert-manager
spec:
  interval: 24h
  url: oci://quay.io/jetstack/charts/cert-manager
  layerSelector:
    mediaType: "application/vnd.cncf.helm.chart.content.v1.tar+gzip"
    operation: copy
  ref:
    semver: "1.19.x"

View File

@@ -1,15 +0,0 @@
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
  name: dragonfly-operator
  namespace: flux-system
spec:
  interval: 1h
  retryInterval: 2m
  timeout: 5m
  sourceRef:
    kind: ExternalArtifact
    name: dragonfly-operator
  path: .
  prune: true
  wait: true

View File

@@ -1,4 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  - https://raw.githubusercontent.com/dragonflydb/dragonfly-operator/v1.3.1/manifests/dragonfly-operator.yaml

View File

@@ -1,16 +0,0 @@
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  name: db
spec:
  bootstrap:
    recovery:
      source: source
  externalClusters:
    - name: source
      plugin:
        name: barman-cloud.cloudnative-pg.io
        parameters:
          barmanObjectName: garage-store
          serverName: db
  plugins: []

View File

@@ -1,15 +0,0 @@
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  name: db
  namespace: monitoring
  # TODO: Add labels?
spec:
  storage:
    size: 8Gi
    storageClass: local-path
  plugins:
    - name: barman-cloud.cloudnative-pg.io
      isWALArchiver: true
      parameters:
        barmanObjectName: garage-store

View File

@@ -1,7 +0,0 @@
apiVersion: dragonflydb.io/v1alpha1
kind: Dragonfly
metadata:
  name: dragonfly
  namespace: monitoring
spec:
  replicas: 1

View File

@@ -1,53 +0,0 @@
# This is only the ldap config, you also need to enable ldap support in the main config file
# of Grafana. See https://grafana.com/docs/grafana/latest/auth/ldap/#enable-ldap
# You can test that it is working correctly by trying usernames at: https://<your grafana instance>/admin/ldap
[[servers]]
# Ldap server host (specify multiple hosts space separated)
host = "lldap.lldap.svc.cluster.local"
# Default port is 389 or 636 if use_ssl = true
port = 3890
# Set to true if LDAP server should use an encrypted TLS connection (either with STARTTLS or LDAPS)
use_ssl = false
# If set to true, use LDAP with STARTTLS instead of LDAPS
start_tls = false
# set to true if you want to skip SSL cert validation
ssl_skip_verify = false
# set to the path to your root CA certificate or leave unset to use system defaults
# root_ca_cert = "/path/to/certificate.crt"
# Authentication against LDAP servers requiring client certificates
# client_cert = "/path/to/client.crt"
# client_key = "/path/to/client.key"
# Search user bind dn
bind_dn = "${BIND_DN}"
# Search user bind password
# If the password contains # or ; you have to wrap it with triple quotes. Ex """#password;"""
bind_password = "${LDAP_PASSWORD}"
# User search filter
search_filter = "(&(|(uid=%s)(mail=%s))(objectClass=person))"
# If you want to limit to only users of a specific group use this instead:
# search_filter = "(&(uid=%s)(memberOf=cn=<your group>,ou=groups,dc=huizinga,dc=dev))"
# An array of base dns to search through
search_base_dns = ["dc=huizinga,dc=dev"]
# Specify names of the LDAP attributes your LDAP uses
[servers.attributes]
member_of = "memberOf"
email = "mail"
name = "givenName"
surname = "sn"
username = "uid"
# If you want to map your ldap groups to grafana's groups, see: https://grafana.com/docs/grafana/latest/auth/ldap/#group-mappings
# As a quick example, here is how you would map lldap's admin group to grafana's admin
[[servers.group_mappings]]
group_dn = "cn=lldap_admin,ou=groups,dc=huizinga,dc=dev"
org_role = "Admin"
grafana_admin = true
[[servers.group_mappings]]
group_dn = "*"
org_role = "Viewer"

View File

@@ -1,30 +0,0 @@
apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
  name: kube-prometheus-stack
  namespace: monitoring
spec:
  interval: 12h
  install:
    strategy:
      name: RetryOnFailure
      retryInterval: 2m
  upgrade:
    strategy:
      name: RetryOnFailure
      retryInterval: 3m
  chart:
    spec:
      chart: kube-prometheus-stack
      version: "80.6.x"
      sourceRef:
        kind: HelmRepository
        name: prometheus-community
      interval: 24h
  values:
    grafana:
      ldap:
        existingSecret: grafana-ldap-toml
  valuesFrom:
    - kind: ConfigMap
      name: values-base

View File

@@ -1,9 +0,0 @@
apiVersion: source.toolkit.fluxcd.io/v1
kind: HelmRepository
metadata:
  name: prometheus-community
  namespace: monitoring
spec:
  type: "oci"
  interval: 24h
  url: oci://ghcr.io/prometheus-community/charts/

View File

@@ -1,18 +0,0 @@
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
  name: kube-prometheus-stack
  namespace: flux-system
spec:
  interval: 1h
  retryInterval: 2m
  timeout: 5m
  dependsOn:
    - name: dragonfly-operator
    - name: lldap-controller
  sourceRef:
    kind: ExternalArtifact
    name: kube-prometheus-stack
  path: ./
  prune: true
  wait: true

View File

@@ -1,32 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  - namespace.yaml
  - helm-repository.yaml
  - helm-release.yaml
  - dragonfly.yaml
  - cluster.yaml
  - secret-s3-garage.yaml
  - object-store.yaml
  - service-user.yaml
configurations:
  - name-reference.yaml
configMapGenerator:
  - name: values-base
    namespace: monitoring
    files:
      - values.yaml
secretGenerator:
  - name: grafana-ldap-toml
    namespace: monitoring
    files:
      - ldap-toml=grafana-ldap.toml
# Uncomment to restore database from backup
# patches:
#   - path: cluster-restore.yaml
#     target:
#       kind: Cluster

View File

@@ -1,11 +0,0 @@
nameReference:
  - kind: ConfigMap
    version: v1
    fieldSpecs:
      - path: spec/valuesFrom/name
        kind: HelmRelease
  - kind: Secret
    version: v1
    fieldSpecs:
      - path: spec/values/grafana/ldap/existingSecret
        kind: HelmRelease

View File

@@ -1,4 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
  name: monitoring

View File

@@ -1,20 +0,0 @@
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
  name: garage-store
  namespace: monitoring
spec:
  configuration:
    destinationPath: s3://cnpg-backup/authelia
    s3Credentials:
      accessKeyId:
        name: s3-garage
        key: ACCESS_KEY_ID
      secretAccessKey:
        name: s3-garage
        key: ACCESS_SECRET_KEY
      region:
        name: s3-garage
        key: REGION
    wal:
      compression: gzip

View File

@@ -1,9 +0,0 @@
apiVersion: v1
kind: Secret
metadata:
  name: s3-garage
  namespace: monitoring
  annotations:
    reflector.v1.k8s.emberstack.com/reflects: "cnpg-system/s3-garage"
type: Opaque
data: {}

View File

@@ -1,6 +0,0 @@
apiVersion: lldap.huizinga.dev/v1
kind: ServiceUser
metadata:
  name: grafana
  namespace: monitoring
spec: {}

View File

@@ -1,86 +0,0 @@
alertmanager:
  alertsmanagerSpec:
    replicas: 1
  route:
    main:
      enabled: true
      hostnames:
        - "alerts.staging.huizinga.dev"
      parentRefs:
        - name: gateway
          namespace: default
prometheus:
  prometheusSpec:
    replicas: 1
  route:
    main:
      enabled: true
      hostnames:
        - "prometheus.staging.huizinga.dev"
      parentRefs:
        - name: gateway
          namespace: default
grafana:
  replicas: 1
  # ingress:
  #   enabled: true
  #   hosts:
  #     - grafana.${domain}
  #   tls:
  #     - secretName: ${domain//./-}-tls
  #   annotations:
  #     traefik.ingress.kubernetes.io/router.entryPoints: "websecure"
  #     traefik.ingress.kubernetes.io/router.middlewares: "authelia-forwardauth-authelia@kubernetescrd"
  #     traefik.ingress.kubernetes.io/router.tls: "true"
  envValueFrom:
    BIND_DN:
      secretKeyRef:
        name: grafana-lldap-credentials
        key: bind_dn
    LDAP_PASSWORD:
      secretKeyRef:
        name: grafana-lldap-credentials
        key: password
  grafana.ini:
    auth.ldap:
      enabled: true
    # auth.proxy:
    #   enabled: true
    #   header_name: Remote-User
    #   header_property: username
    #   auto_sign_up: true
    #   headers: Groups:Remote-Group
    #   enable_login_token: false
    #   sync_ttl: 0
    #   signout_redirect_url: https://login.${domain}/logout?rd=https://grafana.${domain}
    database:
      type: postgres
      host: $__file{/etc/secrets/db/host}
      name: $__file{/etc/secrets/db/dbname}
      user: $__file{/etc/secrets/db/user}
      password: $__file{/etc/secrets/db/password}
    remote_cache:
      type: redis
      connstr: addr=dragonflydb.monitoring:6379
  ldap:
    enabled: true
  extraSecretMounts:
    - name: db-app-mount
      secretName: db-app
      defaultMode: 0440
      mountPath: /etc/secrets/db
      readOnly: true
# We are not running kube-proxy
kubeProxy:
  enabled: false

View File

@@ -1,28 +0,0 @@
apiVersion: source.toolkit.fluxcd.io/v1beta2
kind: OCIRepository
metadata:
  name: lldap-controller
  namespace: flux-system
spec:
  interval: 1m0s
  url: oci://git.huizinga.dev/infra/lldap-controller/manifests
  ref:
    tag: edge
---
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
  name: lldap-controller
  namespace: flux-system
spec:
  interval: 1h
  retryInterval: 2m
  timeout: 5m
  dependsOn:
    - name: lldap
  sourceRef:
    kind: OCIRepository
    name: lldap-controller
  path: ./
  prune: true
  wait: true