Compare commits


2 Commits

| SHA1 | Message | Date |
| --- | --- | --- |
| 66d2c03be4 | Added cilium | 2025-11-12 05:46:48 +01:00 |
| 3200aaebaa | Deepmerge node configs | 2025-11-12 04:20:21 +01:00 |
6 changed files with 121 additions and 11 deletions


@@ -65,3 +65,9 @@ Upgrading talos or changing the schematic:
```bash
talosctl upgrade --nodes <node_id> --image factory.talos.dev/metal-installer/<schematic_id>:<version>
```
To upgrade Kubernetes or the inline manifests, first apply the updated controlplane configs, then run:
```bash
talosctl upgrade-k8s
```
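
For the first step, a minimal sketch of pushing a regenerated config to a node (assuming the generated configs land under `configs/` as in the generation script below; the exact path and node selector depend on your setup):

```bash
# Hypothetical example: apply the regenerated controlplane config before upgrading
talosctl apply-config --nodes <node_id> --file configs/<node>.yaml
```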

nodes/_cilium_values.yaml (new file)

@@ -0,0 +1,31 @@
ipam:
  mode: kubernetes
kubeProxyReplacement: true
securityContext:
  capabilities:
    ciliumAgent:
      - CHOWN
      - KILL
      - NET_ADMIN
      - NET_RAW
      - IPC_LOCK
      - SYS_ADMIN
      - SYS_RESOURCE
      - DAC_OVERRIDE
      - FOWNER
      - SETGID
      - SETUID
    cleanCiliumState:
      - NET_ADMIN
      - SYS_ADMIN
      - SYS_RESOURCE
cgroup:
  autoMount:
    enabled: false
  hostRoot: /sys/fs/cgroup
k8sServiceHost: localhost
k8sServicePort: 7445
gatewayAPI:
  enabled: true
  enableAlpn: true
  enableAppProtocol: true
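
These values are consumed by the helm invocation in the generation script further down; to preview the manifests they produce, roughly the same command can be run by hand. A sketch, assuming the chart version 1.18.3 configured below and that you run it from the repo root:

```bash
# Render the Cilium manifests locally to inspect what will be inlined
helm repo add cilium https://helm.cilium.io/
helm repo update
helm template cilium cilium/cilium \
  --version 1.18.3 \
  --namespace kube-system \
  --values nodes/_cilium_values.yaml | less
```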


@@ -2,6 +2,10 @@ schematicId: !schematic default
arch: amd64
talosVersion: v1.11.3
kubernesVersion: v1.34.1
cluster:
  cilium:
    version: 1.18.3
    valuesFile: !realpath _cilium_values.yaml
kernelArgs:
  - talos.platform=metal
  - console=tty0
@@ -27,5 +31,6 @@ patches:
  - !patch network
  - !patch vip
  - !patch tailscale
  - !patch cilium
patchesControlPlane:
  - !patch allow-control-plane-workloads

patches/cilium.yaml (new file)

@@ -0,0 +1,11 @@
machine:
  features:
    hostDNS:
      # This option is enabled by default and causes issues with cilium
      forwardKubeDNSToHost: false
cluster:
  network:
    cni:
      name: none
  proxy:
    disabled: true
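
With the built-in CNI and kube-proxy disabled, the cluster only becomes healthy once the inlined Cilium manifests are applied. A quick way to verify after bootstrap, assuming a working kubeconfig for the cluster (the labels are the standard ones from the Cilium chart):

```bash
# Check that the Cilium agent and operator pods are running
kubectl -n kube-system get pods -l k8s-app=cilium
kubectl -n kube-system get pods -l name=cilium-operator
```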


@@ -2,6 +2,36 @@
set -euo pipefail
CONFIGS={{ root }}/configs

function create_inline_manifest() {
  # Add indentation
  CONTENT=$(echo "$3" | sed 's/^/        /')
  # Create inline manifest patch
  cat > $2 << EOF
cluster:
  inlineManifests:
    - name: ${1}
      contents: |
${CONTENT}
EOF
}

helm repo add cilium https://helm.cilium.io/
helm repo update
{% for cluster in clusters -%}
{% if "cilium" in cluster -%}
# Generate manifests
CONTENT=$(helm template \
cilium \
cilium/cilium \
--version {{ cluster.cilium.version }} \
--namespace kube-system \
--values {{ cluster.cilium.valuesFile }})
create_inline_manifest cilium ${CONFIGS}/{{cluster.name}}/cilium.yaml "${CONTENT}"
{% endif %}
{%- endfor %}
# Generate the configuration for each node
{% for node in nodes -%}
talosctl gen config {{ node.cluster.name }} https://{{ node.cluster.controlPlaneIp }}:6443 -f \
@@ -17,6 +47,9 @@ talosctl gen config {{ node.cluster.name }} https://{{ node.cluster.controlPlane
{% for patch in node.patchesControlPlane -%}
--config-patch-control-plane {{ patch|tojson|tojson }} \
{% endfor -%}
{% if "cilium" in node.cluster -%}
--config-patch-control-plane "@${CONFIGS}/{{node.cluster.name}}/cilium.yaml" \
{%- endif %}
--with-docs=false \
--with-examples=false \
-o ${CONFIGS}/{{ node.filename }}.yaml
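
Once generated, the merged machine configs can be sanity-checked before being applied. A sketch, assuming bare-metal nodes and the `configs/` output directory used above:

```bash
# Hypothetical post-generation check of a single node's config
talosctl validate --config configs/<node>.yaml --mode metal
```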


@@ -12,7 +12,7 @@ import git
import requests
import yaml
from jinja2 import Environment, FileSystemLoader, StrictUndefined, Template
-from mergedeep import merge
+from mergedeep import Strategy, merge
from netaddr import IPAddress

REPO = git.Repo(sys.path[0], search_parent_directories=True)
@@ -38,12 +38,24 @@ TEMPLATES = Environment(
)
# When we try to make a deep copy of the nodes dict it fails as the Template
# does not implement __deepcopy__, so this wrapper type facilitates that
class TemplateWrapper:
    def __init__(self, template: Template):
        self.template = template

    def __deepcopy__(self, memo):
        # NOTE: This is not a true deepcopy, but since we know we won't modify
        # the template this is fine.
        return self
def render_templates(node: dict, args: dict):
    class Inner(json.JSONEncoder):
        def default(self, o):
-            if isinstance(o, Template):
+            if isinstance(o, TemplateWrapper):
                try:
-                    rendered = o.render(args | {"node": node})
+                    rendered = o.template.render(args | {"node": node})
                except Exception as e:
                    e.add_note(f"While rendering for: {node['hostname']}")
                    raise e
@@ -84,7 +96,7 @@ def template_constructor(environment: Environment):
        patch_name = loader.construct_scalar(node)
        try:
            template = environment.get_template(f"{patch_name}.yaml")
-            return template
+            return TemplateWrapper(template)
        except Exception:
            raise yaml.MarkedYAMLError("Failed to load patch", node.start_mark)
@@ -125,7 +137,12 @@ def get_defaults(directory: pathlib.Path, root: pathlib.Path):
    # Stop recursion when reaching root directory
    if directory != root:
-        return get_defaults(directory.parent, root) | yml_data
+        return merge(
+            {},
+            get_defaults(directory.parent, root),
+            yml_data,
+            strategy=Strategy.TYPESAFE_REPLACE,
+        )
    else:
        return yml_data
@@ -143,7 +160,7 @@ def main():
        config = yaml.safe_load(fyaml)
    with open(ROOT.joinpath("secrets.yaml")) as fyaml:
-        merge(config, yaml.safe_load(fyaml))
+        merge(config, yaml.safe_load(fyaml), strategy=Strategy.TYPESAFE_REPLACE)

    template_args = {
        "config": config,
@@ -157,7 +174,12 @@ def main():
        with open(fullname) as fyaml:
            yml_data = yaml.load(fyaml, Loader=get_loader(fullname.parent))
-        yml_data = get_defaults(fullname.parent, NODES) | yml_data
+        yml_data = merge(
+            {},
+            get_defaults(fullname.parent, NODES),
+            yml_data,
+            strategy=Strategy.TYPESAFE_REPLACE,
+        )
        yml_data["hostname"] = fullname.stem
        yml_data["filename"] = filename
        nodes.append(yml_data)
@@ -172,11 +194,13 @@ def main():
        )
    )

-    # Get all clusters
+    # HACK: We can't hash a dict, so we first convert it to json, then use set
+    # to get all the unique entries, and then convert it back
    # NOTE: This assumes that all nodes in the cluster use the same definition for the cluster
-    clusters = [
-        dict(s) for s in set(frozenset(node["cluster"].items()) for node in nodes)
-    ]
+    clusters = list(
+        json.loads(cluster)
+        for cluster in set(json.dumps(node["cluster"]) for node in nodes)
+    )

    template_args |= {"nodes": nodes, "clusters": clusters}