Compare commits

...

21 Commits

Author SHA1 Message Date
e43e4847e9 Try to delete network even if vm does not exist 2025-11-09 04:05:38 +01:00
af49354786 Made secrets file configurable 2025-11-09 03:43:52 +01:00
83cadd99e4 Added yaml constructor that get the realpath of a file 2025-11-09 03:42:55 +01:00
8b806b7a38 Improved how the cluster is defined 2025-11-09 03:22:29 +01:00
20e96b33df Fixed wornding of control plane 2025-11-09 03:14:15 +01:00
2b7434c0e7 Removed unneeded --- from patches 2025-11-09 03:11:20 +01:00
08d73b95d4 Added source script to set environment variables 2025-11-09 03:07:20 +01:00
2cca38c860 Made repo root available for templates
This allows for embedding the repo root inside of, for example, scripts
to make them function properly no matter where they are run from.
2025-11-09 03:03:46 +01:00
d2a1eca146 Find root of repo that contains the actual script
This makes it possible to run the render script from anywhere and have
it still function correctly.
2025-11-09 03:03:21 +01:00
0049b5cb46 Moved logic for getting clusters to render script 2025-11-09 02:58:01 +01:00
3f8389ddd2 Made yaml template loader more generic 2025-11-09 02:26:35 +01:00
dac2864b2d Store template resolved nodes back in nodes object 2025-11-09 02:16:10 +01:00
85368a3126 Added template for config generation script 2025-11-09 02:16:10 +01:00
18b5d8fd18 Store patches as objects instead of strings 2025-11-09 02:05:08 +01:00
21ae5bc2c4 Added node types 2025-11-09 01:42:52 +01:00
17b0b05410 Added kubernetes version 2025-11-09 01:42:44 +01:00
8832371b99 Added jinja2 do extensions 2025-11-09 01:41:57 +01:00
a9fbf9aad8 Use consistent capitalization 2025-11-08 22:27:37 +01:00
4f072d7cb7 Moved around node config params 2025-11-08 22:23:43 +01:00
235ab5add7 Make python script runnable from anywhere 2025-11-08 21:47:45 +01:00
b0ac551c21 Render all template using python and jinja 2025-11-08 21:47:35 +01:00
24 changed files with 261 additions and 174 deletions

@@ -7,6 +7,6 @@ indent_style = tab
 indent_style = space
 indent_size = 4
-[{*.py,tools/merge}]
+[{*.py,tools/render}]
 indent_style = space
 indent_size = 4

.gitattributes (vendored)

@@ -1 +1 @@
-secrets.yaml filter=git-crypt diff=git-crypt
+_secrets.yaml filter=git-crypt diff=git-crypt

@@ -1,6 +1,7 @@
-schematicID: !schematic default
+schematicId: !schematic default
 arch: amd64
 talosVersion: v1.11.3
+kubernesVersion: v1.34.1
 kernelArgs:
   - talos.platform=metal
   - console=tty0
@@ -18,12 +19,11 @@ dns:
   - 1.1.1.1
   - 8.8.8.8
 ntp: nl.pool.ntp.org
-installDisk: /dev/sda
-install: false
+install: true
 patches:
   - !patch hostname
   - !patch install-disk
   - !patch network
   - !patch vip
-patchesControlplane:
-  - !patch allow-controlplane-workloads
+patchesControlPlane:
+  - !patch allow-control-plane-workloads

@@ -1,4 +1,7 @@
 netmask: 255.255.252.0
 gateway: 10.0.0.1
-install: true
-controlplaneIp: 10.0.2.1
+installDisk: /dev/sda
+cluster:
+  name: hellas
+  controlPlaneIp: 10.0.2.1
+secretsFile: !realpath _secrets.yaml

nodes/hellas/_secrets.yaml (new binary file, not shown)

@@ -1,3 +1,4 @@
 serial: 5CZ7NX2
 interface: enp2s0
 ip: 10.0.0.202
+type: "controlplane"

@@ -1,3 +1,4 @@
 serial: F3PKRH2
 interface: enp3s0
 ip: 10.0.0.201
+type: "controlplane"

@@ -1,3 +1,4 @@
 serial: J33CHY2
 interface: enp2s0
 ip: 10.0.0.203
+type: "controlplane"

@@ -1,5 +1,7 @@
 netmask: 255.255.255.0
 gateway: 192.168.1.1
-clusterName: testing
-controlplaneIp: 192.168.1.100
-instalDisk: /dev/vda
+installDisk: /dev/vda
+cluster:
+  name: testing
+  controlPlaneIp: 192.168.1.100
+secretsFile: !realpath _secrets.yaml

@@ -1,4 +1,4 @@
 serial: talos-vm
 interface: eth0
 ip: 192.168.1.2
-install: true
+type: "controlplane"

@@ -1,3 +1,2 @@
----
 cluster:
   allowSchedulingOnControlPlanes: true

@@ -1,4 +1,3 @@
----
 machine:
   network:
     hostname: {{hostname}}

@@ -1,4 +1,3 @@
----
 machine:
   install:
     disk: {{installDisk}}

@@ -1,4 +1,3 @@
----
 machine:
   network:
     interfaces:

@@ -1,7 +1,6 @@
----
 machine:
   network:
     interfaces:
       - interface: {{interface}}
         vip:
-          ip: {{controlplaneIp}}
+          ip: {{cluster.controlPlaneIp}}

@@ -1,3 +1,4 @@
 PyYAML==6.0.3
 requests==2.32.5
 Jinja2==3.1.6
+GitPython==3.1.45

@@ -5,21 +5,18 @@ dhcp
 echo Starting ${serial}
 :start
-# Is a known serial is set, execute that
-# If an unknown serial is set, exit
-# If no serial is set, ask the user
-goto node_${serial} || shell
+goto node_${serial} || exit
 # Default behavior (non install mode) is to exit iPXE script
-{{ range datasource "nodes" }}
-{{- if .install }}
-# {{ .filename }}
-:node_{{ .serial }}
-{{- $ipArg := printf "ip=%s::%s:%s:%s:%s::%s:%s:%s" .ip .gateway .netmask .hostname .interface (index .dns 0) (index .dns 1) .ntp }}
-{{- $kernelArgs := printf "%s %s %s" $ipArg (join .kernelArgs " ") (join .extraKernelArgs " ") }}
+{% for node in nodes %}
+{%- if node.install -%}
+# {{ node.filename }}
+:node_{{ node.serial }}
+{% set ipArg = "ip=" ~ node.ip ~ "::" ~ node.gateway ~ ":" ~ node.netmask ~ ":" ~ node.hostname ~ ":" ~ node.interface ~ "::" ~ node.dns[0] ~ ":" ~ node.dns[1] ~ ":" ~ node.ntp -%}
+{% set kernelArgs = ipArg ~ " " ~ node.kernelArgs ~ " " ~ node.extraKernelArgs -%}
 imgfree
-kernel https://pxe.factory.talos.dev/image/{{ .schematicID }}/{{ .talosVersion }}/kernel-{{ .arch }} {{ $kernelArgs }}
-initrd https://pxe.factory.talos.dev/image/{{ .schematicID }}/{{ .talosVersion }}/initramfs-{{ .arch }}.xz
+kernel https://pxe.factory.talos.dev/image/{{ node.schematicId }}/{{ node.talosVersion }}/kernel-{{ node.arch }} {{ kernelArgs }}
+initrd https://pxe.factory.talos.dev/image/{{ node.schematicId }}/{{ node.talosVersion }}/initramfs-{{ node.arch }}.xz
 boot
-{{- end }}
-{{ end }}
+{% endif %}
+{% endfor %}

@@ -1,4 +1,4 @@
-{{ $tftpIp := (ds "config").dhcp.tftpIp -}}
+{% set tftpIp = config.dhcp.tftpIp -%}
 enable-tftp
 tftp-root=/tftproot
@@ -9,9 +9,9 @@ dhcp-vendorclass=UEFI,PXEClient:Arch:00007
 dhcp-vendorclass=UEFI64,PXEClient:Arch:00009
 # 1st stage: pxe rom boot on ipxe
-dhcp-boot=net:BIOS,ipxe.pxe,{{ $tftpIp }},{{ $tftpIp }}
-dhcp-boot=net:UEFI,ipxe.efi,{{ $tftpIp }},{{ $tftpIp }}
-dhcp-boot=net:UEFI64,ipxe.efi,{{ $tftpIp }},{{ $tftpIp }}
+dhcp-boot=net:BIOS,ipxe.pxe,{{ tftpIp }},{{ tftpIp }}
+dhcp-boot=net:UEFI,ipxe.efi,{{ tftpIp }},{{ tftpIp }}
+dhcp-boot=net:UEFI64,ipxe.efi,{{ tftpIp }},{{ tftpIp }}
 # Based on logic in https://gist.github.com/robinsmidsrod/4008017
 # iPXE sends a 175 option, checking suboptions
@@ -30,11 +30,11 @@ tag-if=set:ipxe-ok,tag:ipxe-http,tag:ipxe-https
 # these create option 43 cruft, which is required in proxy mode
 # TFTP IP is required on all dhcp-boot lines (unless dnsmasq itself acts as tftp server?)
-pxe-service=tag:!ipxe-ok,X86PC,PXE,undionly.kpxe,{{ $tftpIp }}
-pxe-service=tag:!ipxe-ok,IA32_EFI,PXE,snponlyx32.efi,{{ $tftpIp }}
-pxe-service=tag:!ipxe-ok,BC_EFI,PXE,snponly.efi,{{ $tftpIp }}
-pxe-service=tag:!ipxe-ok,X86-64_EFI,PXE,snponly.efi,{{ $tftpIp }}
+pxe-service=tag:!ipxe-ok,X86PC,PXE,undionly.kpxe,{{ tftpIp }}
+pxe-service=tag:!ipxe-ok,IA32_EFI,PXE,snponlyx32.efi,{{ tftpIp }}
+pxe-service=tag:!ipxe-ok,BC_EFI,PXE,snponly.efi,{{ tftpIp }}
+pxe-service=tag:!ipxe-ok,X86-64_EFI,PXE,snponly.efi,{{ tftpIp }}
 # later match overrides previous, keep ipxe script last
 # server address must be non zero, but can be anything as long as iPXE script is not fetched over TFTP
-dhcp-boot=tag:ipxe-ok,boot.ipxe,,{{ $tftpIp }}
+dhcp-boot=tag:ipxe-ok,boot.ipxe,,{{ tftpIp }}


@@ -0,0 +1,38 @@
#!/usr/bin/env bash
set -euo pipefail
CONFIGS={{ root }}/configs
# Generate the configuration for each node
{% for node in nodes -%}
talosctl gen config {{ node.cluster.name }} https://{{ node.cluster.controlPlaneIp }}:6443 -f \
--with-secrets {{ node.cluster.secretsFile }} \
--talos-version {{ node.talosVersion }} \
--kubernetes-version {{ node.kubernesVersion }} \
--output-types {{ node.type }} \
--install-image factory.talos.dev/metal-installer/{{ node.schematicId }}:{{ node.talosVersion }} \
{% for patch in node.patches -%}
{# The double call to tojson is needed to properly escape the patch (object -> json -> string) -#}
--config-patch {{ patch|tojson|tojson }} \
{% endfor -%}
{% for patch in node.patchesControlPlane -%}
--config-patch-control-plane {{ patch|tojson|tojson }} \
{% endfor -%}
--with-docs=false \
--with-examples=false \
-o ${CONFIGS}/{{ node.filename }}.yaml
{% endfor %}
# Generate the talosconfig file for each cluster
{% for cluster in clusters -%}
talosctl gen config {{ cluster.name }} https://{{ cluster.controlPlaneIp }}:6443 -f \
--with-secrets {{ cluster.secretsFile }} \
--output-types talosconfig \
-o ${CONFIGS}/{{ cluster.name }}/talosconfig
{% endfor %}
# Create merged talosconfig
export TALOSCONFIG=${CONFIGS}/talosconfig
rm -f ${TALOSCONFIG}
{% for cluster in clusters -%}
talosctl config merge ${CONFIGS}/{{ cluster.name }}/talosconfig
{% endfor %}
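The inline comment above notes that each patch object is passed through tojson twice before it reaches --config-patch. A minimal standalone sketch of what the two passes produce (the patch dict below is a made-up example, not content from this repository):

# Minimal sketch of the object -> json -> string escaping used above.
# The patch dict is hypothetical; only the |tojson behaviour matters.
from jinja2 import Environment

env = Environment()
patch = {"machine": {"network": {"hostname": "example-node"}}}

once = env.from_string("{{ patch|tojson }}").render(patch=patch)
twice = env.from_string("{{ patch|tojson|tojson }}").render(patch=patch)

# First pass: bare JSON, which the shell would split on whitespace.
print(once)   # {"machine": {"network": {"hostname": "example-node"}}}
# Second pass: that JSON re-encoded as a double-quoted, escaped string,
# so it survives as a single argument to --config-patch.
print(twice)  # "{\"machine\": {\"network\": {\"hostname\": \"example-node\"}}}"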

templates/source.sh (new file)

@@ -0,0 +1,6 @@
export TALOSCONFIG={{ root }}/configs/talosconfig
{% set paths = [] %}
{%- for cluster in clusters -%}
{%- do paths.append(root ~ "/configs/" ~ cluster.name ~ "/kubeconfig") -%}
{% endfor -%}
export KUBECONFIG={{ paths|join(":") }}
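The {% do %} statement in this template is what requires the jinja2.ext.do extension enabled by the render script. A minimal standalone sketch of the same KUBECONFIG logic, using the two cluster names from this change set and a placeholder repo root:

# Rough sketch of rendering the KUBECONFIG line above; "/repo" is a
# placeholder root path, the cluster names come from this change set.
from jinja2 import Environment

env = Environment(extensions=["jinja2.ext.do"])
template = env.from_string(
    "{% set paths = [] %}"
    "{%- for cluster in clusters -%}"
    '{%- do paths.append(root ~ "/configs/" ~ cluster.name ~ "/kubeconfig") -%}'
    "{% endfor -%}"
    'export KUBECONFIG={{ paths|join(":") }}'
)
print(template.render(root="/repo", clusters=[{"name": "hellas"}, {"name": "testing"}]))
# export KUBECONFIG=/repo/configs/hellas/kubeconfig:/repo/configs/testing/kubeconfig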


@@ -1,119 +0,0 @@
#!/usr/bin/env python3
# Adapted from: https://enix.io/en/blog/pxe-talos/
import functools
import json
import pathlib

import requests
import yaml
from jinja2 import Environment, FileSystemLoader, StrictUndefined, Template

NODES = pathlib.Path("nodes")
SCHEMATICS = pathlib.Path("schematics")
PATCHES = Environment(loader=FileSystemLoader("patches"), undefined=StrictUndefined)
TEMPLATES = Environment(loader=FileSystemLoader("templates"), undefined=StrictUndefined)


def node_encoder(node: dict):
    class Inner(json.JSONEncoder):
        def default(self, o):
            if isinstance(o, Template):
                try:
                    rendered = o.render(node)
                except Exception as e:
                    e.add_note(f"While rendering for: {node['hostname']}")
                    raise e
                # Parse the rendered yaml and convert it to a json patch
                return json.dumps(yaml.safe_load(rendered))
            return super().default(o)

    return Inner


@functools.cache
def get_schematic_id(schematic: str):
    """Lookup the schematic id associated with a given schematic"""
    r = requests.post("https://factory.talos.dev/schematics", data=schematic)
    r.raise_for_status()
    data = r.json()
    return data["id"]


def schematic_constructor(loader: yaml.SafeLoader, node: yaml.nodes.ScalarNode):
    """Load specified schematic file and get the assocatied schematic id"""
    schematic_name = loader.construct_yaml_str(node)
    try:
        schematic = SCHEMATICS.joinpath(schematic_name).with_suffix(".yaml").read_text()
        return get_schematic_id(schematic)
    except Exception:
        raise yaml.MarkedYAMLError("Failed to load schematic", node.start_mark)


def patch_constructor(loader: yaml.SafeLoader, node: yaml.nodes.ScalarNode):
    patch_name = loader.construct_scalar(node)
    try:
        template = PATCHES.get_template(f"{patch_name}.yaml")
        return template
    except Exception:
        raise yaml.MarkedYAMLError("Failed to load patch", node.start_mark)


def get_loader():
    """Add special constructors to yaml loader"""
    loader = yaml.SafeLoader
    loader.add_constructor("!schematic", schematic_constructor)
    loader.add_constructor("!patch", patch_constructor)
    return loader


@functools.cache
def get_defaults(directory: pathlib.Path, root: pathlib.Path):
    """Compute the defaults from the provided directory and parents."""
    try:
        with open(directory.joinpath("_defaults.yaml")) as fyaml:
            yml_data = yaml.load(fyaml, Loader=get_loader())
    except OSError:
        yml_data = {}
    # Stop recursion when reaching root directory
    if directory != root:
        return get_defaults(directory.parent, root) | yml_data
    else:
        return yml_data


def walk_files(root: pathlib.Path):
    """Get all files that do not start with and underscore"""
    for dirpath, _dirnames, filenames in root.walk():
        for fn in filenames:
            if not fn.startswith("_"):
                yield dirpath.joinpath(fn)


def main():
    nodes = []
    for fullname in walk_files(NODES):
        filename = str(fullname.relative_to(NODES).parent) + "/" + fullname.stem
        with open(fullname) as fyaml:
            yml_data = yaml.load(fyaml, Loader=get_loader())
        yml_data = get_defaults(fullname.parent, NODES) | yml_data
        yml_data["hostname"] = fullname.stem
        yml_data["filename"] = filename
        nodes.append(yml_data)

    final_nodes = []
    for node in nodes:
        # Quick and dirty way to resolve all the templates using a custom encoder
        final_nodes.append(json.loads(json.dumps(node, cls=node_encoder(node))))

    # Dump everything to json
    print(json.dumps(final_nodes, indent=4))


if __name__ == "__main__":
    main()


@@ -1,11 +1,173 @@
-#!/usr/bin/env bash
-set -euo pipefail
-ROOT=$(git rev-parse --show-toplevel)
-RENDERED=${ROOT}/rendered
-TEMPLATES=${ROOT}/templates
-${ROOT}/tools/merge ./nodes > ${RENDERED}/nodes.json
-gomplate --input-dir ${TEMPLATES} --output-dir ${RENDERED} \
-    -d nodes=file://${RENDERED}/nodes.json \
-    -d config=${ROOT}/config.yaml \

#!/usr/bin/env python3
# Adapted from: https://enix.io/en/blog/pxe-talos/
import functools
import json
import pathlib
import sys

import git
import requests
import yaml
from jinja2 import Environment, FileSystemLoader, StrictUndefined, Template

REPO = git.Repo(sys.path[0], search_parent_directories=True)
assert REPO.working_dir is not None
ROOT = pathlib.Path(REPO.working_dir)
NODES = ROOT.joinpath("nodes")
SCHEMATICS = ROOT.joinpath("schematics")
RENDERED = ROOT.joinpath("rendered")
EXTENSIONS = ["jinja2.ext.do"]
PATCHES = Environment(
    loader=FileSystemLoader(ROOT.joinpath("patches")),
    undefined=StrictUndefined,
    extensions=EXTENSIONS,
)
TEMPLATES = Environment(
    loader=FileSystemLoader(ROOT.joinpath("templates")),
    undefined=StrictUndefined,
    extensions=EXTENSIONS,
)


def render_templates(node: dict):
    class Inner(json.JSONEncoder):
        def default(self, o):
            if isinstance(o, Template):
                try:
                    rendered = o.render(node)
                except Exception as e:
                    e.add_note(f"While rendering for: {node['hostname']}")
                    raise e
                # Parse the rendered yaml
                return yaml.safe_load(rendered)
            return super().default(o)

    return Inner


@functools.cache
def get_schematic_id(schematic: str):
    """Lookup the schematic id associated with a given schematic"""
    r = requests.post("https://factory.talos.dev/schematics", data=schematic)
    r.raise_for_status()
    data = r.json()
    return data["id"]


def schematic_constructor(loader: yaml.SafeLoader, node: yaml.nodes.ScalarNode):
    """Load specified schematic file and get the assocatied schematic id"""
    schematic_name = loader.construct_yaml_str(node)
    try:
        schematic = SCHEMATICS.joinpath(schematic_name).with_suffix(".yaml").read_text()
        return get_schematic_id(schematic)
    except Exception:
        raise yaml.MarkedYAMLError("Failed to load schematic", node.start_mark)


def template_constructor(environment: Environment):
    def inner(loader: yaml.SafeLoader, node: yaml.nodes.ScalarNode):
        patch_name = loader.construct_scalar(node)
        try:
            template = environment.get_template(f"{patch_name}.yaml")
            return template
        except Exception:
            raise yaml.MarkedYAMLError("Failed to load patch", node.start_mark)

    return inner


def realpath_constructor(directory: pathlib.Path):
    def inner(loader: yaml.SafeLoader, node: yaml.nodes.ScalarNode):
        try:
            realpath = directory.joinpath(loader.construct_scalar(node)).resolve(
                strict=True
            )
            return str(realpath)
        except Exception:
            raise yaml.MarkedYAMLError("Failed to get real path", node.start_mark)

    return inner


def get_loader(directory: pathlib.Path):
    """Add special constructors to yaml loader"""
    loader = yaml.SafeLoader
    loader.add_constructor("!realpath", realpath_constructor(directory))
    loader.add_constructor("!schematic", schematic_constructor)
    loader.add_constructor("!patch", template_constructor(PATCHES))
    return loader


@functools.cache
def get_defaults(directory: pathlib.Path, root: pathlib.Path):
    """Compute the defaults from the provided directory and parents."""
    try:
        with open(directory.joinpath("_defaults.yaml")) as fyaml:
            yml_data = yaml.load(fyaml, Loader=get_loader(directory))
    except OSError:
        yml_data = {}
    # Stop recursion when reaching root directory
    if directory != root:
        return get_defaults(directory.parent, root) | yml_data
    else:
        return yml_data


def walk_files(root: pathlib.Path):
    """Get all files that do not start with and underscore"""
    for dirpath, _dirnames, filenames in root.walk():
        for fn in filenames:
            if not fn.startswith("_"):
                yield dirpath.joinpath(fn)


def main():
    nodes = []
    for fullname in walk_files(NODES):
        filename = str(fullname.relative_to(NODES).parent) + "/" + fullname.stem
        with open(fullname) as fyaml:
            yml_data = yaml.load(fyaml, Loader=get_loader(fullname.parent))
        yml_data = get_defaults(fullname.parent, NODES) | yml_data
        yml_data["hostname"] = fullname.stem
        yml_data["filename"] = filename
        nodes.append(yml_data)

    # Quick and dirty way to resolve all the templates using a custom encoder
    nodes = list(
        map(
            lambda node: json.loads(json.dumps(node, cls=render_templates(node))), nodes
        )
    )

    # Get all clusters
    # NOTE: This assumes that all nodes in the cluster use the same definition for the cluster
    clusters = [
        dict(s) for s in set(frozenset(node["cluster"].items()) for node in nodes)
    ]

    with open(ROOT.joinpath("config.yaml")) as fyaml:
        config = yaml.safe_load(fyaml)

    RENDERED.mkdir(exist_ok=True)
    for template_name in TEMPLATES.list_templates():
        template = TEMPLATES.get_template(template_name)
        rendered = template.render(
            nodes=nodes, clusters=clusters, config=config, root=ROOT
        )
        with open(RENDERED.joinpath(template_name), "w") as f:
            f.write(rendered)


if __name__ == "__main__":
    main()
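The frozenset comprehension in main() deduplicates the per-node cluster dicts, which works here because every value in those dicts is a hashable string. A small illustration with trimmed-down node data (only the cluster key, using the two clusters that appear in this change set):

# Simplified illustration of the cluster de-duplication above; the node
# dicts are reduced to just their "cluster" key.
nodes = [
    {"cluster": {"name": "hellas", "controlPlaneIp": "10.0.2.1"}},
    {"cluster": {"name": "hellas", "controlPlaneIp": "10.0.2.1"}},
    {"cluster": {"name": "testing", "controlPlaneIp": "192.168.1.100"}},
]

clusters = [
    dict(s) for s in set(frozenset(node["cluster"].items()) for node in nodes)
]
# Two entries remain, one per cluster; set ordering is not guaranteed.
print(clusters)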

@@ -111,13 +111,12 @@ function delete() {
 		virsh --connect="${CONNECTION}" destroy "${VM_NAME}"
 	fi
 	virsh --connect="${CONNECTION}" undefine "${VM_NAME}" --remove-all-storage
-else
-	echo "VM doest not exists"
-	exit -1
 fi
 if [[ $(virsh --connect="${CONNECTION}" net-list --all | grep -c "${NETWORK}") > "0" ]]; then
+	if [[ $(virsh --connect="${CONNECTION}" list | grep -c "${VM_NAME}") > "0" ]]; then
 	virsh --connect="${CONNECTION}" net-destroy "${NETWORK}"
+	fi
 	virsh --connect="${CONNECTION}" net-undefine "${NETWORK}"
 fi
 }