Compare commits

..

16 Commits

Author SHA1 Message Date
2b7434c0e7 Removed unneeded --- from patches 2025-11-09 03:11:20 +01:00
08d73b95d4 Added source script to set environment variables 2025-11-09 03:07:20 +01:00
2cca38c860 Made repo root available for templates
This allows for embedding the repo root inside of, for example, scripts
to make them function properly no matter where they are run from.
2025-11-09 03:03:46 +01:00
d2a1eca146 Find root of repo that contains the actual script
This makes it possible to run the render script from anywhere and have
it still function correctly.
2025-11-09 03:03:21 +01:00
0049b5cb46 Moved logic for getting clusters to render script 2025-11-09 02:58:01 +01:00
3f8389ddd2 Made yaml template loader more generic 2025-11-09 02:26:35 +01:00
dac2864b2d Store template resolved nodes back in nodes object 2025-11-09 02:16:10 +01:00
85368a3126 Added template for config generation script 2025-11-09 02:16:10 +01:00
18b5d8fd18 Store patches as objects instead of strings 2025-11-09 02:05:08 +01:00
21ae5bc2c4 Added node types 2025-11-09 01:42:52 +01:00
17b0b05410 Added kubernetes version 2025-11-09 01:42:44 +01:00
8832371b99 Added jinja2 do extensions 2025-11-09 01:41:57 +01:00
a9fbf9aad8 Use consistent capitalization 2025-11-08 22:27:37 +01:00
4f072d7cb7 Moved around node config params 2025-11-08 22:23:43 +01:00
235ab5add7 Make python script runnable from anywhere 2025-11-08 21:47:45 +01:00
b0ac551c21 Render all template using python and jinja 2025-11-08 21:47:35 +01:00
61 changed files with 305 additions and 405 deletions

View File

@@ -5,4 +5,8 @@ indent_style = tab
[*.yaml]
indent_style = space
indent_size = 2
indent_size = 4
[{*.py,tools/render}]
indent_style = space
indent_size = 4

1
.gitattributes vendored
View File

@@ -1,2 +1 @@
*.key filter=git-crypt diff=git-crypt
secrets.yaml filter=git-crypt diff=git-crypt

1
.gitignore vendored
View File

@@ -1,4 +1,3 @@
.ipxe/
rendered/
configs/
*.egg-info

View File

@@ -1,48 +0,0 @@
default_install_hook_types:
- pre-commit
- commit-msg
default_stages:
- pre-commit
repos:
- repo: meta
hooks:
- id: check-hooks-apply
- id: check-useless-excludes
- repo: builtin
hooks:
- id: trailing-whitespace
- id: end-of-file-fixer
- id: check-yaml
- id: check-toml
- id: check-added-large-files
- id: check-merge-conflict
- id: check-executables-have-shebangs
- repo: https://github.com/jmlrt/check-yamlschema
rev: v0.0.7
hooks:
- id: check-yamlschema
files: ^patches/.*\.yaml$
- repo: https://github.com/pre-commit/mirrors-prettier
rev: v3.1.0
hooks:
- id: prettier
- repo: https://github.com/crate-ci/typos
rev: v1.40.0
hooks:
- id: typos
- repo: https://github.com/sirwart/ripsecrets
rev: v0.1.11
hooks:
- id: ripsecrets-system
- repo: https://github.com/crate-ci/committed
rev: v1.1.8
hooks:
- id: committed

View File

@@ -1,2 +0,0 @@
secrets.yaml
*.key

View File

@@ -65,9 +65,3 @@ Upgrading talos or changing the schematic:
```bash
talosctl upgrade --nodes <node_id> --image factory.talos.dev/metal-installer/<schematic_id>:<version>
```
To upgrade kubernetes or inline manifests, first apply the updated controlplane configs, then run:
```bash
talosctl upgrade-k8s
```

View File

@@ -1,2 +0,0 @@
style = "conventional"
ignore_author_re = "Flux"

2
config.yaml Normal file
View File

@@ -0,0 +1,2 @@
dhcp:
tftpIp: 10.0.0.3

29
nodes/_defaults.yaml Normal file
View File

@@ -0,0 +1,29 @@
schematicId: !schematic default
arch: amd64
talosVersion: v1.11.3
kubernesVersion: v1.34.1
kernelArgs:
- talos.platform=metal
- console=tty0
- init_on_alloc=1
- init_on_free=1
- slab_nomerge
- pti=on
- consoleblank=0
- nvme_core.io_timeout=4294967295
- printk.devkmsg=on
- selinux=1
- lockdown=confidentiality
extraKernelArgs: []
dns:
- 1.1.1.1
- 8.8.8.8
ntp: nl.pool.ntp.org
install: true
patches:
- !patch hostname
- !patch install-disk
- !patch network
- !patch vip
patchesControlplane:
- !patch allow-controlplane-workloads

View File

@@ -0,0 +1,5 @@
netmask: 255.255.252.0
gateway: 10.0.0.1
clusterName: hellas
controlplaneIp: 10.0.2.1
installDisk: /dev/sda

4
nodes/hellas/helios.yaml Normal file
View File

@@ -0,0 +1,4 @@
serial: 5CZ7NX2
interface: enp2s0
ip: 10.0.0.202
type: "controlplane"

View File

@@ -0,0 +1,4 @@
serial: F3PKRH2
interface: enp3s0
ip: 10.0.0.201
type: "controlplane"

4
nodes/hellas/selene.yaml Normal file
View File

@@ -0,0 +1,4 @@
serial: J33CHY2
interface: enp2s0
ip: 10.0.0.203
type: "controlplane"

View File

@@ -0,0 +1,5 @@
netmask: 255.255.255.0
gateway: 192.168.1.1
clusterName: testing
controlplaneIp: 192.168.1.100
installDisk: /dev/vda

View File

@@ -0,0 +1,4 @@
serial: talos-vm
interface: eth0
ip: 192.168.1.2
type: "controlplane"

View File

@@ -0,0 +1,2 @@
cluster:
allowSchedulingOnControlPlanes: true

3
patches/hostname.yaml Normal file
View File

@@ -0,0 +1,3 @@
machine:
network:
hostname: {{hostname}}

View File

@@ -0,0 +1,3 @@
machine:
install:
disk: {{installDisk}}

10
patches/network.yaml Normal file
View File

@@ -0,0 +1,10 @@
machine:
network:
interfaces:
- interface: {{interface}}
dhcp: false
addresses:
- {{ip}}
routes:
- network: 0.0.0.0/0
gateway: {{gateway}}

6
patches/vip.yaml Normal file
View File

@@ -0,0 +1,6 @@
machine:
network:
interfaces:
- interface: {{interface}}
vip:
ip: {{controlplaneIp}}

4
requirements.txt Normal file
View File

@@ -0,0 +1,4 @@
PyYAML==6.0.3
requests==2.32.5
Jinja2==3.1.6
GitPython==3.1.45

7
schematics/default.yaml Normal file
View File

@@ -0,0 +1,7 @@
customization:
systemExtensions:
officialExtensions:
- siderolabs/iscsi-tools
- siderolabs/util-linux-tools
- siderolabs/intel-ucode
- siderolabs/i915

BIN
secrets.yaml Normal file

Binary file not shown.

View File

@@ -1,53 +0,0 @@
# yaml-language-server: $schema=../../schemas/cluster.json
version:
kubernetes: 1.34.1
talos: 1.11.3
base:
kernelArgs:
- talos.platform=metal
- console=tty0
- init_on_alloc=1
- init_on_free=1
- slab_nomerge
- pti=on
- consoleblank=0
- nvme_core.io_timeout=4294967295
- printk.devkmsg=on
- selinux=1
- lockdown=confidentiality
patches:
all:
- system/hostname.yaml
- system/install-disk.yaml
- system/network.yaml
- networking/vip.yaml
- networking/tailscale.yaml
- networking/cilium.yaml
- spegel.yaml
- storage/longhorn.yaml
- storage/longhorn/user-volume.yaml
- storage/local-path-provisioner/user-volume.yaml
- storage/limit-ephemeral.yaml
- metrics/all.yaml
controlPlane:
- system/allow-control-plane-workloads.yaml
- sops.yaml
- flux/cluster-variables.yaml
- metrics/control-plane.yaml
- networking/gateway-api.yaml
default:
arch: amd64
schematic: default.yaml
network:
dns:
- 1.1.1.1
- 8.8.8.8
tailscale:
server: https://headscale.huizinga.dev
authKey:
file: tailscale.key
advertiseRoutes: true
ntp: nl.pool.ntp.org
install:
auto: true

View File

@@ -1,16 +0,0 @@
# yaml-language-server: $schema=../../schemas/cluster.json
clusterEnv: staging
controlPlaneIp: 192.168.1.100
secretsFile: testing/secrets.yaml
nodes:
- testing/talos-vm
default:
network:
interface: enp1s0
netmask: 255.255.255.0
gateway: 192.168.1.1
sops:
file: testing/age.key
install:
disk: /dev/vda

View File

@@ -1,17 +0,0 @@
# yaml-language-server: $schema=../../schemas/cluster.json
clusterEnv: production
controlPlaneIp: 10.0.2.1
secretsFile: titan/secrets.yaml
nodes:
- titan/hyperion
- titan/helios
- titan/selene
default:
network:
netmask: 255.255.252.0
gateway: 10.0.0.1
sops:
file: titan/age.key
install:
disk: /dev/sda

View File

@@ -1,6 +0,0 @@
# yaml-language-server: $schema=../../../schemas/node.json
type: controlPlane
install:
serial: talos-vm
network:
ip: 192.168.1.2

View File

@@ -1,7 +0,0 @@
# yaml-language-server: $schema=../../../schemas/node.json
type: controlPlane
install:
serial: 5CZ7NX2
network:
interface: enp2s0
ip: 10.0.0.202

View File

@@ -1,7 +0,0 @@
# yaml-language-server: $schema=../../../schemas/node.json
type: controlPlane
install:
serial: F3PKRH2
network:
interface: enp3s0
ip: 10.0.0.201

View File

@@ -1,7 +0,0 @@
# yaml-language-server: $schema=../../../schemas/node.json
type: controlPlane
install:
serial: J33CHY2
network:
interface: enp2s0
ip: 10.0.0.203

View File

@@ -1,18 +0,0 @@
# yaml-language-server: $schema=https://raw.githubusercontent.com/siderolabs/talos/refs/heads/release-1.11/website/content/v1.11/schemas/config.schema.json
cluster:
inlineManifests:
- name: cluster-variables
contents: |
---
apiVersion: v1
kind: Namespace
metadata:
name: flux-system
---
apiVersion: v1
kind: ConfigMap
metadata:
name: cluster-variables
namespace: flux-system
data:
cluster_env: {{ cluster.clusterEnv }}

View File

@@ -1,5 +0,0 @@
# yaml-language-server: $schema=https://raw.githubusercontent.com/siderolabs/talos/refs/heads/release-1.11/website/content/v1.11/schemas/config.schema.json
machine:
kubelet:
extraArgs:
rotate-server-certificates: "true"

View File

@@ -1,5 +0,0 @@
# yaml-language-server: $schema=https://raw.githubusercontent.com/siderolabs/talos/refs/heads/release-1.11/website/content/v1.11/schemas/config.schema.json
cluster:
extraManifests:
- https://raw.githubusercontent.com/alex1989hu/kubelet-serving-cert-approver/main/deploy/standalone-install.yaml
- https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml

View File

@@ -1,12 +0,0 @@
# yaml-language-server: $schema=https://raw.githubusercontent.com/siderolabs/talos/refs/heads/release-1.11/website/content/v1.11/schemas/config.schema.json
machine:
features:
hostDNS:
# This option is enabled by default and causes issues with cilium
forwardKubeDNSToHost: false
cluster:
network:
cni:
name: none
proxy:
disabled: true

View File

@@ -1,4 +0,0 @@
# yaml-language-server: $schema=https://raw.githubusercontent.com/siderolabs/talos/refs/heads/release-1.11/website/content/v1.11/schemas/config.schema.json
cluster:
extraManifests:
- https://github.com/kubernetes-sigs/gateway-api/releases/download/v1.4.1/standard-install.yaml

View File

@@ -1,8 +0,0 @@
# yaml-language-server: $schema=https://raw.githubusercontent.com/siderolabs/talos/refs/heads/release-1.11/website/content/v1.11/schemas/config.schema.json
apiVersion: v1alpha1
kind: ExtensionServiceConfig
name: tailscale
environment:
- TS_AUTHKEY={{ node.network.tailscale.authKey }}
- TS_EXTRA_ARGS={% if node.network.tailscale.server %}--login-server {{ node.network.tailscale.server }} {% endif %}--advertise-tags=tag:cluster-{{ cluster.name }}
- TS_ROUTES={% if node.network.tailscale.advertiseRoutes %}{{node.network.ip}}/{{ node.network.netmask | to_prefix }}{% endif %}

View File

@@ -1,7 +0,0 @@
# yaml-language-server: $schema=https://raw.githubusercontent.com/siderolabs/talos/refs/heads/release-1.11/website/content/v1.11/schemas/config.schema.json
machine:
network:
interfaces:
- interface: "{{node.network.interface}}"
vip:
ip: "{{cluster.controlPlaneIp}}"

View File

@@ -1,18 +0,0 @@
# yaml-language-server: $schema=https://raw.githubusercontent.com/siderolabs/talos/refs/heads/release-1.11/website/content/v1.11/schemas/config.schema.json
cluster:
inlineManifests:
- name: sops-key
contents: |
apiVersion: v1
kind: Namespace
metadata:
name: flux-system
---
apiVersion: v1
kind: Secret
metadata:
name: sops-gpg
namespace: flux-system
data:
age.agekey: |
{{ node.sops | indent(6*2) }}

View File

@@ -1,8 +0,0 @@
# yaml-language-server: $schema=https://raw.githubusercontent.com/siderolabs/talos/refs/heads/release-1.11/website/content/v1.11/schemas/config.schema.json
machine:
files:
- path: /etc/cri/conf.d/20-customization.part
op: create
content: |
[plugins."io.containerd.cri.v1.images"]
discard_unpacked_layers = false

View File

@@ -1,6 +0,0 @@
# yaml-language-server: $schema=https://raw.githubusercontent.com/siderolabs/talos/refs/heads/release-1.11/website/content/v1.11/schemas/config.schema.json
apiVersion: v1alpha1
kind: VolumeConfig
name: EPHEMERAL
provisioning:
maxSize: 30GB

View File

@@ -1,9 +0,0 @@
# yaml-language-server: $schema=https://raw.githubusercontent.com/siderolabs/talos/refs/heads/release-1.11/website/content/v1.11/schemas/config.schema.json
apiVersion: v1alpha1
kind: UserVolumeConfig
name: local-path-provisioner
provisioning:
diskSelector:
match: system_disk
grow: true
maxSize: 10GB

View File

@@ -1,11 +0,0 @@
# yaml-language-server: $schema=https://raw.githubusercontent.com/siderolabs/talos/refs/heads/release-1.11/website/content/v1.11/schemas/config.schema.json
machine:
kubelet:
extraMounts:
- destination: /var/lib/longhorn
type: bind
source: /var/lib/longhorn
options:
- bind
- rshared
- rw

View File

@@ -1,9 +0,0 @@
# yaml-language-server: $schema=https://raw.githubusercontent.com/siderolabs/talos/refs/heads/release-1.11/website/content/v1.11/schemas/config.schema.json
apiVersion: v1alpha1
kind: UserVolumeConfig
name: longhorn
provisioning:
diskSelector:
match: system_disk
grow: true
maxSize: 2000GB

View File

@@ -1,17 +0,0 @@
# yaml-language-server: $schema=https://raw.githubusercontent.com/siderolabs/talos/refs/heads/release-1.11/website/content/v1.11/schemas/config.schema.json
machine:
# This is only needed on nodes that will have storage
sysctls:
vm.nr_hugepages: "1024"
nodeLabels:
openebs.io/engine: mayastor
# This is needed on ALL nodes
kubelet:
extraMounts:
- destination: /var/local
type: bind
source: /var/local
options:
- bind
- rshared
- rw

View File

@@ -1,3 +0,0 @@
# yaml-language-server: $schema=https://raw.githubusercontent.com/siderolabs/talos/refs/heads/release-1.11/website/content/v1.11/schemas/config.schema.json
cluster:
allowSchedulingOnControlPlanes: true

View File

@@ -1,4 +0,0 @@
# yaml-language-server: $schema=https://raw.githubusercontent.com/siderolabs/talos/refs/heads/release-1.11/website/content/v1.11/schemas/config.schema.json
machine:
network:
hostname: "{{node.hostname}}"

View File

@@ -1,4 +0,0 @@
# yaml-language-server: $schema=https://raw.githubusercontent.com/siderolabs/talos/refs/heads/release-1.11/website/content/v1.11/schemas/config.schema.json
machine:
install:
disk: "{{node.install.disk}}"

View File

@@ -1,11 +0,0 @@
# yaml-language-server: $schema=https://raw.githubusercontent.com/siderolabs/talos/refs/heads/release-1.11/website/content/v1.11/schemas/config.schema.json
machine:
network:
interfaces:
- interface: "{{node.network.interface}}"
dhcp: false
addresses:
- "{{node.network.ip}}"
routes:
- network: 0.0.0.0/0
gateway: "{{node.network.gateway}}"

View File

@@ -1,8 +0,0 @@
customization:
systemExtensions:
officialExtensions:
- siderolabs/iscsi-tools
- siderolabs/util-linux-tools
- siderolabs/intel-ucode
- siderolabs/i915
- siderolabs/tailscale

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@@ -1,5 +1,5 @@
{% set httpUrl = "http://192.168.1.1:8000" -%}
#!ipxe
dhcp
echo Starting ${serial}
@@ -8,16 +8,15 @@ echo Starting ${serial}
goto node_${serial} || exit
# Default behavior (non install mode) is to exit iPXE script
{% for cluster in clusters%}
{% for node in cluster.nodes %}
{%- if node.install.serial -%}
# {{ cluster.name }}/{{ node.hostname }}
:node_{{ node.install.serial }}
{% set ipArg = "ip=" ~ [node.network.ip, "" , node.network.gateway, node.network.netmask, node.hostname, node.network.interface, "", node.network.dns[0], node.network.dns[1], node.ntp]|join(":") -%}
{% for node in nodes %}
{%- if node.install -%}
# {{ node.filename }}
:node_{{ node.serial }}
{% set ipArg = "ip=" ~ node.ip ~ "::" ~ node.gateway ~ ":" ~ node.netmask ~ ":" ~ node.hostname ~ ":" ~ node.interface ~ "::" ~ node.dns[0] ~ ":" ~ node.dns[1] ~ ":" ~ node.ntp -%}
{% set kernelArgs = ipArg ~ " " ~ node.kernelArgs ~ " " ~ node.extraKernelArgs -%}
imgfree
kernel https://pxe.factory.talos.dev/image/{{ node.schematic }}/v{{ cluster.version.talos }}/kernel-{{ node.arch }} {{ ipArg }} {{ node.kernelArgs|join(" ") }} {% if node.install.auto %}talos.config={{httpUrl}}/configs/{{cluster.name}}/{{node.hostname}}.yaml{% endif +%}
initrd https://pxe.factory.talos.dev/image/{{ node.schematic }}/v{{ cluster.version.talos }}/initramfs-{{ node.arch }}.xz
kernel https://pxe.factory.talos.dev/image/{{ node.schematicId }}/{{ node.talosVersion }}/kernel-{{ node.arch }} {{ kernelArgs }}
initrd https://pxe.factory.talos.dev/image/{{ node.schematicId }}/{{ node.talosVersion }}/initramfs-{{ node.arch }}.xz
boot
{% endif %}
{% endfor %}
{% endfor %}

View File

@@ -1,4 +1,4 @@
{% set tftpIp = "192.168.1.1" -%}
{% set tftpIp = config.dhcp.tftpIp -%}
enable-tftp
tftp-root=/tftproot

View File

@@ -1,38 +1,40 @@
#!/usr/bin/env bash
set -euo pipefail
CONFIGS={{ root }}/configs
TALOSCONFIG=${CONFIGS}/talosconfig
rm -f ${CONFIGS}
ROOT={{ root }}
CONFIGS=${ROOT}/configs
# Generate the configuration for each node
{% for cluster in clusters %}
{% for node in cluster.nodes -%}
talosctl gen config {{ cluster.name }} https://{{ cluster.controlPlaneIp }}:6443 -f \
--with-secrets {{ cluster.secretsFile }} \
--talos-version v{{ cluster.version.talos }} \
--kubernetes-version v{{ cluster.version.kubernetes }} \
{% for node in nodes -%}
talosctl gen config {{ node.clusterName }} https://{{ node.controlplaneIp }}:6443 -f \
--with-secrets ${ROOT}/secrets.yaml \
--talos-version {{ node.talosVersion }} \
--kubernetes-version {{ node.kubernesVersion }} \
--output-types {{ node.type }} \
--install-image factory.talos.dev/metal-installer/{{ node.schematic }}:v{{ cluster.version.talos }} \
{% for patch in node.patches.all -%}
--install-image factory.talos.dev/metal-installer/{{ node.schematicId }}:{{ node.talosVersion }} \
{% for patch in node.patches -%}
{# The double call to tojson is needed to properly escape the patch (object -> json -> string) -#}
--config-patch {{ patch|tojson|tojson }} \
{% endfor -%}
{% for patch in node.patches.controlPlane -%}
{% for patch in node.patchesControlplane -%}
--config-patch-control-plane {{ patch|tojson|tojson }} \
{% endfor -%}
--with-docs=false \
--with-examples=false \
-o ${CONFIGS}/{{ cluster.name }}/{{ node.hostname }}.yaml
-o ${CONFIGS}/{{ node.filename }}.yaml
{% endfor %}
# Generate the talosconfig file for each cluster
talosctl gen config {{ cluster.name }} https://{{ cluster.controlPlaneIp }}:6443 -f \
--with-secrets {{ cluster.secretsFile }} \
{# NOTE: This assumes that each node in a cluster specifies the same controlplane IP -#}
{% for cluster in clusters -%}
talosctl gen config {{ cluster.name }} https://{{ cluster.controlplaneIp }}:6443 -f \
--with-secrets ${ROOT}/secrets.yaml \
--output-types talosconfig \
-o ${CONFIGS}/{{ cluster.name }}/talosconfig
{% endfor %}
# Create merged talosconfig
talosctl config --talosconfig=${CONFIGS}/{{ cluster.name }}/talosconfig endpoint {{ cluster.controlPlaneIp }}
talosctl config --talosconfig=${TALOSCONFIG} merge ${CONFIGS}/{{ cluster.name }}/talosconfig
export TALOSCONFIG=${CONFIGS}/talosconfig
rm -f ${TALOSCONFIG}
{% for cluster in clusters -%}
talosctl config merge ${CONFIGS}/{{ cluster.name }}/talosconfig
{% endfor %}

View File

@@ -1,2 +1,6 @@
export TALOSCONFIG={{ root }}/configs/talosconfig
export KUBECONFIG={{ clusters|map(attribute='name')|kubeconfig|join(":") }}
{% set paths = [] %}
{%- for cluster_name in nodes|map(attribute="clusterName")|unique -%}
{%- do paths.append(root ~ "/configs/" ~ cluster_name ~ "/kubeconfig") -%}
{% endfor -%}
export KUBECONFIG={{ paths|join(":") }}

163
tools/render Executable file
View File

@@ -0,0 +1,163 @@
#!/usr/bin/env python3
# Adapted from: https://enix.io/en/blog/pxe-talos/
import functools
import json
import pathlib
import sys
import git
import requests
import yaml
from jinja2 import Environment, FileSystemLoader, StrictUndefined, Template
# Locate the repository that contains this script (sys.path[0] is the script's
# directory, not the CWD), so the tool works no matter where it is run from.
REPO = git.Repo(sys.path[0], search_parent_directories=True)
assert REPO.working_dir is not None  # None only for bare repos
ROOT = pathlib.Path(REPO.working_dir)
# Well-known repository directories.
NODES = ROOT.joinpath("nodes")
SCHEMATICS = ROOT.joinpath("schematics")
RENDERED = ROOT.joinpath("rendered")
# Enable {% do %} statements in templates (used e.g. to build lists in-place).
EXTENSIONS = ["jinja2.ext.do"]
# Patch templates: rendered per-node via the !patch yaml constructor.
PATCHES = Environment(
    loader=FileSystemLoader(ROOT.joinpath("patches")),
    undefined=StrictUndefined,
    extensions=EXTENSIONS,
)
# Output templates: rendered once with the full node/cluster context in main().
TEMPLATES = Environment(
    loader=FileSystemLoader(ROOT.joinpath("templates")),
    undefined=StrictUndefined,
    extensions=EXTENSIONS,
)
def render_templates(node: dict):
    """Build a JSONEncoder subclass that resolves jinja2 Templates for `node`.

    Node dicts may contain jinja2 Template objects (loaded by the !patch yaml
    constructor). The returned encoder renders each Template with the node
    itself as context and parses the result as YAML, so a round trip through
    json.dumps/json.loads turns every template into plain structured data.
    """

    class Inner(json.JSONEncoder):
        def default(self, o):
            if isinstance(o, Template):
                try:
                    rendered = o.render(node)
                except Exception as e:
                    # Attach which node failed; re-raise bare to keep the
                    # original traceback intact.
                    e.add_note(f"While rendering for: {node['hostname']}")
                    raise
                # Parse the rendered yaml into structured data
                return yaml.safe_load(rendered)
            return super().default(o)

    return Inner
@functools.cache
def get_schematic_id(schematic: str):
    """Lookup the schematic id associated with a given schematic.

    Posts the raw schematic YAML to the Talos image factory and returns the
    "id" field of the JSON response. Cached, so each distinct schematic is
    uploaded at most once per run. Raises requests.HTTPError on failure.
    """
    r = requests.post("https://factory.talos.dev/schematics", data=schematic)
    r.raise_for_status()
    data = r.json()
    return data["id"]
def schematic_constructor(loader: yaml.SafeLoader, node: yaml.nodes.ScalarNode):
    """YAML `!schematic` constructor.

    Loads the named schematic file from SCHEMATICS and returns the associated
    factory schematic id. Raises a MarkedYAMLError pointing at the offending
    yaml node, chaining the original failure so its cause is not lost.
    """
    schematic_name = loader.construct_yaml_str(node)
    try:
        schematic = SCHEMATICS.joinpath(schematic_name).with_suffix(".yaml").read_text()
        return get_schematic_id(schematic)
    except Exception as e:
        raise yaml.MarkedYAMLError("Failed to load schematic", node.start_mark) from e
def template_constructor(environment: Environment):
    """Build a YAML `!patch` constructor bound to a jinja2 environment.

    The returned constructor resolves a scalar patch name to the template
    `<name>.yaml` in `environment`. Failures are reported as a MarkedYAMLError
    pointing at the offending yaml node, chaining the original exception so
    the root cause stays visible.
    """

    def inner(loader: yaml.SafeLoader, node: yaml.nodes.ScalarNode):
        patch_name = loader.construct_scalar(node)
        try:
            return environment.get_template(f"{patch_name}.yaml")
        except Exception as e:
            raise yaml.MarkedYAMLError("Failed to load patch", node.start_mark) from e

    return inner
def get_loader():
    """Build a yaml Loader with the custom !schematic and !patch constructors.

    Uses a SafeLoader subclass instead of registering constructors on
    yaml.SafeLoader itself: mutating the global class would make plain
    yaml.safe_load calls elsewhere (e.g. for config.yaml) silently accept
    these tags too.
    """

    class Loader(yaml.SafeLoader):
        pass

    Loader.add_constructor("!schematic", schematic_constructor)
    Loader.add_constructor("!patch", template_constructor(PATCHES))
    return Loader
@functools.cache
def get_defaults(directory: pathlib.Path, root: pathlib.Path):
    """Merge `_defaults.yaml` files from `root` down to `directory`.

    Walks up from `directory` to `root`, collecting each level's
    `_defaults.yaml` (missing files contribute nothing); values from deeper
    directories override those from their ancestors. Cached per directory.
    """
    try:
        with open(directory.joinpath("_defaults.yaml")) as fyaml:
            own_defaults = yaml.load(fyaml, Loader=get_loader())
    except OSError:
        own_defaults = {}
    # At the root there is nothing further up to merge with.
    if directory == root:
        return own_defaults
    # Parent defaults first, so this directory's values win on conflict.
    return get_defaults(directory.parent, root) | own_defaults
def walk_files(root: pathlib.Path):
"""Get all files that do not start with and underscore"""
for dirpath, _dirnames, filenames in root.walk():
for fn in filenames:
if not fn.startswith("_"):
yield dirpath.joinpath(fn)
def main():
    """Load all node definitions, resolve their templates, and render every
    file in templates/ into the rendered/ directory."""
    nodes = []
    for fullname in walk_files(NODES):
        # e.g. nodes/hellas/helios.yaml -> "hellas/helios"
        filename = str(fullname.relative_to(NODES).parent) + "/" + fullname.stem
        with open(fullname) as fyaml:
            yml_data = yaml.load(fyaml, Loader=get_loader())
        # Layer the node's own values over the directory-chain defaults.
        yml_data = get_defaults(fullname.parent, NODES) | yml_data
        yml_data["hostname"] = fullname.stem
        yml_data["filename"] = filename
        nodes.append(yml_data)
    # Quick and dirty way to resolve all the templates using a custom encoder
    nodes = list(
        map(
            lambda node: json.loads(json.dumps(node, cls=render_templates(node))), nodes
        )
    )
    # Get all clusterName & controlplaneIp pairs
    clusters = map(
        lambda node: {
            "name": node["clusterName"],
            "controlplaneIp": node["controlplaneIp"],
        },
        nodes,
    )
    # Deduplicate via frozenset because dicts are unhashable.
    # NOTE(review): set iteration order is not deterministic, so cluster
    # ordering in rendered output may vary between runs — confirm acceptable.
    clusters = [dict(s) for s in set(frozenset(d.items()) for d in clusters)]
    # Global, non-node-specific settings (e.g. dhcp.tftpIp).
    with open(ROOT.joinpath("config.yaml")) as fyaml:
        config = yaml.safe_load(fyaml)
    RENDERED.mkdir(exist_ok=True)
    # Render each output template with the full node/cluster/config context.
    for template_name in TEMPLATES.list_templates():
        template = TEMPLATES.get_template(template_name)
        rendered = template.render(
            nodes=nodes, clusters=clusters, config=config, root=ROOT
        )
        with open(RENDERED.joinpath(template_name), "w") as f:
            f.write(rendered)
if __name__ == "__main__":
main()

View File

@@ -6,7 +6,6 @@ IPXE_VERSION=b41bda4413bf286d7b7a449bc05e1531da1eec2e
IPXE_BIN=(bin/ipxe.pxe bin-x86_64-efi/ipxe.efi)
IPXE_DIR=${ROOT}/.ipxe/ipxe-${IPXE_VERSION}
HTTP_URL=$(cat ${ROOT}/config.yaml | yq .server.httpUrl)
function download_ipxe() {
base_dir=$(dirname ${IPXE_DIR})
@@ -26,8 +25,7 @@ function patch_ipxe() {
#!ipxe
dhcp
chain ${HTTP_URL}/boot.ipxe || shell
# chain boot.ipxe || shell
chain boot.ipxe || shell
EOF
cd - > /dev/null
@@ -46,7 +44,6 @@ function build_ipxe() {
function render() {
${ROOT}/tools/render
${ROOT}/rendered/generate_configs.sh
}
function host_tftp() {
@@ -67,30 +64,8 @@ function host_tftp() {
sudo in.tftpd --verbosity 100 --permissive -L --secure ${TFTP_DIR}
}
function host_http() {
HTTP_DIR=$(mktemp --tmpdir -d http.XXX)
chmod 755 ${HTTP_DIR}
function cleanup() {
rm -rf ${HTTP_DIR}
}
trap cleanup EXIT
ln -s ${ROOT}/rendered/boot.ipxe ${HTTP_DIR}
for bin in "${IPXE_BIN[@]}"; do
path=${IPXE_DIR}/src/${bin}
ln -s ${path} ${HTTP_DIR}
done
ln -s ${ROOT}/configs ${HTTP_DIR}
echo "Starting http"
cd ${HTTP_DIR}
python -m http.server 8000
cd -
}
download_ipxe
patch_ipxe
build_ipxe
render
host_http
host_tftp

View File

@@ -3,9 +3,9 @@ set -euo pipefail
ROOT=$(git rev-parse --show-toplevel)
VM_NAME="talos-vm"
VCPUS="6"
RAM_MB="16384"
DISK_GB="100"
VCPUS="2"
RAM_MB="2048"
DISK_GB="10"
NETWORK=talos
CONNECTION="qemu:///system"
@@ -21,7 +21,7 @@ function define_network() {
<ip address="192.168.1.1" netmask="255.255.255.0">
<dhcp>
<range start="192.168.1.2" end="192.168.1.254"/>
<bootp file='http://192.168.1.1:8000/ipxe.pxe'/>
<bootp file='ipxe.pxe'/>
</dhcp>
</ip>
</network>
@@ -111,12 +111,13 @@ function delete() {
virsh --connect="${CONNECTION}" destroy "${VM_NAME}"
fi
virsh --connect="${CONNECTION}" undefine "${VM_NAME}" --remove-all-storage
else
echo "VM doest not exists"
exit -1
fi
if [[ $(virsh --connect="${CONNECTION}" net-list --all | grep -c "${NETWORK}") > "0" ]]; then
if [[ $(virsh --connect="${CONNECTION}" list | grep -c "${VM_NAME}") > "0" ]]; then
virsh --connect="${CONNECTION}" net-destroy "${NETWORK}"
fi
virsh --connect="${CONNECTION}" net-undefine "${NETWORK}"
fi
}