Initial commit

This commit is contained in:
2025-03-22 14:29:48 +01:00
commit da7781035d
87 changed files with 3160 additions and 0 deletions

98
infra/pve/docker.tf Normal file
View File

@@ -0,0 +1,98 @@
/*
* Docker machine terraform file
*/
resource "proxmox_virtual_environment_file" "docker-machine-cloud-config" {
  content_type = "snippets"
  datastore_id = "local"
  node_name    = "pve"

  source_raw {
    # First-boot provisioning: base tools + guest agent, then Docker CE from
    # the official apt repository, finishing with a single-node swarm init.
    # Fix: dropped the stray 'sudo' on the tee line — cloud-init runcmd already
    # runs as root (every sibling command relies on that), and sudo may not be
    # present in a minimal cloud image.
    data = <<-EOF
    #cloud-config
    package_update: true
    packages:
    - git
    - ca-certificates
    - wget
    - curl
    - gnupg2
    - qemu-guest-agent
    runcmd:
    - systemctl enable --now qemu-guest-agent
    - install -m 0755 -d /etc/apt/keyrings
    - curl -fsSL https://download.docker.com/linux/debian/gpg -o /etc/apt/keyrings/docker.asc
    - chmod a+r /etc/apt/keyrings/docker.asc
    - echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/debian $(. /etc/os-release && echo "$VERSION_CODENAME") stable" | tee /etc/apt/sources.list.d/docker.list > /dev/null
    - apt-get update
    - apt-get -y install docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
    - docker swarm init
    EOF
    file_name = "docker-machine-cloud-config.yaml"
  }
}
# Docker swarm host VM, cloned from the debian-latest template and
# provisioned on first boot by the cloud-config snippet above.
resource "proxmox_virtual_environment_vm" "docker-machine" {
  name      = "docker-machine"
  node_name = "pve"
  on_boot   = true # start automatically when the PVE host boots

  agent {
    enabled = true # qemu-guest-agent is installed by the cloud-config
  }

  tags = ["debian", "debian-latest", "docker", "terraform"]

  cpu {
    type    = "kvm64"
    cores   = 4
    sockets = 1
    flags   = []
  }

  memory {
    # NOTE(review): 16192 MiB is an odd size — 16 GiB would be 16384; confirm intentional
    dedicated = 16192
  }

  network_device {
    bridge  = "vmbr0"
    model   = "virtio"
    vlan_id = 2
  }

  # Attributes mutated by the hypervisor/guest after cloning; ignore drift
  # so subsequent plans stay clean.
  lifecycle {
    ignore_changes = [
      network_interface_names,
      mac_addresses,
      ipv4_addresses,
      ipv6_addresses,
      id,
      disk,
      initialization,
      vga
    ]
  }

  boot_order    = ["scsi0"]
  scsi_hardware = "virtio-scsi-single"

  disk {
    interface    = "scsi0"
    iothread     = true
    datastore_id = "local-lvm"
    size         = 8 # GiB
    discard      = "ignore"
  }

  # Clone of the debian-latest template discovered in main.tf
  clone {
    vm_id = data.proxmox_virtual_environment_vms.debian_vm_template.vms[0].vm_id
  }

  vm_id = 701

  initialization {
    datastore_id = "local-lvm"
    interface    = "ide2"
    # Vendor data installs Docker + initializes the swarm on first boot
    vendor_data_file_id = proxmox_virtual_environment_file.docker-machine-cloud-config.id
  }
}

370
infra/pve/kube.tf Normal file
View File

@@ -0,0 +1,370 @@
/*
* Kubernetes cluster terraform file
*/
# Talos Linux boot ISO, built by the Talos image factory; the long hex id is
# the factory schematic hash and must match the installer image pinned in the
# machine configuration below.
resource "proxmox_virtual_environment_download_file" "talos-cloudimg" {
  content_type = "iso"
  datastore_id = "local"
  file_name    = "talos-v1.9.4-nocloud-amd64.iso"
  node_name    = "pve"
  url          = "https://factory.talos.dev/image/ce4c980550dd2ab1b17bbf2b08801c7eb59418eafe8f279833297925d67c7515/v1.9.4/nocloud-amd64.iso"
}
# Single-node Talos Linux VM hosting the Kubernetes cluster.
resource "proxmox_virtual_environment_vm" "kube" {
  name            = "kube-talos"
  description     = "Kubernetes Talos Linux"
  tags            = ["kubernetes", "talos", "terraform"]
  node_name       = "pve"
  vm_id           = 702
  machine         = "q35"
  keyboard_layout = "fr"

  agent {
    enabled = true # used below to read the VM's IP addresses
  }

  stop_on_destroy = true

  cpu {
    cores = 4
    type  = "x86-64-v3"
  }

  memory {
    # NOTE(review): 16192 MiB is an odd size — 16 GiB would be 16384; confirm intentional
    dedicated = 16192
    floating  = 16192 # equal to dedicated => ballooning effectively disabled
  }

  # Boot from disk first, fall back to the Talos ISO on ide0 (fresh installs).
  boot_order    = ["scsi0", "ide0"]
  scsi_hardware = "virtio-scsi-single"

  cdrom {
    enabled   = true
    file_id   = proxmox_virtual_environment_download_file.talos-cloudimg.id
    interface = "ide0"
  }

  disk {
    interface    = "scsi0"
    iothread     = true
    datastore_id = "local-lvm"
    size         = 16 # GiB
    discard      = "ignore"
    file_format  = "raw"
  }

  vga {
    type = "serial0" # console output goes to the serial device below
  }

  initialization {
    datastore_id = "local-lvm"
    interface    = "ide2"
    ip_config {
      ipv4 {
        address = "dhcp"
      }
    }
    # NOTE(review): Talos does not use cloud-init user accounts — presumably
    # kept to satisfy the initialization block; confirm it is needed.
    user_account {
      keys     = [trimspace(var.ssh_public_key)]
      password = var.machine_root_password
      username = "root"
    }
  }

  lifecycle {
    # Runtime-reported attributes; ignore drift.
    ignore_changes = [
      ipv4_addresses, ipv6_addresses, network_interface_names
    ]
  }

  network_device {
    bridge = "vmbr0"
    model  = "virtio"
    # Fixed MAC — presumably matched by a DHCP reservation resolving
    # kube-talos.local; verify against the DHCP server config.
    mac_address = "BC:24:11:F6:E1:C9"
    vlan_id     = 2
  }

  operating_system {
    type = "l26" # Linux 2.6+ kernel
  }

  tpm_state {
    version = "v2.0"
  }

  serial_device {} # backing device for the serial0 display
}
resource "talos_machine_secrets" "kube" {}
# Controlplane machine configuration for the single-node "kube" cluster.
data "talos_machine_configuration" "kube" {
  cluster_name     = "kube"
  machine_type     = "controlplane"
  cluster_endpoint = "https://kube-talos.local:6443"
  machine_secrets  = talos_machine_secrets.kube.machine_secrets

  config_patches = [
    yamlencode({
      machine = {
        # Installer pinned to the same factory schematic/version as the boot ISO.
        install = {
          image = "factory.talos.dev/installer/ce4c980550dd2ab1b17bbf2b08801c7eb59418eafe8f279833297925d67c7515:v1.9.4"
        }
        network = {
          nameservers = ["10.1.2.3"]
        }
      }
      cluster = {
        # Single node: workloads must be schedulable on the control plane.
        allowSchedulingOnControlPlanes = true
        apiServer = {
          certSANs = ["kube-talos.local"]
        }
        network = {
          dnsDomain = "kube-talos.local"
          # No built-in CNI — Cilium is installed separately via Helm.
          cni = {
            name = "none"
          }
        }
        # kube-proxy is disabled as well; Cilium takes over its role.
        proxy = {
          disabled = true
        }
      }
    })
  ]
}
# talosctl client configuration (talosconfig) for reaching the node.
data "talos_client_configuration" "kube" {
  client_configuration = talos_machine_secrets.kube.client_configuration
  cluster_name         = "kube"
  nodes                = ["kube-talos.local"]
}
# Pushes the generated machine configuration to the freshly booted node.
resource "talos_machine_configuration_apply" "kube" {
  client_configuration        = talos_machine_secrets.kube.client_configuration
  machine_configuration_input = data.talos_machine_configuration.kube.machine_configuration
  # NOTE(review): fragile — assumes the guest agent reports eth0 as the 8th
  # interface; breaks if Talos changes its interface layout or ordering.
  node       = proxmox_virtual_environment_vm.kube.ipv4_addresses[7][0] # lo + 6 talos-created interfaces before eth0
  depends_on = [proxmox_virtual_environment_vm.kube]
  lifecycle {
    # Re-apply the configuration whenever the VM itself is replaced.
    replace_triggered_by = [proxmox_virtual_environment_vm.kube]
  }
}
# Bootstraps etcd/Kubernetes on the node once its configuration is applied.
resource "talos_machine_bootstrap" "kube" {
  # NOTE(review): same fragile interface-index assumption as the apply above.
  node                 = proxmox_virtual_environment_vm.kube.ipv4_addresses[7][0] # lo + 6 talos-created interfaces before eth0
  client_configuration = talos_machine_secrets.kube.client_configuration
  depends_on           = [talos_machine_configuration_apply.kube]
  lifecycle {
    # Re-bootstrap whenever the VM is replaced.
    replace_triggered_by = [proxmox_virtual_environment_vm.kube]
  }
}
# Retrieves the cluster-admin kubeconfig from the bootstrapped node.
resource "talos_cluster_kubeconfig" "kube" {
  # NOTE(review): same fragile interface-index assumption as above.
  node                 = proxmox_virtual_environment_vm.kube.ipv4_addresses[7][0] # lo + 6 talos-created interfaces before eth0
  depends_on           = [talos_machine_bootstrap.kube]
  client_configuration = talos_machine_secrets.kube.client_configuration
}
# Raw admin kubeconfig; marked sensitive so it is redacted from CLI output.
output "kubeconfig" {
  value     = talos_cluster_kubeconfig.kube.kubeconfig_raw
  sensitive = true
}
# Writes the admin kubeconfig next to the module so the kubernetes and helm
# providers (configured in main.tf) can read it.
resource "local_file" "kubeconfig" {
  # Direct reference (the redundant "${...}" wrapper is unnecessary) — this
  # also makes the explicit depends_on redundant, since the reference itself
  # orders this resource after talos_cluster_kubeconfig.kube.
  content  = talos_cluster_kubeconfig.kube.kubeconfig_raw
  filename = "${path.module}/kubeconfig"
  # The kubeconfig holds cluster-admin credentials; the provider default of
  # 0777 would leave it world-readable.
  file_permission = "0600"
}
# TODO : Wait for talos_cluster_kubeconfig...
# Cilium CNI — provides both the CNI (set to "none" in the Talos machine
# config) and the kube-proxy replacement (proxy disabled there as well).
resource "helm_release" "cilium" {
  name       = "cilium"
  namespace  = "kube-system"
  repository = "https://helm.cilium.io/"
  chart      = "cilium"
  # Don't block on pod readiness — nothing can schedule before a CNI exists.
  wait       = false
  depends_on = [local_file.kubeconfig]

  set {
    name  = "ipam.mode"
    value = "kubernetes"
  }
  # Cilium takes over kube-proxy's job (disabled in the Talos config).
  set {
    name  = "kubeProxyReplacement"
    value = true
  }
  # Explicit capability lists for the agent / cleanup containers.
  set {
    name  = "securityContext.capabilities.ciliumAgent"
    value = "{CHOWN,KILL,NET_ADMIN,NET_RAW,IPC_LOCK,SYS_ADMIN,SYS_RESOURCE,DAC_OVERRIDE,FOWNER,SETGID,SETUID}"
  }
  set {
    name  = "securityContext.capabilities.cleanCiliumState"
    value = "{NET_ADMIN,SYS_ADMIN,SYS_RESOURCE}"
  }
  # Use the host's cgroup mount rather than letting Cilium auto-mount one.
  set {
    name  = "cgroup.autoMount.enabled"
    value = false
  }
  set {
    name  = "cgroup.hostRoot"
    value = "/sys/fs/cgroup"
  }
  # API server reached via localhost:7445 — presumably the Talos KubePrism
  # endpoint; confirm against the Talos configuration.
  set {
    name  = "k8sServiceHost"
    value = "localhost"
  }
  set {
    name  = "k8sServicePort"
    value = 7445
  }
  set {
    name  = "etcd.clusterDomain"
    value = "kube-talos.local"
  }
  # Hubble observability: relay + web UI
  set {
    name  = "hubble.relay.enabled"
    value = true
  }
  # Enable hubble ui
  set {
    name  = "hubble.ui.enabled"
    value = true
  }
  # Gateway API support
  set {
    name  = "gatewayAPI.enabled"
    value = true
  }
  set {
    name  = "gatewayAPI.enableAlpn"
    value = true
  }
  set {
    name  = "gatewayAPI.enableAppProtocol"
    value = true
  }
  # Gateway API trusted hops : for reverse proxy
  set {
    name  = "gatewayAPI.xffNumTrustedHops"
    value = 1
  }
  # Single-node cluster, so 1 operator only
  set {
    name  = "operator.replicas"
    value = 1
  }
  # L2 announcements
  set {
    name  = "l2announcements.enabled"
    value = true
  }
  set {
    name  = "externalIPs.enabled"
    value = true
  }
  # Disable ingress controller (traefik will be used for now)
  set {
    name  = "ingressController.enabled"
    value = false
  }
  set {
    name  = "ingressController.loadbalancerMode"
    value = "shared"
  }
  # Ingress controller for external : behind reverse proxy, trust 1 hop
  set {
    name  = "envoy.xffNumTrustedHopsL7PolicyIngress"
    value = 1
  }
  # Set cilium as default ingress controller
  # NOTE(review): ingressController.enabled is false above, so this and the
  # other ingressController.* settings are inert — confirm intended state.
  set {
    name  = "ingressController.default"
    value = true
  }
  set {
    name  = "ingressController.service.externalTrafficPolicy"
    value = "Local"
  }
}
# Namespace for the Flux controllers; created once the cluster is reachable
# and Cilium is installed.
resource "kubernetes_namespace" "flux-system" {
  metadata {
    name = "flux-system"
  }

  # Flux annotates/labels its own namespace; don't fight it on every plan.
  lifecycle {
    ignore_changes = [metadata[0].annotations, metadata[0].labels]
  }

  depends_on = [talos_cluster_kubeconfig.kube, local_file.kubeconfig, helm_release.cilium]
}
# GPG private key consumed by Flux (kustomize-controller decryption patch in
# values/components.yaml) to decrypt SOPS-encrypted manifests.
resource "kubernetes_secret" "flux-sops" {
  metadata {
    name      = "flux-sops"
    namespace = "flux-system"
  }

  # Fix: "Opaque" is the Kubernetes secret type for arbitrary user data;
  # "generic" is a kubectl subcommand name, not a valid built-in type.
  type = "Opaque"

  data = {
    "sops.asc" = var.sops_private_key
  }

  depends_on = [kubernetes_namespace.flux-system]
}
# Flux operator — manages the lifecycle of the Flux distribution itself.
resource "helm_release" "flux-operator" {
  name       = "flux-operator"
  chart      = "flux-operator"
  repository = "oci://ghcr.io/controlplaneio-fluxcd/charts"
  namespace  = "flux-system"
  # Block until the operator is ready so the instance below can be reconciled.
  wait       = true
  depends_on = [kubernetes_secret.flux-sops]
}
# Flux instance — declares the Flux distribution and its Git sync source.
resource "helm_release" "flux-instance" {
  name       = "flux"
  namespace  = "flux-system"
  repository = "oci://ghcr.io/controlplaneio-fluxcd/charts"
  chart      = "flux-instance"

  # Component list and kustomize patches (incl. SOPS decryption)
  values = [
    file("values/components.yaml")
  ]

  set {
    name  = "instance.distribution.version"
    value = "2.x" # track the latest Flux v2 release
  }
  set {
    name  = "instance.distribution.registry"
    value = "ghcr.io/fluxcd"
  }
  # Sync source: the homeprod GitHub repo, kubernetes/ subtree, main branch.
  set {
    name  = "instance.sync.name"
    value = "homeprod"
  }
  set {
    name  = "instance.sync.kind"
    value = "GitRepository"
  }
  set {
    name  = "instance.sync.url"
    value = "https://github.com/vhaudiquet/homeprod"
  }
  set {
    name  = "instance.sync.path"
    value = "kubernetes/"
  }
  set {
    name  = "instance.sync.ref"
    value = "refs/heads/main"
  }

  depends_on = [helm_release.flux-operator]
}

52
infra/pve/main.tf Normal file
View File

@@ -0,0 +1,52 @@
# Terraform providers configuration
# All providers pinned to exact versions for reproducible applies.
terraform {
  required_providers {
    # Proxmox VE (community bpg provider)
    proxmox = {
      source  = "bpg/proxmox"
      version = "0.69.1"
    }
    # Talos Linux machine/cluster management
    talos = {
      source  = "siderolabs/talos"
      version = "0.7.1"
    }
    kubernetes = {
      source  = "hashicorp/kubernetes"
      version = "2.36.0"
    }
    helm = {
      source  = "hashicorp/helm"
      version = "2.17.0"
    }
  }
}
# Proxmox configuration
provider "proxmox" {
  endpoint  = "https://pve.local:8006/"
  api_token = var.api_token
  # NOTE(review): TLS verification disabled — presumably a self-signed PVE
  # certificate; consider trusting the CA instead.
  insecure = true
  # SSH access as root via the local ssh-agent (used by the provider for
  # operations the API does not cover, e.g. snippet uploads).
  ssh {
    agent    = true
    username = "root"
  }
}
# Talos configuration — no global settings; credentials flow from
# talos_machine_secrets resources.
provider "talos" {}
# Kubernetes configuration — reads the kubeconfig written by
# local_file.kubeconfig in kube.tf.
provider "kubernetes" {
  config_path = "${path.module}/kubeconfig"
}
# Helm configuration — same kubeconfig as the kubernetes provider.
provider "helm" {
  kubernetes {
    config_path = "${path.module}/kubeconfig"
  }
}
# debian-latest vm template(s), cloned to make other vms
data "proxmox_virtual_environment_vms" "debian_vm_template" {
  node_name = "pve"
  tags      = ["template", "debian-latest"]
}

170
infra/pve/template.tf Normal file
View File

@@ -0,0 +1,170 @@
/*
* Terraform Proxmox templates
* VM and container templates, used to derive others
*/
# Debian Latest CLOUD disk image
resource "proxmox_virtual_environment_download_file" "debian-latest-cloudimg" {
  content_type = "iso"
  datastore_id = "local"
  # NOTE(review): ".img" suffix appended to the qcow2 name — presumably so the
  # datastore accepts it under the "iso" content type; confirm.
  file_name = "debian-12-generic-amd64.qcow2.img"
  node_name = "pve"
  url       = "https://cloud.debian.org/images/cloud/bookworm/latest/debian-12-generic-amd64.qcow2"
}
# Base cloud-config ('vendor') file for VM templates
# Minimal first-boot provisioning: common tools plus the QEMU guest agent
# (which the VMs' agent{} blocks rely on).
resource "proxmox_virtual_environment_file" "cloud_config" {
  content_type = "snippets"
  datastore_id = "local"
  node_name    = "pve"
  source_raw {
    data = <<-EOF
    #cloud-config
    package_update: true
    packages:
    - git
    - ca-certificates
    - wget
    - curl
    - gnupg2
    - qemu-guest-agent
    runcmd:
    - systemctl enable --now qemu-guest-agent
    EOF
    file_name = "cloud-config.yaml"
  }
}
# Debian Latest VM template
resource "proxmox_virtual_environment_vm" "debian-latest-template" {
  name        = "debian-latest-template"
  description = "Debian latest template VM from Terraform"
  tags        = ["debian", "debian-latest", "template", "terraform"]
  node_name   = "pve"
  vm_id       = 9002
  template    = true # never booted directly; cloned by other VMs

  machine         = "q35"
  keyboard_layout = "fr"

  agent {
    enabled = true # qemu-guest-agent installed via the cloud_config vendor data
  }

  stop_on_destroy = true

  cpu {
    cores = 2
    type  = "x86-64-v2-AES"
  }

  memory {
    dedicated = 2048
    floating  = 2048 # equal to dedicated => ballooning effectively disabled
  }

  disk {
    datastore_id = "local-lvm"
    # Imported from the downloaded Debian cloud image above
    file_id   = proxmox_virtual_environment_download_file.debian-latest-cloudimg.id
    interface = "scsi0"
  }

  vga {
    type = "serial0" # console on the serial device below
  }

  initialization {
    datastore_id = "local-lvm"
    interface    = "ide2"
    ip_config {
      ipv4 {
        address = "dhcp"
      }
    }
    user_account {
      keys     = [trimspace(var.ssh_public_key)]
      password = var.machine_root_password
      username = "root"
    }
    vendor_data_file_id = proxmox_virtual_environment_file.cloud_config.id
  }

  lifecycle {
    # Runtime-reported attributes; ignore drift
    ignore_changes = [
      ipv4_addresses, ipv6_addresses, network_interface_names
    ]
  }

  network_device {
    bridge  = "vmbr0"
    vlan_id = 2
  }

  operating_system {
    type = "l26" # Linux 2.6+ kernel
  }

  tpm_state {
    version = "v2.0"
  }

  serial_device {}
}
# Debian Latest LXC container image
resource "proxmox_virtual_environment_download_file" "debian-latest-lxc-img" {
  content_type = "vztmpl"
  datastore_id = "local"
  node_name    = "pve"
  # Fix: fetch over TLS — download.proxmox.com serves the same images via
  # HTTPS; the plain-http URL allowed tampering in transit.
  url = "https://download.proxmox.com/images/system/debian-12-standard_12.7-1_amd64.tar.zst"
}
# Debian Latest LXC container template
resource "proxmox_virtual_environment_container" "debian-latest-container-template" {
  description = "Debian latest template container from Terraform"
  node_name   = "pve"
  vm_id       = 9003
  template    = true # never started directly; cloned by other containers

  cpu {
    cores = 2
  }

  memory {
    dedicated = 512 # MiB
  }

  disk {
    datastore_id = "local-lvm"
    size         = 4 # 4 Gigabytes
  }

  initialization {
    hostname = "debian-latest-container-template"
    ip_config {
      ipv4 {
        address = "dhcp"
      }
    }
    user_account {
      keys     = [trimspace(var.ssh_public_key)]
      password = var.machine_root_password
    }
  }

  network_interface {
    name    = "veth0"
    vlan_id = 2
  }

  operating_system {
    # Rootfs from the downloaded LXC image above
    template_file_id = proxmox_virtual_environment_download_file.debian-latest-lxc-img.id
    type             = "debian"
  }
}

View File

@@ -0,0 +1,34 @@
# Helm values for the flux-instance chart, loaded via file() by the
# helm_release.flux-instance resource in kube.tf.
instance:
  # Flux controllers to deploy (full set, including image automation).
  components:
    - source-controller
    - kustomize-controller
    - helm-controller
    - notification-controller
    - image-reflector-controller
    - image-automation-controller
  cluster:
    type: kubernetes
    multitenant: false
    networkPolicy: true
    domain: "kube-talos.local"
  kustomize:
    patches:
      # Raise concurrency and requeue interval on the two heaviest controllers.
      - target:
          kind: Deployment
          name: "(kustomize-controller|helm-controller)"
        patch: |
          - op: add
            path: /spec/template/spec/containers/0/args/-
            value: --concurrent=10
          - op: add
            path: /spec/template/spec/containers/0/args/-
            value: --requeue-dependency=10s
      # Enable SOPS decryption on every Kustomization, using the flux-sops
      # secret created from Terraform.
      - patch: |
          - op: add
            path: /spec/decryption
            value:
              provider: sops
              secretRef:
                name: flux-sops
        target:
          kind: Kustomization

19
infra/pve/variables.tf Normal file
View File

@@ -0,0 +1,19 @@
# Input variables. Credentials are marked sensitive so Terraform redacts
# their values from plan/apply output.
variable "api_token" {
  description = "Token to connect Proxmox API"
  type        = string
  sensitive   = true
}

variable "machine_root_password" {
  description = "Root password for VMs and containers"
  type        = string
  sensitive   = true
}

# Public key only — not secret, so not marked sensitive.
variable "ssh_public_key" {
  description = "Public SSH key authorized access for VMs and containers"
  type        = string
}

variable "sops_private_key" {
  description = "Private SOPS GPG key for flux/kubernetes to decrypt secrets"
  type        = string
  sensitive   = true
}