mirror of
https://github.com/vhaudiquet/homeprod.git
synced 2026-01-11 20:47:20 +00:00
infra: r740 updates
This commit is contained in:
311
infra/r740/kube/main.tf
Normal file
311
infra/r740/kube/main.tf
Normal file
@@ -0,0 +1,311 @@
|
||||
# Provider requirements for the Talos-based single-node Kubernetes cluster.
terraform {
  required_providers {
    talos = {
      source  = "siderolabs/talos"
      version = "0.9.0"
    }
    kubernetes = {
      source  = "hashicorp/kubernetes"
      version = "2.36.0"
    }
    helm = {
      source  = "hashicorp/helm"
      version = "2.17.0"
    }
    # hashicorp/local backs the local_file resources below (kubeconfig /
    # talosconfig); pin it explicitly instead of relying on implicit
    # provider resolution.
    local = {
      source  = "hashicorp/local"
      version = "~> 2.5"
    }
  }
}
# Talos configuration
# No static provider configuration: credentials come from the
# talos_machine_secrets resource at plan/apply time.
provider "talos" {}
# Kubernetes configuration
# Authenticates via the kubeconfig file written by local_file.kubeconfig
# after cluster bootstrap.
provider "kubernetes" {
  config_path = "${path.module}/kubeconfig"
}
# Helm configuration
# Uses the same generated kubeconfig as the kubernetes provider above.
provider "helm" {
  kubernetes {
    config_path = "${path.module}/kubeconfig"
  }
}
# Generates the cluster-wide Talos secrets bundle (CA material, tokens,
# client credentials) referenced by every other talos_* resource below.
resource "talos_machine_secrets" "kube" {}
# Rendered Talos machine configuration for the single control-plane node.
# Patches applied on top of the defaults: custom installer image, static
# nameserver, extra certificate SANs, scheduling allowed on the control
# plane, and both the built-in CNI and kube-proxy disabled (Cilium,
# installed via Helm below, replaces both).
data "talos_machine_configuration" "kube" {
  cluster_name     = "kube-${var.physical_hostname}"
  machine_type     = "controlplane"
  cluster_endpoint = "https://${var.kube_host}:6443"
  machine_secrets  = talos_machine_secrets.kube.machine_secrets
  config_patches = [
    yamlencode({
      machine = {
        install = {
          # Installer image from the Talos image factory (schematic hash
          # encodes the site-specific system extensions).
          image = "factory.talos.dev/installer/ce4c980550dd2ab1b17bbf2b08801c7eb59418eafe8f279833297925d67c7515:v1.11.5"
        }
        network = {
          nameservers = [
            "10.1.2.3"
          ]
        }
        certSANs = [var.kube_host, var.kube_hostname]
      }
      cluster = {
        clusterName = "kube-${var.physical_hostname}"
        # Single-node cluster: workloads must be schedulable on the
        # control plane.
        allowSchedulingOnControlPlanes = true
        apiServer = {
          certSANs = [var.kube_host, var.kube_hostname]
        }
        network = {
          dnsDomain = "cluster.local"
          cni = {
            # No built-in CNI; Cilium is installed separately below.
            name = "none"
          }
        }
        proxy = {
          # kube-proxy disabled: Cilium's kubeProxyReplacement takes over.
          disabled = true
        }
      }
    })
  ]
}
# talosctl client configuration for the node.
# NOTE(review): this data source is not referenced anywhere in this file and
# duplicates data.talos_client_configuration.talosconfig below — consider
# removing one of the two.
data "talos_client_configuration" "kube" {
  cluster_name         = "kube-${var.physical_hostname}"
  client_configuration = talos_machine_secrets.kube.client_configuration
  nodes                = [var.kube_host]
}
# Pushes the rendered machine configuration to the Talos node.
resource "talos_machine_configuration_apply" "kube" {
  client_configuration        = talos_machine_secrets.kube.client_configuration
  machine_configuration_input = data.talos_machine_configuration.kube.machine_configuration
  node                        = var.kube_host
  # No explicit depends_on needed: the dependency on talos_machine_secrets
  # is implicit through the client_configuration reference above.
}
# Bootstraps etcd/Kubernetes on the node once its configuration is applied.
resource "talos_machine_bootstrap" "kube" {
  node                 = var.kube_host
  client_configuration = talos_machine_secrets.kube.client_configuration
  # Only the configuration-apply ordering needs to be explicit; the secrets
  # dependency is already implicit via the client_configuration reference.
  depends_on = [talos_machine_configuration_apply.kube]
}
# Retrieves an admin kubeconfig for the freshly bootstrapped cluster.
resource "talos_cluster_kubeconfig" "kube" {
  node = var.kube_host
  # Explicit ordering required: the kubeconfig is only retrievable after
  # bootstrap, and nothing here references the bootstrap resource directly.
  depends_on = [ talos_machine_bootstrap.kube ]
  client_configuration = talos_machine_secrets.kube.client_configuration
}
# Raw admin kubeconfig; marked sensitive so it is redacted in plan output.
output "kubeconfig" {
  description = "Raw admin kubeconfig for the Talos-managed cluster"
  sensitive   = true
  value       = talos_cluster_kubeconfig.kube.kubeconfig_raw
}
# Persists the kubeconfig to disk so the kubernetes/helm providers can use it.
# NOTE(review): this writes cluster-admin credentials in cleartext into the
# module directory — make sure the path is gitignored.
resource "local_file" "kubeconfig" {
  content  = talos_cluster_kubeconfig.kube.kubeconfig_raw
  filename = "${path.module}/kubeconfig"
  # Owner-only permissions: the file contains admin credentials and
  # local_file otherwise defaults to 0777.
  file_permission = "0600"
  # No explicit depends_on needed: implicit via the content reference.
}
# talosctl client configuration, rendered so it can be written to disk below.
data "talos_client_configuration" "talosconfig" {
  cluster_name = "kube-${var.physical_hostname}"
  client_configuration = talos_machine_secrets.kube.client_configuration
  nodes = [var.kube_host]
}
# Persists the talosctl configuration next to the kubeconfig.
# NOTE(review): like the kubeconfig, this contains cluster credentials —
# make sure the path is gitignored.
resource "local_file" "talosconfig" {
  content  = data.talos_client_configuration.talosconfig.talos_config
  filename = "${path.module}/talosconfig"
  # Owner-only permissions; local_file otherwise defaults to 0777.
  file_permission = "0600"
  # No explicit depends_on needed: implicit via the content reference.
}
# Cilium CNI, installed right after bootstrap. Replaces both the built-in
# CNI (set to "none" in the Talos machine config) and kube-proxy (disabled
# there as well).
# TODO : Wait for talos_cluster_kubeconfig...
resource "helm_release" "cilium" {
  name = "cilium"
  namespace = "kube-system"
  repository = "https://helm.cilium.io/"
  chart = "cilium"
  # wait = false: the cluster has no CNI yet, so pods (including Cilium's
  # own hooks) cannot become Ready before the chart finishes installing.
  wait = false
  depends_on = [ local_file.kubeconfig, talos_cluster_kubeconfig.kube ]

  set {
    name = "ipam.mode"
    value = "kubernetes"
  }
  # Full kube-proxy replacement (kube-proxy is disabled in the Talos config).
  set {
    name = "kubeProxyReplacement"
    value = true
  }
  # Capability sets recommended for running Cilium on Talos (no SYS_MODULE).
  set {
    name = "securityContext.capabilities.ciliumAgent"
    value = "{CHOWN,KILL,NET_ADMIN,NET_RAW,IPC_LOCK,SYS_ADMIN,SYS_RESOURCE,DAC_OVERRIDE,FOWNER,SETGID,SETUID}"
  }
  set {
    name = "securityContext.capabilities.cleanCiliumState"
    value = "{NET_ADMIN,SYS_ADMIN,SYS_RESOURCE}"
  }
  # Talos mounts the cgroup filesystem itself; do not let Cilium remount it.
  set {
    name = "cgroup.autoMount.enabled"
    value = false
  }
  set {
    name = "cgroup.hostRoot"
    value = "/sys/fs/cgroup"
  }
  # Reach the API server through the local KubePrism endpoint (port 7445).
  set {
    name = "k8sServiceHost"
    value = "localhost"
  }
  set {
    name = "k8sServicePort"
    value = 7445
  }
  set {
    name = "etcd.clusterDomain"
    value = "cluster.local"
  }
  # Hubble observability (relay + UI).
  set {
    name = "hubble.relay.enabled"
    value = true
  }
  # Enable hubble ui
  set {
    name = "hubble.ui.enabled"
    value = true
  }
  # Gateway API support
  set {
    name = "gatewayAPI.enabled"
    value = true
  }
  set {
    name = "gatewayAPI.enableAlpn"
    value = true
  }
  set {
    name = "gatewayAPI.enableAppProtocol"
    value = true
  }
  # Gateway API trusted hops : for reverse proxy
  set {
    name = "gatewayAPI.xffNumTrustedHops"
    value = 1
  }
  # Single-node cluster, so 1 operator only
  set {
    name = "operator.replicas"
    value = 1
  }
  # L2 announcements
  set {
    name = "l2announcements.enabled"
    value = true
  }
  set {
    name = "externalIPs.enabled"
    value = true
  }
  # Disable ingress controller (traefik will be used for now)
  set {
    name = "ingressController.enabled"
    value = false
  }
  set {
    name = "ingressController.loadbalancerMode"
    value = "shared"
  }
  # Ingress controller for external : behind reverse proxy, trust 1 hop
  set {
    name = "envoy.xffNumTrustedHopsL7PolicyIngress"
    value = 1
  }
  # Set cilium as default ingress controller
  # NOTE(review): ingressController.enabled is false above, so the
  # default/loadbalancerMode/externalTrafficPolicy settings below are
  # currently inert — presumably kept for a later switch away from
  # traefik; confirm intent.
  set {
    name = "ingressController.default"
    value = true
  }
  set {
    name = "ingressController.service.externalTrafficPolicy"
    value = "Local"
  }
}
# Namespace for the Flux GitOps stack.
resource "kubernetes_namespace" "flux-system" {
  metadata {
    name = "flux-system"
  }

  # Flux labels/annotates its own namespace at runtime; ignore those
  # fields so subsequent plans do not show drift.
  lifecycle {
    ignore_changes = [ metadata[0].annotations, metadata[0].labels ]
  }

  # Cilium must be in place first: without a CNI no pod (including Flux's)
  # can be scheduled successfully.
  depends_on = [ talos_cluster_kubeconfig.kube, local_file.kubeconfig, helm_release.cilium ]
}
# SOPS GPG private key so Flux (kustomize-controller) can decrypt secrets
# stored in the synced repository.
resource "kubernetes_secret" "flux-sops" {
  metadata {
    name      = "flux-sops"
    namespace = "flux-system"
  }

  # "Opaque" is the standard Kubernetes type for arbitrary user data; the
  # previous value "generic" is the kubectl subcommand name, which kubectl
  # itself maps to type Opaque.
  type = "Opaque"

  data = {
    "sops.asc" = var.sops_private_key
  }

  depends_on = [kubernetes_namespace.flux-system]
}
# Flux operator: manages the lifecycle of the Flux distribution itself.
resource "helm_release" "flux-operator" {
  name = "flux-operator"
  namespace = "flux-system"
  repository = "oci://ghcr.io/controlplaneio-fluxcd/charts"
  chart = "flux-operator"
  # Wait for the operator to be Ready: the FluxInstance CRD it installs is
  # required by the flux-instance release below.
  wait = true
  depends_on = [ kubernetes_secret.flux-sops ]
}
# Flux instance: the actual Flux deployment, syncing the kubernetes/ tree
# of the homeprod repository from the main branch.
resource "helm_release" "flux-instance" {
  name       = "flux"
  namespace  = "flux-system"
  repository = "oci://ghcr.io/controlplaneio-fluxcd/charts"
  chart      = "flux-instance"

  values = [
    # Anchor the path on path.module so it resolves regardless of the
    # working directory terraform is invoked from (the bare relative path
    # previously resolved against the CWD).
    file("${path.module}/values/components.yaml")
  ]

  set {
    name  = "instance.distribution.version"
    value = "2.x"
  }
  set {
    name  = "instance.distribution.registry"
    value = "ghcr.io/fluxcd"
  }
  set {
    name  = "instance.sync.name"
    value = "homeprod"
  }
  set {
    name  = "instance.sync.kind"
    value = "GitRepository"
  }
  set {
    name  = "instance.sync.url"
    value = "https://github.com/vhaudiquet/homeprod"
  }
  set {
    name  = "instance.sync.path"
    value = "kubernetes/"
  }
  set {
    name  = "instance.sync.ref"
    value = "refs/heads/main"
  }

  depends_on = [helm_release.flux-operator]
}
||||
16
infra/r740/kube/variables.tf
Normal file
16
infra/r740/kube/variables.tf
Normal file
@@ -0,0 +1,16 @@
variable "sops_private_key" {
  description = "Private SOPS GPG key for flux/kubernetes to decrypt secrets"
  type        = string
  # Private key material: keep it out of CLI and plan output.
  sensitive   = true
}
variable "kube_hostname" {
|
||||
description = "Kubernetes cluster hostname"
|
||||
type = string
|
||||
}
|
||||
# Network address of the Talos node; used as the cluster endpoint
# (https://<kube_host>:6443), talosctl target, and a certificate SAN.
# NOTE(review): presumably an IP address, as it is used alongside
# kube_hostname in the SAN lists — confirm.
variable "kube_host" {
  description = "Kubernetes cluster host"
  type = string
}
# Hostname of the physical machine hosting the Kubernetes VM; used to
# derive the cluster name ("kube-<physical_hostname>").
variable "physical_hostname" {
  description = "Host name of the physical host for the kubernetes VM"
  type = string
}
|
||||
Reference in New Issue
Block a user