This repo has no description
0
fork

Configure Feed

Select the types of activity you want to include in your feed.

feat(infra): multi region, IPv6 only with Wireguard encryption

Khue Doan 5532a95c 9817f7e1

+219 -22
+6 -2
infra/_modules/hetzner-vm/outputs.tf
··· 1 - output "ipv6_addresses" { 2 - value = { for node in hcloud_server.nodes : node.name => node.ipv6_address } 1 + output "hosts" { 2 + value = { 3 + for node in hcloud_server.nodes : node.name => { 4 + ipv6_address = node.ipv6_address 5 + } 6 + } 3 7 }
+5 -1
infra/_modules/nixos/configuration.nix
··· 17 17 }; 18 18 19 19 networking = { 20 - networkmanager = { 20 + tempAddresses = "disabled"; 21 + }; 22 + 23 + systemd = { 24 + network = { 21 25 enable = true; 22 26 }; 23 27 };
+77 -3
infra/_modules/nixos/flake.nix
··· 32 32 ./profiles/k3s.nix 33 33 { 34 34 networking.hostName = "kube-1"; 35 - services.k3s.clusterInit = true; 35 + systemd.network.networks."30-wan" = { 36 + matchConfig.Name = "ens18"; 37 + networkConfig.DHCP = "ipv4"; 38 + address = [ 39 + hosts.kube-1.ipv6_address 40 + ]; 41 + routes = [ 42 + { Gateway = "fe80::1"; } 43 + ]; 44 + }; 45 + services.k3s = { 46 + clusterInit = true; 47 + extraFlags = nixpkgs.lib.mkAfter [ 48 + "--node-external-ip=${hosts.kube-1.ipv6_address}" 49 + ]; 50 + }; 36 51 } 37 52 ]; 38 53 }; ··· 46 61 ./profiles/k3s.nix 47 62 { 48 63 networking.hostName = "kube-2"; 49 - services.k3s.serverAddr = "https://[${hosts.kube-1.ipv6_address}]:6443"; 64 + systemd.network.networks."30-wan" = { 65 + matchConfig.Name = "ens18"; 66 + networkConfig.DHCP = "ipv4"; 67 + address = [ 68 + hosts.kube-2.ipv6_address 69 + ]; 70 + routes = [ 71 + { Gateway = "fe80::1"; } 72 + ]; 73 + }; 74 + services.k3s = { 75 + serverAddr = "https://[${hosts.kube-1.ipv6_address}]:6443"; 76 + extraFlags = nixpkgs.lib.mkAfter [ 77 + "--node-external-ip=${hosts.kube-2.ipv6_address}" 78 + ]; 79 + }; 50 80 } 51 81 ]; 52 82 }; ··· 60 90 ./profiles/k3s.nix 61 91 { 62 92 networking.hostName = "kube-3"; 63 - services.k3s.serverAddr = "https://[${hosts.kube-1.ipv6_address}]:6443"; 93 + systemd.network.networks."30-wan" = { 94 + matchConfig.Name = "ens18"; 95 + networkConfig.DHCP = "ipv4"; 96 + address = [ 97 + hosts.kube-3.ipv6_address 98 + ]; 99 + routes = [ 100 + { Gateway = "fe80::1"; } 101 + ]; 102 + }; 103 + services.k3s = { 104 + serverAddr = "https://[${hosts.kube-1.ipv6_address}]:6443"; 105 + extraFlags = nixpkgs.lib.mkAfter [ 106 + "--node-external-ip=${hosts.kube-3.ipv6_address}" 107 + ]; 108 + }; 109 + } 110 + ]; 111 + }; 112 + kube-4 = nixpkgs.lib.nixosSystem { 113 + system = "aarch64-linux"; 114 + modules = [ 115 + disko.nixosModules.disko 116 + sops-nix.nixosModules.sops 117 + ./configuration.nix 118 + ./disks.nix 119 + ./profiles/k3s-agent.nix 120 + { 121 + 
networking.hostName = "kube-4"; 122 + systemd.network.networks."30-wan" = { 123 + matchConfig.Name = "enp1s0"; 124 + networkConfig.DHCP = "ipv4"; 125 + address = [ 126 + hosts.kube-4.ipv6_address 127 + ]; 128 + routes = [ 129 + { Gateway = "fe80::1"; } 130 + ]; 131 + }; 132 + services.k3s = { 133 + serverAddr = "https://[${hosts.kube-1.ipv6_address}]:6443"; 134 + extraFlags = nixpkgs.lib.mkAfter [ 135 + "--node-external-ip=${hosts.kube-4.ipv6_address}" 136 + ]; 137 + }; 64 138 } 65 139 ]; 66 140 };
+1 -1
infra/_modules/nixos/hosts.json
··· 1 - {"kube-1":{"ipv6_address":"2402:800:63e2:5af5:1b0:f4a:e854:5355"},"kube-2":{"ipv6_address":"2402:800:63e2:5af5:cb46:f8e4:131b:93c5"},"kube-3":{"ipv6_address":"2402:800:63e2:5af5:1ced:4595:b4bf:3634"}} 1 + {"kube-1":{"ipv6_address":"2402:800:63e2:5af5:be24:11ff:fe24:6daf"},"kube-2":{"ipv6_address":"2402:800:63e2:5af5:be24:11ff:fe34:24df"},"kube-3":{"ipv6_address":"2402:800:63e2:5af5:be24:11ff:fe50:9cba"},"kube-4":{"ipv6_address":"2a01:4f9:c012:3cad::1"}}
+4
infra/_modules/nixos/main.tf
··· 21 21 extra_environment = { 22 22 SOPS_FILE = var.sops_file 23 23 } 24 + 25 + depends_on = [ 26 + local_file.hosts 27 + ] 24 28 }
+4
infra/_modules/nixos/profiles/installer.nix
··· 5 5 (modulesPath + "/installer/cd-dvd/installation-cd-minimal.nix") 6 6 ]; 7 7 8 + networking = { 9 + tempAddresses = "disabled"; 10 + }; 11 + 8 12 services = { 9 13 openssh = { 10 14 enable = true;
+24
infra/_modules/nixos/profiles/k3s-agent.nix
··· 1 + { config, ... }: 2 + 3 + { 4 + networking = { 5 + firewall = { 6 + # https://docs.k3s.io/installation/requirements#inbound-rules-for-k3s-nodes 7 + allowedTCPPorts = [ 8 + 10250 # Kubelet metrics 9 + ]; 10 + allowedUDPPorts = [ 11 + 51820 # Flannel Wireguard with IPv4 12 + 51821 # Flannel Wireguard with IPv6 13 + ]; 14 + }; 15 + }; 16 + 17 + services = { 18 + k3s = { 19 + enable = true; 20 + role = "agent"; 21 + tokenFile = config.sops.secrets.k3s_token.path; 22 + }; 23 + }; 24 + }
+15 -4
infra/_modules/nixos/profiles/k3s.nix
··· 3 3 { 4 4 networking = { 5 5 firewall = { 6 - # https://docs.k3s.io/installation/requirements#inbound-rules-for-k3s-server-nodes 6 + # https://docs.k3s.io/installation/requirements#inbound-rules-for-k3s-nodes 7 7 allowedTCPPorts = [ 8 - 6443 9 - 10250 8 + 6443 # K3s supervisor and Kubernetes API Server 9 + 10250 # Kubelet metrics 10 10 ]; 11 11 allowedTCPPortRanges = [ 12 + # Required only for HA with embedded etcd 12 13 { from = 2379; to = 2380; } 13 14 ]; 15 + allowedUDPPorts = [ 16 + 51820 # Flannel Wireguard with IPv4 17 + 51821 # Flannel Wireguard with IPv6 18 + ]; 14 19 }; 15 20 }; 16 21 ··· 19 24 enable = true; 20 25 role = "server"; 21 26 tokenFile = config.sops.secrets.k3s_token.path; 22 - extraFlags = toString [ 27 + extraFlags = [ 23 28 "--disable-helm-controller" 24 29 "--disable-network-policy" 25 30 "--disable=traefik" 31 + "--cluster-cidr=fd6a:7c7b:3e12:0::/56" # TODO proper ULA planning 32 + "--service-cidr=fd6a:7c7b:3e12:100::/112" # TODO proper ULA planning 33 + "--flannel-backend=wireguard-native" 34 + "--flannel-external-ip" 35 + "--flannel-ipv6-masq" # Enable IPv6 NAT, as per default pods use their pod IPv6 address for outgoing traffic 36 + # TODO net.ipv6.conf.all.accept_ra=2 26 37 ]; 27 38 }; 28 39 };
+73 -1
infra/_modules/proxmox-vm/outputs.tf
··· 1 + # TODO wtf 2 + # Build MAC-derived EUI-64 suffix per VM, no bitwise ops used 3 + locals { 4 + eui64_suffix_by_node = { 5 + for node in proxmox_virtual_environment_vm.main : 6 + node.id => ( 7 + # Parse MAC -> 6 bytes 8 + # Example: BC:24:11:EB:01:83 9 + # Flip the U/L bit (bit 1) of the first byte: if it’s 0 add 2, else subtract 2 10 + # Then insert ff:fe and format into 4x16-bit hex groups. 11 + # Result suffix for example above: be24:11ff:feeb:183 12 + ( 13 + # lowercase, split, parse 14 + # keep these short to avoid paren hell 15 + # b = bytes[0..5], b0f = flipped first byte 16 + # words w1..w4 form the IID 17 + # join as hex groups 18 + # (all Terraform/HCL functions; no bitwise) 19 + # -- 20 + # Precompute the bytes list 21 + # NOTE: locals inside expressions are not supported, so repeat split/parse where needed 22 + # 23 + # first byte (original) 24 + # b0 = parseint(split(":", lower(node.network_device[0].mac_address))[0], 16) 25 + # bit1 = floor(b0/2) % 2 -> 0 or 1 26 + # b0f = bit1 == 0 ? b0 + 2 : b0 - 2 27 + # 28 + join(":", [ 29 + format( 30 + "%x", 31 + ( 32 + ( 33 + ( (floor(parseint(split(":", lower(node.network_device[0].mac_address))[0], 16) / 2)) % 2 ) == 0 34 + ) 35 + ? 
parseint(split(":", lower(node.network_device[0].mac_address))[0], 16) + 2 36 + : parseint(split(":", lower(node.network_device[0].mac_address))[0], 16) - 2 37 + ) * 256 38 + + parseint(split(":", lower(node.network_device[0].mac_address))[1], 16) 39 + ), 40 + format( 41 + "%x", 42 + parseint(split(":", lower(node.network_device[0].mac_address))[2], 16) * 256 + 255 43 + ), 44 + format( 45 + "%x", 46 + 254 * 256 + parseint(split(":", lower(node.network_device[0].mac_address))[3], 16) 47 + ), 48 + format( 49 + "%x", 50 + parseint(split(":", lower(node.network_device[0].mac_address))[4], 16) * 256 51 + + parseint(split(":", lower(node.network_device[0].mac_address))[5], 16) 52 + ) 53 + ]) 54 + ) 55 + ) 56 + } 57 + } 58 + 59 + # Pick the IPv6 whose IID matches the MAC-derived EUI-64; fallback to first global (non-loopback, non-link-local) 1 60 output "hosts" { 2 61 value = { 3 62 for node in proxmox_virtual_environment_vm.main : node.name => { 4 - ipv6_address = node.ipv6_addresses[1][0] 63 + ipv6_address = ( 64 + length([ 65 + for ip in flatten(node.ipv6_addresses) : 66 + ip if endswith(lower(ip), local.eui64_suffix_by_node[node.id]) 67 + ]) > 0 68 + ? [ 69 + for ip in flatten(node.ipv6_addresses) : 70 + ip if endswith(lower(ip), local.eui64_suffix_by_node[node.id]) 71 + ][0] 72 + : [ 73 + for ip in flatten(node.ipv6_addresses) : 74 + ip if ip != "::1" && !startswith(lower(ip), "fe80:") 75 + ][0] 76 + ) 5 77 } 6 78 } 7 79 }
+2 -9
infra/production/hetzner/compute/terragrunt.hcl
··· 1 - # TODO temporarily disable Hetzner until I fix the IPv6 issue 2 - # https://wiki.nixos.org/wiki/Install_NixOS_on_Hetzner_Cloud 3 - skip = true 4 - 5 1 include "root" { 6 2 path = find_in_parent_folders("root.hcl") 7 3 expose = true ··· 13 9 14 10 inputs = { 15 11 nodes = { 16 - "master-1" = { 12 + "kube-4" = { 17 13 location = "hel1" 18 14 } 19 - # "worker-1" = { 15 + # "kube-5" = { 20 16 # location = "nbg1" 21 - # } 22 - # "worker-2" = { 23 - # location = "fsn1" 24 17 # } 25 18 } 26 19 }
+8 -1
infra/production/nixos/terragrunt.hcl
··· 11 11 config_path = "../proxmox/compute" 12 12 } 13 13 14 + dependency "hetzner" { 15 + config_path = "../hetzner/compute" 16 + } 17 + 14 18 inputs = { 15 19 flake = "${find_in_parent_folders("_modules")}//nixos" 16 - hosts = dependency.proxmox.outputs.hosts 20 + hosts = merge( 21 + dependency.proxmox.outputs.hosts, 22 + dependency.hetzner.outputs.hosts, 23 + ) 17 24 sops_file = find_in_parent_folders("secrets.yaml") 18 25 }