# Forked from tangled.org/core (Monorepo for Tangled).
1{
2 nixpkgs,
3 system,
4 hostSystem,
5 self,
6}: let
7 lib = nixpkgs.lib;
8
9 envVar = name: let
10 var = builtins.getEnv name;
11 in
12 if var == ""
13 then throw "\$${name} must be defined, see https://docs.tangled.org/hacking-on-tangled.html#hacking-on-tangled for more details"
14 else var;
15 envVarOr = name: default: let
16 var = builtins.getEnv name;
17 in
18 if var != ""
19 then var
20 else default;
21
  # Shared endpoint defaults for both knot and spindle; override via env.
  plcUrl = envVarOr "TANGLED_VM_PLC_URL" "https://plc.directory";
  jetstream = envVarOr "TANGLED_VM_JETSTREAM_ENDPOINT" "wss://jetstream1.us-west.bsky.network/subscribe";
24
25 checkFile = value: path: if builtins.pathExists path then lib.hasPrefix value (builtins.readFile path) else false;
  # Detect whether KVM nested virtualisation is enabled on the build host:
  # kvm_amd exposes "1" and kvm_intel exposes "Y" in their respective
  # /sys/module parameter files.
  _nestedVirt =
    (checkFile "1" /sys/module/kvm_amd/parameters/nested)
    || (checkFile "Y" /sys/module/kvm_intel/parameters/nested);
  nestedVirtWarning = ''
    KVM nested virtualisation is not enabled on this host.
    You should enable it if you can for better performance when testing the QEMU spindle engine!
  '';
  # Same value as _nestedVirt, but emits the warning above at evaluation
  # time when nested virtualisation is unavailable.
  nestedVirt = lib.warnIf (!_nestedVirt) nestedVirtWarning _nestedVirt;
34in
35 lib.nixosSystem {
36 inherit system;
37 modules = [
38 self.nixosModules.knot
39 self.nixosModules.spindle
40 self.nixosModules.knotmirror
41 ({
42 lib,
43 config,
44 pkgs,
45 ...
46 }: {
      # QEMU settings for the VM variant of this system
      # (`config.system.build.vm` / `nixos-rebuild build-vm`).
      virtualisation.vmVariant.virtualisation = {
        # Build the VM runner with the host's package set, which may differ
        # from the guest `system` (cross-platform development hosts).
        host.pkgs = import nixpkgs {system = hostSystem;};

        graphics = false;
        memorySize = 3072;
        diskSize = 20 * 1024;
        cores = 2;
        # Only pass KVM acceleration flags when nested virtualisation is
        # available; evaluating `nestedVirt` also warns when it is not.
        qemu.options = lib.optionals nestedVirt ["-enable-kvm" "-cpu host"];

        # Host->guest port forwards; every host-reachable service is listed
        # here explicitly (the guest firewall is disabled below).
        forwardPorts = [
          # ssh
          {
            from = "host";
            host.port = 2222;
            guest.port = 22;
          }
          # knot
          {
            from = "host";
            host.port = 6444;
            guest.port = 6444;
          }
          # spindle
          {
            from = "host";
            host.port = 6555;
            guest.port = 6555;
          }
          # knotmirror
          {
            from = "host";
            host.port = 7007; # 7000 is reserved on macOS for AirPlay
            guest.port = 7000;
          }
          # knotmirror-tap
          {
            from = "host";
            host.port = 7480;
            guest.port = 7480;
          }
          # knotmirror-admin
          {
            from = "host";
            host.port = 7200;
            guest.port = 7200;
          }
        ];
        sharedDirectories = {
          # We can't use the 9p mounts directly for most of these
          # as SQLite is incompatible with them. So instead we
          # mount the shared directories to a different location
          # and copy the contents around on service start/stop.
          # NOTE(review): the $TANGLED_VM_DATA_DIR below is a literal in the
          # Nix string (not interpolated by Nix) — presumably expanded by the
          # VM start script; confirm.
          knotData = {
            source = "$TANGLED_VM_DATA_DIR/knot";
            target = "/mnt/knot-data";
          };
          spindleData = {
            source = "$TANGLED_VM_DATA_DIR/spindle";
            target = "/mnt/spindle-data";
          };
          spindleLogs = {
            source = "$TANGLED_VM_DATA_DIR/spindle-logs";
            target = "/var/log/spindle";
          };
        };
      };
      # This is fine because any and all ports that are forwarded to host are
      # explicitly marked above; we don't need a separate guest firewall.
      networking.firewall.enable = false;
      time.timeZone = "Europe/London";
      # Drop straight into a root shell on the VM console.
      services.getty.autologinUser = "root";
      # Debugging conveniences inside the guest.
      environment.systemPackages = with pkgs; [curl vim git sqlite litecli postgresql_14];
      # Development knot (from self.nixosModules.knot above).
      services.tangled.knot = {
        enable = true;
        motd = "Welcome to the development knot!\n";
        server = {
          # Required: evaluation aborts with a docs pointer when unset.
          owner = envVar "TANGLED_VM_KNOT_OWNER";
          hostname = envVarOr "TANGLED_VM_KNOT_HOST" "localhost:6444";
          plcUrl = plcUrl;
          jetstreamEndpoint = jetstream;
          listenAddr = "0.0.0.0:6444";
          dev = true;
        };
        # The local knotmirror instance (guest port 7000, host port 7007).
        knotmirrors = [
          "http://localhost:7000"
        ];
      };
      # Development spindle (from self.nixosModules.spindle above).
      services.tangled.spindle = {
        enable = true;
        server = {
          # Required: evaluation aborts with a docs pointer when unset.
          owner = envVar "TANGLED_VM_SPINDLE_OWNER";
          hostname = envVarOr "TANGLED_VM_SPINDLE_HOST" "localhost:6555";
          plcUrl = plcUrl;
          jetstreamEndpoint = jetstream;
          listenAddr = "0.0.0.0:6555";
          dev = true;
          queueSize = 100;
          maxJobCount = 2;
          secrets = {
            provider = "sqlite";
          };
        };

        pipelines = {
          # Empty string unless SPINDLE_S3_LOG_BUCKET is set in the env.
          logBucket = envVarOr "SPINDLE_S3_LOG_BUCKET" "";
          qemu = {
            memory = "1024";
            smp = 2;
          };
        };
      };
      # PostgreSQL backing knotmirror and its tap (see dbUrl values below).
      services.postgresql = {
        enable = true;
        package = pkgs.postgresql_14;
        ensureDatabases = ["mirror" "tap"];
        ensureUsers = [
          {name = "tnglr";}
        ];
        # Trust local connections for the dev user — acceptable inside a
        # throwaway dev VM.
        # NOTE(review): NixOS prepends its own authentication entries; confirm
        # these lines take effect in the intended order (mkBefore if not).
        authentication = ''
          local all tnglr trust
          host all tnglr 127.0.0.1/32 trust
        '';
      };
      # Development knotmirror (from self.nixosModules.knotmirror above);
      # uses the "mirror" and "tap" databases ensured by postgresql above.
      services.tangled.knotmirror = {
        enable = true;
        listenAddr = "0.0.0.0:7000";
        adminListenAddr = "0.0.0.0:7200";
        hostname = "localhost:7000";
        dbUrl = "postgresql://tnglr@127.0.0.1:5432/mirror";
        fullNetwork = false;
        tap.dbUrl = "postgresql://tnglr@127.0.0.1:5432/tap";
      };
      users = {
        # Pin the git user/group to a fixed uid/gid so we don't have to deal
        # with permission clashing between blank disk VMs and existing state.
        users.${config.services.tangled.knot.gitUser}.uid = 666;
        groups.${config.services.tangled.knot.gitUser}.gid = 666;

        # TODO: separate spindle user
      };
      systemd.services = let
        # Wrap a service with pre-start/post-stop rsync between a 9p-mounted
        # `source` and the service's real state directory `target` (SQLite
        # can't live on the 9p mounts directly; see sharedDirectories above).
        mkDataSyncScripts = source: target: {
          enableStrictShellChecks = true;

          # Seed the state directory from the shared mount before the unit's
          # own preStart runs (mkBefore keeps this snippet first).
          preStart = lib.mkBefore ''
            mkdir -p ${target}
            ${lib.getExe pkgs.rsync} -a ${source}/ ${target}
          '';

          # Copy state back to the shared mount after the unit stops
          # (mkAfter keeps this snippet last). rsync -a without --delete:
          # files removed inside the guest will linger in `source`.
          postStop = lib.mkAfter ''
            ${lib.getExe pkgs.rsync} -a ${target}/ ${source}
          '';

          # Run preStart/postStop as root even if the service drops
          # privileges. NOTE(review): PermissionsStartOnly is deprecated
          # upstream in favour of "+"-prefixed Exec* entries; confirm it is
          # still honoured on this NixOS release.
          serviceConfig.PermissionsStartOnly = true;
        };
      in {
        knot = mkDataSyncScripts "/mnt/knot-data" config.services.tangled.knot.stateDir;
        # Spindle's SQLite DB lives under dirOf dbPath; sync that directory.
        spindle = mkDataSyncScripts "/mnt/spindle-data" (builtins.dirOf config.services.tangled.spindle.server.dbPath);
        # Order the mirror units after PostgreSQL.
        # NOTE(review): confirm "postgresql.target" exists on this NixOS
        # release; older releases only provide postgresql.service.
        knotmirror.after = ["postgresql.target"];
        tap-knotmirror.after = ["postgresql.target"];
      };
207 })
208 ];
209 }