this repo has no description
2
fork

Configure Feed

Select the types of activity you want to include in your feed.

updating some stuff, trying to get woodpecker, and grafana alerts off the ground

+619 -3
+2 -1
hosts/curve/default.nix
··· 24 24 services.NetworkManager-wait-online.wantedBy = pkgs.lib.mkForce [ ]; # Normally ["network-online.target"] 25 25 }; 26 26 27 - programs.gnupg.agent.pinentryFlavor = "gtk2"; 27 + programs.gnupg.agent.enable = true; 28 + programs.gnupg.agent.pinentryFlavor = "gnome3"; 28 29 29 30 fileSystems."/mnt/ftp" = { 30 31 device = "192.168.1.240:/home/ftp";
+2 -1
hosts/profiles/desktop/default.nix
··· 58 58 unstable.sublime-music 59 59 unstable.nheko 60 60 unstable.tootle 61 + newsflash 62 + liferea 61 63 62 64 scrot 63 65 ripcord ··· 69 71 calibre 70 72 fractal 71 73 mpv 72 - newsflash 73 74 zeal 74 75 xclip 75 76 xdotool
+300
hosts/profiles/monitoring/alert-rules.yaml
# Prometheus alerting rules for node_exporter hosts, adapted from the
# samber/awesome-prometheus-alerts collection, plus a borgbackup freshness rule.
groups:
- name: hosts
  rules:
  - alert: HostOutOfMemory
    expr: node_memory_MemAvailable_bytes / node_memory_MemTotal_bytes * 100 < 10
    for: 2m
    labels:
      severity: warning
    annotations:
      summary: Host out of memory (instance {{ $labels.instance }})
      description: "Node memory is filling up (< 10% left)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

  - alert: HostMemoryUnderMemoryPressure
    expr: rate(node_vmstat_pgmajfault[1m]) > 1000
    for: 2m
    labels:
      severity: warning
    annotations:
      summary: Host memory under memory pressure (instance {{ $labels.instance }})
      description: "The node is under heavy memory pressure. High rate of major page faults\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

  - alert: HostUnusualNetworkThroughputIn
    expr: sum by (instance) (rate(node_network_receive_bytes_total[2m])) / 1024 / 1024 > 100
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: Host unusual network throughput in (instance {{ $labels.instance }})
      description: "Host network interfaces are probably receiving too much data (> 100 MB/s)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

  - alert: HostUnusualNetworkThroughputOut
    expr: sum by (instance) (rate(node_network_transmit_bytes_total[2m])) / 1024 / 1024 > 100
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: Host unusual network throughput out (instance {{ $labels.instance }})
      description: "Host network interfaces are probably sending too much data (> 100 MB/s)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

  # - alert: HostUnusualDiskReadRate
  #   expr: sum by (instance) (rate(node_disk_read_bytes_total[2m])) / 1024 / 1024 > 100
  #   for: 5m
  #   labels:
  #     severity: warning
  #   annotations:
  #     summary: Host unusual disk read rate (instance {{ $labels.instance }})
  #     description: "Disk is probably reading too much data (> 100 MB/s)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
  #
  # - alert: HostUnusualDiskWriteRate
  #   expr: sum by (instance) (rate(node_disk_written_bytes_total[2m])) / 1024 / 1024 > 50
  #   for: 2m
  #   labels:
  #     severity: warning
  #   annotations:
  #     summary: Host unusual disk write rate (instance {{ $labels.instance }})
  #     description: "Disk is probably writing too much data (> 50 MB/s)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

  # Please add ignored mountpoints in node_exporter parameters like
  # "--collector.filesystem.ignored-mount-points=^/(sys|proc|dev|run)($|/)".
  # Same rule using "node_filesystem_free_bytes" will fire when disk fills for non-root users.
  - alert: HostOutOfDiskSpace
    expr: (node_filesystem_avail_bytes * 100) / node_filesystem_size_bytes < 10 and ON (instance, device, mountpoint) node_filesystem_readonly == 0
    for: 2m
    labels:
      severity: warning
    annotations:
      summary: Host out of disk space (instance {{ $labels.instance }})
      description: "Disk is almost full (< 10% left)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

  # Please add ignored mountpoints in node_exporter parameters like
  # "--collector.filesystem.ignored-mount-points=^/(sys|proc|dev|run)($|/)".
  # Same rule using "node_filesystem_free_bytes" will fire when disk fills for non-root users.
  - alert: HostDiskWillFillIn24Hours
    expr: (node_filesystem_avail_bytes * 100) / node_filesystem_size_bytes < 10 and ON (instance, device, mountpoint) predict_linear(node_filesystem_avail_bytes{fstype!~"tmpfs"}[1h], 24 * 3600) < 0 and ON (instance, device, mountpoint) node_filesystem_readonly == 0
    for: 2m
    labels:
      severity: warning
    annotations:
      summary: Host disk will fill in 24 hours (instance {{ $labels.instance }})
      description: "Filesystem is predicted to run out of space within the next 24 hours at current write rate\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

  - alert: HostOutOfInodes
    expr: node_filesystem_files_free{mountpoint="/rootfs"} / node_filesystem_files{mountpoint="/rootfs"} * 100 < 10 and ON (instance, device, mountpoint) node_filesystem_readonly{mountpoint="/rootfs"} == 0
    for: 2m
    labels:
      severity: warning
    annotations:
      summary: Host out of inodes (instance {{ $labels.instance }})
      description: "Disk is almost running out of available inodes (< 10% left)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

  - alert: HostInodesWillFillIn24Hours
    expr: node_filesystem_files_free{mountpoint="/rootfs"} / node_filesystem_files{mountpoint="/rootfs"} * 100 < 10 and predict_linear(node_filesystem_files_free{mountpoint="/rootfs"}[1h], 24 * 3600) < 0 and ON (instance, device, mountpoint) node_filesystem_readonly{mountpoint="/rootfs"} == 0
    for: 2m
    labels:
      severity: warning
    annotations:
      summary: Host inodes will fill in 24 hours (instance {{ $labels.instance }})
      description: "Filesystem is predicted to run out of inodes within the next 24 hours at current write rate\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

  # - alert: HostUnusualDiskReadLatency
  #   expr: rate(node_disk_read_time_seconds_total[1m]) / rate(node_disk_reads_completed_total[1m]) > 0.1 and rate(node_disk_reads_completed_total[1m]) > 0
  #   for: 2m
  #   labels:
  #     severity: warning
  #   annotations:
  #     summary: Host unusual disk read latency (instance {{ $labels.instance }})
  #     description: "Disk latency is growing (read operations > 100ms)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
  #
  # - alert: HostUnusualDiskWriteLatency
  #   expr: rate(node_disk_write_time_seconds_total[1m]) / rate(node_disk_writes_completed_total[1m]) > 2 and rate(node_disk_writes_completed_total[1m]) > 0
  #   for: 2m
  #   labels:
  #     severity: warning
  #   annotations:
  #     summary: Host unusual disk write latency (instance {{ $labels.instance }})
  #     description: "Disk latency is growing (write operations > 2s)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

  - alert: HostHighCpuLoad
    expr: 100 - (avg by(instance) (rate(node_cpu_seconds_total{mode="idle"}[2m])) * 100) > 90
    for: 0m
    labels:
      severity: warning
    annotations:
      summary: Host high CPU load (instance {{ $labels.instance }})
      description: "CPU load is > 90%\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

  - alert: HostCpuStealNoisyNeighbor
    expr: avg by(instance) (rate(node_cpu_seconds_total{mode="steal"}[5m])) * 100 > 10
    for: 0m
    labels:
      severity: warning
    annotations:
      summary: Host CPU steal noisy neighbor (instance {{ $labels.instance }})
      description: "CPU steal is > 10%. A noisy neighbor is killing VM performances or a spot instance may be out of credit.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

  # 10000 context switches is an arbitrary number.
  # Alert threshold depends on nature of application.
  # Please read: https://github.com/samber/awesome-prometheus-alerts/issues/58
  - alert: HostContextSwitching
    expr: (rate(node_context_switches_total[5m])) / (count without(cpu, mode) (node_cpu_seconds_total{mode="idle"})) > 10000
    for: 0m
    labels:
      severity: warning
    annotations:
      summary: Host context switching (instance {{ $labels.instance }})
      description: "Context switching is growing on node (> 10000 / s per core)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

  - alert: HostSwapIsFillingUp
    expr: (1 - (node_memory_SwapFree_bytes / node_memory_SwapTotal_bytes)) * 100 > 80
    for: 2m
    labels:
      severity: warning
    annotations:
      summary: Host swap is filling up (instance {{ $labels.instance }})
      description: "Swap is filling up (>80%)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

  - alert: HostSystemdServiceCrashed
    expr: node_systemd_unit_state{state="failed"} == 1
    for: 0m
    labels:
      severity: warning
    annotations:
      summary: Host systemd service crashed (instance {{ $labels.instance }})
      description: "systemd service crashed: {{ $labels.name }} on {{ $labels.job }}"

  - alert: HostPhysicalComponentTooHot
    expr: node_hwmon_temp_celsius > 75
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: Host physical component too hot (instance {{ $labels.instance }})
      description: "Physical hardware component too hot\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

  - alert: HostNodeOvertemperatureAlarm
    expr: node_hwmon_temp_crit_alarm_celsius == 1
    for: 0m
    labels:
      severity: critical
    annotations:
      summary: Host node overtemperature alarm (instance {{ $labels.instance }})
      description: "Physical node temperature alarm triggered\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

  - alert: HostRaidArrayGotInactive
    expr: node_md_state{state="inactive"} > 0
    for: 0m
    labels:
      severity: critical
    annotations:
      summary: Host RAID array got inactive (instance {{ $labels.instance }})
      description: "RAID array {{ $labels.device }} is in degraded state due to one or more disks failures. Number of spare drives is insufficient to fix issue automatically.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

  - alert: HostRaidDiskFailure
    expr: node_md_disks{state="failed"} > 0
    for: 2m
    labels:
      severity: warning
    annotations:
      summary: Host RAID disk failure (instance {{ $labels.instance }})
      description: "At least one device in RAID array on {{ $labels.instance }} failed. Array {{ $labels.md_device }} needs attention and possibly a disk swap\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

  - alert: HostKernelVersionDeviations
    expr: count(sum(label_replace(node_uname_info, "kernel", "$1", "release", "([0-9]+.[0-9]+.[0-9]+).*")) by (kernel)) > 2
    for: 6h
    labels:
      severity: warning
    annotations:
      summary: Host kernel version deviations (instance {{ $labels.instance }})
      description: "Different kernel versions are running\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

  - alert: HostOomKillDetected
    expr: increase(node_vmstat_oom_kill[1m]) > 0
    for: 0m
    labels:
      severity: warning
    annotations:
      summary: Host OOM kill detected (instance {{ $labels.instance }})
      description: "OOM kill detected\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

  - alert: HostEdacCorrectableErrorsDetected
    expr: increase(node_edac_correctable_errors_total[1m]) > 0
    for: 0m
    labels:
      severity: info
    annotations:
      summary: Host EDAC Correctable Errors detected (instance {{ $labels.instance }})
      description: "Host {{ $labels.instance }} has had {{ printf \"%.0f\" $value }} correctable memory errors reported by EDAC in the last 1 minute.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

  - alert: HostEdacUncorrectableErrorsDetected
    expr: node_edac_uncorrectable_errors_total > 0
    for: 0m
    labels:
      severity: warning
    annotations:
      summary: Host EDAC Uncorrectable Errors detected (instance {{ $labels.instance }})
      description: "Host {{ $labels.instance }} has had {{ printf \"%.0f\" $value }} uncorrectable memory errors reported by EDAC.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

  - alert: HostNetworkReceiveErrors
    expr: rate(node_network_receive_errs_total[2m]) / rate(node_network_receive_packets_total[2m]) > 0.01
    for: 2m
    labels:
      severity: warning
    annotations:
      summary: Host Network Receive Errors (instance {{ $labels.instance }})
      description: "Host {{ $labels.instance }} interface {{ $labels.device }} has encountered {{ printf \"%.0f\" $value }} receive errors in the last two minutes.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

  - alert: HostNetworkTransmitErrors
    expr: rate(node_network_transmit_errs_total[2m]) / rate(node_network_transmit_packets_total[2m]) > 0.01
    for: 2m
    labels:
      severity: warning
    annotations:
      summary: Host Network Transmit Errors (instance {{ $labels.instance }})
      description: "Host {{ $labels.instance }} interface {{ $labels.device }} has encountered {{ printf \"%.0f\" $value }} transmit errors in the last two minutes.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

  - alert: HostNetworkInterfaceSaturated
    expr: (rate(node_network_receive_bytes_total{device!~"^tap.*"}[1m]) + rate(node_network_transmit_bytes_total{device!~"^tap.*"}[1m])) / node_network_speed_bytes{device!~"^tap.*"} > 0.8
    for: 1m
    labels:
      severity: warning
    annotations:
      summary: Host Network Interface Saturated (instance {{ $labels.instance }})
      description: "The network interface \"{{ $labels.device }}\" on \"{{ $labels.instance }}\" is getting overloaded.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

  - alert: HostConntrackLimit
    expr: node_nf_conntrack_entries / node_nf_conntrack_entries_limit > 0.8
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: Host conntrack limit (instance {{ $labels.instance }})
      description: "The number of conntrack is approaching limit\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

  - alert: HostClockSkew
    expr: (node_timex_offset_seconds > 0.05 and deriv(node_timex_offset_seconds[5m]) >= 0) or (node_timex_offset_seconds < -0.05 and deriv(node_timex_offset_seconds[5m]) <= 0)
    for: 2m
    labels:
      severity: warning
    annotations:
      summary: Host clock skew (instance {{ $labels.instance }})
      description: "Clock skew detected. Clock is out of sync.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

  - alert: HostClockNotSynchronising
    expr: min_over_time(node_timex_sync_status[1m]) == 0 and node_timex_maxerror_seconds >= 16
    for: 2m
    labels:
      severity: warning
    annotations:
      summary: Host clock not synchronising (instance {{ $labels.instance }})
      description: "Clock not synchronising.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

  # 26 hours since last backup.
  # NOTE(review): assumes borgbackup_last_backup is a unix timestamp in
  # milliseconds (hence the *1000 on time()) — confirm against the exporter.
  - alert: NoRecentBackup
    expr: (time()*1000 - borgbackup_last_backup) > (1000 * 60 * 60 * 26)
    for: 2m
    labels:
      severity: warning
    annotations:
      summary: No backup in the last 26 hours (instance {{ $labels.exported_job }})
      description: "to repository {{ $labels.repo }}"
+3 -1
hosts/profiles/monitoring/default.nix
··· 1 - { config, pkgs, ... }: { 1 + { self, config, pkgs, ... }: { 2 2 # grafana configuration 3 3 services.grafana = { 4 4 enable = true; ··· 7 7 http_port = 2342; 8 8 http_addr = "127.0.0.1"; 9 9 }; 10 + # TODO enable 11 + # provision.alerting.rules.path = "${self}/hosts/profiles/monitoring/alert-rules.yaml"; 10 12 }; 11 13 12 14 # nginx reverse proxy
+65
hosts/profiles/woodpecker-agent/default.nix
# Woodpecker CI exec agent.
# Runs the agent as an unprivileged system user inside a systemd confinement
# sandbox: only the nix-daemon socket, the nscd socket and a read-only view of
# the host's trust/ssh configuration are bound into the chroot.
#
# FIX: the file was a bare attribute set but references `pkgs` and `config`,
# so it could not evaluate; it must take the module arguments.
{ config, pkgs, ... }: {

  users.users.woodpecker-runner = {
    isSystemUser = true;
    group = "woodpecker-runner";
  };
  users.groups.woodpecker-runner = { };
  # Allow the exec runner to write to build with nix
  nix.allowedUsers = [ "woodpecker-runner" ];

  systemd.services.woodpecker-runner-exec = {
    enable = true;
    wantedBy = [ "multi-user.target" ];
    ### MANUALLY RESTART SERVICE IF CHANGED
    restartIfChanged = true;
    confinement.enable = true;
    # Tools present inside the confinement chroot...
    confinement.packages = [
      pkgs.git
      pkgs.gnutar
      pkgs.bash
      pkgs.nixFlakes
      pkgs.gzip
    ];
    # ...and on the service's $PATH (kept in sync with the list above).
    path = [
      pkgs.git
      pkgs.gnutar
      pkgs.bash
      pkgs.nixFlakes
      pkgs.gzip
    ];
    serviceConfig = {
      Environment = [
        "WOODPECKER_RUNNER_CAPACITY=2"
        "WOODPECKER_RUNNER_NAME=woodpecker-agent"
        # Build through the host nix-daemon (socket is bind-mounted below).
        "NIX_REMOTE=daemon"
        "PAGER=cat"
      ];
      BindPaths = [
        "/nix/var/nix/daemon-socket/socket"
        "/run/nscd/socket"
        # "/var/lib/woodpecker"
      ];
      BindReadOnlyPaths = [
        "/etc/passwd:/etc/passwd"
        "/etc/group:/etc/group"
        "/nix/var/nix/profiles/system/etc/nix:/etc/nix"
        "${config.environment.etc."ssl/certs/ca-certificates.crt".source}:/etc/ssl/certs/ca-certificates.crt"
        "${config.environment.etc."ssh/ssh_known_hosts".source}:/etc/ssh/ssh_known_hosts"
        "${builtins.toFile "ssh_config" ''
          Host git.sealight.xyz
          ForwardAgent yes
        ''}:/etc/ssh/ssh_config"
        "/etc/machine-id"
        "/etc/resolv.conf"
        "/nix/"
      ];
      # WOODPECKER_SERVER / WOODPECKER_AGENT_SECRET are expected to arrive via
      # this file once the agenix secret is wired up.
      EnvironmentFile = [
        # /run/agenix/woodpecker-agent-secret
      ];
      ExecStart = "${pkgs.woodpecker-agent}/bin/woodpecker-agent";
      User = "woodpecker-runner";
      Group = "woodpecker-runner";
    };
  };
}
+52
hosts/profiles/woodpecker-server/default.nix
# Woodpecker CI server: nginx reverse proxy, postgres database and the server
# itself as a systemd service.
#
# FIXES: the file referenced `pkgs` without taking module arguments; the bare
# identifiers `woodpecker` in `group`/`User`/`Group` were undefined Nix
# variables (must be strings); and "WOODPECKER_AGENT_SECRET=${"${"}WOODPECKER_AGENT_SECRET}"
# interpolated an undefined Nix variable — the secret must come from the
# EnvironmentFile, never from the (world-readable) unit Environment.
{ config, pkgs, ... }: {
  users.users.woodpecker = {
    isSystemUser = true; # service account, no interactive login
    group = "woodpecker";
    description = "woodpecker user";
    home = "/var/lib/woodpecker";
    createHome = true;
  };

  users.groups.woodpecker = { };

  services.nginx.virtualHosts."ci.sealight.xyz" = {
    enableACME = true;
    forceSSL = true;
    locations."/".proxyPass = "http://localhost:3030/";
  };

  # Local postgres over the unix socket; peer auth maps the service user to
  # the database role of the same name.
  services.postgresql = {
    ensureDatabases = [ "woodpecker" ];
    ensureUsers = [{
      name = "woodpecker";
      ensurePermissions = {
        "DATABASE woodpecker" = "ALL PRIVILEGES";
      };
    }];
  };

  systemd.services.woodpecker-server = {
    wantedBy = [ "multi-user.target" ];
    serviceConfig = {
      # WOODPECKER_GITEA_CLIENT / WOODPECKER_GITEA_SECRET /
      # WOODPECKER_AGENT_SECRET belong here, supplied by agenix.
      EnvironmentFile = [
        # /run/agenix/woodpecker
      ];
      Environment = [
        "WOODPECKER_OPEN=true"

        "WOODPECKER_GITEA=true"
        "WOODPECKER_GITEA_URL=https://git.sealight.xyz"

        "WOODPECKER_DATABASE_DATASOURCE=postgres:///woodpecker?host=/run/postgresql"
        "WOODPECKER_DATABASE_DRIVER=postgres"
        "WOODPECKER_SERVER_PORT=:3030"
        "WOODPECKER_USER_CREATE=username:aynish,admin:true" # set your admin username
      ];
      ExecStart = "${pkgs.woodpecker-server}/bin/woodpecker-server";
      User = "woodpecker";
      Group = "woodpecker";
    };
  };

}
+195
modules/nixos/woodpecker.nix
{ config, pkgs, lib, ... }:
# Half-finished Woodpecker module ("I gave up writing this lol").
# The option layer is parameterised for woodpecker; everything from the
# nginx vhost down is still the drone.io configuration it was copied from.

with lib;

let
  cfg = config.services.woodpecker;
  droneserver = config.users.users.droneserver.name;

in
{
  # FIX: options were declared at the top level (`options.enable`, ...) while
  # `cfg` reads `config.services.woodpecker.*` — they must live under
  # `options.services.woodpecker` for the module to work at all.
  options.services.woodpecker = {
    enable = mkEnableOption "Woodpecker CI/CD";

    user = mkOption {
      type = types.str;
      default = "woodpeckerserver";
      description = "User account under which woodpecker runs.";
    };

    group = mkOption {
      type = types.str;
      default = "woodpeckerserver";
      description = "Group account under which woodpecker runs.";
    };

    # FIX: cfg.dataDir was used below but never declared.
    dataDir = mkOption {
      type = types.path;
      default = "/var/lib/woodpecker";
      description = "Home/state directory of the woodpecker user.";
    };

    hostname = mkOption {
      type = types.str;
      example = "woodpecker.example.com";
      description = "VirtualHost created for nginx";
    };
  };

  config = mkIf cfg.enable {
    # Only create the account when the default name is kept (the usual
    # nixpkgs pattern). FIX: the original compared against an undefined
    # variable `name` and keyed the group gid off a copy-pasted
    # `config.ids.gids.headphones`.
    users.users = optionalAttrs (cfg.user == "woodpeckerserver") {
      woodpeckerserver = {
        isSystemUser = true;
        group = cfg.group;
        description = "woodpecker user";
        home = cfg.dataDir;
        createHome = true;
      };
    };

    users.groups = optionalAttrs (cfg.group == "woodpeckerserver") {
      woodpeckerserver = { };
    };

    # FIX: use the declared hostname option instead of the hardcoded
    # "drone.my-server.tld" placeholder.
    services.nginx.virtualHosts.${cfg.hostname} = {
      enableACME = true;
      forceSSL = true;
      locations."/".proxyPass = "http://localhost:3030/";
    };

    services.postgresql = {
      ensureDatabases = [ droneserver ];
      ensureUsers = [{
        name = droneserver;
        ensurePermissions = {
          "DATABASE ${droneserver}" = "ALL PRIVILEGES";
        };
      }];
    };

    # Secrets configured:
    # - DRONE_GITEA_CLIENT_ID
    # - DRONE_GITEA_CLIENT_SECRET
    # - DRONE_RPC_SECRET
    # To get these secrets, please check Drone's documentation for Gitea integration:
    # https://docs.drone.io/server/provider/gitea/
    sops.secrets.drone = {
      sopsFile = ../.secrets/drone.yaml;
    };

    systemd.services.drone-server = {
      wantedBy = [ "multi-user.target" ];
      serviceConfig = {
        EnvironmentFile = [
          config.sops.secrets.drone.path
        ];
        Environment = [
          "DRONE_DATABASE_DATASOURCE=postgres:///droneserver?host=/run/postgresql"
          "DRONE_DATABASE_DRIVER=postgres"
          "DRONE_SERVER_PORT=:3030"
          "DRONE_USER_CREATE=username:viperML,admin:true" # set your admin username

          "DRONE_GITEA_SERVER=https://git.my-domain.tld"
          "DRONE_SERVER_HOST=drone.my-domain.tld"
          "DRONE_SERVER_PROTO=https"
        ];
        ExecStart = "${pkgs.drone}/bin/drone-server";
        User = droneserver;
        Group = droneserver;
      };
    };

    ### Docker runner

    users.users.drone-runner-docker = {
      isSystemUser = true;
      group = "drone-runner-docker";
    };
    users.groups.drone-runner-docker = { };
    # Allow the runner to use docker
    users.groups.docker.members = [ "drone-runner-docker" ];

    systemd.services.drone-runner-docker = {
      enable = true;
      wantedBy = [ "multi-user.target" ];
      ### MANUALLY RESTART SERVICE IF CHANGED
      restartIfChanged = false;
      serviceConfig = {
        Environment = [
          "DRONE_RPC_PROTO=http"
          "DRONE_RPC_HOST=localhost:3030"
          "DRONE_RUNNER_CAPACITY=2"
          "DRONE_RUNNER_NAME=drone-runner-docker"
        ];
        EnvironmentFile = [
          config.sops.secrets.drone.path
        ];
        ExecStart = "${pkgs.drone-runner-docker}/bin/drone-runner-docker";
        User = "drone-runner-docker";
        Group = "drone-runner-docker";
      };
    };

    ### Exec runner

    users.users.drone-runner-exec = {
      isSystemUser = true;
      group = "drone-runner-exec";
    };
    users.groups.drone-runner-exec = { };
    # Allow the exec runner to write to build with nix
    nix.allowedUsers = [ "drone-runner-exec" ];

    systemd.services.drone-runner-exec = {
      enable = true;
      wantedBy = [ "multi-user.target" ];
      ### MANUALLY RESTART SERVICE IF CHANGED
      restartIfChanged = true;
      confinement.enable = true;
      # Tools present inside the confinement chroot and on $PATH.
      confinement.packages = [
        pkgs.git
        pkgs.gnutar
        pkgs.bash
        pkgs.nixFlakes
        pkgs.gzip
      ];
      path = [
        pkgs.git
        pkgs.gnutar
        pkgs.bash
        pkgs.nixFlakes
        pkgs.gzip
      ];
      serviceConfig = {
        Environment = [
          "DRONE_RPC_PROTO=http"
          "DRONE_RPC_HOST=127.0.0.1:3030"
          "DRONE_RUNNER_CAPACITY=2"
          "DRONE_RUNNER_NAME=drone-runner-exec"
          # Build through the host nix-daemon (socket bind-mounted below).
          "NIX_REMOTE=daemon"
          "PAGER=cat"
          "DRONE_DEBUG=true"
        ];
        BindPaths = [
          "/nix/var/nix/daemon-socket/socket"
          "/run/nscd/socket"
          # "/var/lib/drone"
        ];
        BindReadOnlyPaths = [
          "/etc/passwd:/etc/passwd"
          "/etc/group:/etc/group"
          "/nix/var/nix/profiles/system/etc/nix:/etc/nix"
          "${config.environment.etc."ssl/certs/ca-certificates.crt".source}:/etc/ssl/certs/ca-certificates.crt"
          "${config.environment.etc."ssh/ssh_known_hosts".source}:/etc/ssh/ssh_known_hosts"
          "${builtins.toFile "ssh_config" ''
            Host git.ayats.org
            ForwardAgent yes
          ''}:/etc/ssh/ssh_config"
          "/etc/machine-id"
          "/etc/resolv.conf"
          "/nix/"
        ];
        EnvironmentFile = [
          config.sops.secrets.drone.path
        ];
        ExecStart = "${pkgs.drone-runner-exec}/bin/drone-runner-exec";
        User = "drone-runner-exec";
        Group = "drone-runner-exec";
      };
    };
  };
}