# because I got bored of customising my CV for every job
services:
  # PostgreSQL 16 for the CV app. Credentials default to cv/cv/cv for local
  # dev only — override POSTGRES_* in the environment for anything shared.
  db:
    image: postgres:16-alpine
    environment:
      POSTGRES_USER: ${POSTGRES_USER:-cv}
      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-cv}
      POSTGRES_DB: ${POSTGRES_DB:-cv}
    ports:
      # Published to the host so local tooling (psql, GUI clients) can connect.
      - "${DB_PORT:-5432}:5432"
    volumes:
      # Named volume so data survives container recreation.
      - db-data:/var/lib/postgresql/data
    healthcheck:
      # ${...} here is interpolated by Compose at config time, so the check
      # always probes the same user/db the container was created with.
      test: ["CMD-SHELL", "pg_isready -U ${POSTGRES_USER:-cv} -d ${POSTGRES_DB:-cv}"]
      interval: 10s
      timeout: 5s
      retries: 3
17
18 server:
19 build:
20 context: .
21 dockerfile: apps/server/Dockerfile
22 environment:
23 PORT: ${SERVER_PORT:-3000}
24 JWT_SECRET: ${JWT_SECRET:-your-super-secret-jwt-key-here}
25 JWT_ACCESS_TOKEN_EXPIRY: ${JWT_ACCESS_TOKEN_EXPIRY:-15m}
26 JWT_REFRESH_TOKEN_EXPIRY: ${JWT_REFRESH_TOKEN_EXPIRY:-7d}
27 DATABASE_URL: ${DATABASE_URL:-postgresql://cv:cv@db:5432/cv}
28 POSTGRES_USER: ${POSTGRES_USER:-cv}
29 POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-cv}
30 POSTGRES_DB: ${POSTGRES_DB:-cv}
31 ENCRYPTION_KEY: ${ENCRYPTION_KEY:-dev-encryption-key-32-chars-long!}
32 RESEND_API_KEY: ${RESEND_API_KEY:-}
33 LLAMA_URL: ${LLAMA_URL:-http://llama:8080}
34 AI_TIMEOUT: ${AI_TIMEOUT:-300000}
35 AI_MAX_TOKENS: ${AI_MAX_TOKENS:-4096}
36 depends_on:
37 db:
38 condition: service_healthy
39 ports:
40 - "${SERVER_PORT:-3000}:3000"
41 volumes:
42 - ./apps/server/src:/app/apps/server/src
43 - ./apps/server/prisma:/app/apps/server/prisma
44 - ./packages:/app/packages
45 - pnpm-cache:/root/.local/share/pnpm
46 command: sh -c "cd /app/apps/server && pnpm prisma:deploy && pnpm dev"
47 healthcheck:
48 test: ["CMD", "curl", "-f", "http://localhost:3000/health"]
49 interval: 15s
50 timeout: 5s
51 retries: 3
52 start_period: 30s
53
  # Vite dev client. VITE_SERVER_URL/VITE_DOCS_URL are browser-facing
  # (localhost), while VITE_PROXY_TARGET and GRAPHQL_SCHEMA_URL use the
  # in-network `server` hostname resolved inside the Compose network.
  client:
    build:
      context: .
      dockerfile: apps/client/Dockerfile
      target: development
    environment:
      VITE_SERVER_URL: ${VITE_SERVER_URL:-http://localhost:3000}
      VITE_PROXY_TARGET: http://server:3000
      VITE_DOCS_URL: ${VITE_DOCS_URL:-http://localhost:3001}
      GRAPHQL_SCHEMA_URL: ${GRAPHQL_SCHEMA_URL:-http://server:3000/graphql}
    depends_on:
      # Wait for the server's /health check so codegen can reach the schema.
      server:
        condition: service_healthy
    ports:
      - "${CLIENT_PORT:-5173}:5173"
    volumes:
      - ./apps/client/src:/app/apps/client/src
      - ./packages:/app/packages
      - pnpm-cache:/root/.local/share/pnpm
    # codegen is best-effort (`|| true`) so a schema hiccup doesn't block dev.
    command: sh -c "cd /app/apps/client && (pnpm codegen || true) && pnpm dev"
    healthcheck:
      # NOTE(review): assumes curl is present in the client image — confirm.
      test: ["CMD", "curl", "-f", "http://localhost:5173"]
      interval: 15s
      timeout: 5s
      retries: 3
      start_period: 20s
80
  # Documentation site (dev target). No depends_on — it only links out to the
  # client/server via the browser-facing URLs below.
  docs:
    build:
      context: .
      dockerfile: apps/docs/Dockerfile
      target: development
    environment:
      VITE_CLIENT_URL: ${VITE_CLIENT_URL:-http://localhost:5173}
      VITE_SERVER_URL: ${VITE_SERVER_URL:-http://localhost:3000}
    ports:
      - "${DOCS_PORT:-3001}:3001"
    volumes:
      # Mount both source and markdown content for live reload.
      - ./apps/docs/src:/app/apps/docs/src
      - ./apps/docs/content:/app/apps/docs/content
      - ./packages:/app/packages
      - pnpm-cache:/root/.local/share/pnpm
    healthcheck:
      # NOTE(review): assumes curl is present in the docs image — confirm.
      test: ["CMD", "curl", "-f", "http://localhost:3001"]
      interval: 15s
      timeout: 5s
      retries: 3
      start_period: 20s
102
103 model-download:
104 image: alpine:latest
105 volumes:
106 - ./ai-models:/models
107 command: |
108 sh -c '
109 MODEL_FILE="/models/mistral-7b-instruct-v0.2.Q4_K_M.gguf"
110 if [ -f "$$MODEL_FILE" ]; then
111 echo "Model already exists, skipping download"
112 exit 0
113 fi
114 echo "Downloading Mistral 7B model (~4.4GB)..."
115 apk add --no-cache wget
116 wget -c -O "$$MODEL_FILE" \
117 "https://huggingface.co/TheBloke/Mistral-7B-Instruct-v0.2-GGUF/resolve/main/mistral-7b-instruct-v0.2.Q4_K_M.gguf"
118 echo "Model download complete"
119 '
120
  # llama.cpp inference server, opt-in via `--profile docker-llama`.
  # CPU-only llama.cpp server (slow: ~6 tokens/sec)
  # For GPU acceleration:
  # - macOS: Use hybrid mode with native Metal (see DOCKER_GPU.md)
  # - Linux: Use docker-compose.nvidia.yml for NVIDIA GPU support
  llama:
    image: ghcr.io/ggml-org/llama.cpp:server
    profiles:
      - docker-llama
    ports:
      - "${LLAMA_PORT:-8080}:8080"
    volumes:
      # Same host directory the model-download service writes into.
      - ./ai-models:/models
    command: -m /models/mistral-7b-instruct-v0.2.Q4_K_M.gguf --port 8080 --host 0.0.0.0 -c 4096 -ngl 0 # -ngl 0 = CPU only
    depends_on:
      # Only start once the weights have been fully downloaded.
      model-download:
        condition: service_completed_successfully
    healthcheck:
      # NOTE(review): assumes curl exists inside the llama.cpp server image —
      # verify, or the container will never report healthy.
      test: ["CMD", "curl", "-f", "http://localhost:8080/health"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 30s
143
volumes:
  # Named volumes: db-data keeps Postgres data across container recreation;
  # pnpm-cache is the package store shared by server, client and docs.
  db-data:
  pnpm-cache: