because I got bored of customising my CV for every job
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

feat(ai-provider): add OpenAI and Anthropic providers with registry pattern

+398 -40
+29 -2
.env.example
··· 7 7 8 8 # Server 9 9 SERVER_PORT=3000 10 + NODE_ENV=development 10 11 JWT_SECRET=your-super-secret-jwt-key-here 11 12 JWT_ACCESS_TOKEN_EXPIRY=15m 12 13 JWT_REFRESH_TOKEN_EXPIRY=7d ··· 14 15 # Client 15 16 CLIENT_PORT=5173 16 17 VITE_SERVER_URL=http://localhost:3000 18 + CLIENT_URL=http://localhost:5173 17 19 18 20 # Docs 19 21 DOCS_PORT=3001 20 22 VITE_CLIENT_URL=http://localhost:5173 23 + VITE_DOCS_URL=http://localhost:3001 21 24 22 25 # Prisma 23 26 PRISMA_ENABLE_TRACING=false 24 - VITE_DOCS_URL=http://localhost:3001 27 + 28 + # Encryption 25 29 ENCRYPTION_KEY=94caadf1e9765adf9d89fc3c440f4b67651ec85b3bc0cf8fe3b0e1db2c585779 26 - # AI/LLM Configuration 30 + 31 + # Email (optional in dev - emails logged to console) 32 + # RESEND_API_KEY= 33 + # EMAIL_FROM_ADDRESS=noreply@example.com 34 + # EMAIL_FROM_NAME=CV Generator 35 + 36 + # AI Provider Configuration 37 + # Options: llama-cpp (default), openai, anthropic 38 + AI_PROVIDER=llama-cpp 39 + AI_TEMPERATURE=0.1 40 + AI_MAX_TOKENS=2048 41 + AI_TIMEOUT=60000 42 + 43 + # Llama.cpp (when AI_PROVIDER=llama-cpp) 27 44 LLAMA_URL=http://llama:8080 28 45 MODEL_PATH=/models/mistral-7b-instruct-v0.2.Q4_K_M.gguf 46 + 47 + # OpenAI (when AI_PROVIDER=openai) 48 + # OPENAI_API_KEY=sk-... 49 + # OPENAI_BASE_URL=https://api.openai.com 50 + # OPENAI_MODEL=gpt-4o-mini 51 + 52 + # Anthropic (when AI_PROVIDER=anthropic) 53 + # ANTHROPIC_API_KEY=sk-ant-... 54 + # ANTHROPIC_BASE_URL=https://api.anthropic.com 55 + # ANTHROPIC_MODEL=claude-sonnet-4-5-20250929 29 56 30 57 # LinkedIn OAuth (Future) 31 58 # LINKEDIN_CLIENT_ID=
+31
apps/server/src/config/env.validation.ts
··· 70 70 "string.min": 71 71 "ENCRYPTION_KEY must be at least 32 characters long for security", 72 72 }), 73 + 74 + // AI Provider Configuration 75 + AI_PROVIDER: Joi.string() 76 + .valid("llama-cpp", "openai", "anthropic") 77 + .default("llama-cpp"), 78 + AI_TEMPERATURE: Joi.number().min(0).max(2).default(0.1), 79 + AI_MAX_TOKENS: Joi.number().integer().min(1).default(2048), 80 + AI_TIMEOUT: Joi.number().integer().min(1000).default(60000), 81 + 82 + // Llama.cpp 83 + LLAMA_URL: Joi.string().uri().default("http://llama:8080"), 84 + 85 + // OpenAI (required when AI_PROVIDER=openai) 86 + OPENAI_API_KEY: Joi.string().when("AI_PROVIDER", { 87 + is: "openai", 88 + then: Joi.string().required(), 89 + otherwise: Joi.string().allow("").optional(), 90 + }), 91 + OPENAI_BASE_URL: Joi.string().uri().default("https://api.openai.com"), 92 + OPENAI_MODEL: Joi.string().default("gpt-4o-mini"), 93 + 94 + // Anthropic (required when AI_PROVIDER=anthropic) 95 + ANTHROPIC_API_KEY: Joi.string().when("AI_PROVIDER", { 96 + is: "anthropic", 97 + then: Joi.string().required(), 98 + otherwise: Joi.string().allow("").optional(), 99 + }), 100 + ANTHROPIC_BASE_URL: Joi.string() 101 + .uri() 102 + .default("https://api.anthropic.com"), 103 + ANTHROPIC_MODEL: Joi.string().default("claude-sonnet-4-5-20250929"), 73 104 });
+3 -1
apps/server/src/modules/cv-parser/cv-parser.module.ts
··· 15 15 16 16 @Module({ 17 17 imports: [ 18 - CVParserCoreModule.forRoot({ type: "llama-cpp" }), 18 + CVParserCoreModule.forRoot({ 19 + type: (process.env.AI_PROVIDER as "llama-cpp" | "openai" | "anthropic") || "llama-cpp", 20 + }), 19 21 FileExtractionModule.forRoot(), 20 22 DatabaseModule, 21 23 MulterModule.register({
+40
packages/ai-provider/src/ai-provider.registry.ts
··· 1 + import type { ConfigService } from '@nestjs/config'; 2 + import type { AIProvider } from './types'; 3 + 4 + type ProviderFactory = (configService: ConfigService) => AIProvider; 5 + 6 + const registry = new Map<string, ProviderFactory>(); 7 + 8 + /** 9 + * Register an AI provider factory. Call this at module load time 10 + * (e.g., at the bottom of each provider file). 11 + */ 12 + export const registerAIProvider = ( 13 + type: string, 14 + factory: ProviderFactory, 15 + ): void => { 16 + registry.set(type, factory); 17 + }; 18 + 19 + /** 20 + * Resolve a registered AI provider by type. 21 + * @throws if the type has not been registered. 22 + */ 23 + export const resolveAIProvider = ( 24 + type: string, 25 + configService: ConfigService, 26 + ): AIProvider => { 27 + const factory = registry.get(type); 28 + if (!factory) { 29 + const available = [...registry.keys()].join(', '); 30 + throw new Error( 31 + `Unknown AI provider type: "${type}". Registered providers: ${available || 'none'}`, 32 + ); 33 + } 34 + return factory(configService); 35 + }; 36 + 37 + /** 38 + * List all registered provider type names. 39 + */ 40 + export const registeredProviderTypes = (): string[] => [...registry.keys()];
+5 -26
packages/ai-provider/src/ai.module.ts
··· 1 1 import { DynamicModule, Module } from '@nestjs/common'; 2 2 import { ConfigModule, ConfigService } from '@nestjs/config'; 3 + import { resolveAIProvider } from './ai-provider.registry'; 3 4 import type { AIProvider } from './types'; 4 - import { LlamaCppProvider, type LlamaCppConfig } from './providers'; 5 + 6 + import './providers'; 5 7 6 8 export const AI_PROVIDER = Symbol('AI_PROVIDER'); 7 9 8 - export type AIProviderType = 'llama-cpp'; 10 + export type AIProviderType = 'llama-cpp' | 'openai' | 'anthropic'; 9 11 10 12 export interface AIModuleOptions { 11 13 type: AIProviderType; ··· 22 24 provide: AI_PROVIDER, 23 25 inject: [ConfigService], 24 26 useFactory: (configService: ConfigService): AIProvider => 25 - AIModule.createProvider(options.type, configService), 27 + resolveAIProvider(options.type, configService), 26 28 }, 27 29 ], 28 30 exports: [AI_PROVIDER], 29 31 }; 30 - } 31 - 32 - private static createProvider( 33 - type: AIProviderType, 34 - configService: ConfigService, 35 - ): AIProvider { 36 - switch (type) { 37 - case 'llama-cpp': 38 - return AIModule.createLlamaCppProvider(configService); 39 - default: 40 - throw new Error(`Unknown AI provider type: ${type}`); 41 - } 42 - } 43 - 44 - private static createLlamaCppProvider(configService: ConfigService): AIProvider { 45 - const config: LlamaCppConfig = { 46 - baseUrl: configService.get<string>('LLAMA_URL', 'http://llama:8080'), 47 - defaultTemperature: configService.get<number>('AI_TEMPERATURE', 0.1), 48 - defaultMaxTokens: configService.get<number>('AI_MAX_TOKENS', 2048), 49 - timeout: configService.get<number>('AI_TIMEOUT', 60000), 50 - }; 51 - 52 - return new LlamaCppProvider(config); 53 32 } 54 33 }
+8 -1
packages/ai-provider/src/index.ts
··· 7 7 } from './types'; 8 8 9 9 // Providers 10 - export { LlamaCppProvider, type LlamaCppConfig } from './providers'; 10 + export { 11 + LlamaCppProvider, type LlamaCppConfig, 12 + OpenAIProvider, type OpenAIConfig, 13 + AnthropicProvider, type AnthropicConfig, 14 + } from './providers'; 15 + 16 + // Registry 17 + export { registerAIProvider, resolveAIProvider, registeredProviderTypes } from './ai-provider.registry'; 11 18 12 19 // NestJS Module 13 20 export { AIModule, AI_PROVIDER, type AIModuleOptions, type AIProviderType } from './ai.module';
+140
packages/ai-provider/src/providers/anthropic.provider.ts
··· 1 + import type { ConfigService } from '@nestjs/config'; 2 + import { registerAIProvider } from '../ai-provider.registry'; 3 + import type { 4 + AIProvider, 5 + AIProviderConfig, 6 + AICompletionRequest, 7 + AICompletionResponse, 8 + } from '../types'; 9 + 10 + export interface AnthropicConfig extends AIProviderConfig { 11 + model?: string; 12 + } 13 + 14 + /** 15 + * AI provider implementation for the Anthropic Messages API. 16 + */ 17 + export class AnthropicProvider implements AIProvider { 18 + readonly name = 'anthropic'; 19 + 20 + private readonly baseUrl: string; 21 + private readonly apiKey: string; 22 + private readonly model: string; 23 + private readonly defaultTemperature: number; 24 + private readonly defaultMaxTokens: number; 25 + private readonly timeout: number; 26 + 27 + constructor(config: AnthropicConfig) { 28 + this.baseUrl = config.baseUrl.replace(/\/$/, ''); 29 + this.apiKey = config.apiKey ?? ''; 30 + this.model = config.model ?? 'claude-sonnet-4-5-20250929'; 31 + this.defaultTemperature = config.defaultTemperature ?? 0.1; 32 + this.defaultMaxTokens = config.defaultMaxTokens ?? 2048; 33 + this.timeout = config.timeout ?? 60000; 34 + } 35 + 36 + async complete(request: AICompletionRequest): Promise<AICompletionResponse> { 37 + const controller = new AbortController(); 38 + const timeoutId = setTimeout(() => controller.abort(), this.timeout); 39 + 40 + try { 41 + const body: Record<string, unknown> = { 42 + model: this.model, 43 + max_tokens: request.maxTokens ?? this.defaultMaxTokens, 44 + temperature: request.temperature ?? this.defaultTemperature, 45 + messages: [{ role: 'user', content: request.prompt }], 46 + }; 47 + 48 + if (request.systemPrompt) { 49 + body.system = request.systemPrompt; 50 + } 51 + if (request.stopSequences) { 52 + body.stop_sequences = request.stopSequences; 53 + } 54 + 55 + const response = await fetch(`${this.baseUrl}/v1/messages`, { 56 + method: 'POST', 57 + headers: { 58 + 'Content-Type': 'application/json', 59 + 'x-api-key': this.apiKey, 60 + 'anthropic-version': '2023-06-01', 61 + }, 62 + body: JSON.stringify(body), 63 + signal: controller.signal, 64 + }); 65 + 66 + if (!response.ok) { 67 + const errorBody = await response.text(); 68 + throw new Error(`Anthropic API error: ${response.status} ${errorBody}`); 69 + } 70 + 71 + const result = (await response.json()) as { 72 + content?: Array<{ type: string; text?: string }>; 73 + usage?: { 74 + input_tokens?: number; 75 + output_tokens?: number; 76 + }; 77 + model?: string; 78 + stop_reason?: string; 79 + }; 80 + 81 + const stopReasonMap: Record<string, AICompletionResponse['finishReason']> = { 82 + end_turn: 'stop', 83 + stop_sequence: 'stop', 84 + max_tokens: 'length', 85 + }; 86 + 87 + const textBlock = result.content?.find((b) => b.type === 'text'); 88 + 89 + return { 90 + content: textBlock?.text ?? '', 91 + promptTokens: result.usage?.input_tokens, 92 + completionTokens: result.usage?.output_tokens, 93 + model: result.model ?? this.model, 94 + finishReason: stopReasonMap[result.stop_reason ?? ''] ?? 'stop', 95 + }; 96 + } finally { 97 + clearTimeout(timeoutId); 98 + } 99 + } 100 + 101 + async isHealthy(): Promise<boolean> { 102 + try { 103 + const controller = new AbortController(); 104 + const timeoutId = setTimeout(() => controller.abort(), 10000); 105 + 106 + const response = await fetch(`${this.baseUrl}/v1/messages`, { 107 + method: 'POST', 108 + headers: { 109 + 'Content-Type': 'application/json', 110 + 'x-api-key': this.apiKey, 111 + 'anthropic-version': '2023-06-01', 112 + }, 113 + body: JSON.stringify({ 114 + model: this.model, 115 + max_tokens: 1, 116 + messages: [{ role: 'user', content: 'ping' }], 117 + }), 118 + signal: controller.signal, 119 + }); 120 + 121 + clearTimeout(timeoutId); 122 + return response.ok; 123 + } catch { 124 + return false; 125 + } 126 + } 127 + 128 + static fromConfigService(configService: ConfigService): AnthropicProvider { 129 + return new AnthropicProvider({ 130 + baseUrl: configService.get<string>('ANTHROPIC_BASE_URL', 'https://api.anthropic.com'), 131 + apiKey: configService.get<string>('ANTHROPIC_API_KEY', ''), 132 + model: configService.get<string>('ANTHROPIC_MODEL', 'claude-sonnet-4-5-20250929'), 133 + defaultTemperature: configService.get<number>('AI_TEMPERATURE', 0.1), 134 + defaultMaxTokens: configService.get<number>('AI_MAX_TOKENS', 2048), 135 + timeout: configService.get<number>('AI_TIMEOUT', 60000), 136 + }); 137 + } 138 + } 139 + 140 + registerAIProvider('anthropic', (cs) => AnthropicProvider.fromConfigService(cs));
+2
packages/ai-provider/src/providers/index.ts
··· 1 1 export { LlamaCppProvider, type LlamaCppConfig } from './llama-cpp.provider'; 2 + export { OpenAIProvider, type OpenAIConfig } from './openai.provider'; 3 + export { AnthropicProvider, type AnthropicConfig } from './anthropic.provider';
+9 -10
packages/ai-provider/src/providers/llama-cpp.provider.ts
··· 1 + import type { ConfigService } from '@nestjs/config'; 2 + import { registerAIProvider } from '../ai-provider.registry'; 1 3 import type { 2 4 AIProvider, 3 5 AIProviderConfig, ··· 101 103 } 102 104 } 103 105 104 - /** 105 - * Create a LlamaCppProvider from environment variables 106 - */ 107 - static fromEnv(): LlamaCppProvider { 108 - const baseUrl = process.env['LLAMA_URL'] ?? 'http://localhost:8080'; 109 - 106 + static fromConfigService(configService: ConfigService): LlamaCppProvider { 110 107 return new LlamaCppProvider({ 111 - baseUrl, 112 - defaultTemperature: 0.1, 113 - defaultMaxTokens: 2048, 114 - timeout: 60000, 108 + baseUrl: configService.get<string>('LLAMA_URL', 'http://llama:8080'), 109 + defaultTemperature: configService.get<number>('AI_TEMPERATURE', 0.1), 110 + defaultMaxTokens: configService.get<number>('AI_MAX_TOKENS', 2048), 111 + timeout: configService.get<number>('AI_TIMEOUT', 60000), 115 112 }); 116 113 } 117 114 } 115 + 116 + registerAIProvider('llama-cpp', (cs) => LlamaCppProvider.fromConfigService(cs));
+131
packages/ai-provider/src/providers/openai.provider.ts
··· 1 + import type { ConfigService } from '@nestjs/config'; 2 + import { registerAIProvider } from '../ai-provider.registry'; 3 + import type { 4 + AIProvider, 5 + AIProviderConfig, 6 + AICompletionRequest, 7 + AICompletionResponse, 8 + } from '../types'; 9 + 10 + export interface OpenAIConfig extends AIProviderConfig { 11 + model?: string; 12 + } 13 + 14 + /** 15 + * AI provider implementation for OpenAI-compatible APIs. 16 + * Works with OpenAI, Azure OpenAI, and any OpenAI-compatible endpoint. 17 + */ 18 + export class OpenAIProvider implements AIProvider { 19 + readonly name = 'openai'; 20 + 21 + private readonly baseUrl: string; 22 + private readonly apiKey: string; 23 + private readonly model: string; 24 + private readonly defaultTemperature: number; 25 + private readonly defaultMaxTokens: number; 26 + private readonly timeout: number; 27 + 28 + constructor(config: OpenAIConfig) { 29 + this.baseUrl = config.baseUrl.replace(/\/$/, ''); 30 + this.apiKey = config.apiKey ?? ''; 31 + this.model = config.model ?? 'gpt-4o-mini'; 32 + this.defaultTemperature = config.defaultTemperature ?? 0.1; 33 + this.defaultMaxTokens = config.defaultMaxTokens ?? 2048; 34 + this.timeout = config.timeout ?? 60000; 35 + } 36 + 37 + async complete(request: AICompletionRequest): Promise<AICompletionResponse> { 38 + const messages: Array<{ role: string; content: string }> = []; 39 + 40 + if (request.systemPrompt) { 41 + messages.push({ role: 'system', content: request.systemPrompt }); 42 + } 43 + messages.push({ role: 'user', content: request.prompt }); 44 + 45 + const controller = new AbortController(); 46 + const timeoutId = setTimeout(() => controller.abort(), this.timeout); 47 + 48 + try { 49 + const response = await fetch(`${this.baseUrl}/v1/chat/completions`, { 50 + method: 'POST', 51 + headers: { 52 + 'Content-Type': 'application/json', 53 + 'Authorization': `Bearer ${this.apiKey}`, 54 + }, 55 + body: JSON.stringify({ 56 + model: this.model, 57 + messages, 58 + temperature: request.temperature ?? this.defaultTemperature, 59 + max_tokens: request.maxTokens ?? this.defaultMaxTokens, 60 + ...(request.stopSequences ? { stop: request.stopSequences } : {}), 61 + }), 62 + signal: controller.signal, 63 + }); 64 + 65 + if (!response.ok) { 66 + const body = await response.text(); 67 + throw new Error(`OpenAI API error: ${response.status} ${body}`); 68 + } 69 + 70 + const result = (await response.json()) as { 71 + choices?: Array<{ 72 + message?: { content?: string }; 73 + finish_reason?: string; 74 + }>; 75 + usage?: { 76 + prompt_tokens?: number; 77 + completion_tokens?: number; 78 + }; 79 + model?: string; 80 + }; 81 + 82 + const finishReasonMap: Record<string, AICompletionResponse['finishReason']> = { 83 + stop: 'stop', 84 + length: 'length', 85 + content_filter: 'content_filter', 86 + }; 87 + 88 + const rawReason = result.choices?.[0]?.finish_reason ?? 'stop'; 89 + 90 + return { 91 + content: result.choices?.[0]?.message?.content ?? '', 92 + promptTokens: result.usage?.prompt_tokens, 93 + completionTokens: result.usage?.completion_tokens, 94 + model: result.model ?? this.model, 95 + finishReason: finishReasonMap[rawReason] ?? 'stop', 96 + }; 97 + } finally { 98 + clearTimeout(timeoutId); 99 + } 100 + } 101 + 102 + async isHealthy(): Promise<boolean> { 103 + try { 104 + const controller = new AbortController(); 105 + const timeoutId = setTimeout(() => controller.abort(), 5000); 106 + 107 + const response = await fetch(`${this.baseUrl}/v1/models`, { 108 + headers: { 'Authorization': `Bearer ${this.apiKey}` }, 109 + signal: controller.signal, 110 + }); 111 + 112 + clearTimeout(timeoutId); 113 + return response.ok; 114 + } catch { 115 + return false; 116 + } 117 + } 118 + 119 + static fromConfigService(configService: ConfigService): OpenAIProvider { 120 + return new OpenAIProvider({ 121 + baseUrl: configService.get<string>('OPENAI_BASE_URL', 'https://api.openai.com'), 122 + apiKey: configService.get<string>('OPENAI_API_KEY', ''), 123 + model: configService.get<string>('OPENAI_MODEL', 'gpt-4o-mini'), 124 + defaultTemperature: configService.get<number>('AI_TEMPERATURE', 0.1), 125 + defaultMaxTokens: configService.get<number>('AI_MAX_TOKENS', 2048), 126 + timeout: configService.get<number>('AI_TIMEOUT', 60000), 127 + }); 128 + } 129 + } 130 + 131 + registerAIProvider('openai', (cs) => OpenAIProvider.fromConfigService(cs));