// API stress-test script: ramps concurrent simulated users against the configured endpoints and reports throughput/latency statistics.
1import { Chalk } from "chalk";
2import { randomUUIDv7 } from "bun";
3
// Create a console logger with fancy colors (level 3 forces full truecolor output)
const chalk = new Chalk({ level: 3 });
// API routes under test; each simulated user picks randomly among these.
const endpoints = [
  "/api/stories",
  "/api/stats/total-stories",
  "/api/stats/verified-users",
];

// Script configuration
/**
 * Stress Test Configuration Parameters
 *
 * @remarks
 * These settings control the behavior and intensity of the load test.
 * Modify with extreme caution as improper values may cause service disruption.
 */
const CONFIG = {
  /** Target server endpoint - modify for production targets */
  baseUrl: "http://localhost:3000",

  /** @critical Initial concurrency value - starts with significant load */
  startConcurrency: 500, // Higher initial load for stress testing

  /** @warning Maximum concurrent users - can overload production systems */
  maxConcurrency: 10000, // Increased maximum for thorough performance evaluation

  /** Multiplicative step between concurrency levels (geometric progression) */
  concurrencyFactor: 2.0, // More aggressive scaling to identify breaking points faster

  /** @critical Number of sequential requests each simulated user will make */
  requestsPerUser: 25, // Increased per-user workload for extended session simulation

  /** Maximum milliseconds before timing out a request */
  requestTimeout: 8000, // Reduced timeout to identify latency issues earlier

  /** Milliseconds to wait between sequential requests from same user */
  delayBetweenRequests: 20, // Reduced delay for more intensive testing

  /** Milliseconds to pause between concurrency level increases */
  delayBetweenLevels: 2000, // Shorter recovery time between test phases

  /** Whether to utilize HTTP caching mechanisms (ETag) */
  runWithCaching: false, // Disabled caching to maximize server load

  /** @critical Minimum success rate percentage to continue testing */
  successThreshold: 95, // Lowered success threshold to detect degradation earlier

  /** @critical Maximum acceptable p95 response time in milliseconds */
  responseTimeThreshold: 350, // Stricter response time requirements

  /** Whether to abort testing when thresholds are exceeded */
  stopOnFailure: true, // Halt on threshold breach to prevent cascading failures

  /** Suppress detailed per-request logging to reduce client-side overhead */
  disableDetailedLogging: true, // Limit logging to improve test client performance

  /** Track Time To First Byte as separate metric */
  measureTTFB: true, // Important for network latency analysis

  /** Calculate and store statistical distribution of response times */
  trackPercentiles: true, // Essential for performance analysis

  /** Track time requests spend in queue vs processing (advanced) */
  trackQueueTime: false, // Disabled to reduce complexity

  /** @critical Number of requests to execute before measurement begins */
  warmupRequests: 100, // Increased warmup to ensure system stabilization
};

// Time buckets for percentile tracking (in ms).
// Histogram upper boundaries: a sample belongs to the first bucket whose
// boundary is >= the observed time; anything beyond 30s lands in the last.
const TIME_BUCKETS = [
  0, 10, 25, 50, 75, 100, 150, 200, 250, 300, 400, 500, 750, 1000, 1500, 2000,
  3000, 5000, 7500, 10000, 15000, 30000,
];
78
// Stats tracking
// Counters and latency histograms accumulated for one endpoint within a
// single concurrency level.
type EndpointStats = {
  totalRequests: number;
  successfulRequests: number;
  notModifiedResponses: number; // 304s; these are also counted as successful
  failedRequests: number;
  responseTimeTotal: number; // Sum of full response times (ms), for averaging
  ttfbTimeTotal: number; // Time to first byte total
  processingTimeTotal: number; // Server processing time (TTFB to full response)
  responseTimeMin: number;
  responseTimeMax: number;
  ttfbTimeMin: number;
  ttfbTimeMax: number;
  timeBuckets: number[]; // For percentile calculations
  ttfbTimeBuckets: number[]; // TTFB percentiles
};

// Add memory usage tracking
// Full snapshot of one concurrency level's results, including derived
// figures (RPS, percentiles) and an optional process memory sample.
type ConcurrencyStats = {
  concurrency: number;
  totalRequests: number;
  successfulRequests: number;
  notModifiedResponses: number;
  failedRequests: number;
  responseTimeTotal: number;
  ttfbTimeTotal: number;
  processingTimeTotal: number;
  responseTimeMin: number;
  responseTimeMax: number;
  ttfbTimeMin: number;
  ttfbTimeMax: number;
  p50ResponseTime: number; // 50th percentile (median)
  p90ResponseTime: number; // 90th percentile
  p95ResponseTime: number; // 95th percentile
  p99ResponseTime: number; // 99th percentile
  p50TTFB: number; // TTFB percentiles
  p90TTFB: number;
  p95TTFB: number;
  p99TTFB: number;
  startTime: number; // performance.now() at level start
  endTime: number; // performance.now() after all users finish
  userCompletedCount: number;
  requestsPerSecond: number;
  successRate: number; // Percentage, derived after the level completes
  endpoints: Record<string, EndpointStats>;
  memoryUsage?: {
    rss: number;
    heapTotal: number;
    heapUsed: number;
    external: number;
  };
};
131
// Snapshot per concurrency level tested, in run order.
const concurrencyResults: ConcurrencyStats[] = [];
// First level that breached a threshold; null until (and unless) detected.
let breakingPoint: ConcurrencyStats | null = null;

// Current level stats
// Shared mutable aggregate written by every in-flight request; reset before
// each concurrency level. Mins start at Number.MAX_VALUE so the first
// observed sample always wins the Math.min comparison.
const stats = {
  concurrency: 0,
  totalRequests: 0,
  successfulRequests: 0,
  notModifiedResponses: 0,
  failedRequests: 0,
  responseTimeTotal: 0,
  ttfbTimeTotal: 0,
  processingTimeTotal: 0,
  responseTimeMin: Number.MAX_VALUE,
  responseTimeMax: 0,
  ttfbTimeMin: Number.MAX_VALUE,
  ttfbTimeMax: 0,
  p50ResponseTime: 0,
  p90ResponseTime: 0,
  p95ResponseTime: 0,
  p99ResponseTime: 0,
  p50TTFB: 0,
  p90TTFB: 0,
  p95TTFB: 0,
  p99TTFB: 0,
  startTime: 0,
  endTime: 0,
  userCompletedCount: 0,
  requestsPerSecond: 0,
  successRate: 0,
  endpoints: {} as Record<string, EndpointStats>,
};

// Initialize stats for each endpoint
for (const endpoint of endpoints) {
  stats.endpoints[endpoint] = {
    totalRequests: 0,
    successfulRequests: 0,
    notModifiedResponses: 0,
    failedRequests: 0,
    responseTimeTotal: 0,
    ttfbTimeTotal: 0,
    processingTimeTotal: 0,
    responseTimeMin: Number.MAX_VALUE,
    responseTimeMax: 0,
    ttfbTimeMin: Number.MAX_VALUE,
    ttfbTimeMax: 0,
    timeBuckets: new Array(TIME_BUCKETS.length).fill(0),
    ttfbTimeBuckets: new Array(TIME_BUCKETS.length).fill(0),
  };
}

// ETag cache for each endpoint by user.
// Keyed by `${userId}-${endpoint}` (or `warmup-${endpoint}` during warmup);
// values are the raw ETag header strings returned by the server.
const etagCache: Record<string, string> = {};
186// Helper function to calculate percentiles from time buckets
187function calculatePercentile(buckets: number[], percentile: number): number {
188 const totalSamples = buckets.reduce((sum, count) => sum + count, 0);
189 if (totalSamples === 0) return 0;
190
191 const targetCount = totalSamples * (percentile / 100);
192 let currentCount = 0;
193
194 for (let i = 0; i < buckets.length; i++) {
195 currentCount += buckets[i] ?? 0; // Handle potential undefined values safely
196 if (currentCount >= targetCount) {
197 // Return the bucket boundary
198 return TIME_BUCKETS[i] ?? 0; // Handle potential undefined values safely
199 }
200 }
201
202 return TIME_BUCKETS[TIME_BUCKETS.length - 1] ?? 0; // Handle potential undefined value
203}
204// Helper function to add a time to the appropriate bucket
205function addTimeToBucket(buckets: number[], time: number): void {
206 for (let i = 0; i < TIME_BUCKETS.length; i++) {
207 if (
208 time <= (TIME_BUCKETS[i] || Number.MAX_VALUE) ||
209 i === TIME_BUCKETS.length - 1
210 ) {
211 buckets[i] = (buckets[i] || 0) + 1;
212 break;
213 }
214 }
215}
216
217// Spinner for loading animation
218class Spinner {
219 private frames = ["⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧", "⠇", "⠏"];
220 private interval: NodeJS.Timeout | null = null;
221 private currentFrame = 0;
222 private text: string;
223
224 constructor(text: string) {
225 this.text = text;
226 }
227
228 start() {
229 this.interval = setInterval(() => {
230 process.stdout.write(
231 `\r${chalk.cyan(this.frames[this.currentFrame])} ${this.text}`,
232 );
233 this.currentFrame = (this.currentFrame + 1) % this.frames.length;
234 }, 80);
235 }
236
237 stop() {
238 if (this.interval) {
239 clearInterval(this.interval);
240 this.interval = null;
241 process.stdout.write(
242 "\r \r",
243 );
244 }
245 }
246
247 setText(text: string) {
248 this.text = text;
249 }
250}
251
252// Helper to log with timestamp
253function logWithTime(
254 message: string,
255 type: "info" | "success" | "error" | "warn" = "info",
256) {
257 const timestamp = new Date().toISOString().split("T")[1]?.slice(0, -1) || "";
258 const prefix = {
259 info: chalk.blue(`[${timestamp}] ℹ️ `),
260 success: chalk.green(`[${timestamp}] ✅ `),
261 error: chalk.red(`[${timestamp}] ❌ `),
262 warn: chalk.yellow(`[${timestamp}] ⚠️ `),
263 }[type];
264
265 console.log(`${prefix}${message}`);
266}
267
268// Make a HTTP request with timing
269async function makeRequest(
270 endpoint: string,
271 userId: string,
272 requestId: number,
273): Promise<void> {
274 const url = `${CONFIG.baseUrl}${endpoint}`;
275 const headers: Record<string, string> = {
276 "User-Agent": `stress-test-user-${userId}/request-${requestId}`,
277 };
278
279 // Add ETag if available and caching is enabled
280 const cacheKey = `${userId}-${endpoint}`;
281 if (CONFIG.runWithCaching && etagCache[cacheKey]) {
282 headers["If-None-Match"] = etagCache[cacheKey];
283 }
284
285 try {
286 // Start timing
287 const startTime = performance.now();
288
289 // Create AbortController for timeout
290 const controller = new AbortController();
291 const timeoutId = setTimeout(() => {
292 controller.abort();
293 }, CONFIG.requestTimeout);
294
295 // Make the request
296 const response = await fetch(url, {
297 headers,
298 signal: controller.signal,
299 });
300
301 // Measure TTFB as soon as headers are available
302 const ttfbTime = performance.now() - startTime;
303
304 // Get the response body
305 const text = await response.text();
306
307 // Clear timeout
308 clearTimeout(timeoutId);
309
310 // End timing after body is received
311 const endTime = performance.now();
312 const responseTime = endTime - startTime;
313 const processingTime = responseTime - ttfbTime;
314
315 // Track overall stats
316 stats.totalRequests++;
317 stats.responseTimeTotal += responseTime;
318 stats.ttfbTimeTotal += ttfbTime;
319 stats.processingTimeTotal += processingTime;
320 stats.responseTimeMin = Math.min(stats.responseTimeMin, responseTime);
321 stats.responseTimeMax = Math.max(stats.responseTimeMax, responseTime);
322 stats.ttfbTimeMin = Math.min(stats.ttfbTimeMin, ttfbTime);
323 stats.ttfbTimeMax = Math.max(stats.ttfbTimeMax, ttfbTime);
324
325 // Ensure the endpoint exists in stats.endpoints
326 if (!stats.endpoints[endpoint]) {
327 stats.endpoints[endpoint] = {
328 totalRequests: 0,
329 successfulRequests: 0,
330 notModifiedResponses: 0,
331 failedRequests: 0,
332 responseTimeTotal: 0,
333 ttfbTimeTotal: 0,
334 processingTimeTotal: 0,
335 responseTimeMin: Number.MAX_VALUE,
336 responseTimeMax: 0,
337 ttfbTimeMin: Number.MAX_VALUE,
338 ttfbTimeMax: 0,
339 timeBuckets: new Array(TIME_BUCKETS.length).fill(0),
340 ttfbTimeBuckets: new Array(TIME_BUCKETS.length).fill(0),
341 };
342 }
343
344 // Track endpoint-specific stats
345 stats.endpoints[endpoint].totalRequests++;
346 stats.endpoints[endpoint].responseTimeTotal += responseTime;
347 stats.endpoints[endpoint].ttfbTimeTotal += ttfbTime;
348 stats.endpoints[endpoint].processingTimeTotal += processingTime;
349 stats.endpoints[endpoint].responseTimeMin = Math.min(
350 stats.endpoints[endpoint].responseTimeMin,
351 responseTime,
352 );
353 stats.endpoints[endpoint].responseTimeMax = Math.max(
354 stats.endpoints[endpoint].responseTimeMax,
355 responseTime,
356 );
357 stats.endpoints[endpoint].ttfbTimeMin = Math.min(
358 stats.endpoints[endpoint].ttfbTimeMin,
359 ttfbTime,
360 );
361 stats.endpoints[endpoint].ttfbTimeMax = Math.max(
362 stats.endpoints[endpoint].ttfbTimeMax,
363 ttfbTime,
364 );
365
366 // Track time buckets for percentiles
367 if (CONFIG.trackPercentiles) {
368 addTimeToBucket(stats.endpoints[endpoint].timeBuckets, responseTime);
369 addTimeToBucket(stats.endpoints[endpoint].ttfbTimeBuckets, ttfbTime);
370 }
371
372 if (response.status === 304) {
373 stats.notModifiedResponses++;
374 stats.endpoints[endpoint].notModifiedResponses++;
375 stats.successfulRequests++; // Count 304 as success
376 stats.endpoints[endpoint].successfulRequests++;
377
378 if (!CONFIG.disableDetailedLogging) {
379 logWithTime(
380 `User ${userId.slice(0, 4)} - Request ${requestId} - ${endpoint} - 304 Not Modified (${responseTime.toFixed(2)}ms, TTFB: ${ttfbTime.toFixed(2)}ms)`,
381 "info",
382 );
383 }
384 } else if (response.ok) {
385 stats.successfulRequests++;
386 stats.endpoints[endpoint].successfulRequests++;
387
388 if (!CONFIG.disableDetailedLogging) {
389 logWithTime(
390 `User ${userId.slice(0, 4)} - Request ${requestId} - ${endpoint} - ${response.status} OK (${responseTime.toFixed(2)}ms, TTFB: ${ttfbTime.toFixed(2)}ms)`,
391 "success",
392 );
393 }
394
395 // Store ETag for future requests if caching is enabled
396 if (CONFIG.runWithCaching) {
397 const etag = response.headers.get("ETag");
398 if (etag) {
399 etagCache[cacheKey] = etag;
400 }
401 }
402
403 // Parse JSON response for validation
404 try {
405 JSON.parse(text);
406 } catch (e) {
407 stats.failedRequests++;
408 stats.endpoints[endpoint].failedRequests++;
409 stats.successfulRequests--;
410 stats.endpoints[endpoint].successfulRequests--;
411
412 logWithTime(
413 `User ${userId.slice(0, 4)} - Request ${requestId} - ${endpoint} - Invalid JSON response`,
414 "error",
415 );
416 }
417 } else {
418 stats.failedRequests++;
419 stats.endpoints[endpoint].failedRequests++;
420
421 // Always log errors, even if detailed logging is disabled
422 logWithTime(
423 `User ${userId.slice(0, 4)} - Request ${requestId} - ${endpoint} - ${response.status} Error (${responseTime.toFixed(2)}ms)`,
424 "error",
425 );
426 }
427 } catch (error) {
428 stats.failedRequests++;
429
430 // Ensure the endpoint exists in stats.endpoints
431 if (!stats.endpoints[endpoint]) {
432 stats.endpoints[endpoint] = {
433 totalRequests: 0,
434 successfulRequests: 0,
435 notModifiedResponses: 0,
436 failedRequests: 0,
437 responseTimeTotal: 0,
438 ttfbTimeTotal: 0,
439 processingTimeTotal: 0,
440 responseTimeMin: Number.MAX_VALUE,
441 responseTimeMax: 0,
442 ttfbTimeMin: Number.MAX_VALUE,
443 ttfbTimeMax: 0,
444 timeBuckets: new Array(TIME_BUCKETS.length).fill(0),
445 ttfbTimeBuckets: new Array(TIME_BUCKETS.length).fill(0),
446 };
447 }
448
449 stats.endpoints[endpoint].failedRequests++;
450
451 // Check if this was a timeout
452 const errorMessage = error instanceof Error ? error.message : String(error);
453 const isTimeout =
454 errorMessage.includes("abort") || errorMessage.includes("timeout");
455
456 // Always log errors, even if detailed logging is disabled
457 logWithTime(
458 `User ${userId.slice(0, 4)} - Request ${requestId} - ${endpoint} - ${isTimeout ? "Timeout" : "Exception"}: ${errorMessage}`,
459 "error",
460 );
461 }
462}
463
464// Simulate a user session
465async function simulateUser(userId: string): Promise<void> {
466 try {
467 for (let i = 0; i < CONFIG.requestsPerUser; i++) {
468 // Choose a random endpoint
469 const endpoint = endpoints[Math.floor(Math.random() * endpoints.length)];
470
471 // Make sure endpoint is not undefined before adding it
472 if (endpoint) {
473 // Make the request
474 await makeRequest(endpoint, userId, i + 1);
475
476 // Add a small delay between requests to simulate real user behavior
477 if (CONFIG.delayBetweenRequests > 0) {
478 await new Promise((resolve) =>
479 setTimeout(resolve, CONFIG.delayBetweenRequests),
480 );
481 }
482 }
483 }
484 } catch (error) {
485 logWithTime(
486 `User ${userId.slice(0, 4)} - Error: ${(error as Error).message}`,
487 "error",
488 );
489 } finally {
490 // Mark user as completed regardless of success/failure
491 stats.userCompletedCount++;
492 }
493}
494
495// Do warmup requests to prime the server cache
496async function warmupServer(): Promise<void> {
497 logWithTime(
498 `Warming up server with ${CONFIG.warmupRequests} requests...`,
499 "info",
500 );
501
502 const spinner = new Spinner("Warming up server...");
503 spinner.start();
504
505 const promises: Promise<void>[] = [];
506
507 for (let i = 0; i < CONFIG.warmupRequests; i++) {
508 const endpoint = endpoints[i % endpoints.length];
509 promises.push(
510 fetch(`${CONFIG.baseUrl}${endpoint}`)
511 .then(async (response) => {
512 // Store the ETag for future use
513 const etag = response.headers.get("ETag");
514 if (etag && CONFIG.runWithCaching) {
515 etagCache[`warmup-${endpoint}`] = etag;
516 }
517
518 // Read the response to completion
519 await response.text();
520 })
521 .catch((e) => {
522 logWithTime(`Warmup request error: ${e.message}`, "error");
523 }),
524 );
525 }
526
527 await Promise.allSettled(promises);
528 spinner.stop();
529
530 logWithTime("Server warmup complete", "success");
531}
532
533// Calculate percentiles after test completion
534function calculatePercentiles(): void {
535 if (!CONFIG.trackPercentiles) return;
536
537 // Initialize combined stats objects to track cumulative data
538 const combinedResponseBuckets = new Array(TIME_BUCKETS.length).fill(0);
539 const combinedTTFBBuckets = new Array(TIME_BUCKETS.length).fill(0);
540
541 // Combine all endpoint buckets
542 for (const endpoint in stats.endpoints) {
543 const endpointStats = stats.endpoints[endpoint];
544
545 if (!endpointStats) continue;
546
547 // Add this endpoint's data to the combined buckets
548 for (let i = 0; i < TIME_BUCKETS.length; i++) {
549 combinedResponseBuckets[i] += endpointStats.timeBuckets[i] || 0;
550 combinedTTFBBuckets[i] += endpointStats.ttfbTimeBuckets[i] || 0;
551 }
552 }
553
554 // Calculate overall percentiles from combined data
555 stats.p50ResponseTime = calculatePercentile(combinedResponseBuckets, 50);
556 stats.p90ResponseTime = calculatePercentile(combinedResponseBuckets, 90);
557 stats.p95ResponseTime = calculatePercentile(combinedResponseBuckets, 95);
558 stats.p99ResponseTime = calculatePercentile(combinedResponseBuckets, 99);
559
560 stats.p50TTFB = calculatePercentile(combinedTTFBBuckets, 50);
561 stats.p90TTFB = calculatePercentile(combinedTTFBBuckets, 90);
562 stats.p95TTFB = calculatePercentile(combinedTTFBBuckets, 95);
563 stats.p99TTFB = calculatePercentile(combinedTTFBBuckets, 99);
564}
565
// Print results in a fancy way
/**
 * Prints the completed level's aggregate and per-endpoint statistics to the
 * console. Read-only over the module-level `stats` object.
 *
 * NOTE(review): averages and percentages divide by `stats.totalRequests`,
 * so a level where no request completed would print NaN — confirm callers
 * only invoke this after at least one request.
 */
function printResults() {
  console.log("\n");
  console.log(chalk.bold.cyan("🚀 Stress Test Results 🚀"));
  console.log(
    chalk.gray("════════════════════════════════════════════════════════"),
  );
  console.log(chalk.bold.white("📊 General Stats:"));
  console.log(
    `${chalk.cyan("Total Users:")} ${chalk.yellow(stats.concurrency)}`,
  );
  console.log(
    `${chalk.cyan("Completed Users:")} ${chalk.yellow(stats.userCompletedCount)}`,
  );
  console.log(
    `${chalk.cyan("Total Requests:")} ${chalk.yellow(stats.totalRequests)}`,
  );
  console.log(
    `${chalk.cyan("Successful Requests:")} ${chalk.green(stats.successfulRequests)} (${(
      (stats.successfulRequests / stats.totalRequests) *
      100
    ).toFixed(2)}%)`,
  );
  console.log(
    `${chalk.cyan("Not Modified (304):")} ${chalk.blue(stats.notModifiedResponses)} (${((stats.notModifiedResponses / stats.totalRequests) * 100).toFixed(2)}%)`,
  );
  console.log(
    `${chalk.cyan("Failed Requests:")} ${chalk.red(stats.failedRequests)} (${((stats.failedRequests / stats.totalRequests) * 100).toFixed(2)}%)`,
  );

  // Wall-clock duration of the level, in seconds.
  const durationInSeconds = (stats.endTime - stats.startTime) / 1000;
  console.log(
    `${chalk.cyan("Test Duration:")} ${chalk.yellow(durationInSeconds.toFixed(2))} seconds`,
  );
  console.log(
    `${chalk.cyan("Requests per Second:")} ${chalk.yellow((stats.totalRequests / durationInSeconds).toFixed(2))}`,
  );

  // Derived mean timings over all counted requests.
  const avgResponseTime = stats.responseTimeTotal / stats.totalRequests;
  const avgTTFB = stats.ttfbTimeTotal / stats.totalRequests;
  const avgProcessingTime = stats.processingTimeTotal / stats.totalRequests;

  console.log(
    `${chalk.cyan("Average Response Time:")} ${chalk.yellow(avgResponseTime.toFixed(2))} ms`,
  );
  console.log(
    `${chalk.cyan("Average TTFB:")} ${chalk.yellow(avgTTFB.toFixed(2))} ms`,
  );
  console.log(
    `${chalk.cyan("Average Processing Time:")} ${chalk.yellow(avgProcessingTime.toFixed(2))} ms`,
  );

  console.log(
    `${chalk.cyan("Min Response Time:")} ${chalk.green(stats.responseTimeMin.toFixed(2))} ms`,
  );
  console.log(
    `${chalk.cyan("Max Response Time:")} ${chalk.red(stats.responseTimeMax.toFixed(2))} ms`,
  );

  // Percentile lines only exist when histogram tracking was on.
  if (CONFIG.trackPercentiles) {
    console.log(
      `${chalk.cyan("Response Time (p50/p95/p99):")} ${chalk.yellow(stats.p50ResponseTime.toFixed(2))}/${chalk.yellow(stats.p95ResponseTime.toFixed(2))}/${chalk.red(stats.p99ResponseTime.toFixed(2))} ms`,
    );
    console.log(
      `${chalk.cyan("TTFB Time (p50/p95/p99):")} ${chalk.yellow(stats.p50TTFB.toFixed(2))}/${chalk.yellow(stats.p95TTFB.toFixed(2))}/${chalk.red(stats.p99TTFB.toFixed(2))} ms`,
    );
  }

  console.log("\n");
  console.log(chalk.bold.white("📈 Endpoint Stats:"));

  // One section per endpoint that actually received traffic.
  for (const [endpoint, endpointStats] of Object.entries(stats.endpoints)) {
    if (endpointStats.totalRequests === 0) continue;

    console.log(
      chalk.gray("────────────────────────────────────────────────────"),
    );
    console.log(chalk.bold.cyan(`Endpoint: ${endpoint}`));
    console.log(
      `${chalk.cyan("Total Requests:")} ${chalk.yellow(endpointStats.totalRequests)}`,
    );
    console.log(
      `${chalk.cyan("Successful Requests:")} ${chalk.green(endpointStats.successfulRequests)} (${((endpointStats.successfulRequests / endpointStats.totalRequests) * 100).toFixed(2)}%)`,
    );
    console.log(
      `${chalk.cyan("Not Modified (304):")} ${chalk.blue(endpointStats.notModifiedResponses)} (${((endpointStats.notModifiedResponses / endpointStats.totalRequests) * 100).toFixed(2)}%)`,
    );
    console.log(
      `${chalk.cyan("Failed Requests:")} ${chalk.red(endpointStats.failedRequests)} (${((endpointStats.failedRequests / endpointStats.totalRequests) * 100).toFixed(2)}%)`,
    );

    const avgResponseTime =
      endpointStats.responseTimeTotal / endpointStats.totalRequests;
    const avgEndpointTTFB =
      endpointStats.ttfbTimeTotal / endpointStats.totalRequests;

    console.log(
      `${chalk.cyan("Average Response Time:")} ${chalk.yellow(avgResponseTime.toFixed(2))} ms`,
    );
    console.log(
      `${chalk.cyan("Average TTFB:")} ${chalk.yellow(avgEndpointTTFB.toFixed(2))} ms`,
    );
    console.log(
      `${chalk.cyan("Min Response Time:")} ${chalk.green(endpointStats.responseTimeMin.toFixed(2))} ms`,
    );
    console.log(
      `${chalk.cyan("Max Response Time:")} ${chalk.red(endpointStats.responseTimeMax.toFixed(2))} ms`,
    );
  }

  console.log(
    chalk.gray("════════════════════════════════════════════════════════"),
  );
  console.log(chalk.bold.green("✅ Stress Test Completed"));
  if (CONFIG.runWithCaching) {
    console.log(chalk.bold.blue("ℹ️ Test ran with caching enabled (ETags)"));
  } else {
    console.log(chalk.bold.yellow("⚠️ Test ran without caching (no ETags)"));
  }
}
686
687// Main function
688async function runConcurrencyLevel(
689 concurrencyLevel: number,
690): Promise<ConcurrencyStats> {
691 // Reset stats for this level
692 Object.assign(stats, {
693 concurrency: concurrencyLevel,
694 totalRequests: 0,
695 successfulRequests: 0,
696 notModifiedResponses: 0,
697 failedRequests: 0,
698 responseTimeTotal: 0,
699 ttfbTimeTotal: 0,
700 processingTimeTotal: 0,
701 responseTimeMin: Number.MAX_VALUE,
702 responseTimeMax: 0,
703 ttfbTimeMin: Number.MAX_VALUE,
704 ttfbTimeMax: 0,
705 p50ResponseTime: 0,
706 p90ResponseTime: 0,
707 p95ResponseTime: 0,
708 p99ResponseTime: 0,
709 p50TTFB: 0,
710 p90TTFB: 0,
711 p95TTFB: 0,
712 p99TTFB: 0,
713 startTime: 0,
714 endTime: 0,
715 userCompletedCount: 0,
716 requestsPerSecond: 0,
717 successRate: 0,
718 endpoints: {},
719 });
720
721 // Reset endpoint stats
722 for (const endpoint of endpoints) {
723 stats.endpoints[endpoint] = {
724 totalRequests: 0,
725 successfulRequests: 0,
726 notModifiedResponses: 0,
727 failedRequests: 0,
728 responseTimeTotal: 0,
729 ttfbTimeTotal: 0,
730 processingTimeTotal: 0,
731 responseTimeMin: Number.MAX_VALUE,
732 responseTimeMax: 0,
733 ttfbTimeMin: Number.MAX_VALUE,
734 ttfbTimeMax: 0,
735 timeBuckets: new Array(TIME_BUCKETS.length).fill(0),
736 ttfbTimeBuckets: new Array(TIME_BUCKETS.length).fill(0),
737 };
738 }
739
740 logWithTime(`Running concurrency level: ${concurrencyLevel} users`, "info");
741 stats.startTime = performance.now();
742
743 // Create user promises
744 const userPromises: Promise<void>[] = [];
745
746 for (let i = 0; i < concurrencyLevel; i++) {
747 const userId = randomUUIDv7();
748 userPromises.push(simulateUser(userId));
749 }
750
751 // Wait for all users to complete
752 const spinner = new Spinner(
753 `Running ${concurrencyLevel} concurrent users...`,
754 );
755 spinner.start();
756
757 // Only update spinner occasionally to reduce logging overhead
758 const updateIntervalMs = concurrencyLevel > 10000 ? 500 : 100;
759
760 let lastCount = 0;
761 const updateInterval = setInterval(() => {
762 if (stats.userCompletedCount > lastCount) {
763 lastCount = stats.userCompletedCount;
764 // Only update text if significant progress has been made
765 if (
766 stats.userCompletedCount === concurrencyLevel ||
767 stats.userCompletedCount %
768 Math.max(1, Math.floor(concurrencyLevel / 20)) ===
769 0
770 ) {
771 spinner.setText(
772 `Progress: ${stats.userCompletedCount}/${concurrencyLevel} users (${Math.floor((stats.userCompletedCount / concurrencyLevel) * 100)}%)`,
773 );
774 }
775 }
776 }, updateIntervalMs);
777
778 await Promise.allSettled(userPromises);
779
780 clearInterval(updateInterval);
781 spinner.stop();
782
783 stats.endTime = performance.now();
784
785 // Calculate final stats
786 const durationInSeconds = (stats.endTime - stats.startTime) / 1000;
787 stats.requestsPerSecond = stats.totalRequests / durationInSeconds;
788 stats.successRate =
789 stats.totalRequests > 0
790 ? ((stats.successfulRequests + stats.notModifiedResponses) /
791 stats.totalRequests) *
792 100
793 : 0;
794
795 // Calculate percentiles from time buckets
796 calculatePercentiles();
797
798 // Capture memory usage
799 if (process.memoryUsage) {
800 const memoryUsage = process.memoryUsage();
801 (stats as Record<string, unknown>).memoryUsage = {
802 rss: memoryUsage.rss,
803 heapTotal: memoryUsage.heapTotal,
804 heapUsed: memoryUsage.heapUsed,
805 external: memoryUsage.external,
806 };
807 }
808
809 // Create a deep copy of the stats to return
810 const result: ConcurrencyStats = JSON.parse(JSON.stringify(stats));
811
812 return result;
813}
814
815// Print a summary of all concurrency levels tested
816function printConcurrencySummary(): void {
817 console.log("\n");
818 console.log(chalk.bold.cyan("📊 Concurrency Level Summary"));
819 console.log(
820 chalk.gray("════════════════════════════════════════════════════════"),
821 );
822
823 // Table headers
824 console.log(
825 chalk.bold(
826 `${chalk.cyan("Concurrency").padEnd(10)} | ` +
827 `${chalk.cyan("RPS").padEnd(8)} | ` +
828 `${chalk.cyan("Success %").padEnd(10)} | ` +
829 `${chalk.cyan("Avg(ms)").padEnd(8)} | ` +
830 `${chalk.cyan("p95(ms)").padEnd(8)} | ` +
831 `${chalk.cyan("p99(ms)").padEnd(8)} | ` +
832 `${chalk.cyan("TTFB p95").padEnd(8)} | ` +
833 `${chalk.cyan("Status")}`,
834 ),
835 );
836
837 // Separator
838 console.log(
839 chalk.gray(
840 "───────────┼──────────┼────────────┼──────────┼──────────┼──────────┼──────────┼──────────",
841 ),
842 );
843
844 // For each concurrency level tested
845 for (const result of concurrencyResults) {
846 const isBreakingPoint =
847 breakingPoint && result.concurrency === breakingPoint.concurrency;
848
849 // Format status based on thresholds
850 const statusColor =
851 result.successRate < CONFIG.successThreshold
852 ? chalk.red
853 : result.p95ResponseTime > CONFIG.responseTimeThreshold
854 ? chalk.yellow
855 : chalk.green;
856
857 const status =
858 result.successRate < CONFIG.successThreshold
859 ? "FAIL"
860 : result.p95ResponseTime > CONFIG.responseTimeThreshold
861 ? "SLOW"
862 : "PASS";
863
864 // Text color for the entire row
865 const rowColor = isBreakingPoint ? chalk.bold.red : chalk.white;
866
867 console.log(
868 rowColor(
869 `${result.concurrency.toString().padEnd(10)} | ${result.requestsPerSecond.toFixed(1).padEnd(8)} | ${result.successRate.toFixed(1).padEnd(10)} | ${(result.responseTimeTotal / result.totalRequests).toFixed(1).padEnd(8)} | ${result.p95ResponseTime.toFixed(1).padEnd(8)} | ${result.p99ResponseTime.toFixed(1).padEnd(8)} | ${result.p95TTFB.toFixed(1).padEnd(8)} | ${statusColor(status)}${isBreakingPoint ? " ← BREAKING POINT" : ""}`,
870 ),
871 );
872 }
873
874 console.log(
875 chalk.gray("════════════════════════════════════════════════════════"),
876 );
877
878 if (breakingPoint) {
879 console.log(
880 chalk.yellow(
881 `⚠️ Breaking point detected at ${chalk.bold(breakingPoint.concurrency)} concurrent users`,
882 ),
883 );
884 console.log(
885 ` - Success Rate: ${chalk.bold(breakingPoint.successRate.toFixed(2))}% (Threshold: ${CONFIG.successThreshold}%)`,
886 );
887 console.log(
888 ` - p95 Response Time: ${chalk.bold(breakingPoint.p95ResponseTime.toFixed(2))}ms (Threshold: ${CONFIG.responseTimeThreshold}ms)`,
889 );
890 } else {
891 const lastConcurrency =
892 concurrencyResults.length > 0
893 ? concurrencyResults[concurrencyResults.length - 1]?.concurrency || 0
894 : 0;
895
896 console.log(
897 chalk.green(
898 `✅ No breaking point detected up to ${chalk.bold(lastConcurrency)} concurrent users`,
899 ),
900 );
901 }
902
903 // Find the highest RPS level
904 if (concurrencyResults.length > 0) {
905 const maxRpsResult = concurrencyResults.reduce((prev, current) =>
906 current.requestsPerSecond > prev.requestsPerSecond ? current : prev,
907 );
908
909 console.log(
910 chalk.green(
911 `⚡ Peak performance: ${chalk.bold(maxRpsResult.requestsPerSecond.toFixed(2))} requests/second at ${chalk.bold(maxRpsResult.concurrency)} concurrent users`,
912 ),
913 );
914 }
915}
916
917// Export results to CSV file
918function exportToCsv(): string {
919 const csvRows: string[] = [];
920
921 // Add header row
922 csvRows.push(
923 "Concurrency,Requests,Success Rate,Requests/Sec,Avg Time (ms),p50 (ms),p95 (ms),p99 (ms),TTFB p50 (ms),TTFB p95 (ms),TTFB p99 (ms)",
924 );
925
926 // Add data rows
927 for (const result of concurrencyResults) {
928 csvRows.push(
929 [
930 result.concurrency,
931 result.totalRequests,
932 result.successRate.toFixed(2),
933 result.requestsPerSecond.toFixed(2),
934 (result.responseTimeTotal / result.totalRequests).toFixed(2),
935 result.p50ResponseTime.toFixed(2),
936 result.p95ResponseTime.toFixed(2),
937 result.p99ResponseTime.toFixed(2),
938 result.p50TTFB.toFixed(2),
939 result.p95TTFB.toFixed(2),
940 result.p99TTFB.toFixed(2),
941 ].join(","),
942 );
943 }
944
945 // Join all rows with newlines
946 return csvRows.join("\n");
947}
948
949// Export detailed results to JSON file
950function exportToJson(): string {
951 return JSON.stringify(
952 {
953 config: CONFIG,
954 results: concurrencyResults,
955 breakingPoint: breakingPoint,
956 timestamp: new Date().toISOString(),
957 },
958 null,
959 2,
960 );
961}
962
963// Checks if a test run fails the success criteria
964function checkFailureCriteria(result: ConcurrencyStats): boolean {
965 // Check success rate threshold
966 if (result.successRate < CONFIG.successThreshold) {
967 logWithTime(
968 `Success rate ${result.successRate.toFixed(2)}% is below threshold ${CONFIG.successThreshold}%`,
969 "warn",
970 );
971 return true;
972 }
973
974 // Check response time threshold (p95)
975 if (result.p95ResponseTime > CONFIG.responseTimeThreshold) {
976 logWithTime(
977 `p95 response time ${result.p95ResponseTime.toFixed(2)}ms exceeds threshold ${CONFIG.responseTimeThreshold}ms`,
978 "warn",
979 );
980 return true;
981 }
982
983 return false;
984}
985
986// Save result files
987async function saveResultFiles(): Promise<void> {
988 try {
989 const timestamp = new Date()
990 .toISOString()
991 .replace(/:/g, "-")
992 .replace(/\./g, "-");
993
994 // Save CSV results
995 const csvContent = exportToCsv();
996 const csvFilename = `stress-test-results-${timestamp}.csv`;
997 await Bun.write(csvFilename, csvContent);
998 logWithTime(`Saved CSV results to ${csvFilename}`, "success");
999
1000 // Save JSON results
1001 const jsonContent = exportToJson();
1002 const jsonFilename = `stress-test-results-${timestamp}.json`;
1003 await Bun.write(jsonFilename, jsonContent);
1004 logWithTime(`Saved detailed JSON results to ${jsonFilename}`, "success");
1005 } catch (error) {
1006 logWithTime(
1007 `Error saving result files: ${(error as Error).message}`,
1008 "error",
1009 );
1010 }
1011}
1012
/**
 * Main test driver: runs the load test at geometrically increasing
 * concurrency levels until CONFIG.maxConcurrency is exceeded or a breaking
 * point is detected.
 *
 * Flow: print the effective configuration banner, warm up the server, then
 * loop — run a level, record its stats, optionally stop on failure — and
 * finally print summaries and persist CSV/JSON result files.
 *
 * Side effects: appends to the module-level `concurrencyResults`, may set
 * the module-level `breakingPoint`, and writes files via saveResultFiles().
 */
async function runTest(): Promise<void> {
  // Banner: echo the effective configuration so console output is
  // self-describing (useful when CLI overrides are in play).
  console.log(chalk.bold.cyan("🚀 API Stress Test 🚀"));
  console.log(
    chalk.gray("════════════════════════════════════════════════════════"),
  );
  console.log(chalk.cyan(`Base URL: ${CONFIG.baseUrl}`));
  console.log(chalk.cyan(`Endpoints: ${endpoints.join(", ")}`));
  console.log(
    chalk.cyan(
      `Concurrency: ${CONFIG.startConcurrency} to ${CONFIG.maxConcurrency} (×${CONFIG.concurrencyFactor} steps)`,
    ),
  );
  console.log(chalk.cyan(`Requests per user: ${CONFIG.requestsPerUser}`));
  console.log(chalk.cyan(`Success threshold: ${CONFIG.successThreshold}%`));
  console.log(
    chalk.cyan(`Response time threshold: ${CONFIG.responseTimeThreshold}ms`),
  );

  if (CONFIG.runWithCaching) {
    console.log(chalk.blue("ℹ️ Caching enabled (using ETags)"));
  } else {
    console.log(chalk.yellow("⚠️ Caching disabled (no ETags)"));
  }

  console.log(
    chalk.gray("════════════════════════════════════════════════════════"),
  );

  // Warm up the server first so cold-start latency doesn't skew level 1.
  await warmupServer();

  // Start with the initial concurrency level
  let concurrencyLevel = CONFIG.startConcurrency;

  // Keep testing until we hit the max concurrency or a breaking point
  while (concurrencyLevel <= CONFIG.maxConcurrency) {
    // Run the test at this concurrency level
    const result = await runConcurrencyLevel(concurrencyLevel);

    // Store the result for the final summary and file exports
    concurrencyResults.push(result);

    // Print brief stats for this level
    logWithTime(
      `Completed level: ${concurrencyLevel} users, ` +
        `RPS: ${result.requestsPerSecond.toFixed(2)}, ` +
        `Success: ${result.successRate.toFixed(2)}%, ` +
        `Avg: ${(result.responseTimeTotal / result.totalRequests).toFixed(2)}ms, ` +
        `p95: ${result.p95ResponseTime.toFixed(2)}ms`,
      "success",
    );

    // Stop early if this level failed the criteria and stop-on-failure is on;
    // the failing level is recorded as the breaking point.
    if (CONFIG.stopOnFailure && checkFailureCriteria(result)) {
      breakingPoint = result;
      logWithTime(
        `Breaking point reached at ${concurrencyLevel} concurrent users`,
        "warn",
      );
      break;
    }

    // Geometric progression: multiply (not add) to reach high load quickly.
    concurrencyLevel = Math.round(concurrencyLevel * CONFIG.concurrencyFactor);

    // Give the server a recovery pause before the next (heavier) level,
    // but only if another level will actually run.
    if (concurrencyLevel <= CONFIG.maxConcurrency) {
      logWithTime(
        `Waiting ${CONFIG.delayBetweenLevels / 1000} seconds before next level...`,
        "info",
      );
      await new Promise((resolve) =>
        setTimeout(resolve, CONFIG.delayBetweenLevels),
      );
    }
  }

  // Print final results
  printConcurrencySummary();
  printResults();

  // Persist CSV/JSON result files
  await saveResultFiles();
}
1098
1099// Check args for custom config overrides
1100function parseCliArgs(): void {
1101 const args = process.argv.slice(2);
1102
1103 for (let i = 0; i < args.length; i++) {
1104 const arg = args[i];
1105
1106 if (!arg) continue;
1107
1108 // Check for configuration overrides
1109 if (arg.startsWith("--")) {
1110 const configKey = arg
1111 .slice(2)
1112 .replace(/-([a-z])/g, (g) => g[1]?.toUpperCase() || "");
1113 const configValue = args[i + 1];
1114
1115 if (configValue && !configValue.startsWith("--")) {
1116 try {
1117 // Convert numeric strings or booleans
1118 if (/^\d+$/.test(configValue)) {
1119 (CONFIG as Record<string, unknown>)[configKey] = Number.parseInt(
1120 configValue,
1121 10,
1122 );
1123 } else if (/^\d+\.\d+$/.test(configValue)) {
1124 (CONFIG as Record<string, unknown>)[configKey] =
1125 Number.parseFloat(configValue);
1126 } else if (configValue === "true" || configValue === "false") {
1127 (CONFIG as Record<string, unknown>)[configKey] =
1128 configValue === "true";
1129 } else {
1130 (CONFIG as Record<string, unknown>)[configKey] = configValue;
1131 }
1132
1133 logWithTime(`Config override: ${configKey} = ${configValue}`, "info");
1134 i++; // Skip the value
1135 } catch (e) {
1136 logWithTime(
1137 `Error parsing config value for ${configKey}: ${e}`,
1138 "error",
1139 );
1140 }
1141 }
1142 }
1143 }
1144}
1145
1146// Entry point
1147async function main(): Promise<void> {
1148 try {
1149 // Parse CLI arguments
1150 parseCliArgs();
1151
1152 // Run the test
1153 await runTest();
1154 } catch (error) {
1155 logWithTime(`Stress test failed: ${(error as Error).message}`, "error");
1156 console.error(error);
1157 process.exit(1);
1158 }
1159}
1160
1161// Start the test
1162main();