firefox + llama.cpp == very good prose.
0
fork

Configure Feed

Select the types of activity you want to include in your feed.

feat(extension): initial version

eagleusb 1e8812df 1a24dc8d

+984
+152
src/api.ts
··· 1 + import { API_BASE_URL, API_PARAMS, API_TIMEOUT_MS } from "./config"; 2 + import type { ApiErrorResponse, ApiChatCompletionStreamChunk } from "./types/api"; 3 + 4 + /** Error thrown when the API call fails for any reason (network, HTTP, malformed response). */ 5 + export class ApiError extends Error { 6 + constructor( 7 + message: string, 8 + public readonly statusCode?: number, 9 + public readonly overrideCause?: unknown, 10 + ) { 11 + super(message); 12 + this.name = "ApiError"; 13 + } 14 + } 15 + 16 + /** 17 + * Streams a text correction request to the local llama.cpp server. 18 + * 19 + * Yields each token as it arrives from the SSE stream, enabling progressive 20 + * display in the UI without waiting for the full response. 21 + * 22 + * @param text - Validated, non-empty input text 23 + * @param systemPrompt - System prompt to instruct the model 24 + * @yields Individual content tokens from the model's stream 25 + * @throws {@link ApiError} on timeout, HTTP errors, or network failures 26 + */ 27 + export async function* streamCorrection( 28 + text: string, 29 + systemPrompt: string, 30 + ): AsyncGenerator<string, void, undefined> { 31 + const controller = new AbortController(); 32 + const timeout = setTimeout(() => controller.abort(), API_TIMEOUT_MS); 33 + 34 + let response: Response; 35 + 36 + try { 37 + response = await fetch(`${API_BASE_URL}/v1/chat/completions`, { 38 + method: "POST", 39 + headers: { "Content-Type": "application/json" }, 40 + body: JSON.stringify({ 41 + ...API_PARAMS, 42 + stream: true, 43 + messages: [ 44 + { role: "system", content: systemPrompt }, 45 + { role: "user", content: text }, 46 + ], 47 + }), 48 + signal: controller.signal, 49 + }); 50 + } catch (err: unknown) { 51 + clearTimeout(timeout); 52 + 53 + if (err instanceof DOMException && err.name === "AbortError") { 54 + throw new ApiError( 55 + `Request timed out after ${API_TIMEOUT_MS / 1_000} seconds.`, 56 + ); 57 + } 58 + 59 + throw new ApiError( 60 + `Failed to connect 
to API at ${API_BASE_URL}. Is llama.cpp server running?`, 61 + undefined, 62 + err, 63 + ); 64 + } 65 + 66 + // Connection established — clear the connect-timeout 67 + clearTimeout(timeout); 68 + 69 + // ─── HTTP status errors ────────────────────────────────────────────────── 70 + 71 + if (!response.ok) { 72 + const status = response.status; 73 + 74 + let detail: string; 75 + try { 76 + const body = (await response.json()) as ApiErrorResponse; 77 + detail = body.error?.message ?? response.statusText; 78 + } catch { 79 + detail = response.statusText; 80 + } 81 + 82 + switch (status) { 83 + case 404: 84 + throw new ApiError( 85 + `API endpoint not found (404). Is llama.cpp server running at ${API_BASE_URL}?`, 86 + status, 87 + ); 88 + case 429: 89 + throw new ApiError("Rate limited by the API (429). Please wait and try again.", status); 90 + case 502: 91 + case 503: 92 + throw new ApiError( 93 + `Server is unavailable (${status}). Check llama.cpp server logs.`, 94 + status, 95 + ); 96 + default: 97 + if (status >= 500) { 98 + throw new ApiError(`Server error (${status}): ${detail}`, status); 99 + } 100 + throw new ApiError(`HTTP ${status}: ${detail}`, status); 101 + } 102 + } 103 + 104 + // ─── SSE stream parsing ───────────────────────────────────────────────── 105 + 106 + const body = response.body; 107 + if (!body) { 108 + throw new ApiError("Streaming not supported: response body is null."); 109 + } 110 + 111 + const reader = body.getReader(); 112 + const decoder = new TextDecoder(); 113 + let buffer = ""; 114 + 115 + try { 116 + while (true) { 117 + const { done, value } = await reader.read(); 118 + if (done) break; 119 + 120 + buffer += decoder.decode(value, { stream: true }); 121 + 122 + const lines = buffer.split("\n"); 123 + buffer = lines.pop()!; 124 + 125 + for (const line of lines) { 126 + const trimmed = line.trim(); 127 + 128 + if (!trimmed || trimmed === "data: [DONE]") { 129 + continue; 130 + } 131 + 132 + if (!trimmed.startsWith("data: ")) { 133 + 
continue; 134 + } 135 + 136 + let chunk: ApiChatCompletionStreamChunk; 137 + try { 138 + chunk = JSON.parse(trimmed.slice(6)); 139 + } catch { 140 + continue; 141 + } 142 + 143 + const token = chunk.choices?.[0]?.delta?.content; 144 + if (token) { 145 + yield token; 146 + } 147 + } 148 + } 149 + } finally { 150 + reader.releaseLock(); 151 + } 152 + }
+143
src/background.ts
··· 1 + import { streamCorrection, ApiError } from "./api"; 2 + import { validateInput, ValidationError } from "./validation"; 3 + import { CORRECT_PROMPT, SUGGEST_PROMPT } from "./config"; 4 + 5 + /** Context menu item ID. */ 6 + const MENU_ID = "correct-with-llamacpp"; 7 + 8 + /** Section identifiers for streaming responses. */ 9 + type Section = "corrected" | "suggested"; 10 + 11 + /** 12 + * Pending state for a popup window that hasn't signalled "ready" yet. 13 + */ 14 + interface PendingResult { 15 + tabId: number; 16 + resolve: () => void; 17 + } 18 + 19 + let pending: PendingResult | null = null; 20 + 21 + // ─── Context menu setup ──────────────────────────────────────────────────── 22 + 23 + browser.runtime.onInstalled.addListener(() => { 24 + browser.contextMenus.create({ 25 + id: MENU_ID, 26 + title: "shakespeare edit (selection)", 27 + contexts: ["selection"], 28 + }); 29 + }); 30 + 31 + // ─── Message handler (result tab readiness) ──────────────────────────────── 32 + 33 + browser.runtime.onMessage.addListener((msg: { type: string }) => { 34 + if (msg.type === "ready") { 35 + if (pending) { 36 + pending.resolve(); 37 + } 38 + } 39 + }); 40 + 41 + // ─── Context menu click handler ──────────────────────────────────────────── 42 + 43 + browser.contextMenus.onClicked.addListener(async (info) => { 44 + if (info.menuItemId !== MENU_ID) { 45 + return; 46 + } 47 + 48 + // 1. Validate input 49 + let inputText: string; 50 + try { 51 + inputText = validateInput(info.selectionText); 52 + } catch (err: unknown) { 53 + const msg = err instanceof ValidationError ? err.message : "Invalid text selection."; 54 + openPopupWithError(msg); 55 + return; 56 + } 57 + 58 + // 2. 
Open popup immediately (shows loading spinner) 59 + const window = await browser.windows.create({ 60 + type: "popup", 61 + url: browser.runtime.getURL("result.html"), 62 + width: 700, 63 + height: 500, 64 + }); 65 + 66 + const tab = window.tabs?.[0]; 67 + if (!tab?.id) { 68 + return; 69 + } 70 + const tabId = tab.id; 71 + 72 + // 3. Wait for the result tab to signal "ready" 73 + const readyPromise = new Promise<void>((resolve) => { 74 + pending = { tabId, resolve }; 75 + }); 76 + await readyPromise; 77 + pending = null; 78 + 79 + // 4. Tell the result tab to show the original text 80 + await browser.tabs.sendMessage(tabId, { 81 + type: "start", 82 + original: inputText, 83 + }); 84 + 85 + // 5. Sequential streaming: corrected first, then suggested 86 + try { 87 + await streamSection(tabId, inputText, CORRECT_PROMPT, "corrected"); 88 + await streamSection(tabId, inputText, SUGGEST_PROMPT, "suggested"); 89 + await browser.tabs.sendMessage(tabId, { type: "done" }); 90 + } catch (err: unknown) { 91 + const msg = 92 + err instanceof ApiError || err instanceof ValidationError 93 + ? 
err.message 94 + : "An unexpected error occurred."; 95 + await browser.tabs.sendMessage(tabId, { type: "error", message: msg }); 96 + } 97 + }); 98 + 99 + // ─── Stream a single section from the API ────────────────────────────────── 100 + 101 + async function streamSection( 102 + tabId: number, 103 + text: string, 104 + systemPrompt: string, 105 + section: Section, 106 + ): Promise<void> { 107 + await browser.tabs.sendMessage(tabId, { type: "section-start", section }); 108 + 109 + for await (const token of streamCorrection(text, systemPrompt)) { 110 + await browser.tabs.sendMessage(tabId, { 111 + type: "stream", 112 + section, 113 + token, 114 + }); 115 + } 116 + 117 + await browser.tabs.sendMessage(tabId, { type: "section-done", section }); 118 + } 119 + 120 + // ─── Helper: open popup with an error when validation fails immediately ──── 121 + 122 + async function openPopupWithError(message: string): Promise<void> { 123 + const window = await browser.windows.create({ 124 + type: "popup", 125 + url: browser.runtime.getURL("result.html"), 126 + width: 600, 127 + height: 400, 128 + }); 129 + 130 + const tab = window.tabs?.[0]; 131 + if (!tab?.id) { 132 + return; 133 + } 134 + const tabId = tab.id; 135 + 136 + const readyPromise = new Promise<void>((resolve) => { 137 + pending = { tabId, resolve }; 138 + }); 139 + await readyPromise; 140 + pending = null; 141 + 142 + await browser.tabs.sendMessage(tabId, { type: "error", message }); 143 + }
+67
src/config.ts
··· 1 + import type { ApiChatCompletionRequest } from "./types/api"; 2 + 3 + /** Base URL of the llama.cpp server (OpenAI-compatible API). */ 4 + export const API_BASE_URL = "http://localhost:8080"; 5 + 6 + /** Shared tone rules appended to every prompt. */ 7 + const TONE_RULES = `# Tone 8 + When responding, you must follow these rules: 9 + - follow the original tone and style 10 + - answer directly from your knowledge when you can 11 + - be concise, prioritize clarity, brevity and don't repeat yourself 12 + - admit when you're unsure rather than making things up`; 13 + 14 + /** 15 + * System prompt for grammar/spelling correction. 16 + * Instructs the model to return only the corrected text. 17 + */ 18 + export const CORRECT_PROMPT = `# Agent Guidelines 19 + You are an agent specialized in english grammar correction. 20 + Correct the grammar, spelling, and punctuation of the submitted text. 21 + 22 + # Output 23 + Return ONLY the corrected text. No headings, no explanations, no markdown formatting. 24 + 25 + ${TONE_RULES}`; 26 + 27 + /** 28 + * System prompt for wording improvement. 29 + * Instructs the model to return a better-worded version of the text. 30 + */ 31 + export const SUGGEST_PROMPT = `# Agent Guidelines 32 + You are an agent specialized in english writing improvement. 33 + Rewrite the submitted text with better wording and phrasing while preserving the original meaning. 34 + 35 + # Output 36 + Return ONLY the improved text, with the same format, return lines, spacing, and punctuation. No headings, no explanations, no markdown formatting. 37 + 38 + ${TONE_RULES}`; 39 + 40 + /** Parameters sent to the chat completions endpoint (generation/sampling subset). 
*/ 41 + export const API_PARAMS: Pick<ApiChatCompletionRequest, 42 + | "temperature" 43 + | "max_tokens" 44 + | "top_p" 45 + | "top_k" 46 + | "min_p" 47 + | "repeat_penalty" 48 + | "frequency_penalty" 49 + | "presence_penalty" 50 + | "stream" 51 + > = { 52 + temperature: 1.0, 53 + max_tokens: 2048, 54 + top_p: 0.95, 55 + top_k: 40, 56 + min_p: 0.01, 57 + repeat_penalty: 1.0, 58 + frequency_penalty: 0.0, 59 + presence_penalty: 0.0, 60 + stream: true, 61 + }; 62 + 63 + /** HTTP request timeout in milliseconds. */ 64 + export const API_TIMEOUT_MS = 30_000; 65 + 66 + /** Maximum allowed input text length in characters. */ 67 + export const MAX_INPUT_LENGTH = 10_000;
+29
src/manifest.json
··· 1 + { 2 + "manifest_version": 2, 3 + "name": "Shakespeare", 4 + "version": "0.1.0", 5 + "description": "Correct selected text via local OpenAI-compatible API", 6 + "browser_specific_settings": { 7 + "gecko": { 8 + "id": "shakespeare@eagleusb.com", 9 + "strict_min_version": "142.0", 10 + "data_collection_permissions": { 11 + "required": [ 12 + "none" 13 + ] 14 + } 15 + } 16 + }, 17 + "permissions": [ 18 + "contextMenus", 19 + "activeTab", 20 + "tabs", 21 + "http://localhost:8080/*" 22 + ], 23 + "background": { 24 + "scripts": [ 25 + "background.js" 26 + ] 27 + }, 28 + "icons": {} 29 + }
+217
src/result.html
<!DOCTYPE html>
<html lang="en">
<head>
  <meta charset="UTF-8">
  <meta name="viewport" content="width=device-width, initial-scale=1.0">
  <title>shakespeare prose.</title>
  <style>
    * { margin: 0; padding: 0; box-sizing: border-box; }

    body {
      font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, sans-serif;
      background: #1a1a2e;
      color: #e0e0e0;
      padding: 20px;
      min-height: 100vh;
    }

    h1 {
      font-size: 14px;
      font-weight: 600;
      color: #8888aa;
      text-transform: uppercase;
      letter-spacing: 0.5px;
      margin-bottom: 16px;
    }

    /* ─── Loading spinner ──────────────────────────────────────────── */

    #loading {
      display: flex;
      flex-direction: column;
      align-items: center;
      justify-content: center;
      padding: 60px 0;
      gap: 16px;
    }

    .spinner {
      width: 36px;
      height: 36px;
      border: 3px solid #333;
      border-top-color: #6c63ff;
      border-radius: 50%;
      animation: spin 0.8s linear infinite;
    }

    @keyframes spin { to { transform: rotate(360deg); } }

    #loading p {
      font-size: 14px;
      color: #8888aa;
    }

    /* ─── Error display ────────────────────────────────────────────── */

    #error {
      display: none; /* shown by result.js on error messages */
      background: #3a1a1a;
      border: 1px solid #6b2a2a;
      border-radius: 8px;
      padding: 16px;
      color: #ff8888;
      font-size: 14px;
      line-height: 1.5;
    }

    #error h2 {
      font-size: 13px;
      font-weight: 600;
      margin-bottom: 8px;
      color: #ff6666;
    }

    /* ─── Result display ───────────────────────────────────────────── */

    #result {
      display: none; /* shown by result.js once streaming starts */
    }

    .section {
      margin-bottom: 16px;
    }

    .section-label {
      font-size: 11px;
      font-weight: 600;
      color: #6c63ff;
      text-transform: uppercase;
      letter-spacing: 0.5px;
      margin-bottom: 8px;
    }

    .section-content {
      background: #16213e;
      border: 1px solid #2a2a4a;
      border-radius: 8px;
      padding: 14px;
      font-size: 14px;
      line-height: 1.6;
      white-space: pre-wrap;
      word-break: break-word;
    }

    .section-content.original {
      color: #aaaacc;
      text-decoration: line-through;
      text-decoration-color: #665566;
    }

    .section-content.corrected {
      color: #aaffaa;
      border-color: #2a4a2a;
    }

    .section-content.suggested {
      color: #aaddff;
      border-color: #2a3a5a;
    }

    .copy-btn {
      display: inline-flex;
      align-items: center;
      gap: 6px;
      margin-top: 10px;
      padding: 8px 16px;
      background: #6c63ff;
      color: #fff;
      border: none;
      border-radius: 6px;
      font-size: 13px;
      font-weight: 500;
      cursor: pointer;
      transition: background 0.15s;
    }

    .copy-btn:hover { background: #5a52e0; }
    .copy-btn:active { background: #4a42c0; }

    .copy-btn.copied {
      background: #4caf50;
    }

    /* ─── Streaming indicator (bouncing dots) ──────────────────────── */

    .streaming-indicator {
      display: inline-flex;
      align-items: center;
      gap: 4px;
      padding: 4px 0;
    }

    .streaming-indicator.hidden {
      display: none;
    }

    .streaming-indicator span {
      display: inline-block;
      width: 6px;
      height: 6px;
      border-radius: 50%;
      background: #6c63ff;
      animation: bounce 1.2s ease-in-out infinite;
    }

    .streaming-indicator span:nth-child(2) {
      animation-delay: 0.15s;
    }

    .streaming-indicator span:nth-child(3) {
      animation-delay: 0.3s;
    }

    @keyframes bounce {
      0%, 60%, 100% { transform: translateY(0); opacity: 0.4; }
      30% { transform: translateY(-6px); opacity: 1; }
    }
  </style>
</head>
<body>
  <!-- Initial state: spinner until the background script sends "start" -->
  <div id="loading">
    <div class="spinner"></div>
    <p>Correcting text...</p>
  </div>

  <!-- Error state: populated and revealed by result.js -->
  <div id="error">
    <h2>Error</h2>
    <p id="error-message"></p>
  </div>

  <!-- Result state: original / corrected / suggested sections -->
  <div id="result">
    <div class="section">
      <div class="section-label">Original</div>
      <div class="section-content original" id="original-text"></div>
    </div>
    <div class="section">
      <div class="section-label">Corrected</div>
      <div class="section-content corrected" id="corrected-text">
        <div id="corrected-indicator" class="streaming-indicator hidden">
          <span></span><span></span><span></span>
        </div>
      </div>
      <button type="button" class="copy-btn" id="copy-corrected-btn" disabled>Copy corrected</button>
    </div>
    <div class="section">
      <div class="section-label">Suggested</div>
      <div class="section-content suggested" id="suggested-text">
        <div id="suggested-indicator" class="streaming-indicator hidden">
          <span></span><span></span><span></span>
        </div>
      </div>
      <button type="button" class="copy-btn" id="copy-suggested-btn" disabled>Copy suggested</button>
    </div>
  </div>

  <script src="result.js"></script>
</body>
</html>
+158
src/result.ts
··· 1 + // ─── DOM references ──────────────────────────────────────────────────────── 2 + 3 + const loadingEl = document.getElementById("loading")!; 4 + const errorEl = document.getElementById("error")!; 5 + const errorMessageEl = document.getElementById("error-message")!; 6 + const resultEl = document.getElementById("result")!; 7 + const originalTextEl = document.getElementById("original-text")!; 8 + const correctedTextEl = document.getElementById("corrected-text")!; 9 + const suggestedTextEl = document.getElementById("suggested-text")!; 10 + const correctedIndicator = document.getElementById("corrected-indicator")!; 11 + const suggestedIndicator = document.getElementById("suggested-indicator")!; 12 + const copyCorrectedBtn = document.getElementById("copy-corrected-btn") as HTMLButtonElement; 13 + const copySuggestedBtn = document.getElementById("copy-suggested-btn") as HTMLButtonElement; 14 + 15 + // ─── Section state ───────────────────────────────────────────────────────── 16 + 17 + type Section = "corrected" | "suggested"; 18 + 19 + interface SectionState { 20 + el: HTMLElement; 21 + indicator: HTMLElement; 22 + copyBtn: HTMLButtonElement; 23 + accumulated: string; 24 + firstToken: boolean; 25 + } 26 + 27 + const sections: Record<Section, SectionState> = { 28 + corrected: { 29 + el: correctedTextEl, 30 + indicator: correctedIndicator, 31 + copyBtn: copyCorrectedBtn, 32 + accumulated: "", 33 + firstToken: true, 34 + }, 35 + suggested: { 36 + el: suggestedTextEl, 37 + indicator: suggestedIndicator, 38 + copyBtn: copySuggestedBtn, 39 + accumulated: "", 40 + firstToken: true, 41 + }, 42 + }; 43 + 44 + // ─── Message types from background script ────────────────────────────────── 45 + 46 + type ResultMessage = 47 + | { type: "start"; original: string } 48 + | { type: "section-start"; section: Section } 49 + | { type: "stream"; section: Section; token: string } 50 + | { type: "section-done"; section: Section } 51 + | { type: "done" } 52 + | { type: "error"; 
message: string }; 53 + 54 + // ─── Message handler ─────────────────────────────────────────────────────── 55 + 56 + browser.runtime.onMessage.addListener((msg: ResultMessage) => { 57 + switch (msg.type) { 58 + case "start": 59 + showStart(msg.original); 60 + break; 61 + case "section-start": 62 + showSectionStart(msg.section); 63 + break; 64 + case "stream": 65 + appendToken(msg.section, msg.token); 66 + break; 67 + case "section-done": 68 + showSectionDone(msg.section); 69 + break; 70 + case "done": 71 + break; 72 + case "error": 73 + showError(msg.message); 74 + break; 75 + } 76 + }); 77 + 78 + // ─── UI helpers ──────────────────────────────────────────────────────────── 79 + 80 + function showStart(original: string): void { 81 + loadingEl.style.display = "none"; 82 + errorEl.style.display = "none"; 83 + resultEl.style.display = "block"; 84 + 85 + originalTextEl.textContent = original; 86 + 87 + for (const state of Object.values(sections)) { 88 + state.el.textContent = ""; 89 + state.accumulated = ""; 90 + state.firstToken = true; 91 + state.copyBtn.disabled = true; 92 + } 93 + } 94 + 95 + function showSectionStart(section: Section): void { 96 + const state = sections[section]; 97 + state.el.textContent = ""; 98 + state.el.appendChild(state.indicator); 99 + state.indicator.classList.remove("hidden"); 100 + } 101 + 102 + function appendToken(section: Section, token: string): void { 103 + const state = sections[section]; 104 + 105 + if (state.firstToken) { 106 + state.indicator.classList.add("hidden"); 107 + state.el.textContent = ""; 108 + state.firstToken = false; 109 + } 110 + 111 + state.accumulated += token; 112 + state.el.textContent = state.accumulated; 113 + } 114 + 115 + function showSectionDone(section: Section): void { 116 + sections[section].copyBtn.disabled = false; 117 + } 118 + 119 + function showError(message: string): void { 120 + loadingEl.style.display = "none"; 121 + resultEl.style.display = "none"; 122 + errorEl.style.display = "block"; 123 
+ 124 + errorMessageEl.textContent = message; 125 + } 126 + 127 + // ─── Copy buttons ────────────────────────────────────────────────────────── 128 + 129 + function setupCopyButton(btn: HTMLButtonElement, label: string): void { 130 + btn.addEventListener("click", async () => { 131 + const text = btn.closest(".section")?.querySelector(".section-content")?.textContent; 132 + if (!text) { 133 + return; 134 + } 135 + 136 + try { 137 + await navigator.clipboard.writeText(text); 138 + btn.textContent = "Copied!"; 139 + btn.classList.add("copied"); 140 + setTimeout(() => { 141 + btn.textContent = label; 142 + btn.classList.remove("copied"); 143 + }, 2_000); 144 + } catch { 145 + btn.textContent = "Copy failed"; 146 + setTimeout(() => { 147 + btn.textContent = label; 148 + }, 2_000); 149 + } 150 + }); 151 + } 152 + 153 + setupCopyButton(copyCorrectedBtn, "Copy corrected"); 154 + setupCopyButton(copySuggestedBtn, "Copy suggested"); 155 + 156 + // ─── Signal readiness to background script ───────────────────────────────── 157 + 158 + browser.runtime.sendMessage({ type: "ready" });
+133
src/types/api.d.ts
··· 1 + /** 2 + * Types adapted from llama.cpp upstream: 3 + * https://github.com/ggml-org/llama.cpp/blob/master/tools/server/webui/src/lib/types/api.d.ts 4 + */ 5 + 6 + export interface ApiChatMessageContentPart { 7 + type: string; 8 + text?: string; 9 + image_url?: { 10 + url: string; 11 + }; 12 + input_audio?: { 13 + data: string; 14 + format: "wav" | "mp3"; 15 + }; 16 + } 17 + 18 + export interface ApiChatCompletionToolFunction { 19 + name: string; 20 + description?: string; 21 + parameters: Record<string, unknown>; 22 + } 23 + 24 + export interface ApiChatCompletionTool { 25 + type: "function"; 26 + function: ApiChatCompletionToolFunction; 27 + } 28 + 29 + export interface ApiChatCompletionToolCallFunctionDelta { 30 + name?: string; 31 + arguments?: string; 32 + } 33 + 34 + export interface ApiChatCompletionToolCallDelta { 35 + index?: number; 36 + id?: string; 37 + type?: string; 38 + function?: ApiChatCompletionToolCallFunctionDelta; 39 + } 40 + 41 + export type ChatRole = "system" | "user" | "assistant" | "tool"; 42 + 43 + export interface ApiChatCompletionRequest { 44 + messages: Array<{ 45 + role: ChatRole; 46 + content: string | ApiChatMessageContentPart[]; 47 + reasoning_content?: string; 48 + tool_calls?: ApiChatCompletionToolCallDelta[]; 49 + tool_call_id?: string; 50 + }>; 51 + stream?: boolean; 52 + model?: string; 53 + return_progress?: boolean; 54 + tools?: ApiChatCompletionTool[]; 55 + /** Reasoning parameters */ 56 + reasoning_format?: string; 57 + /** Generation parameters */ 58 + temperature?: number; 59 + max_tokens?: number; 60 + /** Sampling parameters */ 61 + dynatemp_range?: number; 62 + dynatemp_exponent?: number; 63 + top_k?: number; 64 + top_p?: number; 65 + min_p?: number; 66 + xtc_probability?: number; 67 + xtc_threshold?: number; 68 + typ_p?: number; 69 + /** Penalty parameters */ 70 + repeat_last_n?: number; 71 + repeat_penalty?: number; 72 + presence_penalty?: number; 73 + frequency_penalty?: number; 74 + dry_multiplier?: number; 
75 + dry_base?: number; 76 + dry_allowed_length?: number; 77 + dry_penalty_last_n?: number; 78 + /** Sampler configuration */ 79 + samplers?: string[]; 80 + backend_sampling?: boolean; 81 + /** Custom parameters (JSON string) */ 82 + custom?: Record<string, unknown>; 83 + timings_per_token?: boolean; 84 + } 85 + 86 + export interface ApiChatCompletionResponse { 87 + model?: string; 88 + choices: Array<{ 89 + model?: string; 90 + metadata?: { model?: string }; 91 + message: { 92 + content: string; 93 + reasoning_content?: string; 94 + model?: string; 95 + tool_calls?: Array<ApiChatCompletionToolCallDelta & { 96 + function?: ApiChatCompletionToolCallFunctionDelta & { arguments?: string }; 97 + }>; 98 + }; 99 + finish_reason?: string | null; 100 + }>; 101 + } 102 + 103 + export interface ApiChatCompletionStreamChunk { 104 + object?: string; 105 + model?: string; 106 + choices: Array<{ 107 + model?: string; 108 + metadata?: { model?: string }; 109 + delta: { 110 + content?: string; 111 + reasoning_content?: string; 112 + model?: string; 113 + tool_calls?: ApiChatCompletionToolCallDelta[]; 114 + }; 115 + finish_reason?: string | null; 116 + }>; 117 + } 118 + 119 + export interface ApiErrorResponse { 120 + error: 121 + | { 122 + code: number; 123 + message: string; 124 + type: "exceed_context_size_error"; 125 + n_prompt_tokens: number; 126 + n_ctx: number; 127 + } 128 + | { 129 + code: number; 130 + message: string; 131 + type?: string; 132 + }; 133 + }
+85
src/validation.ts
··· 1 + import { MAX_INPUT_LENGTH } from "./config"; 2 + import type { ApiChatCompletionResponse } from "./types/api"; 3 + 4 + /** Error thrown when input or output validation fails. */ 5 + export class ValidationError extends Error { 6 + constructor(message: string) { 7 + super(message); 8 + this.name = "ValidationError"; 9 + } 10 + } 11 + 12 + /** 13 + * Validates and sanitises the user-selected text before sending it to the API. 14 + * 15 + * @param text - Raw value from `info.selectionText` (may be `undefined` or any type) 16 + * @returns Trimmed, validated string 17 + * @throws {@link ValidationError} when the input is invalid 18 + */ 19 + export function validateInput(text: unknown): string { 20 + if (typeof text !== "string") { 21 + throw new ValidationError("Selected text is not a valid string."); 22 + } 23 + 24 + if (text.includes("\0")) { 25 + throw new ValidationError("Selected text contains invalid characters (null bytes)."); 26 + } 27 + 28 + const trimmed = text.trim(); 29 + 30 + if (trimmed.length === 0) { 31 + throw new ValidationError("Selected text is empty."); 32 + } 33 + 34 + if (!/\S/.test(trimmed)) { 35 + throw new ValidationError("Selected text contains no readable content."); 36 + } 37 + 38 + if (trimmed.length > MAX_INPUT_LENGTH) { 39 + throw new ValidationError( 40 + `Selected text is too long (${trimmed.length.toLocaleString()} characters). Maximum is ${MAX_INPUT_LENGTH.toLocaleString()}.`, 41 + ); 42 + } 43 + 44 + return trimmed; 45 + } 46 + 47 + /** 48 + * Validates the parsed JSON response from the OpenAI-compatible chat completions endpoint. 49 + * 50 + * Expected shape: 51 + * ```json 52 + * { "choices": [{ "message": { "content": "..." 
} }] } 53 + * ``` 54 + * 55 + * @param data - Parsed JSON from the API response 56 + * @returns The validated content string 57 + * @throws {@link ValidationError} when the response is malformed or empty 58 + */ 59 + export function validateOutput(data: unknown): string { 60 + if (typeof data !== "object" || data === null) { 61 + throw new ValidationError("Invalid API response: expected a JSON object."); 62 + } 63 + 64 + const response = data as ApiChatCompletionResponse; 65 + 66 + if (!Array.isArray(response.choices) || response.choices.length === 0) { 67 + throw new ValidationError("Empty response from API (no choices returned)."); 68 + } 69 + 70 + const firstChoice = response.choices[0]; 71 + if (!firstChoice?.message) { 72 + throw new ValidationError("Malformed response: missing message object."); 73 + } 74 + 75 + if (typeof firstChoice.message.content !== "string") { 76 + throw new ValidationError("Malformed response: content is not a string."); 77 + } 78 + 79 + const content = firstChoice.message.content.trim(); 80 + if (content.length === 0) { 81 + throw new ValidationError("API returned an empty correction."); 82 + } 83 + 84 + return content; 85 + }