A simple Bluesky bot to make sense of the noise, with responses powered by Gemini, similar to Grok.
import modelPrompt from "../model/prompt.txt";
import { ChatMessage, RichText } from "@skyware/bot";
// NOTE: assumes c.ai is a GoogleGenAI client from @google/genai, which
// exports the Content type used to type the request history below.
import type { Content } from "@google/genai";
import * as c from "../core";
import * as tools from "../tools";
import consola from "consola";
import { env } from "../env";
import db from "../db";
import { messages } from "../db/schema";
import { and, count, eq, gte } from "drizzle-orm";
import {
  exceedsGraphemes,
  multipartResponse,
  parseConversation,
  saveMessage,
} from "../utils/conversation";

const logger = consola.withTag("Message Handler");

type SupportedFunctionCall = typeof c.SUPPORTED_FUNCTION_CALLS[number];

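/**
 * Runs inference against Gemini. The request history starts with two
 * "model" priming turns (the prompt template with $handle substituted,
 * then the parsed conversation context) followed by the chat messages.
 * If the model requests a supported function call, the tool is executed
 * once, its result is appended to the history, and a second generation
 * pass produces the final answer.
 */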
async function generateAIResponse(parsedContext: string, messages: {
  role: string;
  parts: {
    text: string;
  }[];
}[]) {
  const config = {
    model: env.GEMINI_MODEL,
    config: {
      tools: tools.declarations,
    },
  };

  const contents: Content[] = [
    {
      role: "model",
      parts: [
        {
          text: modelPrompt.replace("$handle", env.HANDLE),
        },
      ],
    },
    {
      role: "model",
      parts: [
        {
          text: parsedContext,
        },
      ],
    },
    ...messages,
  ];

  let inference = await c.ai.models.generateContent({
    ...config,
    contents,
  });

  logger.log(
    `Initial inference took ${inference.usageMetadata?.totalTokenCount} tokens`,
  );

  if (inference.functionCalls && inference.functionCalls.length > 0) {
    const call = inference.functionCalls[0];

    if (
      call &&
      c.SUPPORTED_FUNCTION_CALLS.includes(call.name as SupportedFunctionCall)
    ) {
      logger.log("Function call invoked:", call.name);

      const functionResponse = await tools.handler(
        call as typeof call & { name: SupportedFunctionCall },
      );

      logger.log("Function response:", functionResponse);

      // Push the model turn that issued the call back into the history,
      // followed by the tool result, so the second pass can use it.
      const modelTurn = inference.candidates?.[0]?.content;
      if (modelTurn) {
        contents.push(modelTurn);
      }

      contents.push({
        role: "user",
        parts: [{
          functionResponse: {
            name: call.name as string,
            response: { res: functionResponse },
          },
        }],
      });

      inference = await c.ai.models.generateContent({
        ...config,
        contents,
      });
    }
  }

  return inference;
}
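/**
 * Builds the outgoing response as a RichText, appending numbered citation
 * links (e.g. "[1]") after each grounded text segment reported in the
 * inference's grounding metadata. Falls back to the plain text when no
 * grounding data is present.
 */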
function addCitations(
  inference: Awaited<ReturnType<typeof c.ai.models.generateContent>>,
) {
  const originalText = inference.text ?? "";
  if (!inference.candidates) {
    return originalText;
  }
  const supports = inference.candidates[0]?.groundingMetadata
    ?.groundingSupports;
  const chunks = inference.candidates[0]?.groundingMetadata?.groundingChunks;

  const richText = new RichText();

  if (!supports || !chunks || originalText === "") {
    return richText.addText(originalText);
  }

  // Walk the supports in ascending endIndex order with a cursor, so text
  // segments and their citation links are appended in document order
  // (RichText is an append-only builder).
  const sortedSupports = [...supports].sort(
    (a, b) => (a.segment?.endIndex ?? 0) - (b.segment?.endIndex ?? 0),
  );

  let cursor = 0;

  for (const support of sortedSupports) {
    const endIndex = support.segment?.endIndex;
    if (endIndex === undefined || !support.groundingChunkIndices?.length) {
      continue;
    }

    const citationLinks = support.groundingChunkIndices
      .map((i) => {
        const uri = chunks[i]?.web?.uri;
        if (uri) {
          return { index: i + 1, uri };
        }
        return null;
      })
      .filter(Boolean);

    if (citationLinks.length > 0) {
      richText.addText(originalText.slice(cursor, endIndex));

      citationLinks.forEach((citation, idx) => {
        if (citation) {
          richText.addLink(`[${citation.index}]`, citation.uri);
          if (idx < citationLinks.length - 1) {
            richText.addText(", ");
          }
        }
      });

      cursor = endIndex;
    }
  }

  richText.addText(originalText.slice(cursor));

  return richText;
}
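/**
 * Entry point for incoming chat messages: verifies the sender is
 * authorized, enforces the daily query limit for non-admin users, then
 * generates and sends a Gemini response (split into multiple messages
 * when it exceeds the grapheme limit).
 */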
export async function handler(message: ChatMessage): Promise<void> {
  const conversation = await message.getConversation();
  // Conversations should always resolve, but guard just in case:
  if (!conversation) {
    logger.error("Cannot find conversation");
    return;
  }

  const authorized = env.AUTHORIZED_USERS == null ||
    (env.AUTHORIZED_USERS as readonly string[]).includes(message.senderDid);

  if (!authorized) {
    await conversation.sendMessage({
      text: c.UNAUTHORIZED_MESSAGE,
    });

    return;
  }

  // Non-admin senders are limited to DAILY_QUERY_LIMIT queries per day.
  if (message.senderDid !== env.ADMIN_DID) {
    const todayStart = new Date();
    todayStart.setHours(0, 0, 0, 0);

    const dailyCount = await db
      .select({ count: count(messages.id) })
      .from(messages)
      .where(
        and(
          eq(messages.did, message.senderDid),
          gte(messages.created_at, todayStart),
        ),
      );

    if (dailyCount[0]!.count >= env.DAILY_QUERY_LIMIT) {
      await conversation.sendMessage({
        text: c.QUOTA_EXCEEDED_MESSAGE,
      });
      return;
    }
  }

  logger.success("Found conversation");
  // Send a placeholder so the user sees activity while inference runs.
  await conversation.sendMessage({
    text: "...",
  });

  const parsedConversation = await parseConversation(conversation, message);

  try {
    const inference = await generateAIResponse(
      parsedConversation.context,
      parsedConversation.messages,
    );
    if (!inference) {
      logger.error("Failed to generate text. Returned undefined.");
      return;
    }

    const responseText = inference.text;
    const responseWithCitations = addCitations(inference);

    if (responseWithCitations) {
      logger.success("Generated text:", responseText);
      await saveMessage(conversation, env.DID, responseText ?? "");

      // Responses over the grapheme limit are split and sent in parts.
      if (exceedsGraphemes(responseWithCitations)) {
        await multipartResponse(conversation, responseWithCitations);
      } else {
        await conversation.sendMessage({
          text: responseWithCitations,
        });
      }
    }
  } catch (error: any) {
    logger.error("Error in post handler:", error);
    let errorMsg = c.ERROR_MESSAGE;

    // Gemini reports an overloaded model as a 503; surface that case
    // with a friendlier message.
    if (error?.error?.code === 503) {
      errorMsg =
        "Sorry, the AI model is currently overloaded. Please try again later.";
    }

    await conversation.sendMessage({
      text: errorMsg,
    });
  }
}
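
/*
 * A minimal sketch of how this handler might be wired into a bot entry
 * point. This is illustrative only: the "./handlers/message" path and the
 * env.PASSWORD field are assumptions, while Bot, login(), and the
 * "message" event come from @skyware/bot's documented API.
 *
 *   import { Bot } from "@skyware/bot";
 *   import { env } from "./env";
 *   import { handler } from "./handlers/message";
 *
 *   const bot = new Bot();
 *   await bot.login({
 *     identifier: env.HANDLE,
 *     password: env.PASSWORD,
 *   });
 *   bot.on("message", (message) => void handler(message));
 */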