{
2 "dataset_id": "internal-eval-template",
3 "description": "Minimal template for the exact retrieval benchmark harness.",
4 "memories": [
5 {
6 "memory_id": 1,
7 "namespace": "default",
8 "layer": "L1",
9 "text": "on april 20 i moved the chat model serving from lm studio to llama-server.",
10 "event_time": 1776643200,
11 "status": "active",
12 "source_ref": "example:chat:1",
13 "tags": ["project:klbr", "topic:llama-server"]
14 },
15 {
16 "memory_id": 2,
17 "namespace": "default",
18 "layer": "L1",
19 "text": "the embedding server should stay separate and use bge-m3 with dimension 1024.",
20 "event_time": 1776729600,
21 "status": "active",
22 "source_ref": "example:chat:2",
23 "tags": ["project:klbr", "topic:embeddings"]
24 }
25 ],
26 "queries": [
27 {
28 "query_id": "q1",
29 "split": "dev",
30 "category": "exact recent event",
31 "namespace": "default",
32 "text": "what did i switch from lm studio to?",
33 "gold_memory_ids": [1],
34 "no_hit": false,
35 "reference_time": 1776816000
36 },
37 {
38 "query_id": "q2",
39 "split": "dev",
40 "category": "conflict / update question",
41 "namespace": "default",
42 "text": "what embedding model are we using?",
43 "gold_memory_ids": [2],
44 "no_hit": false,
45 "reference_time": 1776816000
46 },
47 {
48 "query_id": "q3",
49 "split": "test",
50 "category": "no-hit query",
51 "namespace": "default",
52 "text": "what city did i move to?",
53 "gold_memory_ids": [],
54 "no_hit": true,
55 "reference_time": 1776816000
56 }
57 ]
58}