added a memory cooldown

This commit is contained in:
Luna
2026-03-03 20:34:17 +01:00
parent 7fc2c683cd
commit 65de299320
4 changed files with 29 additions and 3 deletions

View File

@@ -13,4 +13,6 @@ ENABLE_DASHBOARD=false
DASHBOARD_PORT=3000 DASHBOARD_PORT=3000
ENABLE_WEB_SEARCH=true ENABLE_WEB_SEARCH=true
OPENAI_API_KEY=your_openai_api_key OPENAI_API_KEY=your_openai_api_key
# Memory retrieval cooldown (ms) before the same long-term entry can be reused
MEMORY_COOLDOWN_MS=180000

View File

@@ -102,8 +102,9 @@ Nova is a friendly, slightly witty Discord companion that chats naturally in DMs
- **Embedding math:** `text-embedding-3-small` returns 1,536 floating-point numbers for each text chunk. That giant array is a vector map of the message's meaning; similar moments land near each other in 1,536-dimensional space. - **Embedding math:** `text-embedding-3-small` returns 1,536 floating-point numbers for each text chunk. That giant array is a vector map of the message's meaning; similar moments land near each other in 1,536-dimensional space.
- **What gets embedded:** After every user→bot turn, `recordInteraction()` (see [src/memory.js](src/memory.js)) bundles the pair, scores its importance, asks OpenAI for an embedding, and stores `{ content, embedding, importance, timestamp }` inside the SQLite tables. - **What gets embedded:** After every user→bot turn, `recordInteraction()` (see [src/memory.js](src/memory.js)) bundles the pair, scores its importance, asks OpenAI for an embedding, and stores `{ content, embedding, importance, timestamp }` inside the SQLite tables.
- **Why so many numbers:** Cosine similarity needs raw vectors to compare new thoughts to past ones. When a fresh message arrives, `retrieveRelevantMemories()` embeds it too, calculates cosine similarity against every stored vector, adds a small importance boost, and returns the top five memories to inject into the system prompt. - **Why so many numbers:** Cosine similarity needs raw vectors to compare new thoughts to past ones. When a fresh message arrives, `retrieveRelevantMemories()` embeds it too, calculates cosine similarity against every stored vector, adds a small importance boost, and returns the top five memories to inject into the system prompt.
- **Self-cleaning:** If the DB grows past the configured limits, low-importance items are trimmed, summaries compress the short-term transcript, and you can delete `data/memory.sqlite` to reset everything cleanly. - **Memory cooldown:** `MEMORY_COOLDOWN_MS` (defaults to 180000 ms) keeps a long-term memory out of the retrieval window for a few minutes after it was just used so Nova has to pull fresh context before repeating herself, while still falling back automatically if there isn't anything new to surface.
- **Self-cleaning:** If the DB grows past the configured limits, low-importance items are trimmed, summaries compress the short-term transcript, and you can delete `data/memory.sqlite` to reset everything cleanly.
### Migrating legacy `memory.json` ### Migrating legacy `memory.json`
- Keep your original `data/memory.json` in place and delete/rename `data/memory.sqlite` before launching the bot. - Keep your original `data/memory.json` in place and delete/rename `data/memory.sqlite` before launching the bot.

View File

@@ -34,6 +34,7 @@ export const config = {
legacyMemoryFile, legacyMemoryFile,
summaryTriggerChars: 2200, summaryTriggerChars: 2200,
memoryPruneThreshold: 0.2, memoryPruneThreshold: 0.2,
memoryCooldownMs: process.env.MEMORY_COOLDOWN_MS ? parseInt(process.env.MEMORY_COOLDOWN_MS, 10) : 3 * 60 * 1000,
maxMemories: 8000, maxMemories: 8000,
relevantMemoryCount: 3, relevantMemoryCount: 3,
longTermFetchLimit: 120, longTermFetchLimit: 120,

View File

@@ -41,6 +41,15 @@ const parseEmbedding = (raw) => {
} }
}; };
// userId -> (memoryId -> timestamp of last retrieval), used for the cooldown window.
const memoryUsageMap = new Map();

// Lazily creates and returns the per-user usage map so callers never see undefined.
const getMemoryUsageMapForUser = (userId) => {
  let usage = memoryUsageMap.get(userId);
  if (usage === undefined) {
    usage = new Map();
    memoryUsageMap.set(userId, usage);
  }
  return usage;
};
const __dirname = path.dirname(fileURLToPath(import.meta.url)); const __dirname = path.dirname(fileURLToPath(import.meta.url));
const wasmDir = path.resolve(__dirname, '../node_modules/sql.js/dist'); const wasmDir = path.resolve(__dirname, '../node_modules/sql.js/dist');
@@ -333,8 +342,16 @@ const retrieveRelevantMemories = async (db, userId, query, options = {}) => {
if (!rows.length) { if (!rows.length) {
return []; return [];
} }
const now = Date.now();
const cooldown = config.memoryCooldownMs || 0;
const usage = memoryUsageMap.get(userId);
const eligibleRows =
cooldown && usage
? rows.filter((entry) => now - (usage.get(entry.id) || 0) > cooldown)
: rows;
const rowsToScore = eligibleRows.length ? eligibleRows : rows;
const queryEmbedding = await createEmbedding(query); const queryEmbedding = await createEmbedding(query);
return rows const scored = rowsToScore
.map((entry) => { .map((entry) => {
const embedding = parseEmbedding(entry.embedding); const embedding = parseEmbedding(entry.embedding);
return { return {
@@ -345,6 +362,11 @@ const retrieveRelevantMemories = async (db, userId, query, options = {}) => {
}) })
.sort((a, b) => b.score - a.score) .sort((a, b) => b.score - a.score)
.slice(0, config.relevantMemoryCount); .slice(0, config.relevantMemoryCount);
if (scored.length) {
const usageMap = getMemoryUsageMapForUser(userId);
scored.forEach((entry) => usageMap.set(entry.id, now));
}
return scored;
}; };
export async function appendShortTerm(userId, role, content) { export async function appendShortTerm(userId, role, content) {