added a memory cooldown
This commit is contained in:
@@ -13,4 +13,6 @@ ENABLE_DASHBOARD=false
|
||||
DASHBOARD_PORT=3000
|
||||
ENABLE_WEB_SEARCH=true
|
||||
OPENAI_API_KEY=your_openai_api_key
|
||||
# Memory retrieval cooldown (ms) before the same long-term entry can be reused
|
||||
MEMORY_COOLDOWN_MS=180000
|
||||
|
||||
|
||||
@@ -103,6 +103,7 @@ Nova is a friendly, slightly witty Discord companion that chats naturally in DMs
|
||||
- **Embedding math:** `text-embedding-3-small` returns 1,536 floating-point numbers for each text chunk. That giant array is a vector map of the message’s meaning; similar moments land near each other in 1,536-dimensional space.
|
||||
- **What gets embedded:** After every user→bot turn, `recordInteraction()` (see [src/memory.js](src/memory.js)) bundles the pair, scores its importance, asks OpenAI for an embedding, and stores `{ content, embedding, importance, timestamp }` inside the SQLite tables.
|
||||
- **Why so many numbers:** Cosine similarity needs raw vectors to compare new thoughts to past ones. When a fresh message arrives, `retrieveRelevantMemories()` embeds it too, calculates cosine similarity against every stored vector, adds a small importance boost, and returns the top five memories to inject into the system prompt.
|
||||
- **Memory cooldown:** `MEMORY_COOLDOWN_MS` (defaults to 180000 ms, i.e. 3 minutes) keeps a long-term memory out of the retrieval window for that interval after it was last used, so Nova pulls fresh context instead of repeating herself — while still falling back to the cooled-down memories automatically if nothing new is available to surface.
|
||||
- **Self-cleaning:** If the DB grows past the configured limits, low-importance items are trimmed, summaries compress the short-term transcript, and you can delete `data/memory.sqlite` to reset everything cleanly.
|
||||
|
||||
### Migrating legacy `memory.json`
|
||||
|
||||
@@ -34,6 +34,7 @@ export const config = {
|
||||
legacyMemoryFile,
|
||||
summaryTriggerChars: 2200,
|
||||
memoryPruneThreshold: 0.2,
|
||||
memoryCooldownMs: process.env.MEMORY_COOLDOWN_MS ? parseInt(process.env.MEMORY_COOLDOWN_MS, 10) : 3 * 60 * 1000,
|
||||
maxMemories: 8000,
|
||||
relevantMemoryCount: 3,
|
||||
longTermFetchLimit: 120,
|
||||
|
||||
@@ -41,6 +41,15 @@ const parseEmbedding = (raw) => {
|
||||
}
|
||||
};
|
||||
|
||||
// Tracks, per user, when each long-term memory was last surfaced:
// userId -> Map(memoryId -> last-used timestamp in ms).
const memoryUsageMap = new Map();

/**
 * Return the per-user usage map, creating an empty one on first access.
 * @param {string} userId - Discord user id the memories belong to.
 * @returns {Map<number, number>} map of memory id -> last-used timestamp (ms).
 */
const getMemoryUsageMapForUser = (userId) => {
  let perUserUsage = memoryUsageMap.get(userId);
  if (!perUserUsage) {
    perUserUsage = new Map();
    memoryUsageMap.set(userId, perUserUsage);
  }
  return perUserUsage;
};
|
||||
|
||||
const __dirname = path.dirname(fileURLToPath(import.meta.url));
|
||||
const wasmDir = path.resolve(__dirname, '../node_modules/sql.js/dist');
|
||||
|
||||
@@ -333,8 +342,16 @@ const retrieveRelevantMemories = async (db, userId, query, options = {}) => {
|
||||
if (!rows.length) {
|
||||
return [];
|
||||
}
|
||||
const now = Date.now();
|
||||
const cooldown = config.memoryCooldownMs || 0;
|
||||
const usage = memoryUsageMap.get(userId);
|
||||
const eligibleRows =
|
||||
cooldown && usage
|
||||
? rows.filter((entry) => now - (usage.get(entry.id) || 0) > cooldown)
|
||||
: rows;
|
||||
const rowsToScore = eligibleRows.length ? eligibleRows : rows;
|
||||
const queryEmbedding = await createEmbedding(query);
|
||||
return rows
|
||||
const scored = rowsToScore
|
||||
.map((entry) => {
|
||||
const embedding = parseEmbedding(entry.embedding);
|
||||
return {
|
||||
@@ -345,6 +362,11 @@ const retrieveRelevantMemories = async (db, userId, query, options = {}) => {
|
||||
})
|
||||
.sort((a, b) => b.score - a.score)
|
||||
.slice(0, config.relevantMemoryCount);
|
||||
if (scored.length) {
|
||||
const usageMap = getMemoryUsageMapForUser(userId);
|
||||
scored.forEach((entry) => usageMap.set(entry.id, now));
|
||||
}
|
||||
return scored;
|
||||
};
|
||||
|
||||
export async function appendShortTerm(userId, role, content) {
|
||||
|
||||
Reference in New Issue
Block a user