Add OpenAI fallback for chat completions (follow-up fix)

This commit is contained in:
Luna
2026-03-01 16:39:43 +01:00
parent f9bd2f1ee0
commit 96ddb6555c
3 changed files with 174 additions and 29 deletions

View File

@@ -530,7 +530,7 @@ client.on('messageCreate', async (message) => {
searchOutage: intelMeta.searchOutage,
});
const reply = await chatCompletion(messages, { temperature: 0.6, maxTokens: 200 });
const finalReply = (reply && reply.trim()) || "I'm here, just had a tiny brain freeze. Mind repeating that?";
const finalReply = (reply && reply.trim()) || "Brain crashed, Please try again";
const chunks = splitResponses(finalReply);
const outputs = chunks.length ? chunks : [finalReply];
@@ -544,7 +544,7 @@ client.on('messageCreate', async (message) => {
} catch (error) {
console.error('[bot] Failed to respond:', error);
if (!message.channel?.send) return;
await message.channel.send('Hit a snag reaching my brain server. Try again in a few seconds?');
await message.channel.send('Someone tell Luna there is a problem with my AI.');
}
});

View File

@@ -45,4 +45,7 @@ export const config = {
// signals to stop or the `continuationMaxProactive` limit is reached.
continuationIntervalMs: process.env.CONTINUATION_INTERVAL_MS ? parseInt(process.env.CONTINUATION_INTERVAL_MS, 10) : 10000,
continuationMaxProactive: process.env.CONTINUATION_MAX_PROACTIVE ? parseInt(process.env.CONTINUATION_MAX_PROACTIVE, 10) : 10,
openaiKey: process.env.OPENAI_API_KEY || '',
openaiModel: process.env.OPENAI_MODEL || 'gpt-3.5-turbo',
enableFallbackOpenAI: process.env.ENABLE_OPENAI_FALLBACK === 'true',
};

View File

@@ -9,19 +9,44 @@ async function withRetry(fn, attempts = 3, delayMs = 1500) {
return await fn();
} catch (err) {
lastErr = err;
const status = err?.status || (err?.response && err.response.status) || err?.statusCode || 0;
const status =
err?.status ||
(err?.response && err.response.status) ||
err?.statusCode ||
0;
const code = err?.code || err?.name || '';
const retryableNetworkCodes = ['UND_ERR_CONNECT_TIMEOUT', 'ECONNRESET', 'ETIMEDOUT', 'ENOTFOUND', 'ECONNREFUSED', 'EAI_AGAIN'];
const isRetryableNetworkError = retryableNetworkCodes.includes(code);
if (status === 429 || status >= 500 || isRetryableNetworkError) {
const backoff = delayMs * Math.pow(2, i); // exponential backoff
console.warn(`[openrouter] retry ${i + 1}/${attempts} after ${backoff}ms due to status=${status} code=${code}`);
const retryableNetworkCodes = [
'UND_ERR_CONNECT_TIMEOUT',
'ECONNRESET',
'ETIMEDOUT',
'ENOTFOUND',
'ECONNREFUSED',
'EAI_AGAIN',
];
const isRetryableNetworkError =
retryableNetworkCodes.includes(code);
if (
status === 429 ||
status >= 500 ||
isRetryableNetworkError
) {
const backoff = delayMs * Math.pow(2, i);
console.warn(
`[openrouter] retry ${i + 1}/${attempts} after ${backoff}ms`
);
await sleep(backoff);
continue;
}
break;
}
}
throw lastErr;
}
@@ -30,16 +55,23 @@ function buildHeaders() {
Authorization: `Bearer ${config.openRouterKey}`,
'Content-Type': 'application/json',
};
if (config.openrouterReferer) headers['HTTP-Referer'] = config.openrouterReferer;
if (config.openrouterTitle) headers['X-OpenRouter-Title'] = config.openrouterTitle;
if (config.openrouterReferer)
headers['HTTP-Referer'] = config.openrouterReferer;
if (config.openrouterTitle)
headers['X-OpenRouter-Title'] = config.openrouterTitle;
return headers;
}
async function postJson(path, body) {
const url = `https://openrouter.ai/api/v1${path}`;
const headers = buildHeaders();
const controller = new AbortController();
const timeout = config.openrouterTimeoutMs || 30000;
const timeoutId = setTimeout(() => {
const e = new Error(`Request timed out after ${timeout}ms`);
e.code = 'UND_ERR_CONNECT_TIMEOUT';
@@ -48,33 +80,91 @@ async function postJson(path, body) {
}, timeout);
try {
const res = await fetch(url, { method: 'POST', headers, body: JSON.stringify(body), signal: controller.signal });
const res = await fetch(url, {
method: 'POST',
headers,
body: JSON.stringify(body),
signal: controller.signal,
});
if (!res.ok) {
const text = await res.text().catch(() => '');
const err = new Error(`OpenRouter ${res.status} ${res.statusText}: ${text}`);
const err = new Error(
`OpenRouter ${res.status} ${res.statusText}: ${text}`
);
err.status = res.status;
throw err;
}
return res.json();
} catch (err) {
if (err.name === 'AbortError' || err.message?.includes('timed out')) {
if (
err.name === 'AbortError' ||
err.message?.includes('timed out')
) {
const e = new Error(`Connect Timeout Error after ${timeout}ms`);
e.code = 'UND_ERR_CONNECT_TIMEOUT';
throw e;
}
throw err;
} finally {
clearTimeout(timeoutId);
}
}
/**
 * Direct call to the OpenAI Chat Completions API, used as a fallback when the
 * primary OpenRouter request fails or returns an empty response.
 *
 * @param {Array<{role: string, content: string}>} messages - Chat transcript.
 * @param {object} [options]
 * @param {string} [options.model] - OpenAI model id; defaults to `config.openaiModel`.
 * @param {number} [options.temperature]
 * @param {number} [options.maxTokens] - Sent as `max_tokens`.
 * @returns {Promise<string>} The completion text, or '' if the response had none.
 * @throws If the fallback is disabled, the API key is missing, or the HTTP call fails.
 */
async function openAiFallbackChat(messages, options = {}) {
  if (!config.enableFallbackOpenAI || !config.openaiKey) {
    throw new Error('OpenAI fallback disabled or missing API key');
  }
  const {
    model = config.openaiModel || 'gpt-3.5-turbo',
    temperature = 0.7,
    maxTokens = 400,
  } = options;
  // Abort like postJson does, so a hung fallback request cannot block the
  // caller forever. Reuses the same timeout budget as the primary call.
  const timeoutMs = config.openrouterTimeoutMs || 30000;
  const res = await fetch('https://api.openai.com/v1/chat/completions', {
    method: 'POST',
    headers: {
      Authorization: `Bearer ${config.openaiKey}`,
      'Content-Type': 'application/json',
    },
    body: JSON.stringify({
      model,
      messages,
      temperature,
      max_tokens: maxTokens,
    }),
    signal: AbortSignal.timeout(timeoutMs),
  });
  if (!res.ok) {
    const text = await res.text().catch(() => '');
    throw new Error(`OpenAI fallback failed: ${res.status} ${text}`);
  }
  const data = await res.json();
  return data?.choices?.[0]?.message?.content || '';
}
/**
 * Request a chat completion from the primary OpenRouter model, retrying
 * transient failures, and fall back to OpenAI when the primary call fails or
 * returns an empty response.
 *
 * @param {Array<{role: string, content: string}>} messages - Chat transcript.
 * @param {object} [options]
 * @param {string} [options.model] - Primary (OpenRouter) model id.
 * @param {number} [options.temperature]
 * @param {number} [options.maxTokens]
 * @param {boolean} [options.fallback=true] - Whether the OpenAI fallback may run.
 * @returns {Promise<string>} Trimmed primary completion text, or the raw
 *   fallback text when the fallback path is used.
 * @throws The fallback error when both providers fail, or the primary error
 *   when `fallback` is false.
 */
export async function chatCompletion(messages, options = {}) {
  const {
    model = config.chatModel,
    temperature = 0.7,
    maxTokens = 400,
    fallback = true,
  } = options;
  try {
    const payload = {
      model,
      messages,
      temperature,
      max_tokens: maxTokens,
    };
    const data = await withRetry(() => postJson('/chat/completions', payload));
    const text =
      data?.choices?.[0]?.message?.content ||
      data?.choices?.[0]?.text ||
      '';
    if (text && String(text).trim()) {
      return String(text).trim();
    }
    // An empty body is treated the same as a transport failure so the
    // fallback path below gets a chance to answer.
    throw new Error('Empty response from primary model');
  } catch (err) {
    console.warn('[chatCompletion] primary model failed:', err?.message);
    if (!fallback) throw err;
    try {
      // Forward only generation knobs — never `model`, which would send an
      // OpenRouter model id to the OpenAI API.
      return await openAiFallbackChat(messages, { temperature, maxTokens });
    } catch (fallbackErr) {
      console.error(
        '[chatCompletion] fallback model also failed:',
        fallbackErr?.message
      );
      throw fallbackErr;
    }
  }
}
/**
 * Create an embedding vector for `text` via the OpenRouter embeddings
 * endpoint (with retry).
 *
 * @param {string} text - Input to embed; empty/whitespace-only input is a no-op.
 * @returns {Promise<number[]>} The embedding, or [] when input is blank or
 *   the response carried no embedding.
 */
export async function createEmbedding(text) {
  if (!text || !text.trim()) return [];
  const payload = { model: config.embedModel, input: text };
  const data = await withRetry(() => postJson('/embeddings', payload));
  return data?.data?.[0]?.embedding || [];
}
/**
 * Compress a chunk of Discord conversation into (or onto) a running summary
 * using the chat model at low temperature.
 *
 * @param {string} summarySoFar - Existing summary; falsy means no prior summary.
 * @param {string} transcriptChunk - New messages to fold into the summary.
 * @returns {Promise<string>} Updated summary text (max ~120 words per the prompt).
 */
export async function summarizeConversation(summarySoFar, transcriptChunk) {
  const system = {
    role: 'system',
    content:
      'You compress Discord chats. Keep tone casual, capture facts, goals, and emotional state. Max 120 words.',
  };
  const prompt = `Existing summary (can be empty): ${summarySoFar || 'None'}\nNew messages:\n${transcriptChunk}`;
  const user = { role: 'user', content: prompt };
  return chatCompletion([system, user], { temperature: 0.4, maxTokens: 180 });
}