From 96ddb6555c898b493b0893e93c84221dc506bdec Mon Sep 17 00:00:00 2001 From: Luna Date: Sun, 1 Mar 2026 16:39:43 +0100 Subject: [PATCH] Add OpenAI fallback for chat completions when OpenRouter fails --- src/bot.js | 4 +- src/config.js | 3 + src/openai.js | 196 +++++++++++++++++++++++++++++++++++++++++++------- 3 files changed, 174 insertions(+), 29 deletions(-) diff --git a/src/bot.js b/src/bot.js index ff34c75..9d22075 100644 --- a/src/bot.js +++ b/src/bot.js @@ -530,7 +530,7 @@ client.on('messageCreate', async (message) => { searchOutage: intelMeta.searchOutage, }); const reply = await chatCompletion(messages, { temperature: 0.6, maxTokens: 200 }); - const finalReply = (reply && reply.trim()) || "I'm here, just had a tiny brain freeze. Mind repeating that?"; + const finalReply = (reply && reply.trim()) || "Brain crashed. Please try again."; const chunks = splitResponses(finalReply); const outputs = chunks.length ? chunks : [finalReply]; @@ -544,7 +544,7 @@ client.on('messageCreate', async (message) => { } catch (error) { console.error('[bot] Failed to respond:', error); if (!message.channel?.send) return; - await message.channel.send('Hit a snag reaching my brain server. Try again in a few seconds?'); + await message.channel.send('Someone tell Luna there is a problem with my AI.'); } }); diff --git a/src/config.js b/src/config.js index a6a7e3c..f1155ab 100644 --- a/src/config.js +++ b/src/config.js @@ -45,4 +45,7 @@ export const config = { // signals to stop or the `continuationMaxProactive` limit is reached. continuationIntervalMs: process.env.CONTINUATION_INTERVAL_MS ? parseInt(process.env.CONTINUATION_INTERVAL_MS, 10) : 10000, continuationMaxProactive: process.env.CONTINUATION_MAX_PROACTIVE ? 
parseInt(process.env.CONTINUATION_MAX_PROACTIVE, 10) : 10, + openaiKey: process.env.OPENAI_API_KEY || '', + openaiModel: process.env.OPENAI_MODEL || 'gpt-3.5-turbo', + enableFallbackOpenAI: process.env.ENABLE_OPENAI_FALLBACK === 'true', }; diff --git a/src/openai.js b/src/openai.js index 85f38bc..dfbe4e9 100644 --- a/src/openai.js +++ b/src/openai.js @@ -9,19 +9,44 @@ async function withRetry(fn, attempts = 3, delayMs = 1500) { return await fn(); } catch (err) { lastErr = err; - const status = err?.status || (err?.response && err.response.status) || err?.statusCode || 0; + + const status = + err?.status || + (err?.response && err.response.status) || + err?.statusCode || + 0; + const code = err?.code || err?.name || ''; - const retryableNetworkCodes = ['UND_ERR_CONNECT_TIMEOUT', 'ECONNRESET', 'ETIMEDOUT', 'ENOTFOUND', 'ECONNREFUSED', 'EAI_AGAIN']; - const isRetryableNetworkError = retryableNetworkCodes.includes(code); - if (status === 429 || status >= 500 || isRetryableNetworkError) { - const backoff = delayMs * Math.pow(2, i); // exponential backoff - console.warn(`[openrouter] retry ${i + 1}/${attempts} after ${backoff}ms due to status=${status} code=${code}`); + + const retryableNetworkCodes = [ + 'UND_ERR_CONNECT_TIMEOUT', + 'ECONNRESET', + 'ETIMEDOUT', + 'ENOTFOUND', + 'ECONNREFUSED', + 'EAI_AGAIN', + ]; + + const isRetryableNetworkError = + retryableNetworkCodes.includes(code); + + if ( + status === 429 || + status >= 500 || + isRetryableNetworkError + ) { + const backoff = delayMs * Math.pow(2, i); + console.warn( + `[openrouter] retry ${i + 1}/${attempts} after ${backoff}ms` + ); await sleep(backoff); continue; } + break; } } + throw lastErr; } @@ -30,16 +55,23 @@ function buildHeaders() { Authorization: `Bearer ${config.openRouterKey}`, 'Content-Type': 'application/json', }; - if (config.openrouterReferer) headers['HTTP-Referer'] = config.openrouterReferer; - if (config.openrouterTitle) headers['X-OpenRouter-Title'] = config.openrouterTitle; + + if 
(config.openrouterReferer) + headers['HTTP-Referer'] = config.openrouterReferer; + + if (config.openrouterTitle) + headers['X-OpenRouter-Title'] = config.openrouterTitle; + return headers; } async function postJson(path, body) { const url = `https://openrouter.ai/api/v1${path}`; const headers = buildHeaders(); + const controller = new AbortController(); const timeout = config.openrouterTimeoutMs || 30000; + const timeoutId = setTimeout(() => { const e = new Error(`Request timed out after ${timeout}ms`); e.code = 'UND_ERR_CONNECT_TIMEOUT'; @@ -48,55 +80,165 @@ async function postJson(path, body) { }, timeout); try { - const res = await fetch(url, { method: 'POST', headers, body: JSON.stringify(body), signal: controller.signal }); + const res = await fetch(url, { + method: 'POST', + headers, + body: JSON.stringify(body), + signal: controller.signal, + }); + if (!res.ok) { const text = await res.text().catch(() => ''); - const err = new Error(`OpenRouter ${res.status} ${res.statusText}: ${text}`); + const err = new Error( + `OpenRouter ${res.status} ${res.statusText}: ${text}` + ); err.status = res.status; throw err; } + return res.json(); } catch (err) { - if (err.name === 'AbortError' || err.message?.includes('timed out')) { + if ( + err.name === 'AbortError' || + err.message?.includes('timed out') + ) { const e = new Error(`Connect Timeout Error after ${timeout}ms`); e.code = 'UND_ERR_CONNECT_TIMEOUT'; throw e; } + throw err; } finally { clearTimeout(timeoutId); } } +async function openAiFallbackChat(messages, options = {}) { + if (!config.enableFallbackOpenAI || !config.openaiKey) { + throw new Error('OpenAI fallback disabled or missing API key'); + } + + const { + model = config.openaiModel || 'gpt-3.5-turbo', + temperature = 0.7, + maxTokens = 400, + } = options; + + const res = await fetch( + 'https://api.openai.com/v1/chat/completions', + { + method: 'POST', + headers: { + Authorization: `Bearer ${config.openaiKey}`, + 'Content-Type': 'application/json', + }, 
+ body: JSON.stringify({ + model, + messages, + temperature, + max_tokens: maxTokens, + }), + } + ); + + if (!res.ok) { + const text = await res.text().catch(() => ''); + throw new Error( + `OpenAI fallback failed: ${res.status} ${text}` + ); + } + + const data = await res.json(); + + return ( + data?.choices?.[0]?.message?.content || + '' + ); +} + export async function chatCompletion(messages, options = {}) { const { model = config.chatModel, temperature = 0.7, maxTokens = 400, + fallback = true, } = options; - const payload = { - model, - messages, - temperature, - max_tokens: maxTokens, - }; + try { + const payload = { + model, + messages, + temperature, + max_tokens: maxTokens, + }; - const data = await withRetry(() => postJson('/chat/completions', payload)); - const text = data?.choices?.[0]?.message?.content || data?.choices?.[0]?.text || ''; - return (text && String(text).trim()) || ''; + const data = await withRetry(() => + postJson('/chat/completions', payload) + ); + + const text = + data?.choices?.[0]?.message?.content || + data?.choices?.[0]?.text || + ''; + + if (text && String(text).trim()) { + return String(text).trim(); + } + + throw new Error('Empty response from primary model'); + } catch (err) { + console.warn( + '[chatCompletion] primary model failed:', + err?.message + ); + + if (!fallback) throw err; + + try { + return await openAiFallbackChat(messages, options); + } catch (fallbackErr) { + console.error( + '[chatCompletion] fallback model also failed:', + fallbackErr?.message + ); + throw fallbackErr; + } + } } export async function createEmbedding(text) { if (!text || !text.trim()) return []; - const payload = { model: config.embedModel, input: text }; - const data = await withRetry(() => postJson('/embeddings', payload)); + + const payload = { + model: config.embedModel, + input: text, + }; + + const data = await withRetry(() => + postJson('/embeddings', payload) + ); + return data?.data?.[0]?.embedding || []; } -export async function 
summarizeConversation(summarySoFar, transcriptChunk) { - const system = { role: 'system', content: 'You compress Discord chats. Keep tone casual, capture facts, goals, and emotional state. Max 120 words.' }; - const prompt = `Existing summary (can be empty): ${summarySoFar || 'None'}\nNew messages:\n${transcriptChunk}`; +export async function summarizeConversation( + summarySoFar, + transcriptChunk +) { + const system = { + role: 'system', + content: + 'You compress Discord chats. Keep tone casual, capture facts, goals, and emotional state. Max 120 words.', + }; + + const prompt = `Existing summary (can be empty): ${ + summarySoFar || 'None' + }\nNew messages:\n${transcriptChunk}`; + const user = { role: 'user', content: prompt }; - return chatCompletion([system, user], { temperature: 0.4, maxTokens: 180 }); -} + + return chatCompletion([system, user], { + temperature: 0.4, + maxTokens: 180, + }); +} \ No newline at end of file