diff --git a/src/execution.ts b/src/execution.ts
index 5ece5a1..feb4bba 100644
--- a/src/execution.ts
+++ b/src/execution.ts
@@ -1,4 +1,5 @@
 import DiscordApi, { GuildTextBasedChannel } from "discord.js";
+import { ChatCompletionRequestMessage, ChatCompletionResponseMessage } from "openai";
 import { database, openai } from "./index";
 import Moderation from "./moderation";
@@ -220,6 +221,7 @@ async function executeFromQueue(channel: string) {
     const channelQueue = channelsRunning.get(channel) as ChannelQueue;
     const message = channelQueue.at(0) as RequestMessage;
     let functionRanCounter = 0;
+    let OpenAImessages: ChatCompletionRequestMessage[] = [];
 
     // ignore if we can't even send anything to reply
     if (!canReplyToRequest(message)) return;
@@ -238,28 +240,11 @@ async function executeFromQueue(channel: string) {
             message.deferReply();
         }
 
-        const OpenAImessages = toOpenAIMessages(messages);
-        let answer = await openai.createChatCompletion({
-            ...config.chatCompletionConfig,
-            messages: OpenAImessages,
-            // FIXME: don't use new instance of FunctionManager
-            functions: new FunctionManager().getFunctions(),
-        });
+        OpenAImessages = toOpenAIMessages(messages);
+        let generatedMessage: ChatCompletionResponseMessage | undefined = undefined;
+        let answer: Awaited<ReturnType<typeof openai.createChatCompletion>>;
 
-        logUsedTokens(answer, message, ++functionRanCounter);
-
-        let generatedMessage = answer.data.choices[0].message;
-        if (!generatedMessage) throw new Error("empty message received");
-
-        // handle function calls
-        while (generatedMessage.function_call) {
-            OpenAImessages.push(generatedMessage);
-            OpenAImessages.push({
-                role: "function",
-                name: generatedMessage.function_call.name,
-                // FIXME: don't use new instance of FunctionManager
-                content: new FunctionManager().handleFunction(generatedMessage.function_call),
-            });
+        do {
             answer = await openai.createChatCompletion({
                 ...config.chatCompletionConfig,
                 messages: OpenAImessages,
@@ -270,8 +255,19 @@ async function executeFromQueue(channel: string) {
             logUsedTokens(answer, message, ++functionRanCounter);
             generatedMessage = answer.data.choices[0].message;
-            if (!generatedMessage) throw new Error("empty message received");
-        }
+            if (!generatedMessage) throw new Error("Empty message received");
+
+            // handle function calls
+            if (generatedMessage.function_call) {
+                OpenAImessages.push(generatedMessage);
+                OpenAImessages.push({
+                    role: "function",
+                    name: generatedMessage.function_call.name,
+                    // FIXME: don't use new instance of FunctionManager
+                    content: new FunctionManager().handleFunction(generatedMessage.function_call),
+                });
+            }
+        } while (generatedMessage.function_call);
 
         const answerContent = answer.data.choices[0].message?.content;
@@ -299,6 +295,10 @@ async function executeFromQueue(channel: string) {
     } catch (e) {
        console.error(`Error ocurred while handling chat completion request (${(e as object).constructor.name}):`);
        console.error(e);
+       if (OpenAImessages.length != 0) {
+           console.error("Messages:");
+           console.error(OpenAImessages);
+       }
 
        let errorText = "\n";
diff --git a/src/funcitonManager.ts b/src/funcitonManager.ts
index cbc2c99..008c14e 100644
--- a/src/funcitonManager.ts
+++ b/src/funcitonManager.ts
@@ -61,9 +61,14 @@ export default class FunctionManager {
     }
 
     public handleFunction(request: ChatCompletionRequestMessageFunctionCall) {
-
-        const parsedArguments = JSON.parse(request.arguments ?? "");
-        return this.store.get(request.name ?? "")?.execute(parsedArguments);
+        try {
+            const parsedArguments = JSON.parse(request.arguments ?? "");
+            return this.store.get(request.name ?? "")?.execute(parsedArguments);
+        }
+        catch (e) {
+            console.error("Function arguments raw: " + request.arguments);
+            throw new Error(`Failed to parse the function JSON arguments when running function [${request.name}]`, {cause: e});
+        }
     }
 }