diff --git a/src/execution.ts b/src/execution.ts
index f1b332c..38699eb 100644
--- a/src/execution.ts
+++ b/src/execution.ts
@@ -1,5 +1,6 @@
 import DiscordApi, { GuildTextBasedChannel } from "discord.js";
 import { ChatCompletionRequestMessage, ChatCompletionResponseMessage } from "openai";
+import Axios from "axios";
 
 import { database, openai } from "./index";
 import Moderation from "./moderation";
@@ -9,13 +10,19 @@ import FunctionManager from "./funcitonManager";
 
 type NonNullableInObject<T, V> = { [k in keyof T]: k extends V ? NonNullable<T[k]> : T[k] };
 type apiRequest = DiscordApi.Message | DiscordApi.RepliableInteraction;
-export type request = apiRequest & NonNullableInObject;
+export type RequestMessage = apiRequest & NonNullableInObject;
+
+class ChannelsRunningValue extends Array {
+  tries = 0;
+
+  shift() {
+    this.tries = 0;
+    return super.shift();
+  }
+}
 
 /** Stores the queue requests on the channels. */
-const channelsRunning: DiscordApi.Collection = new DiscordApi.Collection();
-
-type ChannelQueue = NonNullable>;
-type RequestMessage = NonNullable>;
+const channelsRunning: DiscordApi.Collection = new DiscordApi.Collection();
 
 /**
  * Gets the user that requested the execution
@@ -99,7 +106,7 @@ export async function getNthUseInLimitTimestamp(user: string | { id: string }, r
  * @returns Promise of the done action
  */
 function requestReply(
-  request: request,
+  request: RequestMessage,
   message: DiscordApi.MessageReplyOptions & DiscordApi.InteractionReplyOptions,
   // TODO: add support for these below
   replyOptions: DiscordApi.MessageReplyOptions = {},
@@ -170,10 +177,10 @@ export async function queueRequest(request: apiRequest) {
   const messagesForChannel = channelsRunning.ensure(
     request.channelId,
-    () => { return []; },
+    () => { return new ChannelsRunningValue; },
   );
 
   const shouldStart = messagesForChannel.length === 0;
-  messagesForChannel.push(request as request);
+  messagesForChannel.push(request as RequestMessage);
 
   if (shouldStart) void executeFromQueue(request.channelId);
 }
@@ -218,7 +225,7 @@ function logUsedTokens(
  * @param channel the channel to run the queue for
  */
 async function executeFromQueue(channel: string) {
-  const channelQueue = channelsRunning.get(channel) as ChannelQueue;
+  const channelQueue = channelsRunning.get(channel) as ChannelsRunningValue;
   const message = channelQueue.at(0) as RequestMessage;
   let functionRanCounter = 0;
   let OpenAImessages: ChatCompletionRequestMessage[] = [];
@@ -303,6 +310,10 @@ async function executeFromQueue(channel: string) {
 
       errorText += e.message;
     } else errorText = "";
+    if (Axios.isAxiosError(e) && e.code?.match(/^5..$/) && channelQueue.tries < 3) {
+      channelQueue.tries++;
+      return executeFromQueue(channel);
+    }
 
     await requestReply(
       message,
diff --git a/src/toOpenAIMessages.ts b/src/toOpenAIMessages.ts
index 0d5e1c2..598238d 100644
--- a/src/toOpenAIMessages.ts
+++ b/src/toOpenAIMessages.ts
@@ -4,14 +4,14 @@ import FoldToAscii from "fold-to-ascii";
 
 import config from "./config";
 import countTokens from "./tokenCounter";
-import { request } from "./execution";
+import { RequestMessage } from "./execution";
 
 /**
  * formats the request to use as a message contend in OpenAI api
 * @param request the request to format
 * @returns the formatted request
 */
-export async function formatRequestOrResponse(request: request | InteractionResponse): Promise {
+export async function formatRequestOrResponse(request: RequestMessage | InteractionResponse): Promise {
   if (request instanceof DiscordMessage) {
     return formatMessage(request);
   }
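
Design note on `ChannelsRunningValue` (not part of the diff itself): the subclass overrides `shift()` so the `tries` counter is scoped to whichever request is currently at the head of the queue; once that request is removed, the count starts over for the next one. Below is a standalone sketch of that pattern, with hypothetical names (`RetryQueue`, the string payloads) standing in for the real types.

```ts
// Minimal sketch: an Array subclass whose retry counter tracks only the
// element currently at the head of the queue.
class RetryQueue<T> extends Array<T> {
  tries = 0;

  // Removing the head element starts the count over for the next one.
  shift(): T | undefined {
    this.tries = 0;
    return super.shift();
  }
}

const queue = new RetryQueue<string>();
queue.push("first request", "second request");

queue.tries++;            // pretend the first request hit a 5xx and was retried once
queue.shift();            // first request finished (or gave up)

console.log(queue.tries); // 0 — the counter now applies to "second request"
```

Keeping the counter on the queue object rather than on each message works here because only the request at the head of the queue is ever retried.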
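
A hedged note on the retry condition added in `executeFromQueue`: as far as I know, axios stores string identifiers such as `"ECONNABORTED"` or `"ERR_BAD_RESPONSE"` in `AxiosError.code`, while the numeric HTTP status lives on `error.response?.status`, so `e.code?.match(/^5..$/)` may never match a 5xx reply. If the intent is to retry on server errors, a status-based guard along these lines might be closer. This is a sketch only, assuming the OpenAI client surfaces the underlying axios error unchanged; `isRetryableServerError` is a hypothetical helper, not part of the diff.

```ts
import Axios from "axios";

/**
 * True when the error is an axios error whose HTTP response status is 5xx.
 * Sketch only: name and placement are illustrative.
 */
function isRetryableServerError(e: unknown): boolean {
  if (!Axios.isAxiosError(e)) return false;
  const status = e.response?.status;
  return status !== undefined && status >= 500 && status < 600;
}

// Possible use at the retry site:
// if (isRetryableServerError(e) && channelQueue.tries < 3) {
//   channelQueue.tries++;
//   return executeFromQueue(channel);
// }
```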