diff --git a/src/commands/ask.ts b/src/commands/ask.ts
deleted file mode 100644
index ea39da7..0000000
--- a/src/commands/ask.ts
+++ /dev/null
@@ -1,74 +0,0 @@
-import {
-  APIApplicationCommandOption
-, ApplicationCommandOptionType
-, ApplicationCommandType
-, ChatInputCommandInteraction
-} from "discord.js";
-import { ChatCompletionMessageParam } from "openai/resources";
-
-import
-  Command
-,{ApplicationIntegrationType
-, InteractionContextTypes
-} from "../command";
-import { config } from "../index";
-import { executeChatCompletion, replyInMultiMessage } from "../execution";
-import { formatName } from "../toOpenAIMessages";
-
-export default class Ask extends Command implements Command {
-  name = "ask";
-  description = "Promts the bot to reply to a single message without any history context";
-  type = ApplicationCommandType.ChatInput;
-  options: APIApplicationCommandOption[] = [
-    {
-      name: "content",
-      description: "The content of the prompt",
-      type: ApplicationCommandOptionType.String,
-      required: true,
-    },
-    {
-      name: "ephemeral",
-      description: "if true, only you can see the response (default true)",
-      type: ApplicationCommandOptionType.Boolean,
-      required: false,
-    }
-  ];
-  integration_types = [
-    ApplicationIntegrationType.Guild_Install,
-    ApplicationIntegrationType.User_Install,
-  ];
-  contexts = [
-    InteractionContextTypes.Guild,
-    InteractionContextTypes.BotDM,
-    InteractionContextTypes.PrivateChannel,
-  ];
-
-  async execute(interaction: ChatInputCommandInteraction) {
-    const content = interaction.options.getString("content", true);
-    const ephemeral = interaction.options.getBoolean("ephemeral", false) ?? true;
-
-    if (!interaction.channel && !interaction.channelId) {
-      console.error("No channel found in interaction");
-      console.error(interaction);
-      await interaction.reply({
-        content: "No channel found in interaction???",
-        ephemeral: true
-      });
-      return;
-    }
-
-    // TODO: check content in moderation API
-
-    const messages: ChatCompletionMessageParam[] = [
-      ...config.systemPrompt(interaction),
-      { role: "user", name: formatName(interaction.user.displayName), content }
-    ];
-
-    const [answer] = await Promise.all([
-      executeChatCompletion(messages, interaction),
-      interaction.deferReply({ ephemeral }),
-    ]);
-
-    await replyInMultiMessage(answer.choices[0].message.content, interaction);
-  }
-}
diff --git a/src/configDefault.ts b/src/configDefault.ts
index dcd4dd9..12ce515 100644
--- a/src/configDefault.ts
+++ b/src/configDefault.ts
@@ -1,3 +1,4 @@
+import { Message } from "discord.js";
 import {
   ChatCompletionMessageParam as OpenAIMessage,
   ChatCompletionCreateParamsNonStreaming as ChatCompletionRequestData,
@@ -5,7 +6,6 @@ import {
 import IQuota from "./IQuota";
 import MessageCount from "./quota/messageCount";
-import { apiRequest } from "./execution";
 
 export interface IConfigRequired {
   /** Tokens to authentiate with */
   readonly tokens: {
     readonly Discord: string;
     readonly OpenAI: string;
   };
   /** Messages to append at the start of every chat history when sending to API */
-  systemPrompt(context: apiRequest): OpenAIMessage[];
+  systemPrompt(context: Message): OpenAIMessage[];
   /** OpenAI model config */
-  readonly chatCompletionParams: Omit;
+  readonly chatCompletionParams: Omit;
   /** Limits for message selection */
   readonly readLimits: {
     /** Maximum message age to include (in miliseconds) */
diff --git a/src/execution.ts b/src/execution.ts
index 579a435..d68f7c0 100644
--- a/src/execution.ts
+++ b/src/execution.ts
@@ -73,7 +73,7 @@ export function getAuthor(request: apiRequest) {
  * @returns Promise of the done action
  */
 function requestReply(
-  request: apiRequest,
+  request: RequestMessage,
   message: DiscordApi.MessageReplyOptions & DiscordApi.InteractionReplyOptions,
   // TODO: add support for these below
   replyOptions: DiscordApi.MessageReplyOptions = {},
@@ -172,41 +172,29 @@ export async function queueRequest(request: apiRequest) {
  * Logs used tokens to the terminal and to the database
  * @param answer the response that OpenAI returned
  * @param message the message that initiated the execution
- * @param functionRan counter of how many function have been ran (to distinct records in database)
+ * @param functionRan counter of how many function have been ran
  */
 function logUsedTokens(
   answer: ChatCompletion,
-  message: apiRequest | undefined = undefined,
-  functionRan: number = 0,
+  message: RequestMessage,
+  functionRan: number,
 ) {
   const usage = answer.usage;
-  const functionNames =
-    answer.choices[0].message.tool_calls?.map(
-      v => v.type === "function" ? v.function.name : `[unknown type]`
-    );
+  const functionName = answer.choices[0].message?.function_call?.name;
   if (usage !== undefined) {
-    if (!message) {
-      // log usage to stdout even if we can't store it in database
-      console.warn(`Used ${usage.total_tokens} (${usage.prompt_tokens} + ${usage.completion_tokens}) tokens from unknown call`);
-      // it doesn't make sense to store usage in database if we don't know where it came from
-      return;
-    }
-    const channelName: string = !message.channel ? "[No channel]"
-      : !message.channel.isDMBased() ? `#${message.channel.name} (${message.guild?.name})`
-      : `#@${getAuthor(message).tag}`
-    ;
-    console.log(`Used ${usage.total_tokens} (${usage.prompt_tokens} + ${usage.completion_tokens}) tokens for ${getAuthor(message).tag} (${getAuthor(message).id}) in ${channelName}${functionNames && functionNames.length > 0 ? " [Tools: " + functionNames.join(", ") + "]" : ""}`);
+    const channelName: string = !message.channel.isDMBased() ? `${message.channel.name} (${message.guild?.name})` : `@${getAuthor(message).tag}`;
+    console.log(`Used ${usage.total_tokens} (${usage.prompt_tokens} + ${usage.completion_tokens}) tokens for ${getAuthor(message).tag} (${getAuthor(message).id}) in #${channelName}${functionName ? " [Function: " + functionName + "]" : ""}`);
     database.usage.create({
       data: {
         timestamp: message.createdAt,
         user: BigInt(getAuthor(message).id),
-        channel: BigInt(message.channelId ?? 0),
+        channel: BigInt(message.channelId),
         guild: message.guildId ? BigInt(message.guildId) : null,
         usageRequest: usage.prompt_tokens,
         usageResponse: usage.completion_tokens,
-        functionName: functionNames?.join(", ") ?? null,
-        functionRan: functionRan,
+        functionName: functionName ?? null,
+        functionRan: functionName ? functionRan : 0,
       }
     }).catch((e => {
       console.error("Failed to push to a database");
@@ -222,6 +210,7 @@ function logUsedTokens(
 async function executeFromQueue(channel: string) {
   const channelQueue = channelsRunning.get(channel) as ChannelsRunningValue;
   const message = channelQueue.at(0) as RequestMessage;
+  let functionRanCounter = 0;
   let OpenAImessages: ChatCompletionMessageParam[] = [];
 
   // ignore if we can't even send anything to reply
@@ -249,13 +238,54 @@ async function executeFromQueue(channel: string) {
     });
 
     OpenAImessages = toOpenAIMessages(messages.values());
-    const answer = await executeChatCompletion(OpenAImessages, message);
+    let generatedMessage: ChatCompletionMessage | undefined = undefined;
+    let answer: Awaited<ReturnType<typeof openai.chat.completions.create>>;
+
+    do {
+      answer = await openai.chat.completions.create({
+        ...config.chatCompletionParams,
+        messages: OpenAImessages,
+        // FIXME: don't use new instance of FunctionManager
+        tools: new FunctionManager().getToolsForOpenAi(),
+      });
+
+      logUsedTokens(answer, message, ++functionRanCounter);
+
+      generatedMessage = answer.choices[0].message;
+      if (!generatedMessage) throw new Error("Empty message received");
+
+      // handle tool calls
+      if (generatedMessage.tool_calls !== undefined && generatedMessage.tool_calls.length > 0) {
+        OpenAImessages.push(generatedMessage);
+        // FIXME: don't use new instance of FunctionManager
+        OpenAImessages.push(...(await new FunctionManager().handleToolCalls(generatedMessage.tool_calls)));
+      }
+    } while (generatedMessage.function_call);
 
     channelQueue.stopTyping();
     const answerContent = answer.choices[0].message?.content;
 
-    await replyInMultiMessage(answerContent, message);
+    if (answerContent === null || answerContent === "") {
+      if (message instanceof DiscordApi.Message) message.react("😶").catch(() => {/* GRACEFAIL: It's okay if the bot won't reply */});
+    }
+    else {
+      const answerMessagesContent :string[] = [""];
+      for (const i of answerContent.split(/\n\n/)) {
+        if (answerMessagesContent[answerMessagesContent.length-1].length + i.length < 2000) {
+          answerMessagesContent[answerMessagesContent.length-1] += "\n\n" + i;
+        }
+        else {
+          answerMessagesContent.push(i);
+        }
+      }
+
+      for (const i of answerMessagesContent) {
+        const response = requestReply(message, {content: i}, {allowedMentions: { repliedUser: false }});
+
+        await response.then(rval => Moderation.checkMessageNoReturn(rval));
+      }
+    }
 
   } catch (e) {
     let errorText: string = "";
     channelQueue.stopTyping();
@@ -309,70 +339,3 @@ async function executeFromQueue(channel: string) {
   else return executeFromQueue(channel);
 }
-
-/**
- * Replies to a message and splits to multiple messages if needed.
- * @param answerContent - The content of the answer.
- * @param message - The request message to reply to.
- */
-export async function replyInMultiMessage(answerContent: string | null, message: apiRequest) {
-  if (answerContent === null || answerContent === "") {
-    if (message instanceof DiscordApi.Message) message.react("😶").catch(() => { });
-  }
-  else {
-    const answerMessagesContent: string[] = [""];
-    for (const i of answerContent.split(/\n\n/)) {
-      if (answerMessagesContent[answerMessagesContent.length - 1].length + i.length < 2000) {
-        answerMessagesContent[answerMessagesContent.length - 1] += "\n\n" + i;
-      }
-      else {
-        answerMessagesContent.push(i);
-      }
-    }
-
-    for (const i of answerMessagesContent) {
-      const response = requestReply(message, { content: i }, { allowedMentions: { repliedUser: false } });
-
-      await response.then(rval => Moderation.checkMessageNoReturn(rval));
-    }
-  }
-}
-
-/**
- * Executes the chat completion process.
- *
- * @param OpenAImessages An array of ChatCompletionMessageParam objects representing the messages for chat completion.
- * @param message An optional RequestMessage object representing the request message, used for logging.
- * @returns A Promise that resolves to the answer from the chat completion process.
- */
-export async function executeChatCompletion(
-  OpenAImessages: ChatCompletionMessageParam[],
-  message: apiRequest | undefined,
-) {
-  let generatedMessage: ChatCompletionMessage | undefined = undefined;
-  let answer: Awaited<ReturnType<typeof openai.chat.completions.create>>;
-  let functionRanCounter = 0;
-
-  do {
-    answer = await openai.chat.completions.create({
-      ...config.chatCompletionParams,
-      messages: OpenAImessages,
-      // FIXME: don't use new instance of FunctionManager
-      tools: new FunctionManager().getToolsForOpenAi(),
-    });
-
-    functionRanCounter += answer.choices[0].message?.tool_calls?.length ?? 0;
-    logUsedTokens(answer, message, ++functionRanCounter);
-
-    generatedMessage = answer.choices[0].message;
-    if (!generatedMessage) throw new Error("Empty message received");
-
-    // handle tool calls
-    if (generatedMessage.tool_calls !== undefined && generatedMessage.tool_calls.length > 0) {
-      OpenAImessages.push(generatedMessage);
-      // FIXME: don't use new instance of FunctionManager
-      OpenAImessages.push(...(await new FunctionManager().handleToolCalls(generatedMessage.tool_calls)));
-    }
-  } while (generatedMessage.tool_calls !== undefined && generatedMessage.tool_calls.length > 0);
-  return answer;
-}
diff --git a/src/toOpenAIMessages.ts b/src/toOpenAIMessages.ts
index 54ad00f..f93d235 100644
--- a/src/toOpenAIMessages.ts
+++ b/src/toOpenAIMessages.ts
@@ -63,7 +63,7 @@ export function formatMessage(message: DiscordMessage): string {
  * @param name the name to format
  * @returns formatted name
  */
-export function formatName(name: string): string {
+function formatName(name: string): string {
   // replace all characters to ascii equivelant
   return FoldToAscii.foldReplacing(name)
   // White spaces are not allowed