From 67a6e4d4864dd699dcfe14fb7634662d6b03fd2f Mon Sep 17 00:00:00 2001
From: Wroclaw
Date: Thu, 25 Apr 2024 01:10:30 +0200
Subject: [PATCH 1/6] execution+configDefault: retrofit for tool_calls api

---
 src/configDefault.ts |  2 +-
 src/execution.ts     | 14 +++++++++-----
 2 files changed, 10 insertions(+), 6 deletions(-)

diff --git a/src/configDefault.ts b/src/configDefault.ts
index 12ce515..620a363 100644
--- a/src/configDefault.ts
+++ b/src/configDefault.ts
@@ -16,7 +16,7 @@ export interface IConfigRequired {
   /** Messages to append at the start of every chat history when sending to API */
   systemPrompt(context: Message): OpenAIMessage[];
   /** OpenAI model config */
-  readonly chatCompletionParams: Omit;
+  readonly chatCompletionParams: Omit;
   /** Limits for message selection */
   readonly readLimits: {
     /** Maximum message age to include (in miliseconds) */
diff --git a/src/execution.ts b/src/execution.ts
index d68f7c0..1cfcaa8 100644
--- a/src/execution.ts
+++ b/src/execution.ts
@@ -180,10 +180,13 @@ function logUsedTokens(
   functionRan: number,
 ) {
   const usage = answer.usage;
-  const functionName = answer.choices[0].message?.function_call?.name;
+  const functionNames =
+    answer.choices[0].message.tool_calls?.map(
+      v => v.type === "function" ? v.function.name : `[unknown type]`
+    );
   if (usage !== undefined) {
     const channelName: string = !message.channel.isDMBased() ? `${message.channel.name} (${message.guild?.name})` : `@${getAuthor(message).tag}`;
-    console.log(`Used ${usage.total_tokens} (${usage.prompt_tokens} + ${usage.completion_tokens}) tokens for ${getAuthor(message).tag} (${getAuthor(message).id}) in #${channelName}${functionName ? " [Function: " + functionName + "]" : ""}`);
+    console.log(`Used ${usage.total_tokens} (${usage.prompt_tokens} + ${usage.completion_tokens}) tokens for ${getAuthor(message).tag} (${getAuthor(message).id}) in #${channelName}${functionNames && functionNames.length > 0 ? " [Tools: " + functionNames.join(", ") + "]" : ""}`);

     database.usage.create({
       data: {
@@ -193,8 +196,8 @@
         guild: message.guildId ? BigInt(message.guildId) : null,
         usageRequest: usage.prompt_tokens,
         usageResponse: usage.completion_tokens,
-        functionName: functionName ?? null,
-        functionRan: functionName ? functionRan : 0,
+        functionName: functionNames?.join(", ") ?? null,
+        functionRan: functionNames ? functionRan : 0,
       }
     }).catch((e => {
       console.error("Failed to push to a database");
@@ -249,6 +252,7 @@ async function executeFromQueue(channel: string) {
         tools: new FunctionManager().getToolsForOpenAi(),
       });

+      functionRanCounter += answer.choices[0].message?.tool_calls?.length ?? 0;
       logUsedTokens(answer, message, ++functionRanCounter);

       generatedMessage = answer.choices[0].message;
@@ -260,7 +264,7 @@ async function executeFromQueue(channel: string) {
         // FIXME: don't use new instance of FunctionManager
         OpenAImessages.push(...(await new FunctionManager().handleToolCalls(generatedMessage.tool_calls)));
       }
-    } while (generatedMessage.function_call);
+    } while (generatedMessage.tool_calls !== undefined && generatedMessage.tool_calls.length > 0);

     channelQueue.stopTyping();

From d3567c36070b0c3443d24ecb2182b6f97e8d1b81 Mon Sep 17 00:00:00 2001
From: Wroclaw
Date: Fri, 26 Apr 2024 04:02:09 +0200
Subject: [PATCH 2/6] execution: handle undefined message in logUsedTokens

---
 src/execution.ts | 14 ++++++++++----
 1 file changed, 10 insertions(+), 4 deletions(-)

diff --git a/src/execution.ts b/src/execution.ts
index 1cfcaa8..63679fa 100644
--- a/src/execution.ts
+++ b/src/execution.ts
@@ -172,12 +172,12 @@ export async function queueRequest(request: apiRequest) {
  * Logs used tokens to the terminal and to the database
  * @param answer the response that OpenAI returned
  * @param message the message that initiated the execution
- * @param functionRan counter of how many function have been ran
+ * @param functionRan counter of how many function have been ran (to distinct records in database)
  */
 function logUsedTokens(
   answer: ChatCompletion,
-  message: RequestMessage,
-  functionRan: number,
+  message: RequestMessage | undefined = undefined,
+  functionRan: number = 0,
 ) {
   const usage = answer.usage;
   const functionNames =
@@ -185,6 +185,12 @@
       v => v.type === "function" ? v.function.name : `[unknown type]`
     );
   if (usage !== undefined) {
+    if (!message) {
+      // log usage to stdout even if we can't store it in database
+      console.warn(`Used ${usage.total_tokens} (${usage.prompt_tokens} + ${usage.completion_tokens}) tokens from unknown call`);
+      // it doesn't make sense to store usage in database if we don't know where it came from
+      return;
+    }
     const channelName: string = !message.channel.isDMBased() ? `${message.channel.name} (${message.guild?.name})` : `@${getAuthor(message).tag}`;
     console.log(`Used ${usage.total_tokens} (${usage.prompt_tokens} + ${usage.completion_tokens}) tokens for ${getAuthor(message).tag} (${getAuthor(message).id}) in #${channelName}${functionNames && functionNames.length > 0 ? " [Tools: " + functionNames.join(", ") + "]" : ""}`);
@@ -197,7 +203,7 @@
         usageRequest: usage.prompt_tokens,
         usageResponse: usage.completion_tokens,
         functionName: functionNames?.join(", ") ?? null,
-        functionRan: functionNames ? functionRan : 0,
+        functionRan: functionRan,
       }
     }).catch((e => {
       console.error("Failed to push to a database");

From 482f72a4d12f85815f99eb0de89f132eb67185b3 Mon Sep 17 00:00:00 2001
From: Wroclaw
Date: Fri, 26 Apr 2024 04:03:34 +0200
Subject: [PATCH 3/6] execution: factor out chat completion process

---
 src/execution.ts | 65 +++++++++++++++++++++++++++++-------------------
 1 file changed, 40 insertions(+), 25 deletions(-)

diff --git a/src/execution.ts b/src/execution.ts
index 63679fa..4010b51 100644
--- a/src/execution.ts
+++ b/src/execution.ts
@@ -219,7 +219,6 @@ function logUsedTokens(
 async function executeFromQueue(channel: string) {
   const channelQueue = channelsRunning.get(channel) as ChannelsRunningValue;
   const message = channelQueue.at(0) as RequestMessage;
-  let functionRanCounter = 0;
   let OpenAImessages: ChatCompletionMessageParam[] = [];

   // ignore if we can't even send anything to reply
@@ -247,30 +246,7 @@ async function executeFromQueue(channel: string) {
     });

     OpenAImessages = toOpenAIMessages(messages.values());
-    let generatedMessage: ChatCompletionMessage | undefined = undefined;
-    let answer: Awaited>;
-
-    do {
-      answer = await openai.chat.completions.create({
-        ...config.chatCompletionParams,
-        messages: OpenAImessages,
-        // FIXME: don't use new instance of FunctionManager
-        tools: new FunctionManager().getToolsForOpenAi(),
-      });
-
-      functionRanCounter += answer.choices[0].message?.tool_calls?.length ?? 0;
-      logUsedTokens(answer, message, ++functionRanCounter);
-
-      generatedMessage = answer.choices[0].message;
-      if (!generatedMessage) throw new Error("Empty message received");
-
-      // handle tool calls
-      if (generatedMessage.tool_calls !== undefined && generatedMessage.tool_calls.length > 0) {
-        OpenAImessages.push(generatedMessage);
-        // FIXME: don't use new instance of FunctionManager
-        OpenAImessages.push(...(await new FunctionManager().handleToolCalls(generatedMessage.tool_calls)));
-      }
-    } while (generatedMessage.tool_calls !== undefined && generatedMessage.tool_calls.length > 0);
+    const answer = await executeChatCompletion(OpenAImessages, message);

     channelQueue.stopTyping();

@@ -349,3 +325,42 @@
   else
     return executeFromQueue(channel);
 }
+
+/**
+ * Executes the chat completion process.
+ *
+ * @param OpenAImessages An array of ChatCompletionMessageParam objects representing the messages for chat completion.
+ * @param message An optional RequestMessage object representing the request message, used for logging.
+ * @returns A Promise that resolves to the answer from the chat completion process.
+ */
+async function executeChatCompletion(
+  OpenAImessages: ChatCompletionMessageParam[],
+  message: RequestMessage | undefined,
+) {
+  let generatedMessage: ChatCompletionMessage | undefined = undefined;
+  let answer: Awaited>;
+  let functionRanCounter = 0;
+
+  do {
+    answer = await openai.chat.completions.create({
+      ...config.chatCompletionParams,
+      messages: OpenAImessages,
+      // FIXME: don't use new instance of FunctionManager
+      tools: new FunctionManager().getToolsForOpenAi(),
+    });
+
+    functionRanCounter += answer.choices[0].message?.tool_calls?.length ?? 0;
+    logUsedTokens(answer, message, ++functionRanCounter);
+
+    generatedMessage = answer.choices[0].message;
+    if (!generatedMessage) throw new Error("Empty message received");
+
+    // handle tool calls
+    if (generatedMessage.tool_calls !== undefined && generatedMessage.tool_calls.length > 0) {
+      OpenAImessages.push(generatedMessage);
+      // FIXME: don't use new instance of FunctionManager
+      OpenAImessages.push(...(await new FunctionManager().handleToolCalls(generatedMessage.tool_calls)));
+    }
+  } while (generatedMessage.tool_calls !== undefined && generatedMessage.tool_calls.length > 0);
+  return answer;
+}

From 2fab1b1b428fa9fcd9b8c02e18cdb3423e2abd51 Mon Sep 17 00:00:00 2001
From: Wroclaw
Date: Fri, 26 Apr 2024 05:17:32 +0200
Subject: [PATCH 4/6] execution: factor out replying code to its own function

---
 src/execution.ts | 49 ++++++++++++++++++++++++++++--------------------
 1 file changed, 29 insertions(+), 20 deletions(-)

diff --git a/src/execution.ts b/src/execution.ts
index 4010b51..a78131f 100644
--- a/src/execution.ts
+++ b/src/execution.ts
@@ -252,26 +252,7 @@ async function executeFromQueue(channel: string) {

     const answerContent = answer.choices[0].message?.content;

-    if (answerContent === null || answerContent === "") {
-      if (message instanceof DiscordApi.Message) message.react("😶").catch(() => {/* GRACEFAIL: It's okay if the bot won't reply */});
-    }
-    else {
-      const answerMessagesContent :string[] = [""];
-      for (const i of answerContent.split(/\n\n/)) {
-        if (answerMessagesContent[answerMessagesContent.length-1].length + i.length < 2000) {
-          answerMessagesContent[answerMessagesContent.length-1] += "\n\n" + i;
-        }
-        else {
-          answerMessagesContent.push(i);
-        }
-      }
-
-      for (const i of answerMessagesContent) {
-        const response = requestReply(message, {content: i}, {allowedMentions: { repliedUser: false }});
-
-        await response.then(rval => Moderation.checkMessageNoReturn(rval));
-      }
-    }
+    await replyInMultiMessage(answerContent, message);
   } catch (e) {
     let errorText: string = "";
     channelQueue.stopTyping();
@@ -326,6 +307,34 @@ async function executeFromQueue(channel: string) {
     return executeFromQueue(channel);
 }

+/**
+ * Replies to a message and splits to multiple messages if needed.
+ * @param answerContent - The content of the answer.
+ * @param message - The request message to reply to.
+ */
+async function replyInMultiMessage(answerContent: string | null, message: RequestMessage) {
+  if (answerContent === null || answerContent === "") {
+    if (message instanceof DiscordApi.Message) message.react("😶").catch(() => { });
+  }
+  else {
+    const answerMessagesContent: string[] = [""];
+    for (const i of answerContent.split(/\n\n/)) {
+      if (answerMessagesContent[answerMessagesContent.length - 1].length + i.length < 2000) {
+        answerMessagesContent[answerMessagesContent.length - 1] += "\n\n" + i;
+      }
+      else {
+        answerMessagesContent.push(i);
+      }
+    }
+
+    for (const i of answerMessagesContent) {
+      const response = requestReply(message, { content: i }, { allowedMentions: { repliedUser: false } });
+
+      await response.then(rval => Moderation.checkMessageNoReturn(rval));
+    }
+  }
+}
+
 /**
  * Executes the chat completion process.
  *

From 9f5dfefb31448839826bb5dc7010cd2ee2ce2920 Mon Sep 17 00:00:00 2001
From: Wroclaw
Date: Fri, 26 Apr 2024 05:38:41 +0200
Subject: [PATCH 5/6] execution: support for requests that don't have channel set

Interactions initiated outside of servers where the bot is present don't have a channel assigned.
---
 src/configDefault.ts |  4 ++--
 src/execution.ts     | 17 ++++++++++-------
 2 files changed, 12 insertions(+), 9 deletions(-)

diff --git a/src/configDefault.ts b/src/configDefault.ts
index 620a363..dcd4dd9 100644
--- a/src/configDefault.ts
+++ b/src/configDefault.ts
@@ -1,4 +1,3 @@
-import { Message } from "discord.js";
 import {
   ChatCompletionMessageParam as OpenAIMessage,
   ChatCompletionCreateParamsNonStreaming as ChatCompletionRequestData,
@@ -6,6 +5,7 @@ import {
 import IQuota from "./IQuota";
 import MessageCount from "./quota/messageCount";
+import { apiRequest } from "./execution";

 export interface IConfigRequired {
@@ -14,7 +14,7 @@ export interface IConfigRequired {
     readonly OpenAI: string;
   };
   /** Messages to append at the start of every chat history when sending to API */
-  systemPrompt(context: Message): OpenAIMessage[];
+  systemPrompt(context: apiRequest): OpenAIMessage[];
   /** OpenAI model config */
   readonly chatCompletionParams: Omit;
   /** Limits for message selection */
diff --git a/src/execution.ts b/src/execution.ts
index a78131f..f715564 100644
--- a/src/execution.ts
+++ b/src/execution.ts
@@ -73,7 +73,7 @@ export function getAuthor(request: apiRequest) {
  * @returns Promise of the done action
  */
 function requestReply(
-  request: RequestMessage,
+  request: apiRequest,
   message: DiscordApi.MessageReplyOptions & DiscordApi.InteractionReplyOptions,
   // TODO: add support for these below
   replyOptions: DiscordApi.MessageReplyOptions = {},
@@ -176,7 +176,7 @@ export async function queueRequest(request: apiRequest) {
  */
 function logUsedTokens(
   answer: ChatCompletion,
-  message: RequestMessage | undefined = undefined,
+  message: apiRequest | undefined = undefined,
   functionRan: number = 0,
 ) {
   const usage = answer.usage;
@@ -191,14 +191,17 @@ function logUsedTokens(
       // it doesn't make sense to store usage in database if we don't know where it came from
       return;
     }
-    const channelName: string = !message.channel.isDMBased() ? `${message.channel.name} (${message.guild?.name})` : `@${getAuthor(message).tag}`;
-    console.log(`Used ${usage.total_tokens} (${usage.prompt_tokens} + ${usage.completion_tokens}) tokens for ${getAuthor(message).tag} (${getAuthor(message).id}) in #${channelName}${functionNames && functionNames.length > 0 ? " [Tools: " + functionNames.join(", ") + "]" : ""}`);
+    const channelName: string = !message.channel ? "[No channel]"
+      : !message.channel.isDMBased() ? `#${message.channel.name} (${message.guild?.name})`
+      : `#@${getAuthor(message).tag}`
+    ;
+    console.log(`Used ${usage.total_tokens} (${usage.prompt_tokens} + ${usage.completion_tokens}) tokens for ${getAuthor(message).tag} (${getAuthor(message).id}) in ${channelName}${functionNames && functionNames.length > 0 ? " [Tools: " + functionNames.join(", ") + "]" : ""}`);

     database.usage.create({
       data: {
         timestamp: message.createdAt,
         user: BigInt(getAuthor(message).id),
-        channel: BigInt(message.channelId),
+        channel: BigInt(message.channelId ?? 0),
         guild: message.guildId ? BigInt(message.guildId) : null,
         usageRequest: usage.prompt_tokens,
         usageResponse: usage.completion_tokens,
@@ -312,7 +315,7 @@ async function executeFromQueue(channel: string) {
  * @param answerContent - The content of the answer.
  * @param message - The request message to reply to.
  */
-async function replyInMultiMessage(answerContent: string | null, message: RequestMessage) {
+async function replyInMultiMessage(answerContent: string | null, message: apiRequest) {
   if (answerContent === null || answerContent === "") {
     if (message instanceof DiscordApi.Message) message.react("😶").catch(() => { });
   }
@@ -344,7 +347,7 @@ async function replyInMultiMessage(answerContent: string | null, message: Reques
  */
 async function executeChatCompletion(
   OpenAImessages: ChatCompletionMessageParam[],
-  message: RequestMessage | undefined,
+  message: apiRequest | undefined,
 ) {
   let generatedMessage: ChatCompletionMessage | undefined = undefined;
   let answer: Awaited>;

From 6efb6e587679cd678739a3f8ea1362f34c4b5e4f Mon Sep 17 00:00:00 2001
From: Wroclaw
Date: Fri, 26 Apr 2024 05:41:07 +0200
Subject: [PATCH 6/6] commands/ask: create

Allows interacting with the bot when the bot is user-installed in Discord.
---
 src/commands/ask.ts     | 74 +++++++++++++++++++++++++++++++++++++++++
 src/execution.ts        |  4 +--
 src/toOpenAIMessages.ts |  2 +-
 3 files changed, 77 insertions(+), 3 deletions(-)
 create mode 100644 src/commands/ask.ts

diff --git a/src/commands/ask.ts b/src/commands/ask.ts
new file mode 100644
index 0000000..ea39da7
--- /dev/null
+++ b/src/commands/ask.ts
@@ -0,0 +1,74 @@
+import {
+  APIApplicationCommandOption
+, ApplicationCommandOptionType
+, ApplicationCommandType
+, ChatInputCommandInteraction
+} from "discord.js";
+import { ChatCompletionMessageParam } from "openai/resources";
+
+import
+  Command
+,{ApplicationIntegrationType
+, InteractionContextTypes
+} from "../command";
+import { config } from "../index";
+import { executeChatCompletion, replyInMultiMessage } from "../execution";
+import { formatName } from "../toOpenAIMessages";
+
+export default class Ask extends Command implements Command {
+  name = "ask";
+  description = "Promts the bot to reply to a single message without any history context";
+  type = ApplicationCommandType.ChatInput;
+  options: APIApplicationCommandOption[] = [
+    {
+      name: "content",
+      description: "The content of the prompt",
+      type: ApplicationCommandOptionType.String,
+      required: true,
+    },
+    {
+      name: "ephemeral",
+      description: "if true, only you can see the response (default true)",
+      type: ApplicationCommandOptionType.Boolean,
+      required: false,
+    }
+  ];
+  integration_types = [
+    ApplicationIntegrationType.Guild_Install,
+    ApplicationIntegrationType.User_Install,
+  ];
+  contexts = [
+    InteractionContextTypes.Guild,
+    InteractionContextTypes.BotDM,
+    InteractionContextTypes.PrivateChannel,
+  ];
+
+  async execute(interaction: ChatInputCommandInteraction) {
+    const content = interaction.options.getString("content", true);
+    const ephemeral = interaction.options.getBoolean("ephemeral", false) ?? true;
+
+    if (!interaction.channel && !interaction.channelId) {
+      console.error("No channel found in interaction");
+      console.error(interaction);
+      await interaction.reply({
+        content: "No channel found in interaction???",
+        ephemeral: true
+      });
+      return;
+    }
+
+    // TODO: check content in moderation API
+
+    const messages: ChatCompletionMessageParam[] = [
+      ...config.systemPrompt(interaction),
+      { role: "user", name: formatName(interaction.user.displayName), content }
+    ];
+
+    const [answer] = await Promise.all([
+      executeChatCompletion(messages, interaction),
+      interaction.deferReply({ ephemeral }),
+    ]);
+
+    await replyInMultiMessage(answer.choices[0].message.content, interaction);
+  }
+}
diff --git a/src/execution.ts b/src/execution.ts
index f715564..579a435 100644
--- a/src/execution.ts
+++ b/src/execution.ts
@@ -315,7 +315,7 @@ async function executeFromQueue(channel: string) {
  * @param answerContent - The content of the answer.
  * @param message - The request message to reply to.
  */
-async function replyInMultiMessage(answerContent: string | null, message: apiRequest) {
+export async function replyInMultiMessage(answerContent: string | null, message: apiRequest) {
   if (answerContent === null || answerContent === "") {
     if (message instanceof DiscordApi.Message) message.react("😶").catch(() => { });
   }
@@ -345,7 +345,7 @@ async function replyInMultiMessage(answerContent: string | null, message: apiReq
  */
-async function executeChatCompletion(
+export async function executeChatCompletion(
   OpenAImessages: ChatCompletionMessageParam[],
   message: apiRequest | undefined,
 ) {
diff --git a/src/toOpenAIMessages.ts b/src/toOpenAIMessages.ts
index f93d235..54ad00f 100644
--- a/src/toOpenAIMessages.ts
+++ b/src/toOpenAIMessages.ts
@@ -63,7 +63,7 @@ export function formatMessage(message: DiscordMessage): string {
  * @param name the name to format
  * @returns formatted name
  */
-function formatName(name: string): string {
+export function formatName(name: string): string {
   // replace all characters to ascii equivelant
   return FoldToAscii.foldReplacing(name)
     // White spaces are not allowed