diff --git a/src/commands/ask.ts b/src/commands/ask.ts
new file mode 100644
index 0000000..ea39da7
--- /dev/null
+++ b/src/commands/ask.ts
@@ -0,0 +1,74 @@
+import {
+  APIApplicationCommandOption
+, ApplicationCommandOptionType
+, ApplicationCommandType
+, ChatInputCommandInteraction
+} from "discord.js";
+import { ChatCompletionMessageParam } from "openai/resources";
+
+import
+  Command
+,{ApplicationIntegrationType
+, InteractionContextTypes
+} from "../command";
+import { config } from "../index";
+import { executeChatCompletion, replyInMultiMessage } from "../execution";
+import { formatName } from "../toOpenAIMessages";
+
+export default class Ask extends Command implements Command {
+  name = "ask";
+  description = "Prompts the bot to reply to a single message without any history context";
+  type = ApplicationCommandType.ChatInput;
+  options: APIApplicationCommandOption[] = [
+    {
+      name: "content",
+      description: "The content of the prompt",
+      type: ApplicationCommandOptionType.String,
+      required: true,
+    },
+    {
+      name: "ephemeral",
+      description: "if true, only you can see the response (default true)",
+      type: ApplicationCommandOptionType.Boolean,
+      required: false,
+    }
+  ];
+  integration_types = [
+    ApplicationIntegrationType.Guild_Install,
+    ApplicationIntegrationType.User_Install,
+  ];
+  contexts = [
+    InteractionContextTypes.Guild,
+    InteractionContextTypes.BotDM,
+    InteractionContextTypes.PrivateChannel,
+  ];
+
+  async execute(interaction: ChatInputCommandInteraction) {
+    const content = interaction.options.getString("content", true);
+    const ephemeral = interaction.options.getBoolean("ephemeral", false) ?? true;
+
+    if (!interaction.channel && !interaction.channelId) {
+      console.error("No channel found in interaction");
+      console.error(interaction);
+      await interaction.reply({
+        content: "No channel found in interaction???",
+        ephemeral: true
+      });
+      return;
+    }
+
+    // TODO: check content in moderation API
+
+    const messages: ChatCompletionMessageParam[] = [
+      ...config.systemPrompt(interaction),
+      { role: "user", name: formatName(interaction.user.displayName), content }
+    ];
+
+    const [answer] = await Promise.all([
+      executeChatCompletion(messages, interaction),
+      interaction.deferReply({ ephemeral }),
+    ]);
+
+    await replyInMultiMessage(answer.choices[0].message.content, interaction);
+  }
+}
diff --git a/src/configDefault.ts b/src/configDefault.ts
index 12ce515..dcd4dd9 100644
--- a/src/configDefault.ts
+++ b/src/configDefault.ts
@@ -1,4 +1,3 @@
-import { Message } from "discord.js";
 import {
   ChatCompletionMessageParam as OpenAIMessage,
   ChatCompletionCreateParamsNonStreaming as ChatCompletionRequestData,
@@ -6,6 +5,7 @@ import {
 import IQuota from "./IQuota";
 import MessageCount from "./quota/messageCount";
+import { apiRequest } from "./execution";
 
 export interface IConfigRequired {
   /** Tokens to authentiate with */
@@ -14,9 +14,9 @@ export interface IConfigRequired {
     readonly OpenAI: string;
   };
   /** Messages to append at the start of every chat history when sending to API */
-  systemPrompt(context: Message): OpenAIMessage[];
+  systemPrompt(context: apiRequest): OpenAIMessage[];
   /** OpenAI model config */
-  readonly chatCompletionParams: Omit<ChatCompletionRequestData, "messages">;
+  readonly chatCompletionParams: Omit<ChatCompletionRequestData, "messages" | "tools">;
   /** Limits for message selection */
   readonly readLimits: {
     /** Maximum message age to include (in miliseconds) */
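With this change, systemPrompt() receives the full request object (a regular message or a slash-command interaction) instead of a discord.js Message, so the /ask command added above can reuse the same prompt. Below is a minimal sketch of what an overridden systemPrompt could look like under the new signature; it is illustrative only, and assumes that apiRequest and getAuthor are the exports of src/execution.ts referenced in this diff and that getAuthor returns a discord.js User (its .tag/.id usage below suggests so):

    import { ChatCompletionMessageParam as OpenAIMessage } from "openai/resources";
    import { apiRequest, getAuthor } from "./execution";

    // Illustrative only: the prompt can now be built from either request type.
    function systemPrompt(context: apiRequest): OpenAIMessage[] {
      return [
        {
          role: "system",
          content: `You are a helpful Discord bot, currently replying to ${getAuthor(context).username}.`,
        },
      ];
    }
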
diff --git a/src/execution.ts b/src/execution.ts
index d68f7c0..579a435 100644
--- a/src/execution.ts
+++ b/src/execution.ts
@@ -73,7 +73,7 @@ export function getAuthor(request: apiRequest) {
  * @returns Promise of the done action
  */
 function requestReply(
-  request: RequestMessage,
+  request: apiRequest,
   message: DiscordApi.MessageReplyOptions & DiscordApi.InteractionReplyOptions,
   // TODO: add support for these below
   replyOptions: DiscordApi.MessageReplyOptions = {},
@@ -172,29 +172,41 @@ export async function queueRequest(request: apiRequest) {
  * Logs used tokens to the terminal and to the database
  * @param answer the response that OpenAI returned
  * @param message the message that initiated the execution
- * @param functionRan counter of how many function have been ran
+ * @param functionRan counter of how many functions have been run (to distinguish records in the database)
  */
 function logUsedTokens(
   answer: ChatCompletion,
-  message: RequestMessage,
-  functionRan: number,
+  message: apiRequest | undefined = undefined,
+  functionRan: number = 0,
 ) {
   const usage = answer.usage;
-  const functionName = answer.choices[0].message?.function_call?.name;
+  const functionNames =
+    answer.choices[0].message.tool_calls?.map(
+      v => v.type === "function" ? v.function.name : `[unknown type]`
+    );
   if (usage !== undefined) {
-    const channelName: string = !message.channel.isDMBased() ? `${message.channel.name} (${message.guild?.name})` : `@${getAuthor(message).tag}`;
-    console.log(`Used ${usage.total_tokens} (${usage.prompt_tokens} + ${usage.completion_tokens}) tokens for ${getAuthor(message).tag} (${getAuthor(message).id}) in #${channelName}${functionName ? " [Function: " + functionName + "]" : ""}`);
+    if (!message) {
+      // log usage to stdout even if we can't store it in database
+      console.warn(`Used ${usage.total_tokens} (${usage.prompt_tokens} + ${usage.completion_tokens}) tokens from unknown call`);
+      // it doesn't make sense to store usage in database if we don't know where it came from
+      return;
+    }
+    const channelName: string = !message.channel ? "[No channel]"
+      : !message.channel.isDMBased() ? `#${message.channel.name} (${message.guild?.name})`
+      : `#@${getAuthor(message).tag}`
+    ;
+    console.log(`Used ${usage.total_tokens} (${usage.prompt_tokens} + ${usage.completion_tokens}) tokens for ${getAuthor(message).tag} (${getAuthor(message).id}) in ${channelName}${functionNames && functionNames.length > 0 ? " [Tools: " + functionNames.join(", ") + "]" : ""}`);
     database.usage.create({
       data: {
         timestamp: message.createdAt,
         user: BigInt(getAuthor(message).id),
-        channel: BigInt(message.channelId),
+        channel: BigInt(message.channelId ?? 0),
         guild: message.guildId ? BigInt(message.guildId) : null,
         usageRequest: usage.prompt_tokens,
         usageResponse: usage.completion_tokens,
-        functionName: functionName ?? null,
-        functionRan: functionName ? functionRan : 0,
+        functionName: functionNames?.join(", ") ?? null,
+        functionRan: functionRan,
       }
     }).catch((e => {
       console.error("Failed to push to a database");
@@ -210,7 +222,6 @@ function logUsedTokens(
 async function executeFromQueue(channel: string) {
   const channelQueue = channelsRunning.get(channel) as ChannelsRunningValue;
   const message = channelQueue.at(0) as RequestMessage;
-  let functionRanCounter = 0;
   let OpenAImessages: ChatCompletionMessageParam[] = [];
 
   // ignore if we can't even send anything to reply
@@ -238,54 +249,13 @@ async function executeFromQueue(channel: string) {
     });
     OpenAImessages = toOpenAIMessages(messages.values());
 
-    let generatedMessage: ChatCompletionMessage | undefined = undefined;
-    let answer: Awaited<ReturnType<typeof openai.chat.completions.create>>;
-
-    do {
-      answer = await openai.chat.completions.create({
-        ...config.chatCompletionParams,
-        messages: OpenAImessages,
-        // FIXME: don't use new instance of FunctionManager
-        tools: new FunctionManager().getToolsForOpenAi(),
-      });
-
-      logUsedTokens(answer, message, ++functionRanCounter);
-
-      generatedMessage = answer.choices[0].message;
-      if (!generatedMessage) throw new Error("Empty message received");
-
-      // handle tool calls
-      if (generatedMessage.tool_calls !== undefined && generatedMessage.tool_calls.length > 0) {
-        OpenAImessages.push(generatedMessage);
-        // FIXME: don't use new instance of FunctionManager
-        OpenAImessages.push(...(await new FunctionManager().handleToolCalls(generatedMessage.tool_calls)));
-      }
-    } while (generatedMessage.function_call);
+    const answer = await executeChatCompletion(OpenAImessages, message);
 
     channelQueue.stopTyping();
     const answerContent = answer.choices[0].message?.content;
 
-    if (answerContent === null || answerContent === "") {
-      if (message instanceof DiscordApi.Message) message.react("😶").catch(() => {/* GRACEFAIL: It's okay if the bot won't reply */});
-    }
-    else {
-      const answerMessagesContent :string[] = [""];
-      for (const i of answerContent.split(/\n\n/)) {
-        if (answerMessagesContent[answerMessagesContent.length-1].length + i.length < 2000) {
-          answerMessagesContent[answerMessagesContent.length-1] += "\n\n" + i;
-        }
-        else {
-          answerMessagesContent.push(i);
-        }
-      }
-
-      for (const i of answerMessagesContent) {
-        const response = requestReply(message, {content: i}, {allowedMentions: { repliedUser: false }});
-
-        await response.then(rval => Moderation.checkMessageNoReturn(rval));
-      }
-    }
+    await replyInMultiMessage(answerContent, message);
   } catch (e) {
     let errorText: string = "";
     channelQueue.stopTyping();
@@ -339,3 +309,70 @@ async function executeFromQueue(channel: string) {
   else
     return executeFromQueue(channel);
 }
+
+/**
+ * Replies to a message, splitting the answer into multiple messages if needed.
+ * @param answerContent - The content of the answer.
+ * @param message - The request message to reply to.
+ */
+export async function replyInMultiMessage(answerContent: string | null, message: apiRequest) {
+  if (answerContent === null || answerContent === "") {
+    if (message instanceof DiscordApi.Message) message.react("😶").catch(() => { });
+  }
+  else {
+    const answerMessagesContent: string[] = [""];
+    for (const i of answerContent.split(/\n\n/)) {
+      if (answerMessagesContent[answerMessagesContent.length - 1].length + i.length < 2000) {
+        answerMessagesContent[answerMessagesContent.length - 1] += "\n\n" + i;
+      }
+      else {
+        answerMessagesContent.push(i);
+      }
+    }
+
+    for (const i of answerMessagesContent) {
+      const response = requestReply(message, { content: i }, { allowedMentions: { repliedUser: false } });
+
+      await response.then(rval => Moderation.checkMessageNoReturn(rval));
+    }
+  }
+}
+
+/**
+ * Executes the chat completion process.
+ *
+ * @param OpenAImessages An array of ChatCompletionMessageParam objects representing the messages for chat completion.
+ * @param message An optional apiRequest object representing the request message, used for logging.
+ * @returns A Promise that resolves to the answer from the chat completion process.
+ */
+export async function executeChatCompletion(
+  OpenAImessages: ChatCompletionMessageParam[],
+  message: apiRequest | undefined,
+) {
+  let generatedMessage: ChatCompletionMessage | undefined = undefined;
+  let answer: Awaited<ReturnType<typeof openai.chat.completions.create>>;
+  let functionRanCounter = 0;
+
+  do {
+    answer = await openai.chat.completions.create({
+      ...config.chatCompletionParams,
+      messages: OpenAImessages,
+      // FIXME: don't use new instance of FunctionManager
+      tools: new FunctionManager().getToolsForOpenAi(),
+    });
+
+    functionRanCounter += answer.choices[0].message?.tool_calls?.length ?? 0;
+    logUsedTokens(answer, message, ++functionRanCounter);
+
+    generatedMessage = answer.choices[0].message;
+    if (!generatedMessage) throw new Error("Empty message received");
+
+    // handle tool calls
+    if (generatedMessage.tool_calls !== undefined && generatedMessage.tool_calls.length > 0) {
+      OpenAImessages.push(generatedMessage);
+      // FIXME: don't use new instance of FunctionManager
+      OpenAImessages.push(...(await new FunctionManager().handleToolCalls(generatedMessage.tool_calls)));
+    }
+  } while (generatedMessage.tool_calls !== undefined && generatedMessage.tool_calls.length > 0);
+  return answer;
+}
diff --git a/src/toOpenAIMessages.ts b/src/toOpenAIMessages.ts
index f93d235..54ad00f 100644
--- a/src/toOpenAIMessages.ts
+++ b/src/toOpenAIMessages.ts
@@ -63,7 +63,7 @@ export function formatMessage(message: DiscordMessage): string {
  * @param name the name to format
  * @returns formatted name
  */
-function formatName(name: string): string {
+export function formatName(name: string): string {
   // replace all characters to ascii equivelant
   return FoldToAscii.foldReplacing(name)
     // White spaces are not allowed
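
The two helpers added in src/execution.ts are now the shared path for both the message queue and slash commands: executeChatCompletion() loops until the model stops requesting tool calls, and its second argument is used only for usage logging, which is why it may be undefined; replyInMultiMessage() then splits the answer on blank lines into chunks under Discord's 2000-character message limit. Below is a minimal sketch of calling the completion helper without any Discord request object; the script name and placement (in src/) are hypothetical, and with no request passed, logUsedTokens() takes the console.warn branch and stores nothing in the database:

    import { ChatCompletionMessageParam } from "openai/resources";

    import { executeChatCompletion } from "./execution";

    // Hypothetical smoke test: run one completion outside of any Discord context.
    async function smokeTest() {
      const messages: ChatCompletionMessageParam[] = [
        { role: "user", content: "Reply with a single word: pong." },
      ];

      // No request object: token usage is only logged to stdout, nothing is stored.
      const answer = await executeChatCompletion(messages, undefined);
      console.log(answer.choices[0].message.content);
    }

    smokeTest().catch(console.error);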