Compare commits
6 commits: 370b7623b5 ... 6efb6e5876

| SHA1 |
|---|
| 6efb6e5876 |
| 9f5dfefb31 |
| 2fab1b1b42 |
| 482f72a4d1 |
| d3567c3607 |
| 67a6e4d486 |
4 changed files with 169 additions and 58 deletions
src/commands/ask.ts (new file, 74 additions)

@@ -0,0 +1,74 @@
+import {
+  APIApplicationCommandOption
+  , ApplicationCommandOptionType
+  , ApplicationCommandType
+  , ChatInputCommandInteraction
+} from "discord.js";
+import { ChatCompletionMessageParam } from "openai/resources";
+
+import
+  Command
+  ,{ApplicationIntegrationType
+  , InteractionContextTypes
+} from "../command";
+import { config } from "../index";
+import { executeChatCompletion, replyInMultiMessage } from "../execution";
+import { formatName } from "../toOpenAIMessages";
+
+export default class Ask extends Command implements Command {
+  name = "ask";
+  description = "Promts the bot to reply to a single message without any history context";
+  type = ApplicationCommandType.ChatInput;
+  options: APIApplicationCommandOption[] = [
+    {
+      name: "content",
+      description: "The content of the prompt",
+      type: ApplicationCommandOptionType.String,
+      required: true,
+    },
+    {
+      name: "ephemeral",
+      description: "if true, only you can see the response (default true)",
+      type: ApplicationCommandOptionType.Boolean,
+      required: false,
+    }
+  ];
+  integration_types = [
+    ApplicationIntegrationType.Guild_Install,
+    ApplicationIntegrationType.User_Install,
+  ];
+  contexts = [
+    InteractionContextTypes.Guild,
+    InteractionContextTypes.BotDM,
+    InteractionContextTypes.PrivateChannel,
+  ];
+
+  async execute(interaction: ChatInputCommandInteraction) {
+    const content = interaction.options.getString("content", true);
+    const ephemeral = interaction.options.getBoolean("ephemeral", false) ?? true;
+
+    if (!interaction.channel && !interaction.channelId) {
+      console.error("No channel found in interaction");
+      console.error(interaction);
+      await interaction.reply({
+        content: "No channel found in interaction???",
+        ephemeral: true
+      });
+      return;
+    }
+
+    // TODO: check content in moderation API
+
+    const messages: ChatCompletionMessageParam[] = [
+      ...config.systemPrompt(interaction),
+      { role: "user", name: formatName(interaction.user.displayName), content }
+    ];
+
+    const [answer] = await Promise.all([
+      executeChatCompletion(messages, interaction),
+      interaction.deferReply({ ephemeral }),
+    ]);
+
+    await replyInMultiMessage(answer.choices[0].message.content, interaction);
+  }
+}
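A note on the `Promise.all` in `execute()` above (my reading of the intent, not part of the changeset): Discord requires an interaction to be acknowledged within about three seconds, while a chat completion can take much longer, so the command starts both the OpenAI request and the reply deferral together. The sketch below just re-quotes those lines with comments.

```ts
// Sketch only: why execute() runs the completion and the deferral in parallel.
const [answer] = await Promise.all([
  executeChatCompletion(messages, interaction), // slow: round-trip to the OpenAI API
  interaction.deferReply({ ephemeral }),        // fast: acknowledges the interaction so it does not expire
]);
// The answer is then sent through replyInMultiMessage, split across messages if needed.
await replyInMultiMessage(answer.choices[0].message.content, interaction);
```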
@@ -1,4 +1,3 @@
-import { Message } from "discord.js";
 import {
   ChatCompletionMessageParam as OpenAIMessage,
   ChatCompletionCreateParamsNonStreaming as ChatCompletionRequestData,
@@ -6,6 +5,7 @@ import {
 
 import IQuota from "./IQuota";
 import MessageCount from "./quota/messageCount";
+import { apiRequest } from "./execution";
 
 export interface IConfigRequired {
   /** Tokens to authentiate with */
@@ -14,9 +14,9 @@ export interface IConfigRequired {
     readonly OpenAI: string;
   };
   /** Messages to append at the start of every chat history when sending to API */
-  systemPrompt(context: Message): OpenAIMessage[];
+  systemPrompt(context: apiRequest): OpenAIMessage[];
   /** OpenAI model config */
-  readonly chatCompletionParams: Omit<ChatCompletionRequestData, "messages" | "function_call" | "functions" | "n">;
+  readonly chatCompletionParams: Omit<ChatCompletionRequestData, "messages" | "function_call" | "tool_call" | "functions" | "n">;
   /** Limits for message selection */
   readonly readLimits: {
     /** Maximum message age to include (in miliseconds) */
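Since `systemPrompt` now receives an `apiRequest` (which in this changeset can be either a plain message or a slash-command interaction) instead of a Discord `Message`, config implementations should only rely on fields common to both. A minimal, hypothetical sketch of a conforming implementation follows; `getAuthor` is the exported helper from `src/execution.ts` visible in the next file's hunk header, and everything else here is illustrative rather than taken from the repository.

```ts
// Hypothetical config fragment, not part of this changeset.
import { ChatCompletionMessageParam as OpenAIMessage } from "openai/resources";
import { apiRequest, getAuthor } from "./execution";

export function systemPrompt(context: apiRequest): OpenAIMessage[] {
  // getAuthor works for both messages and interactions, so the prompt
  // does not need to know which kind of request it is handling.
  const user = getAuthor(context);
  return [
    { role: "system", content: `You are a helpful Discord bot talking to ${user.tag}.` },
  ];
}
```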
src/execution.ts (145 changes)

@@ -73,7 +73,7 @@ export function getAuthor(request: apiRequest) {
  * @returns Promise of the done action
  */
 function requestReply(
-  request: RequestMessage,
+  request: apiRequest,
   message: DiscordApi.MessageReplyOptions & DiscordApi.InteractionReplyOptions,
   // TODO: add support for these below
   replyOptions: DiscordApi.MessageReplyOptions = {},
@@ -172,29 +172,41 @@ export async function queueRequest(request: apiRequest) {
  * Logs used tokens to the terminal and to the database
  * @param answer the response that OpenAI returned
  * @param message the message that initiated the execution
- * @param functionRan counter of how many function have been ran
+ * @param functionRan counter of how many function have been ran (to distinct records in database)
  */
 function logUsedTokens(
   answer: ChatCompletion,
-  message: RequestMessage,
-  functionRan: number,
+  message: apiRequest | undefined = undefined,
+  functionRan: number = 0,
 ) {
   const usage = answer.usage;
-  const functionName = answer.choices[0].message?.function_call?.name;
+  const functionNames =
+    answer.choices[0].message.tool_calls?.map(
+      v => v.type === "function" ? v.function.name : `[unknown type]`
+    );
   if (usage !== undefined) {
-    const channelName: string = !message.channel.isDMBased() ? `${message.channel.name} (${message.guild?.name})` : `@${getAuthor(message).tag}`;
-    console.log(`Used ${usage.total_tokens} (${usage.prompt_tokens} + ${usage.completion_tokens}) tokens for ${getAuthor(message).tag} (${getAuthor(message).id}) in #${channelName}${functionName ? " [Function: " + functionName + "]" : ""}`);
+    if (!message) {
+      // log usage to stdout even if we can't store it in database
+      console.warn(`Used ${usage.total_tokens} (${usage.prompt_tokens} + ${usage.completion_tokens}) tokens from unknown call`);
+      // it doesn't make sense to store usage in database if we don't know where it came from
+      return;
+    }
+    const channelName: string = !message.channel ? "[No channel]"
+      : !message.channel.isDMBased() ? `#${message.channel.name} (${message.guild?.name})`
+      : `#@${getAuthor(message).tag}`
+    ;
+    console.log(`Used ${usage.total_tokens} (${usage.prompt_tokens} + ${usage.completion_tokens}) tokens for ${getAuthor(message).tag} (${getAuthor(message).id}) in ${channelName}${functionNames && functionNames.length > 0 ? " [Tools: " + functionNames.join(", ") + "]" : ""}`);
 
     database.usage.create({
       data: {
         timestamp: message.createdAt,
         user: BigInt(getAuthor(message).id),
-        channel: BigInt(message.channelId),
+        channel: BigInt(message.channelId ?? 0),
         guild: message.guildId ? BigInt(message.guildId) : null,
         usageRequest: usage.prompt_tokens,
         usageResponse: usage.completion_tokens,
-        functionName: functionName ?? null,
-        functionRan: functionName ? functionRan : 0,
+        functionName: functionNames?.join(", ") ?? null,
+        functionRan: functionRan,
       }
     }).catch((e => {
       console.error("Failed to push to a database");
@@ -210,7 +222,6 @@ function logUsedTokens(
 async function executeFromQueue(channel: string) {
   const channelQueue = channelsRunning.get(channel) as ChannelsRunningValue;
   const message = channelQueue.at(0) as RequestMessage;
-  let functionRanCounter = 0;
   let OpenAImessages: ChatCompletionMessageParam[] = [];
 
   // ignore if we can't even send anything to reply
@@ -238,54 +249,13 @@ async function executeFromQueue(channel: string) {
     });
 
     OpenAImessages = toOpenAIMessages(messages.values());
-    let generatedMessage: ChatCompletionMessage | undefined = undefined;
-    let answer: Awaited<ReturnType<typeof openai.chat.completions.create>>;
-
-    do {
-      answer = await openai.chat.completions.create({
-        ...config.chatCompletionParams,
-        messages: OpenAImessages,
-        // FIXME: don't use new instance of FunctionManager
-        tools: new FunctionManager().getToolsForOpenAi(),
-      });
-
-      logUsedTokens(answer, message, ++functionRanCounter);
-
-      generatedMessage = answer.choices[0].message;
-      if (!generatedMessage) throw new Error("Empty message received");
-
-      // handle tool calls
-      if (generatedMessage.tool_calls !== undefined && generatedMessage.tool_calls.length > 0) {
-        OpenAImessages.push(generatedMessage);
-        // FIXME: don't use new instance of FunctionManager
-        OpenAImessages.push(...(await new FunctionManager().handleToolCalls(generatedMessage.tool_calls)));
-      }
-    } while (generatedMessage.function_call);
-
+    const answer = await executeChatCompletion(OpenAImessages, message);
     channelQueue.stopTyping();
 
     const answerContent = answer.choices[0].message?.content;
 
-    if (answerContent === null || answerContent === "") {
-      if (message instanceof DiscordApi.Message) message.react("😶").catch(() => {/* GRACEFAIL: It's okay if the bot won't reply */});
-    }
-    else {
-      const answerMessagesContent :string[] = [""];
-      for (const i of answerContent.split(/\n\n/)) {
-        if (answerMessagesContent[answerMessagesContent.length-1].length + i.length < 2000) {
-          answerMessagesContent[answerMessagesContent.length-1] += "\n\n" + i;
-        }
-        else {
-          answerMessagesContent.push(i);
-        }
-      }
-
-      for (const i of answerMessagesContent) {
-        const response = requestReply(message, {content: i}, {allowedMentions: { repliedUser: false }});
-
-        await response.then(rval => Moderation.checkMessageNoReturn(rval));
-      }
-    }
+    await replyInMultiMessage(answerContent, message);
   } catch (e) {
     let errorText: string = "";
     channelQueue.stopTyping();
@@ -339,3 +309,70 @@ async function executeFromQueue(channel: string) {
   else
     return executeFromQueue(channel);
 }
+
+/**
+ * Replies to a message and splits to multiple messages if needed.
+ * @param answerContent - The content of the answer.
+ * @param message - The request message to reply to.
+ */
+export async function replyInMultiMessage(answerContent: string | null, message: apiRequest) {
+  if (answerContent === null || answerContent === "") {
+    if (message instanceof DiscordApi.Message) message.react("😶").catch(() => { });
+  }
+  else {
+    const answerMessagesContent: string[] = [""];
+    for (const i of answerContent.split(/\n\n/)) {
+      if (answerMessagesContent[answerMessagesContent.length - 1].length + i.length < 2000) {
+        answerMessagesContent[answerMessagesContent.length - 1] += "\n\n" + i;
+      }
+      else {
+        answerMessagesContent.push(i);
+      }
+    }
+
+    for (const i of answerMessagesContent) {
+      const response = requestReply(message, { content: i }, { allowedMentions: { repliedUser: false } });
+
+      await response.then(rval => Moderation.checkMessageNoReturn(rval));
+    }
+  }
+}
+
+/**
+ * Executes the chat completion process.
+ *
+ * @param OpenAImessages An array of ChatCompletionMessageParam objects representing the messages for chat completion.
+ * @param message An optional RequestMessage object representing the request message, used for logging.
+ * @returns A Promise that resolves to the answer from the chat completion process.
+ */
+export async function executeChatCompletion(
+  OpenAImessages: ChatCompletionMessageParam[],
+  message: apiRequest | undefined,
+) {
+  let generatedMessage: ChatCompletionMessage | undefined = undefined;
+  let answer: Awaited<ReturnType<typeof openai.chat.completions.create>>;
+  let functionRanCounter = 0;
+
+  do {
+    answer = await openai.chat.completions.create({
+      ...config.chatCompletionParams,
+      messages: OpenAImessages,
+      // FIXME: don't use new instance of FunctionManager
+      tools: new FunctionManager().getToolsForOpenAi(),
+    });
+
+    functionRanCounter += answer.choices[0].message?.tool_calls?.length ?? 0;
+    logUsedTokens(answer, message, ++functionRanCounter);
+
+    generatedMessage = answer.choices[0].message;
+    if (!generatedMessage) throw new Error("Empty message received");
+
+    // handle tool calls
+    if (generatedMessage.tool_calls !== undefined && generatedMessage.tool_calls.length > 0) {
+      OpenAImessages.push(generatedMessage);
+      // FIXME: don't use new instance of FunctionManager
+      OpenAImessages.push(...(await new FunctionManager().handleToolCalls(generatedMessage.tool_calls)));
+    }
+  } while (generatedMessage.tool_calls !== undefined && generatedMessage.tool_calls.length > 0);
+
+  return answer;
+}
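The new `replyInMultiMessage` packs paragraphs greedily into chunks below Discord's 2000-character message limit. The standalone sketch below is illustrative only and simply mirrors the splitting loop above in isolation.

```ts
// Illustrative only: the same greedy paragraph-packing used by replyInMultiMessage.
const answer = Array.from({ length: 5 }, (_, i) => `paragraph ${i} ` + "x".repeat(900)).join("\n\n");

const chunks: string[] = [""];
for (const paragraph of answer.split(/\n\n/)) {
  if (chunks[chunks.length - 1].length + paragraph.length < 2000) {
    chunks[chunks.length - 1] += "\n\n" + paragraph; // still fits: append to the current chunk
  } else {
    chunks.push(paragraph); // would exceed 2000 characters: start a new chunk
  }
}

console.log(chunks.map(c => c.length)); // every chunk stays under the 2000-character limit
```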
@@ -63,7 +63,7 @@ export function formatMessage(message: DiscordMessage): string {
  * @param name the name to format
  * @returns formatted name
  */
-function formatName(name: string): string {
+export function formatName(name: string): string {
   // replace all characters to ascii equivelant
   return FoldToAscii.foldReplacing(name)
     // White spaces are not allowed
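`formatName` is now exported so that commands such as the new `/ask` can reuse it when filling the OpenAI `name` field, which accepts only a restricted character set. A hedged usage sketch follows; the import path assumes the function lives in `src/toOpenAIMessages.ts`, and the exact output depends on the fold and whitespace-replacement rules in the rest of the function, which sit outside this hunk.

```ts
// Illustrative only; import path is an assumption about the file layout.
import { formatName } from "./toOpenAIMessages";

// Display names can contain accents, emoji and spaces, none of which are
// acceptable in the OpenAI `name` field, so they are folded/stripped first.
const name = formatName("Zoë van Dijk");
console.log(name); // an ASCII-only, whitespace-free variant of the display name
```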