diff --git a/.gitignore b/.gitignore
index ecc7ea9..5bb261a 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,4 +1,4 @@
 dist
 node_modules
-src/config.*
+config.*
 .env
diff --git a/README.md b/README.md
index d31ac5c..946bb38 100644
--- a/README.md
+++ b/README.md
@@ -1,3 +1,10 @@
 # GPTcord

 Connect ChatGPT to your Discord chat! This is a direct bridge to bring ChatGPT to discord.
+
+## Configuration
+
+The project can be configured in two ways:
+
+1. A `config.ts` file in the project root folder that exports an `IConfig`. That interface can be imported from [`./src/configDefault`](./src/configDefault.ts).
+2. Environment variables, as defined in [configDefault.ts](./src/configDefault.ts).
diff --git a/src/configDefault.ts b/src/configDefault.ts
new file mode 100644
index 0000000..a4a6bf3
--- /dev/null
+++ b/src/configDefault.ts
@@ -0,0 +1,91 @@
+import { Message } from "discord.js";
+import {
+  ChatCompletionRequestMessage as OpenAIMessage,
+  CreateChatCompletionRequest as ChatCompletionRequestData,
+} from "openai";
+
+export interface IConfigRequired {
+  readonly calendarParams: Intl.DateTimeFormatOptions;
+  /** Tokens to authenticate with */
+  readonly tokens: {
+    readonly Discord: string;
+    readonly OpenAI: string;
+  };
+  /** Messages to append at the start of every chat history when sending to API */
+  systemPrompt(context: Message): OpenAIMessage[];
+  /** OpenAI model config */
+  readonly chatCompletionParams: Omit<ChatCompletionRequestData, "messages">;
+  /** Limits for message selection */
+  readonly readLimits: {
+    /** Maximum message age to include (in milliseconds) */
+    readonly time: number;
+    /** Maximum number of messages to select (maximum 100) */
+    readonly messages: number;
+    /** Maximum total token usage for messages (counted locally) */
+    readonly tokens: number;
+  };
+  /** Default limits for users interacting with the bot */
+  readonly userLimits: {
+    /** How many requests a user can make if not overridden in a database entry */
+    readonly requests: number;
+  };
+}
+
+export type IConfig = Partial<IConfigRequired>;
+
+export default function newConfig(config?: IConfig): IConfigRequired {
+  return { ...defaultConfig, ...config };
+}
+
+function envAsString(key: string): string | undefined {
+  key = key.toLocaleUpperCase();
+  return process.env[key];
+}
+
+function envAsNumber(key: string): number | undefined {
+  key = key.toLocaleUpperCase();
+  const value = Number(process.env[key]);
+  return !Number.isNaN(value) ? value : undefined;
+}
+
+const defaultConfig: IConfigRequired = {
+  calendarParams: {
+    weekday: "short",
+    year: "numeric",
+    month: "short",
+    day: "numeric",
+    hour: "2-digit",
+    minute: "2-digit",
+    hour12: false,
+  },
+  tokens: {
+    Discord: envAsString("TOKENS__DISCORD") ?? "",
+    OpenAI: envAsString("TOKENS__OPENAI") ?? "",
+  },
+  systemPrompt(): OpenAIMessage[] {
+    return [
+      {
+        role: "system",
+        content:
+          envAsString("SYSTEM_PROMPT") ??
+          `You are GPTcord, an AI built on top of ChatGPT (a large language model trained by OpenAI) for Discord. Answer as concisely as possible.`,
+      },
+    ];
+  },
+  chatCompletionParams: {
+    model: envAsString("CHAT_COMPLETION_PARAMS__MODEL") ?? "gpt-3.5-turbo",
+    max_tokens: envAsNumber("CHAT_COMPLETION_PARAMS__MAX_TOKENS") ?? 384,
+    frequency_penalty: envAsNumber("CHAT_COMPLETION_PARAMS__FREQUENCY_PENALTY"),
+    presence_penalty: envAsNumber("CHAT_COMPLETION_PARAMS__PRESENCE_PENALTY"),
+    temperature: envAsNumber("CHAT_COMPLETION_PARAMS__TEMPERATURE"),
+    top_p: envAsNumber("CHAT_COMPLETION_PARAMS__TOP_P"),
+  },
+  readLimits: {
+    time: envAsNumber("READ_LIMITS__TIME") ?? 60 * 60 * 1000,
+    messages: envAsNumber("READ_LIMITS__MESSAGES") ?? 50,
+    tokens: envAsNumber("READ_LIMITS__TOKENS") ?? 2048,
+  },
+  userLimits: {
+    requests: envAsNumber("USER_LIMITS__REQUESTS") ?? 25,
+  },
+};
diff --git a/src/config_example.ts b/src/config_example.ts
deleted file mode 100644
index ae7e88e..0000000
--- a/src/config_example.ts
+++ /dev/null
@@ -1,49 +0,0 @@
-import { ChatCompletionRequestMessage as OpenAIMessage , CreateChatCompletionRequest as ChatCompletionRequestData } from "openai";
-
-// Don't forget to rename the file to config.ts
-
-export default class config {
-  static readonly calendarConfig: Intl.DateTimeFormatOptions = {
-    weekday: "short",
-    year: "numeric",
-    month: "short",
-    day: "numeric",
-    hour: "2-digit",
-    minute: "2-digit",
-    hour12: false,
-  };
-
-  /** Tokens to authenticate with */
-  static readonly tokens = {
-    Discord: "Discord token here",
-    OpenAI: "OpenAI token here",
-  };
-
-  /** Messages to append at the start of every chat when sending to API */
-  static systemPrompt(): OpenAIMessage[] {
-    return [
-      { role: "system", content: `You are GPTcord, an AI built on top of ChatGPT (a large language model trained by OpenAI) for Discord. Answer as concisely as possible.` }
-    ];
-  }
-
-  /** OpenAI model config */
-  static readonly chatCompletionConfig: Omit<ChatCompletionRequestData, "messages"> = {
-    model: "gpt-3.5-turbo",
-    max_tokens: 384,
-  };
-
-  /** limits for message selection */
-  static readonly readLimits = {
-    /** maximum time in the past for messages (in miliseconds) */
-    time: 60*60*1000,
-    /** maximum number of messages to select (maximum 100) */
-    messages: 50,
-    /** maximum total token usage for messages */
-    tokens: 2048,
-  };
-  /** default user limits */
-  static readonly userLimits = {
-    /** how much requests can an user make if it's not overriden in database entry */
-    requests: 25,
-  };
-}
diff --git a/src/execution.ts b/src/execution.ts
index 3cd0e8f..eb11228 100644
--- a/src/execution.ts
+++ b/src/execution.ts
@@ -2,9 +2,8 @@ import DiscordApi, { GuildTextBasedChannel, TextBasedChannel } from "discord.js"
 import { ChatCompletionRequestMessage, ChatCompletionResponseMessage } from "openai";
 import Axios from "axios";

-import { database, openai } from "./index";
+import { database, openai, config } from "./index";
 import Moderation from "./moderation";
-import config from "./config";
 import toOpenAIMessages from "./toOpenAIMessages";
 import FunctionManager from "./funcitonManager";

@@ -287,7 +286,7 @@ async function executeFromQueue(channel: string) {

     do {
       answer = await openai.createChatCompletion({
-        ...config.chatCompletionConfig,
+        ...config.chatCompletionParams,
         messages: OpenAImessages,
         // FIXME: don't use new instance of FunctionManager
         functions: new FunctionManager().getFunctions(),
diff --git a/src/funcitonManager.ts b/src/funcitonManager.ts
index 5168328..27d3a91 100644
--- a/src/funcitonManager.ts
+++ b/src/funcitonManager.ts
@@ -1,6 +1,6 @@
 import { ChatCompletionFunctions, ChatCompletionRequestMessage, ChatCompletionRequestMessageFunctionCall } from "openai";

-import config from "./config";
+import { config } from "./index";

 type parameterMap = {
   string: string,
@@ -104,6 +104,6 @@ class GetTime extends OpenAIFunction> {
   };

   execute(): string {
-    return `${Intl.DateTimeFormat().resolvedOptions().timeZone}): ${new Date().toLocaleString("en-US", config.calendarConfig)}`;
+    return `${Intl.DateTimeFormat().resolvedOptions().timeZone}): ${new Date().toLocaleString("en-US", config.calendarParams)}`;
   }
 }
diff --git a/src/index.ts b/src/index.ts
index b96a479..556ddbd 100644
--- a/src/index.ts
+++ b/src/index.ts
@@ -1,11 +1,12 @@
 import DiscordApi from "discord.js";
 import { Configuration as OpenAIApiConfiguration, OpenAIApi } from "openai";
 import { PrismaClient } from "@prisma/client";
+import Typescript from "typescript";
+import fs from "node:fs";

-import config from "./config";
 import { queueRequest } from "./execution";
 import InteractionManager from "./interactionManager";
-
+import newConfig, { IConfig, IConfigRequired } from "./configDefault";
 const discord = new DiscordApi.Client({
   intents: [
     DiscordApi.GatewayIntentBits.Guilds,
@@ -14,6 +15,26 @@ const discord = new DiscordApi.Client({
   ]
 });

+function getConfig() {
+  let fileConfig: IConfig | undefined = undefined;
+  try {
+    fs.statSync("./config.ts");
+    const program = Typescript.createProgram(
+      ["./config.ts"],
+      {outDir: "./dist"}
+    );
+    console.log(program.emit(program.getSourceFile("./config.ts")));
+    // eslint-disable-next-line @typescript-eslint/no-var-requires, @typescript-eslint/no-unsafe-member-access
+    fileConfig = require("./config").default as IConfig;
+  } catch (e) {
+    // FIXME: make errors more descriptive to the end user
+    console.log(e);
+  }
+  return fileConfig !== undefined ? newConfig(fileConfig) : newConfig();
+}
+
+export const config: IConfigRequired = getConfig();
+
 export const openai = new OpenAIApi(new OpenAIApiConfiguration({
   apiKey: config.tokens.OpenAI
 }));
diff --git a/src/moderation.ts b/src/moderation.ts
index d949b12..9a7439b 100644
--- a/src/moderation.ts
+++ b/src/moderation.ts
@@ -2,7 +2,7 @@ import { Collection, InteractionResponse, Message, SnowflakeUtil } from "discord

 import { openai } from "./index";
 import { formatRequestOrResponse } from "./toOpenAIMessages";
-import config from "./config";
+import { config } from "./index";

 export default class Moderation {
   /** Represents cache of messages that have been checked aganist OpenAI moderation API. */
diff --git a/src/scripts/pushCommands.ts b/src/scripts/pushCommands.ts
index ecd03f3..3f88ee5 100644
--- a/src/scripts/pushCommands.ts
+++ b/src/scripts/pushCommands.ts
@@ -1,7 +1,7 @@
 // https://discordjs.guide/creating-your-bot/command-deployment.html#guild-commands
 import { REST, RESTGetAPIOAuth2CurrentApplicationResult, RESTPostAPIApplicationCommandsJSONBody, Routes } from "discord.js";

-import config from "../config";
+import { config } from "../index";
 import requireDirectory from "require-directory";

 import Command from "../command";
diff --git a/src/toOpenAIMessages.ts b/src/toOpenAIMessages.ts
index 3f3c63c..f7ed362 100644
--- a/src/toOpenAIMessages.ts
+++ b/src/toOpenAIMessages.ts
@@ -2,7 +2,7 @@ import { ChatCompletionRequestMessage as OpenAIMessage } from "openai";
 import { Collection, Message as DiscordMessage, InteractionResponse } from "discord.js";
 import FoldToAscii from "fold-to-ascii";

-import config from "./config";
+import { config } from "./index";
 import countTokens from "./tokenCounter";
 import { RequestMessage } from "./execution";

@@ -115,7 +115,7 @@ export default function toOpenAIMessages(
     });
   }

-  rvalue.push(...config.systemPrompt().reverse());
+  rvalue.push(...config.systemPrompt([...messages][0]).reverse());

   return rvalue.reverse();
 }
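As a companion to the README section added above, here is a minimal sketch of what a root `config.ts` (option 1) could look like. The token strings, the `max_tokens` override, and the prompt wording are placeholders; only the fields a deployment actually wants to change need to be present.

```typescript
// Hypothetical config.ts in the project root; all values are placeholders.
import { Message } from "discord.js";
import { ChatCompletionRequestMessage as OpenAIMessage } from "openai";

import { IConfig } from "./src/configDefault";

const config: IConfig = {
  // Secrets are normally better supplied through the environment (.env is gitignored).
  tokens: {
    Discord: "discord-token-here",
    OpenAI: "openai-token-here",
  },
  // newConfig() does a shallow merge over defaultConfig, so a section provided here
  // replaces the corresponding default section wholesale.
  chatCompletionParams: {
    model: "gpt-3.5-turbo",
    max_tokens: 512,
  },
  // systemPrompt receives the triggering Discord message, so the prompt can reference it.
  systemPrompt(context: Message): OpenAIMessage[] {
    return [
      {
        role: "system",
        content: `You are GPTcord, replying to ${context.author.username}. Answer as concisely as possible.`,
      },
    ];
  },
};

export default config;
```

For the environment-variable route (option 2), the same fields map onto upper-cased, double-underscore-separated names such as `TOKENS__DISCORD`, `CHAT_COMPLETION_PARAMS__MODEL`, and `READ_LIMITS__TIME`, which `envAsString`/`envAsNumber` in `configDefault.ts` read when the module is loaded.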