diff --git a/.eslintrc.json b/.eslintrc.json
index 2c7e934..88c772c 100644
--- a/.eslintrc.json
+++ b/.eslintrc.json
@@ -2,12 +2,11 @@
     "extends": ["eslint:recommended", "plugin:@typescript-eslint/recommended-type-checked"],
     "parser": "@typescript-eslint/parser",
     "parserOptions": {
-        "project": "./tsconfig.eslint.json"
+        "project": "./tsconfig.json"
     },
     "rules": {
         "@typescript-eslint/no-floating-promises": "warn",
         "@typescript-eslint/no-unsafe-declaration-merging": "warn",
-        "@typescript-eslint/no-unused-vars": "warn",
         "@typescript-eslint/semi": ["error", "always"],
         "semi": "off",
         "eol-last": ["error", "always"],
diff --git a/.gitignore b/.gitignore
index 5bb261a..ecc7ea9 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,4 +1,4 @@
 dist
 node_modules
-config.*
+src/config.*
 .env
diff --git a/Dockerfile b/Dockerfile
deleted file mode 100644
index d288a6a..0000000
--- a/Dockerfile
+++ /dev/null
@@ -1,18 +0,0 @@
-FROM node:18
-WORKDIR /app
-
-# Packages
-COPY package*.json ./
-RUN npm install -D
-
-# Database schema
-COPY schema.prisma .
-RUN npx prisma generate
-
-# Typescript compiling
-COPY tsconfig.json .
-COPY src ./src
-RUN npx tsc
-
-# Run the app
-CMD ["node", "dist/index.js"]
diff --git a/README.md b/README.md
index 946bb38..d31ac5c 100644
--- a/README.md
+++ b/README.md
@@ -1,10 +1,3 @@
 # GPTcord
 
 Connect ChatGPT to your Discord chat! This is a direct bridge to bring ChatGPT to discord.
-
-## Configuration
-
-Project can be configured in two ways:
-
-1. `config.ts` in the project root folder, that exports an `IConfig`. [That](./src/configDefault.ts) interface can be imported from `./src/configDefault`.
-2. Using environment variables, as defined in [defaultConfig.ts](./src/configDefault.ts).
diff --git a/package.json b/package.json
index 27b96fb..9933a8c 100644
--- a/package.json
+++ b/package.json
@@ -1,6 +1,6 @@
 {
     "name": "gptcord",
-    "version": "0.1.0",
+    "version": "0.0.2",
     "description": "",
     "main": "./dist/index.js",
     "scripts": {
diff --git a/schema.prisma b/schema.prisma
index 762bf90..e3d10e8 100644
--- a/schema.prisma
+++ b/schema.prisma
@@ -13,7 +13,7 @@ model Usage {
   user          BigInt
   channel       BigInt
   guild         BigInt?
-  usageRequest  Int
+  usageReguest  Int
   usageResponse Int
   functionName  String?
   functionRan   Int     @default(0)
diff --git a/src/configDefault.ts b/src/configDefault.ts
deleted file mode 100644
index a4a6bf3..0000000
--- a/src/configDefault.ts
+++ /dev/null
@@ -1,91 +0,0 @@
-import { Message } from "discord.js";
-import {
-    ChatCompletionRequestMessage as OpenAIMessage,
-    CreateChatCompletionRequest as ChatCompletionRequestData,
-} from "openai";
-
-export interface IConfigRequired {
-    readonly calendarParams: Intl.DateTimeFormatOptions;
-    /** Tokens to authentiate with */
-    readonly tokens: {
-        readonly Discord: string;
-        readonly OpenAI: string;
-    };
-    /** Messages to append at the start of every chat history when sending to API */
-    systemPrompt(context: Message): OpenAIMessage[];
-    /** OpenAI model config */
-    readonly chatCompletionParams: Omit<ChatCompletionRequestData, "messages">;
-    /** Limits for message selection */
-    readonly readLimits: {
-        /** Maximum message age to include (in miliseconds) */
-        readonly time: number;
-        /** Maximum number of messages to select (maximum 100) */
-        readonly messages: number;
-        /** Maximum total token usage for messages (counted locally) */
-        readonly tokens: number;
-    };
-    /** Default limits for user inteacting with the bot */
-    readonly userLimits: {
-        /** How much requests can an user make if it's not overriden in a database entry */
-        readonly requests: number;
-    };
-}
-
-export type IConfig = Partial<IConfigRequired>;
-
-export default function newConfig(config?: IConfig): IConfigRequired {
-    return { ...defaultConfig, ...config };
-}
-
-function envAsString(key: string): string | undefined {
-    key = key.toLocaleUpperCase();
-    return process.env[key];
-}
-
-function envAsNumber(key: string): number | undefined {
-    key = key.toLocaleUpperCase();
-    const value = Number(process.env[key]);
-    return !Number.isNaN(value) ? value : undefined;
-}
-
-const defaultConfig: IConfigRequired = {
-    calendarParams: {
-        weekday: "short",
-        year: "numeric",
-        month: "short",
-        day: "numeric",
-        hour: "2-digit",
-        minute: "2-digit",
-        hour12: false,
-    },
-    tokens: {
-        Discord: envAsString("TOKENS__DISCORD") ?? "",
-        OpenAI: envAsString("TOKENS__OPENAI") ?? "",
-    },
-    systemPrompt(): OpenAIMessage[] {
-        return [
-            {
-                role: "system",
-                content:
-                    envAsString("SYSTEM_PROMPT") ??
-                    `You are GPTcord, an AI built on top of ChatGPT (a large language model trained by OpenAI) for Discord. Answer as concisely as possible.`,
-            },
-        ];
-    },
-    chatCompletionParams: {
-        model: envAsString("CHAT_COMPLETION_PARAMS__MODEL") ?? "gpt-3.5-turbo",
-        max_tokens: envAsNumber("CHAT_COMPLETION_PARAMS__MAX_TOKENS") ?? 384,
-        frequency_penalty: envAsNumber("CHAT_COMPLETION_PARAMS__FREQUENCY_PENALTY"),
-        presence_penalty: envAsNumber("CHAT_COMPLETION_PARAMS__PRESENCE_PENALTY"),
-        temperature: envAsNumber("CHAT_COMPLETION_PARAMS__TEMPERATURE"),
-        top_p: envAsNumber("CHAT_COMPLETION_PARAMS__TOP_P"),
-    },
-    readLimits: {
-        time: envAsNumber("READ_LIMITS__TIME") ?? 60 * 60 * 1000,
-        messages: envAsNumber("READ_LIMITS__MESSAGES") ?? 50,
-        tokens: envAsNumber("READ_LIMITS__TOKENS") ?? 2048,
-    },
-    userLimits: {
-        requests: envAsNumber("USER_LIMITS__REQUESTS") ?? 25,
-    },
-};
diff --git a/src/config_example.ts b/src/config_example.ts
new file mode 100644
index 0000000..6e1af96
--- /dev/null
+++ b/src/config_example.ts
@@ -0,0 +1,44 @@
+import { ChatCompletionRequestMessage as OpenAIMessage , CreateChatCompletionRequest as ChatCompletionRequestData } from "openai";
+
+// Don't forget to rename the file to config.ts
+
+export default class config {
+    static readonly calendarConfig: Intl.DateTimeFormatOptions = {
+        weekday: "short",
+        year: "numeric",
+        month: "short",
+        day: "numeric",
+        hour: "2-digit",
+        minute: "2-digit",
+        hour12: false,
+    };
+
+    /** Tokens to authenticate with */
+    static readonly tokens = {
+        Discord: "Discord token here",
+        OpenAI: "OpenAI token here",
+    };
+
+    /** Messages to append at the start of every chat when sending to API */
+    static systemPrompt(): OpenAIMessage[] {
+        return [
+            { role: "system", content: `You are GPTcord, an AI built on top of ChatGPT (a large language model trained by OpenAI) for Discord. Answer as concisely as possible.` }
+        ];
+    }
+
+    /** OpenAI model config */
+    static readonly chatCompletionConfig: Omit<ChatCompletionRequestData, "messages"> = {
+        model: "gpt-3.5-turbo",
+        max_tokens: 384,
+    };
+
+    /** limits for message selection */
+    static readonly limits = {
+        /** maximum time in the past for messages (in miliseconds) */
+        time: 60*60*1000,
+        /** maximum number of messages to select (maximum 100) */
+        messages: 50,
+        /** maximum total token usage for messages */
+        tokens: 2048,
+    };
+}
diff --git a/src/execution.ts b/src/execution.ts
index eb11228..8879b00 100644
--- a/src/execution.ts
+++ b/src/execution.ts
@@ -2,8 +2,9 @@ import DiscordApi, { GuildTextBasedChannel, TextBasedChannel } from "discord.js"
 import { ChatCompletionRequestMessage, ChatCompletionResponseMessage } from "openai";
 import Axios from "axios";
 
-import { database, openai, config } from "./index";
+import { database, openai } from "./index";
 import Moderation from "./moderation";
+import config from "./config";
 import toOpenAIMessages from "./toOpenAIMessages";
 import FunctionManager from "./funcitonManager";
 
@@ -85,7 +86,7 @@ export async function getUserLimit(user: string | { id: string }, requestTimesta
         },
     }))._all;
 
-    if (!userLimits || !userLimits.limit) return {limit: config.userLimits.requests, remaining: config.userLimits.requests - usedLimit};
+    if (!userLimits || !userLimits.limit) return {limit: 25, remaining: 25 - usedLimit};
     return {limit: userLimits.limit, remaining: userLimits.limit - usedLimit};
 }
 
@@ -260,9 +261,9 @@ async function executeFromQueue(channel: string) {
     if (!canReplyToRequest(message)) return;
 
     try {
-        let messages: DiscordApi.Collection = await message.channel.messages.fetch({ limit: config.readLimits.messages, cache: false });
+        let messages: DiscordApi.Collection = await message.channel.messages.fetch({ limit: config.limits.messages, cache: false });
 
-        messages = messages.filter(m => message.createdTimestamp - m.createdTimestamp < config.readLimits.time );
+        messages = messages.filter(m => message.createdTimestamp - m.createdTimestamp < config.limits.time );
 
         messages.forEach(m => { Moderation.checkMessageNoReturn(m); });
 
@@ -286,7 +287,7 @@ async function executeFromQueue(channel: string) {
         do {
             answer = await openai.createChatCompletion({
-                ...config.chatCompletionParams,
+                ...config.chatCompletionConfig,
                 messages: OpenAImessages,
                 // FIXME: don't use new instance of FunctionManager
                 functions: new FunctionManager().getFunctions(),
diff --git a/src/funcitonManager.ts b/src/funcitonManager.ts
index 27d3a91..5168328 100644
--- a/src/funcitonManager.ts
+++ b/src/funcitonManager.ts
@@ -1,6 +1,6 @@
 import { ChatCompletionFunctions, ChatCompletionRequestMessage, ChatCompletionRequestMessageFunctionCall } from "openai";
 
-import { config } from "./index";
+import config from "./config";
 
 type parameterMap = {
     string: string,
@@ -104,6 +104,6 @@ class GetTime extends OpenAIFunction> {
     };
 
     execute(): string {
-        return `${Intl.DateTimeFormat().resolvedOptions().timeZone}): ${new Date().toLocaleString("en-US", config.calendarParams)}`;
+        return `${Intl.DateTimeFormat().resolvedOptions().timeZone}): ${new Date().toLocaleString("en-US", config.calendarConfig)}`;
     }
 }
diff --git a/src/index.ts b/src/index.ts
index 556ddbd..b96a479 100644
--- a/src/index.ts
+++ b/src/index.ts
@@ -1,12 +1,11 @@
 import DiscordApi from "discord.js";
 import { Configuration as OpenAIApiConfiguration, OpenAIApi } from "openai";
 import { PrismaClient } from "@prisma/client";
-import Typescript from "typescript";
-import fs from "node:fs";
+import config from "./config";
 
 import { queueRequest } from "./execution";
 import InteractionManager from "./interactionManager";
-import newConfig, { IConfig, IConfigRequired } from "./configDefault";
+
 const discord = new DiscordApi.Client({
     intents: [
         DiscordApi.GatewayIntentBits.Guilds,
@@ -15,26 +14,6 @@ const discord = new DiscordApi.Client({
     ]
 });
 
-function getConfig() {
-    let fileConfig: IConfig | undefined = undefined;
-    try {
-        fs.statSync("./config.ts");
-        const program = Typescript.createProgram(
-            ["./config.ts"],
-            {outDir: "./dist"}
-        );
-        console.log(program.emit(program.getSourceFile("./config.ts")));
-        // eslint-disable-next-line @typescript-eslint/no-var-requires, @typescript-eslint/no-unsafe-member-access
-        fileConfig = require("./config").default as IConfig;
-    } catch (e) {
-        //FIXME: make errors more descriptive to the enduser
-        console.log(e);
-    }
-    return fileConfig !== undefined ? newConfig(fileConfig) : newConfig();
-}
-
-export const config: IConfigRequired = getConfig();
-
 export const openai = new OpenAIApi(new OpenAIApiConfiguration({
     apiKey: config.tokens.OpenAI
 }));
diff --git a/src/moderation.ts b/src/moderation.ts
index 9a7439b..c896ac8 100644
--- a/src/moderation.ts
+++ b/src/moderation.ts
@@ -2,7 +2,7 @@ import { Collection, InteractionResponse, Message, SnowflakeUtil } from "discord
 
 import { openai } from "./index";
 import { formatRequestOrResponse } from "./toOpenAIMessages";
-import { config } from "./index";
+import config from "./config";
 
 export default class Moderation {
     /** Represents cache of messages that have been checked aganist OpenAI moderation API. */
@@ -60,7 +60,7 @@ export default class Moderation {
     public static removeExpiredCacheEntries() {
         const now = Date.now();
         for (const i of this.cache.keys()) {
-            if (now - SnowflakeUtil.timestampFrom(i) >= config.readLimits.time * 2) {
+            if (now - SnowflakeUtil.timestampFrom(i) >= config.limits.time * 2) {
                 this.cache.delete(i);
             }
         }
diff --git a/src/scripts/pushCommands.ts b/src/scripts/pushCommands.ts
index 3f88ee5..ecd03f3 100644
--- a/src/scripts/pushCommands.ts
+++ b/src/scripts/pushCommands.ts
@@ -1,7 +1,7 @@
 // https://discordjs.guide/creating-your-bot/command-deployment.html#guild-commands
 import { REST, RESTGetAPIOAuth2CurrentApplicationResult, RESTPostAPIApplicationCommandsJSONBody, Routes } from "discord.js";
 
-import { config } from "../index";
+import config from "../config";
 import requireDirectory from "require-directory";
 
 import Command from "../command";
diff --git a/src/toOpenAIMessages.ts b/src/toOpenAIMessages.ts
index f7ed362..e868b45 100644
--- a/src/toOpenAIMessages.ts
+++ b/src/toOpenAIMessages.ts
@@ -2,7 +2,7 @@ import { ChatCompletionRequestMessage as OpenAIMessage } from "openai";
 import { Collection, Message as DiscordMessage, InteractionResponse } from "discord.js";
 import FoldToAscii from "fold-to-ascii";
 
-import { config } from "./index";
+import config from "./config";
 import countTokens from "./tokenCounter";
 import { RequestMessage } from "./execution";
 
@@ -107,7 +107,7 @@ export default function toOpenAIMessages(
         const content = formatMessage(message);
         // FIXME: tokens are not being counted properly (it's lower than it is) but it's enough for me for now.
         tokenCount += countTokens(content);
-        if (tokenCount > config.readLimits.tokens) break;
+        if (tokenCount > config.limits.tokens) break;
         rvalue.push({
             role: message.author.id === message.client.user.id ? "assistant" : "user",
             content: content,
         });
     }
 
-    rvalue.push(...config.systemPrompt([...messages][0]).reverse());
+    rvalue.push(...config.systemPrompt().reverse());
 
     return rvalue.reverse();
 }
diff --git a/tsconfig.eslint.json b/tsconfig.eslint.json
deleted file mode 100644
index 87ed905..0000000
--- a/tsconfig.eslint.json
+++ /dev/null
@@ -1,4 +0,0 @@
-{
-    "include": ["**/*"],
-    "extends": ["./tsconfig.json"]
-}
\ No newline at end of file
diff --git a/tsconfig.json b/tsconfig.json
index 66df360..510958d 100644
--- a/tsconfig.json
+++ b/tsconfig.json
@@ -1,16 +1,13 @@
 {
-    "include": [
-        "./src/**/*"
-    ],
     "compilerOptions": {
         "target": "ES2022",
-        "module": "CommonJS",
+        "module": "commonjs",
         "sourceMap": true,
         "outDir": "./dist/",
         "rootDir": "./src/",
         "strict": true,
         "moduleResolution": "node",
         "esModuleInterop": true,
-        "resolveJsonModule": true
+        "resolveJsonModule": true,
     }
 }