Compare commits

..

5 commits

16 changed files with 166 additions and 66 deletions

View file

@ -2,11 +2,12 @@
"extends": ["eslint:recommended", "plugin:@typescript-eslint/recommended-type-checked"], "extends": ["eslint:recommended", "plugin:@typescript-eslint/recommended-type-checked"],
"parser": "@typescript-eslint/parser", "parser": "@typescript-eslint/parser",
"parserOptions": { "parserOptions": {
"project": "./tsconfig.json" "project": "./tsconfig.eslint.json"
}, },
"rules": { "rules": {
"@typescript-eslint/no-floating-promises": "warn", "@typescript-eslint/no-floating-promises": "warn",
"@typescript-eslint/no-unsafe-declaration-merging": "warn", "@typescript-eslint/no-unsafe-declaration-merging": "warn",
"@typescript-eslint/no-unused-vars": "warn",
"@typescript-eslint/semi": ["error", "always"], "@typescript-eslint/semi": ["error", "always"],
"semi": "off", "semi": "off",
"eol-last": ["error", "always"], "eol-last": ["error", "always"],

2
.gitignore vendored
View file

@ -1,4 +1,4 @@
dist dist
node_modules node_modules
src/config.* config.*
.env .env

18
Dockerfile Normal file
View file

@ -0,0 +1,18 @@
FROM node:18
WORKDIR /app

# Install dependencies from the lockfile for reproducible builds.
# `npm ci` (not `npm install -D`: the -D flag is a no-op without package
# arguments) installs dev dependencies too, which are needed below for
# `prisma generate` and `tsc`.
COPY package*.json ./
RUN npm ci

# Generate the Prisma database client
COPY schema.prisma .
RUN npx prisma generate

# Compile the TypeScript sources into ./dist
COPY tsconfig.json .
COPY src ./src
RUN npx tsc

# Run the app
CMD ["node", "dist/index.js"]

View file

@ -1,3 +1,10 @@
# GPTcord # GPTcord
Connect ChatGPT to your Discord chat! This is a direct bridge to bring ChatGPT to discord. Connect ChatGPT to your Discord chat! This is a direct bridge to bring ChatGPT to discord.
## Configuration
The project can be configured in two ways:
1. A `config.ts` file in the project root that exports an `IConfig`. The [`IConfig`](./src/configDefault.ts) interface can be imported from `./src/configDefault`.
2. Using environment variables, as defined in [configDefault.ts](./src/configDefault.ts).

View file

@ -1,6 +1,6 @@
{ {
"name": "gptcord", "name": "gptcord",
"version": "0.0.2", "version": "0.1.0",
"description": "", "description": "",
"main": "./dist/index.js", "main": "./dist/index.js",
"scripts": { "scripts": {

View file

@ -13,7 +13,7 @@ model Usage {
user BigInt user BigInt
channel BigInt channel BigInt
guild BigInt? guild BigInt?
usageReguest Int usageRequest Int
usageResponse Int usageResponse Int
functionName String? functionName String?
functionRan Int @default(0) functionRan Int @default(0)

91
src/configDefault.ts Normal file
View file

@ -0,0 +1,91 @@
import { Message } from "discord.js";
import {
ChatCompletionRequestMessage as OpenAIMessage,
CreateChatCompletionRequest as ChatCompletionRequestData,
} from "openai";
/**
 * The complete configuration contract for the bot.
 * Every field is required here; user-supplied configuration is `IConfig`
 * (a `Partial` of this) merged over the defaults by `newConfig`.
 */
export interface IConfigRequired {
  /** Intl date/time formatting options used when rendering timestamps. */
  readonly calendarParams: Intl.DateTimeFormatOptions;
  /** Tokens to authenticate with */
  readonly tokens: {
    readonly Discord: string;
    readonly OpenAI: string;
  };
  /** Messages to append at the start of every chat history when sending to API */
  systemPrompt(context: Message): OpenAIMessage[];
  /** OpenAI model config (everything except the per-request fields excluded by Omit) */
  readonly chatCompletionParams: Omit<ChatCompletionRequestData, "messages" | "function_call" | "functions" | "n">;
  /** Limits for message selection */
  readonly readLimits: {
    /** Maximum message age to include (in milliseconds) */
    readonly time: number;
    /** Maximum number of messages to select (maximum 100) */
    readonly messages: number;
    /** Maximum total token usage for messages (counted locally) */
    readonly tokens: number;
  };
  /** Default limits for a user interacting with the bot */
  readonly userLimits: {
    /** How many requests a user may make when not overridden by a database entry */
    readonly requests: number;
  };
}
/** User-supplied configuration: any subset of `IConfigRequired`. */
export type IConfig = Partial<IConfigRequired>;

/**
 * Build the effective configuration by layering user overrides (if any)
 * on top of the built-in defaults.
 * @param config optional partial overrides
 * @returns a fully-populated configuration object
 */
export default function newConfig(config?: IConfig): IConfigRequired {
  const merged = Object.assign({}, defaultConfig, config);
  return merged;
}
/**
 * Read an environment variable by case-insensitive name.
 * @param key variable name; upper-cased before lookup
 * @returns the raw string value, or undefined when the variable is not set
 */
function envAsString(key: string): string | undefined {
  // toUpperCase, not toLocaleUpperCase: env var names are ASCII and the
  // lookup must not depend on the process locale (e.g. Turkish dotless i
  // would break any key containing "i" under tr-TR).
  return process.env[key.toUpperCase()];
}
/**
 * Read an environment variable as a number.
 * @param key variable name; upper-cased before lookup
 * @returns the numeric value, or undefined when the variable is unset,
 *          empty, or not a valid number
 */
function envAsNumber(key: string): number | undefined {
  // toUpperCase, not toLocaleUpperCase: avoid locale-dependent lookups.
  const raw = process.env[key.toUpperCase()];
  // Guard unset/empty explicitly: Number("") is 0, which would silently
  // turn an empty variable into a valid configuration value.
  if (raw === undefined || raw.trim() === "") return undefined;
  const value = Number(raw);
  return !Number.isNaN(value) ? value : undefined;
}
/**
 * Built-in defaults. Every value can be overridden either by a user
 * `config.ts` (merged in `newConfig`) or by the environment variables
 * named in the `envAsString`/`envAsNumber` calls below.
 */
const defaultConfig: IConfigRequired = {
  // Date formatting: short weekday/month, 24-hour clock.
  calendarParams: {
    weekday: "short",
    year: "numeric",
    month: "short",
    day: "numeric",
    hour: "2-digit",
    minute: "2-digit",
    hour12: false,
  },
  // Default to empty strings when the env vars are absent; authentication
  // will fail downstream rather than here.
  tokens: {
    Discord: envAsString("TOKENS__DISCORD") ?? "",
    OpenAI: envAsString("TOKENS__OPENAI") ?? "",
  },
  // NOTE(review): the IConfigRequired interface allows a `context` Message
  // parameter; this default implementation ignores it.
  systemPrompt(): OpenAIMessage[] {
    return [
      {
        role: "system",
        content:
          envAsString("SYSTEM_PROMPT") ??
          `You are GPTcord, an AI built on top of ChatGPT (a large language model trained by OpenAI) for Discord. Answer as concisely as possible.`,
      },
    ];
  },
  // Model parameters; unset env vars leave the optional fields undefined,
  // deferring to the OpenAI API's own defaults.
  chatCompletionParams: {
    model: envAsString("CHAT_COMPLETION_PARAMS__MODEL") ?? "gpt-3.5-turbo",
    max_tokens: envAsNumber("CHAT_COMPLETION_PARAMS__MAX_TOKENS") ?? 384,
    frequency_penalty: envAsNumber("CHAT_COMPLETION_PARAMS__FREQUENCY_PENALTY"),
    presence_penalty: envAsNumber("CHAT_COMPLETION_PARAMS__PRESENCE_PENALTY"),
    temperature: envAsNumber("CHAT_COMPLETION_PARAMS__TEMPERATURE"),
    top_p: envAsNumber("CHAT_COMPLETION_PARAMS__TOP_P"),
  },
  // Message-selection limits: 1 hour lookback, 50 messages, 2048 tokens.
  readLimits: {
    time: envAsNumber("READ_LIMITS__TIME") ?? 60 * 60 * 1000,
    messages: envAsNumber("READ_LIMITS__MESSAGES") ?? 50,
    tokens: envAsNumber("READ_LIMITS__TOKENS") ?? 2048,
  },
  // Per-user request quota when no database override exists.
  userLimits: {
    requests: envAsNumber("USER_LIMITS__REQUESTS") ?? 25,
  },
};

View file

@ -1,44 +0,0 @@
import { ChatCompletionRequestMessage as OpenAIMessage , CreateChatCompletionRequest as ChatCompletionRequestData } from "openai";
// Don't forget to rename the file to config.ts
/**
 * Static configuration template (legacy format — this diff replaces it
 * with the `IConfig`/`configDefault.ts` mechanism).
 */
export default class config {
  /** Intl options used to format dates/times. */
  static readonly calendarConfig: Intl.DateTimeFormatOptions = {
    weekday: "short",
    year: "numeric",
    month: "short",
    day: "numeric",
    hour: "2-digit",
    minute: "2-digit",
    hour12: false,
  };
  /** Tokens to authenticate with */
  static readonly tokens = {
    Discord: "Discord token here",
    OpenAI: "OpenAI token here",
  };
  /** Messages to append at the start of every chat when sending to API */
  static systemPrompt(): OpenAIMessage[] {
    return [
      { role: "system", content: `You are GPTcord, an AI built on top of ChatGPT (a large language model trained by OpenAI) for Discord. Answer as concisely as possible.` }
    ];
  }
  /** OpenAI model config */
  static readonly chatCompletionConfig: Omit<ChatCompletionRequestData, "messages"> = {
    model: "gpt-3.5-turbo",
    max_tokens: 384,
  };
  /** Limits for message selection */
  static readonly limits = {
    /** Maximum age of messages to include (in milliseconds) */
    time: 60*60*1000,
    /** Maximum number of messages to select (maximum 100) */
    messages: 50,
    /** Maximum total token usage for messages */
    tokens: 2048,
  };
}

View file

@ -2,9 +2,8 @@ import DiscordApi, { GuildTextBasedChannel, TextBasedChannel } from "discord.js"
import { ChatCompletionRequestMessage, ChatCompletionResponseMessage } from "openai"; import { ChatCompletionRequestMessage, ChatCompletionResponseMessage } from "openai";
import Axios from "axios"; import Axios from "axios";
import { database, openai } from "./index"; import { database, openai, config } from "./index";
import Moderation from "./moderation"; import Moderation from "./moderation";
import config from "./config";
import toOpenAIMessages from "./toOpenAIMessages"; import toOpenAIMessages from "./toOpenAIMessages";
import FunctionManager from "./funcitonManager"; import FunctionManager from "./funcitonManager";
@ -86,7 +85,7 @@ export async function getUserLimit(user: string | { id: string }, requestTimesta
}, },
}))._all; }))._all;
if (!userLimits || !userLimits.limit) return {limit: 25, remaining: 25 - usedLimit}; if (!userLimits || !userLimits.limit) return {limit: config.userLimits.requests, remaining: config.userLimits.requests - usedLimit};
return {limit: userLimits.limit, remaining: userLimits.limit - usedLimit}; return {limit: userLimits.limit, remaining: userLimits.limit - usedLimit};
} }
@ -261,9 +260,9 @@ async function executeFromQueue(channel: string) {
if (!canReplyToRequest(message)) return; if (!canReplyToRequest(message)) return;
try { try {
let messages: DiscordApi.Collection<string, DiscordApi.Message> = await message.channel.messages.fetch({ limit: config.limits.messages, cache: false }); let messages: DiscordApi.Collection<string, DiscordApi.Message> = await message.channel.messages.fetch({ limit: config.readLimits.messages, cache: false });
messages = messages.filter(m => message.createdTimestamp - m.createdTimestamp < config.limits.time ); messages = messages.filter(m => message.createdTimestamp - m.createdTimestamp < config.readLimits.time );
messages.forEach(m => { Moderation.checkMessageNoReturn(m); }); messages.forEach(m => { Moderation.checkMessageNoReturn(m); });
@ -287,7 +286,7 @@ async function executeFromQueue(channel: string) {
do { do {
answer = await openai.createChatCompletion({ answer = await openai.createChatCompletion({
...config.chatCompletionConfig, ...config.chatCompletionParams,
messages: OpenAImessages, messages: OpenAImessages,
// FIXME: don't use new instance of FunctionManager // FIXME: don't use new instance of FunctionManager
functions: new FunctionManager().getFunctions(), functions: new FunctionManager().getFunctions(),

View file

@ -1,6 +1,6 @@
import { ChatCompletionFunctions, ChatCompletionRequestMessage, ChatCompletionRequestMessageFunctionCall } from "openai"; import { ChatCompletionFunctions, ChatCompletionRequestMessage, ChatCompletionRequestMessageFunctionCall } from "openai";
import config from "./config"; import { config } from "./index";
type parameterMap = { type parameterMap = {
string: string, string: string,
@ -104,6 +104,6 @@ class GetTime extends OpenAIFunction<Record<string, never>> {
}; };
execute(): string { execute(): string {
return `${Intl.DateTimeFormat().resolvedOptions().timeZone}): ${new Date().toLocaleString("en-US", config.calendarConfig)}`; return `${Intl.DateTimeFormat().resolvedOptions().timeZone}): ${new Date().toLocaleString("en-US", config.calendarParams)}`;
} }
} }

View file

@ -1,11 +1,12 @@
import DiscordApi from "discord.js"; import DiscordApi from "discord.js";
import { Configuration as OpenAIApiConfiguration, OpenAIApi } from "openai"; import { Configuration as OpenAIApiConfiguration, OpenAIApi } from "openai";
import { PrismaClient } from "@prisma/client"; import { PrismaClient } from "@prisma/client";
import Typescript from "typescript";
import fs from "node:fs";
import config from "./config";
import { queueRequest } from "./execution"; import { queueRequest } from "./execution";
import InteractionManager from "./interactionManager"; import InteractionManager from "./interactionManager";
import newConfig, { IConfig, IConfigRequired } from "./configDefault";
const discord = new DiscordApi.Client({ const discord = new DiscordApi.Client({
intents: [ intents: [
DiscordApi.GatewayIntentBits.Guilds, DiscordApi.GatewayIntentBits.Guilds,
@ -14,6 +15,26 @@ const discord = new DiscordApi.Client({
] ]
}); });
/**
 * Resolve the effective configuration.
 * Tries to compile and load a user-provided ./config.ts at startup; if any
 * step fails (missing file, compile error, bad export), falls back to the
 * environment/default configuration from newConfig().
 */
function getConfig() {
  let fileConfig: IConfig | undefined = undefined;
  try {
    // Existence check: statSync throws when ./config.ts is absent, which
    // sends us straight to the catch and the default configuration.
    fs.statSync("./config.ts");
    // Compile config.ts into ./dist next to the rest of the build so the
    // require() below can resolve it at runtime.
    const program = Typescript.createProgram(
      ["./config.ts"],
      {outDir: "./dist"}
    );
    // NOTE(review): the emit result is only dumped to the console, not
    // inspected — a failed emit still falls through to require().
    console.log(program.emit(program.getSourceFile("./config.ts")));
    // eslint-disable-next-line @typescript-eslint/no-var-requires, @typescript-eslint/no-unsafe-member-access
    fileConfig = require("./config").default as IConfig;
  } catch (e) {
    //FIXME: make errors more descriptive to the enduser
    console.log(e);
  }
  // Merge the file config (when one was loaded) over the defaults.
  return fileConfig !== undefined ? newConfig(fileConfig) : newConfig();
}

/** The effective, fully-populated configuration shared across the app. */
export const config: IConfigRequired = getConfig();
export const openai = new OpenAIApi(new OpenAIApiConfiguration({ export const openai = new OpenAIApi(new OpenAIApiConfiguration({
apiKey: config.tokens.OpenAI apiKey: config.tokens.OpenAI
})); }));

View file

@ -2,7 +2,7 @@ import { Collection, InteractionResponse, Message, SnowflakeUtil } from "discord
import { openai } from "./index"; import { openai } from "./index";
import { formatRequestOrResponse } from "./toOpenAIMessages"; import { formatRequestOrResponse } from "./toOpenAIMessages";
import config from "./config"; import { config } from "./index";
export default class Moderation { export default class Moderation {
/** Represents cache of messages that have been checked aganist OpenAI moderation API. */ /** Represents cache of messages that have been checked aganist OpenAI moderation API. */
@ -60,7 +60,7 @@ export default class Moderation {
public static removeExpiredCacheEntries() { public static removeExpiredCacheEntries() {
const now = Date.now(); const now = Date.now();
for (const i of this.cache.keys()) { for (const i of this.cache.keys()) {
if (now - SnowflakeUtil.timestampFrom(i) >= config.limits.time * 2) { if (now - SnowflakeUtil.timestampFrom(i) >= config.readLimits.time * 2) {
this.cache.delete(i); this.cache.delete(i);
} }
} }

View file

@ -1,7 +1,7 @@
// https://discordjs.guide/creating-your-bot/command-deployment.html#guild-commands // https://discordjs.guide/creating-your-bot/command-deployment.html#guild-commands
import { REST, RESTGetAPIOAuth2CurrentApplicationResult, RESTPostAPIApplicationCommandsJSONBody, Routes } from "discord.js"; import { REST, RESTGetAPIOAuth2CurrentApplicationResult, RESTPostAPIApplicationCommandsJSONBody, Routes } from "discord.js";
import config from "../config"; import { config } from "../index";
import requireDirectory from "require-directory"; import requireDirectory from "require-directory";
import Command from "../command"; import Command from "../command";

View file

@ -2,7 +2,7 @@ import { ChatCompletionRequestMessage as OpenAIMessage } from "openai";
import { Collection, Message as DiscordMessage, InteractionResponse } from "discord.js"; import { Collection, Message as DiscordMessage, InteractionResponse } from "discord.js";
import FoldToAscii from "fold-to-ascii"; import FoldToAscii from "fold-to-ascii";
import config from "./config"; import { config } from "./index";
import countTokens from "./tokenCounter"; import countTokens from "./tokenCounter";
import { RequestMessage } from "./execution"; import { RequestMessage } from "./execution";
@ -107,7 +107,7 @@ export default function toOpenAIMessages(
const content = formatMessage(message); const content = formatMessage(message);
// FIXME: tokens are not being counted properly (it's lower than it is) but it's enough for me for now. // FIXME: tokens are not being counted properly (it's lower than it is) but it's enough for me for now.
tokenCount += countTokens(content); tokenCount += countTokens(content);
if (tokenCount > config.limits.tokens) break; if (tokenCount > config.readLimits.tokens) break;
rvalue.push({ rvalue.push({
role: message.author.id === message.client.user.id ? "assistant" : "user", role: message.author.id === message.client.user.id ? "assistant" : "user",
content: content, content: content,
@ -115,7 +115,7 @@ export default function toOpenAIMessages(
}); });
} }
rvalue.push(...config.systemPrompt().reverse()); rvalue.push(...config.systemPrompt([...messages][0]).reverse());
return rvalue.reverse(); return rvalue.reverse();
} }

4
tsconfig.eslint.json Normal file
View file

@ -0,0 +1,4 @@
{
"include": ["**/*"],
"extends": ["./tsconfig.json"]
}

View file

@ -1,13 +1,16 @@
{ {
"include": [
"./src/**/*"
],
"compilerOptions": { "compilerOptions": {
"target": "ES2022", "target": "ES2022",
"module": "commonjs", "module": "CommonJS",
"sourceMap": true, "sourceMap": true,
"outDir": "./dist/", "outDir": "./dist/",
"rootDir": "./src/", "rootDir": "./src/",
"strict": true, "strict": true,
"moduleResolution": "node", "moduleResolution": "node",
"esModuleInterop": true, "esModuleInterop": true,
"resolveJsonModule": true, "resolveJsonModule": true
} }
} }