Configuration: refactor how it is written and handled
parent 13e993b964
commit 7ff4abc3c0

10 changed files with 130 additions and 61 deletions
.gitignore (vendored, 2 changes)

@@ -1,4 +1,4 @@
 dist
 node_modules
-src/config.*
+config.*
 .env
README.md

@@ -1,3 +1,10 @@
 # GPTcord
 
 Connect ChatGPT to your Discord chat! This is a direct bridge to bring ChatGPT to discord.
+
+## Configuration
+
+The project can be configured in two ways:
+
+1. A `config.ts` file in the project root that exports an `IConfig`. That interface can be imported from [`./src/configDefault`](./src/configDefault.ts).
+2. Environment variables, as defined in [configDefault.ts](./src/configDefault.ts).
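For illustration only (not part of the commit): a minimal root-level `config.ts` of the kind the README describes might look like the sketch below. The field names come from `IConfigRequired` in `src/configDefault.ts`; the token and limit values are placeholders.

```ts
// config.ts (project root): hypothetical example, not part of this commit
import { IConfig } from "./src/configDefault";

const config: IConfig = {
  tokens: {
    Discord: "your-discord-bot-token",
    OpenAI: "your-openai-api-key",
  },
  userLimits: {
    requests: 50, // raise the per-user request limit above the default of 25
  },
};

export default config;
```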
src/configDefault.ts (new file, 91 lines)

@@ -0,0 +1,91 @@
+import { Message } from "discord.js";
+import {
+  ChatCompletionRequestMessage as OpenAIMessage,
+  CreateChatCompletionRequest as ChatCompletionRequestData,
+} from "openai";
+
+export interface IConfigRequired {
+  readonly calendarParams: Intl.DateTimeFormatOptions;
+  /** Tokens to authenticate with */
+  readonly tokens: {
+    readonly Discord: string;
+    readonly OpenAI: string;
+  };
+  /** Messages to append at the start of every chat history when sending to the API */
+  systemPrompt(context: Message): OpenAIMessage[];
+  /** OpenAI model config */
+  readonly chatCompletionParams: Omit<ChatCompletionRequestData, "messages" | "function_call" | "functions" | "n">;
+  /** Limits for message selection */
+  readonly readLimits: {
+    /** Maximum message age to include (in milliseconds) */
+    readonly time: number;
+    /** Maximum number of messages to select (maximum 100) */
+    readonly messages: number;
+    /** Maximum total token usage for messages (counted locally) */
+    readonly tokens: number;
+  };
+  /** Default limits for users interacting with the bot */
+  readonly userLimits: {
+    /** How many requests a user can make if not overridden by a database entry */
+    readonly requests: number;
+  };
+}
+
+export type IConfig = Partial<IConfigRequired>;
+
+export default function newConfig(config?: IConfig): IConfigRequired {
+  return { ...defaultConfig, ...config };
+}
+
+function envAsString(key: string): string | undefined {
+  key = key.toLocaleUpperCase();
+  return process.env[key];
+}
+
+function envAsNumber(key: string): number | undefined {
+  key = key.toLocaleUpperCase();
+  const value = Number(process.env[key]);
+  return !Number.isNaN(value) ? value : undefined;
+}
+
+const defaultConfig: IConfigRequired = {
+  calendarParams: {
+    weekday: "short",
+    year: "numeric",
+    month: "short",
+    day: "numeric",
+    hour: "2-digit",
+    minute: "2-digit",
+    hour12: false,
+  },
+  tokens: {
+    Discord: envAsString("TOKENS__DISCORD") ?? "",
+    OpenAI: envAsString("TOKENS__OPENAI") ?? "",
+  },
+  systemPrompt(): OpenAIMessage[] {
+    return [
+      {
+        role: "system",
+        content:
+          envAsString("SYSTEM_PROMPT") ??
+          `You are GPTcord, an AI built on top of ChatGPT (a large language model trained by OpenAI) for Discord. Answer as concisely as possible.`,
+      },
+    ];
+  },
+  chatCompletionParams: {
+    model: envAsString("CHAT_COMPLETION_PARAMS__MODEL") ?? "gpt-3.5-turbo",
+    max_tokens: envAsNumber("CHAT_COMPLETION_PARAMS__MAX_TOKENS") ?? 384,
+    frequency_penalty: envAsNumber("CHAT_COMPLETION_PARAMS__FREQUENCY_PENALTY"),
+    presence_penalty: envAsNumber("CHAT_COMPLETION_PARAMS__PRESENCE_PENALTY"),
+    temperature: envAsNumber("CHAT_COMPLETION_PARAMS__TEMPERATURE"),
+    top_p: envAsNumber("CHAT_COMPLETION_PARAMS__TOP_P"),
+  },
+  readLimits: {
+    time: envAsNumber("READ_LIMITS__TIME") ?? 60 * 60 * 1000,
+    messages: envAsNumber("READ_LIMITS__MESSAGES") ?? 50,
+    tokens: envAsNumber("READ_LIMITS__TOKENS") ?? 2048,
+  },
+  userLimits: {
+    requests: envAsNumber("USER_LIMITS__REQUESTS") ?? 25,
+  },
+};
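A side note, not part of the diff: the double-underscore environment variable names above map onto the nested config fields, so a deployment can skip `config.ts` entirely and set, for example, `TOKENS__DISCORD`, `TOKENS__OPENAI`, or `READ_LIMITS__TOKENS` before starting the bot. The exported `newConfig` helper then layers an optional partial config over those defaults; a small sketch with made-up override values:

```ts
import newConfig from "./src/configDefault";

// Anything not overridden here falls back to defaultConfig, which in turn
// falls back to the environment variables (or the built-in values).
const config = newConfig({
  readLimits: { time: 30 * 60 * 1000, messages: 25, tokens: 1024 },
});

console.log(config.chatCompletionParams.model); // "gpt-3.5-turbo" unless CHAT_COMPLETION_PARAMS__MODEL is set
```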
(previous config template, 49 deletions)

@@ -1,49 +0,0 @@
-import { ChatCompletionRequestMessage as OpenAIMessage , CreateChatCompletionRequest as ChatCompletionRequestData } from "openai";
-
-// Don't forget to rename the file to config.ts
-
-export default class config {
-  static readonly calendarConfig: Intl.DateTimeFormatOptions = {
-    weekday: "short",
-    year: "numeric",
-    month: "short",
-    day: "numeric",
-    hour: "2-digit",
-    minute: "2-digit",
-    hour12: false,
-  };
-
-  /** Tokens to authenticate with */
-  static readonly tokens = {
-    Discord: "Discord token here",
-    OpenAI: "OpenAI token here",
-  };
-
-  /** Messages to append at the start of every chat when sending to API */
-  static systemPrompt(): OpenAIMessage[] {
-    return [
-      { role: "system", content: `You are GPTcord, an AI built on top of ChatGPT (a large language model trained by OpenAI) for Discord. Answer as concisely as possible.` }
-    ];
-  }
-
-  /** OpenAI model config */
-  static readonly chatCompletionConfig: Omit<ChatCompletionRequestData, "messages"> = {
-    model: "gpt-3.5-turbo",
-    max_tokens: 384,
-  };
-
-  /** limits for message selection */
-  static readonly readLimits = {
-    /** maximum time in the past for messages (in miliseconds) */
-    time: 60*60*1000,
-    /** maximum number of messages to select (maximum 100) */
-    messages: 50,
-    /** maximum total token usage for messages */
-    tokens: 2048,
-  };
-  /** default user limits */
-  static readonly userLimits = {
-    /** how much requests can an user make if it's not overriden in database entry */
-    requests: 25,
-  };
-}
src/execution.ts

@@ -2,9 +2,8 @@ import DiscordApi, { GuildTextBasedChannel, TextBasedChannel } from "discord.js";
 import { ChatCompletionRequestMessage, ChatCompletionResponseMessage } from "openai";
 import Axios from "axios";
 
-import { database, openai } from "./index";
+import { database, openai, config } from "./index";
 import Moderation from "./moderation";
-import config from "./config";
 import toOpenAIMessages from "./toOpenAIMessages";
 import FunctionManager from "./funcitonManager";
 
@@ -287,7 +286,7 @@ async function executeFromQueue(channel: string) {
 
   do {
     answer = await openai.createChatCompletion({
-      ...config.chatCompletionConfig,
+      ...config.chatCompletionParams,
       messages: OpenAImessages,
       // FIXME: don't use new instance of FunctionManager
       functions: new FunctionManager().getFunctions(),
src/funcitonManager.ts

@@ -1,6 +1,6 @@
 import { ChatCompletionFunctions, ChatCompletionRequestMessage, ChatCompletionRequestMessageFunctionCall } from "openai";
 
-import config from "./config";
+import { config } from "./index";
 
 type parameterMap = {
   string: string,

@@ -104,6 +104,6 @@ class GetTime extends OpenAIFunction<Record<string, never>> {
   };
 
   execute(): string {
-    return `${Intl.DateTimeFormat().resolvedOptions().timeZone}): ${new Date().toLocaleString("en-US", config.calendarConfig)}`;
+    return `${Intl.DateTimeFormat().resolvedOptions().timeZone}): ${new Date().toLocaleString("en-US", config.calendarParams)}`;
   }
 }
src/index.ts (25 changes)

@@ -1,11 +1,12 @@
 import DiscordApi from "discord.js";
 import { Configuration as OpenAIApiConfiguration, OpenAIApi } from "openai";
 import { PrismaClient } from "@prisma/client";
+import Typescript from "typescript";
+import fs from "node:fs";
 
-import config from "./config";
 import { queueRequest } from "./execution";
 import InteractionManager from "./interactionManager";
-
+import newConfig, { IConfig, IConfigRequired } from "./configDefault";
 const discord = new DiscordApi.Client({
   intents: [
     DiscordApi.GatewayIntentBits.Guilds,

@@ -14,6 +15,26 @@ const discord = new DiscordApi.Client({
   ]
 });
 
+function getConfig() {
+  let fileConfig: IConfig | undefined = undefined;
+  try {
+    fs.statSync("./config.ts");
+    const program = Typescript.createProgram(
+      ["./config.ts"],
+      { outDir: "./dist" }
+    );
+    console.log(program.emit(program.getSourceFile("./config.ts")));
+    // eslint-disable-next-line @typescript-eslint/no-var-requires, @typescript-eslint/no-unsafe-member-access
+    fileConfig = require("./config").default as IConfig;
+  } catch (e) {
+    // FIXME: make errors more descriptive to the end user
+    console.log(e);
+  }
+  return fileConfig !== undefined ? newConfig(fileConfig) : newConfig();
+}
+
+export const config: IConfigRequired = getConfig();
+
 export const openai = new OpenAIApi(new OpenAIApiConfiguration({
   apiKey: config.tokens.OpenAI
 }));
src/moderation.ts

@@ -2,7 +2,7 @@ import { Collection, InteractionResponse, Message, SnowflakeUtil } from "discord.js";
 
 import { openai } from "./index";
 import { formatRequestOrResponse } from "./toOpenAIMessages";
-import config from "./config";
+import { config } from "./index";
 
 export default class Moderation {
   /** Represents cache of messages that have been checked aganist OpenAI moderation API. */
(command registration script)

@@ -1,7 +1,7 @@
 // https://discordjs.guide/creating-your-bot/command-deployment.html#guild-commands
 
 import { REST, RESTGetAPIOAuth2CurrentApplicationResult, RESTPostAPIApplicationCommandsJSONBody, Routes } from "discord.js";
-import config from "../config";
+import { config } from "../index";
 import requireDirectory from "require-directory";
 
 import Command from "../command";
src/toOpenAIMessages.ts

@@ -2,7 +2,7 @@ import { ChatCompletionRequestMessage as OpenAIMessage } from "openai";
 import { Collection, Message as DiscordMessage, InteractionResponse } from "discord.js";
 import FoldToAscii from "fold-to-ascii";
 
-import config from "./config";
+import { config } from "./index";
 import countTokens from "./tokenCounter";
 import { RequestMessage } from "./execution";
 

@@ -115,7 +115,7 @@ export default function toOpenAIMessages(
   });
   }
 
-  rvalue.push(...config.systemPrompt().reverse());
+  rvalue.push(...config.systemPrompt([...messages][0]).reverse());
 
   return rvalue.reverse();
 }
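Because `systemPrompt` now takes a `Message` argument (the diff above passes it the first element of the selected message collection), a user-supplied `config.ts` could tailor the prompt to the context. A hypothetical sketch, not part of this commit:

```ts
import { Message } from "discord.js";
import { ChatCompletionRequestMessage as OpenAIMessage } from "openai";
import { IConfig } from "./src/configDefault";

const config: IConfig = {
  // Build the system prompt from the context message, e.g. the server name.
  systemPrompt(context: Message): OpenAIMessage[] {
    return [
      {
        role: "system",
        content: `You are GPTcord, chatting in ${context.guild?.name ?? "a direct message"}. Answer as concisely as possible.`,
      },
    ];
  },
};

export default config;
```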