Also renamed "limits" to "quota". I believe this new approach will make it easier for me and for bot hosters to add, implement, or change quota behavior. Reimplemented the existing "message count" limit on top of the new IQuota interface, refactoring the code *a little*.
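For context, here is a rough sketch of the idea behind a pluggable quota. The shape of IQuota is not shown in this diff, so the method name and signature below are my assumptions, not the actual interface from `./IQuota`:

// Hypothetical sketch only: the real IQuota interface lives in ./IQuota
// and may differ. This just illustrates why a pluggable quota makes it
// easy for bot hosters to swap behaviors.
import { Message } from "discord.js";

interface IQuota {
  // Decide whether this message is still within the user's quota.
  hasQuota(message: Message): Promise<boolean>;
}

// A trivial alternative to the bundled MessageCount quota: no limit at all.
class Unlimited implements IQuota {
  async hasQuota(): Promise<boolean> {
    return true;
  }
}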
92 lines · 3 KiB · TypeScript
import { Message } from "discord.js";
import {
  ChatCompletionRequestMessage as OpenAIMessage,
  CreateChatCompletionRequest as ChatCompletionRequestData,
} from "openai";

import IQuota from "./IQuota";
import MessageCount from "./quota/messageCount";

export interface IConfigRequired {
  readonly calendarParams: Intl.DateTimeFormatOptions;
  /** Tokens to authenticate with */
  readonly tokens: {
    readonly Discord: string;
    readonly OpenAI: string;
  };
  /** Messages to append at the start of every chat history when sending to the API */
  systemPrompt(context: Message): OpenAIMessage[];
  /** OpenAI model config */
  readonly chatCompletionParams: Omit<ChatCompletionRequestData, "messages" | "function_call" | "functions" | "n">;
  /** Limits for message selection */
  readonly readLimits: {
    /** Maximum message age to include (in milliseconds) */
    readonly time: number;
    /** Maximum number of messages to select (at most 100) */
    readonly messages: number;
    /** Maximum total token usage for messages (counted locally) */
    readonly tokens: number;
  };
  /**
   * Quota parameters to use when checking limits;
   * you can find some implementations in `./quota`
   */
  readonly quota: IQuota;
}

export type IConfig = Partial<IConfigRequired>;

export default function newConfig(config?: IConfig): IConfigRequired {
  return { ...defaultConfig, ...config };
}

function envAsString(key: string): string | undefined {
  key = key.toLocaleUpperCase();
  return process.env[key];
}

function envAsNumber(key: string): number | undefined {
  key = key.toLocaleUpperCase();
  const value = Number(process.env[key]);
  return !Number.isNaN(value) ? value : undefined;
}

const defaultConfig: IConfigRequired = {
  calendarParams: {
    weekday: "short",
    year: "numeric",
    month: "short",
    day: "numeric",
    hour: "2-digit",
    minute: "2-digit",
    hour12: false,
  },
  tokens: {
    Discord: envAsString("TOKENS__DISCORD") ?? "",
    OpenAI: envAsString("TOKENS__OPENAI") ?? "",
  },
  systemPrompt(): OpenAIMessage[] {
    return [
      {
        role: "system",
        content:
          envAsString("SYSTEM_PROMPT") ??
          `You are GPTcord, an AI built on top of ChatGPT (a large language model trained by OpenAI) for Discord. Answer as concisely as possible.`,
      },
    ];
  },
  chatCompletionParams: {
    model: envAsString("CHAT_COMPLETION_PARAMS__MODEL") ?? "gpt-3.5-turbo",
    max_tokens: envAsNumber("CHAT_COMPLETION_PARAMS__MAX_TOKENS") ?? 384,
    frequency_penalty: envAsNumber("CHAT_COMPLETION_PARAMS__FREQUENCY_PENALTY"),
    presence_penalty: envAsNumber("CHAT_COMPLETION_PARAMS__PRESENCE_PENALTY"),
    temperature: envAsNumber("CHAT_COMPLETION_PARAMS__TEMPERATURE"),
    top_p: envAsNumber("CHAT_COMPLETION_PARAMS__TOP_P"),
  },
  readLimits: {
    time: envAsNumber("READ_LIMITS__TIME") ?? 60 * 60 * 1000,
    messages: envAsNumber("READ_LIMITS__MESSAGES") ?? 50,
    tokens: envAsNumber("READ_LIMITS__TOKENS") ?? 2048,
  },
  quota: new MessageCount(envAsNumber("QUOTA__DEFAULT_QUOTA"), envAsNumber("QUOTA__LOOKBACK")),
};