// 82 lines · 2.8 KiB · TypeScript (extraction metadata)
import { Message } from "discord.js";
|
|
import {
|
|
ChatCompletionMessageParam as OpenAIMessage,
|
|
ChatCompletionCreateParamsNonStreaming as ChatCompletionRequestData,
|
|
} from "openai/resources/chat";
|
|
|
|
import IQuota from "./IQuota";
|
|
import MessageCount from "./quota/messageCount";
|
|
|
|
export interface IConfigRequired {
  /** Tokens to authenticate with */
  readonly tokens: {
    readonly Discord: string;
    readonly OpenAI: string;
  };
  /** Messages to append at the start of every chat history when sending to API */
  systemPrompt(context: Message): OpenAIMessage[];
  /** OpenAI model config */
  readonly chatCompletionParams: Omit<ChatCompletionRequestData, "messages" | "function_call" | "functions" | "n">;
  /** Limits for message selection */
  readonly readLimits: {
    /** Maximum message age to include (in milliseconds) */
    readonly time: number;
    /** Maximum number of messages to select (maximum 100) */
    readonly messages: number;
    /** Maximum total token usage for messages (counted locally) */
    readonly tokens: number;
  };
  /**
   * Quota parameters to use when checking limits;
   * implementations can be found in `./quota`
   */
  readonly quota: IQuota;
}
|
/**
 * User-facing configuration: every field of {@link IConfigRequired} is
 * optional; fields left out fall back to the built-in defaults.
 */
export type IConfig = Partial<IConfigRequired>;
|
export default function newConfig(config?: IConfig): IConfigRequired {
|
|
return { ...defaultConfig, ...config };
|
|
}
|
|
|
|
function envAsString(key: string): string | undefined {
|
|
key = key.toLocaleUpperCase();
|
|
return process.env[key];
|
|
}
|
|
|
|
function envAsNumber(key: string): number | undefined {
|
|
key = key.toLocaleUpperCase();
|
|
const value = Number(process.env[key]);
|
|
return !Number.isNaN(value) ? value : undefined;
|
|
}
|
|
|
|
/**
 * Built-in defaults for every configuration field. Each value can be
 * overridden via environment variables (double underscore separates
 * nesting levels, e.g. TOKENS__DISCORD) or programmatically through
 * `newConfig()`.
 */
const defaultConfig: IConfigRequired = {
  tokens: {
    // Empty string when unset — authentication will then fail at runtime.
    Discord: envAsString("TOKENS__DISCORD") ?? "",
    OpenAI: envAsString("TOKENS__OPENAI") ?? "",
  },
  // Default system prompt ignores the message context parameter.
  systemPrompt(): OpenAIMessage[] {
    return [
      {
        role: "system",
        content:
          envAsString("SYSTEM_PROMPT") ??
          `You are GPTcord, an AI built on top of ChatGPT (a large language model trained by OpenAI) for Discord. Answer as concisely as possible.`,
      },
    ];
  },
  chatCompletionParams: {
    model: envAsString("CHAT_COMPLETION_PARAMS__MODEL") ?? "gpt-3.5-turbo",
    max_tokens: envAsNumber("CHAT_COMPLETION_PARAMS__MAX_TOKENS") ?? 384,
    // The fields below stay undefined when their env var is unset,
    // deferring to the API's own defaults.
    frequency_penalty: envAsNumber("CHAT_COMPLETION_PARAMS__FREQUENCY_PENALTY"),
    presence_penalty: envAsNumber("CHAT_COMPLETION_PARAMS__PRESENCE_PENALTY"),
    temperature: envAsNumber("CHAT_COMPLETION_PARAMS__TEMPERATURE"),
    top_p: envAsNumber("CHAT_COMPLETION_PARAMS__TOP_P"),
  },
  readLimits: {
    // One hour, in milliseconds.
    time: envAsNumber("READ_LIMITS__TIME") ?? 60 * 60 * 1000,
    messages: envAsNumber("READ_LIMITS__MESSAGES") ?? 50,
    tokens: envAsNumber("READ_LIMITS__TOKENS") ?? 2048,
  },
  quota: new MessageCount(envAsNumber("QUOTA__DEFAULT_QUOTA"), envAsNumber("QUOTA__LOOKBACK"))
};